With 32-bit Python 2.7 you are limited by how much memory the process can address (roughly 2 GB), so a very large image cannot be loaded into RAM in one piece. One option is to read the image in chunks, resize each chunk, and reassemble the resized chunks into an image that requires far less RAM.
I recommend using the libtiff (pylibtiff) and OpenCV (cv2) packages for that:
import os
os.environ["PATH"] += os.pathsep + "C:\\Program Files (x86)\\GnuWin32\\bin"
import numpy as np
import libtiff
import cv2

tif = libtiff.TIFF.open("HUGETIFFILE.tif", 'r')
width = tif.GetField("ImageWidth")
height = tif.GetField("ImageLength")
bits = tif.GetField('BitsPerSample')
sample_format = tif.GetField('SampleFormat')

ResizeFactor = 10  # reduce each image dimension by a factor of 10
Chunks = 8         # read the image in 8 chunks to prevent a MemoryError
                   # (can be increased for bigger files)

ReadStrip = tif.ReadEncodedStrip
typ = tif.get_numpy_type(bits, sample_format)

# Start with a single placeholder row so vstack has something to stack onto;
# it is removed again after the loop.
newarr = np.zeros((1, width / ResizeFactor), typ)

for ii in range(0, Chunks):
    pos = 0
    # Buffer holding one horizontal slice (1/Chunks of the image height)
    arr = np.empty((height / Chunks, width), typ)
    size = arr.nbytes

    # Decode only the strips belonging to this chunk directly into the buffer
    for strip in range(ii * tif.NumberOfStrips() / Chunks,
                       (ii + 1) * tif.NumberOfStrips() / Chunks):
        elem = ReadStrip(strip, arr.ctypes.data + pos, max(size - pos, 0))
        pos = pos + elem

    # Downscale the chunk before moving on, so only the small version is kept
    resized = cv2.resize(arr, (0, 0),
                         fx=1.0 / ResizeFactor, fy=1.0 / ResizeFactor)

    # Remove the large array to free up memory for the next chunk
    del arr

    # Recombine the individual resized chunks into the final resized image
    newarr = np.vstack((newarr, resized))

# Drop the placeholder first row and write the reassembled image
newarr = np.delete(newarr, (0), axis=0)
cv2.imwrite('resized.tif', newarr)
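
As a minimal sanity check (assuming newarr, height, width and ResizeFactor from the snippet above are still in scope), you can compare the result's shape against the expected downscaled dimensions before relying on the written file:

print newarr.shape  # expect roughly (height / ResizeFactor, width / ResizeFactor)
print newarr.dtype  # should match the TIFF's sample type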