Merge pull request #661 from rtburns-jpl/numpy-int

Replace deprecated numpy types with native types
LT1AB
Ryan Burns 2023-02-21 15:56:00 -08:00 committed by GitHub
commit 735fba0bdb
27 changed files with 107 additions and 107 deletions
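
Background for the change: np.int, np.float, and np.bool were plain aliases of the Python builtins; NumPy deprecated them in 1.20 and removed them in 1.24, so any remaining use raises AttributeError on current NumPy. A minimal sketch of the substitution pattern applied throughout the diff below, using an illustrative array that is not taken from the codebase:

import numpy as np

x = np.array([1.4, 2.6, 3.5])            # illustrative data only, not from ISCE2

ind = int(np.around(x[0]))               # scalar cast, replaces np.int(...)
mask = (x != 0).astype(int)              # array cast, replaces .astype(np.int)
flags = np.zeros(x.shape, dtype=bool)    # dtype argument, replaces dtype=np.bool
azt = np.zeros((3, 3), dtype=float)      # dtype argument, replaces dtype=np.float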

View File

@@ -81,7 +81,7 @@ class orbit_info:
     def getBaseline(self, secondary):
         '''Compute baseline between current object and another orbit object.'''
-        ind = np.int(self.nvec/2)
+        ind = int(self.nvec/2)
         mpos = np.array(self.pos[ind])
         mvel = np.array(self.vel[ind])
@@ -93,7 +93,7 @@ class orbit_info:
         vvec = np.cross(crp, rvec)
         mvel = np.linalg.norm(mvel)
-        ind = np.int(secondary.nvec/2) #First guess
+        ind = int(secondary.nvec/2) #First guess
         spos = np.array(secondary.pos[ind])
         svel = np.array(secondary.vel[ind])
         svel = np.linalg.norm(svel)
@@ -101,7 +101,7 @@ class orbit_info:
         dx = spos - mpos;
         z_offset = secondary.prf*np.dot(dx, vvec)/mvel
-        ind = np.int(ind - z_offset) #Refined estimate
+        ind = int(ind - z_offset) #Refined estimate
         spos = secondary.pos[ind]
         svel = secondary.vel[ind]
         svel = np.linalg.norm(svel)

View File

@@ -626,7 +626,7 @@ def cal_coherence(inf, win=5, edge=0):
     if win % 2 != 1:
         raise Exception('window size must be odd!')
-    hwin = np.int(np.around((win - 1) / 2))
+    hwin = int(np.around((win - 1) / 2))
     filt = np.ones((win, win))
     amp = np.absolute(inf)

View File

@@ -25,7 +25,7 @@ def runDownloadDem(self):
     bboxGeo = getBboxGeo(referenceTrack)
     bbox = np.array(bboxGeo)
-    bboxStr = '{} {} {} {}'.format(np.int(np.floor(bbox[0])), np.int(np.ceil(bbox[1])), np.int(np.floor(bbox[2])), np.int(np.ceil(bbox[3])))
+    bboxStr = '{} {} {} {}'.format(int(np.floor(bbox[0])), int(np.ceil(bbox[1])), int(np.floor(bbox[2])), int(np.ceil(bbox[3])))
     #get 1 arcsecond dem for coregistration
@@ -92,7 +92,7 @@ def runDownloadDem(self):
     #cmd = 'wbd.py {}'.format(bboxStr)
     #runCmd(cmd)
-    download_wbd(np.int(np.floor(bbox[0])), np.int(np.ceil(bbox[1])), np.int(np.floor(bbox[2])), np.int(np.ceil(bbox[3])))
+    download_wbd(int(np.floor(bbox[0])), int(np.ceil(bbox[1])), int(np.floor(bbox[2])), int(np.ceil(bbox[3])))
     #cmd = 'fixImageXml.py -i swbdLat_*_*_Lon_*_*.wbd -f'
     #runCmd(cmd)
     #cmd = 'rm *.log'
@@ -151,7 +151,7 @@ def downloadDem(bbox, demType='version3', resolution=1, fillingValue=-32768, out
     ds.setFillingValue(fillingValue)
     ds.setFilling()
-    bbox = [np.int(np.floor(bbox[0])), np.int(np.ceil(bbox[1])), np.int(np.floor(bbox[2])), np.int(np.ceil(bbox[3]))]
+    bbox = [int(np.floor(bbox[0])), int(np.ceil(bbox[1])), int(np.floor(bbox[2])), int(np.ceil(bbox[3]))]
     if outputFile==None:
         outputFile = ds.defaultName(bbox)

View File

@@ -441,11 +441,11 @@ def adaptive_gaussian_v0(ionos, wgt, size_max, size_min):
     #sigma of window size: size_max
     sigma = size_max / 2.0
     for i in range(size_num):
-        size2 = np.int(np.around(size[i]))
+        size2 = int(np.around(size[i]))
         if size2 % 2 == 0:
             size2 += 1
         if (i+1) % 10 == 0:
-            print('min win: %4d, max win: %4d, current win: %4d'%(np.int(np.around(size_min)), np.int(np.around(size_max)), size2))
+            print('min win: %4d, max win: %4d, current win: %4d'%(int(np.around(size_min)), int(np.around(size_max)), size2))
         g2d = gaussian(size2, sigma*size2/size_max, scale=1.0)
         scale = ss.fftconvolve(wgt, g2d, mode='same')
         flt[:, :, i] = ss.fftconvolve(ionos*wgt, g2d, mode='same') / (scale + (scale==0))

View File

@@ -188,7 +188,7 @@ class Radarsat2_GRD(Component):
         gcps = self.readGCPsFromXML()
         #print('gcps=',gcps)
-        azt = np.zeros((len(gcps),3), dtype=np.float)
+        azt = np.zeros((len(gcps),3), dtype=float)
         nvalid = 0
         for ind,gcp in enumerate(gcps):
             try:

View File

@@ -227,7 +227,7 @@ class Terrasar_GRD(Component):
         #gcps = self.readGCPsFromXML()
         #print('gcps=',gcps)
-        #azt = np.zeros((len(gcps),3), dtype=np.float)
+        #azt = np.zeros((len(gcps),3), dtype=float)
         #nvalid = 0
         #for ind,gcp in enumerate(gcps):
             #try:

View File

@@ -490,7 +490,7 @@ class Sentinel1(Component):
         for index in indices:
             aslice = slices[index]
-            offset = np.int(np.rint((aslice.product.bursts[0].burstStartUTC - t0).total_seconds() / burstStartInterval.total_seconds()))
+            offset = int(np.rint((aslice.product.bursts[0].burstStartUTC - t0).total_seconds() / burstStartInterval.total_seconds()))
             for kk in range(aslice.product.numberOfBursts):
                 #####Skip appending if burst also exists from previous scene

View File

@@ -282,8 +282,8 @@ class TOPSSwathSLCProduct(Component):
         from isceobj.Util.Poly2D import Poly2D
         ####TOPS steering component of the azimuth carrier
-        x = np.arange(0, burst.numberOfSamples,xstep,dtype=np.int)
-        y = np.arange(0, burst.numberOfLines, ystep, dtype=np.int)
+        x = np.arange(0, burst.numberOfSamples,xstep,dtype=int)
+        y = np.arange(0, burst.numberOfLines, ystep, dtype=int)
         xx,yy = np.meshgrid(x,y)

View File

@@ -128,13 +128,13 @@ def cropFrame(frame, limits, outname, israw=False):
     ####sensing start
     ymin = np.floor( (limits[0] - frame.sensingStart).total_seconds() * frame.PRF)
     print('Line start: ', ymin)
-    ymin = np.int( np.clip(ymin, 0, frame.numberOfLines-1))
+    ymin = int(np.clip(ymin, 0, frame.numberOfLines-1))
     ####sensing stop
     ymax = np.ceil( (limits[1] - frame.sensingStart).total_seconds() * frame.PRF) + 1
     print('Line stop: ', ymax)
-    ymax = np.int( np.clip(ymax, 1, frame.numberOfLines))
+    ymax = int( np.clip(ymax, 1, frame.numberOfLines))
     print('Line limits: ', ymin, ymax)
     print('Original Line Limits: ', 0, frame.numberOfLines)
@@ -152,13 +152,13 @@ def cropFrame(frame, limits, outname, israw=False):
     ####starting range
     xmin = np.floor( (limits[2] - frame.startingRange)/frame.instrument.rangePixelSize)
     print('Pixel start: ', xmin)
-    xmin = np.int(np.clip(xmin, 0, (frame.image.width//factor)-1))
+    xmin = int(np.clip(xmin, 0, (frame.image.width//factor)-1))
     ####far range
     xmax = np.ceil( (limits[3] - frame.startingRange)/frame.instrument.rangePixelSize)+1
     print('Pixel stop: ', xmax)
-    xmax = np.int(np.clip(xmax, 1, frame.image.width//factor))
+    xmax = int(np.clip(xmax, 1, frame.image.width//factor))
     print('Pixel limits: ', xmin, xmax)
     print('Original Pixel Limits: ', 0, frame.image.width//factor)

View File

@@ -155,8 +155,8 @@ class VRTConstructor(object):
         for ind, burst in enumerate(swath.prod.bursts):
-            xoff = np.int(np.round( (burst.startingRange - self.rref)/self.dr))
-            yoff = np.int(np.round( (burst.sensingStart - self.tref).total_seconds() / self.dt))
+            xoff = int(np.round( (burst.startingRange - self.rref)/self.dr))
+            yoff = int(np.round( (burst.sensingStart - self.tref).total_seconds() / self.dt))
             infile = filelist[ind]
             self.addBurst( burst, infile, yoff, xoff, band=band, validOnly=validOnly)

View File

@@ -260,12 +260,12 @@ def removeHammingWindow(inputfile, outputfile, bandwidth, samplingRate, alpha, v
     #fft length
     nfft = next_pow2(width)
     #Hamming window length
-    nwin = np.int(np.around(bandwidth / samplingRate*nfft))
+    nwin = int(np.around(bandwidth / samplingRate*nfft))
     #make it a even number, since we are going to use even fft length
     nwin = ((nwin+1)//2)*2
     #the starting and ending index of window in the spectrum
-    start = np.int(np.around((nfft - nwin) / 2))
-    end = np.int(np.around(start + nwin - 1))
+    start = int(np.around((nfft - nwin) / 2))
+    end = int(np.around(start + nwin - 1))
     hammingWindow = alpha - (1.0-alpha) * np.cos(np.linspace(-np.pi, np.pi, num=nwin, endpoint=True))
     hammingWindow = 1.0/np.fft.fftshift(hammingWindow)
     spec = np.fft.fft(slc, n=nfft, axis=1)
@@ -649,7 +649,7 @@ def cal_coherence(inf, win=5, edge=0):
     if win % 2 != 1:
         raise Exception('window size must be odd!')
-    hwin = np.int(np.around((win - 1) / 2))
+    hwin = int(np.around((win - 1) / 2))
     filt = np.ones((win, win))
     amp = np.absolute(inf)
@@ -849,7 +849,7 @@ def snaphuUnwrap(self, xmlDirname, wrapName, corrfile, unwrapName, nrlks, nalks,
     tmid = tstart + 0.5*(tend - tstart)
     #14-APR-2018
-    burst_index = np.int(np.around(len(ifg.bursts)/2))
+    burst_index = int(np.around(len(ifg.bursts)/2))
     orbit = ifg.bursts[burst_index].orbit
     peg = orbit.interpolateOrbit(tmid, method='hermite')
@@ -978,8 +978,8 @@ def multilook_unw(self, ionParam, mergedDirname):
     os.rename(filename0, filename)
     #multi-looking
-    nrlks = np.int(np.around(ionParam.numberRangeLooks / ionParam.numberRangeLooks0))
-    nalks = np.int(np.around(ionParam.numberAzimuthLooks / ionParam.numberAzimuthLooks0))
+    nrlks = int(np.around(ionParam.numberRangeLooks / ionParam.numberRangeLooks0))
+    nalks = int(np.around(ionParam.numberAzimuthLooks / ionParam.numberAzimuthLooks0))
     #coherence
     if dirx == ionParam.lowerDirname:
         corName0 = os.path.join(oridir, self._insar.correlationFilename)
@@ -987,15 +987,15 @@ def multilook_unw(self, ionParam, mergedDirname):
     corimg.load(corName0 + '.xml')
     width = corimg.width
     length = corimg.length
-    widthNew = np.int(width / nrlks)
-    lengthNew = np.int(length / nalks)
+    widthNew = int(width / nrlks)
+    lengthNew = int(length / nalks)
     cor0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
     amp0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
     wgt = cor0**2
     a = multilook(wgt, nalks, nrlks)
     b = multilook(cor0, nalks, nrlks)
     c = multilook(amp0**2, nalks, nrlks)
-    d = multilook((cor0!=0).astype(np.int), nalks, nrlks)
+    d = multilook((cor0!=0).astype(int), nalks, nrlks)
     #coherence after multiple looking
     cor = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
     cor[0:lengthNew*2:2, :] = np.sqrt(c / (d + (d==0)))
@@ -1359,10 +1359,10 @@ def cal_cross_ab_ramp(swathList, width, numberRangeLooks, passDirection):
     #getting x
     nswath = len(swathList)
     if nswath == 3:
-        width2 = np.int(width/numberRangeLooks)
+        width2 = int(width/numberRangeLooks)
         x = np.arange(width2) / (width2 - 1.0)
     else:
-        width2 = np.int(width/numberRangeLooks)
+        width2 = int(width/numberRangeLooks)
         #WARNING: what if the some swaths does not have bursts, and are not merged?
         # here I just simply ignore this case
         offset = swath_offset[swathList[0]-1]
@@ -1460,7 +1460,7 @@ def ionSwathBySwath(self, ionParam):
         ifg = self._insar.loadProduct( os.path.join(burstDirname, 'IW{0}.xml'.format(swath)))
         bst = [os.path.join(burstDirname, 'IW{0}'.format(swath), burstPattern%(x+1)) for x in range(minBurst, maxBurst)]
         #doing adjustment before use
-        adjustValidWithLooks([ifg], box, numberAzimuthLooks, numberRangeLooks, edge=0, avalid='strict', rvalid=np.int(np.around(numberRangeLooks/8.0)))
+        adjustValidWithLooks([ifg], box, numberAzimuthLooks, numberRangeLooks, edge=0, avalid='strict', rvalid=int(np.around(numberRangeLooks/8.0)))
         mergeBurstsVirtual([ifg], [bst], box, os.path.join(outputDirname, outputFilename+suffix))
         #take looks
@@ -1628,10 +1628,10 @@ def ionSwathBySwath(self, ionParam):
         for j in range(nBurst):
             #index after multi-looking in merged image, index starts from 1
-            first_line = np.int(np.around((burstValidBox[i][j][0] - 1) / numberAzimuthLooks + 1))
-            last_line = np.int(np.around(burstValidBox[i][j][1] / numberAzimuthLooks))
-            first_sample = np.int(np.around((burstValidBox[i][j][2] - 1) / numberRangeLooks + 1))
-            last_sample = np.int(np.around(burstValidBox[i][j][3] / numberRangeLooks))
+            first_line = int(np.around((burstValidBox[i][j][0] - 1) / numberAzimuthLooks + 1))
+            last_line = int(np.around(burstValidBox[i][j][1] / numberAzimuthLooks))
+            first_sample = int(np.around((burstValidBox[i][j][2] - 1) / numberRangeLooks + 1))
+            last_sample = int(np.around(burstValidBox[i][j][3] / numberRangeLooks))
             corMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
                 corList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
@@ -1748,7 +1748,7 @@ def grd2ion(self, ionParam):
     reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
     minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
     #no problem with this index at all
-    midBurst = np.int(np.around((minBurst+ maxBurst-1) / 2.0))
+    midBurst = int(np.around((minBurst+ maxBurst-1) / 2.0))
     masBurst = reference.bursts[midBurst]
     #satellite height
     satHeight = np.linalg.norm(masBurst.orbit.interpolateOrbit(masBurst.sensingMid, method='hermite').getPosition())
@@ -1786,10 +1786,10 @@ def grd2ion(self, ionParam):
             # 0 1 2 3
             #firstlineAdj, lastlineAdj, firstcolumnAdj, lastcolumnAdj,
             #after multiplication, index starts from 1
-            firstline = np.int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
-            lastline = np.int(np.around(burstValidBox[i][j][1] / ionParam.numberAzimuthLooks))
-            firstcolumn = np.int(np.around((burstValidBox[i][j][2] - 1) / ionParam.numberRangeLooks + 1))
-            lastcolumn = np.int(np.around(burstValidBox[i][j][3] / ionParam.numberRangeLooks))
+            firstline = int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
+            lastline = int(np.around(burstValidBox[i][j][1] / ionParam.numberAzimuthLooks))
+            firstcolumn = int(np.around((burstValidBox[i][j][2] - 1) / ionParam.numberRangeLooks + 1))
+            lastcolumn = int(np.around(burstValidBox[i][j][3] / ionParam.numberRangeLooks))
             #extract image
             burstImage = band[firstline-1:lastline, firstcolumn-1:lastcolumn]
@@ -1803,8 +1803,8 @@ def grd2ion(self, ionParam):
                 value = burstImage[:, k]
                 f = interp1d(index, value, kind='cubic', fill_value="extrapolate")
-                index_min = np.int(np.around(np.amin(index)))
-                index_max = np.int(np.around(np.amax(index)))
+                index_min = int(np.around(np.amin(index)))
+                index_max = int(np.around(np.amax(index)))
                 flag = index0 * 0.0
                 flag[index_min:index_max+1] = 1.0
                 #replace the original column with new column in burstImage
@@ -1869,11 +1869,11 @@ def adaptive_gaussian(ionos, wgt, size_max, size_min):
     #sigma of window size: size_max
     sigma = size_max / 2.0
     for i in range(size_num):
-        size2 = np.int(np.around(size[i]))
+        size2 = int(np.around(size[i]))
         if size2 % 2 == 0:
             size2 += 1
         if (i+1) % 10 == 0:
-            print('min win: %4d, max win: %4d, current win: %4d'%(np.int(np.around(size_min)), np.int(np.around(size_max)), size2))
+            print('min win: %4d, max win: %4d, current win: %4d'%(int(np.around(size_min)), int(np.around(size_max)), size2))
         g2d = gaussian(size2, sigma*size2/size_max, scale=1.0)
         scale = ss.fftconvolve(wgt, g2d, mode='same')
         flt[:, :, i] = ss.fftconvolve(ionos*wgt, g2d, mode='same') / (scale + (scale==0))
@@ -1995,7 +1995,7 @@ def ionosphere_shift(self, ionParam):
     #################################################
     #SET PARAMETERS HERE
     #gaussian filtering window size
-    #size = np.int(np.around(width / 12.0))
+    #size = int(np.around(width / 12.0))
     #size = ionParam.ionshiftFilteringWinsize
     size_max = ionParam.ionshiftFilteringWinsizeMax
     size_min = ionParam.ionshiftFilteringWinsizeMin
@@ -2122,7 +2122,7 @@ def ionosphere_shift(self, ionParam):
     reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
     minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
     #no problem with this index at all
-    midBurst = np.int(np.around((minBurst+ maxBurst-1) / 2.0))
+    midBurst = int(np.around((minBurst+ maxBurst-1) / 2.0))
     masBurst = reference.bursts[midBurst]
     #shift casued by ionosphere [unit: masBurst.azimuthTimeInterval]
@@ -2189,7 +2189,7 @@ def ion2grd(self, ionParam):
     reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
     minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
     #no problem with this index at all
-    midBurst = np.int(np.around((minBurst+ maxBurst-1) / 2.0))
+    midBurst = int(np.around((minBurst+ maxBurst-1) / 2.0))
     masBurst = reference.bursts[midBurst]
     #satellite height
     satHeight = np.linalg.norm(masBurst.orbit.interpolateOrbit(masBurst.sensingMid, method='hermite').getPosition())
@@ -2258,10 +2258,10 @@ def ion2grd(self, ionParam):
             #calculate phase caused by ionospheric shift and non-zero center frequency
             #index after multi-looking in merged image, index starts from 1
-            first_line = np.int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
-            last_line = np.int(np.around(burstValidBox[i][j][1] / ionParam.numberAzimuthLooks))
-            first_sample = np.int(np.around((burstValidBox[i][j][2] - 1) / ionParam.numberRangeLooks + 1))
-            last_sample = np.int(np.around(burstValidBox[i][j][3] / ionParam.numberRangeLooks))
+            first_line = int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
+            last_line = int(np.around(burstValidBox[i][j][1] / ionParam.numberAzimuthLooks))
+            first_sample = int(np.around((burstValidBox[i][j][2] - 1) / ionParam.numberRangeLooks + 1))
+            last_sample = int(np.around(burstValidBox[i][j][3] / ionParam.numberRangeLooks))
            burstDionMultilook = dion[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
             #for avoid areas with strong decorrelation like water
@@ -2310,8 +2310,8 @@ def multilook(data, nalks, nrlks):
     '''
     (length, width)=data.shape
-    width2 = np.int(width/nrlks)
-    length2 = np.int(length/nalks)
+    width2 = int(width/nrlks)
+    length2 = int(length/nalks)
     tmp2 = np.zeros((length2, width), dtype=data.dtype)
     data2 = np.zeros((length2, width2), dtype=data.dtype)
@@ -2338,7 +2338,7 @@ def get_overlap_box(swath, minBurst, maxBurst):
         curBurst = swath.bursts[ii]
         #overlap lines, line index starts from 1
-        offLine = np.int(np.round( (curBurst.sensingStart - topBurst.sensingStart).total_seconds() / curBurst.azimuthTimeInterval))
+        offLine = int(np.round( (curBurst.sensingStart - topBurst.sensingStart).total_seconds() / curBurst.azimuthTimeInterval))
         firstLineTop = topBurst.firstValidLine + 1
         lastLineTop = topBurst.firstValidLine + topBurst.numValidLines
        firstLineCur = offLine + curBurst.firstValidLine + 1
@@ -2351,7 +2351,7 @@ def get_overlap_box(swath, minBurst, maxBurst):
         lastLine = lastLineTop
         #overlap samples, sample index starts from 1
-        offSample = np.int(np.round( (curBurst.startingRange - topBurst.startingRange) / curBurst.rangePixelSize ))
+        offSample = int(np.round( (curBurst.startingRange - topBurst.startingRange) / curBurst.rangePixelSize ))
         firstSampleTop = topBurst.firstValidSample + 1
         lastSampleTop = topBurst.firstValidSample + topBurst.numValidSamples
         firstSampleCur = offSample + curBurst.firstValidSample + 1
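
A note on several hunks in this file that compute multilooked dimensions as int(width / nrlks): because np.int was literally an alias for the builtin int, the truncation behaviour is unchanged by this commit. A small sketch with illustrative values (not taken from the code):

width, nrlks = 1003, 4
widthNew = int(width / nrlks)       # true division then truncation toward zero: 250
assert widthNew == width // nrlks   # matches floor division for positive values of this size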

View File

@@ -141,7 +141,7 @@ def adjustValidWithLooks(swaths, box, nalks, nrlks, edge=0, avalid='strict', rva
         avalidList = list(range(1, nalks+1))
         avalidList.reverse()
     else:
-        avalidList = [np.int(np.around(avalid))]
+        avalidList = [int(np.around(avalid))]
     avalidnum = len(avalidList)
     for i in range(avalidnum):
@@ -154,7 +154,7 @@ def adjustValidWithLooks(swaths, box, nalks, nrlks, edge=0, avalid='strict', rva
         rvalidList = list(range(1, nrlks+1))
         rvalidList.reverse()
     else:
-        rvalidList = [np.int(np.around(rvalid))]
+        rvalidList = [int(np.around(rvalid))]
     rvalidnum = len(rvalidList)
     for i in range(rvalidnum):
@@ -233,8 +233,8 @@ def adjustValidWithLooks(swaths, box, nalks, nrlks, edge=0, avalid='strict', rva
         nburst = len(swaths[i].bursts)
         for j in range(nburst):
-            #offsample = np.int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
-            offline = np.int(np.round( (swaths[i].bursts[j].sensingStart - sensingStart).total_seconds() / dt))
+            #offsample = int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
+            offline = int(np.round( (swaths[i].bursts[j].sensingStart - sensingStart).total_seconds() / dt))
             #index in burst, index starts from 1
             firstline = swaths[i].bursts[j].firstValidLine + 1
@@ -302,7 +302,7 @@ def adjustValidWithLooks(swaths, box, nalks, nrlks, edge=0, avalid='strict', rva
         # firstcolumn0 = []
         # lastcolumn0 = []
         # for j in range(nburst):
-            # offsample = np.int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
+            # offsample = int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
             # #index in merged image, index starts from 1
             # firstcolumn0.append(swaths[i].bursts[j].firstValidSample + 1 + offsample)
             # lastcolumn0.append(firstcolumn + swaths[i].bursts[j].numValidSamples - 1 + offsample)
@@ -324,7 +324,7 @@ def adjustValidWithLooks(swaths, box, nalks, nrlks, edge=0, avalid='strict', rva
         # #index in burst, index starts from 0
         # for j in range(nburst):
-            # offsample = np.int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
+            # offsample = int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
             # swaths[i].bursts[j].firstValidSample = firstcolumnAdj - offsample - 1
             # swaths[i].bursts[j].numValidSamples = lastcolumnAdj - firstcolumnAdj + 1
@@ -358,7 +358,7 @@ def adjustValidWithLooks(swaths, box, nalks, nrlks, edge=0, avalid='strict', rva
         nburst = len(swaths[i].bursts)
         for j in range(nburst):
-            offsample = np.int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
+            offsample = int(np.round( (swaths[i].bursts[j].startingRange - nearRange)/dr))
             #index in burst, index starts from 1
             firstcolumn = swaths[i].bursts[j].firstValidSample + 1

View File

@@ -53,7 +53,7 @@ def runUnwrap(self,costMode = None,initMethod = None, defomax = None, initOnly =
     #some times tmid may exceed the time span, so use mid burst instead
     #14-APR-2018, Cunren Liang
     #orbit = ifg.bursts[0].orbit
-    burst_index = np.int(np.around(len(ifg.bursts)/2))
+    burst_index = int(np.around(len(ifg.bursts)/2))
    orbit = ifg.bursts[burst_index].orbit
     peg = orbit.interpolateOrbit(tmid, method='hermite')

View File

@@ -113,7 +113,7 @@ if __name__ == '__main__':
     import numpy as np
     Nx = 500
     Ny = 300
-    Nrand = np.int(0.4*Nx*Ny)
+    Nrand = int(0.4*Nx*Ny)
     x = np.arange(Nx, dtype=np.float32)/(1.0*Nx)
     y = np.arange(Ny, dtype=np.float32)/(1.0*Ny)

View File

@@ -106,7 +106,7 @@ def runOffPoly(offField):
     if ynorm == 0:
         ynorm = 1.0
-    yoff = np.int(np.round(np.mean(dy)))
+    yoff = int(np.round(np.mean(dy)))
     y = (y - ymin)/ynorm

View File

@@ -99,7 +99,7 @@ class Edge(object):
         return None not in (self.src.getPhase(), self.dst.getPhase())
     def diff(self):
-        return np.int(np.round((self.dst.phase - self.src.phase)/(2*np.pi)))
+        return int(np.round((self.dst.phase - self.src.phase)/(2*np.pi)))
     def updateTri(self, index):
         if self.triIdx is not None:

View File

@@ -121,8 +121,8 @@ class SWBDStitcher(DemStitcher):
         deltaLon = maskim.coord1.coordDelta
         #remember mask starts from top left corner
         #deltaLat < 0
-        lati = np.clip(((lat - startLat)/deltaLat).astype(np.int), 0, mask.shape[0]-1)
-        loni = np.clip(((lon - startLon)/deltaLon).astype(np.int), 0, mask.shape[1]-1)
+        lati = np.clip(((lat - startLat)/deltaLat).astype(int), 0, mask.shape[0]-1)
+        loni = np.clip(((lon - startLon)/deltaLon).astype(int), 0, mask.shape[1]-1)
         cropped = (mask[lati,loni] + 1).astype(maskim.toNumpyDataType())
         cropped = np.reshape(cropped,(latim.coord2.coordSize,latim.coord1.coordSize))
         cropped.tofile(output)

View File

@@ -317,7 +317,7 @@ class autoRIFT:
         self.ChipSizeMaxX = self.ChipSizeMaxX.astype(np.float32)
         ChipSizeX = np.zeros(self.xGrid.shape, np.float32)
-        InterpMask = np.zeros(self.xGrid.shape, np.bool)
+        InterpMask = np.zeros(self.xGrid.shape, bool)
         Dx = np.empty(self.xGrid.shape, dtype=np.float32)
         Dx.fill(np.nan)
         Dy = np.empty(self.xGrid.shape, dtype=np.float32)
@@ -372,7 +372,7 @@ class autoRIFT:
         M0 = (ChipSizeX == 0) & (self.ChipSizeMinX <= ChipSizeUniX[i]) & (self.ChipSizeMaxX >= ChipSizeUniX[i])
         M0 = colfilt(M0.copy(), (int(1/Scale*6), int(1/Scale*6)), 0, self.colfiltChunkSize)
-        M0 = cv2.resize(np.logical_not(M0).astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+        M0 = cv2.resize(np.logical_not(M0).astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(bool)
         SearchLimitX0 = colfilt(self.SearchLimitX.copy(), (int(1/Scale), int(1/Scale)), 0, self.colfiltChunkSize) + colfilt(self.Dx0.copy(), (int(1/Scale), int(1/Scale)), 4, self.colfiltChunkSize)
         SearchLimitY0 = colfilt(self.SearchLimitY.copy(), (int(1/Scale), int(1/Scale)), 0, self.colfiltChunkSize) + colfilt(self.Dy0.copy(), (int(1/Scale), int(1/Scale)), 4, self.colfiltChunkSize)
@@ -467,7 +467,7 @@ class autoRIFT:
         MC2 = ndimage.distance_transform_edt(np.logical_not(MC)) < self.BuffDistanceC
         dstShape = (int(MC2.shape[0]*(self.sparseSearchSampleRate*ChipSize0_GridSpacing_oversample_ratio)),int(MC2.shape[1]*(self.sparseSearchSampleRate*ChipSize0_GridSpacing_oversample_ratio)))
-        MC2 = cv2.resize(MC2.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+        MC2 = cv2.resize(MC2.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(bool)
         # pdb.set_trace()
         if np.logical_not(np.all(MC2.shape == SearchLimitX0.shape)):
             rowAdd = SearchLimitX0.shape[0] - MC2.shape[0]
@@ -505,7 +505,7 @@ class autoRIFT:
         DyFM = colfilt(DyF.copy(), (self.fillFiltWidth, self.fillFiltWidth), 3, self.colfiltChunkSize)
         # M0 is mask for original valid estimates, MF is mask for filled ones, MM is mask where filtered ones exist for filling
-        MF = np.zeros(M0.shape, dtype=np.bool)
+        MF = np.zeros(M0.shape, dtype=bool)
         MM = np.logical_not(np.isnan(DxFM))
         for j in range(3):
@@ -555,8 +555,8 @@ class autoRIFT:
         dstShape = (Dx.shape[0],Dx.shape[1])
         DxF = cv2.resize(DxF,dstShape[::-1],interpolation=cv2.INTER_CUBIC)
         DyF = cv2.resize(DyF,dstShape[::-1],interpolation=cv2.INTER_CUBIC)
-        MF = cv2.resize(MF.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
-        M0 = cv2.resize(M0.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+        MF = cv2.resize(MF.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(bool)
+        M0 = cv2.resize(M0.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(bool)
         idxRaw = M0 & (ChipSizeX == 0)
         idxFill = MF & (ChipSizeX == 0)
@@ -940,9 +940,9 @@ def arImgDisp_u(I1, I2, xGrid, yGrid, ChipSizeX, ChipSizeY, SearchLimitX, Search
         Dy0 = -Dy0
     SLx_max = np.max(SearchLimitX + np.abs(Dx0))
-    Px = np.int(np.max(ChipSizeX)/2 + SLx_max + 2)
+    Px = int(np.max(ChipSizeX)/2 + SLx_max + 2)
     SLy_max = np.max(SearchLimitY + np.abs(Dy0))
-    Py = np.int(np.max(ChipSizeY)/2 + SLy_max + 2)
+    Py = int(np.max(ChipSizeY)/2 + SLy_max + 2)
     I1 = np.lib.pad(I1,((Py,Py),(Px,Px)),'constant')
     I2 = np.lib.pad(I2,((Py,Py),(Px,Px)),'constant')
@@ -1188,9 +1188,9 @@ def arImgDisp_s(I1, I2, xGrid, yGrid, ChipSizeX, ChipSizeY, SearchLimitX, Search
         Dy0 = -Dy0
     SLx_max = np.max(SearchLimitX + np.abs(Dx0))
-    Px = np.int(np.max(ChipSizeX)/2 + SLx_max + 2)
+    Px = int(np.max(ChipSizeX)/2 + SLx_max + 2)
     SLy_max = np.max(SearchLimitY + np.abs(Dy0))
-    Py = np.int(np.max(ChipSizeY)/2 + SLy_max + 2)
+    Py = int(np.max(ChipSizeY)/2 + SLy_max + 2)
     I1 = np.lib.pad(I1,((Py,Py),(Px,Px)),'constant')
     I2 = np.lib.pad(I2,((Py,Py),(Px,Px)),'constant')
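
On the dtype changes in the hunks above: passing the builtin bool, int, or float as a dtype gives the same NumPy dtypes the removed aliases produced, so the resulting arrays are unchanged. A small standalone check (variable names here are illustrative, not from autoRIFT):

import numpy as np

m = np.zeros((2, 2), dtype=bool)                     # same as the removed dtype=np.bool
assert m.dtype == np.bool_                           # builtin bool maps to NumPy's bool_ dtype
assert np.zeros(3, dtype=int).dtype == np.int_       # int maps to the default integer dtype
assert np.zeros(3, dtype=float).dtype == np.float64  # float maps to float64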

View File

@@ -161,13 +161,13 @@ def cropFrame(frame, limits, outdir, israw=False):
     ####sensing start
     ymin = np.floor( (limits[0] - frame.sensingStart).total_seconds() * frame.PRF)
     print('Line start: ', ymin)
-    ymin = np.int( np.clip(ymin, 0, frame.numberOfLines-1))
+    ymin = int( np.clip(ymin, 0, frame.numberOfLines-1))
     ####sensing stop
     ymax = np.ceil( (limits[1] - frame.sensingStart).total_seconds() * frame.PRF) + 1
     print('Line stop: ', ymax)
-    ymax = np.int( np.clip(ymax, 1, frame.numberOfLines))
+    ymax = int( np.clip(ymax, 1, frame.numberOfLines))
     print('Line limits: ', ymin, ymax)
     print('Original Line Limits: ', 0, frame.numberOfLines)
@@ -185,13 +185,13 @@ def cropFrame(frame, limits, outdir, israw=False):
     ####starting range
     xmin = np.floor( (limits[2] - frame.startingRange)/frame.instrument.rangePixelSize)
     print('Pixel start: ', xmin)
-    xmin = np.int(np.clip(xmin, 0, (frame.image.width//factor)-1))
+    xmin = int(np.clip(xmin, 0, (frame.image.width//factor)-1))
     ####far range
     xmax = np.ceil( (limits[3] - frame.startingRange)/frame.instrument.rangePixelSize)+1
     print('Pixel stop: ', xmax)
-    xmax = np.int(np.clip(xmax, 1, frame.image.width//factor))
+    xmax = int(np.clip(xmax, 1, frame.image.width//factor))
     print('Pixel limits: ', xmin, xmax)
     print('Original Pixel Limits: ', 0, frame.image.width//factor)

View File

@@ -41,8 +41,8 @@ def makePlot(filename, pos):
     for index, (num, line, pixel) in enumerate(pos):
         print(line, pixel)
-        xx = np.int(pixel)
-        yy = np.int(line)
+        xx = int(pixel)
+        yy = int(line)
         box = 10 * np.log10(np.abs(data[yy-win:yy+win, yy-win:yy+win]))
         plt.subplot(7,3,index+1)

View File

@@ -155,8 +155,8 @@ class VRTConstructor(object):
         for ind, burst in enumerate(swath.prod.bursts):
-            xoff = np.int(np.round( (burst.startingRange - self.rref)/self.dr))
-            yoff = np.int(np.round( (burst.sensingStart - self.tref).total_seconds() / self.dt))
+            xoff = int(np.round( (burst.startingRange - self.rref)/self.dr))
+            yoff = int(np.round( (burst.sensingStart - self.tref).total_seconds() / self.dt))
             infile = filelist[ind]
             self.addBurst( burst, infile, yoff, xoff, band=band, validOnly=validOnly)

View File

@@ -55,8 +55,8 @@ def main(iargs=None):
     upperint = np.fromfile(inps.upper, dtype=np.complex64).reshape(length, width)
     if (inps.nrlks != 1) or (inps.nalks != 1):
-        width = np.int(width/inps.nrlks)
-        length = np.int(length/inps.nalks)
+        width = int(width/inps.nrlks)
+        length = int(length/inps.nalks)
         lowerint = multilook(lowerint, inps.nalks, inps.nrlks)
         upperint = multilook(upperint, inps.nalks, inps.nrlks)

View File

@@ -179,8 +179,8 @@ dr = swath.bursts[0].rangePixelSize
 print (slcPath)
 for ind, burst in enumerate(swath.bursts):
-    xoff = np.int(np.round( (burst.startingRange - rref)/dr))
-    yoff = np.int(np.round( (burst.sensingStart - tref).total_seconds() / dt))
+    xoff = int(np.round( (burst.startingRange - rref)/dr))
+    yoff = int(np.round( (burst.sensingStart - tref).total_seconds() / dt))
     tyoff = int(burst.firstValidLine)
     txoff = int(burst.firstValidSample)
     wysize = int(burst.numValidLines)

View File

@@ -71,8 +71,8 @@ def getGridLimits(geofile=None, latfile=None, lonfile=None):
         width = latds.RasterXSize
         lgth = latds.RasterYSize
-        xs = np.linspace(0, width-1, num=samples).astype(np.int)
-        ys = np.linspace(0, lgth-1, num=samples).astype(np.int)
+        xs = np.linspace(0, width-1, num=samples).astype(int)
+        ys = np.linspace(0, lgth-1, num=samples).astype(int)
         for line in range(samples):
@@ -122,11 +122,11 @@ def getGridLimits(geofile=None, latfile=None, lonfile=None):
         raise Exception('Either geofile is provided (or) latfile and lonfile. All 3 inputs cannot be provided')
-    ii0 = max(np.int((ymax - maxyy - dely/2.0) / dely ), 0)
-    ii1 = min(np.int((ymax - minyy + dely/2.0) / dely ) + 1, Ny)
-    jj0 = max(np.int((minxx - xmin - delx/2.0)/delx), 0)
-    jj1 = min(np.int((maxxx - xmin + delx/2.0)/delx) + 1, Nx)
+    ii0 = max(int((ymax - maxyy - dely/2.0) / dely ), 0)
+    ii1 = min(int((ymax - minyy + dely/2.0) / dely ) + 1, Ny)
+    jj0 = max(int((minxx - xmin - delx/2.0)/delx), 0)
+    jj1 = min(int((maxxx - xmin + delx/2.0)/delx) + 1, Nx)
     ylim = ymax - np.array([ii1,ii0]) * dely

View File

@@ -56,12 +56,12 @@ def main(iargs=None):
     corimg.load(corName0 + '.xml')
     width = corimg.width
     length = corimg.length
-    widthNew = np.int(width / nrlks)
-    lengthNew = np.int(length / nalks)
+    widthNew = int(width / nrlks)
+    lengthNew = int(length / nalks)
     cor0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
     wgt = cor0**2
     a = multilook(wgt, nalks, nrlks)
-    d = multilook((cor0!=0).astype(np.int), nalks, nrlks)
+    d = multilook((cor0!=0).astype(int), nalks, nrlks)
     #unwrapped file
     unwrapName0 = inps.unw

View File

@@ -181,10 +181,10 @@ def main(iargs=None):
         for j in range(nBurst):
             #index after multi-looking in merged image, index starts from 1
-            first_line = np.int(np.around((burstValidBox[i][j][0] - 1) / numberAzimuthLooks + 1))
-            last_line = np.int(np.around(burstValidBox[i][j][1] / numberAzimuthLooks))
-            first_sample = np.int(np.around((burstValidBox[i][j][2] - 1) / numberRangeLooks + 1))
-            last_sample = np.int(np.around(burstValidBox[i][j][3] / numberRangeLooks))
+            first_line = int(np.around((burstValidBox[i][j][0] - 1) / numberAzimuthLooks + 1))
+            last_line = int(np.around(burstValidBox[i][j][1] / numberAzimuthLooks))
+            first_sample = int(np.around((burstValidBox[i][j][2] - 1) / numberRangeLooks + 1))
+            last_sample = int(np.around(burstValidBox[i][j][3] / numberRangeLooks))
             corMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
                 corList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]

View File

@@ -178,8 +178,8 @@ class VRTConstructor(object):
         Add one swath to the VRT.
         '''
         for ind, burst in enumerate(swath.prod.bursts):
-            xoff = np.int(np.round( (burst.startingRange - self.rref)/self.dr))
-            yoff = np.int(np.round( (burst.sensingStart - self.tref).total_seconds() / self.dt))
+            xoff = int(np.round( (burst.startingRange - self.rref)/self.dr))
+            yoff = int(np.round( (burst.sensingStart - self.tref).total_seconds() / self.dt))
             self.addBurst( burst, swath.tiff, yoff, xoff, swath.ysize, swath.xsize)