Modified algorithm for ionospheric phase estimation (polar regions)

LT1AB
vbrancat 2020-02-10 17:07:09 -08:00
commit 3f01fd2f07
82 changed files with 2018 additions and 1333 deletions

View File

@ -72,7 +72,7 @@ jobs:
set -ex set -ex
pwd pwd
. /opt/conda/bin/activate root . /opt/conda/bin/activate root
export ISCE_HOME=/root/project/install/isce ISCE_HOME=/root/project/install/isce
export PATH="$ISCE_HOME/bin:$ISCE_HOME/applications:/opt/conda/bin:$PATH" export PATH="$ISCE_HOME/bin:$ISCE_HOME/applications:/opt/conda/bin:$PATH"
export PYTHONPATH="/root/project/install:$PYTHONPATH" export PYTHONPATH="/root/project/install:$PYTHONPATH"
export LD_LIBRARY_PATH="/opt/conda/lib:$LD_LIBRARY_PATH" export LD_LIBRARY_PATH="/opt/conda/lib:$LD_LIBRARY_PATH"

View File

@ -216,43 +216,12 @@ else:
### End of GPU branch-specific modifications ### End of GPU branch-specific modifications
file = '__init__.py' env.Install(inst, '__init__.py')
if not os.path.exists(file): env.Install(inst, 'release_history.py')
fout = open(file,"w")
fout.write("#!/usr/bin/env python3")
fout.close()
env.Install(inst,file)
try:
from subprocess import check_output
svn_revision = check_output('svnversion').strip() or 'Unknown'
if sys.version_info[0] == 3:
svn_revision = svn_revision.decode('utf-8')
except ImportError:
try:
import popen2
stdout, stdin, stderr = popen2.popen3('svnversion')
svn_revision = stdout.read().strip()
if stderr.read():
raise Exception
except Exception:
svn_revision = 'Unknown'
except OSError:
svn_revision = 'Unknown'
if not os.path.exists(inst): if not os.path.exists(inst):
os.makedirs(inst) os.makedirs(inst)
fvers = open(os.path.join(inst,'version.py'),'w')
from release_history import release_version, release_svn_revision, release_date
fvers_lines = ["release_version = '"+release_version+"'\n",
"release_svn_revision = '"+release_svn_revision+"'\n",
"release_date = '"+release_date+"'\n",
"svn_revision = '"+svn_revision+"'\n\n"]
fvers.write(''.join(fvers_lines))
fvers.close()
v = 0 v = 0
if isrerun == 'no': if isrerun == 'no':
cmd = 'scons -Q install --isrerun=yes' cmd = 'scons -Q install --isrerun=yes'
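The removed block above shelled out to svnversion at build time to generate version.py; the new lines instead install a static release_history.py alongside __init__.py. A minimal sketch of what such a module plausibly contains (placeholder values; the real strings are not shown in this diff):

# release_history.py -- hypothetical sketch, values are placeholders
release_version = 'X.Y.Z'
release_svn_revision = 'NNNN'
release_date = 'YYYYMMDD'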

View File

@ -25,18 +25,19 @@
# Author: Giangi Sacco # Author: Giangi Sacco
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from .release_history import release_version, release_svn_revision, release_date
svn_revision = release_svn_revision
version = release_history # compatibility alias
from __future__ import print_function
from .version import release_version, release_svn_revision, release_date
from .version import svn_revision
__version__ = release_version __version__ = release_version
import sys, os import sys, os
isce_path = os.path.split(os.path.abspath(__file__))[0] isce_path = os.path.dirname(os.path.abspath(__file__))
import logging
from logging.config import fileConfig as _fc
_fc(os.path.join(isce_path, 'defaults', 'logging', 'logging.conf'))
sys.path.insert(1,isce_path) sys.path.insert(1,isce_path)
sys.path.insert(1,os.path.join(isce_path,'applications')) sys.path.insert(1,os.path.join(isce_path,'applications'))
sys.path.insert(1,os.path.join(isce_path,'components')) sys.path.insert(1,os.path.join(isce_path,'components'))
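Most of the hunks that follow repeat a single pattern: each application used to call logging.config.fileConfig() itself against $ISCE_HOME, and now simply writes `from isce import logging`, relying on the package __init__ above to configure logging exactly once. A runnable sketch of the idea, with an inline dictConfig standing in for defaults/logging/logging.conf:

import logging
import logging.config

# configure once at package import time (stand-in for logging.conf)
logging.config.dictConfig({
    'version': 1,
    'formatters': {'brief': {'format': '%(name)s - %(levelname)s - %(message)s'}},
    'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter': 'brief'}},
    'root': {'handlers': ['console'], 'level': 'INFO'},
})

# every consumer then shares the configuration:
logger = logging.getLogger('isce.insar')
logger.info('configured once, used everywhere')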

View File

@ -31,12 +31,8 @@
import os
import math import math
import logging from isce import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion() Compatibility.checkPythonVersion()
from isceobj.Location.Peg import Peg from isceobj.Location.Peg import Peg

View File

@ -30,11 +30,7 @@
import os from isce import logging
import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion() Compatibility.checkPythonVersion()
from iscesys.Component.FactoryInit import FactoryInit from iscesys.Component.FactoryInit import FactoryInit

View File

@ -30,11 +30,7 @@
import os from isce import logging
import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
import isceobj import isceobj
from iscesys.Component.FactoryInit import FactoryInit from iscesys.Component.FactoryInit import FactoryInit

View File

@ -30,12 +30,8 @@
import os
import datetime import datetime
import logging from isce import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion() Compatibility.checkPythonVersion()
from iscesys.Component.FactoryInit import FactoryInit from iscesys.Component.FactoryInit import FactoryInit

View File

@ -30,12 +30,8 @@
import os
import math import math
import logging from isce import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
import isceobj import isceobj
from iscesys.Component.FactoryInit import FactoryInit from iscesys.Component.FactoryInit import FactoryInit
from iscesys.DateTimeUtil.DateTimeUtil import DateTimeUtil as DTU from iscesys.DateTimeUtil.DateTimeUtil import DateTimeUtil as DTU

View File

@ -34,8 +34,7 @@ from __future__ import print_function
import time import time
import os import os
import sys import sys
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -46,11 +45,6 @@ from iscesys.Component.Configurable import SELF
import isceobj.InsarProc as InsarProc import isceobj.InsarProc as InsarProc
from isceobj.Scene.Frame import FrameMixin from isceobj.Scene.Frame import FrameMixin
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.insar') logger = logging.getLogger('isce.insar')

View File

@ -41,8 +41,7 @@ import datetime
import os import os
import sys import sys
import math import math
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -1438,11 +1437,6 @@ class IsceApp(Application, FrameMixin):
sys.exit("Could not find the output directory: %s" % self.outputDir) sys.exit("Could not find the output directory: %s" % self.outputDir)
os.chdir(self.outputDir) ##change working directory to given output directory os.chdir(self.outputDir) ##change working directory to given output directory
##read configfile only here so that log path is in output directory
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.isceProc') logger = logging.getLogger('isce.isceProc')
logger.info(self.intromsg) logger.info(self.intromsg)
self._isce.dataDirectory = self.outputDir self._isce.dataDirectory = self.outputDir

View File

@ -27,16 +27,8 @@
# Author: Walter Szeliga # Author: Walter Szeliga
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os
import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
import isce import isce
from isce import logging
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
from iscesys.Component.Component import Component, Port from iscesys.Component.Component import Component, Port
from isceobj.Planet.Ellipsoid import Ellipsoid from isceobj.Planet.Ellipsoid import Ellipsoid

View File

@ -30,10 +30,8 @@
import time import time
import os
import sys import sys
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -44,11 +42,6 @@ from iscesys.Component.Configurable import SELF
from isceobj import RtcProc from isceobj import RtcProc
from isceobj.Util.decorators import use_api from isceobj.Util.decorators import use_api
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.grdsar') logger = logging.getLogger('isce.grdsar')

View File

@ -27,13 +27,9 @@
# Authors: Giangi Sacco, Eric Gurrola # Authors: Giangi Sacco, Eric Gurrola
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import time import time
import os
import sys import sys
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -43,11 +39,6 @@ from iscesys.Compatibility import Compatibility
from iscesys.Component.Configurable import SELF from iscesys.Component.Configurable import SELF
from isceobj import ScansarProc from isceobj import ScansarProc
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.insar') logger = logging.getLogger('isce.insar')

View File

@ -35,10 +35,8 @@
from __future__ import print_function from __future__ import print_function
import time import time
import os
import sys import sys
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -50,11 +48,6 @@ import isceobj.StripmapProc as StripmapProc
from isceobj.Scene.Frame import FrameMixin from isceobj.Scene.Frame import FrameMixin
from isceobj.Util.decorators import use_api from isceobj.Util.decorators import use_api
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.insar') logger = logging.getLogger('isce.insar')
@ -265,7 +258,7 @@ RUBBERSHEET_SNR_THRESHOLD = Application.Parameter('rubberSheetSNRThreshold',
RUBBERSHEET_FILTER_SIZE = Application.Parameter('rubberSheetFilterSize', RUBBERSHEET_FILTER_SIZE = Application.Parameter('rubberSheetFilterSize',
public_name='rubber sheet filter size', public_name='rubber sheet filter size',
default = 8, default = 9,
type = int, type = int,
mandatory = False, mandatory = False,
doc = '') doc = '')

View File

@ -34,10 +34,8 @@
import time import time
import os
import sys import sys
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -47,11 +45,6 @@ from iscesys.Compatibility import Compatibility
from iscesys.Component.Configurable import SELF from iscesys.Component.Configurable import SELF
from isceobj import TopsProc from isceobj import TopsProc
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.insar') logger = logging.getLogger('isce.insar')

View File

@ -30,10 +30,8 @@
import time import time
import os
import sys import sys
import logging from isce import logging
import logging.config
import isce import isce
import isceobj import isceobj
@ -42,11 +40,6 @@ from isce.applications.topsApp import TopsInSAR
from iscesys.Component.Application import Application from iscesys.Component.Application import Application
from isceobj.Util.decorators import use_api from isceobj.Util.decorators import use_api
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults', 'logging',
'logging.conf')
)
logger = logging.getLogger('isce.insar') logger = logging.getLogger('isce.insar')
WINDOW_SIZE_WIDTH = Application.Parameter( WINDOW_SIZE_WIDTH = Application.Parameter(

View File

@ -27,14 +27,7 @@
# Author: Walter Szeliga # Author: Walter Szeliga
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from isce import logging
import os
import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion() Compatibility.checkPythonVersion()
from iscesys.Component.FactoryInit import FactoryInit from iscesys.Component.FactoryInit import FactoryInit

View File

@ -29,11 +29,7 @@
import math import math
import os from isce import logging
import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from isceobj.Util.decorators import type_check, force, pickled, logged from isceobj.Util.decorators import type_check, force, pickled, logged
import numpy as np import numpy as np

View File

@ -1061,7 +1061,7 @@ class Orbit(Component):
###This wont break the old interface but could cause ###This wont break the old interface but could cause
###issues at midnight crossing ###issues at midnight crossing
if reference is None: if reference is None:
reference = self.minTime() reference = self.minTime
refEpoch = reference.replace(hour=0, minute=0, second=0, microsecond=0) refEpoch = reference.replace(hour=0, minute=0, second=0, microsecond=0)
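The one-character fix above (self.minTime() → self.minTime) reads as minTime being a property that returns a datetime, so calling its result raised a TypeError. A tiny illustration of the distinction, assuming minTime wraps the earliest state-vector epoch:

import datetime

class Orbit:
    def __init__(self, times):
        self._times = times

    @property
    def minTime(self):
        # earliest state-vector epoch, accessed without parentheses
        return min(self._times)

orb = Orbit([datetime.datetime(2020, 2, 10, 17), datetime.datetime(2020, 2, 10, 18)])
reference = orb.minTime       # property access
refEpoch = reference.replace(hour=0, minute=0, second=0, microsecond=0)
# orb.minTime() would raise: TypeError: 'datetime.datetime' object is not callable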

View File

@ -27,14 +27,7 @@
# Author: Walter Szeliga # Author: Walter Szeliga
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from isce import logging
import os
import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from isceobj.Sensor.ERS import ERS from isceobj.Sensor.ERS import ERS
from isceobj.Scene.Track import Track from isceobj.Scene.Track import Track
logger = logging.getLogger("testTrack") logger = logging.getLogger("testTrack")

View File

@ -75,11 +75,11 @@ def estimateOffsetField(master, slave, denseOffsetFileName,
def runDenseOffsets(self): def runDenseOffsets(self):
if self.doDenseOffsets or self.doRubbersheeting: if self.doDenseOffsets or self.doRubbersheetingAzimuth:
if self.doDenseOffsets: if self.doDenseOffsets:
print('Dense offsets explicitly requested') print('Dense offsets explicitly requested')
if self.doRubbersheeting: if self.doRubbersheetingAzimuth:
print('Generating offsets as rubber sheeting requested') print('Generating offsets as rubber sheeting requested')
else: else:
return return

View File

@ -8,10 +8,13 @@ import isceobj
from isceobj.Constants import SPEED_OF_LIGHT from isceobj.Constants import SPEED_OF_LIGHT
import numpy as np import numpy as np
import gdal import gdal
from scipy.ndimage import median_filter from scipy.ndimage import median_filter
from astropy.convolution import convolve from astropy.convolution import convolve
from scipy import ndimage from scipy import ndimage
try: try:
import cv2 import cv2
@ -299,6 +302,8 @@ def fill(data, invalid=None):
Output: Output:
Return a filled array. Return a filled array.
""" """
from scipy import ndimage
if invalid is None: invalid = np.isnan(data) if invalid is None: invalid = np.isnan(data)
ind = ndimage.distance_transform_edt(invalid, ind = ndimage.distance_transform_edt(invalid,
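The fill() helper recurs in several files below with its body truncated here; it replaces invalid pixels with the value of the nearest valid neighbour via a Euclidean distance transform. A self-contained sketch of the same technique:

import numpy as np
from scipy import ndimage

def fill(data, invalid=None):
    # replace invalid (NaN) pixels with the nearest valid value
    if invalid is None:
        invalid = np.isnan(data)
    ind = ndimage.distance_transform_edt(invalid, return_distances=False,
                                         return_indices=True)
    return data[tuple(ind)]

a = np.array([[1.0, np.nan], [np.nan, 4.0]])
print(fill(a))   # NaNs replaced by their nearest neighbours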

View File

@ -56,7 +56,7 @@ def compute_FlatEarth(self,ifgFilename,width,length,radarWavelength):
# Open the interferogram # Open the interferogram
#ifgFilename= os.path.join(self.insar.ifgDirname, self.insar.ifgFilename) #ifgFilename= os.path.join(self.insar.ifgDirname, self.insar.ifgFilename)
intf = np.memmap(ifgFilename+'.full',dtype=np.complex64,mode='r+',shape=(length,width)) intf = np.memmap(ifgFilename,dtype=np.complex64,mode='r+',shape=(length,width))
for ll in range(length): for ll in range(length):
intf[ll,:] *= np.exp(cJ*fact*rng2[ll,:]) intf[ll,:] *= np.exp(cJ*fact*rng2[ll,:])
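compute_FlatEarth above multiplies each interferogram line by exp(cJ*fact*rng2). Neither cJ nor fact appears in the hunk; assuming the usual two-way range-phase convention (cJ = -1j, fact = 4*pi/wavelength), a hedged numpy sketch of the loop:

import numpy as np

wavelength = 0.236                 # metres, illustrative L-band value
cJ = np.complex64(-1j)             # assumed sign convention
fact = 4.0 * np.pi / wavelength

length, width = 4, 6
intf = np.ones((length, width), dtype=np.complex64)   # stand-in interferogram
rng2 = np.random.rand(length, width)                  # range offsets, metres

for ll in range(length):
    intf[ll, :] *= np.exp(cJ * fact * rng2[ll, :])    # remove flat-earth phase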
@ -155,10 +155,13 @@ def generateIgram(self,imageSlc1, imageSlc2, resampName, azLooks, rgLooks,radarW
else: else:
resampAmp += '.amp' resampAmp += '.amp'
if not self.doRubbersheetingRange:
resampInt = resampName resampInt = resampName
else:
resampInt = resampName + ".full"
objInt = isceobj.createIntImage() objInt = isceobj.createIntImage()
objInt.setFilename(resampInt+'.full') objInt.setFilename(resampInt)
objInt.setWidth(intWidth) objInt.setWidth(intWidth)
imageInt = isceobj.createIntImage() imageInt = isceobj.createIntImage()
IU.copyAttributes(objInt, imageInt) IU.copyAttributes(objInt, imageInt)
@ -166,7 +169,7 @@ def generateIgram(self,imageSlc1, imageSlc2, resampName, azLooks, rgLooks,radarW
objInt.createImage() objInt.createImage()
objAmp = isceobj.createAmpImage() objAmp = isceobj.createAmpImage()
objAmp.setFilename(resampAmp+'.full') objAmp.setFilename(resampAmp)
objAmp.setWidth(intWidth) objAmp.setWidth(intWidth)
imageAmp = isceobj.createAmpImage() imageAmp = isceobj.createAmpImage()
IU.copyAttributes(objAmp, imageAmp) IU.copyAttributes(objAmp, imageAmp)
@ -196,8 +199,8 @@ def generateIgram(self,imageSlc1, imageSlc2, resampName, azLooks, rgLooks,radarW
compute_FlatEarth(self,resampInt,intWidth,lines,radarWavelength) compute_FlatEarth(self,resampInt,intWidth,lines,radarWavelength)
# Perform Multilook # Perform Multilook
multilook(resampInt+'.full', outname=resampInt, alks=azLooks, rlks=rgLooks) #takeLooks(objAmp,azLooks,rgLooks) multilook(resampInt, outname=resampName, alks=azLooks, rlks=rgLooks) #takeLooks(objAmp,azLooks,rgLooks)
multilook(resampAmp+'.full', outname=resampAmp, alks=azLooks, rlks=rgLooks) #takeLooks(objInt,azLooks,rgLooks) multilook(resampAmp, outname=resampAmp.replace(".full",""), alks=azLooks, rlks=rgLooks) #takeLooks(objInt,azLooks,rgLooks)
#os.system('rm ' + resampInt+'.full* ' + resampAmp + '.full* ') #os.system('rm ' + resampInt+'.full* ' + resampAmp + '.full* ')
# End of modification # End of modification
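The multilook() calls above average azLooks x rgLooks blocks of the full-resolution products. The real helper works on ISCE image files; a minimal in-memory sketch of the operation:

import numpy as np

def multilook(arr, alks, rlks):
    # average alks x rlks blocks, trimming edges that do not fill a block
    length = (arr.shape[0] // alks) * alks
    width = (arr.shape[1] // rlks) * rlks
    blocks = arr[:length, :width].reshape(length // alks, alks,
                                          width // rlks, rlks)
    return blocks.mean(axis=(1, 3))

full = np.arange(24, dtype=np.complex64).reshape(4, 6)
print(multilook(full, alks=2, rlks=3))   # 2 x 2 multilooked output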

View File

@ -75,6 +75,7 @@ def runResampleSlc(self, kind='coarse'):
if kind in ['coarse', 'refined']: if kind in ['coarse', 'refined']:
azname = os.path.join(offsetsDir, self.insar.azimuthOffsetFilename) azname = os.path.join(offsetsDir, self.insar.azimuthOffsetFilename)
rgname = os.path.join(offsetsDir, self.insar.rangeOffsetFilename) rgname = os.path.join(offsetsDir, self.insar.rangeOffsetFilename)
flatten = True
else: else:
azname = os.path.join(offsetsDir, self.insar.azimuthRubbersheetFilename) azname = os.path.join(offsetsDir, self.insar.azimuthRubbersheetFilename)
if self.doRubbersheetingRange: if self.doRubbersheetingRange:

View File

@ -6,7 +6,6 @@
import isce import isce
import isceobj import isceobj
from osgeo import gdal from osgeo import gdal
from scipy import ndimage
import numpy as np import numpy as np
import os import os
@ -24,6 +23,9 @@ def fill(data, invalid=None):
Output: Output:
Return a filled array. Return a filled array.
""" """
from scipy import ndimage
if invalid is None: invalid = np.isnan(data) if invalid is None: invalid = np.isnan(data)
ind = ndimage.distance_transform_edt(invalid, ind = ndimage.distance_transform_edt(invalid,
@ -35,6 +37,8 @@ def fill(data, invalid=None):
def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outName): def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outName):
#masking and Filtering #masking and Filtering
from scipy import ndimage
##Read in the offset file ##Read in the offset file
ds = gdal.Open(denseOffsetFile + '.vrt', gdal.GA_ReadOnly) ds = gdal.Open(denseOffsetFile + '.vrt', gdal.GA_ReadOnly)
Offset = ds.GetRasterBand(1).ReadAsArray() Offset = ds.GetRasterBand(1).ReadAsArray()
@ -140,7 +144,7 @@ def resampleOffset(maskedFiltOffset, geometryOffset, outName):
def runRubbersheet(self): def runRubbersheet(self):
if not self.doRubbersheeting: if not self.doRubbersheetingAzimuth:
print('Rubber sheeting not requested ... skipping') print('Rubber sheeting not requested ... skipping')
return return
@ -170,5 +174,3 @@ def runRubbersheet(self):
print("I'm here") print("I'm here")
return None return None
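runRubbersheet (now gated on doRubbersheetingAzimuth) chains the helpers above: blank the dense offsets where SNR is low, fill the holes from the nearest valid neighbour, median-filter, then resample to the full grid. A compressed sketch of the mask-and-filter step under those assumptions:

import numpy as np
from scipy import ndimage

def mask_and_filter(offset, snr, snr_threshold=5.0, filter_size=9):
    # blank low-SNR offsets, fill the gaps, then median filter
    masked = offset.copy()
    masked[snr < snr_threshold] = np.nan
    invalid = np.isnan(masked)
    ind = ndimage.distance_transform_edt(invalid, return_distances=False,
                                         return_indices=True)
    filled = masked[tuple(ind)]
    return ndimage.median_filter(filled, filter_size)

off = np.random.randn(50, 50).astype(np.float32)
snr = np.random.rand(50, 50) * 10.0
print(mask_and_filter(off, snr).shape)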

View File

@ -9,14 +9,14 @@
import isce import isce
import isceobj import isceobj
from osgeo import gdal from osgeo import gdal
from scipy import ndimage
from astropy.convolution import convolve
import numpy as np import numpy as np
import os import os
def mask_filterNoSNR(denseOffsetFile,filterSize,outName): def mask_filterNoSNR(denseOffsetFile,filterSize,outName):
# Masking the offsets with a data-based approach # Masking the offsets with a data-based approach
from scipy import ndimage
# Open the offsets # Open the offsets
ds = gdal.Open(denseOffsetFile+'.vrt',gdal.GA_ReadOnly) ds = gdal.Open(denseOffsetFile+'.vrt',gdal.GA_ReadOnly)
off_az = ds.GetRasterBand(1).ReadAsArray() off_az = ds.GetRasterBand(1).ReadAsArray()
@ -79,6 +79,9 @@ def mask_filterNoSNR(denseOffsetFile,filterSize,outName):
def off_masking(off,filterSize,thre=2): def off_masking(off,filterSize,thre=2):
from scipy import ndimage
# Define the mask to fill the offsets # Define the mask to fill the offsets
vram = ndimage.median_filter(off.real, filterSize) vram = ndimage.median_filter(off.real, filterSize)
vazm = ndimage.median_filter(off.imag, filterSize) vazm = ndimage.median_filter(off.imag, filterSize)
@ -101,6 +104,8 @@ def fill(data, invalid=None):
Output: Output:
Return a filled array. Return a filled array.
""" """
from scipy import ndimage
if invalid is None: invalid = np.isnan(data) if invalid is None: invalid = np.isnan(data)
ind = ndimage.distance_transform_edt(invalid, ind = ndimage.distance_transform_edt(invalid,
@ -112,6 +117,8 @@ def fill(data, invalid=None):
def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outName): def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outName):
#masking and Filtering #masking and Filtering
from scipy import ndimage
##Read in the offset file ##Read in the offset file
ds = gdal.Open(denseOffsetFile + '.vrt', gdal.GA_ReadOnly) ds = gdal.Open(denseOffsetFile + '.vrt', gdal.GA_ReadOnly)
Offset = ds.GetRasterBand(band).ReadAsArray() Offset = ds.GetRasterBand(band).ReadAsArray()
@ -155,6 +162,8 @@ def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outNam
def fill_with_smoothed(off,filterSize): def fill_with_smoothed(off,filterSize):
from astropy.convolution import convolve
off_2filt=np.copy(off) off_2filt=np.copy(off)
kernel = np.ones((filterSize,filterSize),np.float32)/(filterSize*filterSize) kernel = np.ones((filterSize,filterSize),np.float32)/(filterSize*filterSize)
loop = 0 loop = 0
@ -272,5 +281,3 @@ def runRubbersheetAzimuth(self):
resampleOffset(filtAzOffsetFile, geometryAzimuthOffset, sheetOffset) resampleOffset(filtAzOffsetFile, geometryAzimuthOffset, sheetOffset)
return None return None
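fill_with_smoothed, whose astropy import is made lazy above, repeatedly convolves the field with a normalized box kernel until no NaNs remain; astropy's convolve interpolates across NaNs, which plain scipy convolution does not. A short sketch of such an iteration loop:

import numpy as np
from astropy.convolution import convolve

def fill_with_smoothed(off, filter_size):
    # iteratively replace NaNs with a local box-kernel average
    off_filt = np.copy(off)
    kernel = np.ones((filter_size, filter_size), np.float32) / (filter_size ** 2)
    while np.any(np.isnan(off_filt)):
        smoothed = convolve(off_filt, kernel, boundary='extend')
        off_filt[np.isnan(off_filt)] = smoothed[np.isnan(off_filt)]
    return off_filt

a = np.random.randn(20, 20)
a[5:8, 5:8] = np.nan
print(np.isnan(fill_with_smoothed(a, 5)).any())   # False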

View File

@ -9,15 +9,14 @@
import isce import isce
import isceobj import isceobj
from osgeo import gdal from osgeo import gdal
from scipy import ndimage
import numpy as np import numpy as np
import os import os
from astropy.convolution import convolve
def mask_filterNoSNR(denseOffsetFile,filterSize,outName): def mask_filterNoSNR(denseOffsetFile,filterSize,outName):
# Masking the offsets with a data-based approach # Masking the offsets with a data-based approach
from scipy import ndimage
# Open the offsets # Open the offsets
ds = gdal.Open(denseOffsetFile+'.vrt',gdal.GA_ReadOnly) ds = gdal.Open(denseOffsetFile+'.vrt',gdal.GA_ReadOnly)
off_az = ds.GetRasterBand(1).ReadAsArray() off_az = ds.GetRasterBand(1).ReadAsArray()
@ -78,6 +77,9 @@ def mask_filterNoSNR(denseOffsetFile,filterSize,outName):
return return
def off_masking(off,filterSize,thre=2): def off_masking(off,filterSize,thre=2):
from scipy import ndimage
vram = ndimage.median_filter(off.real, filterSize) vram = ndimage.median_filter(off.real, filterSize)
vazm = ndimage.median_filter(off.imag, filterSize) vazm = ndimage.median_filter(off.imag, filterSize)
@ -100,6 +102,8 @@ def fill(data, invalid=None):
Output: Output:
Return a filled array. Return a filled array.
""" """
from scipy import ndimage
if invalid is None: invalid = np.isnan(data) if invalid is None: invalid = np.isnan(data)
ind = ndimage.distance_transform_edt(invalid, ind = ndimage.distance_transform_edt(invalid,
@ -109,6 +113,8 @@ def fill(data, invalid=None):
def fill_with_smoothed(off,filterSize): def fill_with_smoothed(off,filterSize):
from astropy.convolution import convolve
off_2filt=np.copy(off) off_2filt=np.copy(off)
kernel = np.ones((filterSize,filterSize),np.float32)/(filterSize*filterSize) kernel = np.ones((filterSize,filterSize),np.float32)/(filterSize*filterSize)
loop = 0 loop = 0
@ -131,6 +137,8 @@ def fill_with_smoothed(off,filterSize):
def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outName): def mask_filter(denseOffsetFile, snrFile, band, snrThreshold, filterSize, outName):
#masking and Filtering #masking and Filtering
from scipy import ndimage
##Read in the offset file ##Read in the offset file
ds = gdal.Open(denseOffsetFile + '.vrt', gdal.GA_ReadOnly) ds = gdal.Open(denseOffsetFile + '.vrt', gdal.GA_ReadOnly)
Offset = ds.GetRasterBand(band).ReadAsArray() Offset = ds.GetRasterBand(band).ReadAsArray()
@ -236,6 +244,8 @@ def resampleOffset(maskedFiltOffset, geometryOffset, outName):
def runRubbersheetRange(self): def runRubbersheetRange(self):
from scipy import ndimage
if not self.doRubbersheetingRange: if not self.doRubbersheetingRange:
print('Rubber sheeting in range not requested ... skipping') print('Rubber sheeting in range not requested ... skipping')
return return

View File

@ -9,9 +9,6 @@ import shutil
import datetime import datetime
import numpy as np import numpy as np
import numpy.matlib import numpy.matlib
import scipy.signal as ss
from scipy import interpolate
from scipy.interpolate import interp1d
import isceobj import isceobj
import logging import logging
@ -638,6 +635,7 @@ def cal_coherence(inf, win=5, edge=0):
4: keep all samples 4: keep all samples
''' '''
import scipy.signal as ss
if win % 2 != 1: if win % 2 != 1:
raise Exception('window size must be odd!') raise Exception('window size must be odd!')
@ -1682,6 +1680,9 @@ def computeDopplerOffset(burst, firstline, lastline, firstcolumn, lastcolumn, nr
output: first lines > 0, last lines < 0 output: first lines > 0, last lines < 0
''' '''
from scipy import interpolate
from scipy.interpolate import interp1d
Vs = np.linalg.norm(burst.orbit.interpolateOrbit(burst.sensingMid, method='hermite').getVelocity()) Vs = np.linalg.norm(burst.orbit.interpolateOrbit(burst.sensingMid, method='hermite').getVelocity())
Ks = 2 * Vs * burst.azimuthSteeringRate / burst.radarWavelength Ks = 2 * Vs * burst.azimuthSteeringRate / burst.radarWavelength
@ -1830,6 +1831,7 @@ def adaptive_gaussian(ionos, wgt, size_max, size_min):
size_max: maximum window size size_max: maximum window size
size_min: minimum window size size_min: minimum window size
''' '''
import scipy.signal as ss
length = (ionos.shape)[0] length = (ionos.shape)[0]
width = (ionos.shape)[1] width = (ionos.shape)[1]
@ -1892,6 +1894,8 @@ def filt_gaussian(self, ionParam):
currently not implemented. currently not implemented.
a less accurate method is to use ionsphere without any projection a less accurate method is to use ionsphere without any projection
''' '''
from scipy import interpolate
from scipy.interpolate import interp1d
################################################# #################################################
#SET PARAMETERS HERE #SET PARAMETERS HERE
@ -2659,5 +2663,3 @@ def runIon(self):
#esd_noion(self, ionParam) #esd_noion(self, ionParam)
return return
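The ionosphere module (the subject of this commit) defers its scipy imports into adaptive_gaussian, filt_gaussian and computeDopplerOffset above. adaptive_gaussian low-pass filters the ionospheric phase with a window that grows where the weight (coherence-derived) is poor; the kernel itself is not shown in this diff, but the weighted filtering idea, for a single fixed window size, looks roughly like this sketch (an assumption, not the committed algorithm):

import numpy as np
import scipy.signal as ss

def weighted_lowpass(ion, wgt, size):
    # one fixed-size pass of a weighted Gaussian low-pass filter
    sigma = size / 2.0
    g = np.exp(-((np.arange(size) - size // 2) ** 2) / (2.0 * sigma ** 2))
    kernel = np.outer(g, g)
    num = ss.fftconvolve(ion * wgt, kernel, mode='same')
    den = ss.fftconvolve(wgt, kernel, mode='same')
    return num / (den + 1e-12)

ion = np.random.randn(64, 64)
wgt = np.random.rand(64, 64)       # e.g. coherence-based weights
print(weighted_lowpass(ion, wgt, size=11).shape)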

View File

@ -3,7 +3,6 @@
# Copyright 2016 # Copyright 2016
# #
from scipy.ndimage.filters import median_filter
import numpy as np import numpy as np
import isce import isce
import isceobj import isceobj
@ -20,6 +19,8 @@ def runOffsetFilter(self):
if not self.doDenseOffsets: if not self.doDenseOffsets:
return return
from scipy.ndimage.filters import median_filter
offsetfile = os.path.join(self._insar.mergedDirname, self._insar.offsetfile) offsetfile = os.path.join(self._insar.mergedDirname, self._insar.offsetfile)
snrfile = os.path.join(self._insar.mergedDirname, self._insar.snrfile) snrfile = os.path.join(self._insar.mergedDirname, self._insar.snrfile)
print('\n======================================') print('\n======================================')

View File

@ -8,7 +8,6 @@ import numpy as np
import os import os
import isceobj import isceobj
import logging import logging
import scipy.signal as SS
from isceobj.Util.ImageUtil import ImageLib as IML from isceobj.Util.ImageUtil import ImageLib as IML
import datetime import datetime
import pprint import pprint
@ -177,6 +176,7 @@ def createCoherence(intfile, win=5):
''' '''
Compute coherence using scipy convolve 2D. Compute coherence using scipy convolve 2D.
''' '''
import scipy.signal as SS
corfile = os.path.splitext(intfile)[0] + '.cor' corfile = os.path.splitext(intfile)[0] + '.cor'
filt = np.ones((win,win))/ (1.0*win*win) filt = np.ones((win,win))/ (1.0*win*win)
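createCoherence, per its docstring, estimates coherence with a 2-D boxcar convolution; with the filt defined above, the estimator is presumably |<I>| / <|I|> over a win x win window. A runnable sketch under that reading:

import numpy as np
import scipy.signal as SS

def create_coherence(intf, win=5):
    # boxcar coherence estimate over a win x win window
    filt = np.ones((win, win)) / (1.0 * win * win)
    num = np.abs(SS.convolve2d(intf, filt, mode='same'))
    den = SS.convolve2d(np.abs(intf), filt, mode='same')
    return num / (den + 1e-12)

intf = np.exp(1j * 0.1 * np.random.randn(32, 32)).astype(np.complex64)
print(create_coherence(intf).max())   # close to 1 for a low-noise phase field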

View File

@ -54,7 +54,7 @@ class snaphu(Component):
self.azimuthLooks = obj.insar.topo.numberAzimuthLooks self.azimuthLooks = obj.insar.topo.numberAzimuthLooks
azres = obj.insar.masterFrame.platform.antennaLength/2.0 azres = obj.insar.masterFrame.platform.antennaLength/2.0
azfact = obj.insar.topo.numberAzimuthLooks *azres / obj.insar.topo.azimuthSpacing azfact = azres / obj.insar.topo.azimuthSpacing
rBW = obj.insar.masterFrame.instrument.pulseLength * obj.insar.masterFrame.instrument.chirpSlope rBW = obj.insar.masterFrame.instrument.pulseLength * obj.insar.masterFrame.instrument.chirpSlope
rgres = abs(SPEED_OF_LIGHT / (2.0 * rBW)) rgres = abs(SPEED_OF_LIGHT / (2.0 * rBW))

View File

@ -54,7 +54,7 @@ class snaphu_mcf(Component):
self.azimuthLooks = obj.insar.topo.numberAzimuthLooks self.azimuthLooks = obj.insar.topo.numberAzimuthLooks
azres = obj.insar.masterFrame.platform.antennaLength/2.0 azres = obj.insar.masterFrame.platform.antennaLength/2.0
azfact = obj.insar.topo.numberAzimuthLooks *azres / obj.insar.topo.azimuthSpacing azfact = azres / obj.insar.topo.azimuthSpacing
rBW = obj.insar.masterFrame.instrument.pulseLength * obj.insar.masterFrame.instrument.chirpSlope rBW = obj.insar.masterFrame.instrument.pulseLength * obj.insar.masterFrame.instrument.chirpSlope
rgres = abs(SPEED_OF_LIGHT / (2.0 * rBW)) rgres = abs(SPEED_OF_LIGHT / (2.0 * rBW))

View File

@ -45,10 +45,6 @@ ellipsoid oblate ellipsoid of revolution (e.g, WGS84) with all the
See mainpage.txt for a complete dump of geo's philosophy-- otherwise, See mainpage.txt for a complete dump of geo's philosophy-- otherwise,
use the docstrings. use the docstrings.
""" """
import os
isce_path = os.getenv("ISCE_HOME")
## \namespace geo Vector- and Affine-spaces, on Earth ## \namespace geo Vector- and Affine-spaces, on Earth
__all__ = ['euclid', 'coordinates', 'ellipsoid', 'charts', 'affine', 'motion'] __all__ = ['euclid', 'coordinates', 'ellipsoid', 'charts', 'affine', 'motion']

View File

@ -32,10 +32,7 @@ from __future__ import print_function
import os import os
import sys import sys
import operator import operator
import logging from isce import logging
import logging.config
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
from iscesys.DictUtils.DictUtils import DictUtils as DU from iscesys.DictUtils.DictUtils import DictUtils as DU
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
Compatibility.checkPythonVersion() Compatibility.checkPythonVersion()

View File

@ -37,8 +37,7 @@ import isce
import zipfile import zipfile
import os import os
import sys import sys
import logging from isce import logging
import logging.config
from iscesys.Component.Component import Component from iscesys.Component.Component import Component
import shutil import shutil
from urllib import request from urllib import request
@ -325,8 +324,4 @@ class DataRetriever(Component):
# logger not defined until baseclass is called # logger not defined until baseclass is called
if not self.logger: if not self.logger:
logging.config.fileConfig(
os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf')
)
self.logger = logging.getLogger('isce.iscesys.DataRetriever') self.logger = logging.getLogger('isce.iscesys.DataRetriever')

View File

@ -1,7 +1,6 @@
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <complex.h> #include <complex.h>
#include <malloc.h>
/************************************************************************ /************************************************************************
* cfft1d is a subroutine used to call and initialize perflib Fortran FFT * * cfft1d is a subroutine used to call and initialize perflib Fortran FFT *
* routines. * * routines. *

View File

@ -29,10 +29,8 @@
import os
import logging import logging
import math import math
import logging.config
from iscesys.Compatibility import Compatibility from iscesys.Compatibility import Compatibility
@ -40,9 +38,6 @@ from isceobj.Planet import Planet
from isceobj import Constants as CN from isceobj import Constants as CN
from iscesys.Component.Component import Component, Port from iscesys.Component.Component import Component, Port
logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
'logging', 'logging.conf'))
RANGE_SAMPLING_RATE = Component.Parameter('rangeSamplingRate', RANGE_SAMPLING_RATE = Component.Parameter('rangeSamplingRate',
public_name='range sampling rate', public_name='range sampling rate',
type=float, type=float,

View File

@ -49,7 +49,7 @@ if envGPUampcor['GPU_ACC_ENABLED']:
build_base += "-ccbin " + envGPUampcor['NVCC_CCBIN'] + " " build_base += "-ccbin " + envGPUampcor['NVCC_CCBIN'] + " "
else: else:
print('Assuming default system compiler for nvcc.') print('Assuming default system compiler for nvcc.')
build_base += "-arch=sm_35 -shared -Xcompiler -fPIC -O3 " build_base += "-shared -Xcompiler -fPIC -O3 "
build_cmd = build_base + "-dc -m64 -o $TARGET -c $SOURCE" build_cmd = build_base + "-dc -m64 -o $TARGET -c $SOURCE"
built_path = os.path.join(build, 'gpu-ampcor.o') built_path = os.path.join(build, 'gpu-ampcor.o')
linked_path = os.path.join(build, 'gpu-ampcor-linked.o') linked_path = os.path.join(build, 'gpu-ampcor-linked.o')

View File

@ -1,2 +1,2 @@
nvcc -arch=sm_35 -Xcompiler -fPIC -o gpu-topo.o -c Topo.cu nvcc -Xcompiler -fPIC -o gpu-topo.o -c Topo.cu
cp -f gpu-topo.o .. cp -f gpu-topo.o ..

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import os import os
@ -28,7 +28,7 @@ if envPyCuAmpcor['GPU_ACC_ENABLED']:
if not os.path.exists(initFile): if not os.path.exists(initFile):
with open(initFile, 'w') as fout: with open(initFile, 'w') as fout:
fout.write("#!/usr/bin/env python") fout.write("#!/usr/bin/env python3")
listFiles = [initFile] listFiles = [initFile]
envPyCuAmpcor.Install(install, listFiles) envPyCuAmpcor.Install(install, listFiles)

View File

@ -0,0 +1,63 @@
#!/usr/bin/env python3
#
# Test program to run ampcor with GPU
# For two GeoTiff images
#
import argparse
import numpy as np
from PyCuAmpcor import PyCuAmpcor
def main():
'''
main program
'''
objOffset = PyCuAmpcor() # create the processor
objOffset.algorithm = 0 # cross-correlation method 0=freq 1=time
objOffset.deviceID = 0 # GPU device id to be used
objOffset.nStreams = 2 # cudaStreams; multiple streams to overlap data transfer with gpu calculations
objOffset.masterImageName = "master.tif"
objOffset.masterImageHeight = 16480 # RasterYSize
objOffset.masterImageWidth = 17000 # RasterXSize
objOffset.slaveImageName = "slave.tif"
objOffset.slaveImageHeight = 16480
objOffset.slaveImageWidth = 17000
objOffset.windowSizeWidth = 64 # template window size
objOffset.windowSizeHeight = 64
objOffset.halfSearchRangeDown = 20 # search range
objOffset.halfSearchRangeAcross = 20
objOffset.derampMethod = 1 # deramping for complex signal, set to 1 for real images
objOffset.skipSampleDown = 128 # strides between windows
objOffset.skipSampleAcross = 64
# the gpu processes several windows in one batch/chunk
# total windows in a chunk = numberWindowDownInChunk * numberWindowAcrossInChunk
# the maximum number of windows depends on gpu memory and model
objOffset.numberWindowDownInChunk = 1
objOffset.numberWindowAcrossInChunk = 10
objOffset.corrSurfaceOverSamplingFactor = 8 # oversampling factor for correlation surface
objOffset.corrSurfaceZoomInWindow = 16 # area in correlation surface to be oversampled
objOffset.corrSufaceOverSamplingMethod = 1 # fft or sinc oversampler
objOffset.useMmap = 1 # use a memory map as the buffer by default; set to 0 if it causes trouble
objOffset.mmapSize = 1 # mmap (or fallback buffer) size used for transferring data from file to gpu, in GB
objOffset.numberWindowDown = 40 # number of windows to be processed
objOffset.numberWindowAcross = 100
# to process the whole image, derive the window counts from the image size; e.g.:
# margin = 0 # margins to be neglected
#objOffset.numberWindowDown = (objOffset.slaveImageHeight - 2*margin - 2*objOffset.halfSearchRangeDown - objOffset.windowSizeHeight) // objOffset.skipSampleDown
#objOffset.numberWindowAcross = (objOffset.slaveImageWidth - 2*margin - 2*objOffset.halfSearchRangeAcross - objOffset.windowSizeWidth) // objOffset.skipSampleAcross
objOffset.setupParams()
objOffset.masterStartPixelDownStatic = objOffset.halfSearchRangeDown # starting pixel offset
objOffset.masterStartPixelAcrossStatic = objOffset.halfSearchRangeAcross
objOffset.setConstantGrossOffset(0, 0) # gross offset between master and slave images
objOffset.checkPixelInImageRange() # check whether all windows fall within the image range
objOffset.runAmpcor()
if __name__ == '__main__':
    main()
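The commented-out lines above give the window-count arithmetic for covering the whole image. With the values used in this script (16480 x 17000 pixels, margin 0), a hypothetical helper makes the numbers concrete:

def window_count(image_size, margin, half_search, window_size, skip):
    # number of correlation windows that fit along one dimension
    return (image_size - 2 * margin - 2 * half_search - window_size) // skip

print(window_count(16480, 0, 20, 64, 128))   # down: 127
print(window_count(17000, 0, 20, 64, 64))    # across: 264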

View File

@ -7,8 +7,8 @@
import argparse import argparse
import numpy as np import numpy as np
#from PyCuAmpcor import PyCuAmpcor from PyCuAmpcor import PyCuAmpcor
from isce.components.contrib.PyCuAmpcor import PyCuAmpcor
def main(): def main():
''' '''
@ -20,10 +20,10 @@ def main():
objOffset.algorithm = 0 objOffset.algorithm = 0
objOffset.deviceID = 0 # -1:let system find the best GPU objOffset.deviceID = 0 # -1:let system find the best GPU
objOffset.nStreams = 2 #cudaStreams objOffset.nStreams = 2 #cudaStreams
objOffset.masterImageName = "master.slc" objOffset.masterImageName = "20131213.slc.vrt"
objOffset.masterImageHeight = 43008 objOffset.masterImageHeight = 43008
objOffset.masterImageWidth = 24320 objOffset.masterImageWidth = 24320
objOffset.slaveImageName = "slave.slc" objOffset.slaveImageName = "20131221.slc.vrt"
objOffset.slaveImageHeight = 43008 objOffset.slaveImageHeight = 43008
objOffset.slaveImageWidth = 24320 objOffset.slaveImageWidth = 24320
objOffset.windowSizeWidth = 64 objOffset.windowSizeWidth = 64
@ -40,6 +40,7 @@ def main():
objOffset.corrSurfaceOverSamplingFactor = 8 objOffset.corrSurfaceOverSamplingFactor = 8
objOffset.corrSurfaceZoomInWindow = 16 objOffset.corrSurfaceZoomInWindow = 16
objOffset.corrSufaceOverSamplingMethod = 1 objOffset.corrSufaceOverSamplingMethod = 1
objOffset.useMmap = 1
objOffset.mmapSize = 8 objOffset.mmapSize = 8
objOffset.setupParams() objOffset.setupParams()

View File

@ -11,10 +11,10 @@ def main():
objOffset = PyCuAmpcor() objOffset = PyCuAmpcor()
#step 1 set constant parameters #step 1 set constant parameters
objOffset.masterImageName = "master.slc" objOffset.masterImageName = "master.slc.vrt"
objOffset.masterImageHeight = 128 objOffset.masterImageHeight = 128
objOffset.masterImageWidth = 128 objOffset.masterImageWidth = 128
objOffset.slaveImageName = "slave.slc" objOffset.slaveImageName = "slave.slc.vrt"
objOffset.masterImageHeight = 128 objOffset.masterImageHeight = 128
objOffset.masterImageWidth = 128 objOffset.masterImageWidth = 128
objOffset.skipSampleDown = 2 objOffset.skipSampleDown = 2

View File

@ -0,0 +1,154 @@
#include "GDALImage.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <cublas_v2.h>
#include "cudaError.h"
#include <errno.h>
/**
* \brief Constructor
*
* @param filename a std::string with the raster image file name
*/
GDALImage::GDALImage(std::string filename, int band, int cacheSizeInGB, int useMmap)
: _useMmap(useMmap)
{
// open the file as dataset
_poDataset = (GDALDataset *) GDALOpen(filename.c_str(), GA_ReadOnly );
// if something is wrong, throw an exception
// GDAL reports the error message
if(!_poDataset)
throw;
// check the band info
int count = _poDataset->GetRasterCount();
if(band > count)
{
std::cout << "The desired band " << band << " is greated than " << count << " bands available";
throw;
}
// get the desired band
_poBand = _poDataset->GetRasterBand(band);
if(!_poBand)
throw;
// get the width(x), and height(y)
_width = _poBand->GetXSize();
_height = _poBand->GetYSize();
_dataType = _poBand->GetRasterDataType();
// determine the image type
_isComplex = GDALDataTypeIsComplex(_dataType);
// determine the pixel size in bytes
_pixelSize = GDALGetDataTypeSize(_dataType)/8; // GDALGetDataTypeSize returns bits
_bufferSize = 1024LL*1024*1024*cacheSizeInGB; // cache size is given in GB
// checking whether using memory map
if(_useMmap) {
char **papszOptions = NULL;
// if cacheSizeInGB = 0, use default
// else set the option
if(cacheSizeInGB > 0)
papszOptions = CSLSetNameValue( papszOptions,
"CACHE_SIZE",
std::to_string(_bufferSize).c_str());
// space between two lines
GIntBig pnLineSpace;
// set up the virtual mem buffer
_poBandVirtualMem = GDALGetVirtualMemAuto(
static_cast<GDALRasterBandH>(_poBand),
GF_Read,
&_pixelSize,
&pnLineSpace,
papszOptions);
// check it
if(!_poBandVirtualMem)
throw;
// get the starting pointer
_memPtr = CPLVirtualMemGetAddr(_poBandVirtualMem);
}
else { // use a buffer
checkCudaErrors(cudaMallocHost((void **)&_memPtr, _bufferSize));
}
// make sure memPtr is not Null
if (!_memPtr)
throw;
// all done
}
/// load a tile of data h_tile x w_tile from CPU (mmap) to GPU
/// @param dArray pointer for array in device memory
/// @param h_offset Down/Height offset
/// @param w_offset Across/Width offset
/// @param h_tile Down/Height tile size
/// @param w_tile Across/Width tile size
/// @param stream CUDA stream for copying
void GDALImage::loadToDevice(void *dArray, size_t h_offset, size_t w_offset, size_t h_tile, size_t w_tile, cudaStream_t stream)
{
size_t tileStartOffset = (h_offset*_width + w_offset)*_pixelSize;
char * startPtr = (char *)_memPtr ;
startPtr += tileStartOffset;
// @note
// We assume down/across directions as rows/cols. Therefore, SLC mmap and device array are both row major.
// cuBlas assumes both source and target arrays are column major.
// To use cublasSetMatrix, we need to switch w_tile/h_tile for rows/cols
// checkCudaErrors(cublasSetMatrixAsync(w_tile, h_tile, sizeof(float2), startPtr, width, dArray, w_tile, stream));
if (_useMmap)
checkCudaErrors(cudaMemcpy2DAsync(dArray, w_tile*_pixelSize, startPtr, _width*_pixelSize,
w_tile*_pixelSize, h_tile, cudaMemcpyHostToDevice,stream));
else {
// get the total tile size in bytes
size_t tileSize = h_tile*w_tile*_pixelSize;
// if the size is bigger than existing buffer, reallocate
if (tileSize > _bufferSize) {
// maybe we need to make it to fit the pagesize
_bufferSize = tileSize;
checkCudaErrors(cudaFree(_memPtr));
checkCudaErrors(cudaMallocHost((void **)&_memPtr, _bufferSize));
}
// copy from file to buffer
CPLErr err = _poBand->RasterIO(GF_Read, //eRWFlag
w_offset, h_offset, //nXOff, nYOff
w_tile, h_tile, // nXSize, nYSize
_memPtr, // pData
w_tile*h_tile, 1, // nBufXSize, nBufYSize
_dataType, //eBufType
0, 0, //nPixelSpace, nLineSpace in pData
NULL //psExtraArg extra resampling callback
);
if(err != CE_None)
throw;
// copy from buffer to gpu
checkCudaErrors(cudaMemcpyAsync(dArray, _memPtr, tileSize, cudaMemcpyHostToDevice, stream));
}
}
GDALImage::~GDALImage()
{
// free the virtual memory
CPLVirtualMemFree(_poBandVirtualMem);
// free the GDAL Dataset, close the file
delete _poDataset;
}
// end of file
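loadToDevice's buffered branch issues a windowed RasterIO read before the copy to the GPU. The same windowed read is easy to reproduce from Python with the GDAL bindings (a sketch; 'master.tif' is a hypothetical file):

from osgeo import gdal

ds = gdal.Open('master.tif', gdal.GA_ReadOnly)   # hypothetical file
band = ds.GetRasterBand(1)

h_offset, w_offset = 100, 200                    # tile origin (row, col)
h_tile, w_tile = 64, 64                          # tile size

# windowed read: (xoff, yoff, xsize, ysize) -> numpy array of shape (h, w)
tile = band.ReadAsArray(w_offset, h_offset, w_tile, h_tile)
print(tile.shape, tile.dtype)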

View File

@ -0,0 +1,79 @@
// -*- c++ -*-
/**
* \brief Class for an image described by a GDAL VRT
*
* only complex (pixelOffset=8) or real (pixelOffset=4) images are supported, such as SLC and single-precision TIFF
*/
#ifndef __GDALIMAGE_H
#define __GDALIMAGE_H
#include <cublas_v2.h>
#include <string>
#include <gdal/gdal_priv.h>
#include <gdal/cpl_conv.h>
class GDALImage{
public:
using size_t = std::size_t;
private:
size_t _fileSize;
int _height;
int _width;
// buffer pointer
void * _memPtr = NULL;
int _pixelSize; //in bytes
int _isComplex;
size_t _bufferSize;
int _useMmap;
GDALDataType _dataType;
CPLVirtualMem * _poBandVirtualMem = NULL;
GDALDataset * _poDataset = NULL;
GDALRasterBand * _poBand = NULL;
public:
GDALImage() = delete;
GDALImage(std::string fn, int band=1, int cacheSizeInGB=0, int useMmap=1);
void * getmemPtr()
{
return(_memPtr);
}
size_t getFileSize()
{
return (_fileSize);
}
size_t getHeight() {
return (_height);
}
size_t getWidth()
{
return (_width);
}
int getPixelSize()
{
return _pixelSize;
}
bool isComplex()
{
return _isComplex;
}
void loadToDevice(void *dArray, size_t h_offset, size_t w_offset, size_t h_tile, size_t w_tile, cudaStream_t stream);
~GDALImage();
};
#endif //__GDALIMAGE_H

View File

@ -4,22 +4,23 @@ LDFLAGS = -lcuda -lcudart -lcufft -lcublas
CXXFLAGS = -std=c++11 -fpermissive -fPIC -shared CXXFLAGS = -std=c++11 -fpermissive -fPIC -shared
NVCCFLAGS = -ccbin g++ -m64 \ NVCCFLAGS = -ccbin g++ -m64 \
-gencode arch=compute_35,code=sm_35 \ -gencode arch=compute_35,code=sm_35 \
-gencode arch=compute_60,code=sm_60 \
-Xcompiler -fPIC -shared -Wno-deprecated-gpu-targets \ -Xcompiler -fPIC -shared -Wno-deprecated-gpu-targets \
-ftz=false -prec-div=true -prec-sqrt=true -ftz=false -prec-div=true -prec-sqrt=true
CXX=g++ CXX=g++
NVCC=nvcc NVCC=nvcc
DEPS = cudaUtil.h cudaError.h cuArrays.h SlcImage.h cuAmpcorParameter.h DEPS = cudaUtil.h cudaError.h cuArrays.h GDALImage.h cuAmpcorParameter.h
OBJS = SlcImage.o cuArrays.o cuArraysCopy.o cuArraysPadding.o cuOverSampler.o \ OBJS = GDALImage.o cuArrays.o cuArraysCopy.o cuArraysPadding.o cuOverSampler.o \
cuSincOverSampler.o cuDeramp.o cuOffset.o \ cuSincOverSampler.o cuDeramp.o cuOffset.o \
cuCorrNormalization.o cuAmpcorParameter.o cuCorrTimeDomain.o cuCorrFrequency.o \ cuCorrNormalization.o cuAmpcorParameter.o cuCorrTimeDomain.o cuCorrFrequency.o \
cuAmpcorChunk.o cuAmpcorController.o cuEstimateStats.o cuAmpcorChunk.o cuAmpcorController.o cuEstimateStats.o
all: cuampcor all: pyampcor
SlcImage.o: SlcImage.cu $(DEPS) GDALImage.o: GDALImage.cu $(DEPS)
$(NVCC) $(NVCCFLAGS) -c -o $@ SlcImage.cu $(NVCC) $(NVCCFLAGS) -c -o $@ GDALImage.cu
cuArrays.o: cuArrays.cu $(DEPS) cuArrays.o: cuArrays.cu $(DEPS)
$(NVCC) $(NVCCFLAGS) -c -o $@ cuArrays.cu $(NVCC) $(NVCCFLAGS) -c -o $@ cuArrays.cu
@ -64,7 +65,7 @@ cuEstimateStats.o: cuEstimateStats.cu
$(NVCC) $(NVCCFLAGS) -c -o $@ cuEstimateStats.cu $(NVCC) $(NVCCFLAGS) -c -o $@ cuEstimateStats.cu
cuampcor: $(OBJS) pyampcor: $(OBJS)
rm -f PyCuAmpcor.cpp && python3 setup.py build_ext --inplace rm -f PyCuAmpcor.cpp && python3 setup.py build_ext --inplace
clean: clean:

View File

@ -62,7 +62,8 @@ cdef extern from "cuAmpcorParameter.h":
int slaveImageHeight ## slave image height int slaveImageHeight ## slave image height
int slaveImageWidth ## slave image width int slaveImageWidth ## slave image width
int mmapSizeInGB ## mmap buffer size in unit of Gigabytes int useMmap ## whether to use mmap
int mmapSizeInGB ## mmap buffer size in unit of Gigabytes (if not mmap, the buffer size)
## total number of chips/windows ## total number of chips/windows
int numberWindowDown ## number of total windows (down) int numberWindowDown ## number of total windows (down)
@ -103,6 +104,7 @@ cdef extern from "cuAmpcorParameter.h":
string grossOffsetImageName string grossOffsetImageName
string offsetImageName ## Output Offset fields filename string offsetImageName ## Output Offset fields filename
string snrImageName ## Output SNR filename string snrImageName ## Output SNR filename
string covImageName ## Output COV filename
void setStartPixels(int*, int*, int*, int*) void setStartPixels(int*, int*, int*, int*)
void setStartPixels(int, int, int*, int*) void setStartPixels(int, int, int*, int*)
void setStartPixels(int, int, int, int) void setStartPixels(int, int, int, int)
@ -143,6 +145,12 @@ cdef class PyCuAmpcor(object):
def nStreams(self, int a): def nStreams(self, int a):
self.c_cuAmpcor.param.nStreams = a self.c_cuAmpcor.param.nStreams = a
@property @property
def useMmap(self):
return self.c_cuAmpcor.param.useMmap
@useMmap.setter
def useMmap(self, int a):
self.c_cuAmpcor.param.useMmap = a
@property
def mmapSize(self): def mmapSize(self):
return self.c_cuAmpcor.param.mmapSizeInGB return self.c_cuAmpcor.param.mmapSizeInGB
@mmapSize.setter @mmapSize.setter
@ -324,6 +332,7 @@ cdef class PyCuAmpcor(object):
@offsetImageName.setter @offsetImageName.setter
def offsetImageName(self, str a): def offsetImageName(self, str a):
self.c_cuAmpcor.param.offsetImageName = <string> a.encode() self.c_cuAmpcor.param.offsetImageName = <string> a.encode()
@property @property
def snrImageName(self): def snrImageName(self):
return self.c_cuAmpcor.param.snrImageName return self.c_cuAmpcor.param.snrImageName
@ -331,6 +340,13 @@ cdef class PyCuAmpcor(object):
def snrImageName(self, str a): def snrImageName(self, str a):
self.c_cuAmpcor.param.snrImageName = <string> a.encode() self.c_cuAmpcor.param.snrImageName = <string> a.encode()
@property
def covImageName(self):
return self.c_cuAmpcor.param.covImageName
@covImageName.setter
def covImageName(self, str a):
self.c_cuAmpcor.param.covImageName = <string> a.encode()
@property @property
def masterStartPixelDownStatic(self): def masterStartPixelDownStatic(self):
return self.c_cuAmpcor.param.masterStartPixelDown0 return self.c_cuAmpcor.param.masterStartPixelDown0

View File

@ -6,7 +6,7 @@ package = envPyCuAmpcor['PACKAGE']
project = envPyCuAmpcor['PROJECT'] project = envPyCuAmpcor['PROJECT']
build = envPyCuAmpcor['PRJ_LIB_DIR'] build = envPyCuAmpcor['PRJ_LIB_DIR']
install = envPyCuAmpcor['PRJ_SCONS_INSTALL'] + '/' + package + '/' + project install = envPyCuAmpcor['PRJ_SCONS_INSTALL'] + '/' + package + '/' + project
listFiles = ['SlcImage.cu', 'cuArrays.cu', 'cuArraysCopy.cu', listFiles = ['GDALImage.cu', 'cuArrays.cu', 'cuArraysCopy.cu',
'cuArraysPadding.cu', 'cuOverSampler.cu', 'cuArraysPadding.cu', 'cuOverSampler.cu',
'cuSincOverSampler.cu', 'cuDeramp.cu', 'cuSincOverSampler.cu', 'cuDeramp.cu',
'cuOffset.cu', 'cuCorrNormalization.cu', 'cuOffset.cu', 'cuCorrNormalization.cu',

View File

@ -33,22 +33,38 @@ void cuAmpcorChunk::run(int idxDown_, int idxAcross_)
cuCorrTimeDomain(r_masterBatchRaw, r_slaveBatchRaw, r_corrBatchRaw, stream); //time domain cross correlation cuCorrTimeDomain(r_masterBatchRaw, r_slaveBatchRaw, r_corrBatchRaw, stream); //time domain cross correlation
} }
cuCorrNormalize(r_masterBatchRaw, r_slaveBatchRaw, r_corrBatchRaw, stream); cuCorrNormalize(r_masterBatchRaw, r_slaveBatchRaw, r_corrBatchRaw, stream);
//find the maximum location of none-oversampled correlation
cuArraysMaxloc2D(r_corrBatchRaw, offsetInit, stream);
// Estimate SNR (Minyan Zhong)
//std::cout<< "flag stats 1" <<std::endl; // find the maximum location of none-oversampled correlation
//cuArraysCopyExtractCorr(r_corrBatchRaw, r_corrBatchZoomIn, i_corrBatchZoomInValid, offsetInit, stream); // 41 x 41, if halfsearchrange=20
//cuArraysMaxloc2D(r_corrBatchRaw, offsetInit, stream);
cuArraysMaxloc2D(r_corrBatchRaw, offsetInit, r_maxval, stream);
//std::cout<< "flag stats 2" <<std::endl; offsetInit->outputToFile("offsetInit1", stream);
//cuArraysSumCorr(r_corrBatchZoomIn, i_corrBatchZoomInValid, r_corrBatchSum, i_corrBatchValidCount, stream);
//std::cout<< "flag stats 3" <<std::endl; // Estimation of statistics
//cuEstimateSnr(r_corrBatchSum, i_corrBatchValidCount, r_maxval, r_snrValue, stream); // Author: Minyan Zhong
// Extraction of correlation surface around the peak
cuArraysCopyExtractCorr(r_corrBatchRaw, r_corrBatchRawZoomIn, i_corrBatchZoomInValid, offsetInit, stream);
// cudaDeviceSynchronize();
// debug: output the intermediate results
r_maxval->outputToFile("r_maxval",stream);
r_corrBatchRaw->outputToFile("r_corrBatchRaw",stream);
r_corrBatchRawZoomIn->outputToFile("r_corrBatchRawZoomIn",stream);
i_corrBatchZoomInValid->outputToFile("i_corrBatchZoomInValid",stream);
// Summation of correlation and data point values
cuArraysSumCorr(r_corrBatchRawZoomIn, i_corrBatchZoomInValid, r_corrBatchSum, i_corrBatchValidCount, stream);
// SNR
cuEstimateSnr(r_corrBatchSum, i_corrBatchValidCount, r_maxval, r_snrValue, stream);
// Variance
// cuEstimateVariance(r_corrBatchRaw, offsetInit, r_maxval, r_covValue, stream);
// Using the approximate estimation to adjust slave image (half search window size becomes only 4 pixels)
//offsetInit->debuginfo(stream); //offsetInit->debuginfo(stream);
// determine the starting pixel to extract slave images around the max location // determine the starting pixel to extract slave images around the max location
cuDetermineSlaveExtractOffset(offsetInit, cuDetermineSlaveExtractOffset(offsetInit,
@ -109,12 +125,21 @@ void cuAmpcorChunk::run(int idxDown_, int idxAcross_)
//offsetZoomIn->debuginfo(stream); //offsetZoomIn->debuginfo(stream);
//offsetFinal->debuginfo(stream); //offsetFinal->debuginfo(stream);
// Do insertion.
// Offsetfields.
cuArraysCopyInsert(offsetFinal, offsetImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream); cuArraysCopyInsert(offsetFinal, offsetImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream);
// Minyan Zhong // Debugging matrix.
//cuArraysCopyInsert(corrMaxValue, snrImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream); cuArraysCopyInsert(r_corrBatchSum, floatImage1, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream);
//cuArraysCopyInsert(r_snrValue, snrImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream); cuArraysCopyInsert(i_corrBatchValidCount, intImage1, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream);
// Old: save max correlation coefficients.
//cuArraysCopyInsert(corrMaxValue, snrImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream);
// New: save SNR
cuArraysCopyInsert(r_snrValue, snrImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream);
// Variance.
cuArraysCopyInsert(r_covValue, covImage, idxDown_*param->numberWindowDownInChunk, idxAcross_*param->numberWindowAcrossInChunk,stream);
} }
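cuEstimateSnr consumes the three statistics inserted above: the summed correlation around the peak, the count of valid samples, and the peak value itself. The kernel is not part of this diff; one plausible reading, stated only as an assumption, is peak power over the mean power of the remaining valid samples:

import numpy as np

def estimate_snr(corr_sum_sq, valid_count, max_val):
    # assumed form only -- the CUDA kernel is not shown in this diff
    mean_others = (corr_sum_sq - max_val ** 2) / (valid_count - 1)
    return max_val ** 2 / mean_others

corr = 0.1 * np.random.rand(41, 41)
corr[20, 20] = 0.9                    # synthetic correlation peak
print(estimate_snr((corr ** 2).sum(), corr.size, corr.max()))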
void cuAmpcorChunk::setIndex(int idxDown_, int idxAcross_) void cuAmpcorChunk::setIndex(int idxDown_, int idxAcross_)
@ -162,19 +187,37 @@ void cuAmpcorChunk::getRelativeOffset(int *rStartPixel, const int *oStartPixel,
void cuAmpcorChunk::loadMasterChunk()
{
-   //load a chunk from mmap to gpu
-   int startD = param->masterChunkStartPixelDown[idxChunk];
-   int startA = param->masterChunkStartPixelAcross[idxChunk];
-   int height = param->masterChunkHeight[idxChunk];
-   int width = param->masterChunkWidth[idxChunk];
-   masterImage->loadToDevice(c_masterChunkRaw->devData, startD, startA, height, width, stream);
-   std::cout << "debug load master: " << startD << " " << startA << " " << height << " " << width << "\n";
-   //copy the chunk to a batch of images format (nImages, height, width)
-   //use cpu for some simple math
+   // we first load the whole chunk of image from cpu to a gpu buffer c(r)_masterChunkRaw
+   // then copy to a batch of windows with (nImages, height, width) (leading dimension on the right)
+
+   // get the chunk size to be loaded to gpu
+   int startD = param->masterChunkStartPixelDown[idxChunk]; //start pixel down (along height)
+   int startA = param->masterChunkStartPixelAcross[idxChunk]; // start pixel across (along width)
+   int height = param->masterChunkHeight[idxChunk]; // number of pixels along height
+   int width = param->masterChunkWidth[idxChunk]; // number of pixels along width
+
+   //use cpu to compute the starting positions for each window
    getRelativeOffset(ChunkOffsetDown->hostData, param->masterStartPixelDown, param->masterChunkStartPixelDown[idxChunk]);
+   // copy the positions to gpu
    ChunkOffsetDown->copyToDevice(stream);
+   // same for the across direction
    getRelativeOffset(ChunkOffsetAcross->hostData, param->masterStartPixelAcross, param->masterChunkStartPixelAcross[idxChunk]);
    ChunkOffsetAcross->copyToDevice(stream);
+
+   // check whether the image is complex (e.g., SLC) or real (e.g., TIFF)
+   if(masterImage->isComplex())
+   {
+       // allocate a gpu buffer to load data from cpu/file
+       // try allocate/deallocate the buffer on the fly to save gpu memory 07/09/19
+       c_masterChunkRaw = new cuArrays<float2> (param->maxMasterChunkHeight, param->maxMasterChunkWidth);
+       c_masterChunkRaw->allocate();
+       // load the data from cpu
+       masterImage->loadToDevice((void *)c_masterChunkRaw->devData, startD, startA, height, width, stream);
+       //std::cout << "debug load master: " << startD << " " << startA << " " << height << " " << width << "\n";
+       //copy the chunk to a batch format (nImages, height, width)
        // if derampMethod = 0 (no deramp), take amplitudes; otherwise, copy complex data
        if(param->derampMethod == 0) {
            cuArraysCopyToBatchAbsWithOffset(c_masterChunkRaw, param->masterChunkWidth[idxChunk],
@@ -184,10 +227,41 @@ void cuAmpcorChunk::loadMasterChunk()
            cuArraysCopyToBatchWithOffset(c_masterChunkRaw, param->masterChunkWidth[idxChunk],
                c_masterBatchRaw, ChunkOffsetDown->devData, ChunkOffsetAcross->devData, stream);
        }
+       // deallocate the gpu buffer
+       delete c_masterChunkRaw;
+   }
+   // if the image is real
+   else {
+       r_masterChunkRaw = new cuArrays<float> (param->maxMasterChunkHeight, param->maxMasterChunkWidth);
+       r_masterChunkRaw->allocate();
+       // load the data from cpu
+       masterImage->loadToDevice((void *)r_masterChunkRaw->devData, startD, startA, height, width, stream);
+       // copy the chunk (real) to a batch format (complex)
+       cuArraysCopyToBatchWithOffsetR2C(r_masterChunkRaw, param->masterChunkWidth[idxChunk],
+           c_masterBatchRaw, ChunkOffsetDown->devData, ChunkOffsetAcross->devData, stream);
+       // deallocate the gpu buffer
+       delete r_masterChunkRaw;
+   }
}
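
For orientation, here is a minimal NumPy sketch (not from the source) of the chunk-to-batch gather that loadMasterChunk/loadSlaveChunk delegate to the cuArraysCopyToBatch*WithOffset kernels; the sizes, offsets, and function name are illustrative assumptions:

```python
# Each window i is cut out of the 2D chunk at its own (down, across) offset,
# producing a (nWindows, height, width) batch, as the CUDA kernels do on the GPU.
import numpy as np

def copy_to_batch_with_offset(chunk, offsets_down, offsets_across, win_h, win_w):
    batch = np.empty((len(offsets_down), win_h, win_w), dtype=chunk.dtype)
    for i, (od, oa) in enumerate(zip(offsets_down, offsets_across)):
        batch[i] = chunk[od:od + win_h, oa:oa + win_w]
    return batch

chunk = np.arange(64 * 64, dtype=np.float32).reshape(64, 64)
batch = copy_to_batch_with_offset(chunk, [0, 8], [0, 8], 16, 16)
print(batch.shape)  # (2, 16, 16)
```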
void cuAmpcorChunk::loadSlaveChunk()
{
+   //copy to a batch format (nImages, height, width)
+   getRelativeOffset(ChunkOffsetDown->hostData, param->slaveStartPixelDown, param->slaveChunkStartPixelDown[idxChunk]);
+   ChunkOffsetDown->copyToDevice(stream);
+   getRelativeOffset(ChunkOffsetAcross->hostData, param->slaveStartPixelAcross, param->slaveChunkStartPixelAcross[idxChunk]);
+   ChunkOffsetAcross->copyToDevice(stream);
+
+   if(slaveImage->isComplex())
+   {
+       c_slaveChunkRaw = new cuArrays<float2> (param->maxSlaveChunkHeight, param->maxSlaveChunkWidth);
+       c_slaveChunkRaw->allocate();
        //load a chunk from mmap to gpu
        slaveImage->loadToDevice(c_slaveChunkRaw->devData,
            param->slaveChunkStartPixelDown[idxChunk],
@@ -195,38 +269,60 @@ void cuAmpcorChunk::loadSlaveChunk()
            param->slaveChunkHeight[idxChunk],
            param->slaveChunkWidth[idxChunk],
            stream);
-   //copy to a batch format (nImages, height, width)
-   getRelativeOffset(ChunkOffsetDown->hostData, param->slaveStartPixelDown, param->slaveChunkStartPixelDown[idxChunk]);
-   ChunkOffsetDown->copyToDevice(stream);
-   getRelativeOffset(ChunkOffsetAcross->hostData, param->slaveStartPixelAcross, param->slaveChunkStartPixelAcross[idxChunk]);
-   ChunkOffsetAcross->copyToDevice(stream);
        if(param->derampMethod == 0) {
            cuArraysCopyToBatchAbsWithOffset(c_slaveChunkRaw, param->slaveChunkWidth[idxChunk],
                c_slaveBatchRaw, ChunkOffsetDown->devData, ChunkOffsetAcross->devData, stream);
        }
-       else
-       {
+       else {
            cuArraysCopyToBatchWithOffset(c_slaveChunkRaw, param->slaveChunkWidth[idxChunk],
                c_slaveBatchRaw, ChunkOffsetDown->devData, ChunkOffsetAcross->devData, stream);
        }
+       delete c_slaveChunkRaw;
+   }
+   else { //real image
+       //allocate the gpu buffer
+       r_slaveChunkRaw = new cuArrays<float> (param->maxSlaveChunkHeight, param->maxSlaveChunkWidth);
+       r_slaveChunkRaw->allocate();
+       //load a chunk from mmap to gpu
+       slaveImage->loadToDevice(r_slaveChunkRaw->devData,
+           param->slaveChunkStartPixelDown[idxChunk],
+           param->slaveChunkStartPixelAcross[idxChunk],
+           param->slaveChunkHeight[idxChunk],
+           param->slaveChunkWidth[idxChunk],
+           stream);
+       // convert to the batch format
+       cuArraysCopyToBatchWithOffsetR2C(r_slaveChunkRaw, param->slaveChunkWidth[idxChunk],
+           c_slaveBatchRaw, ChunkOffsetDown->devData, ChunkOffsetAcross->devData, stream);
+       delete r_slaveChunkRaw;
+   }
}
-cuAmpcorChunk::cuAmpcorChunk(cuAmpcorParameter *param_, SlcImage *master_, SlcImage *slave_,
-   cuArrays<float2> *offsetImage_, cuArrays<float> *snrImage_, cudaStream_t stream_)
+cuAmpcorChunk::cuAmpcorChunk(cuAmpcorParameter *param_, GDALImage *master_, GDALImage *slave_,
+   cuArrays<float2> *offsetImage_, cuArrays<float> *snrImage_, cuArrays<float3> *covImage_, cuArrays<int> *intImage1_, cuArrays<float> *floatImage1_, cudaStream_t stream_)
{
    param = param_;
    masterImage = master_;
    slaveImage = slave_;
    offsetImage = offsetImage_;
    snrImage = snrImage_;
+   covImage = covImage_;
+   intImage1 = intImage1_;
+   floatImage1 = floatImage1_;
    stream = stream_;

-   std::cout << "debug Chunk creator " << param->maxMasterChunkHeight << " " << param->maxMasterChunkWidth << "\n";
-   c_masterChunkRaw = new cuArrays<float2> (param->maxMasterChunkHeight, param->maxMasterChunkWidth);
-   c_masterChunkRaw->allocate();
-   c_slaveChunkRaw = new cuArrays<float2> (param->maxSlaveChunkHeight, param->maxSlaveChunkWidth);
-   c_slaveChunkRaw->allocate();
+   // std::cout << "debug Chunk creator " << param->maxMasterChunkHeight << " " << param->maxMasterChunkWidth << "\n";
+   // try allocate/deallocate on the fly to save gpu memory 07/09/19
+   // c_masterChunkRaw = new cuArrays<float2> (param->maxMasterChunkHeight, param->maxMasterChunkWidth);
+   // c_masterChunkRaw->allocate();
+   // c_slaveChunkRaw = new cuArrays<float2> (param->maxSlaveChunkHeight, param->maxSlaveChunkWidth);
+   // c_slaveChunkRaw->allocate();

    ChunkOffsetDown = new cuArrays<int> (param->numberWindowDownInChunk, param->numberWindowAcrossInChunk);
    ChunkOffsetDown->allocate();
@@ -329,6 +425,54 @@ cuAmpcorChunk::cuAmpcorChunk(cuAmpcorParameter *param_, SlcImage *master_, SlcIm
    corrMaxValue = new cuArrays<float> (param->numberWindowDownInChunk, param->numberWindowAcrossInChunk);
    corrMaxValue->allocate();

+   // new arrays due to snr estimation
+   std::cout<< "corrRawZoomInHeight: " << param->corrRawZoomInHeight << "\n";
+   std::cout<< "corrRawZoomInWidth: " << param->corrRawZoomInWidth << "\n";
+
+   r_corrBatchRawZoomIn = new cuArrays<float> (
+       param->corrRawZoomInHeight,
+       param->corrRawZoomInWidth,
+       param->numberWindowDownInChunk,
+       param->numberWindowAcrossInChunk);
+   r_corrBatchRawZoomIn->allocate();
+
+   i_corrBatchZoomInValid = new cuArrays<int> (
+       param->corrRawZoomInHeight,
+       param->corrRawZoomInWidth,
+       param->numberWindowDownInChunk,
+       param->numberWindowAcrossInChunk);
+   i_corrBatchZoomInValid->allocate();
+
+   r_corrBatchSum = new cuArrays<float> (
+       param->numberWindowDownInChunk,
+       param->numberWindowAcrossInChunk);
+   r_corrBatchSum->allocate();
+
+   i_corrBatchValidCount = new cuArrays<int> (
+       param->numberWindowDownInChunk,
+       param->numberWindowAcrossInChunk);
+   i_corrBatchValidCount->allocate();
+
+   i_maxloc = new cuArrays<int2> (param->numberWindowDownInChunk, param->numberWindowAcrossInChunk);
+   i_maxloc->allocate();
+
+   r_maxval = new cuArrays<float> (param->numberWindowDownInChunk, param->numberWindowAcrossInChunk);
+   r_maxval->allocate();
+
+   r_snrValue = new cuArrays<float> (param->numberWindowDownInChunk, param->numberWindowAcrossInChunk);
+   r_snrValue->allocate();
+
+   r_covValue = new cuArrays<float3> (param->numberWindowDownInChunk, param->numberWindowAcrossInChunk);
+   r_covValue->allocate();
+   // end of new arrays

    if(param->oversamplingMethod) {
        corrSincOverSampler = new cuSincOverSamplerR2R(param->zoomWindowSize, param->oversamplingFactor, stream);
    }
cuAmpcorChunk.h (PyCuAmpcor)
@@ -6,7 +6,7 @@
#ifndef __CUAMPCORCHUNK_H
#define __CUAMPCORCHUNK_H

-#include "SlcImage.h"
+#include "GDALImage.h"
#include "cuArrays.h"
#include "cuAmpcorParameter.h"
#include "cuOverSampler.h"
@@ -24,15 +24,26 @@ private:
    int devId;
    cudaStream_t stream;

-   SlcImage *masterImage;
-   SlcImage *slaveImage;
+   GDALImage *masterImage;
+   GDALImage *slaveImage;
    cuAmpcorParameter *param;
    cuArrays<float2> *offsetImage;
    cuArrays<float> *snrImage;
+   cuArrays<float3> *covImage;
+
+   // added for test
+   cuArrays<int> *intImage1;
+   cuArrays<float> *floatImage1;

+   // gpu buffer
    cuArrays<float2> * c_masterChunkRaw, * c_slaveChunkRaw;
+   cuArrays<float> * r_masterChunkRaw, * r_slaveChunkRaw;
+
+   // gpu windows raw data
    cuArrays<float2> * c_masterBatchRaw, * c_slaveBatchRaw, * c_slaveBatchZoomIn;
    cuArrays<float> * r_masterBatchRaw, * r_slaveBatchRaw;
+
+   // gpu windows oversampled data
    cuArrays<float2> * c_masterBatchOverSampled, * c_slaveBatchOverSampled;
    cuArrays<float> * r_masterBatchOverSampled, * r_slaveBatchOverSampled;
    cuArrays<float> * r_corrBatchRaw, * r_corrBatchZoomIn, * r_corrBatchZoomInOverSampled, * r_corrBatchZoomInAdjust;
@@ -50,26 +61,32 @@ private:
    cuArrays<int2> *offsetInit;
    cuArrays<int2> *offsetZoomIn;
    cuArrays<float2> *offsetFinal;
+   cuArrays<float> *corrMaxValue;
+
+   //corr statistics
+   cuArrays<int2> *i_maxloc;
+   cuArrays<float> *r_maxval;
+
+   //SNR estimation
+   cuArrays<float> *r_corrBatchRawZoomIn;
    cuArrays<float> *r_corrBatchSum;
    cuArrays<int> *i_corrBatchZoomInValid, *i_corrBatchValidCount;
-   cuArrays<float> *corrMaxValue;
    cuArrays<float> *r_snrValue;
-   cuArrays<int2> *i_maxloc;
-   cuArrays<float> *r_maxval;
+
+   // Variance estimation.
+   cuArrays<float3> *r_covValue;

public:
    cuAmpcorChunk() {}
    //cuAmpcorChunk(cuAmpcorParameter *param_, SlcImage *master_, SlcImage *slave_);
    void setIndex(int idxDown_, int idxAcross_);
-   cuAmpcorChunk(cuAmpcorParameter *param_, SlcImage *master_, SlcImage *slave_, cuArrays<float2> *offsetImage_,
-       cuArrays<float> *snrImage_, cudaStream_t stream_);
+   cuAmpcorChunk(cuAmpcorParameter *param_, GDALImage *master_, GDALImage *slave_, cuArrays<float2> *offsetImage_,
+       cuArrays<float> *snrImage_, cuArrays<float3> *covImage_, cuArrays<int> *intImage1_, cuArrays<float> *floatImage1_, cudaStream_t stream_);
    void loadMasterChunk();
    void loadSlaveChunk();
cuAmpcorController.cu (PyCuAmpcor)
@@ -1,7 +1,7 @@
// Implementation of cuAmpcorController

#include "cuAmpcorController.h"
-#include "SlcImage.h"
+#include "GDALImage.h"
#include "cuArrays.h"
#include "cudaUtil.h"
#include "cuAmpcorChunk.h"
@@ -13,48 +13,64 @@ cuAmpcorController::~cuAmpcorController() { delete param; }

void cuAmpcorController::runAmpcor() {

+   // set the gpu id
    param->deviceID = gpuDeviceInit(param->deviceID);
-   SlcImage *masterImage;
-   SlcImage *slaveImage;
+   // initialize the gdal driver
+   GDALAllRegister();
+   // master and slave images; use band=1 as default
+   // TODO: selecting band
+   GDALImage *masterImage = new GDALImage(param->masterImageName, 1, param->mmapSizeInGB);
+   GDALImage *slaveImage = new GDALImage(param->slaveImageName, 1, param->mmapSizeInGB);

    cuArrays<float2> *offsetImage, *offsetImageRun;
    cuArrays<float> *snrImage, *snrImageRun;
+   cuArrays<float3> *covImage, *covImageRun;
+
+   // For debugging.
+   cuArrays<int> *intImage1;
+   cuArrays<float> *floatImage1;

-   // cuArrays<float> *floatImage;
-   // cuArrays<int> *intImage;
-   masterImage = new SlcImage(param->masterImageName, param->masterImageHeight, param->masterImageWidth, param->mmapSizeInGB);
-   slaveImage = new SlcImage(param->slaveImageName, param->slaveImageHeight, param->slaveImageWidth, param->mmapSizeInGB);
-   int nWindowsDownRun = param->numberChunkDown*param->numberWindowDownInChunk;
-   int nWindowsAcrossRun = param->numberChunkAcross*param->numberWindowAcrossInChunk;
+   int nWindowsDownRun = param->numberChunkDown * param->numberWindowDownInChunk;
+   int nWindowsAcrossRun = param->numberChunkAcross * param->numberWindowAcrossInChunk;

    std::cout << "Debug " << nWindowsDownRun << " " << param->numberWindowDown << "\n";

    offsetImageRun = new cuArrays<float2>(nWindowsDownRun, nWindowsAcrossRun);
-   snrImageRun = new cuArrays<float>(nWindowsDownRun, nWindowsAcrossRun);
    offsetImageRun->allocate();
+
+   snrImageRun = new cuArrays<float>(nWindowsDownRun, nWindowsAcrossRun);
    snrImageRun->allocate();
+
+   covImageRun = new cuArrays<float3>(nWindowsDownRun, nWindowsAcrossRun);
+   covImageRun->allocate();
+
+   // intImage1 and floatImage1 are added for debugging issues
+   intImage1 = new cuArrays<int>(nWindowsDownRun, nWindowsAcrossRun);
+   intImage1->allocate();
+
+   floatImage1 = new cuArrays<float>(nWindowsDownRun, nWindowsAcrossRun);
+   floatImage1->allocate();
+
+   // Offsetfields.
    offsetImage = new cuArrays<float2>(param->numberWindowDown, param->numberWindowAcross);
-   snrImage = new cuArrays<float>(param->numberWindowDown, param->numberWindowAcross);
    offsetImage->allocate();
+
+   // SNR.
+   snrImage = new cuArrays<float>(param->numberWindowDown, param->numberWindowAcross);
    snrImage->allocate();
-   // Minyan Zhong
-   // floatImage = new cuArrays<float>(param->numberWindowDown, param->numberWindowAcross);
-   // intImage = new cuArrays<int>(param->numberWindowDown, param->numberWindowAcross);
-   // floatImage->allocate();
-   // intImage->allocate();
-   //
+
+   // Variance.
+   covImage = new cuArrays<float3>(param->numberWindowDown, param->numberWindowAcross);
+   covImage->allocate();

    cudaStream_t streams[param->nStreams];
    cuAmpcorChunk *chunk[param->nStreams];
    for(int ist=0; ist<param->nStreams; ist++)
    {
        cudaStreamCreate(&streams[ist]);
-       chunk[ist]= new cuAmpcorChunk(param, masterImage, slaveImage, offsetImageRun, snrImageRun, streams[ist]);
+       chunk[ist]= new cuAmpcorChunk(param, masterImage, slaveImage, offsetImageRun, snrImageRun, covImageRun, intImage1, floatImage1, streams[ist]);
    }

    int nChunksDown = param->numberChunkDown;
@@ -63,7 +79,7 @@ void cuAmpcorController::runAmpcor() {
    std::cout << "Total number of windows (azimuth x range): " << param->numberWindowDown << " x " << param->numberWindowAcross << std::endl;
    std::cout << "to be processed in the number of chunks: " << nChunksDown << " x " << nChunksAcross << std::endl;
-   for(int i = 60; i<nChunksDown; i++)
+   for(int i = 0; i<nChunksDown; i++)
    {
        std::cout << "Processing chunk (" << i << ", x" << ")" << std::endl;
        for(int j=0; j<nChunksAcross; j+=param->nStreams)
@@ -81,26 +97,39 @@ void cuAmpcorController::runAmpcor() {
    cudaDeviceSynchronize();

+   // Do extraction.
    cuArraysCopyExtract(offsetImageRun, offsetImage, make_int2(0,0), streams[0]);
    cuArraysCopyExtract(snrImageRun, snrImage, make_int2(0,0), streams[0]);
+   cuArraysCopyExtract(covImageRun, covImage, make_int2(0,0), streams[0]);

    offsetImage->outputToFile(param->offsetImageName, streams[0]);
    snrImage->outputToFile(param->snrImageName, streams[0]);
+   covImage->outputToFile(param->covImageName, streams[0]);

-   // Minyan Zhong
-   // floatImage->allocate();
-   // intImage->allocate();
-   //
+   // Output debugging arrays.
+   intImage1->outputToFile("intImage1", streams[0]);
+   floatImage1->outputToFile("floatImage1", streams[0]);

    outputGrossOffsets();

+   // Delete arrays.
    delete offsetImage;
    delete snrImage;
+   delete covImage;
+
+   delete intImage1;
+   delete floatImage1;
+
    delete offsetImageRun;
    delete snrImageRun;
+   delete covImageRun;
+
    for (int ist=0; ist<param->nStreams; ist++)
        delete chunk[ist];
+
    delete masterImage;
    delete slaveImage;
}
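
The run loop above walks chunks row by row and deals consecutive chunks in a row to the available CUDA streams. A small Python sketch of that scheduling pattern (the counts are made-up values; the chunk/stream pairing is only named for illustration):

```python
# Chunks are processed in row-major order, round-robin across nStreams streams.
n_chunks_down, n_chunks_across, n_streams = 2, 5, 2
for i in range(n_chunks_down):
    for j in range(0, n_chunks_across, n_streams):
        for ist in range(min(n_streams, n_chunks_across - j)):
            print(f"chunk ({i}, {j + ist}) -> stream {ist}")
```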
void cuAmpcorController::outputGrossOffsets()
cuAmpcorParameter.cu (PyCuAmpcor)
@@ -17,6 +17,8 @@
cuAmpcorParameter::cuAmpcorParameter()
{
+   // default settings
+   // will be changed if they are set by python scripts
    algorithm = 0; //0 freq; 1 time
    deviceID = 0;
    nStreams = 1;
@@ -43,6 +45,7 @@ cuAmpcorParameter::cuAmpcorParameter()
    offsetImageName = "DenseOffset.off";
    grossOffsetImageName = "GrossOffset.off";
    snrImageName = "snr.snr";
+   covImageName = "cov.cov";
    numberWindowDown = 1;
    numberWindowAcross = 1;
    numberWindowDownInChunk = 1;
@@ -50,6 +53,13 @@ cuAmpcorParameter::cuAmpcorParameter()
    masterStartPixelDown0 = 0;
    masterStartPixelAcross0 = 0;
+
+   corrRawZoomInHeight = 17; // 8*2+1
+   corrRawZoomInWidth = 17;
+
+   useMmap = 1; // use mmap
+   mmapSizeInGB = 1;
}

/**
cuAmpcorParameter.h (PyCuAmpcor)
@@ -50,6 +50,8 @@ public:
    int searchWindowSizeHeightRawZoomIn;
    int searchWindowSizeWidthRawZoomIn;
+   int corrRawZoomInHeight; // window to estimate snr
+   int corrRawZoomInWidth;

    // chip or window size after oversampling
    int rawDataOversamplingFactor; /// Raw data oversampling factor (from original size to oversampled size)
@@ -101,7 +103,8 @@ public:
    int numberChunkAcross; /// number of chunks (across)
    int numberChunks;
-   int mmapSizeInGB;
+   int useMmap; /// whether to use mmap 0=not 1=yes (default = 0)
+   int mmapSizeInGB; /// size for mmap buffer (useMmap=1) or a cpu memory buffer (useMmap=0)
    int masterStartPixelDown0;
    int masterStartPixelAcross0;
@@ -128,6 +131,7 @@ public:
    std::string grossOffsetImageName;
    std::string offsetImageName; /// Output Offset fields filename
    std::string snrImageName; /// Output SNR filename
+   std::string covImageName;
    cuAmpcorParameter(); /// Class constructor and default parameters setter
    ~cuAmpcorParameter(); /// Class destructor
cuAmpcorUtil.h (PyCuAmpcor)
@@ -22,16 +22,23 @@ void cuArraysCopyToBatchWithOffset(cuArrays<float2> *image1, const int lda1, cuA
    const int *offsetH, const int* offsetW, cudaStream_t stream);
void cuArraysCopyToBatchAbsWithOffset(cuArrays<float2> *image1, const int lda1, cuArrays<float2> *image2,
    const int *offsetH, const int* offsetW, cudaStream_t stream);
+void cuArraysCopyToBatchWithOffsetR2C(cuArrays<float> *image1, const int lda1, cuArrays<float2> *image2,
+   const int *offsetH, const int* offsetW, cudaStream_t stream);
void cuArraysCopyC2R(cuArrays<float2> *image1, cuArrays<float> *image2, int strideH, int strideW, cudaStream_t stream);

+// same routine name overloaded for different data type
void cuArraysCopyExtract(cuArrays<float> *imagesIn, cuArrays<float> *imagesOut, cuArrays<int2> *offset, cudaStream_t stream);
void cuArraysCopyExtract(cuArrays<float> *imagesIn, cuArrays<float> *imagesOut, int2 offset, cudaStream_t stream);
void cuArraysCopyExtract(cuArrays<float2> *imagesIn, cuArrays<float> *imagesOut, int2 offset, cudaStream_t stream);
void cuArraysCopyExtract(cuArrays<float2> *imagesIn, cuArrays<float2> *imagesOut, int2 offset, cudaStream_t stream);
void cuArraysCopyExtract(cuArrays<float2> *imagesIn, cuArrays<float2> *imagesOut, cuArrays<int2> *offsets, cudaStream_t stream);
+void cuArraysCopyExtract(cuArrays<float3> *imagesIn, cuArrays<float3> *imagesOut, int2 offset, cudaStream_t stream);

void cuArraysCopyInsert(cuArrays<float2> *imageIn, cuArrays<float2> *imageOut, int offsetX, int offersetY, cudaStream_t stream);
+void cuArraysCopyInsert(cuArrays<float3> *imageIn, cuArrays<float3> *imageOut, int offsetX, int offersetY, cudaStream_t stream);
void cuArraysCopyInsert(cuArrays<float> *imageIn, cuArrays<float> *imageOut, int offsetX, int offsetY, cudaStream_t stream);
+void cuArraysCopyInsert(cuArrays<int> *imageIn, cuArrays<int> *imageOut, int offsetX, int offersetY, cudaStream_t stream);

void cuArraysCopyInversePadded(cuArrays<float> *imageIn, cuArrays<float> *imageOut, cudaStream_t stream);
void cuArraysCopyPadded(cuArrays<float> *imageIn, cuArrays<float> *imageOut, cudaStream_t stream);
@@ -80,7 +87,11 @@ void cuArraysElementMultiplyConjugate(cuArrays<float2> *image1, cuArrays<float2>
void cuArraysCopyExtractCorr(cuArrays<float> *imagesIn, cuArrays<float> *imagesOut, cuArrays<int> *imagesValid, cuArrays<int2> *maxloc, cudaStream_t stream);

// implemented in cuCorrNormalization.cu
void cuArraysSumCorr(cuArrays<float> *images, cuArrays<int> *imagesValid, cuArrays<float> *imagesSum, cuArrays<int> *imagesValidCount, cudaStream_t stream);

// implemented in cuEstimateStats.cu
void cuEstimateSnr(cuArrays<float> *corrSum, cuArrays<int> *corrValidCount, cuArrays<float> *maxval, cuArrays<float> *snrValue, cudaStream_t stream);
+
+// implemented in cuEstimateStats.cu
+void cuEstimateVariance(cuArrays<float> *corrBatchRaw, cuArrays<int2> *maxloc, cuArrays<float> *maxval, cuArrays<float3> *covValue, cudaStream_t stream);

#endif
cuArrays.cu (PyCuAmpcor)
@@ -155,7 +155,20 @@
    file.close();
}

+template<>
+void cuArrays<float3>::outputToFile(std::string fn, cudaStream_t stream)
+{
+   float *data;
+   data = (float *)malloc(size*count*sizeof(float3));
+   checkCudaErrors(cudaMemcpyAsync(data, devData, size*count*sizeof(float3), cudaMemcpyDeviceToHost, stream));
+   std::ofstream file;
+   file.open(fn.c_str(), std::ios_base::binary);
+   file.write((char *)data, size*count*sizeof(float3));
+   file.close();
+}
+
template class cuArrays<float>;
template class cuArrays<float2>;
+template class cuArrays<float3>;
template class cuArrays<int2>;
template class cuArrays<int>;
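
Since outputToFile writes the float3 array as raw float32 triplets with no header, the covariance product can be read back roughly as below; the file name and the flat (N, 3) reshape are illustrative assumptions, not part of the source:

```python
# Hedged sketch: read the raw float3 covariance output back into NumPy.
# Assumes the file holds numberWindowDown*numberWindowAcross float32 triplets.
import numpy as np

cov = np.fromfile('cov.cov', dtype=np.float32).reshape(-1, 3)
cov_xx, cov_yy, cov_xy = cov[:, 0], cov[:, 1], cov[:, 2]
```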

cuArraysCopy.cu (PyCuAmpcor)
@@ -16,7 +16,7 @@ inline __device__ float cuAbs(float2 a)
    return sqrtf(a.x*a.x+a.y*a.y);
}*/

-//copy a chunk into a series of chips
+// copy a chunk into a batch of chips for a given stride
__global__ void cuArraysCopyToBatch_kernel(const float2 *imageIn, const int inNX, const int inNY,
    float2 *imageOut, const int outNX, const int outNY,
    const int nImagesX, const int nImagesY,
@@ -33,7 +33,6 @@ __global__ void cuArraysCopyToBatch_kernel(const float2 *imageIn, const int inNX
    imageOut[idxOut] = imageIn[idxIn];
}

-//tested
void cuArraysCopyToBatch(cuArrays<float2> *image1, cuArrays<float2> *image2,
    int strideH, int strideW, cudaStream_t stream)
{
@@ -48,6 +47,8 @@ void cuArraysCopyToBatch(cuArrays<float2> *image1, cuArrays<float2> *image2,
    getLastCudaError("cuArraysCopyToBatch_kernel");
}

+// copy a chunk into a batch of chips for a set of offsets (varying strides), from complex to complex
__global__ void cuArraysCopyToBatchWithOffset_kernel(const float2 *imageIn, const int inNY,
    float2 *imageOut, const int outNX, const int outNY, const int nImages,
    const int *offsetX, const int *offsetY)
@@ -61,10 +62,7 @@ __global__ void cuArraysCopyToBatchWithOffset_kernel(const float2 *imageIn, cons
    imageOut[idxOut] = imageIn[idxIn];
}

-/// @param[in] image1 input image in a large chunk
-/// @param[in] lda1 width of image 1
-/// @param[out] image2 output image with a batch of small windows
+// lda1 (inNY) is the leading dimension of image1, usually its width
void cuArraysCopyToBatchWithOffset(cuArrays<float2> *image1, const int lda1, cuArrays<float2> *image2,
    const int *offsetH, const int* offsetW, cudaStream_t stream)
{
@@ -79,6 +77,7 @@ void cuArraysCopyToBatchWithOffset(cuArrays<float2> *image1, const int lda1, cuA
    getLastCudaError("cuArraysCopyToBatchAbsWithOffset_kernel");
}

+// copy a chunk into a batch of chips for a set of offsets (varying strides), from complex to real (take amplitudes)
__global__ void cuArraysCopyToBatchAbsWithOffset_kernel(const float2 *imageIn, const int inNY,
    float2 *imageOut, const int outNX, const int outNY, const int nImages,
    const int *offsetX, const int *offsetY)
@@ -106,6 +105,34 @@ void cuArraysCopyToBatchAbsWithOffset(cuArrays<float2> *image1, const int lda1,
    getLastCudaError("cuArraysCopyToBatchAbsWithOffset_kernel");
}
+// copy a chunk into a batch of chips for a set of offsets (varying strides), from real to complex (to real part)
+__global__ void cuArraysCopyToBatchWithOffsetR2C_kernel(const float *imageIn, const int inNY,
+   float2 *imageOut, const int outNX, const int outNY, const int nImages,
+   const int *offsetX, const int *offsetY)
+{
+   int idxImage = blockIdx.z;
+   int outx = threadIdx.x + blockDim.x*blockIdx.x;
+   int outy = threadIdx.y + blockDim.y*blockIdx.y;
+   if(idxImage>=nImages || outx >= outNX || outy >= outNY) return;
+   int idxOut = idxImage*outNX*outNY + outx*outNY + outy;
+   int idxIn = (offsetX[idxImage]+outx)*inNY + offsetY[idxImage] + outy;
+   imageOut[idxOut] = make_float2(imageIn[idxIn], 0.0f);
+}
+
+void cuArraysCopyToBatchWithOffsetR2C(cuArrays<float> *image1, const int lda1, cuArrays<float2> *image2,
+   const int *offsetH, const int* offsetW, cudaStream_t stream)
+{
+   const int nthreads = 16;
+   dim3 blockSize(nthreads, nthreads, 1);
+   dim3 gridSize(IDIVUP(image2->height,nthreads), IDIVUP(image2->width,nthreads), image2->count);
+   //fprintf(stderr, "copy tile to batch, %d %d\n", lda1, image2->count);
+   cuArraysCopyToBatchWithOffsetR2C_kernel<<<gridSize,blockSize, 0 , stream>>> (
+       image1->devData, lda1,
+       image2->devData, image2->height, image2->width, image2->count,
+       offsetH, offsetW);
+   getLastCudaError("cuArraysCopyToBatchWithOffsetR2C_kernel");
+}
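
The R2C variant above simply promotes each real sample to a complex value with zero imaginary part, so real-valued rasters (e.g., GeoTIFF amplitudes) can reuse the complex correlation pipeline. A NumPy analogue, for illustration only:

```python
# NumPy analogue of make_float2(x, 0.0f) applied to a whole batch.
import numpy as np

real_batch = np.ones((2, 16, 16), dtype=np.float32)
complex_batch = real_batch.astype(np.complex64)  # imaginary parts are zero
```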
//copy a chunk into a series of chips
__global__ void cuArraysCopyC2R_kernel(const float2 *imageIn, const int inNX, const int inNY,
    float *imageOut, const int outNX, const int outNY,
@@ -208,14 +235,17 @@ __global__ void cuArraysCopyExtractVaryingOffsetCorr(const float *imageIn, const
    int idxImage = blockIdx.z;

+   // One thread per out point. Find the coordinates within the current image.
    int outx = threadIdx.x + blockDim.x*blockIdx.x;
    int outy = threadIdx.y + blockDim.y*blockIdx.y;

+   // Find the corresponding input.
    int inx = outx + maxloc[idxImage].x - outNX/2;
    int iny = outy + maxloc[idxImage].y - outNY/2;

    if (outx < outNX && outy < outNY)
    {
+       // Find the location in the full array.
        int idxOut = ( blockIdx.z * outNX + outx ) * outNY + outy;
        int idxIn = ( blockIdx.z * inNX + inx ) * inNY + iny;
@@ -284,6 +314,7 @@ void cuArraysCopyExtract(cuArrays<float> *imagesIn, cuArrays<float> *imagesOut,
    getLastCudaError("cuArraysCopyExtract error");
}

__global__ void cuArraysCopyExtract_C2C_FixedOffset(const float2 *imageIn, const int inNX, const int inNY,
    float2 *imageOut, const int outNX, const int outNY, const int nImages,
@@ -315,6 +346,42 @@ void cuArraysCopyExtract(cuArrays<float2> *imagesIn, cuArrays<float2> *imagesOut
    imagesOut->devData, imagesOut->height, imagesOut->width, imagesOut->count, offset.x, offset.y);
    getLastCudaError("cuArraysCopyExtractC2C error");
}
+// float3
+__global__ void cuArraysCopyExtract_C2C_FixedOffset(const float3 *imageIn, const int inNX, const int inNY,
+   float3 *imageOut, const int outNX, const int outNY, const int nImages,
+   const int offsetX, const int offsetY)
+{
+   int outx = threadIdx.x + blockDim.x*blockIdx.x;
+   int outy = threadIdx.y + blockDim.y*blockIdx.y;
+
+   if(outx < outNX && outy < outNY)
+   {
+       int idxOut = (blockIdx.z * outNX + outx)*outNY+outy;
+       int idxIn = (blockIdx.z*inNX + outx + offsetX)*inNY + outy + offsetY;
+       imageOut[idxOut] = imageIn[idxIn];
+   }
+}
+
+void cuArraysCopyExtract(cuArrays<float3> *imagesIn, cuArrays<float3> *imagesOut, int2 offset, cudaStream_t stream)
+{
+   //assert(imagesIn->height >= imagesOut && inNY >= outNY);
+   const int nthreads = NTHREADS2D;
+   dim3 threadsperblock(nthreads, nthreads, 1);
+   dim3 blockspergrid(IDIVUP(imagesOut->height,nthreads), IDIVUP(imagesOut->width,nthreads), imagesOut->count);
+   //std::cout << "debug copyExtract" << imagesOut->width << imagesOut->height << "\n";
+   //imagesIn->debuginfo(stream);
+   //imagesOut->debuginfo(stream);
+   cuArraysCopyExtract_C2C_FixedOffset<<<blockspergrid, threadsperblock, 0, stream>>>
+       (imagesIn->devData, imagesIn->height, imagesIn->width,
+       imagesOut->devData, imagesOut->height, imagesOut->width, imagesOut->count, offset.x, offset.y);
+   getLastCudaError("cuArraysCopyExtractFloat3 error");
+}
__global__ void cuArraysCopyExtract_C2R_FixedOffset(const float2 *imageIn, const int inNX, const int inNY,
    float *imageOut, const int outNX, const int outNY, const int nImages,
@@ -332,6 +399,7 @@ __global__ void cuArraysCopyExtract_C2R_FixedOffset(const float2 *imageIn, const
}

void cuArraysCopyExtract(cuArrays<float2> *imagesIn, cuArrays<float> *imagesOut, int2 offset, cudaStream_t stream)
{
    //assert(imagesIn->height >= imagesOut && inNY >= outNY);
@@ -343,7 +411,7 @@ void cuArraysCopyExtract(cuArrays<float2> *imagesIn, cuArrays<float> *imagesOut,
    imagesOut->devData, imagesOut->height, imagesOut->width, imagesOut->count, offset.x, offset.y);
    getLastCudaError("cuArraysCopyExtractC2C error");
}

__global__ void cuArraysCopyInsert_kernel(const float2* imageIn, const int inNX, const int inNY,
    float2* imageOut, const int outNY, const int offsetX, const int offsetY)
@@ -367,7 +435,31 @@ void cuArraysCopyInsert(cuArrays<float2> *imageIn, cuArrays<float2> *imageOut, i
    imageOut->devData, imageOut->width, offsetX, offsetY);
    getLastCudaError("cuArraysCopyInsert error");
}
+// float3
+__global__ void cuArraysCopyInsert_kernel(const float3* imageIn, const int inNX, const int inNY,
+   float3* imageOut, const int outNY, const int offsetX, const int offsetY)
+{
+   int inx = threadIdx.x + blockDim.x*blockIdx.x;
+   int iny = threadIdx.y + blockDim.y*blockIdx.y;
+   if(inx < inNX && iny < inNY) {
+       int idxOut = IDX2R(inx+offsetX, iny+offsetY, outNY);
+       int idxIn = IDX2R(inx, iny, inNY);
+       imageOut[idxOut] = make_float3(imageIn[idxIn].x, imageIn[idxIn].y, imageIn[idxIn].z);
+   }
+}
+
+void cuArraysCopyInsert(cuArrays<float3> *imageIn, cuArrays<float3> *imageOut, int offsetX, int offsetY, cudaStream_t stream)
+{
+   const int nthreads = 16;
+   dim3 threadsperblock(nthreads, nthreads);
+   dim3 blockspergrid(IDIVUP(imageIn->height,nthreads), IDIVUP(imageIn->width,nthreads));
+   cuArraysCopyInsert_kernel<<<blockspergrid, threadsperblock, 0, stream>>>(imageIn->devData, imageIn->height, imageIn->width,
+       imageOut->devData, imageOut->width, offsetX, offsetY);
+   getLastCudaError("cuArraysCopyInsert error");
+}
__global__ void cuArraysCopyInsert_kernel(const float* imageIn, const int inNX, const int inNY,
    float* imageOut, const int outNY, const int offsetX, const int offsetY)
@@ -392,6 +484,32 @@ void cuArraysCopyInsert(cuArrays<float> *imageIn, cuArrays<float> *imageOut, int
    getLastCudaError("cuArraysCopyInsert Float error");
}
+__global__ void cuArraysCopyInsert_kernel(const int* imageIn, const int inNX, const int inNY,
+   int* imageOut, const int outNY, const int offsetX, const int offsetY)
+{
+   int inx = threadIdx.x + blockDim.x*blockIdx.x;
+   int iny = threadIdx.y + blockDim.y*blockIdx.y;
+   if(inx < inNX && iny < inNY) {
+       int idxOut = IDX2R(inx+offsetX, iny+offsetY, outNY);
+       int idxIn = IDX2R(inx, iny, inNY);
+       imageOut[idxOut] = imageIn[idxIn];
+   }
+}
+
+void cuArraysCopyInsert(cuArrays<int> *imageIn, cuArrays<int> *imageOut, int offsetX, int offsetY, cudaStream_t stream)
+{
+   const int nthreads = 16;
+   dim3 threadsperblock(nthreads, nthreads);
+   dim3 blockspergrid(IDIVUP(imageIn->height,nthreads), IDIVUP(imageIn->width,nthreads));
+   cuArraysCopyInsert_kernel<<<blockspergrid, threadsperblock, 0, stream>>>(imageIn->devData, imageIn->height, imageIn->width,
+       imageOut->devData, imageOut->width, offsetX, offsetY);
+   getLastCudaError("cuArraysCopyInsert Integer error");
+}

__global__ void cuArraysCopyInversePadded_kernel(float *imageIn, int inNX, int inNY, int sizeIn,
    float *imageOut, int outNX, int outNY, int sizeOut, int nImages)
cuCorrNormalization.cu (PyCuAmpcor)
@@ -195,7 +195,6 @@ __device__ float2 partialSums(const float v, volatile float* shmem, const int st
    return make_float2(Sum, Sum2);
}

-__forceinline__ __device__ int __mul(const int a, const int b) { return a*b; }

template<const int Nthreads2>
__global__ void cuCorrNormalize_kernel(
@@ -232,7 +231,7 @@ __global__ void cuCorrNormalize_kernel(
        templateSum += templateD[i];
    }
    templateSum = sumReduceBlock<Nthreads>(templateSum, shmem);
+   __syncthreads();

    float templateSum2 = 0.0f;
    for (int i = tid; i < templateSize; i += Nthreads)
@@ -241,11 +240,12 @@ __global__ void cuCorrNormalize_kernel(
        templateSum2 += t*t;
    }
    templateSum2 = sumReduceBlock<Nthreads>(templateSum2, shmem);
+   __syncthreads();
    //if(tid ==0) printf("template sum %d %g %g \n", imageIdx, templateSum, templateSum2);

    /*********/
-   shmem[tid] = shmem[tid + Nthreads] = 0.0f;
+   shmem[tid] = shmem[tid + Nthreads] = shmem[tid + 2*Nthreads] = 0.0f;
    __syncthreads();

    float imageSum = 0.0f;
@@ -281,7 +281,7 @@ __global__ void cuCorrNormalize_kernel(
    if (tid < resultNY)
    {
        const int ix = iaddr/imageNY;
-       const int addr = __mul(ix-templateNX, resultNY);
+       const int addr = (ix-templateNX)*resultNY;
        //printf("test norm %d %d %d %d %f\n", tid, ix, addr, addr+tid, resultD[addr + tid]);
cuEstimateStats.cu (PyCuAmpcor)
@@ -25,7 +25,7 @@ __global__ void cudaKernel_estimateSnr(const float* corrSum, const int* corrVali
    float mean = (corrSum[idx] - maxval[idx] * maxval[idx]) / (corrValidCount[idx] - 1);

-   snrValue[idx] = maxval[idx] / mean;
+   snrValue[idx] = maxval[idx] * maxval[idx] / mean;
}

void cuEstimateSnr(cuArrays<float> *corrSum, cuArrays<int> *corrValidCount, cuArrays<float> *maxval, cuArrays<float> *snrValue, cudaStream_t stream)
@@ -68,3 +68,80 @@ void cuEstimateSnr(cuArrays<float> *corrSum, cuArrays<int> *corrValidCount, cuAr
    getLastCudaError("cuda kernel estimate stats error\n");
}
+template <const int BLOCKSIZE> // number of threads per block.
+__global__ void cudaKernel_estimateVar(const float* corrBatchRaw, const int NX, const int NY, const int2* maxloc, const float* maxval, float3* covValue, const int size)
+{
+   // Find image id.
+   int idxImage = threadIdx.x + blockDim.x*blockIdx.x;
+   if (idxImage >= size) return;
+
+   // Preparation.
+   int px = maxloc[idxImage].x;
+   int py = maxloc[idxImage].y;
+   float peak = maxval[idxImage];
+
+   // Check if maxval is on the margin.
+   if (px-1 < 0 || py-1 < 0 || px+1 >= NX || py+1 >= NY) {
+       covValue[idxImage] = make_float3(99.0, 99.0, 99.0);
+   }
+   else {
+       int offset = NX * NY * idxImage;
+       int idx00 = offset + (px - 1) * NY + py - 1;
+       int idx01 = offset + (px - 1) * NY + py    ;
+       int idx02 = offset + (px - 1) * NY + py + 1;
+       int idx10 = offset + (px    ) * NY + py - 1;
+       int idx11 = offset + (px    ) * NY + py    ;
+       int idx12 = offset + (px    ) * NY + py + 1;
+       int idx20 = offset + (px + 1) * NY + py - 1;
+       int idx21 = offset + (px + 1) * NY + py    ;
+       int idx22 = offset + (px + 1) * NY + py + 1;
+
+       float dxx = - ( corrBatchRaw[idx21] + corrBatchRaw[idx01] - 2*corrBatchRaw[idx11] ) * 0.5;
+       float dyy = - ( corrBatchRaw[idx12] + corrBatchRaw[idx10] - 2*corrBatchRaw[idx11] ) * 0.5;
+       float dxy = - ( corrBatchRaw[idx22] + corrBatchRaw[idx00] - corrBatchRaw[idx20] - corrBatchRaw[idx02] ) * 0.25;
+
+       float n2 = fmaxf(1 - peak, 0.0);
+       int winSize = NX*NY;
+
+       dxx = dxx * winSize;
+       dyy = dyy * winSize;
+       dxy = dxy * winSize;
+
+       float n4 = n2*n2;
+       n2 = n2 * 2;
+       n4 = n4 * 0.5 * winSize;
+
+       float u = dxy * dxy - dxx * dyy;
+       float u2 = u*u;
+
+       if (fabsf(u) < 1e-2) {
+           covValue[idxImage] = make_float3(99.0, 99.0, 99.0);
+       }
+       else {
+           float cov_xx = (- n2 * u * dyy + n4 * ( dyy*dyy + dxy*dxy) ) / u2;
+           float cov_yy = (- n2 * u * dxx + n4 * ( dxx*dxx + dxy*dxy) ) / u2;
+           float cov_xy = (  n2 * u * dxy - n4 * ( dxx + dyy ) * dxy ) / u2;
+           covValue[idxImage] = make_float3(cov_xx, cov_yy, cov_xy);
+       }
+   }
+}
+
+void cuEstimateVariance(cuArrays<float> *corrBatchRaw, cuArrays<int2> *maxloc, cuArrays<float> *maxval, cuArrays<float3> *covValue, cudaStream_t stream)
+{
+   int size = corrBatchRaw->count;
+   // One dimensional launching parameters to loop over every correlation surface.
+   cudaKernel_estimateVar<NTHREADS><<< IDIVUP(size, NTHREADS), NTHREADS, 0, stream>>>
+       (corrBatchRaw->devData, corrBatchRaw->height, corrBatchRaw->width, maxloc->devData, maxval->devData, covValue->devData, size);
+   getLastCudaError("cudaKernel_estimateVar error\n");
+}
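
For readers of the math, a line-by-line NumPy transcription of cudaKernel_estimateVar for one correlation surface; this is a sketch for study, not a replacement for the kernel:

```python
# Covariance of the offset estimate from the local curvature of the
# correlation surface at its peak, following the kernel above.
import numpy as np

def estimate_cov(corr, px, py):
    NX, NY = corr.shape
    peak = float(corr[px, py])
    if px - 1 < 0 or py - 1 < 0 or px + 1 >= NX or py + 1 >= NY:
        return (99.0, 99.0, 99.0)           # peak on the margin: flagged invalid
    win = corr.size                          # winSize = NX*NY
    # finite-difference curvatures at the peak, scaled as in the kernel
    dxx = -(corr[px+1, py] + corr[px-1, py] - 2.0*peak) * 0.5 * win
    dyy = -(corr[px, py+1] + corr[px, py-1] - 2.0*peak) * 0.5 * win
    dxy = -(corr[px+1, py+1] + corr[px-1, py-1]
            - corr[px+1, py-1] - corr[px-1, py+1]) * 0.25 * win
    n2 = max(1.0 - peak, 0.0)
    n4 = n2 * n2 * 0.5 * win
    n2 = n2 * 2.0
    u = dxy*dxy - dxx*dyy
    if abs(u) < 1e-2:
        return (99.0, 99.0, 99.0)            # degenerate curvature
    cov_xx = (-n2*u*dyy + n4*(dyy*dyy + dxy*dxy)) / (u*u)
    cov_yy = (-n2*u*dxx + n4*(dxx*dxx + dxy*dxy)) / (u*u)
    cov_xy = ( n2*u*dxy - n4*(dxx + dyy)*dxy) / (u*u)
    return (float(cov_xx), float(cov_yy), float(cov_xy))

corr = np.random.rand(17, 17).astype(np.float32)
px, py = np.unravel_index(np.argmax(corr), corr.shape)
print(estimate_cov(corr, int(px), int(py)))
```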

setup.py (PyCuAmpcor)
@@ -7,20 +7,21 @@
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
-import os
-os.environ["CC"] = "g++"
+import numpy

setup( name = 'PyCuAmpcor',
    ext_modules = cythonize(Extension(
        "PyCuAmpcor",
        sources=['PyCuAmpcor.pyx'],
-       include_dirs=['/usr/local/cuda/include'],  # REPLACE WITH YOUR PATH TO YOUR CUDA LIBRARY HEADERS
+       include_dirs=['/usr/local/cuda/include', numpy.get_include()],  # REPLACE WITH YOUR PATH TO YOUR CUDA LIBRARY HEADERS
        extra_compile_args=['-fPIC','-fpermissive'],
-       extra_objects=['SlcImage.o','cuAmpcorChunk.o','cuAmpcorParameter.o','cuCorrFrequency.o',
+       extra_objects=['GDALImage.o','cuAmpcorChunk.o','cuAmpcorParameter.o','cuCorrFrequency.o',
            'cuCorrNormalization.o','cuCorrTimeDomain.o','cuArraysCopy.o',
            'cuArrays.o','cuArraysPadding.o','cuOffset.o','cuOverSampler.o',
-           'cuSincOverSampler.o', 'cuDeramp.o','cuAmpcorController.o'],
-       extra_link_args=['-L/usr/local/cuda/lib64','-lcuda','-lcudart','-lcufft','-lcublas'],  # REPLACE FIRST PATH WITH YOUR PATH TO YOUR CUDA LIBRARIES
+           'cuSincOverSampler.o', 'cuDeramp.o','cuAmpcorController.o','cuEstimateStats.o'],
+       extra_link_args=['-L/usr/local/cuda/lib64',
+           '-L/usr/lib64/nvidia',
+           '-lcuda','-lcudart','-lcufft','-lcublas','-lgdal'],  # REPLACE FIRST PATH WITH YOUR PATH TO YOUR CUDA LIBRARIES
        language='c++'
    )))
contrib/SConscript
@@ -78,3 +78,6 @@ SConscript(rfi)
SConscript('PyCuAmpcor/SConscript')
SConscript('splitSpectrum/SConscript')
SConscript('alos2proc/SConscript')
+
+if os.path.exists('geo_autoRIFT'):
+    SConscript('geo_autoRIFT/SConscript')
DemStitcher.py (contrib/demUtils)
@@ -43,8 +43,7 @@ import os
import sys
import math
import urllib.request, urllib.parse, urllib.error
-import logging
-import logging.config
+from isce import logging
from iscesys.Component.Component import Component
import xml.etree.ElementTree as ET
@@ -1013,10 +1012,6 @@ class DemStitcher(Component):
        # logger not defined until baseclass is called
        if not self.logger:
-           logging.config.fileConfig(
-               os.path.join(os.environ['ISCE_HOME'], 'defaults',
-                   'logging', 'logging.conf')
-           )
            self.logger = logging.getLogger('isce.contrib.demUtils.DemStitcher')

    url = property(getUrl,setUrl)
DemStitcherV3.py (contrib/demUtils)
@@ -39,8 +39,7 @@ from ctypes import cdll
import os
import sys
import urllib.request, urllib.error, urllib.parse
-import logging
-import logging.config
+from isce import logging
from iscesys.Component.Component import Component
from contrib.demUtils.DemStitcher import DemStitcher as DS
#Parameters definitions
@@ -291,7 +290,4 @@ class DemStitcher(DS):
        #it's /srtm/version2_1/SRTM(1,3)
        self._remove = ['.jpg','.xml']
        if not self.logger:
-           logging.config.fileConfig(
-               os.environ['ISCE_HOME'] + '/library/applications/logging.conf'
-           )
            self.logger = logging.getLogger('isce.contrib.demUtils.DemStitcherV3')
SwbdStitcher.py (contrib/demUtils)
@@ -39,9 +39,8 @@ from ctypes import cdll
import numpy as np
import os
import sys
-import logging
+from isce import logging
import math
-import logging.config
import urllib.request, urllib.parse, urllib.error
from iscesys.Component.Component import Component
from contrib.demUtils.DemStitcher import DemStitcher
@@ -315,9 +314,6 @@ class SWBDStitcher(DemStitcher):
        #it's /srtm/version2_1/SRTM(1,3)
        self._remove = ['.jpg','.xml']
        if not self.logger:
-           logging.config.fileConfig(
-               os.environ['ISCE_HOME'] + '/library/applications/logging.conf'
-           )
            self.logger = logging.getLogger('isce.contrib.demUtils.SWBDStitcher')
        self.parameter_list = self.parameter_list + super(DemStitcher,self).parameter_list
MaskStitcher.py (contrib/demUtils)
@@ -35,8 +35,7 @@ import sys
import math
from html.parser import HTMLParser
import urllib.request, urllib.parse, urllib.error
-import logging
-import logging.config
+from isce import logging
from iscesys.Component.Component import Component
import zipfile
import os
@@ -979,10 +978,6 @@ class MaskStitcher(Component):
        # logger not defined until baseclass is called
        if not self.logger:
-           logging.config.fileConfig(
-               os.path.join(os.environ['ISCE_HOME'], 'defaults',
-                   'logging', 'logging.conf')
-           )
            self.logger = logging.getLogger('isce.contrib.demUtils.MaskStitcher')

    utl = property(getUrl,setUrl)
(another Python script, filename not shown in this render)
@@ -32,10 +32,7 @@
import os
import math

-import logging
-import logging.config
-logging.config.fileConfig(os.path.join(os.environ['ISCE_HOME'], 'defaults',
-    'logging', 'logging.conf'))
+from isce import logging

import isce
from iscesys.Component.FactoryInit import FactoryInit
contrib/stack/README.txt (deleted)
@@ -1,17 +0,0 @@
To use the TOPS or Stripmap stack processors you need to:

1- Install ISCE as usual

2- Depending on which stack processor you need to try, add the path of the folder containing the python scripts to your $PATH environment variable as follows:
   - to use the topsStack for processing a stack of Sentinel-1 TOPS data, add the full path of your "contrib/stack/topsStack" to your $PATH environment variable
   - to use the stripmapStack for processing a stack of StripMap data, add the full path of your "contrib/stack/stripmapStack" to your $PATH environment variable

NOTE:
The stack processors do not show up in the install directory of your isce software. They can be found in the isce source directory.

Important Note:
There might be conflicts between topsStack and stripmapStack scripts (due to common names of different scripts). Therefore users MUST only have the path of one stack processor in their $PATH environment at a time, to avoid conflicts between the two stack processors.
contrib/stack/README.md (new file, 34 lines)
@@ -0,0 +1,34 @@
## Stack Processors

Read the document for each stack processor for details.

+ [stripmapStack](./stripmapStack/README.md)
+ [topsStack](./topsStack/README.md)

### Installation

To use the TOPS or Stripmap stack processors you need to:

1. Install ISCE as usual
2. Depending on which stack processor you need to try, add the path of the folder containing the python scripts to your `$PATH` environment variable as follows:
   - add the full path of your **contrib/stack/topsStack** to `$PATH` to use the topsStack for processing a stack of Sentinel-1 TOPS data
   - add the full path of your **contrib/stack/stripmapStack** to `$PATH` to use the stripmapStack for processing a stack of StripMap data

Note: The stack processors do not show up in the install directory of your isce software. They can be found in the isce source directory.

#### Important Note: ####

There might be conflicts between topsStack and stripmapStack scripts (due to common names of different scripts). Therefore users **MUST only** have the path of **one stack processor in their $PATH environment at a time**, to avoid conflicts between the two stack processors.

### References

Users who use the stack processors may refer to the following literature:

For the StripMap stack processor and ionospheric phase estimation:

+ H. Fattahi, M. Simons, and P. Agram, "InSAR Time-Series Estimation of the Ionospheric Phase Delay: An Extension of the Split Range-Spectrum Technique", IEEE Trans. Geosci. Remote Sens., vol. 55, no. 10, pp. 5984-5996, 2017. (https://ieeexplore.ieee.org/abstract/document/7987747/)

For TOPS stack processing:

+ H. Fattahi, P. Agram, and M. Simons, "A network-based enhanced spectral diversity approach for TOPS time-series analysis," IEEE Trans. Geosci. Remote Sens., vol. 55, no. 2, pp. 777-786, Feb. 2017. (https://ieeexplore.ieee.org/abstract/document/7637021/)
contrib/stack/stripmapStack/README.txt (deleted)
@ -1,64 +0,0 @@
The detailed algorithms for stack processing of stripmap data can be found here:
H. Fattahi, M. Simons, and P. Agram, "InSAR Time-Series Estimation of the Ionospheric Phase Delay: An Extension of the Split Range-Spectrum Technique", IEEE Trans. Geosci. Remote Sens., vol. 55, no. 10, 5984-5996, 2017. (https://ieeexplore.ieee.org/abstract/document/7987747/)
-----------------------------------
Notes on stripmap stack processor:
Here are some notes to get started with processing stacks of stripmap data with ISCE.
1- create a folder somewhere for your project
mkdir MauleT111
cd MauleT111
2- create a DEM:
dem.py -a stitch -b -37 -31 -72 -69 -r -s 1 -c
3- Keep only ".dem.wgs84", ".dem.wgs84.vrt" and ".dem.wgs84.xml" and remove unnecessary files
4- fix the path of the file in the xml file of the DEM by using this command:
fixImageXml.py -f -i demLat_S37_S31_Lon_W072_W069.dem.wgs84
5- create a folder to download the ALOS-1 data from ASF:
mkdir download
cd download
6- Download the data that that you want to process to the downlowd directory.
7- once all data have been downloaded, we need to unzip them and move them to different folders and getting ready for unpacking and then SLC generation.
This can be done by running the following command in a directory above "download":
prepRawALOS.py -i download/ -o SLC
This command generates an empty SLC folder and a run file called: "run_unPackALOS".
You could also run prepRawSensor.py which aims to recognize the sensor data automatically followed by running the sensor specific preparation script. For now we include support for ALOS and CSK raw data, but it is trivial to expand and include other sensors as unpacking routines are already included in the distribution.
prepRawSensor.py -i download/ -o SLC
8- Execute the commands inside the run_unPackALOS file. If you have a cluster to which you can submit jobs, you can submit each line of command to a processor. The commands are independent and can be run in parallel.
9- After successfully running the previous step, you should see acquisition dates in the SLC folder and the ".raw" files for each acquisition.
Note: For ALOS-1, if there is an acquisition that does not include a .raw file, this is most likely due to a PRF change between frames and cannot currently be handled by ISCE. You have to ignore those acquisitions.
10- run stackStripmap.py which will generate many config and run files that need to be executed. Here is an example:
stackStripMap.py -s SLC/ -d demLat_S37_S31_Lon_W072_W069.dem.wgs84 -t 250 -b 1000 -a 14 -r 4 -u snaphu
This will produce:
a) baseline folder, which contains baseline information
b) pairs.png which is a baseline-time plot of the network of interferograms
c) configs: which contains the configuration parameters to run different InSAR processing steps
d) run_files: a folder that includes several run and job files that need to be run in order
11- execute the commands in run files (run_1, run_2, etc) in the run_files folder

View File

@ -1,117 +0,0 @@
The detailed algorithm for stack processing of TOPS data can be found here:
H. Fattahi, P. Agram, and M. Simons, “A network-based enhanced spectral diversity approach for TOPS time-series analysis,” IEEE Trans. Geosci. Remote Sens., vol. 55, no. 2, pp. 777-786, Feb. 2017. (https://ieeexplore.ieee.org/abstract/document/7637021/)
<<<<<< Sentinel-1 TOPS stack processor >>>>>>
To use the sentinel stack processor, make sure to add the path of your "contrib/stack/topsStack" folder to your $PATH environment variable.
The scripts provide support for Sentinel-1 TOPS stack processing. Currently supported workflows include a coregistered stack of SLCs, interferograms, offsets, and coherence.
stackSentinel.py generates all configuration and run files required to be executed on a stack of Sentinel-1 TOPS data. When stackSentinel.py is executed for a given workflow (-W option) a “configs” and “run_files” folder is generated. No processing is performed at this stage. Within the “run_files” folder different run_#_description files are contained which are to be executed as shell scripts in the run number order. Each of these run scripts calls specific configure files contained in the “configs” folder which call ISCE in a modular fashion. The configure and run files will change depending on the selected workflow. To make run_# files executable, change the file permission accordingly (e.g., chmod +x run_1_unpack_slc).
To see workflow examples, type “stackSentinel.py -H”
To get an overview of all the configurable parameters, type “stackSentinel.py -h”
Required parameters of stackSentinel.py include:
-s SLC_DIRNAME A folder with downloaded Sentinel-1 SLCs.
-o ORBIT_DIRNAME A folder containing the Sentinel-1 orbits.
Missing orbit files will be downloaded automatically
-a AUX_DIRNAME A folder containing the Sentinel-1 Auxiliary files
-d DEM A DEM (Digital Elevation Model) referenced to wgs84
In the following, different workflow examples are provided. Note that stackSentinel.py only generates the run and configure files. To perform the actual processing, the user will need to execute each run file in their numbered order.
In all workflows, coregistration (-C option) can be done using only geometry (set option = geometry) or with geometry plus refined azimuth offsets through the NESD (set option = NESD) approach, the latter being the default. For the NESD coregistration the user can control the ESD coherence threshold (-e option) and the number of overlap interferograms (-O) to be used in NESD estimation.
------------------------------ Example 1: Coregistered stack of SLC ----------------------------
Generate the run and configure files needed to generate a coregistered stack of SLCs.
In this example, a pre-defined bounding box is specified. Note, if the bounding box is not provided it is set by default to the common SLC area among all SLCs. We recommend that users always set the processing bounding box. Since ESA does not have a fixed frame definition, we suggest downloading data for a larger bounding box compared to the actual bounding box used in stackSentinel.py. This way users can ensure they have the required data to cover the region of interest. Here is an example command to create configuration files for a stack of SLCs:
stackSentinel.py -s ../SLC/ -d ../DEM/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -a ../../AuxDir/ -o ../../Orbits -b '19 20 -99.5 -98.5' -W slc
By running the command above, the configs and run_files folders are created. The user needs to execute each run file in order. The order is specified by the index number of the run file name. For the example above, the run_files folder includes the following files:
- run_1_unpack_slc_topo_master
- run_2_average_baseline
- run_3_extract_burst_overlaps
- run_4_overlap_geo2rdr_resample
- run_5_pairs_misreg
- run_6_timeseries_misreg
- run_7_geo2rdr_resample
- run_8_extract_stack_valid_region
- run_9_merge
- run_10_grid_baseline
The generated run files are self-descriptive. Below is a short explanation of what each run_file does:
***run_1_unpack_slc_topo_master:***
Includes commands to unpack Sentinel-1 TOPS SLCs using ISCE readers. For older SLCs which need antenna elevation pattern correction, the file is extracted and written to disk. For newer versions of SLCs which don't need the elevation antenna pattern correction, only a gdal virtual “vrt” file (and isce xml file) is generated. The “.vrt” file points to the Sentinel SLC file and reads it whenever required during the processing. If a user wants to write the “.vrt” SLC file to disk, it can be done easily using gdal_translate (e.g. gdal_translate -of ENVI File.vrt File.slc).
The “run_1_unpack_slc_topo_master” file also includes a command that refers to the config file of the stack master, which includes the configuration for running topo for the stack master. Note that in the pair-wise processing strategy one should run topo (mapping from range-Doppler to geo coordinates) for all pairs. However, with stackSentinel, topo needs to be run only once, for the master in the stack.
***run_2_average_baseline: ***
Computes average baseline for the stack. These baselines are not used for processing anywhere. They are only an approximation and can be used for plotting purposes. A more precise baseline grid is estimated later in run_10.
***run_3_extract_burst_overlaps: ***
Burst overlaps are extracted for estimating azimuth misregistration using the NESD technique. If the coregistration method is chosen to be “geometry”, then this run file won't exist and the overlaps are not extracted.
***run_4_overlap_geo2rdr_resample: ***
Running geo2rdr to estimate geometrical offsets between slave burst overlaps and the stack master burst overlaps. The slave burst overlaps are then resampled to the stack master burst overlaps.
***run_5_pairs_misreg: ***
Using the coregistered stack burst overlaps generated from the previous step, differential overlap interferograms are generated and are used for estimating azimuth misregistration using Enhanced Spectral Diversity (ESD) technique.
***run_6_timeseries_misreg: ***
A time-series of azimuth and range misregistration is estimated with respect to the stack master. The time-series is a least squares estimation from the pair misregistration of the previous step.
***run_7_geo2rdr_resample: ***
Using orbit and DEM, geometrical offsets between all slave SLCs and the stack master are computed. The geometrical offsets, together with the misregistration time-series (from the previous step), are used for precise coregistration of each burst SLC.
***run_8_extract_stack_valid_region: ***
The valid region between burst SLCs at the overlap area of the bursts slightly changes for different acquisitions. Therefore we need to keep track of these overlaps, which will be used during merging of bursts. Without this knowledge, lines of invalid data may appear in the merged products at the burst overlaps.
***run_9_merge: ***
Merges all bursts for the master and coregistered SLCs. The geometry files are also merged, including longitude, latitude, shadow and layover mask, line-of-sight files, etc.
***run_10_grid_baseline: ***
A coarse grid of baselines between each slave SLC and the stack master is generated. This is not used in any computation.
-------- Example 2: Coregistered stack of SLC with modified parameters -----------
In the following example, the same stack generation is requested but the threshold of the default coregistration approach (NESD) is relaxed from its default value of 0.85 to 0.7.
stackSentinel.py -s ../SLC/ -d ../DEM/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -a ../../AuxDir/ -o ../../Orbits -b '19 20 -99.5 -98.5' -W slc -e 0.7
When running all the run files, the final products are located in the merge folder, which has subdirectories “geom_master”, “baselines” and “SLC”. The “geom_master” folder contains geometry products such as longitude, latitude, height, local incidence angle, look angle, heading, and shadowing/layover mask files. The “baselines” folder contains sparse grids of the perpendicular baseline for each acquisition, while the “SLC” folder contains the coregistered SLCs.
------------------------------ Example 3: Stack of interferograms ------------------------------
Generate the run and configure files needed to generate a stack of interferograms.
In this example, a stack of interferograms is requested for which up to 2 nearest neighbor connections are included.
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -c 2
In the following example, all possible interferograms are being generated and in which the coregistration approach is set to use geometry and not the default NESD.
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C geometry -c all
When executing all the run files, a coregistered stack of SLCs is produced, and the burst interferograms are generated and then merged. Merged interferograms are multilooked, filtered and unwrapped. Geocoding is not applied. If users need to geocode any product, they can use the geocodeGdal.py script.
-------------------- Example 4: Correlation stack example ----------------------------
Generate the run and configure files needed to generate a stack of coherence.
In this example, a correlation stack is requested considering all possible coherence pairs and where the coregistration approach is done using geometry only.
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C geometry -c all -W correlation
This workflow is similar to the previous one. The difference is that the interferograms are not unwrapped.
----------------------------------- DEM download example -----------------------------------
Download the DEM (the wgs84 version is needed) using the ISCE DEM download script.
dem.py -a stitch -b 18 20 -100 -97 -r -s 1 -c
Updating the DEM's wgs84 xml to include the full path to the DEM
fixImageXml.py -f -i demLat_N18_N20_Lon_W100_W097.dem.wgs84

View File

@ -1,11 +0,0 @@
Users of the stack processors may refer to the following references:
For the stripmap stack processor and ionospheric phase estimation:
H. Fattahi, M. Simons, and P. Agram, "InSAR Time-Series Estimation of the Ionospheric Phase Delay: An Extension of the Split Range-Spectrum Technique", IEEE Trans. Geosci. Remote Sens., vol. 55, no. 10, 5984-5996, 2017. (https://ieeexplore.ieee.org/abstract/document/7987747/)
For TOPS stack processing:
H. Fattahi, P. Agram, and M. Simons, “A network-based enhanced spectral diversity approach for TOPS time-series analysis,” IEEE Trans. Geosci. Remote Sens., vol. 55, no. 2, pp. 777-786, Feb. 2017. (https://ieeexplore.ieee.org/abstract/document/7637021/)

View File

@ -0,0 +1,85 @@
## StripMap stack processor
The detailed algorithms and workflow for stack processing of stripmap SAR data can be found here:
+ Fattahi, H., M. Simons, and P. Agram (2017), InSAR Time-Series Estimation of the Ionospheric Phase Delay: An Extension of the Split Range-Spectrum Technique, IEEE Transactions on Geoscience and Remote Sensing, 55(10), 5984-5996, doi:[10.1109/TGRS.2017.2718566](https://ieeexplore.ieee.org/abstract/document/7987747/).
-----------------------------------
To use the stripmap stack processor, make sure to add the path of your `contrib/stack/stripmapStack` folder to your `$PATH` environment variable.
Currently supported workflows include a coregistered stack of SLCs, interferograms, and ionospheric delays.
Here are some notes to get started with processing stacks of stripmap data with ISCE.
#### 1. Create your project folder somewhere
```
mkdir MauleAlosDT111
cd MauleAlosDT111
```
#### 2. Prepare DEM
a) create a folder for the DEM;
b) create a DEM using dem.py with the SNWE of your study area in integer degrees;
c) keep only the ".dem.wgs84", ".dem.wgs84.vrt" and ".dem.wgs84.xml" files and remove the unnecessary ones;
d) fix the path of the file in the DEM's xml file by using fixImageXml.py.
```
mkdir DEM; cd DEM
dem.py -a stitch -b -37 -31 -72 -69 -r -s 1 -c
rm demLat*.dem demLat*.dem.xml demLat*.dem.vrt
fixImageXml.py -f -i demLat*.dem.wgs84
cd ..
```
#### 3. Download data
##### 3.1 Create a folder to download SAR data (e.g. ALOS-1 data from ASF)
```
mkdir download
cd download
```
##### 3.2 Download the data that you want to process to the "download" directory
#### 4. Prepare SAR data
Once all data have been downloaded, we need to unzip them and move them to separate folders, getting them ready for unpacking and then SLC generation. This can be done by running the following command in a directory above "download":
```
prepRawALOS.py -i download/ -o SLC
```
This command generates an empty SLC folder and a run file called: "run_unPackALOS".
You could also run prepRawSensor.py, which aims to recognize the sensor data automatically, followed by running the sensor-specific preparation script. For now we include support for ALOS and CSK raw data, but it is trivial to expand and include other sensors, as unpacking routines are already included in the distribution.
```
prepRawSensor.py -i download/ -o SLC
```
#### 5. Execute the commands in "run_unPackALOS" file
If you have a cluster to which you can submit jobs, you can submit each line of command to a processor. The commands are independent and can be run in parallel.
After successfully running the previous step, you should see acquisition dates in the SLC folder and the ".raw" files for each acquisition.
Note: For ALOS-1, if there is an acquisition that does not include a .raw file, this is most likely due to a PRF change between frames and cannot currently be handled by ISCE. You have to ignore those acquisitions.
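On a single multi-core machine, the independent unpack commands can instead be run in parallel, for instance with GNU parallel (assuming it is installed; 4 simultaneous jobs is an arbitrary choice):

```bash
# each line of the run file is an independent command
parallel -j 4 < run_unPackALOS
```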
#### 6. Run "stackStripMap.py"
This will generate many config and run files that need to be executed. Here is an example:
```
stackStripMap.py -s SLC/ -d DEM/demLat*.dem.wgs84 -t 250 -b 1000 -a 14 -r 4 -u snaphu
```
This will produce:
a) baseline folder, which contains baseline information
b) pairs.png, which is a baseline-time plot of the network of interferograms
c) configs: which contains the configuration parameters to run different InSAR processing steps
d) run_files: a folder that includes several run and job files that need to be run in order
#### 7. Execute the commands in the run files (run_1*, run_2*, etc.) in the "run_files" folder
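As a sketch, the run files can be executed sequentially with a small shell loop (this assumes the default run-file naming; `ls -v` sorts numerically so run_2 comes before run_10):

```bash
cd run_files
for rf in $(ls -v run_*); do
    echo "executing: $rf"
    bash "$rf" || break   # stop at the first failing step
done
```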

View File

@ -65,6 +65,8 @@ class config(object):
        self.f.write('master : ' + self.slcDir +'\n')
        self.f.write('dem : ' + self.dem +'\n')
        self.f.write('output : ' + self.geometryDir +'\n')
        self.f.write('alks : ' + self.alks +'\n')
        self.f.write('rlks : ' + self.rlks +'\n')
        if self.nativeDoppler:
            self.f.write('native : True\n')
        if self.useGPU:
@ -73,6 +75,17 @@
            self.f.write('useGPU : False\n')
        self.f.write('##########################'+'\n')

    def createWaterMask(self, function):
        self.f.write('##########################'+'\n')
        self.f.write(function+'\n')
        self.f.write('createWaterMask : '+'\n')
        self.f.write('dem_file : ' + self.dem +'\n')
        self.f.write('lat_file : ' + self.latFile +'\n')
        self.f.write('lon_file : ' + self.lonFile +'\n')
        self.f.write('output : ' + self.waterMaskFile + '\n')
        self.f.write('##########################'+'\n')

    def geo2rdr(self, function):
        self.f.write('##########################'+'\n')
@ -197,6 +210,8 @@
        self.f.write('nomcf : ' + self.noMCF + '\n')
        self.f.write('master : ' + self.master + '\n')
        self.f.write('defomax : ' + self.defoMax + '\n')
        self.f.write('alks : ' + self.alks + '\n')
        self.f.write('rlks : ' + self.rlks + '\n')
        self.f.write('method : ' + self.unwMethod + '\n')
        self.f.write('##########################'+'\n')
@ -307,8 +322,7 @@ class run(object):
        self.runf.write(self.text_cmd+'stripmapWrapper.py -c '+ configName+'\n')

    def master_focus_split_geometry(self, stackMaster, config_prefix, split=False, focus=True, native=True):
        """focusing master and producing geometry files"""
        configName = os.path.join(self.configDir, config_prefix + stackMaster)
        configObj = config(configName)
        configObj.configure(self)
@ -329,6 +343,14 @@
            configObj.outDir = configObj.slcDir
            configObj.shelve = os.path.join(configObj.slcDir, 'data')
            configObj.splitRangeSpectrum('[Function-{0}]'.format(counter))
            counter += 1

        # generate water mask in radar coordinates
        configObj.latFile = os.path.join(self.workDir, 'geom_master/lat.rdr')
        configObj.lonFile = os.path.join(self.workDir, 'geom_master/lon.rdr')
        configObj.waterMaskFile = os.path.join(self.workDir, 'geom_master/waterMask.rdr')
        configObj.createWaterMask('[Function-{0}]'.format(counter))
        counter += 1

        configObj.finalize()
        del configObj
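For reference, the createWaterMask entry that this config class writes into a step's config file would look roughly like the following (the function counter and paths are illustrative assumptions, not values from this commit):

```cfg
##########################
[Function-3]
createWaterMask :
dem_file : /projects/MauleT111/DEM/demLat_S37_S31_Lon_W072_W069.dem.wgs84
lat_file : /projects/MauleT111/geom_master/lat.rdr
lon_file : /projects/MauleT111/geom_master/lon.rdr
output : /projects/MauleT111/geom_master/waterMask.rdr
##########################
```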

View File

@ -2,30 +2,45 @@
#Author: Heresh Fattahi

import os
import argparse
import configparser
import numpy as np

import isce
import isceobj
from iscesys.DataManager import createManager
from contrib.demUtils.SWBDStitcher import SWBDStitcher

EXAMPLE = """example:
  createWaterMask.py -b 31 33 130 132
  createWaterMask.py -b 31 33 130 132 -l lat.rdr -L lon.rdr -o waterMask.rdr
  createWaterMask.py -d ../DEM/demLat_N31_N33_Lon_E130_E132.dem.wgs84 -l lat.rdr -L lon.rdr -o waterMask.rdr
"""

def createParser():
    '''
    Create command line parser.
    '''
    parser = argparse.ArgumentParser(description='Create water body mask in geo and/or radar coordinates',
                                     formatter_class=argparse.RawTextHelpFormatter,
                                     epilog=EXAMPLE)
    parser.add_argument('-b', '--bbox', dest='bbox', type=int, default=None, nargs=4, metavar=('S','N','W','E'),
                        help='Defines the spatial region in the format south north west east.\n'
                             'The values should be integers from (-90,90) for latitudes '
                             'and (0,360) or (-180,180) for longitudes.')
    parser.add_argument('-d','--dem_file', dest='demName', type=str, default=None,
                        help='DEM file in geo coordinates, i.e. demLat*.dem.wgs84.')
    parser.add_argument('-l', '--lat_file', dest='latName', type=str, default=None,
                        help='pixel by pixel lat file in radar coordinate')
    parser.add_argument('-L', '--lon_file', dest='lonName', type=str, default=None,
                        help='pixel by pixel lon file in radar coordinate')
    parser.add_argument('-o', '--output', dest='outfile', type=str,
                        help='output filename of water mask in radar coordinates')
    return parser

def cmdLineParse(iargs = None):
    '''
    Command line parser.
@ -33,37 +48,69 @@ def cmdLineParse(iargs = None):
    '''
    parser = createParser()
    inps = parser.parse_args(args=iargs)

    if not inps.bbox and not inps.demName:
        parser.print_usage()
        raise SystemExit('ERROR: no --bbox/--dem_file input, at least one is required.')

    if not inps.outfile and (inps.latName and inps.lonName):
        inps.outfile = os.path.join(os.path.dirname(inps.latName), 'waterMask.rdr')

    return inps

def dem2bbox(dem_file):
    """Grab bbox from DEM file in geo coordinates"""
    demImage = isceobj.createDemImage()
    demImage.load(dem_file + '.xml')
    demImage.setAccessMode('read')
    N = demImage.getFirstLatitude()
    W = demImage.getFirstLongitude()
    S = N + demImage.getDeltaLatitude() * demImage.getLength()
    E = W + demImage.getDeltaLongitude() * demImage.getWidth()
    bbox = [np.floor(S).astype(int), np.ceil(N).astype(int),
            np.floor(W).astype(int), np.ceil(E).astype(int)]
    return bbox

def download_waterMask(bbox, dem_file):
    out_dir = os.getcwd()
    # update out_dir and/or bbox if dem_file is input
    if dem_file:
        out_dir = os.path.dirname(dem_file)
        if not bbox:
            bbox = dem2bbox(dem_file)

    sw = createManager('wbd')
    sw.configure()
    sw.outputFile = os.path.join(out_dir, sw.defaultName(bbox))
    sw._noFilling = False
    sw._fillingValue = -1.0  #fill pixels without DEM data with value of -1, same as water body
    sw.stitch(bbox[0:2], bbox[2:])
    return sw.outputFile

def geo2radar(geo_file, rdr_file, lat_file, lon_file):
    sw = SWBDStitcher()
    sw.toRadar(geo_file, lat_file, lon_file, rdr_file)
    return rdr_file

#looks.py -i watermask.msk -r 4 -a 14 -o 'waterMask.14alks_4rlks.msk'
#imageMath.py -e='a*b' --a=filt_20100911_20101027.int --b=watermask.14alks_4rlks.msk -o filt_20100911_20101027_masked.int -t cfloat -s BIL

def main(iargs=None):
    inps = cmdLineParse(iargs)
    geo_file = download_waterMask(inps.bbox, inps.demName)
    if inps.latName and inps.lonName:
        geo2radar(geo_file, inps.outfile, inps.latName, inps.lonName)
    return

if __name__ == '__main__' :
    '''
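With the new `-d/--dem_file` option, a typical call for a stripmap stack might look like this (the paths are illustrative; the lat/lon files come from the topo step):

```bash
createWaterMask.py -d DEM/demLat_S37_S31_Lon_W072_W069.dem.wgs84 \
                   -l geom_master/lat.rdr -L geom_master/lon.rdr \
                   -o geom_master/waterMask.rdr
```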

View File

@ -20,7 +20,7 @@ defoMax = '2'
maxNodes = 72

def createParser():
    parser = argparse.ArgumentParser(description='Preparing the directory structure and config files for stack processing of StripMap data')

    parser.add_argument('-s', '--slc_directory', dest='slcDir', type=str, required=True,
                        help='Directory with all stripmap SLCs')
@ -31,7 +31,7 @@ def createParser():
                        help='Working directory ')
    parser.add_argument('-d', '--dem', dest='dem', type=str, required=True,
                        help='DEM file (with .xml and .vrt files)')
    parser.add_argument('-m', '--master_date', dest='masterDate', type=str, default=None,
                        help='Directory with master acquisition')
@ -43,47 +43,54 @@ def createParser():
                        help='Baseline threshold (max bperp in meters)')
    parser.add_argument('-a', '--azimuth_looks', dest='alks', type=str, default='10',
                        help='Number of looks in azimuth (automatically computed as AspectR*looks when '
                             '"S" or "sensor" is defined to give approximately square multi-look pixels)')
    parser.add_argument('-r', '--range_looks', dest='rlks', type=str, default='10',
                        help='Number of looks in range')
    parser.add_argument('-S', '--sensor', dest='sensor', type=str, required=False,
                        help='SAR sensor used to define square multi-look pixels')
    parser.add_argument('-u', '--unw_method', dest='unwMethod', type=str, default='snaphu',
                        help='unwrapping method (icu, snaphu, or snaphu2stage)')
    parser.add_argument('-f','--filter_strength', dest='filtStrength', type=str, default=filtStrength,
                        help='strength of Goldstein filter applied to the wrapped phase before spatial coherence estimation.'
                             ' Default: {}'.format(filtStrength))

    iono = parser.add_argument_group('Ionosphere', 'Configurations for ionospheric correction')
    iono.add_argument('-L', '--low_band_frequency', dest='fL', type=str, default=None,
                      help='low band frequency')
    iono.add_argument('-H', '--high_band_frequency', dest='fH', type=str, default=None,
                      help='high band frequency')
    iono.add_argument('-B', '--subband_bandwidth', dest='bandWidth', type=str, default=None,
                      help='sub-band bandwidth')
    iono.add_argument('--filter_sigma_x', dest='filterSigmaX', type=str, default='100',
                      help='filter sigma for gaussian filtering the dispersive and nonDispersive phase')
    iono.add_argument('--filter_sigma_y', dest='filterSigmaY', type=str, default='100.0',
                      help='sigma of the gaussian filter in Y direction, default=100')
    iono.add_argument('--filter_size_x', dest='filterSizeX', type=str, default='800.0',
                      help='size of the gaussian kernel in X direction, default = 800')
    iono.add_argument('--filter_size_y', dest='filterSizeY', type=str, default='800.0',
                      help='size of the gaussian kernel in Y direction, default=800')
    iono.add_argument('--filter_kernel_rotation', dest='filterKernelRotation', type=str, default='0.0',
                      help='rotation angle of the filter kernel in degrees (default = 0.0)')

    parser.add_argument('-W', '--workflow', dest='workflow', type=str, default='slc',
                        help='The InSAR processing workflow : (slc, interferogram, ionosphere)')
    parser.add_argument('-z', '--zero', dest='zerodop', action='store_true', default=False,
                        help='Use zero doppler geometry for processing - Default : No')
    parser.add_argument('--nofocus', dest='nofocus', action='store_true', default=False,
                        help='If input data is already focused to SLCs - Default : do focus')
    parser.add_argument('-c', '--text_cmd', dest='text_cmd', type=str, default='',
                        help='text command to be added to the beginning of each line of the run files. Example : source ~/.bash_profile;')
    parser.add_argument('-useGPU', '--useGPU', dest='useGPU', action='store_true', default=False,
                        help='Allow App to use GPU when available')
    parser.add_argument('--summary', dest='summary', action='store_true', default=False, help='Show summary only')

    return parser
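For instance, an ionosphere run that exercises the new argument group might be configured as follows (a sketch; the DEM name, look numbers and filter sigmas are assumed values):

```bash
stackStripMap.py -s SLC/ -d DEM/demLat_S37_S31_Lon_W072_W069.dem.wgs84 -t 250 -b 1000 \
                 -a 14 -r 4 -u snaphu -W ionosphere \
                 --filter_sigma_x 100 --filter_sigma_y 100
```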

View File

@ -1,13 +1,16 @@
#!/usr/bin/env python3

import os
import argparse
import shelve
import datetime
import shutil
import numpy as np

import isce
import isceobj
from isceobj.Constants import SPEED_OF_LIGHT
from isceobj.Util.Poly2D import Poly2D
from mroipac.looks.Looks import Looks

def createParser():
    '''
@ -328,6 +331,7 @@ def runTopoCPU(info, demImage, dop=None,
    topo.topo()
    return

def runSimamp(outdir, hname='z.rdr'):
    from iscesys.StdOEL.StdOELPy import create_writer
@ -354,6 +358,86 @@ def runSimamp(outdir, hname='z.rdr'):
    simImage.renderHdr()
    hgtImage.finalizeImage()
    simImage.finalizeImage()
    return

def runMultilook(in_dir, out_dir, alks, rlks):
    print('generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks))
    from iscesys.Parsers.FileParserFactory import createFileParser
    FP = createFileParser('xml')

    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
        print('create directory: {}'.format(out_dir))

    for fbase in ['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']:
        fname = '{}.rdr'.format(fbase)
        in_file = os.path.join(in_dir, fname)
        out_file = os.path.join(out_dir, fname)

        if os.path.isfile(in_file):
            xmlProp = FP.parse(in_file+'.xml')[0]
            if('image_type' in xmlProp and xmlProp['image_type'] == 'dem'):
                inImage = isceobj.createDemImage()
            else:
                inImage = isceobj.createImage()

            inImage.load(in_file+'.xml')
            inImage.filename = in_file

            lkObj = Looks()
            lkObj.setDownLooks(alks)
            lkObj.setAcrossLooks(rlks)
            lkObj.setInputImage(inImage)
            lkObj.setOutputFilename(out_file)
            lkObj.looks()

            # copy the full resolution xml/vrt file from ./merged/geom_master to ./geom_master
            # to facilitate the number of looks extraction
            # the file path inside the .xml file is not, but should be, updated
            shutil.copy(in_file+'.xml', out_file+'.full.xml')
            shutil.copy(in_file+'.vrt', out_file+'.full.vrt')
    return out_dir

def runMultilookGdal(in_dir, out_dir, alks, rlks):
    print('generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks))
    import gdal

    # create 'geom_master' directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
        print('create directory: {}'.format(out_dir))

    # multilook files one by one
    for fbase in ['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']:
        fname = '{}.rdr'.format(fbase)
        in_file = os.path.join(in_dir, fname)
        out_file = os.path.join(out_dir, fname)

        if os.path.isfile(in_file):
            ds = gdal.Open(in_file, gdal.GA_ReadOnly)
            in_wid = ds.RasterXSize
            in_len = ds.RasterYSize

            out_wid = int(in_wid / rlks)
            out_len = int(in_len / alks)
            src_wid = out_wid * rlks
            src_len = out_len * alks

            cmd = 'gdal_translate -of ENVI -a_nodata 0 -outsize {ox} {oy} '.format(ox=out_wid, oy=out_len)
            cmd += ' -srcwin 0 0 {sx} {sy} {fi} {fo} '.format(sx=src_wid, sy=src_len, fi=in_file, fo=out_file)
            print(cmd)
            os.system(cmd)

            # copy the full resolution xml/vrt file from ./merged/geom_master to ./geom_master
            # to facilitate the number of looks extraction
            # the file path inside the .xml file is not, but should be, updated
            shutil.copy(in_file+'.xml', out_file+'.full.xml')
            shutil.copy(in_file+'.vrt', out_file+'.full.vrt')
    return out_dir

def extractInfo(frame, inps):
@ -369,8 +453,8 @@ def extractInfo(frame, inps):
    info.lookSide = frame.instrument.platform.pointingDirection
    info.rangeFirstSample = frame.startingRange
    info.numberRangeLooks = 1 #inps.rlks
    info.numberAzimuthLooks = 1 #inps.alks

    fsamp = frame.rangeSamplingRate
@ -419,11 +503,9 @@ def main(iargs=None):
        doppler = db['doppler']
    except:
        doppler = frame._dopplerVsPixel
    db.close()

    ####Setup dem
    demImage = isceobj.createDemImage()
    demImage.load(inps.dem + '.xml')
@ -439,14 +521,20 @@ def main(iargs=None):
    info.incFilename = os.path.join(info.outdir, 'incLocal.rdr')
    info.maskFilename = os.path.join(info.outdir, 'shadowMask.rdr')

    runTopo(info,demImage,dop=doppler,nativedop=inps.nativedop, legendre=inps.legendre)
    runSimamp(os.path.dirname(info.heightFilename),os.path.basename(info.heightFilename))

    # write multilooked geometry files in "geom_master" directory, same level as "Igrams"
    if inps.rlks * inps.alks > 1:
        out_dir = os.path.join(os.path.dirname(os.path.dirname(info.outdir)), 'geom_master')
        runMultilookGdal(in_dir=info.outdir, out_dir=out_dir, alks=inps.alks, rlks=inps.rlks)
        #runMultilook(in_dir=info.outdir, out_dir=out_dir, alks=inps.alks, rlks=inps.rlks)
    return

if __name__ == '__main__':
    '''
    Main driver.
    '''
    main()
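As an illustration of what runMultilookGdal issues: for a hypothetical 5000x9800-pixel lat.rdr with rlks=4 and alks=14, out_wid = int(5000/4) = 1250 and out_len = int(9800/14) = 700, so the generated command would be (paths assumed):

```bash
gdal_translate -of ENVI -a_nodata 0 -outsize 1250 700 -srcwin 0 0 5000 9800 merged/geom_master/lat.rdr geom_master/lat.rdr
```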

View File

@ -73,8 +73,7 @@ def makeOnePlot(filename, pos):
    minx = np.clip(np.min(pos[:,2])-win, 0, npix-1)
    maxx = np.clip(np.max(pos[:,2])+win, 0, npix-1)

    box = np.power(np.abs(data[int(miny):int(maxy), int(minx):int(maxx)]), 0.4)

    plt.figure('CR analysis')
@ -104,7 +103,7 @@ def getAzRg(frame,llh):
    pol._normRange = frame.instrument.rangePixelSize
    pol.initPoly(azimuthOrder=0, rangeOrder=len(coeffs)-1, coeffs=[coeffs])

    taz, rgm = frame.orbit.geo2rdr(list(llh)[1:], side=frame.instrument.platform.pointingDirection,
                                   doppler=pol, wvl=frame.instrument.getRadarWavelength())

    line = (taz - frame.sensingStart).total_seconds() * frame.PRF
@ -145,7 +144,7 @@ if __name__ == '__main__':
    # frame.startingRange = frame.startingRange + 100.0

    ###Load CRS positions
    llhs = np.loadtxt(inps.posfile, delimiter=',')

    crs = []
View File

@ -113,9 +113,19 @@ def extractInfoFromPickle(pckfile, inps):
        data['earthRadius'] = elp.local_radius_of_curvature(llh.lat, hdg)

        #azspacing = burst.azimuthTimeInterval * sv.getScalarVelocity()
        #azres = 20.0
        azspacing = sv.getScalarVelocity() / burst.PRF
        azres = burst.platform.antennaLength / 2.0
        azfact = azres / azspacing

        burst.getInstrument()
        rgBandwidth = burst.instrument.pulseLength * burst.instrument.chirpSlope
        rgres = abs(SPEED_OF_LIGHT / (2.0 * rgBandwidth))
        rgspacing = burst.instrument.rangePixelSize
        rgfact = rgres / rgspacing

        #data['corrlooks'] = inps.rglooks * inps.azlooks * azspacing / azres
        data['corrlooks'] = inps.rglooks * inps.azlooks / (azfact * rgfact)
        data['rglooks'] = inps.rglooks
        data['azlooks'] = inps.azlooks
@ -149,7 +159,7 @@ def runUnwrap(infile, outfile, corfile, config, costMode = None,initMethod = Non
    altitude = config['altitude']
    rangeLooks = config['rglooks']
    azimuthLooks = config['azlooks']
    corrLooks = config['corrlooks']
    maxComponents = 20

    snp = Snaphu()
@ -163,7 +173,7 @@ def runUnwrap(infile, outfile, corfile, config, costMode = None,initMethod = Non
    snp.setAltitude(altitude)
    snp.setCorrfile(corfile)
    snp.setInitMethod(initMethod)
    snp.setCorrLooks(corrLooks)
    snp.setMaxComponents(maxComponents)
    snp.setDefoMaxCycles(defomax)
    snp.setRangeLooks(rangeLooks)
@ -248,7 +258,8 @@ def runUnwrapIcu(infile, outfile):
    unwImage.finalizeImage()
    unwImage.renderHdr()

def runUnwrap2Stage(unwrappedIntFilename, connectedComponentsFilename, unwrapped2StageFilename,
                    unwrapper_2stage_name=None, solver_2stage=None):

    if unwrapper_2stage_name is None:
        unwrapper_2stage_name = 'REDARC0'
@ -303,6 +314,7 @@ def main(iargs=None):
    pckfile = os.path.join(masterShelveDir,'data')
    print(pckfile)
    metadata = extractInfoFromPickle(pckfile, inps)

    ########
    print ('unwrapping method : ' , inps.method)
    if inps.method == 'snaphu':
@ -311,6 +323,7 @@ def main(iargs=None):
        else:
            fncall = runUnwrapMcf
        fncall(inps.intfile, inps.unwprefix + '_snaphu.unw', inps.cohfile, metadata, defomax=inps.defomax)

    elif inps.method == 'snaphu2stage':
        if inps.nomcf:
            fncall = runUnwrap
@ -319,8 +332,9 @@ def main(iargs=None):
        fncall(inps.intfile, inps.unwprefix + '_snaphu.unw', inps.cohfile, metadata, defomax=inps.defomax)

        # adding in the two-stage
        runUnwrap2Stage(inps.unwprefix + '_snaphu.unw',
                        inps.unwprefix + '_snaphu.unw.conncomp',
                        inps.unwprefix + '_snaphu2stage.unw')

    elif inps.method == 'icu':
        runUnwrapIcu(inps.intfile, inps.unwprefix + '_icu.unw')
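A quick sanity check of the restored corrlooks estimate, with assumed ALOS-like numbers (illustrative values, not taken from this repository):

```python
# assumed sensor numbers, for illustration only
azres, azspacing = 4.4, 3.5    # m; antennaLength/2 and velocity/PRF
rgres, rgspacing = 9.6, 4.7    # m; c/(2*bandwidth) and range pixel size
azfact = azres / azspacing     # ~1.26
rgfact = rgres / rgspacing     # ~2.04
rglooks, azlooks = 4, 14
corrlooks = rglooks * azlooks / (azfact * rgfact)
print(round(corrlooks, 1))     # ~21.8 independent looks passed to snaphu
```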

View File

@ -1,38 +1,80 @@
## Sentinel-1 TOPS stack processor
The detailed algorithm for stack processing of TOPS data can be find here: The detailed algorithm for stack processing of TOPS data can be find here:
H. Fattahi, P. Agram, and M. Simons, “A network-based enhanced spectral diversity approach for TOPS time-series analysis,” IEEE Trans. Geosci. Remote Sens., vol. 55, no. 2, pp. 777786, Feb. 2017. (https://ieeexplore.ieee.org/abstract/document/7637021/) + Fattahi, H., P. Agram, and M. Simons (2016), A Network-Based Enhanced Spectral Diversity Approach for TOPS Time-Series Analysis, IEEE Transactions on Geoscience and Remote Sensing, 55(2), 777-786, doi:[10.1109/TGRS.2016.2614925](https://ieeexplore.ieee.org/abstract/document/7637021).
-----------------------------------
<<<<<< Sentinel-1 TOPS stack processor >>>>>> To use the sentinel stack processor, make sure to add the path of your `contrib/stack/topsStack` folder to your `$PATH` environment varibale.
To use the sentinel stack processor, make sure to add the path of your "contrib/stack/topsStack" folder to your $PATH environment varibale.
The scripts provides support for Sentinel-1 TOPS stack processing. Currently supported workflows include a coregistered stack of SLC, interferograms, offsets, and coherence. The scripts provides support for Sentinel-1 TOPS stack processing. Currently supported workflows include a coregistered stack of SLC, interferograms, offsets, and coherence.
stackSentinel.py generates all configuration and run files required to be executed on a stack of Sentinel-1 TOPS data. When stackSentinel.py is executed for a given workflow (-W option) a “configs” and “run_files” folder is generated. No processing is performed at this stage. Within the run_files folder different run_#_description files are contained which are to be executed as shell scripts in the run number order. Each of these run scripts call specific configure files contained in the “configs” folder which call ISCE in a modular fashion. The configure and run files will change depending on the selected workflow. To make run_# files executable, change the file permission accordingly (e.g., chmod +x run_1_unpack_slc). `stackSentinel.py` generates all configuration and run files required to be executed on a stack of Sentinel-1 TOPS data. When stackSentinel.py is executed for a given workflow (-W option) a **configs** and **run_files** folder is generated. No processing is performed at this stage. Within the run_files folder different run\_#\_description files are contained which are to be executed as shell scripts in the run number order. Each of these run scripts call specific configure files contained in the “configs” folder which call ISCE in a modular fashion. The configure and run files will change depending on the selected workflow. To make run_# files executable, change the file permission accordingly (e.g., `chmod +x run_1_unpack_slc`).
To see workflow examples, type “stackSentinel.py -H” ```bash
To get an overview of all the configurable parameters, type “stackSentinel.py -h” stackSentinel.py -H #To see workflow examples,
stackSentinel.py -h #To get an overview of all the configurable parameters
```
Required parameters of stackSentinel.py include: Required parameters of stackSentinel.py include:
-s SLC_DIRNAME A folder with downloaded Sentinel-1 SLCs.
-o ORBIT_DIRNAME A folder containing the Sentinel-1 orbits.
Missing orbit files will be downloaded automatically
-a AUX_DIRNAME A folder containing the Sentinel-1 Auxiliary files
-d DEM A DEM (Digital Elevation Model) referenced to wgs84
```cfg
-s SLC_DIRNAME #A folder with downloaded Sentinel-1 SLCs.
-o ORBIT_DIRNAME #A folder containing the Sentinel-1 orbits. Missing orbit files will be downloaded automatically
-a AUX_DIRNAME #A folder containing the Sentinel-1 Auxiliary files
-d DEM_FILENAME #A DEM (Digital Elevation Model) referenced to wgs84
```
In the following, different workflow examples are provided. Note that stackSentinel.py only generates the run and configure files. To perform the actual processing, the user will need to execute each run file in their numbered order. In the following, different workflow examples are provided. Note that stackSentinel.py only generates the run and configure files. To perform the actual processing, the user will need to execute each run file in their numbered order.
In all workflows, coregistration (-C option) can be done using only geometry (set option = geometry) or with geometry plus refined azimuth offsets through NESD (set option = NESD) approach, the latter being the default. For the NESD coregistrstion the user can control the ESD coherence threshold (-e option) and the number of overlap interferograms (-O) to be used in NESD estimation. In all workflows, coregistration (-C option) can be done using only geometry (set option = geometry) or with geometry plus refined azimuth offsets through NESD (set option = NESD) approach, the latter being the default. For the NESD coregistrstion the user can control the ESD coherence threshold (-e option) and the number of overlap interferograms (-O) to be used in NESD estimation.
------------------------------ Example 1: Coregistered stack of SLC ---------------------------- #### AUX_CAL file download ####
Generate the run and configure files needed to generate a coregistered stack of SLCs.
In this example, a pre-defined bounding box is specified. Note, if the bounding box is not provided it is set by default to the common SLC area among all SLCs. We recommend that user always set the processing bounding box. Since ESA does not have a fixed frame definition, we suggest to download data for a larger bounding box compared to the actual bounding box used in stackSentinel.py. This way user can ensure to have required data to cover the region of interest. Here is an example command to create configuration files for a stack of SLCs:
The following calibration auxliary (AUX_CAL) file is used for **antenna pattern correction** to compensate the range phase offset of SAFE products with **IPF verison 002.36** (mainly for images acquired before March 2015). If all your SAFE products are from another IPF version, then no AUX files are needed. Check [ESA document](https://earth.esa.int/documents/247904/1653440/Sentinel-1-IPF_EAP_Phase_correction) for details.
Run the command below to download the AUX_CAL file once and store it somewhere (_i.e._ ~/aux/aux_cal) so that you can use it all the time, for `stackSentinel.py -a` or `auxiliary data directory` in `topsApp.py`.
```
wget https://qc.sentinel1.eo.esa.int/product/S1A/AUX_CAL/20140908T000000/S1A_AUX_CAL_V20140908T000000_G20190626T100201.SAFE.TGZ
tar zxvf S1A_AUX_CAL_V20140908T000000_G20190626T100201.SAFE.TGZ
rm S1A_AUX_CAL_V20140908T000000_G20190626T100201.SAFE.TGZ
```
#### 1. Create your project folder somewhere ####
```
mkdir MexicoSenAT72
cd MexicoSenAT72
```
#### 2. Prepare DEM ####
Download of DEM (need to use wgs84 version) using the ISCE DEM download script.
```
mkdir DEM; cd DEM
dem.py -a stitch -b 18 20 -100 -97 -r -s 1 c
rm demLat*.dem demLat*.dem.xml demLat*.dem.vrt
fixImageXml.py -f -i demLat*.dem.wgs84 #Updating DEMs wgs84 xml to include full path to the DEM
cd ..
```
#### 3. Download Sentinel-1 data to SLC ####
#### 4.1 Example workflow: Coregistered stack of SLC ####
Generate the run and configure files needed to generate a coregistered stack of SLCs. In this example, a pre-defined bounding box is specified. Note, if the bounding box is not provided it is set by default to the common SLC area among all SLCs. We recommend that user always set the processing bounding box. Since ESA does not have a fixed frame definition, we suggest to download data for a larger bounding box compared to the actual bounding box used in stackSentinel.py. This way user can ensure to have required data to cover the region of interest. Here is an example command to create configuration files for a stack of SLCs:
```
stackSentinel.py -s ../SLC/ -d ../DEM/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -a ../../AuxDir/ -o ../../Orbits -b '19 20 -99.5 -98.5' -W slc stackSentinel.py -s ../SLC/ -d ../DEM/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -a ../../AuxDir/ -o ../../Orbits -b '19 20 -99.5 -98.5' -W slc
```
by running the command above, the configs and run_files folders are created. User needs to execute each run file in order. The order is specified by the index number of the run file name. For the example above, the run_files folder includes the following files: by running the command above, the configs and run_files folders are created. User needs to execute each run file in order. The order is specified by the index number of the run file name. For the example above, the run_files folder includes the following files:
- run_1_unpack_slc_topo_master - run_1_unpack_slc_topo_master
- run_2_average_baseline - run_2_average_baseline
- run_3_extract_burst_overlaps - run_3_extract_burst_overlaps
@ -46,72 +88,83 @@ by running the command above, the configs and run_files folders are created. Use
The generated run files are self descriptive. Below is a short explanation on what each run_file does: The generated run files are self descriptive. Below is a short explanation on what each run_file does:
***run_1_unpack_slc_topo_master:*** **run_1_unpack_slc_topo_master:**
Includes commands to unpack Sentinel-1 TOPS SLCs using ISCE readers. For older SLCs which need antenna elevation pattern correction, the file is extracted and written to disk. For newer version of SLCs which dont need the elevation antenna pattern correction, only a gdal virtual “vrt” file (and isce xml file) is generated. The “.vrt” file points to the Sentinel SLC file and reads them whenever required during the processing. If a user wants to write the “.vrt” SLC file to disk, it can be done easily using gdal_translate (e.g. gdal_translate of ENVI File.vrt File.slc). Includes commands to unpack Sentinel-1 TOPS SLCs using ISCE readers. For older SLCs which need antenna elevation pattern correction, the file is extracted and written to disk. For newer version of SLCs which dont need the elevation antenna pattern correction, only a gdal virtual “vrt” file (and isce xml file) is generated. The “.vrt” file points to the Sentinel SLC file and reads them whenever required during the processing. If a user wants to write the “.vrt” SLC file to disk, it can be done easily using gdal_translate (e.g. gdal_translate of ENVI File.vrt File.slc).
The “run_1_unpack_slc_topo_master” also includes a command that refers to the config file of the stack master, which includes configuration for running topo for the stack master. Note that in the pair-wise processing strategy one should run topo (mapping from range-Doppler to geo coordinate) for all pairs. However, with stackSentinel, topo needs to be run only one time for the master in the stack. The “run_1_unpack_slc_topo_master” also includes a command that refers to the config file of the stack master, which includes configuration for running topo for the stack master. Note that in the pair-wise processing strategy one should run topo (mapping from range-Doppler to geo coordinate) for all pairs. However, with stackSentinel, topo needs to be run only one time for the master in the stack.
**run_2_average_baseline:**
Computes the average baseline for the stack. These baselines are not used anywhere in the processing; they are only an approximation and can be used for plotting purposes. A more precise baseline grid is estimated later in run_10.
**run_3_extract_burst_overlaps:**
Burst overlaps are extracted for estimating azimuth misregistration using the NESD technique. If the coregistration method is set to “geometry”, this run file won't exist and the overlaps are not extracted.
**run_4_overlap_geo2rdr_resample:**
Runs geo2rdr to estimate the geometrical offsets between the slave burst overlaps and the stack master burst overlaps. The slave burst overlaps are then resampled to the stack master burst overlaps.
**run_5_pairs_misreg:**
Using the coregistered stack burst overlaps generated in the previous step, differential overlap interferograms are formed and used for estimating azimuth misregistration with the Enhanced Spectral Diversity (ESD) technique.
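For intuition, the ESD double-difference phase in a burst overlap relates, to first order, to the azimuth timing error (illustrative notation, following Prats-Iraola et al., 2012, not taken from the code):

$$\Delta\phi_{\mathrm{ESD}} \approx 2\pi\,\Delta f_{\mathrm{ovl}}\,\Delta t \quad\Rightarrow\quad \Delta t \approx \frac{\Delta\phi_{\mathrm{ESD}}}{2\pi\,\Delta f_{\mathrm{ovl}}}$$

where $\Delta f_{\mathrm{ovl}}$ is the Doppler centroid frequency difference between the forward- and backward-looking views of the overlap, and $\Delta t$ is the azimuth misregistration in seconds.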
**run_6_timeseries_misreg:**
A time series of azimuth and range misregistration is estimated with respect to the stack master. The time series is a least squares estimation from the pair misregistrations of the previous step.
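To illustrate the least squares step, here is a minimal sketch with made-up numbers (variable names and values are illustrative, not taken from the stack processor):

```python
import numpy as np

# Invert pairwise misregistration measurements into a per-date time series
# relative to the stack master (date 0, whose offset is fixed to zero).
pairs = [(0, 1), (1, 2), (0, 2)]       # (reference_date, secondary_date) indices
d = np.array([0.02, 0.01, 0.035])      # measured offset per pair (pixels)

num_dates = 3
A = np.zeros((len(pairs), num_dates - 1))  # unknowns: offsets of dates 1..N-1
for k, (i, j) in enumerate(pairs):
    if j > 0:
        A[k, j - 1] += 1.0             # + offset of the secondary date
    if i > 0:
        A[k, i - 1] -= 1.0             # - offset of the reference date

ts, residuals, rank, sv = np.linalg.lstsq(A, d, rcond=None)
print(ts)  # estimated misregistration of each date w.r.t. the master
```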
**run_7_geo2rdr_resample:**
Using the orbits and the DEM, the geometrical offsets between all slave SLCs and the stack master are computed. The geometrical offsets, together with the misregistration time series (from the previous step), are used for precise coregistration of each burst SLC.
**run_8_extract_stack_valid_region:**
The valid region between burst SLCs in the burst overlap area changes slightly from one acquisition to another. Therefore we need to keep track of these overlaps, which are used when merging bursts. Without this knowledge, lines of invalid data may appear in the merged products at the burst overlaps.
**run_9_merge:**
Merges all bursts for the master and the coregistered SLCs. The geometry files are also merged, including longitude, latitude, shadow and layover masks, line-of-sight files, etc.
**run_10_grid_baseline:**
A coarse grid of baselines between each slave SLC and the stack master is generated. This is not used in any computation.
#### 4.2 Example workflow: Coregistered stack of SLC with modified parameters ####
In the following example, the same stack generation is requested, but the threshold of the default coregistration approach (NESD) is relaxed from its default value of 0.85 to 0.7.
```
stackSentinel.py -s ../SLC/ -d ../DEM/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -a ../../AuxDir/ -o ../../Orbits -b '19 20 -99.5 -98.5' -W slc -e 0.7
```
When running all the run files, the final products are located in the merged folder, which has subdirectories **geom_master**, **baselines** and **SLC**. The **geom_master** folder contains geometry products such as longitude, latitude, height, local incidence angle, look angle, heading, and shadowing/layover mask files. The **baselines** folder contains sparse grids of the perpendicular baseline for each acquisition, while the **SLC** folder contains the coregistered SLCs.
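For orientation, the layout looks roughly like this (file names are indicative, not exhaustive):

```
merged/
├── geom_master/   # lat.rdr, lon.rdr, hgt.rdr, los.rdr, shadowMask.rdr, ...
├── baselines/     # coarse perpendicular-baseline grid per slave date
└── SLC/           # one coregistered SLC per acquisition date
```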
#### 4.3 Example workflow: Stack of interferograms ####
Generate the run and configure files needed to produce a stack of interferograms.
In this example, a stack of interferograms is requested for which up to 2 nearest-neighbor connections are included.
```
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -c 2
```
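For intuition, `-c 2` pairs each acquisition with its two nearest neighbors in time. A minimal sketch of such a pair enumeration (the dates are made up, and this is not the code stackSentinel.py runs internally):

```python
# Enumerate interferogram pairs with up to `num_connections` nearest neighbors.
dates = ['20180101', '20180113', '20180125', '20180206']
num_connections = 2

pairs = [(dates[i], dates[j])
         for i in range(len(dates))
         for j in range(i + 1, min(i + num_connections + 1, len(dates)))]
print(pairs)
# [('20180101', '20180113'), ('20180101', '20180125'),
#  ('20180113', '20180125'), ('20180113', '20180206'),
#  ('20180125', '20180206')]
```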
In the following example, all possible interferograms are generated, and the coregistration approach is set to geometry instead of the default NESD.
```
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C geometry -c all
```
When executing all the run files, a coregistered stack of SLCs is produced, and the burst interferograms are generated and then merged. Merged interferograms are multilooked, filtered and unwrapped. Geocoding is not applied. If users need to geocode any product, they can use the geocodeGdal.py script.
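As a sketch, a geocoding call might look like the following; the paths are placeholders and the option letters are assumptions to be verified against geocodeGdal.py --help:

```
geocodeGdal.py -l merged/geom_master/lat.rdr -L merged/geom_master/lon.rdr \
               -f merged/interferograms/20180101_20180113/filt_fine.unw \
               -b '19 20 -99.5 -98.5'
```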
#### 4.4 Example workflow: Stack of correlation ####
Generate the run and configure files needed to produce a stack of coherence.
In this example, a correlation stack is requested considering all possible coherence pairs, and the coregistration approach uses geometry only.
```
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C geometry -c all -W correlation
```
This workflow is similar to the previous one; the difference is that the interferograms are not unwrapped.
**DEM download example:**
Download the DEM (the wgs84 version is required) using the ISCE DEM download script, then update the DEM's wgs84 xml to include the full path to the DEM:
```
dem.py -a stitch -b 18 20 -100 -97 -r -s 1 -c
fixImageXml.py -f -i demLat_N18_N20_Lon_W100_W097.dem.wgs84
```

#### 5. Execute the commands in run files (run_1*, run_2*, etc) in the "run_files" folder ####
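Each run file is a plain list of commands, so one simple way to execute them in order is a loop like the sketch below (adapt it to your job scheduler; note that a naive shell glob would sort run_10 before run_2):

```
for i in $(seq 1 10); do
    bash run_files/run_${i}_*
done
```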

View File

@ -12,7 +12,8 @@
 <value>/Users/fattahi/process/test_roiApp/Alos_Maule_T116/demLat_S39_S35_Lon_W074_W071.dem.wgs84</value>
 </property>
 <!--
-<property name="do rubbersheeting">True</property>
+<property name="do rubbersheetingAzimuth">True</property>
+<property name="do rubbersheetingRange">False</property>
 -->
 <property name="do denseoffsets">True</property>
 <property name="do split spectrum">True</property>

View File

@ -52,7 +52,7 @@ def generate(env):
 # default flags for the NVCC compiler
 env['STATICNVCCFLAGS'] = ''
 env['SHAREDNVCCFLAGS'] = ''
-env['ENABLESHAREDNVCCFLAG'] = '-arch=sm_35 -shared -Xcompiler -fPIC'
+env['ENABLESHAREDNVCCFLAG'] = '-shared -Xcompiler -fPIC'
 # default NVCC commands
 env['STATICNVCCCMD'] = '$NVCC -o $TARGET -c $NVCCFLAGS $STATICNVCCFLAGS $SOURCES'
@ -153,7 +153,7 @@ def generate(env):
 #env.Append(LIBPATH=[cudaSDKPath + '/lib', cudaSDKPath + '/common/lib' + cudaSDKSubLibDir, cudaToolkitPath + '/lib'])
 env.Append(CUDACPPPATH=[cudaToolkitPath + '/include'])
-env.Append(CUDALIBPATH=[cudaToolkitPath + '/lib', cudaToolkitPath + '/lib64'])
+env.Append(CUDALIBPATH=[cudaToolkitPath + '/lib', cudaToolkitPath + '/lib64', '/lib64'])
 env.Append(CUDALIBS=['cudart'])
 def exists(env):

View File

@ -12,7 +12,7 @@
 from __future__ import print_function
 import sys
 import os
-import urllib2
+import urllib.request
 import getopt
 import re
 import shutil
@ -57,7 +57,7 @@ def print2log(msg, withtime=True, cmd=False):
     if withtime:
         now = datetime.datetime.today()
         msg = "%s >> %s" % (now.isoformat(), msg)
-    LOGFILE.write(msg + '\n')
+    LOGFILE.write((msg + '\n').encode('utf-8'))
     LOGFILE.flush()
     os.fsync(LOGFILE)
@ -157,9 +157,9 @@ def downloadfile(url, fname, repeat=1):
     counter = 0
     while counter < repeat:
         try:
-            response = urllib2.urlopen(url)
+            response = urllib.request.urlopen(url)
             break
-        except urllib2.URLError, e:
+        except urllib.request.URLError as e:
             counter += 1
             if hasattr(e, 'reason'):
                 print2log("Failed to reach server. Reason: %s" % e.reason)
@ -851,7 +851,7 @@ class ISCEDeps(object):
         f = open(self.config, 'rb')
         lines = f.readlines()
         for line in lines:
-            m = re.match("([^#].*?)=([^#]+?)$", line.strip())
+            m = re.match("([^#].*?)=([^#]+?)$", line.strip().decode('utf-8'))
             if m:
                 var = m.group(1).strip()
                 val = m.group(2).strip()
@ -867,7 +867,7 @@ def readSetupConfig(setup_config):
     f = open(setup_config, 'rb')
     lines = f.readlines()
     for line in lines:
-        m = re.match("([^#].*?)=([^#]+?)$", line.strip())
+        m = re.match("([^#].*?)=([^#]+?)$", line.strip().decode('utf-8'))
         if m:
             var = m.group(1).strip()
             val = m.group(2).strip().replace('"', '')
@ -885,7 +885,7 @@ def checkArgs(args):
     """
     try:
         opts, args = getopt.getopt(args, "h", ["help", "prefix=", "ping=", "config=", "uname=", "download=", "unpack=", "install=", "gcc=", "gpp=", "verbose"])
-    except getopt.GetoptError, err:
+    except getopt.GetoptError as err:
         print2log("ProgError: %s" % str(err))
         usage()
         sys.exit(2)