#!/usr/bin/env python3
########################
#Author: Heresh Fattahi
#######################
import os, sys, glob
import argparse
import configparser
import datetime
import time
import numpy as np
import isce
import isceobj
from isceobj.Sensor.TOPS.Sentinel1 import Sentinel1
from topsStack.Stack import config, run, sentinelSLC
helpstr = """
Stack processor for Sentinel-1 data using ISCE software.
For a full list of different options, try stackSentinel.py -h
stackSentinel.py generates all the configuration files and run files required to process a stack of Sentinel-1 TOPS data.
The following are required to start processing:
1) a folder that includes Sentinel-1 SLCs,
2) a DEM (Digital Elevation Model),
3) a folder that includes precise orbits (use dloadOrbits.py to download/update your orbit folder; missing orbits are downloaded on the fly),
4) a folder for Sentinel-1 Aux files (used for correcting the Elevation Antenna Pattern).
Note that stackSentinel.py does not process any data. It only prepares the configuration files and run files required for processing, which you then need to execute in order. To see what is actually being done, look at each run file generated by stackSentinel.py after running it. Each run file contains several commands that are independent of each other and can be executed in parallel. The config files referenced by a run file contain the processing options for a specific command/function.
Note also that the run files need to be executed in order, i.e., running run_03 needs results from run_02, etc.
##############################################
#Examples:
stackSentinel.py can be run for different workflows, including a stack of interferograms, a stack of correlation files, a stack of offsets, or a coregistered stack of SLCs. The workflow can be chosen with the -W option.
%%%%%%%%%%%%%%%
Example 1:
# interferogram workflow with 2 nearest neighbor connections (default coregistration is NESD):
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -c 2
%%%%%%%%%%%%%%%
Example 2:
# interferogram workflow with all possible interferograms and coregistration with only geometry:
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C geometry -c all
%%%%%%%%%%%%%%%
Example 3:
# correlation workflow with all possible correlation pairs and coregistration with geometry:
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C geometry -c all -W correlation
%%%%%%%%%%%%%%%
Example 4:
# slc workflow that produces a coregistered stack of SLCs
stackSentinel.py -s ../SLC/ -d ../../MexicoCity/demLat_N18_N20_Lon_W100_W097.dem.wgs84 -b '19 20 -99.5 -98.5' -a ../../AuxDir/ -o ../../Orbits -C NESD -W slc
##############################################
#Note:
For all workflows, coregistration can be done using geometry only, or using geometry plus refined azimuth offsets estimated through the NESD approach.
Existing workflows: slc, interferogram, correlation, offset
"""
class customArgparseAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
'''
The action to be performed.
'''
print(helpstr)
parser.exit()
def createParser():
parser = argparse.ArgumentParser(description='Preparing the directory structure and config files for stack processing of Sentinel data')
parser.add_argument('-H','--hh', nargs=0, action=customArgparseAction,
help='Display detailed help information.')
parser.add_argument('-s', '--slc_directory', dest='slc_dirname', type=str, required=True,
help='Directory with all Sentinel SLCs')
parser.add_argument('-o', '--orbit_directory', dest='orbit_dirname', type=str, required=True,
help='Directory with all orbits')
parser.add_argument('-a', '--aux_directory', dest='aux_dirname', type=str, required=True,
help='Directory with all aux files')
parser.add_argument('-w', '--working_directory', dest='work_dir', type=str, default='./',
help='Working directory (default: %(default)s).')
parser.add_argument('-d', '--dem', dest='dem', type=str, required=True,
help='Path of the DEM file')
parser.add_argument('-m', '--reference_date', dest='reference_date', type=str, default=None,
help='Reference date of the stack, e.g. 20141007 (default: the first date of the stack).')
parser.add_argument('-c','--num_connections', dest='num_connections', type=str, default = '1',
help='number of interferograms between each date and subsequent dates (default: %(default)s).')
parser.add_argument('-n', '--swath_num', dest='swath_num', type=str, default='1 2 3',
help="A list of swaths to be processed. -- Default : '1 2 3'")
parser.add_argument('-b', '--bbox', dest='bbox', type=str, default=None,
help="Lat/Lon Bounding SNWE. -- Example : '19 20 -99.5 -98.5' -- Default : common overlap between stack")
parser.add_argument('-x', '--exclude_dates', dest='exclude_dates', type=str, default=None,
help="List of the dates to be excluded for processing. -- Example : '20141007,20141031' (default: %(default)s).")
parser.add_argument('-i', '--include_dates', dest='include_dates', type=str, default=None,
help="List of the dates to be included for processing. -- Example : '20141007,20141031' (default: %(default)s).")
parser.add_argument('--start_date', dest='startDate', type=str, default=None,
help='Start date for stack processing. Acquisitions before start date are ignored. '
'format should be YYYY-MM-DD e.g., 2015-01-23')
parser.add_argument('--stop_date', dest='stopDate', type=str, default=None,
help='Stop date for stack processing. Acquisitions after stop date are ignored. '
'format should be YYYY-MM-DD e.g., 2017-02-26')
parser.add_argument('-z', '--azimuth_looks', dest='azimuthLooks', type=str, default='3',
help='Number of looks in azimuth for interferogram multi-looking (default: %(default)s).')
parser.add_argument('-r', '--range_looks', dest='rangeLooks', type=str, default='9',
help='Number of looks in range for interferogram multi-looking (default: %(default)s).')
parser.add_argument('-f', '--filter_strength', dest='filtStrength', type=str, default='0.5',
help='Filter strength for interferogram filtering (default: %(default)s).')
parser.add_argument('--snr_misreg_threshold', dest='snrThreshold', type=str, default='10',
help='SNR threshold for estimating range misregistration using cross correlation (default: %(default)s).')
parser.add_argument('-p', '--polarization', dest='polarization', type=str, default='vv',
help='SAR data polarization (default: %(default)s).')
parser.add_argument('-C', '--coregistration', dest='coregistration', type=str, default='NESD', choices=['geometry', 'NESD'],
help='Coregistration options (default: %(default)s).')
parser.add_argument('-O','--num_overlap_connections', dest='num_overlap_connections', type=str, default = '3',
help='number of overlap interferograms between each date and subsequent dates used for NESD computation '
'(for azimuth offsets misregistration) (default: %(default)s).')
parser.add_argument('-e', '--esd_coherence_threshold', dest='esdCoherenceThreshold', type=str, default='0.85',
help='Coherence threshold for estimating azimuth misregistration using enhanced spectral diversity (default: %(default)s).')
parser.add_argument('-W', '--workflow', dest='workflow', type=str, default='interferogram',
choices=['slc', 'correlation', 'interferogram', 'offset'],
help='The InSAR processing workflow (default: %(default)s).')
# unwrap
parser.add_argument('-u', '--unw_method', dest='unwMethod', type=str, default='snaphu', choices=['icu', 'snaphu'],
help='Unwrapping method (default: %(default)s).')
parser.add_argument('-rmFilter', '--rmFilter', dest='rmFilter', action='store_true', default=False,
help='Make an extra unwrapped file in which the filtering effect is removed')
# ionospheric correction
parser.add_argument('--param_ion', dest='param_ion', type=str, default=None,
help='Ionosphere estimation parameter file. If provided, ionosphere estimation will be performed.')
parser.add_argument('--num_connections_ion', dest='num_connections_ion', type=str, default = '3',
help='number of interferograms between each date and subsequent dates for ionosphere estimation (default: %(default)s).')
# computing
compute = parser.add_argument_group('Computing options')
compute.add_argument('-useGPU', '--useGPU', dest='useGPU',action='store_true', default=False,
help='Allow App to use GPU when available')
compute.add_argument('--num_proc', '--num_process', dest='numProcess', type=int, default=1,
help='number of tasks running in parallel in each run file (default: %(default)s).')
compute.add_argument('--num_proc4topo', '--num_process4topo', dest='numProcess4topo', type=int, default=1,
help='number of parallel processes (for topo only) (default: %(default)s).')
compute.add_argument('-t', '--text_cmd', dest='text_cmd', type=str, default='',
help="text command to be added to the beginning of each line of the run files (default: '%(default)s'). "
"Example : 'source ~/.bash_profile;'")
compute.add_argument('-V', '--virtual_merge', dest='virtualMerge', type=str, default=None, choices=['True', 'False'],
help='Use virtual files for the merged SLCs and geometry files.\n'
'Default: True for correlation / interferogram workflow\n'
' False for slc / offset workflow')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
inps = parser.parse_args(args=iargs)
inps.slc_dirname = os.path.abspath(inps.slc_dirname)
inps.orbit_dirname = os.path.abspath(inps.orbit_dirname)
inps.aux_dirname = os.path.abspath(inps.aux_dirname)
inps.work_dir = os.path.abspath(inps.work_dir)
inps.dem = os.path.abspath(inps.dem)
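# note: if --num_proc/--num_process is given but --num_proc4topo/--num_process4topo is not,
# the same value is reused for the topo step (see the check below)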
# normalize iargs so the raw-flag checks below also work when cmdLineParse() is called without arguments
if iargs is None:
iargs = sys.argv[1:]
if any(i in iargs for i in ['--num_proc', '--num_process']) and all(
i not in iargs for i in ['--num_proc4topo', '--num_process4topo']):
inps.numProcess4topo = inps.numProcess
return inps
def generate_geopolygon(bbox):
"""generate shapely Polygon"""
from shapely.geometry import Point, Polygon
# convert pnts to shapely polygon format
# the order of pnts is counter-clockwise, starting from the lower-left corner
# the order for Point is (lon, lat)
points = [Point(bbox[i][0], bbox[i][1]) for i in range(4)]
return Polygon([(p.coords.xy[0][0], p.coords.xy[1][0]) for p in points])
####################################
def get_dates(inps):
# Given the SLC directory, this function extracts the acquisition dates
# and prepares a dictionary of Sentinel-1 SLC files such that keys are
# acquisition dates and values are instances of the sentinelSLC class
# defined in Stack.py
if inps.bbox is not None:
bbox = [float(val) for val in inps.bbox.split()]
bbox_poly = np.array([[bbox[2],bbox[0]],[bbox[3],bbox[0]],[bbox[3],bbox[1]],[bbox[2],bbox[1]]])
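# a worked example of the corner ordering, using the bbox from the help examples above:
#   inps.bbox = '19 20 -99.5 -98.5'  (S N W E)
#   -> bbox_poly = [[-99.5, 19], [-98.5, 19], [-98.5, 20], [-99.5, 20]]
#   i.e. (lon, lat) corners counter-clockwise from the lower-left corner,
#   matching the ordering expected by generate_geopolygon()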
if inps.exclude_dates is not None:
excludeList = inps.exclude_dates.split(',')
else:
excludeList = []
if inps.include_dates is not None:
includeList = inps.include_dates.split(',')
else:
includeList = []
if os.path.isfile(inps.slc_dirname):
print('reading SAFE files from: ' + inps.slc_dirname)
SAFE_files = []
for line in open(inps.slc_dirname):
SAFE_files.append(str.replace(line,'\n','').strip())
else:
SAFE_files = sorted(glob.glob(os.path.join(inps.slc_dirname, 'S1*_IW_SLC*zip'))) # changed to zip file by Minyan Zhong
if SAFE_files == []:
SAFE_files = sorted(glob.glob(os.path.join(inps.slc_dirname, 'S1*_IW_SLC*SAFE')))
if len(SAFE_files) == 0:
raise Exception('No SAFE file found')
elif len(SAFE_files) == 1:
raise Exception('At least two SAFE files are required. Only one SAFE file found.')
else:
print ("Number of SAFE files found: "+str(len(SAFE_files)))
if inps.startDate is not None:
stackStartDate = datetime.datetime(*time.strptime(inps.startDate, "%Y-%m-%d")[0:6])
else:
#if startDate is None, let's fix it to the first JPL satellite launch date :)
stackStartDate = datetime.datetime(*time.strptime("1958-01-31", "%Y-%m-%d")[0:6])
if inps.stopDate is not None:
stackStopDate = datetime.datetime(*time.strptime(inps.stopDate, "%Y-%m-%d")[0:6])
else:
stackStopDate = datetime.datetime(*time.strptime("2158-01-31", "%Y-%m-%d")[0:6])
################################
# write the list of SAFE files that will be used to a text file:
f = open('SAFE_files.txt','w')
safe_count=0
safe_dict={}
for safe in SAFE_files:
safeObj=sentinelSLC(safe)
safeObj.get_dates()
if safeObj.start_date_time < stackStartDate or safeObj.start_date_time > stackStopDate:
excludeList.append(safeObj.date)
continue
safeObj.get_orbit(inps.orbit_dirname, inps.work_dir)
# check whether this SAFE file is needed to cover the BBOX
reject_SAFE=False
if safeObj.date not in excludeList and inps.bbox is not None:
reject_SAFE=True
pnts = safeObj.getkmlQUAD(safe)
# process pnts to use generate_geopolygon function
pnts_bbox = np.empty((4,2))
count = 0
for pnt in pnts:
pnts_bbox[count, 0] = float(pnt.split(',')[0]) # longitude
pnts_bbox[count, 1] = float(pnt.split(',')[1]) # latitude
count += 1
pnts_polygon = generate_geopolygon(pnts_bbox)
bbox_polygon = generate_geopolygon(bbox_poly)
# check whether these two polygons intersect
overlap_flag = pnts_polygon.intersects(bbox_polygon)
if overlap_flag:
reject_SAFE = False
else:
reject_SAFE = True
if not reject_SAFE:
if safeObj.date not in safe_dict.keys() and safeObj.date not in excludeList:
safe_dict[safeObj.date]=safeObj
elif safeObj.date not in excludeList:
safe_dict[safeObj.date].safe_file = safe_dict[safeObj.date].safe_file + ' ' + safe
# write the SAFE file as it will be used
f.write(safe + '\n')
safe_count += 1
# close the SAFE file list (SAFE_files.txt)
f.close()
print ("Number of SAFE files to be used (cover BBOX): "+str(safe_count))
################################
dateList = [key for key in safe_dict.keys()]
dateList.sort()
print ("*****************************************")
print ("Number of dates : " +str(len(dateList)))
print ("List of dates : ")
print (dateList)
################################
#get the overlap lat and lon bounding box
S=[]
N=[]
W=[]
E=[]
safe_dict_bbox={}
safe_dict_bbox_finclude={}
safe_dict_finclude={}
safe_dict_frameGAP={}
print ('date south north')
for date in dateList:
#safe_dict[date].get_lat_lon()
safe_dict[date].get_lat_lon_v2()
#safe_dict[date].get_lat_lon_v3(inps)
S.append(safe_dict[date].SNWE[0])
N.append(safe_dict[date].SNWE[1])
W.append(safe_dict[date].SNWE[2])
E.append(safe_dict[date].SNWE[3])
print (date , safe_dict[date].SNWE[0],safe_dict[date].SNWE[1])
if inps.bbox is not None:
if safe_dict[date].SNWE[0] <= bbox[0] and safe_dict[date].SNWE[1] >= bbox[1]:
safe_dict_bbox[date] = safe_dict[date]
safe_dict_bbox_finclude[date] = safe_dict[date]
elif date in includeList:
safe_dict_finclude[date] = safe_dict[date]
safe_dict_bbox_finclude[date] = safe_dict[date]
# tracking dates for which there seems to be a gap in coverage
if not safe_dict[date].frame_nogap:
safe_dict_frameGAP[date] = safe_dict[date]
print ("*****************************************")
print ("The overlap region among all dates (based on the preview kml files):")
print (" South North East West ")
print (max(S),min(N),max(W),min(E))
print ("*****************************************")
if max(S) > min(N):
print ("""WARNING:
There might not be overlap between some dates""")
print ("*****************************************")
################################
print ('All dates (' + str(len(dateList)) + ')')
print (dateList)
print("")
if inps.bbox is not None:
safe_dict = safe_dict_bbox
dateList = [key for key in safe_dict.keys()]
dateList.sort()
print ('dates covering the bbox (' + str(len(dateList)) + ')' )
print (dateList)
print("")
if len(safe_dict_finclude)>0:
# updating the dateList that will be used for those dates that are force-included
# but do not cover the BBOX completely
safe_dict = safe_dict_bbox_finclude
dateList = [key for key in safe_dict.keys()]
dateList.sort()
# sorting the dates of the forced include
dateListFinclude = [key for key in safe_dict_finclude.keys()]
print('dates forced included (do not cover the bbox completely, ' + str(len(dateListFinclude)) + ')')
print(dateListFinclude)
print("")
# report any potential gaps in frame coverage
if len(safe_dict_frameGAP)>0:
dateListframeGAP = [key for key in safe_dict_frameGAP.keys()]
print('dates for which it looks like there are missing frames')
print(dateListframeGAP)
print("")
if inps.reference_date is None:
if len(dateList)<1:
print('*************************************')
print('Error:')
print('No acquisition fulfills the temporal range and bbox requirements.')
sys.exit(1)
inps.reference_date = dateList[0]
print ("No reference date was given. The first date is used as the reference date.")
print ("")
print ("All SLCs will be coregistered to : " + inps.reference_date)
secondaryList = [key for key in safe_dict.keys()]
secondaryList.sort()
secondaryList.remove(inps.reference_date)
print ("secondary dates :")
print (secondaryList)
print ("")
return dateList, inps.reference_date, secondaryList, safe_dict
def selectNeighborPairs(dateList, stackReferenceDate, secondaryDates, num_connections, updateStack=False):
"""Select nearest neighbor acquisitions to form seqential pairs."""
pairs = []
if updateStack:
# use the secondaryDates (new acquisitions), instead of the entire list of dates
print('\nUpdating an existing stack ...\n')
# include the reference date for pairing if it is among the most recent acquisitions
dateList = sorted(secondaryDates + [stackReferenceDate])[1:]
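# hypothetical example: stackReferenceDate = '20200101', secondaryDates = ['20230105', '20230117']
# -> sorted(...)[1:] drops the oldest entry, so the old reference date only stays in the
#    pairing list when it is itself among the most recent acquisitions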
num_date = len(dateList)
# translate num_connections input
if num_connections == 'all':
num_connections = len(dateList) - 1
else:
num_connections = int(num_connections)
# selecting nearest pairs based on dateList and num_connections
num_connections = num_connections + 1
for i in range(num_date-1):
for j in range(i+1, i+num_connections):
if j < num_date:
pairs.append((dateList[i], dateList[j]))
print('selecting pairs with {} nearest neighbor connections: {}'.format(num_connections-1, len(pairs)))
return pairs
def selectNeighborPairsIonosphere(safe_dict, num_connections):
'''
safe_dict: dictionary returned by get_dates(inps)
num_connections: number of subsequent dates to pair up with a date
This routine first groups the dates: dates with the same starting ranges are put in a group.
Pairs within the same group are returned in pairs_same_starting_ranges.
Pairs connecting different groups are returned in pairs_diff_starting_ranges.
'''
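# a small illustration with hypothetical dates and starting ranges, num_connections = 1:
#   starting ranges: d1: 800 km, d2: 800 km, d3: 803 km, d4: 803 km (dates sorted d1 < d2 < d3 < d4)
#   -> groups: [d1, d2] and [d3, d4]
#   -> pairs_same_starting_ranges: (d1, d2), (d3, d4)
#   -> pairs_diff_starting_ranges: (d2, d3), i.e. one connection bridging the two groups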
#get starting ranges
for date in safe_dict:
safe_dict[date].get_starting_ranges()
#get sorted dateList
dateList = [key for key in safe_dict.keys()]
dateList.sort()
ndate = len(dateList)
#starting ranges sorted by date
starting_ranges = [safe_dict[date].startingRanges for date in dateList]
#get unique starting ranges sorted by date
starting_ranges_unique = []
for i in range(ndate):
if starting_ranges[i] not in starting_ranges_unique:
starting_ranges_unique.append(starting_ranges[i])
ndate_unique = len(starting_ranges_unique)
#put dates of same starting ranges in a list
#result is a 2-D list; each sub-list is sorted by date
starting_ranges_unique_dates = [[] for i in range(ndate_unique)]
for k in range(ndate_unique):
for i in range(ndate):
if starting_ranges_unique[k] == safe_dict[dateList[i]].startingRanges:
starting_ranges_unique_dates[k].append(dateList[i])
#print(starting_ranges_unique_dates)
if num_connections == 'all':
num_connections = ndate - 1
else:
num_connections = int(num_connections)
#1. form all possible pairs, to be used in 3
pairs_same_starting_ranges_0 = []
pairs_diff_starting_ranges_0 = []
for i in range(ndate-1):
for j in range(i+1, i+num_connections+1):
if j >= ndate:
continue
same_starting_ranges = False
for k in range(ndate_unique):
if dateList[i] in starting_ranges_unique_dates[k] and dateList[j] in starting_ranges_unique_dates[k]:
same_starting_ranges = True
break
if same_starting_ranges == True:
pairs_same_starting_ranges_0.append((dateList[i],dateList[j]))
else:
pairs_diff_starting_ranges_0.append((dateList[i],dateList[j]))
#2. form pairs of same starting ranges
pairs_same_starting_ranges = []
for k in range(ndate_unique):
ndate_unique_k = len(starting_ranges_unique_dates[k])
for i in range(ndate_unique_k):
for j in range(i+1, i+num_connections+1):
if j >= ndate_unique_k:
continue
pairs_same_starting_ranges.append((starting_ranges_unique_dates[k][i],starting_ranges_unique_dates[k][j]))
#3. select pairs of diff starting ranges formed in 1 to connect the different starting ranges
pairs_diff_starting_ranges = []
for k in range(ndate_unique-1):
cnt = 0
for pair in pairs_diff_starting_ranges_0:
if (pair[0] in starting_ranges_unique_dates[k] and pair[1] in starting_ranges_unique_dates[k+1]) or \
(pair[1] in starting_ranges_unique_dates[k] and pair[0] in starting_ranges_unique_dates[k+1]):
pairs_diff_starting_ranges.append(pair)
cnt += 1
if cnt >= num_connections:
break
return pairs_same_starting_ranges, pairs_diff_starting_ranges
def excludeExistingPairsIonosphere(pairs_same_starting_ranges, pairs_diff_starting_ranges, work_dir):
'''
This routine searches for existing pairs for ionosphere estimation and excludes them from
pairs_same_starting_ranges and pairs_diff_starting_ranges.
'''
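# assumption: existing pair directories under work_dir/ion are named 'date1_date2'
# (e.g. '20141007_20141031'), which is what the tuple-splitting below relies on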
if os.path.isdir(os.path.join(work_dir, 'ion')):
print('previous ionosphere estimation directory found')
print('exclude already processed pairs for ionosphere estimation')
pairs = [os.path.basename(p) for p in glob.glob(os.path.join(work_dir, 'ion', '*')) if os.path.isdir(p)]
pairs.sort()
pairs = [tuple(p.split('_')) for p in pairs]
pairs_same_starting_ranges_update = [p for p in pairs_same_starting_ranges if p not in pairs]
pairs_diff_starting_ranges_update = [p for p in pairs_diff_starting_ranges if p not in pairs]
else:
pairs_same_starting_ranges_update = pairs_same_starting_ranges
pairs_diff_starting_ranges_update = pairs_diff_starting_ranges
return pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update
def getDatesIonosphere(pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update):
'''
This routine gets all dates associated with ionosphere estimation from
pairs_same_starting_ranges_update and pairs_diff_starting_ranges_update
'''
dateListIon = []
for pairs in (pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update):
for p in pairs:
if p[0] not in dateListIon:
dateListIon.append(p[0])
if p[1] not in dateListIon:
dateListIon.append(p[1])
dateListIon.sort()
return dateListIon
def checkCurrentStatusIonosphere(inps):
#get_dates can be run multiple times anywhere; it only depends on the inps parameters and the SAFE files, nothing else
acquisitionDates, stackReferenceDate, secondaryDates, safe_dict = get_dates(inps)
pairs_same_starting_ranges, pairs_diff_starting_ranges = selectNeighborPairsIonosphere(safe_dict, inps.num_connections_ion)
pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update = excludeExistingPairsIonosphere(pairs_same_starting_ranges, pairs_diff_starting_ranges, inps.work_dir)
dateListIon = getDatesIonosphere(pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update)
#report pairs of different swath starting ranges.
pdiff = 'ionosphere phase estimation pairs with different swath starting ranges\n'
for p in pairs_diff_starting_ranges:
pdiff += '{}_{}\n'.format(p[0], p[1])
pdiff += '\nionosphere phase estimation pairs with different platforms\n'
for p in pairs_same_starting_ranges+pairs_diff_starting_ranges:
if safe_dict[p[0]].platform != safe_dict[p[1]].platform:
pdiff += '{}_{}\n'.format(p[0], p[1])
with open('pairs_diff_starting_ranges.txt', 'w') as f:
f.write(pdiff)
return dateListIon, pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update, safe_dict
########################################
# Below are a few workflow examples.
def slcStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, updateStack, mergeSLC=False):
#############################
i=0
if not updateStack:
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_unpack_topo_reference'.format(i))
runObj.unpackStackReferenceSLC(safe_dict)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_unpack_secondary_slc'.format(i))
runObj.unpackSecondarysSLC(stackReferenceDate, secondaryDates, safe_dict)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_average_baseline'.format(i))
runObj.averageBaseline(stackReferenceDate, secondaryDates)
runObj.finalize()
if inps.coregistration in ['NESD', 'nesd']:
if not updateStack:
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_extract_burst_overlaps'.format(i))
runObj.extractOverlaps()
runObj.finalize()
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_overlap_geo2rdr'.format(i))
runObj.geo2rdr_offset(secondaryDates)
runObj.finalize()
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_overlap_resample'.format(i))
runObj.resample_with_carrier(secondaryDates)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_pairs_misreg'.format(i))
if updateStack:
runObj.pairs_misregistration(secondaryDates, safe_dict)
else:
runObj.pairs_misregistration(acquisitionDates, safe_dict)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_timeseries_misreg'.format(i))
runObj.timeseries_misregistration()
runObj.finalize()
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_fullBurst_geo2rdr'.format(i))
runObj.geo2rdr_offset(secondaryDates, fullBurst='True')
runObj.finalize()
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_fullBurst_resample'.format(i))
runObj.resample_with_carrier(secondaryDates, fullBurst='True')
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_extract_stack_valid_region'.format(i))
runObj.extractStackValidRegion()
runObj.finalize()
if mergeSLC:
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_merge_reference_secondary_slc'.format(i))
runObj.mergeReference(stackReferenceDate, virtual = 'False')
runObj.mergeSecondarySLC(secondaryDates, virtual = 'False')
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_grid_baseline'.format(i))
runObj.gridBaseline(stackReferenceDate, secondaryDates)
runObj.finalize()
return i
def correlationStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, pairs, updateStack):
i = slcStack(inps, acquisitionDates,stackReferenceDate, secondaryDates, safe_dict, updateStack)
# default value of virtual_merge
virtual_merge = 'True' if not inps.virtualMerge else inps.virtualMerge
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_merge_reference_secondary_slc'.format(i))
runObj.mergeReference(stackReferenceDate, virtual = virtual_merge)
runObj.mergeSecondarySLC(secondaryDates, virtual = virtual_merge)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_merge_burst_igram'.format(i))
runObj.burstIgram_mergeBurst(acquisitionDates, safe_dict, pairs)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_filter_coherence'.format(i))
runObj.filter_coherence(pairs)
runObj.finalize()
return i
def interferogramStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, pairs, updateStack):
i = slcStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, updateStack)
# default value of virtual_merge
virtual_merge = 'True' if not inps.virtualMerge else inps.virtualMerge
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_merge_reference_secondary_slc'.format(i))
runObj.mergeReference(stackReferenceDate, virtual = virtual_merge)
runObj.mergeSecondarySLC(secondaryDates, virtual = virtual_merge)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_generate_burst_igram'.format(i))
runObj.generate_burstIgram(acquisitionDates, safe_dict, pairs)
runObj.finalize()
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_merge_burst_igram'.format(i))
runObj.igram_mergeBurst(acquisitionDates, safe_dict, pairs)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_filter_coherence'.format(i))
runObj.filter_coherence(pairs)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_unwrap'.format(i))
runObj.unwrap(pairs)
runObj.finalize()
return i
def offsetStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, pairs, updateStack):
i = slcStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, updateStack)
# default value of virtual_merge
virtual_merge = 'False' if not inps.virtualMerge else inps.virtualMerge
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_merge_reference_secondary_slc'.format(i))
runObj.mergeReference(stackReferenceDate, virtual = virtual_merge)
runObj.mergeSecondarySLC(secondaryDates, virtual = virtual_merge)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_dense_offsets'.format(i))
runObj.denseOffsets(pairs)
runObj.finalize()
return i
def ionosphereStack(inps, dateListIon, stackReferenceDate, pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update, safe_dict, i):
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_subband_and_resamp'.format(i))
runObj.subband_and_resamp(dateListIon, stackReferenceDate)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_generateIgram_ion'.format(i))
runObj.generateIgram_ion(pairs_same_starting_ranges_update+pairs_diff_starting_ranges_update, stackReferenceDate)
runObj.finalize()
i += 1
runObj = run()
runObj.configure(inps, 'run_{:02d}_mergeBurstsIon'.format(i))
runObj.mergeBurstsIon(pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_unwrap_ion'.format(i))
runObj.unwrap_ion(pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_look_ion'.format(i))
runObj.look_ion(pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_computeIon'.format(i))
runObj.computeIon(pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update, safe_dict)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_filtIon'.format(i))
runObj.filtIon(pairs_same_starting_ranges_update + pairs_diff_starting_ranges_update)
runObj.finalize()
i+=1
runObj = run()
runObj.configure(inps, 'run_{:02d}_invertIon'.format(i))
runObj.invertIon()
runObj.finalize()
return i
def checkCurrentStatus(inps):
acquisitionDates, stackReferenceDate, secondaryDates, safe_dict = get_dates(inps)
coregSLCDir = os.path.join(inps.work_dir, 'coreg_secondarys')
stackUpdate = False
if os.path.exists(coregSLCDir):
coregSecondarys = glob.glob(os.path.join(coregSLCDir, '[0-9]???[0-9]?[0-9]?'))
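# the glob pattern above is intended to match 8-character date directory names (YYYYMMDD)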
coregSLC = [os.path.basename(slv) for slv in coregSecondarys]
coregSLC.sort()
if len(coregSLC)>0:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('\nAn existing stack with the following coregistered SLCs was found:')
print(coregSLC)
print('\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
else:
pass
newAcquisitions = list(set(secondaryDates).difference(set(coregSLC)))
newAcquisitions.sort()
if len(newAcquisitions)>0:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('\nNew acquisitions were found: ')
print(newAcquisitions)
print('\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
else:
print(' ********************************* ')
print(' ***************** ')
print(' ********* ')
print('Warning:')
print('The stack already exists in: {}.'.format(coregSLCDir))
print('No new acquisition found to update the stack.')
print('')
print(' ********* ')
print(' ***************** ')
print(' ********************************* ')
sys.exit(1)
if inps.coregistration in ['NESD','nesd']:
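# assumption: with NESD coregistration, the most recent 2*num_overlap_connections coregistered
# SLCs are re-processed so that overlap pairs tying the new acquisitions to the existing
# time series can be formed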
numSLCReprocess = 2*int(inps.num_overlap_connections)
if numSLCReprocess > len(secondaryDates):
numSLCReprocess = len(secondaryDates)
latestCoregSLCs = coregSLC[-1*numSLCReprocess:]
latestCoregSLCs_original = list(set(secondaryDates).intersection(set(latestCoregSLCs)))
if len(latestCoregSLCs_original) < numSLCReprocess:
raise Exception('The original SAFE files for the latest {0} coregistered SLCs are needed'.format(numSLCReprocess))
else: # added by Minyan Zhong; should be changed later, as numSLCReprocess should be 0
numSLCReprocess = int(inps.num_connections)
if numSLCReprocess > len(secondaryDates):
numSLCReprocess = len(secondaryDates)
latestCoregSLCs = coregSLC[-1*numSLCReprocess:]
latestCoregSLCs_original = list(set(secondaryDates).intersection(set(latestCoregSLCs)))
if len(latestCoregSLCs_original) < numSLCReprocess:
raise Exception('The original SAFE files for the latest {0} coregistered SLCs are needed'.format(numSLCReprocess))
print ('Last {0} coregistered SLCs to be updated: '.format(numSLCReprocess), latestCoregSLCs)
secondaryDates = latestCoregSLCs + newAcquisitions
secondaryDates.sort()
acquisitionDates = secondaryDates.copy()
acquisitionDates.append(stackReferenceDate)
acquisitionDates.sort()
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('')
print('acquisitions used in this update: ')
print('')
print(acquisitionDates)
print('')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('')
print('stack reference:')
print('')
print(stackReferenceDate)
print('')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('')
print('secondary acquisitions to be processed: ')
print('')
print(secondaryDates)
print('')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
safe_dict_new={}
for d in acquisitionDates:
safe_dict_new[d] = safe_dict[d]
safe_dict = safe_dict_new
stackUpdate = True
else:
print('No existing stack was identified. A new stack will be generated.')
return acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, stackUpdate
def main(iargs=None):
inps = cmdLineParse(iargs)
if os.path.exists(os.path.join(inps.work_dir, 'run_files')):
print('')
print('**************************')
print('run_files folder exists.')
print(os.path.join(inps.work_dir, 'run_files'), ' already exists.')
print('Please remove or rename this folder and try again.')
print('')
print('**************************')
sys.exit(1)
acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, updateStack = checkCurrentStatus(inps)
# selecting pairs for interferograms / correlation / offset workflows
if inps.workflow != 'slc':
pairs = selectNeighborPairs(acquisitionDates, stackReferenceDate, secondaryDates, inps.num_connections, updateStack)
print ('*****************************************')
print ('Coregistration method: ', inps.coregistration )
print ('Workflow: ', inps.workflow)
print ('*****************************************')
if inps.workflow == 'interferogram':
i = interferogramStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, pairs, updateStack)
elif inps.workflow == 'offset':
i = offsetStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, pairs, updateStack)
elif inps.workflow == 'correlation':
i = correlationStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, pairs, updateStack)
elif inps.workflow == 'slc':
i = slcStack(inps, acquisitionDates, stackReferenceDate, secondaryDates, safe_dict, updateStack, mergeSLC=True)
#do ionosphere estimation
if inps.param_ion is not None:
dateListIon, pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update, safe_dict = checkCurrentStatusIonosphere(inps)
i = ionosphereStack(inps, dateListIon, stackReferenceDate, pairs_same_starting_ranges_update, pairs_diff_starting_ranges_update, safe_dict, i)
if __name__ == "__main__":
# Main engine
main(sys.argv[1:])