Proposed changes to dloadOrbits.py to reflect the new orbit file download links (#300)

I've edited dloadOrbits.py to reflect the new orbit file download links. This may also address issue #299 (https://github.com/isce-framework/isce2/issues/299).
Bryan Marfito 2021-07-22 23:50:46 +08:00 committed by GitHub
parent 3928c3bdb7
commit 5297f385fd
1 changed file with 46 additions and 52 deletions

@@ -7,17 +7,16 @@ import glob
import requests
from html.parser import HTMLParser
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
fmt = '%Y%m%d'
today = datetime.datetime.now().strftime(fmt)
server = 'https://aux.sentinel1.eo.esa.int/'
server = 'https://scihub.copernicus.eu/gnss/'
queryfmt = '%Y-%m-%d'
datefmt = '%Y%m%dT%H%M%S'
#Generic credentials to query and download orbit files
credentials = ('gnssguest', 'gnssguest')
S1Astart = '20140901'
S1Astart_dt = datetime.datetime.strptime(S1Astart, '%Y%m%d')
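
For context, the hunk above swaps the old `aux.sentinel1.eo.esa.int` server for the Copernicus GNSS hub and introduces the generic `gnssguest` credentials. Below is a minimal sketch (not part of the commit) of authenticating a search request against the new endpoint; the query string here is only a broad example.

```python
import requests

server = 'https://scihub.copernicus.eu/gnss/'
credentials = ('gnssguest', 'gnssguest')   # generic GNSS hub guest account

# A deliberately broad example query; the script builds a date-bounded one later.
r = requests.get(server + 'search?q=producttype:AUX_POEORB',
                 auth=credentials, verify=True)
r.raise_for_status()
print(r.status_code)   # 200 when the guest credentials are accepted
```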
@@ -29,7 +28,7 @@ def cmdLineParse():
'''
Automated download of orbits.
'''
parser = argparse.ArgumentParser('S1A orbit downloader')
parser = argparse.ArgumentParser('S1A and 1B AUX_POEORB precise orbit downloader')
parser.add_argument('--start', '-b', dest='start', type=str, default=S1Astart, help='Start date')
parser.add_argument('--end', '-e', dest='end', type=str, default=today, help='Stop date')
parser.add_argument('--dir', '-d', dest='dirname', type=str, default='.', help='Directory with precise orbits')
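
The parser description is the only change in this hunk; the arguments themselves are untouched. A quick standalone sketch of the updated parser, with illustrative dates and output directory:

```python
import argparse
import datetime

fmt = '%Y%m%d'
S1Astart = '20140901'                          # Sentinel-1A default start date
today = datetime.datetime.now().strftime(fmt)

parser = argparse.ArgumentParser('S1A and 1B AUX_POEORB precise orbit downloader')
parser.add_argument('--start', '-b', dest='start', type=str, default=S1Astart, help='Start date')
parser.add_argument('--end', '-e', dest='end', type=str, default=today, help='Stop date')
parser.add_argument('--dir', '-d', dest='dirname', type=str, default='.', help='Directory with precise orbits')

# Example invocation with hypothetical values
args = parser.parse_args(['--start', '20210701', '--end', '20210703', '--dir', './orbits'])
print(args.start, args.end, args.dirname)
```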
@@ -99,9 +98,9 @@ def download_file(url, outdir='.', session=None):
if session is None:
session = requests.session()
path = os.path.join(outdir, os.path.basename(url))
path = outdir
print('Downloading URL: ', url)
request = session.get(url, stream=True, verify=False)
request = session.get(url, stream=True, verify=True, auth=credentials)
try:
request.raise_for_status()
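
To summarise the download_file() changes: the caller now passes the full output path instead of a directory, TLS verification is turned back on, and the guest credentials are sent with the request. A self-contained sketch of that behaviour follows; the chunked write is assumed from the surrounding file, not shown in this hunk.

```python
import requests

credentials = ('gnssguest', 'gnssguest')

def download_file(url, path, session=None):
    """Stream one orbit file from the GNSS hub to the given output path."""
    session = session or requests.session()
    print('Downloading URL: ', url)
    request = session.get(url, stream=True, verify=True, auth=credentials)
    request.raise_for_status()
    with open(path, 'wb') as f:
        for chunk in request.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return path
```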
@@ -124,35 +123,22 @@ class MyHTMLParser(HTMLParser):
def __init__(self,url):
HTMLParser.__init__(self)
self.fileList = []
self.in_td = False
self.in_a = False
self.in_table = False
self._url = url
def handle_starttag(self, tag, attrs):
if tag == 'td':
self.in_td = True
elif tag == 'a':
self.in_a = True
for name, val in attrs:
if name == "href":
if val.startswith("http"):
self._url = val.strip()
if name == 'href':
if val.startswith("https://scihub.copernicus.eu/gnss/odata") and val.endswith(")/"):
pass
else:
downloadLink = val.strip()
downloadLink = downloadLink.split("/Products('Quicklook')")
downloadLink = downloadLink[0] + downloadLink[-1]
self._url = downloadLink
def handle_data(self, data):
if self.in_td and self.in_a:
if ('S1A_OPER' in data) or ('S1B_OPER' in data):
# print(data.strip())
if data.startswith("S1") and data.endswith(".EOF"):
self.fileList.append((self._url, data.strip()))
print(self._url, data.strip())
def handle_tag(self, tag):
if tag == 'td':
self.in_td = False
self.in_a = False
elif tag == 'a':
self.in_a = False
self._url = None
if __name__ == '__main__':
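
The reworked handle_starttag() keeps only href values that point at the GNSS hub OData endpoint and strips the Products('Quicklook') segment so the stored URL is the direct download link; handle_data() then records any S1*.EOF file name it sees. Here is a self-contained sketch of that logic, fed with a hand-made snippet (the product UUID and file name are invented purely for illustration):

```python
from html.parser import HTMLParser

class LinkParser(HTMLParser):
    """Minimal stand-in for MyHTMLParser's new href handling."""
    def __init__(self):
        super().__init__()
        self.fileList = []
        self._url = None

    def handle_starttag(self, tag, attrs):
        if tag != 'a':
            return
        for name, val in attrs:
            if name == 'href':
                if val.startswith("https://scihub.copernicus.eu/gnss/odata") and val.endswith(")/"):
                    pass                      # entry link, not a download link
                else:
                    # drop the Quicklook segment to obtain the direct $value link
                    parts = val.strip().split("/Products('Quicklook')")
                    self._url = parts[0] + parts[-1]

    def handle_data(self, data):
        if data.startswith("S1") and data.endswith(".EOF"):
            self.fileList.append((self._url, data.strip()))

snippet = ("<a href=\"https://scihub.copernicus.eu/gnss/odata/v1/"
           "Products('11111111-2222-3333-4444-555555555555')/Products('Quicklook')/$value\">"
           "S1A_OPER_AUX_POEORB_OPOD_20210722T121500_V20210701T225942_20210703T005942.EOF</a>")
p = LinkParser()
p.feed(snippet)
print(p.fileList)   # [(".../Products('1111...')/$value", 'S1A_OPER_AUX_POEORB_...EOF')]
```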
@@ -174,36 +160,44 @@ if __name__ == '__main__':
for dd in range(days):
indate = tstart + datetime.timedelta(days=dd, hours=12)
url = server + 'POEORB/' + str(indate.year).zfill(2) + '/' + str(indate.month).zfill(2) + '/' + str(
indate.day).zfill(2) + '/'
timebef = indate - datetime.timedelta(days=1)
timeaft = indate + datetime.timedelta(days=1)
timebef=str(timebef.strftime('%Y-%m-%d'))
timeaft = str(timeaft.strftime('%Y-%m-%d'))
url = server + 'search?q= ( beginPosition:[{0}T00:00:00.000Z TO {1}T23:59:59.999Z] AND endPosition:[{0}T00:00:00.000Z TO {1}T23:59:59.999Z] ) AND ( (platformname:Sentinel-1 AND producttype:AUX_POEORB))'.format(timebef, timeaft)
session = requests.session()
match = None
success = False
for mission in ['S1A', 'S1B']:
if not ifAlreadyExists(indate, mission, ranges):
for selectMission in ['S1A', 'S1B']:
if not ifAlreadyExists(indate, selectMission, ranges):
try:
r = session.get(url, verify=False)
r = session.get(url, verify=True, auth=credentials)
r.raise_for_status()
parser = MyHTMLParser(url)
parser.feed(r.text)
for resulturl, result in parser.fileList:
match = os.path.join(resulturl, result)
tbef, taft, mission = fileToRange(os.path.basename(result))
if selectMission==mission:
matchFileName = result
match = resulturl
if match is not None:
success = True
except:
pass
if match is not None:
download_file(match, inps.dirname, session)
output = os.path.join(inps.dirname, matchFileName)
print(output)
res = download_file(match, output, session)
else:
print('Failed to find {1} orbits for tref {0}'.format(indate, mission))
print('Failed to find {1} orbits for tref {0}'.format(indate, selectMission))
else:
print('Already exists: ', mission, indate)
print('Already exists: ', selectMission, indate)
print('Exit dloadOrbits Successfully')
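
For reference, a short sketch (using an example date) of how the per-date search URL is now assembled: instead of walking the old POEORB/year/month/day directory tree, the loop queries a one-day window on either side of the acquisition date.

```python
import datetime

server = 'https://scihub.copernicus.eu/gnss/'
indate = datetime.datetime(2021, 7, 2) + datetime.timedelta(hours=12)   # example date

timebef = (indate - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
timeaft = (indate + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

url = server + ('search?q= ( beginPosition:[{0}T00:00:00.000Z TO {1}T23:59:59.999Z] '
                'AND endPosition:[{0}T00:00:00.000Z TO {1}T23:59:59.999Z] ) '
                'AND ( (platformname:Sentinel-1 AND producttype:AUX_POEORB))').format(timebef, timeaft)
print(url)
```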