LuTan code adaptation: environment initialization (adaptation not yet done)

C-SAR
剑古敛锋 2023-11-04 23:04:53 +08:00
commit 6290ac10dd
249 changed files with 234142 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,60 @@
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
*.toc
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
x64/
.vs/
*.ipch
*.db
*.pdb
*.tlog
*.log
*.tiff
*.tif
*.jpg
Temporary*/
*.pyc

Ortho/.gitignore vendored Normal file

@@ -0,0 +1,19 @@
# Input files
Input/
Input2/
Input3/
Input4/
Input5/
# Output files
Temporary/
# Tool files to ignore
baseTool/
__pycache__/
dist/
build/
Ortho/
run_log/
*.tiff
*.tif
*.log

Ortho/OrthOne.spec Normal file

@@ -0,0 +1,49 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(
['OrthoMain.py'],
pathex=['.'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
[],
exclude_binaries=True,
name='OrthOne',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
coll = COLLECT(
exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='OrthOne',
)
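
The .spec files in this commit (this one and Ortho/OrthoMain.spec below) are PyInstaller build scripts; assuming PyInstaller is available in the build environment, the bundle is produced by pointing it at the spec file:

pyinstaller OrthOne.spec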

Ortho/Ortho.xml Normal file

@@ -0,0 +1,103 @@
<?xml version='1.0' encoding='utf-8'?>
<Root>
<TaskID>CSAR_202107275419_0001-0</TaskID>
<WorkSpace>D:\micro\LWork\</WorkSpace>
<AlgCompt>
<DataTransModel>File</DataTransModel>
<Artificial>ElementAlg</Artificial>
<AlgorithmName>Ortho_C_SAR_V2.1</AlgorithmName>
<DllName>Ortho_C_SAR_V2.1.exe</DllName>
<ChsName>正射校正</ChsName>
<AlgorithmDesc>微波卫星3-5级产品生产模型</AlgorithmDesc>
<AlgorithmAlias>Ortho-C-SAR-V2.1-1</AlgorithmAlias>
<Version>1.0</Version>
<AlgorithmClass>辐射类产品_正射校正</AlgorithmClass>
<AlgorithmLevel>4</AlgorithmLevel>
<AlgoirthmID>Ortho_中科卫星应用德清研究院_2.1</AlgoirthmID>
<Author>中科卫星应用德清研究院</Author>
<Type>景-算法</Type>
<InputTestFilePath>Ortho\\Input6</InputTestFilePath>
<InputTestFileName>
2599253_San_Francisco
</InputTestFileName>
<OutputTestFilePath>Ortho\\Output</OutputTestFilePath>
<OutputTestFileName>
</OutputTestFileName>
<jdkVersion>1.8</jdkVersion>
<algDevlanguage>python</algDevlanguage>
<Environment>
<IsCluster>0</IsCluster>
<ClusterNum>0</ClusterNum>
<OperatingSystem>Windows10</OperatingSystem>
<CPU>4核</CPU>
<Memory>8GB</Memory>
<Storage>25GB</Storage>
<NetworkCard>无需求</NetworkCard>
<Bandwidth>无需求</Bandwidth>
<GPU>无需求</GPU>
</Environment>
<Utility Satellite="GF3" Sensor="MSS" Resolution="1" />
<Inputs ParameterNum="3">
<Parameter>
<ParaName>SLC</ParaName>
<ParaChsName>SLC元文件</ParaChsName>
<Description>原始SLC各相关文件和参数</Description>
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\MicroWorkspace\LT1B\LT230919\LT1B_MONO_MYC_STRIP4_005860_E130.9_N47.7_20230327_SLC_AHV_L1A_0000086966.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
<InputType>Satellite</InputType>
<InputNum>1</InputNum>
<DateFrom>GF3A</DateFrom>
</Parameter>
<Parameter>
<ParaName>DEM</ParaName>
<ParaChsName>DEM数字高程影像</ParaChsName>
<Description>30m分辨率DEM数字高程影像tif</Description>
<ParaType>File</ParaType>
<DataType>File</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\MicroWorkspace\LT1B\LT230919\dem</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>True</EnMultipleChoice>
<Control>File</Control>
<InputType>DEM</InputType>
<InputNum>0</InputNum>
<DateFrom>DEM</DateFrom>
</Parameter>
<Parameter>
<ParaName>CorrectMethod</ParaName>
<ParaChsName>选择校正方法</ParaChsName>
<Description>1.RPC;2.RD</Description>
<ParaType>int</ParaType>
<DataType>int</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>2</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>True</EnMultipleChoice>
<Control>UploadInput</Control>
<InputType>Aux</InputType>
<InputNum>0</InputNum>
<DateFrom>Aux</DateFrom>
</Parameter>
</Inputs>
<Outputs ParameterNum="1">
<Parameter>
<ParaName>OrthoProduct</ParaName>
<ParaChsName>产品结果文件</ParaChsName>
<Description>产品结果文件</Description>
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>D:\micro\LWork\Ortho\Output\LT1B_MONO_MYC_STRIP4_005860_E130.9_N47.7_20230327_SLC_AHV_L1A_0000086966-ortho.tar.gz</ParaValue>
<MaxValue>DEFAULT</MaxValue>
<MinValue>DEFAULT</MinValue>
<OptionValue>DEFAULT</OptionValue>
<NoDataValue>DEFAULT</NoDataValue>
</Parameter>
</Outputs>
</AlgCompt>
</Root>
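
The <AlgCompt> block above is the task contract the wrapper consumes. As a minimal sketch, the input parameters can be read with the standard library alone (element names taken from this file; the project's actual reader is ManageAlgXML, imported in OrthoMain.py):

from xml.etree.ElementTree import ElementTree

tree = ElementTree()
tree.parse('Ortho.xml')
alg = tree.getroot().find('AlgCompt')
params = {p.find('ParaName').text: p.find('ParaValue').text
          for p in alg.find('Inputs').findall('Parameter')}
print(params['SLC'])            # path of the input tar.gz archive
print(params['CorrectMethod'])  # '1' = RPC, '2' = RD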

Ortho/OrthoAlg.py Normal file

File diff suppressed because it is too large

Ortho/OrthoAuxData.py Normal file

@@ -0,0 +1,415 @@
# Auxiliary data handling class for one-metre orthorectification
import time
import math
import numpy as np
from osgeo import gdal
from xml.etree.ElementTree import ElementTree
from scipy.optimize import leastsq
class OrthoAuxData:
def __init__(self):
pass
@staticmethod
def time_stamp(tm):
parts = tm.split(':') # use a name other than the built-in `list`
sec = math.ceil(float(parts[2])) # round fractional seconds up to a whole second
tm1 = parts[0] + ':' + parts[1] + ':' + str(sec)
tmArr = time.strptime(tm1, "%Y-%m-%d %H:%M:%S")
# tmArr = time.strptime(tm1, "%Y-%m-%d %H:%M:%S.%f")
ts = float(time.mktime(tmArr)) # convert to a POSIX timestamp
return ts
@staticmethod
def read_meta(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
T = []
Xs = []
Ys = []
Zs = []
Vsx = []
Vsy = []
Vsz = []
GPS_data = root.find('GPS')
for child in GPS_data:
Xs.append(float(child.find('xPosition').text))
Ys.append(float(child.find('yPosition').text))
Zs.append(float(child.find('zPosition').text))
Vsx.append(float(child.find('xVelocity').text))
Vsy.append(float(child.find('yVelocity').text))
Vsz.append(float(child.find('zVelocity').text))
tm = child.find('TimeStamp').text
ts = OrthoAuxData.time_stamp(tm)
T.append(ts)
meta_data = [Xs, Ys, Zs, Vsx, Vsy, Vsz]
return T, meta_data
@staticmethod
def read_control_points(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
center = imageinfo.find('center')
corner = imageinfo.find('corner')
ctrl_pts = [[] for i in range(2)]
ctrl_pts[0].append(float(center.find('longitude').text))
ctrl_pts[1].append(float(center.find('latitude').text))
for child in corner:
ctrl_pts[0].append(float(child.find('longitude').text))
ctrl_pts[1].append(float(child.find('latitude').text))
return ctrl_pts
@staticmethod
def read_dem(dem_resampled_path, flag=1):
in_ds = gdal.Open(dem_resampled_path)
gt = list(in_ds.GetGeoTransform())
bands_num = in_ds.RasterCount
x_size = in_ds.RasterXSize
y_size = in_ds.RasterYSize
pstn_arr = np.zeros([y_size, x_size, 3], dtype=np.float64) # np.float is removed in NumPy >= 1.24
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray(0, 0, x_size, y_size)
for y in range(y_size):
for x in range(x_size):
longitude = gt[0] + x * gt[1]
latitude = gt[3] + y * gt[5]
altitude = data[y, x]
if flag == 1:
pstn = OrthoAuxData.LLA2XYZ(longitude, latitude, altitude)
else:
pstn = [longitude, latitude, altitude]
pstn_arr[y, x, 0] = pstn[0]
pstn_arr[y, x, 1] = pstn[1]
pstn_arr[y, x, 2] = pstn[2]
del in_ds, data
return pstn_arr
@staticmethod
def read_demM(dem_resampled_path, part_cnt, r_cnt, c_cnt, flag=1):
in_ds = gdal.Open(dem_resampled_path)
gt = list(in_ds.GetGeoTransform())
bands_num = in_ds.RasterCount
x_size = in_ds.RasterXSize // part_cnt
y_size = in_ds.RasterYSize // part_cnt
x = [[i] * y_size for i in range(x_size)]
y = [[i] * x_size for i in range(y_size)]
x = np.array(x)
x = x.T
y = np.array(y)
x_off = c_cnt * x_size
y_off = r_cnt * y_size
gt[0] = gt[0] + c_cnt * x_size * gt[1]
gt[3] = gt[3] + r_cnt * y_size * gt[5]
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray(x_off, y_off, x_size, y_size)
altitude = data / 255 * 1024 # rescale 8-bit DEM values to an assumed 0-1024 m height range
longitude = gt[0] + x * gt[1]
latitude = gt[3] + y * gt[5]
if flag == 1:
pstn = OrthoAuxData.LLA2XYZM(longitude, latitude, altitude)
else:
pstn = [longitude, latitude, altitude]
del in_ds, data
return pstn
@staticmethod
def read_dem_row(dem_resampled_path, p, flag=1):
in_ds = gdal.Open(dem_resampled_path)
gt = list(in_ds.GetGeoTransform())
bands_num = in_ds.RasterCount
x_size = in_ds.RasterXSize
y_size = in_ds.RasterYSize
x = [[i] for i in range(x_size)]
x = np.array(x)
x = x.T
y = np.ones((1, x_size)) * p
x_off = 0
y_off = p
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray(x_off, y_off, x_size, 1)
altitude = data
longitude = gt[0] + x * gt[1]
latitude = gt[3] + y * gt[5]
if flag == 1:
pstn = OrthoAuxData.LLA2XYZM(longitude, latitude, altitude)
else:
pstn = [longitude, latitude, altitude]
del in_ds, data
return pstn
@staticmethod
def orbit_fitting(time_array, meta_data):
# fit orbital parameters by least squares (one cubic per state component)
T0 = (time_array[0] + time_array[len(time_array)-1]) / 2
t = []
for i in range(len(time_array)):
t.append(time_array[i]-T0)
def func(p, x):
w3, w2, w1, w0 = p
return w3*x**3 + w2*x**2 + w1*x + w0
def error(p, x, y):
return func(p, x) - y
orbital_paras = []
for j in range(len(meta_data)):
p0 = [1, 2, 3, 4]
x = np.array(t)
y = np.array(meta_data[j])
Para = leastsq(error, p0, args=(x, y))
orbital_paras.append(Para[0])
print(Para[0], Para[1])
return orbital_paras, T0
@staticmethod
def get_PRF(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
sensor = root.find('sensor')
waveParams = sensor.find('waveParams')
PRF = float(waveParams.find('wave').find('prf').text)
return PRF
@staticmethod
def get_delta_R(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
sensor = root.find('sensor')
pulseWidth = float(sensor.find('waveParams').find('wave').find('pulseWidth').text)
bandWidth = float(sensor.find('waveParams').find('wave').find('bandWidth').text)
c = 299792458 # speed of light, m/s
delta_R = c / (1000000 * 2 * bandWidth) # slant-range resolution c/(2B), with bandWidth given in MHz
return delta_R
@staticmethod
def get_doppler_rate_coef(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
processinfo = root.find('processinfo')
doppler = processinfo.find('DopplerRateValuesCoefficients')
t0 = float(processinfo.find('DopplerParametersReferenceTime').text)
r0 = float(doppler.find('r0').text)
r1 = float(doppler.find('r1').text)
r2 = float(doppler.find('r2').text)
r3 = float(doppler.find('r3').text)
r4 = float(doppler.find('r4').text)
return t0, np.array([r0, r1, r2, r3, r4]).reshape(5, 1)
@staticmethod
def get_doppler_center_coef(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
processinfo = root.find('processinfo')
doppler = processinfo.find('DopplerCentroidCoefficients')
b0 = float(doppler.find('d0').text)
b1 = float(doppler.find('d1').text)
b2 = float(doppler.find('d2').text)
return b0, b1, b2
@staticmethod
def get_lamda(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
sensor = root.find('sensor')
lamda = float(sensor.find('lamda').text) # radar wavelength; the metadata tag itself is spelled 'lamda'
return lamda
@staticmethod
def get_t0(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
tm = imageinfo.find('imagingTime').find('start').text
t0 = OrthoAuxData.time_stamp(tm)
return t0
@staticmethod
def get_start_and_end_time(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
tm0 = imageinfo.find('imagingTime').find('start').text
tm1 = imageinfo.find('imagingTime').find('end').text
starttime = OrthoAuxData.time_stamp(tm0)
endtime = OrthoAuxData.time_stamp(tm1)
return starttime, endtime
@staticmethod
def get_width_and_height(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
width = int(imageinfo.find('width').text)
height = int(imageinfo.find('height').text)
return width, height
@staticmethod
def get_R0(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
R0 = float(imageinfo.find('nearRange').text)
return R0
@staticmethod
def get_h():
h = 6.6
return h
@staticmethod
def LLA2XYZ(longitude, latitude, altitude):
'''
Convert WGS-84 geodetic coordinates (longitude/latitude/altitude) to ECEF Cartesian coordinates.
'''
# sines and cosines of latitude and longitude
cosLat = math.cos(latitude * math.pi / 180)
sinLat = math.sin(latitude * math.pi / 180)
cosLon = math.cos(longitude * math.pi / 180)
sinLon = math.sin(longitude * math.pi / 180)
# WGS-84 ellipsoid parameters
rad = 6378137.0 # equatorial radius (semi-major axis)
f = 1.0/298.257224 # WGS-84 flattening
C = 1.0/math.sqrt(cosLat*cosLat + (1-f)*(1-f)*sinLat*sinLat)
S = (1-f)*(1-f)*C
h = altitude
# compute the ECEF XYZ coordinates
X = (rad * C + h) * cosLat * cosLon
Y = (rad * C + h) * cosLat * sinLon
Z = (rad * S + h) * sinLat
# return np.array([X, Y, Z]).reshape(1,3)
return [X, Y, Z]
@staticmethod
def LLA2XYZM(longitude, latitude, altitude):
# sines and cosines of latitude and longitude (vectorized)
cosLat = np.cos(latitude * math.pi / 180).reshape(-1,1)
sinLat = np.sin(latitude * math.pi / 180).reshape(-1,1)
cosLon = np.cos(longitude * math.pi / 180).reshape(-1,1)
sinLon = np.sin(longitude * math.pi / 180).reshape(-1,1)
# WGS-84 ellipsoid parameters
rad = 6378137.0 # equatorial radius (semi-major axis)
f = 1.0/298.257224 # WGS-84 flattening
C = 1.0/(np.sqrt(cosLat*cosLat + (1-f)*(1-f)*sinLat*sinLat)).reshape(-1,1)
S = (1-f)*(1-f)*C
h = altitude.reshape(-1,1)
# compute the ECEF XYZ coordinates
X = (rad * C + h) * cosLat * cosLon
Y = (rad * C + h) * cosLat * sinLon
Z = (rad * S + h) * sinLat
return [X, Y, Z]
@staticmethod
def XYZ2LLA(X, Y, Z):
''' Convert ECEF Cartesian coordinates to WGS-84 geodetic coordinates.
args:
X, Y, Z
return:
longitude, latitude, altitude (matching the return order below)
'''
# WGS-84 ellipsoid parameters
a = 6378137.0 # semi-major axis
b = 6356752.314245 # semi-minor axis
ea = np.sqrt((a ** 2 - b ** 2) / a ** 2)
eb = np.sqrt((a ** 2 - b ** 2) / b ** 2)
p = np.sqrt(X ** 2 + Y ** 2)
theta = np.arctan2(Z * a, p * b)
# compute longitude, latitude and altitude
longitude = np.arctan2(Y, X)
latitude = np.arctan2(Z + eb ** 2 * b * np.sin(theta) ** 3, p - ea ** 2 * a * np.cos(theta) ** 3)
N = a / np.sqrt(1 - ea ** 2 * np.sin(latitude) ** 2)
altitude = p / np.cos(latitude) - N
# return np.array([np.degrees(latitude), np.degrees(longitude), altitude])
return [np.degrees(longitude), np.degrees(latitude), altitude]
@staticmethod
def XYZ2LLAM(X, Y, Z):
''' Convert ECEF Cartesian coordinates to WGS-84 geodetic coordinates (vectorized).
args:
X, Y, Z
return:
longitude, latitude, altitude (matching the return order below)
'''
# WGS-84 ellipsoid parameters
a = 6378137.0 # semi-major axis
b = 6356752.314245 # semi-minor axis
ea = np.sqrt((a ** 2 - b ** 2) / a ** 2)
eb = np.sqrt((a ** 2 - b ** 2) / b ** 2)
p = np.sqrt(X ** 2 + Y ** 2)
theta = np.arctan2(Z * a, p * b)
# compute longitude, latitude and altitude
longitude = np.arctan2(Y, X)
latitude = np.arctan2(Z + eb ** 2 * b * np.sin(theta) ** 3, p - ea ** 2 * a * np.cos(theta) ** 3)
N = a / np.sqrt(1 - ea ** 2 * np.sin(latitude) ** 2)
altitude = p / np.cos(latitude) - N
# return np.array([np.degrees(latitude), np.degrees(longitude), altitude])
return [np.degrees(longitude), np.degrees(latitude), altitude]
@staticmethod
def world2Pixel(geoMatrix, x, y):
"""
使用GDAL库的geomatrix对象((gdal.GetGeoTransform()))计算地理坐标的像素位置
"""
ulx = geoMatrix[0]
uly = geoMatrix[3]
xDist = geoMatrix[1]
yDist = geoMatrix[5]
rtnX = geoMatrix[2]
rtnY = geoMatrix[4]
pixel = int((x - ulx) / xDist)
line = int((uly - y) / abs(yDist))
return pixel, line
@staticmethod
def sar_intensity_synthesis(in_sar_tif, out_sar_tif):
# read basic information from the SLC-format SAR image
in_ds = gdal.Open(in_sar_tif)
bands_num = in_ds.RasterCount
rows = in_ds.RasterYSize
columns = in_ds.RasterXSize
proj = in_ds.GetProjection()
geotrans = in_ds.GetGeoTransform()
# create the output SAR intensity image
gtiff_driver = gdal.GetDriverByName('GTiff')
out_ds = gtiff_driver.Create(out_sar_tif, columns, rows, 1, gdal.GDT_Float32) # explicit float type; the GDAL default (Byte) would truncate the intensities
out_ds.SetProjection(proj)
out_ds.SetGeoTransform(geotrans)
# compute the SAR intensity: convert each band from dB back to linear, then take the modulus
in_data1 = in_ds.GetRasterBand(1).ReadAsArray(0, 0, columns, rows)
in_data1 = in_data1/10
in_data1 = np.power(10, in_data1) # dB -> linear
in_data2 = in_ds.GetRasterBand(2).ReadAsArray(0, 0, columns, rows)
in_data2 = in_data2 / 10
in_data2 = np.power(10, in_data2) # dB -> linear
out_data = np.sqrt(in_data1**2 + in_data2**2)
out_ds.GetRasterBand(1).WriteArray(out_data)
del in_ds, out_ds
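
Because every helper above is a static method, the class can be sanity-checked in isolation. A minimal sketch with synthetic values (nothing here comes from a real scene):

# ECEF round trip: XYZ2LLA should approximately invert LLA2XYZ (the two
# methods use slightly different flattening constants, so closure is at
# the metre level rather than exact).
xyz = OrthoAuxData.LLA2XYZ(116.4, 39.9, 50.0)
lon, lat, alt = OrthoAuxData.XYZ2LLA(xyz[0], xyz[1], xyz[2])
print(lon, lat, alt)  # ~116.4, ~39.9, ~50.0

# Cubic orbit fit on one synthetic state component. orbit_fitting re-centres
# time at T0, so the returned coefficients describe the cubic in (t - T0);
# the leading coefficient still comes back as ~0.5.
T = [float(t) for t in range(10)]
component = [0.5 * t ** 3 - 2.0 * t ** 2 + 3.0 * t + 7.0 for t in T]
paras, T0 = OrthoAuxData.orbit_fitting(T, [component])
print(T0, paras[0])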

Ortho/OrthoMain.py Normal file

@@ -0,0 +1,749 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File OrthoMain.py
@Function orthorectification
@Author KHZ
@Contact
@Date 2021/8/14
@Version 1.0.0
"""
import logging
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.algorithm.algtools.PreProcess import PreProcess as pp
import tarfile
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource # XML reading and input checking
from OrthoAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2
from OrthoAlg import ScatteringAlg as alg
from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.xml.CreatMetafile import CreateMetafile
from OrthoXmlInfo import CreateDict, CreateStadardXmlFile
from osgeo import gdal, osr
import os
import glob
# import gc
import datetime
import shutil
import sys
import scipy # explicit imports so PyInstaller bundles these modules
import scipy.spatial.transform
import scipy.spatial.transform.rotation
import scipy.spatial.transform._rotation_groups # required to avoid a PyInstaller packaging error
DEBUG = True
EXE_NAME = 'Ortho'
#env_str = os.getcwd()
env_str =os.path.dirname(os.path.abspath(sys.argv[0])) #os.path.split(os.path.realpath(__file__))[0]
os.environ['PROJ_LIB'] = env_str
LogHandler.init_log_handler(os.path.join("run_log", EXE_NAME)) # r"run_log\Ortho"
logger = logging.getLogger("mylog")
logger.info(env_str)
class LogHandler2:
"""日志记录工具,用于输出程序运行状况。
这里因为是单程序执行没有必要调用logging
具体日志策略
1. 最外层使用try,catch 捕捉异常
2. 最后程序执行终止判断
日志记录格式
第一行
TaskID时间输入参数文件地址
中间
时间状态执行步骤消息
最后一行
finished 表示程序执行完成
failed 程序执行出现错误
"""
def __init__(self, sLogPath) -> None:
'''
Initialize the log file.
args:
sLogPath:str path of the log file
raise:
IOError:Exception on failure
'''
self.__sLogPath = sLogPath
def loggingStart(self,sTaskID,sParamPath):
'''Write the log header:
TaskID, time, path of the input parameter file
args:
sTaskID:str task ID
sParamPath:str path of the task parameter file
return:
None
'''
sDateTime=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f") # execution time
sOutput="[{}],[{}],[{}]".format(sTaskID, sDateTime, sParamPath)
self.__outputText(sOutput)
pass
def logging(self,sStates,sExecuteStep,sException):
"""输出中间计算信息"""
sDateTime=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f") # 执行时间
sOutput="[{}],[{}],[{}],[{}]".format(sDateTime,sStates,sExecuteStep,sExecuteStep)
self.__outputText(sOutput)
pass
def __outputText(self,sMessage):
'''Append a message to the final log file.
'''
with open(self.__sLogPath, 'a', encoding='utf-8') as fp:
fp.write("{}\n".format(sMessage)) # newline-terminate each record so they do not run together
def loggingEnd(self, bSuccessful):
'''
Write the final line recording whether the run succeeded.
'''
if bSuccessful:
sEndText="\n{}\nfinished".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
else:
sEndText="\n{}\nfailed".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
self.__outputText(sEndText)
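# For illustration, a log file written through LogHandler2 looks roughly like
# this (values invented; the format follows the methods above):
#
# [CSAR_202107275419_0001-0],[2023-11-04 23:04:53.000000],[Ortho.xml]
# [2023-11-04 23:05:01.000000],[running],[check_source],[ok]
#
# 2023-11-04 23:12:40.000000
# finished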
class OrthoMain:
"""
Main driver for orthorectification by the indirect geolocation method.
"""
def __init__(self, alg_xml_path):
self.alg_xml_path = alg_xml_path
self.imageHandler = ImageHandler()
self.__alg_xml_handler = ManageAlgXML(alg_xml_path)
self.__check_handler = CheckSource(self.__alg_xml_handler)
self.__workspace_path = None
self.__task_id = None
self.__input_paras = {}
# self.__output_paras = {}
self.__in_processing_paras = {}
# self.__out_processing_paras = {}
self.__preprocessed_paras = {}
self.__out_para = None
def check_source(self):
"""
Check the algorithm configuration file
and that all auxiliary files are present.
"""
if self.__check_handler.check_alg_xml() is False:
return False
if self.__check_handler.check_run_env() is False:
return False
input_para_names = ["SLC", "DEM", "CorrectMethod"] # TODO: also validate the correction method
if self.__check_handler.check_input_paras(input_para_names) is False:
return False
self.__workspace_path = self.__alg_xml_handler.get_workspace_path()
self.__task_id = self.__alg_xml_handler.get_task_id()
self.__input_paras = self.__alg_xml_handler.get_input_paras()
# self.__output_paras = self.__alg_xml_handler.get_output_paras()
self.__create_work_space()
self.__in_processing_paras = self.__init_processing_paras(self.__input_paras) # inputs as {ParaName: ParaValue}
# self.__out_processing_paras = self.__init_processing_paras(self.__output_paras) # outputs as {ParaName: ParaValue}
self.__out_name = os.path.splitext(os.path.splitext(os.path.basename(self.__input_paras['SLC']['ParaValue']))[0])[0]
# AlgorithmName = self.__alg_xml_handler.get_algorithm_name()
# TaskId = self.__alg_xml_handler.get_task_id()
result_name = self.__out_name + ".tar.gz"
self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', result_name)
isError, CorrectMethod = self.__check_handler.check_input_paras(['CorrectMethod']) # TODO: read the correction method from the XML and branch on it
if CorrectMethod.get('CorrectMethod') == '1' or CorrectMethod.get('CorrectMethod') == 1:
logger.info("CorrectMethod is RPC!")
# self.__out_para=self.__out_para.replace(".tar.gz","_RPC.tar.gz")
self.__out_para=self.__out_para.replace(".tar.gz","-ortho.tar.gz")
elif CorrectMethod.get('CorrectMethod') == '2' or CorrectMethod.get('CorrectMethod') == 2:
logger.info("CorrectMethod is RD!")
# self.__out_para=self.__out_para.replace(".tar.gz","_RD.tar.gz")
self.__out_para=self.__out_para.replace(".tar.gz","-ortho.tar.gz")
else:
raise Exception('No CorrectMethod')
self.__alg_xml_handler.write_out_para("OrthoProduct", self.__out_para) # write the output parameter back to the task XML
logger.info('check_source finished!')
logger.info('progress bar :5')
return True
def __create_work_space(self):
"""
Delete any existing workspace folders and create fresh ones.
"""
self.__workspace_Output_path = os.path.join(self.__workspace_path, EXE_NAME, "Output")
self.__workspace_Temporary_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary")
self.__workspace_unpack_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "unpack")
self.__workspace_ResampledDEM_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", 'TestDEM')
self.__workspace_LutImg_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", 'TestLut')
self.__workspace_IncidenceImg_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", 'TestInc')
self.__workspace_SimImg_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", 'TestSim')
self.__workspace_SARIntensity_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", 'TestSAR')
self.__workspace_package_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", 'package')
self.__workspace_origin_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "origin")
path_list = [self.__workspace_Output_path, self.__workspace_Temporary_path,
self.__workspace_unpack_path, self.__workspace_ResampledDEM_path,
self.__workspace_LutImg_path, self.__workspace_IncidenceImg_path,
self.__workspace_SimImg_path, self.__workspace_SARIntensity_path,
self.__workspace_package_path, self.__workspace_origin_path]
for path in path_list:
if os.path.exists(path):
if DEBUG is True:
continue
self.del_floder(path)
os.makedirs(path)
else:
os.makedirs(path)
logger.info('create new workspace success!')
@staticmethod
def force_del_file(file_path):
"""
Forcibly delete a file, or every file under a directory.
"""
if os.path.isdir(file_path):
for main_dir, subdir, file_name_list in os.walk(file_path):
for filename in file_name_list:
apath = os.path.join(main_dir, filename)
# noinspection PyBroadException
try:
os.remove(apath)
except Exception as error: # fall back to forced deletion via the Windows shell
os.system("del /f /q %s" % apath)
elif os.path.isfile(file_path) is True:
# noinspection PyBroadException
try:
os.remove(file_path)
except Exception as error: # fall back to forced deletion via the Windows shell
os.system("del /f /q %s" % file_path)
@staticmethod
def make_targz(output_filename, source_dir):
"""
Pack the whole directory tree in one go (empty subdirectories are included).
To pack without compressing, change "w:gz" to "w:" or "w".
:param output_filename: full path of the output archive, e.g. 'E:\test.tar.gz'
:param source_dir: root directory to pack; 'E:\testFile\' packs the folder's contents, 'E:\testFile' packs the folder itself
"""
out_dir = os.path.split(output_filename)[0] # a name other than the built-in `dir`
if os.path.exists(out_dir) is False:
os.makedirs(out_dir)
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
@staticmethod
def del_floder(path_data):
"""
Remove a whole directory tree.
"""
if os.path.isdir(path_data):
shutil.rmtree(path_data)
def del_temp_workspace(self):
"""
Remove the temporary workspace (skipped in DEBUG mode).
"""
if DEBUG is True:
return
path = os.path.join(self.__workspace_path, EXE_NAME, 'Temporary')
if os.path.exists(path):
self.del_floder(path)
def __init_processing_paras(self, names):
"""
:param names: dict of parameter dicts, one per input product
"""
processing_paras = {}
for name in names:
para = names[name]
if para is None:
logger.error(name + " is None!")
return False
if para['ParaType'] == 'File':
if para['DataType'] == 'File':
para_path = os.path.join(self.__workspace_path, para['ParaValue'])
processing_paras.update({name: para_path})
if para['DataType'] == 'xml':
para_path = os.path.join(self.__workspace_path, para['ParaValue'])
processing_paras.update({name: para_path})
if para['DataType'] == "yaml" or para['DataType'] == "ymal": # accept the historical "ymal" misspelling
para_path = os.path.join(self.__workspace_path, para['ParaValue'])
processing_paras.update({name: para_path})
if para['DataType'] == 'tar.gz':
para_path = os.path.join(self.__workspace_path, para['ParaValue'])
tar_gz_dic = self.__dec_tar_gz(name, para_path, self.__workspace_unpack_path)
processing_paras.update(tar_gz_dic)
if para['DataType'] == 'tif' or para['DataType'] == 'tiff': # new: DEM data passed as absolute file path(s)
if para['ParaValue'] != 'empty' and para['ParaValue'] != 'Empty' and para['ParaValue'] != '':
para_path_list = para['ParaValue'].split(";")
if len(para_path_list) != 0:
dem_path = os.path.join(self.__workspace_origin_path, para['ParaName'])
if os.path.exists(dem_path) is False:
os.mkdir(dem_path)
for file_path in para_path_list:
tif_name = os.path.basename(file_path)
shutil.copy(file_path, os.path.join(dem_path, tif_name))
para_path = os.path.join(self.__workspace_origin_path,para['ParaName'])
processing_paras.update({name: para_path})
elif para['ParaType'] == 'Value':
if para['DataType'] == 'float':
value = float(para['ParaValue'])
processing_paras.update({name: value})
return processing_paras
def __dec_tar_gz(self, name1, tar_gz_path, out_dir):
"""
Unpack a .tar.gz scene archive.
:param tar_gz_path: path of the .tar.gz file
:param out_dir: output directory
:return para_dic: paths of the polarimetric images
"""
# create the folder
name = os.path.split(tar_gz_path)[1].replace('.tar.gz', '') # strip the suffix; rstrip('.tar.gz') would strip characters, not a suffix
file_dir = os.path.join(out_dir, name + '\\')
if os.path.exists(file_dir) is False:
os.makedirs(file_dir)
# unpack
t = tarfile.open(tar_gz_path)
t.extractall(path=file_dir)
# collect the files inside the folder
para_dic = {}
# if os.path.exists(file_dir + name + '\\'):
# meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.meta.xml')))
# para_dic.update({'SLC': file_dir + name})
# else:
# meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.meta.xml')))
# para_dic.update({'SLC': file_dir})
if os.path.exists(file_dir + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.xml')))
para_dic.update({'SLC': file_dir + name})
else:
meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.xml')))
para_dic.update({'SLC': file_dir})
if meta_xml_paths == []:
raise Exception('no metadata .xml found in path: ', file_dir + '\\')
para_dic.update({'META': meta_xml_paths[0]})
self.image_meta_xml = meta_xml_paths
# para_dic.update({name1: file_dir}) # {SLC: file_path}
# scan the folder for polarization channels
hh_flag, hv_flag, vh_flag, vv_flag, dh_flag = 0, 0, 0, 0, 0
if os.path.exists(file_dir + name + '\\'):
in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
if in_tif_paths == []:
in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tiff')))
else:
in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tif')))
if in_tif_paths == []:
in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tiff')))
for in_tif_path in in_tif_paths:
# identify the polarization from the file name
if 'hh' in os.path.basename(in_tif_path) or 'HH' in os.path.basename(in_tif_path):
hh_flag = 1
elif 'hv' in os.path.basename(in_tif_path) or 'HV' in os.path.basename(in_tif_path):
hv_flag = 1
elif 'vh' in os.path.basename(in_tif_path) or 'VH' in os.path.basename(in_tif_path):
vh_flag = 1
elif 'vv' in os.path.basename(in_tif_path) or 'VV' in os.path.basename(in_tif_path):
vv_flag = 1
elif "DH" in os.path.basename(in_tif_path):
dh_flag = 1
if hh_flag == 0 and hv_flag == 0 and vh_flag == 0 and vv_flag == 0 and dh_flag == 0:
raise Exception('cannot find any HH/HV/VH/VV/DH file in path:', tar_gz_path)
self.processinfo = [hh_flag, hv_flag, vh_flag, vv_flag,dh_flag]
return para_dic
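# For reference, the archive layout __dec_tar_gz expects looks roughly like
# this (inferred from the code above, not a formal specification):
#
#   <scene>.tar.gz
#     <scene>/           optional inner folder with the same name
#       *.xml            the first match is taken as the 'META' file
#       *_HH_*.tiff etc. at least one HH/HV/VH/VV/DH channel
#       *.rpc / *.rpb    RPC model files, used by the RPC branch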
def process_handle(self):
isError, CorrectMethod = self.__check_handler.check_input_paras(['CorrectMethod']) # TODO: read the correction method from the XML and branch on it
if CorrectMethod.get('CorrectMethod') == '1' or CorrectMethod.get('CorrectMethod') == 1:
logger.info("CorrectMethod is RPC!")
return self.RPC_process_handle()
elif CorrectMethod.get('CorrectMethod') == '2' or CorrectMethod.get('CorrectMethod') == 2:
logger.info("CorrectMethod is RD!")
return self.RD_process_handle()
else:
raise Exception('No CorrectMethod')
def RPC_process_handle(self):
logger.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
# print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
# 1. DEM mosaicking, cropping and resampling
Orth_Slc=[]
in_dem_path = self.__in_processing_paras['DEM']
meta_file_path = self.__in_processing_paras['META'] # path of the .meta file
out_dem_path = self.__workspace_ResampledDEM_path
dem_merged_path=DEMProcess.dem_merged(in_dem_path, meta_file_path, out_dem_path) # produces TestDEM\mergedDEM_VRT.tif
# 2. solve row/column coordinates by indirect geolocation
slc_paths = self.__in_processing_paras["SLC"]
rpc_slc_path=None
for slc_path in os.listdir(slc_paths):
if slc_path.find(".tiff")>0:
slc_path_temp=os.path.join(slc_paths,slc_path)
# out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path.replace(".tiff","_db.tif").replace("L1A","L1B"))
out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path.replace(".tiff","_db.tif").replace("L1A","L1B").replace("HH","h_h").replace("HV","h_v").replace("VH","v_h").replace("VV","v_v"))
alg.sar_backscattering_coef(slc_path_temp,self.__in_processing_paras['META'],out_power_path)
rpc_slc_path=slc_path_temp.replace(".tiff",".rpc")
if not os.path.exists(rpc_slc_path):
rpc_slc_path=slc_path_temp.replace(".tiff",".rpb")
if not os.path.exists(rpc_slc_path):
logger.error("rpc file Not Found!")
# out_slc_power_path=os.path.join(self.__workspace_package_path,os.path.basename(out_power_path.replace("_db.tif",".tif").replace("L1B","L4")))
out_slc_power_path=os.path.join(self.__workspace_package_path,os.path.basename(out_power_path.replace("_db.tif","-ortho.tif")))
rpc_correction(out_power_path,rpc_slc_path,out_slc_power_path)
break
logger.info('progress bar: 30%')
# 2.1 build the mapping table
slc_path=os.path.join(slc_paths,os.listdir(slc_paths)[0])
out_rpc_rc_path = os.path.join(self.__workspace_package_path,"ori_sim-ortho.tif")
get_RPC_lon_lat(out_power_path,out_rpc_rc_path)
#getRCImageRC(slc_path_temp,out_rpc_rc_path,rpc_slc_path)
logger.info('progress bar: 70%')
# 2.2 compute the local incidence angle
Orthorectification = IndirectOrthorectification(os.path.join(os.path.dirname(__file__),"config.yaml"))
Orthorectification.IndirectOrthorectification(self.__in_processing_paras["SLC"],self.__workspace_package_path) # change 1
out_incangle_path=os.path.join(self.__workspace_package_path,"inci_Angle-ortho.tif")
out_localincangle_path=os.path.join(self.__workspace_package_path,"LocalincidentAngle-ortho.tif")
out_incangle_geo_path=os.path.join(self.__workspace_package_path,"inc_angle.tif")
out_localincangle_geo_path=os.path.join(self.__workspace_package_path,"LocalincidenceAngle.tif") # determines the incidence-angle product name
Orthorectification.getRPC_incidenceAngle_lon_lat(dem_merged_path,out_rpc_rc_path,self.__workspace_Temporary_path,self.__workspace_package_path,out_incangle_path,out_localincangle_path,out_incangle_geo_path,out_localincangle_geo_path)
# 2.3 write the outputs
# self.del_floder(self.__workspace_processing_path)
logger.info('process_handle finished!')
logger.info('progress bar :90')
# 7. package and generate quick-view images
for tiff_name in os.listdir(self.__workspace_package_path):
if tiff_name.find(".tiff")>0 or tiff_name.find(".tif")>0:
self.imageHandler.write_quick_view(os.path.join(self.__workspace_package_path,tiff_name))
# 1/5. copy the original data (this controls whether the raw data enters the final product set)
for maindir, subdir, file_name_list in os.walk(slc_paths):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
file_type = apath.split('.')[-1]
if file_type in ["xml"]:
output = os.path.join(self.__workspace_package_path, filename)
shutil.copy(apath, output)
else:
output=os.path.join(self.__workspace_package_path, filename)
shutil.copy(apath, output)
# generate the metadata file
# xml_path = "./model_meta.xml"
tem_folder = os.path.join(self.__workspace_path, EXE_NAME, "Temporary")
image_path=out_slc_power_path# os.path.join(self.__workspace_package_path, "OrthoMapTable.tif")
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
# par_dict = CreateDict().calu_nature(image_path, self.processinfo, out_path1, out_path2)
#
# dem_path=os.path.join(self.__workspace_ResampledDEM_path, 'mergedDEM.tif')
# out_dem_path1 = os.path.join(tem_folder, "trans_dem_geo_projcs.tif")
# out_dem_path2 = os.path.join(tem_folder, "trans_dem_projcs_geo.tif")
# # par_dict2 = CreateDict().calu_dem_nature(dem_path, dem_meta, out_dem_path1, out_dem_path2, sampling_f, para_A_arr)
# # par_dict2 = CreateDict().calu_dem_nature(dem_path, dem_meta, out_dem_path1, out_dem_path2, sampling_f, para_A_arr)
# par_dict2 = CreateDict().calu_dem_nature(dem_path, out_dem_path1, out_dem_path2, None,Orthorectification.SatelliteOrbitModel.A_arr)
# model_xml_path = os.path.join(self.__workspace_Temporary_path, "creat_standard.meta.xml") # output xml path
# CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, par_dict2, model_xml_path).create_standard_xml()
#
# sar_image_meta_xml = list(glob.glob(os.path.join(self.__workspace_package_path, '*.meta.xml')))
# meta_xml_path = os.path.join(self.__workspace_package_path, os.path.basename(self.__out_para).replace(".tar.gz",".meta.xml"))
# CreateMetafile(sar_image_meta_xml[0], self.alg_xml_path, model_xml_path, meta_xml_path).process(os.path.basename(self.__in_processing_paras["SLC"]))
model_path = "./product.xml"
meta_xml_path = os.path.join(self.__workspace_package_path,
os.path.basename(self.__out_para).replace(".tar.gz", ".meta.xml"))
para_dict = CreateMetaDict(image_path, self.__in_processing_paras['META'], self.__workspace_package_path,
out_path1, out_path2).calu_nature()
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "DEM"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
# build the tar.gz package
logger.info('progress bar :94')
logger.info('start make targz..')
self.del_floder(self.__workspace_unpack_path)
self.del_floder(self.__workspace_ResampledDEM_path)
self.del_floder(self.__workspace_LutImg_path)
self.del_floder(self.__workspace_IncidenceImg_path)
self.del_floder(self.__workspace_SimImg_path)
self.del_floder(self.__workspace_SARIntensity_path)
self.make_targz(self.__out_para, self.__workspace_package_path+"\\")
logger.info('make targz finish')
logger.info('progress bar :100')
return True
pass
def cut_dem(self, dem_merged_path, meta_file_path):
left_up_lon = 0
left_up_lat = 0
def process_sim_ori(self, ori_sim, sim_ori):
scopes = ()
scopes += (ImageHandler.get_scope_ori_sim(ori_sim),)
intersect_polygon = pp().intersect_polygon(scopes)
if intersect_polygon is None:
raise Exception('create intersect shp fail!')
shp_path = os.path.join(self.__workspace_Temporary_path, 'IntersectPolygon.shp')
if pp().write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
raise Exception('create intersect shp fail!')
sim_ori_process = os.path.join(self.__workspace_Temporary_path, 'sim_ori_process.tif')
pp().cut_img(sim_ori_process, sim_ori, shp_path)
return sim_ori_process
def RD_process_handle(self):
# RPC
logger.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
# print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
# 1. DEM mosaicking, cropping and resampling
Orth_Slc=[]
in_dem_path = self.__in_processing_paras['DEM']
meta_file_path = self.__in_processing_paras['META'] # path of the .meta file
out_dem_path = self.__workspace_ResampledDEM_path
dem_merged_path=DEMProcess.dem_merged(in_dem_path, meta_file_path, out_dem_path) # produces TestDEM\mergedDEM_VRT.tif
# self.cut_dem(dem_merged_path, meta_file_path)
# 2. solve row/column coordinates by indirect geolocation
slc_paths = self.__in_processing_paras["SLC"]
# 2.1 build the mapping table
slc_path=os.path.join(slc_paths,os.listdir(slc_paths)[0])
# 2.2 compute the local incidence angle
path2 = env_str
Orthorectification = IndirectOrthorectification(os.path.join(path2,"config.yaml"))
Orthorectification.IndirectOrthorectification(self.__in_processing_paras["SLC"], self.__workspace_package_path) # change 1
# 2.3 write the outputs
# 3. run the RD correction
in_slc_path=None
for slc_path in os.listdir(slc_paths):
if slc_path.find(".tiff")>0 and (slc_path.find("_HH_")>0 or slc_path.find("_VV_")>0 or slc_path.find("_DH_")>0):
in_slc_path=os.path.join(slc_paths,slc_path)
break
# after obtaining the correction model
Orthorectification.preCaldem_sar_rc(dem_merged_path,in_slc_path,self.__workspace_Temporary_path,self.__workspace_package_path.replace("\\","\\\\")) # coarse screening of the coordinate range
logger.info('progress bar: 40%')
# clip_dem_reample_path=os.path.join(self.__workspace_Temporary_path, "SAR_dem.tiff")
# infooption=gdal.InfoOptions("-json")
# clip_dem_tif_info=gdal.Info(clip_dem_reample_path,options=infooption)
# dem_merged_info=gdal.Info(dem_merged_path,options=infooption)
# sampling_f=clip_dem_tif_info['size'][0]/dem_merged_info['size'][0]
out_dir_path=self.__workspace_package_path.replace("\\","\\\\")
this_outSpace_path = out_dir_path
this_out_dem_slantRange_path = out_dir_path + "\\" + "dem_slantRange.tiff" # terrain slant range
this_out_plant_slantRange_path = out_dir_path + "\\" + "flat_slantRange.tiff" # flat-earth slant range
# keep the results (remove any stale copies first)
if(os.path.exists(this_out_dem_slantRange_path)):
os.remove(this_out_dem_slantRange_path)
if(os.path.exists(this_out_plant_slantRange_path)):
os.remove(this_out_plant_slantRange_path)
this_out_dem_rc_path = out_dir_path + "\\" + "WGS_SAR_map.tiff" # lon/lat to row/column mapping
if(os.path.exists(this_out_dem_rc_path)):
os.remove(this_out_dem_rc_path)
this_out_sar_sim_path = out_dir_path + "\\" + "sar_sim.tiff"
if (os.path.exists(this_out_sar_sim_path)):
os.remove(this_out_sar_sim_path)
this_out_sar_sim_wgs_path = out_dir_path + "\\" + "sar_sim_wgs.tiff" # lon/lat to row/column mapping
if (os.path.exists(this_out_sar_sim_wgs_path)):
os.remove(this_out_sar_sim_wgs_path)
this_out_incidence_path = out_dir_path + "\\" + "incidentAngle.tiff" # incidence angle
this_out_localIncidenct_path = out_dir_path + "\\" + "localincidentAngle.tiff" # local incidence angle
this_out_inc_angle_rpc_path = out_dir_path + "\\" + "RD_incidentAngle.tiff" # incidence angle (RD)
this_out_local_inc_angle_rpc_path = out_dir_path + "\\" + "RD_localincidentAngle.tiff" # local incidence angle (RD)
if (os.path.exists(this_out_inc_angle_rpc_path)):
shutil.move(this_out_inc_angle_rpc_path, out_dir_path + "\\" + "inci_Angle-ortho.tif")
if (os.path.exists(this_out_local_inc_angle_rpc_path)):
shutil.move(this_out_local_inc_angle_rpc_path, out_dir_path + "\\" + "LocalIncidentAngle-ortho.tif")
if(os.path.exists(this_out_incidence_path)):
shutil.move(this_out_incidence_path,out_dir_path + "\\" + "inc_angle.tif")
if(os.path.exists(this_out_localIncidenct_path)):
shutil.move(this_out_localIncidenct_path,out_dir_path + "\\" + "LocalIncidenceAngle.tif")
this_out_ori_sim_tiff = out_dir_path + "\\" + "RD_ori_sim.tif" # row/column to lon/lat mapping
if (os.path.exists(this_out_ori_sim_tiff)):
shutil.move(this_out_ori_sim_tiff, out_dir_path + "\\" + "ori_sim-ortho.tif")
this_out_sim_ori_tiff = out_dir_path + "\\" + "RD_sim_ori.tif" # lon/lat to row/column mapping
if (os.path.exists(this_out_sim_ori_tiff)):
shutil.move(this_out_sim_ori_tiff, out_dir_path + "\\" + "sim_ori-ortho.tif")
# GTC incidence angle
GTC_rc_path=os.path.join(self.__workspace_package_path,"ori_sim-ortho.tif")
GTC_out_path=self.__workspace_package_path
parameter_path = os.path.join(self.__workspace_package_path, "orth_para.txt")
this_in_rpc_lon_lat_path = os.path.join(self.__workspace_package_path, "ori_sim-ortho.tif")
dem_rc = os.path.join(self.__workspace_package_path, "sim_ori-ortho.tif")
dem_rc_pro = self.process_sim_ori(this_in_rpc_lon_lat_path, dem_rc)
shutil.move(dem_rc_pro, dem_rc)
in_tif_paths = list(glob.glob(os.path.join(slc_paths, '*.tiff')))
for in_tif_path in in_tif_paths:
out_sar_path = os.path.join(GTC_out_path, os.path.split(in_tif_path)[1])
slc_path_temp=os.path.join(slc_paths,in_tif_path)
out_power_path = os.path.join(self.__workspace_Temporary_path,
slc_path_temp.replace(".tiff", "-lin.tif").replace("L1A", "L1B")).replace(
"HH", "h_h").replace("HV", "h_v").replace("VH", "v_h").replace("VV", "v_v").replace("DH", "d_h")
# out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","_db.tif"))
alg.sar_backscattering_coef(slc_path_temp, self.__in_processing_paras['META'], out_power_path)
lin_tif_path = os.path.join(self.__workspace_Temporary_path,
os.path.basename(out_power_path).split('-')[0] + "-lin_geo.tif")
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc,
# out_power_path,
# lin_tif_path)
Orthorectification.calInterpolation_bil_Wgs84_rc_sar_sigma(parameter_path, dem_rc,
out_power_path,
lin_tif_path)
tempout_tif_path = os.path.join(self.__workspace_package_path,
os.path.basename(lin_tif_path).split('-')[0] + "-ortho.tif")
alg.lin_to_db(lin_tif_path, tempout_tif_path) # convert linear values back to dB
# temp_slc_path=os.path.join(self.__workspace_package_path, os.path.basename(out_power_path))
# temp_slc_path=temp_slc_path.replace("_db.tif","-ortho.tif")
#inter_Range2Geo(self,lon_lat_path , data_tiff , grid_path , space)
# Orthorectification.inter_Range2Geo(GTC_rc_path,out_power_path,temp_slc_path,Orthorectification.heightspace)
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_power_path, temp_slc_path) #
break
#Orth_Slc.append(temp_slc_path)
# power_list.append(out_power_path)
for tiff_name in os.listdir(self.__workspace_package_path):
if tiff_name.find(".tiff")>0 or tiff_name.find(".tif")>0:
self.imageHandler.write_quick_view(os.path.join(self.__workspace_package_path,tiff_name))
# 1/5. copy the .incidence.xml / .meta.xml from the original data into the package (the raw imagery itself is not meant for output)
for maindir, subdir, file_name_list in os.walk(slc_paths):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
file_type = apath.split('.')[-1]
if file_type in ["xml"]:
output = os.path.join(self.__workspace_package_path, filename)
shutil.copy(apath, output)
else:
output=os.path.join(self.__workspace_package_path, filename)
shutil.copy(apath, output)
# 2/5. orthorectified result images
# for maindir, subdir, file_name_list in os.walk(self.__workspace_package_path):
# for filename in file_name_list:
# apath = os.path.join(maindir, filename)
# file_type = filename.split('.')[-1]
# image_name = os.path.splitext(filename)[0]
#self.imageHandler.write_quick_view(output_OrthoResult) # quick-view image
# generate the metadata file
# xml_path = "./model_meta.xml"
tem_folder = os.path.join(self.__workspace_path, EXE_NAME, "Temporary")
image_path=tempout_tif_path# os.path.join(self.__workspace_package_path, "OrthoMapTable.tif")
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
# par_dict = CreateDict().calu_nature(image_path, self.processinfo, out_path1, out_path2)
#
# dem_path=os.path.join(self.__workspace_ResampledDEM_path, 'mergedDEM.tif')
# out_dem_path1 = os.path.join(tem_folder, "trans_dem_geo_projcs.tif")
# out_dem_path2 = os.path.join(tem_folder, "trans_dem_projcs_geo.tif")
# # par_dict2 = CreateDict().calu_dem_nature(dem_path, dem_meta, out_dem_path1, out_dem_path2, sampling_f, para_A_arr)
# par_dict2 = CreateDict().calu_dem_nature(dem_path, out_dem_path1, out_dem_path2, sampling_f,Orthorectification.SatelliteOrbitModel.A_arr)
# model_xml_path = os.path.join(self.__workspace_Temporary_path, "creat_standard.meta.xml") # output xml path
# CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, par_dict2, model_xml_path).create_standard_xml()
#
# sar_image_meta_xml = list(glob.glob(os.path.join(self.__workspace_package_path, '*.meta.xml')))
# meta_xml_path = os.path.join(self.__workspace_package_path, os.path.basename(self.__out_para).replace(".tar.gz",".meta.xml"))
# CreateMetafile(sar_image_meta_xml[0], self.alg_xml_path, model_xml_path, meta_xml_path).process(os.path.basename(self.__in_processing_paras["SLC"]))
model_path = "./product.xml"
meta_xml_path = os.path.join(self.__workspace_package_path, os.path.basename(self.__out_para).replace(".tar.gz",".meta.xml"))
para_dict = CreateMetaDict(image_path, self.__in_processing_paras['META'], self.__workspace_package_path, out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": '正射校正'})
para_dict.update({"imageinfo_ProductIdentifier": 'Ortho'})
para_dict.update({"imageinfo_ProductLevel": '3A'})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "DEM"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
# build the tar.gz package
logger.info('progress bar :94')
logger.info('start make targz..')
self.del_floder(self.__workspace_unpack_path)
self.del_floder(self.__workspace_ResampledDEM_path)
self.del_floder(self.__workspace_LutImg_path)
self.del_floder(self.__workspace_IncidenceImg_path)
self.del_floder(self.__workspace_SimImg_path)
self.del_floder(self.__workspace_SARIntensity_path)
self.make_targz(self.__out_para, self.__workspace_package_path+"\\")
logger.info('make targz finish')
logger.info('progress bar :100')
return True
pass
if __name__ == '__main__':
DEBUG=False
if '-DEBUG' in sys.argv:
DEBUG=True
start = datetime.datetime.now()
try:
if len(sys.argv) < 2:
xml_path = 'Ortho.xml'
else:
xml_path = sys.argv[1]
OrthoMain = OrthoMain(xml_path)
if OrthoMain.check_source() is False:
raise Exception('check_source() failed!')
if OrthoMain.process_handle() is False:
raise Exception('process_handle() failed!')
logger.info('successful production of ortho products!')
except Exception:
logger.exception("run-time error!")
finally:
OrthoMain.del_temp_workspace()
pass
end = datetime.datetime.now()
logger.info('running use time: %s ' % (end - start))
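
As the __main__ block shows, the entry point takes the task XML path as its only positional argument (falling back to Ortho.xml), plus an optional -DEBUG flag, so a typical invocation is:

python OrthoMain.py Ortho.xml
python OrthoMain.py Ortho.xml -DEBUG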

Ortho/OrthoMain.spec Normal file

@@ -0,0 +1,40 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['OrthoMain.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='OrthoMain',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )

Ortho/OrthoXmlInfo.py Normal file

@@ -0,0 +1,310 @@
"""
@Project microproduct
@File OrthoXmlInfo.py
@Function generate the product metadata XML
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import os
from xml.etree.ElementTree import ElementTree, Element
import xml.dom.minidom
from lxml import etree
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from osgeo import gdal
import numpy as np
import datetime
from PIL import Image
class CreateDict:
"""根据影像/DEM的属性信息添加到字典中"""
def __init__(self):
self.ImageHandler = ImageHandler()
pass
def calu_nature(self, image_path, image_pair, out_path1, out_path2):
"""
Fill the productinfo fields into a dictionary.
image_path: image path
image_pair: polarizations present in the input archive, hh,hv,vh,vv=1111
out_path1: output path of the geographic-to-projected transform
out_path2: output path of the projected-to-geographic transform
"""
para_dict = {}
imageinfo_width = self.ImageHandler.get_img_width(image_path)
para_dict.update({"imageinfo_width":imageinfo_width})
imageinfo_height = self.ImageHandler.get_img_height(image_path)
para_dict.update({"imageinfo_height":imageinfo_height})
para_dict.update({"imageinfo_EarthModel": "WGS84"})
para_dict.update({"imageinfo_ProjectModel": "UTM"})
proj = self.ImageHandler.get_projection(image_path) # if the output image is in a projected CRS, convert it to geographic first
keyword = proj.split("[", 2)[0] # a geographic CRS needs no conversion
if keyword == "GEOGCS":
pass
elif keyword == "PROJCS":
pp.trans_projcs2geogcs(out_path2, image_path)
image_path = out_path2
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('image projection is missing!')
pp.trans_geogcs2projcs(out_path1, image_path) # project geographic to planar coordinates
imageinfo_widthspace = self.ImageHandler.get_geotransform(out_path1)[1] # resolution after projection
imageinfo_heightspace = -self.ImageHandler.get_geotransform(out_path1)[5] # resolution after projection
para_dict.update({"imageinfo_widthspace":imageinfo_widthspace})
para_dict.update({"imageinfo_heightspace":imageinfo_heightspace})
para_dict.update({"NominalResolution":imageinfo_widthspace})
WidthInMeters = imageinfo_width*imageinfo_widthspace # projected resolution x width
para_dict.update({"WidthInMeters":WidthInMeters})
image_array = self.ImageHandler.get_band_array(image_path)
a2 = np.where(np.isnan(image_array), 999999, image_array)
MinValue = np.min(a2)
a3 = np.where(np.isnan(image_array), -999999, image_array)
MaxValue = np.max(a3)
para_dict.update({"MaxValue":MaxValue})
para_dict.update({"MinValue":MinValue})
get_scope = self.ImageHandler.get_scope(image_path)
point_upleft, point_upright, point_downleft, point_downright=get_scope[0], get_scope[1], get_scope[2], get_scope[3]
para_dict.update({"imageinfo_corner_topLeft_latitude": point_upleft[1]})
para_dict.update({"imageinfo_corner_topLeft_longitude": point_upleft[0]})
para_dict.update({"imageinfo_corner_topRight_latitude": point_upright[1]})
para_dict.update({"imageinfo_corner_topRight_longitude": point_upright[0]})
para_dict.update({"imageinfo_corner_bottomLeft_latitude": point_downleft[1]})
para_dict.update({"imageinfo_corner_bottomLeft_longitude": point_downleft[0]})
para_dict.update({"imageinfo_corner_bottomRight_latitude": point_downright[1]})
para_dict.update({"imageinfo_corner_bottomRight_longitude": point_downright[0]})
longitude_max=np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).max()
longitude_min=np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).min()
latitude_max=np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).max()
latitude_min=np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).min()
imageinfo_center_latitude=(latitude_max+latitude_min)/2
imageinfo_center_longitude=(longitude_max+longitude_min)/2
para_dict.update({"imageinfo_center_latitude": imageinfo_center_latitude})
para_dict.update({"imageinfo_center_longitude": imageinfo_center_longitude})
# self.para_dict.update({"productType": "GTC"}) # 设置产品类型
para_dict.update({"productFormat": "TIF"})
productGentime = datetime.datetime.now()
para_dict.update({"productGentime": productGentime})
para_dict.update({"unit": "none"}) # 设置单位
para_dict.update({"NoDataValue": "nan"})
para_dict.update({"productLevel": "4"}) # 设置图像位深度
image_array = self.ImageHandler.get_band_array(image_path)
try: # 设置图像位深度
gdal_dtypes = { # keys follow numpy dtype .name values
'int8': gdal.GDT_Byte,
'uint16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'uint32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
bit_dtypes = {
'int8': 8,
'uint16': 16,
'int16': 16,
'uint32': 32,
'int32': 32,
'float32': 32,
'float64': 64,
}
if not gdal_dtypes.get(image_array.dtype.name, None) is None:
bit_num = str(bit_dtypes[image_array.dtype.name])
datatype=bit_num+"bit"
else:
datatype = str(32) + "bit"
# datatype = str(gdal.GDT_Float32)+"bit"
para_dict.update({"imagebit": datatype})
except Exception:
para_dict.update({"imagebit": "None"})
HH, HV, VH ,VV= image_pair[0], image_pair[1], image_pair[2], image_pair[3]
if HH == 0:
HH = "delete"
else:
HH = "NULL"
para_dict.update({"imageinfo_QualifyValue_HH": HH})
if HV==0:
HV = "delete"
else:
HV = "NULL"
para_dict.update({"imageinfo_QualifyValue_HV": HV})
if VH==0:
VH = "delete"
else:
VH = "NULL"
para_dict.update({"imageinfo_QualifyValue_VH": VH})
if VV==0:
VV = "delete"
else:
VV = "NULL"
para_dict.update({"imageinfo_QualifyValue_VV": VV})
return para_dict
def calu_dem_nature(self, dem_path, out_dem_path1, out_dem_path2, sampling_f, para_A_arr):
"""
Orthorectification additionally records the DEM image's properties.
dem_path: path of mergedDEM.tif
out_dem_path1: save path of mergedDEM.tif converted from geographic to planar coordinates
out_dem_path2: save path of mergedDEM.tif converted from planar to geographic coordinates
sampling_f: sampling rate
para_A_arr: coefficient array of the quartic polynomial orbit model
"""
para_dict2 = {}
proj = self.ImageHandler.get_projection(dem_path) # if the DEM is in a projected CRS, convert it to geographic first
keyword = proj.split("[", 2)[0] # a geographic CRS needs no conversion
if keyword == "GEOGCS":
pass
elif keyword == "PROJCS":
pp.trans_projcs2geogcs(out_dem_path2, dem_path)
dem_path = out_dem_path2
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('image projection is missing!')
pp.trans_geogcs2projcs(out_dem_path1, dem_path) # project geographic to planar coordinates
DEM_widthspace = self.ImageHandler.get_geotransform(out_dem_path1)[1] # resolution after projection
DEM_heightspace = -self.ImageHandler.get_geotransform(out_dem_path1)[5] # resolution after projection
para_dict2.update({"DEM_widthspace":DEM_widthspace})
para_dict2.update({"DEM_heightspace":DEM_heightspace})
# tree = ElementTree() # read DEMProduct (DEM source) and DEMDate (DEM acquisition date)
# tree.parse(dem_meta) # image header file
# root = tree.getroot()
# productinfo = root.find("metadata")
# DEMProduct = list(productinfo)[0].tag
# para_dict2.update({"DEM_DEMProduct":DEMProduct})
#
# DEMDate = root.find("metadata").find(DEMProduct).text
# para_dict2.update({"DEM_DEMDate": DEMDate})
get_scope = self.ImageHandler.get_scope(dem_path) # corner lon/lat of mergedDEM.tif
point_upleft, point_upright, point_downleft, point_downright=get_scope[0], get_scope[1], get_scope[2], get_scope[3]
para_dict2.update({"DEM_corner_topLeft_latitude": point_upleft[1]})
para_dict2.update({"DEM_corner_topLeft_longitude": point_upleft[0]})
para_dict2.update({"DEM_corner_topRight_latitude": point_upright[1]})
para_dict2.update({"DEM_corner_topRight_longitude": point_upright[0]})
para_dict2.update({"DEM_corner_bottomLeft_latitude": point_downleft[1]})
para_dict2.update({"DEM_corner_bottomLeft_longitude": point_downleft[0]})
para_dict2.update({"DEM_corner_bottomRight_latitude": point_downright[1]})
para_dict2.update({"DEM_corner_bottomRight_longitude": point_downright[0]})
#para_dict2.update({"orthoModel_samplingrate": sampling_f})
para_dict2.update({"satalliteOrbitModel_parameter_X_a0": para_A_arr[0, 0]}) # 获取四次多项式模型6个参数的数值
para_dict2.update({"satalliteOrbitModel_parameter_X_a1": para_A_arr[1, 0]})
para_dict2.update({"satalliteOrbitModel_parameter_X_a2": para_A_arr[2, 0]})
para_dict2.update({"satalliteOrbitModel_parameter_X_a3": para_A_arr[3, 0]})
para_dict2.update({"satalliteOrbitModel_parameter_X_a4": para_A_arr[4, 0]})
para_dict2.update({"satalliteOrbitModel_parameter_Y_b0": para_A_arr[0, 1]})
para_dict2.update({"satalliteOrbitModel_parameter_Y_b1": para_A_arr[1, 1]})
para_dict2.update({"satalliteOrbitModel_parameter_Y_b2": para_A_arr[2, 1]})
para_dict2.update({"satalliteOrbitModel_parameter_Y_b3": para_A_arr[3, 1]})
para_dict2.update({"satalliteOrbitModel_parameter_Y_b4": para_A_arr[4, 1]})
para_dict2.update({"satalliteOrbitModel_parameter_Z_c0": para_A_arr[0, 2]})
para_dict2.update({"satalliteOrbitModel_parameter_Z_c1": para_A_arr[1, 2]})
para_dict2.update({"satalliteOrbitModel_parameter_Z_c2": para_A_arr[2, 2]})
para_dict2.update({"satalliteOrbitModel_parameter_Z_c3": para_A_arr[3, 2]})
para_dict2.update({"satalliteOrbitModel_parameter_Z_c4": para_A_arr[4, 2]})
para_dict2.update({"satalliteOrbitModel_parameter_Vx_d0": para_A_arr[0, 3]})
para_dict2.update({"satalliteOrbitModel_parameter_Vx_d1": para_A_arr[1, 3]})
para_dict2.update({"satalliteOrbitModel_parameter_Vx_d2": para_A_arr[2, 3]})
para_dict2.update({"satalliteOrbitModel_parameter_Vx_d3": para_A_arr[3, 3]})
para_dict2.update({"satalliteOrbitModel_parameter_Vx_d4": para_A_arr[4, 3]})
para_dict2.update({"satalliteOrbitModel_parameter_Vy_e0": para_A_arr[0, 4]})
para_dict2.update({"satalliteOrbitModel_parameter_Vy_e1": para_A_arr[1, 4]})
para_dict2.update({"satalliteOrbitModel_parameter_Vy_e2": para_A_arr[2, 4]})
para_dict2.update({"satalliteOrbitModel_parameter_Vy_e3": para_A_arr[3, 4]})
para_dict2.update({"satalliteOrbitModel_parameter_Vy_e4": para_A_arr[4, 4]})
para_dict2.update({"satalliteOrbitModel_parameter_Vz_f0": para_A_arr[0, 5]})
para_dict2.update({"satalliteOrbitModel_parameter_Vz_f1": para_A_arr[1, 5]})
para_dict2.update({"satalliteOrbitModel_parameter_Vz_f2": para_A_arr[2, 5]})
para_dict2.update({"satalliteOrbitModel_parameter_Vz_f3": para_A_arr[3, 5]})
para_dict2.update({"satalliteOrbitModel_parameter_Vz_f4": para_A_arr[4, 5]})
return para_dict2
class CreateStadardXmlFile:
"""读取字典中的属性值生成一个标准的xml文件"""
def __init__(self, xml_path, para_xml_path, par_dict, par_dict2, path):
"""
xml_path: template path
para_xml_path: path of the algorithm configuration file
par_dict: dictionary
path: output path of the filled-in template
"""
self.par_dict = par_dict
self.par_dict2 = par_dict2
self.path = path
shutil.copy(xml_path, path)
pass
    def create_standard_xml(self):
        """Write the dictionary entries into the copied XML template."""
        tree = ElementTree()
        tree.parse(self.path)  # product metadata file
root = tree.getroot()
productinfo = root.find("productinfo")
for key, value in self.par_dict.items():
if key.split("_")[0] != "imageinfo":
productinfo.find(key).text = str(value)
elif key.split("_")[0] == "imageinfo":
imageinfo = productinfo.find("imageinfo")
if key.split("_")[1] in ["EarthModel", "ProjectModel", "width", "height", "widthspace", "heightspace"]:
imageinfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[1] == "center":
center = imageinfo.find("center")
center.find(key.split("_")[2]).text = str(value)
elif key.split("_")[1] == "corner":
corner = imageinfo.find("corner")
corner.find(key.split("_")[2]).find(key.split("_")[3]).text = str(value)
elif key.split("_")[1] == "QualifyValue":
QualifyValue = imageinfo.find("QualifyValue")
if value =="delete":
element_QualifyValue = list(QualifyValue)
for i in element_QualifyValue:
if i.tag == key.split("_")[2]:
QualifyValue.remove(i)
else:
QualifyValue.find(key.split("_")[2]).text = str(value)
        orthoModel = root.find("processinfo").find("orthoModel")  # write the degree-4 polynomial orbit model
for key, value in self.par_dict2.items():
if key.split("_")[0] == "satalliteOrbitModel":
satalliteOrbitModel = orthoModel.find("satalliteOrbitModel")
satalliteOrbitModel.find(key.split("_")[1]).find(key.split("_")[2]).find(key.split("_")[3]).text = str(value)
elif key.split("_")[0] == "DEM": # 写入dem四个角坐标
DEM= orthoModel.find("DEM")
if key.split("_")[1] == "corner":
corner = DEM.find("corner")
corner.find(key.split("_")[2]).find(key.split("_")[3]).text = str(value)
elif key.split("_")[1] == "widthspace" or key.split("_")[1] == "heightspace":
DEM.find(key.split("_")[1]).text = str(value)
elif key.split("_")[1] == "samplingrate":
orthoModel.find(key.split("_")[1]).text = str(value)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
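# The underscore-delimited dictionary keys above double as XML paths. A minimal
# sketch of that convention (hypothetical key, assuming the model_meta.xml layout
# shipped in this commit):
#   "DEM_corner_topLeft_latitude".split("_") -> ["DEM", "corner", "topLeft", "latitude"]
#   == orthoModel.find("DEM").find("corner").find("topLeft").find("latitude")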
#
# if __name__ == '__main__':
#
#     xml_path = "./model_meta.xml"
#     tem_folder = r"E:\microproduct\测试"
#     image_path = r"E:\microproduct\测试\GF3_MYN_QPSI_011437_E98_HH_AtmosphericDelay.tif"  # input image
#     out_path = os.path.join(tem_folder, "trans_geo_projcs.tif")
#     image_pair = [1, 1, 1, 0]
#     par_dict = CreateDict(image_path, image_pair, out_path).calu_nature()  # build the product-info dictionary
#
#     out_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml")  # output xml path
#     # para_xml_path and par_dict2 (orbit/DEM dictionary) must be prepared as well
#     CreateStadardXmlFile(xml_path, para_xml_path, par_dict, par_dict2, out_xml_path).create_standard_xml()

105
Ortho/config.yaml Normal file
View File

@ -0,0 +1,105 @@
SatelliteOribit: # 轨道起算时间
StartTime:
Value:
'2017-09-15 01:55:21.0000'
format: # 时间格式
"%Y-%m-%d %H:%M:%S.%f"
ReferenceSpheroid:
Re: # 长半轴
6378137
Rp: # 短半轴
6356752.3142451795
we:
0.000072292115
GPS: # GPS 轨道节点 LT
GPSNode_Path:
['level1Product','platform','orbit', 'stateVec']
NodeInfomation_Name: # [时间x坐标y坐标z坐标x速度y速度z速度]
['timeUTC', 'posX', 'posY', 'posZ', 'velX', 'velY', 'velZ']
Time_format:
"%Y-%m-%dT%H:%M:%S.%f"
imageinfo: # 影像信息
ImageBox:
NodePath:
['product','imageinfo','orbit', 'stateVec']
NodeName:
['topLeft','topRight','bottomLeft','bottomRight']
latLon:
["latitude","longitude"]
ImageWidthSpace:
NodePath:
['level1Product','productInfo','imageDataInfo', 'imageRaster','columnSpacing']
ImageHeightSpace:
NodePath:
['level1Product','productInfo','imageDataInfo', 'imageRaster','rowSpacing']
ImageWidth: # 影像宽
NodePath:
['level1Product','productInfo','imageDataInfo', 'imageRaster', 'numberOfColumns']
ImageHeight: # 影像高
NodePath:
['level1Product','productInfo','imageDataInfo', 'imageRaster', 'numberOfRows']
Groundspace:
NodePath:
['level1Product','productInfo','imageDataInfo', 'imageRaster', 'groundRangeResolution' ]
Azspace:
NodePath:
['level1Product','productInfo','imageDataInfo', 'imageRaster', 'azimuthResolution' ]
StartImageTime: # 影像开始时间
NodePath:
['level1Product','productInfo','sceneInfo', 'start', 'timeUTC']
Format:
"%Y-%m-%dT%H:%M:%S.%f"
EndImageTime: # 影像中止时间
NodePath:
['level1Product','productInfo','sceneInfo', 'stop', 'timeUTC']
Format:
"%Y-%m-%dT%H:%M:%S.%f"
CenterImageTime: # 中心像元时间
NodePath:
['product','platform','CenterTime']
Format:
"%Y-%m-%d %H:%M:%S"
CenterFrequency: # 中心频率
NodePath:
['level1Product','instrument','radarParameters', 'centerFrequency']
Format:
"%Y-%m-%d %H:%M:%S"
CenterImagePositon: # 中心像元,可以作为迭代起算点
NodePath:
['product','imageinfo','center']
Value:
['latitude','longitude']
NearRange: # 近斜距
NodePath:
['level1Product','productInfo','sceneInfo','rangeTime', 'firstPixel']
FarRange:
NodePath:
['level1Product','productInfo','sceneInfo','rangeTime', 'lastPixel']
DopplerCentroidCoefficients: # 多普勒质心常数
NodePath:
['product','processinfo','DopplerCentroidCoefficients']
DopplerCentroidCoefficients_Name:
['d0','d1','d2','d3','d4']
DopplerParametersReferenceTime:
NodePath:
['level1Product','processing',"geometry", 'dopplerRate', 'dopplerRatePolynomial', 'referencePoint']
DopplerRate:
NodePath:
['level1Product','processing',"geometry", 'dopplerRate', 'dopplerRatePolynomial', 'coefficient']
sensor:
PRF: # 脉冲重复频率
NodePath:
['level1Product','instrument','settings', 'settingRecord', 'PRF']
bandWidth: # 信号带宽,计算距离向分辨率 用于距离向分辨率
NodePath:
['product','sensor','waveParams','wave','bandWidth']
lambda: # 波长
NodePath:
['product','sensor','lamda']
Fs: # 等效采样频率 eqvFs
NodePath:
['product','imageinfo','eqvFs']
LightSpeed:
299792458
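The NodePath lists above are resolved tag by tag against the product metadata XML. A minimal lookup sketch, assuming the YAML has been loaded into a dict cfg with PyYAML (file names here are illustrative):

from xml.etree.ElementTree import ElementTree

def find_node(root, node_path):
    # node_path[0] is the document root tag; walk the remaining tags downward
    node = root
    for tag in node_path[1:]:
        node = node.find(tag)
    return node

# tree = ElementTree(); tree.parse('meta.xml'); root = tree.getroot()
# width = int(find_node(root, cfg['imageinfo']['ImageWidth']['NodePath']).text)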

98
Ortho/config1.yaml Normal file
View File

@ -0,0 +1,98 @@
SatelliteOribit: # 轨道起算时间
StartTime:
Value:
'2017-09-15 01:55:21.0000'
format: # 时间格式
"%Y-%m-%d %H:%M:%S.%f"
ReferenceSpheroid:
Re: # 长半轴
6378137
Rp: # 短半轴
6356752.3142451795
we:
0.000072292115
GPS: # GPS 轨道节点
GPSNode_Path:
['product','GPS','GPSParam']
NodeInfomation_Name: # [时间x坐标y坐标z坐标x速度y速度z速度]
['TimeStamp', 'xPosition', 'yPosition', 'zPosition', 'xVelocity', 'yVelocity', 'zVelocity']
Time_format:
"%Y-%m-%d %H:%M:%S"
imageinfo: # 影像信息
ImageBox:
NodePath:
['product','imageinfo','corner']
NodeName:
['topLeft','topRight','bottomLeft','bottomRight']
latLon:
["latitude","longitude"]
ImageWidthSpace:
NodePath:
['product','imageinfo','widthspace']
ImageHeightSpace:
NodePath:
['product','imageinfo','heightspace']
ImageWidth: # 影像宽
NodePath:
['product','imageinfo','width']
ImageHeight: # 影像高
NodePath:
['product','imageinfo','height']
StartImageTime: # 影像开始时间
NodePath:
['product','imageinfo','imagingTime','start']
Format:
"%Y-%m-%d %H:%M:%S.%f"
EndImageTime: # 影像中止时间
NodePath:
['product','imageinfo','imagingTime','end']
Format:
"%Y-%m-%d %H:%M:%S.%f"
CenterImageTime: # 中心像元时间
NodePath:
['product','platform','CenterTime']
Format:
"%Y-%m-%d %H:%M:%S"
CenterImagePositon: # 中心像元,可以作为迭代起算点
NodePath:
['product','imageinfo','center']
Value:
['latitude','longitude']
NearRange: # 近斜距
NodePath:
['product','imageinfo','nearRange']
DopplerCentroidCoefficients: # 多普勒质心常数
NodePath:
['product','processinfo','DopplerCentroidCoefficients']
DopplerCentroidCoefficients_Name:
['d0','d1','d2','d3','d4']
DopplerParametersReferenceTime:
NodePath:
['product','processinfo',"DopplerParametersReferenceTime"]
ReferenceRange:
NodePath:
['product','imageinfo','refRange']
incidenceAngle: # 入射角
NearRange: # 近入射角
NodePath:
['product','processinfo','incidenceAngleNearRange']
FarRange: # 远入射角
NodePath:
['product','processinfo','incidenceAngleFarRange']
sensor:
PRF: # 脉冲重复频率
NodePath:
['product','imageinfo','eqvPRF']
bandWidth: # 信号带宽,计算距离向分辨率 用于距离向分辨率
NodePath:
['product','sensor','waveParams','wave','bandWidth']
lambda: # 波长
NodePath:
['product','sensor','lamda']
Fs: # 等效采样频率 eqvFs
NodePath:
['product','imageinfo','eqvFs']
LightSpeed:
299792458

330
Ortho/geo_rpc.py Normal file
View File

@ -0,0 +1,330 @@
# file: geo_rpc.py
"""
RPC model parsers, localization, and projection
"""
import numpy as np
from osgeo import gdal
# raised when the maximum number of localization iterations is exceeded
class MaxLocalizationIterationsError(Exception):
"""
Custom rpcm Exception.
"""
pass
def apply_poly(poly, x, y, z):
"""
Evaluates a 3-variables polynom of degree 3 on a triplet of numbers.
    Factors the common cubic-polynomial pattern into a single helper.
Args:
poly: list of the 20 coefficients of the 3-variate degree 3 polynom,
ordered following the RPC convention.
x, y, z: triplet of floats. They may be numpy arrays of same length.
Returns:
the value(s) of the polynom on the input point(s).
"""
out = 0
out += poly[0]
out += poly[1]*y + poly[2]*x + poly[3]*z
out += poly[4]*y*x + poly[5]*y*z +poly[6]*x*z
out += poly[7]*y*y + poly[8]*x*x + poly[9]*z*z
out += poly[10]*x*y*z
out += poly[11]*y*y*y
out += poly[12]*y*x*x + poly[13]*y*z*z + poly[14]*y*y*x
out += poly[15]*x*x*x
out += poly[16]*x*z*z + poly[17]*y*y*z + poly[18]*x*x*z
out += poly[19]*z*z*z
return out
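# Quick check of the coefficient ordering (illustrative values only): with
# poly = [1, 0, 2, 0, ..., 0] the polynomial reduces to 1 + 2*x, so
# apply_poly([1.0, 0.0, 2.0] + [0.0]*17, x=3.0, y=0.0, z=0.0) == 7.0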
def apply_rfm(num, den, x, y, z):
"""
Evaluates a Rational Function Model (rfm), on a triplet of numbers.
    Divides the 20-coefficient numerator polynomial by the 20-coefficient denominator.
Args:
num: list of the 20 coefficients of the numerator
den: list of the 20 coefficients of the denominator
All these coefficients are ordered following the RPC convention.
x, y, z: triplet of floats. They may be numpy arrays of same length.
Returns:
the value(s) of the rfm on the input point(s).
"""
return apply_poly(num, x, y, z) / apply_poly(den, x, y, z)
def rpc_from_geotiff(geotiff_path):
"""
Read the RPC coefficients from a GeoTIFF file and return an RPCModel object.
    Returns the GDAL dataset of the image together with its RPCModel.
Args:
geotiff_path (str): path or url to a GeoTIFF file
Returns:
instance of the rpc_model.RPCModel class
"""
# with rasterio.open(geotiff_path, 'r') as src:
#
dataset = gdal.Open(geotiff_path, gdal.GA_ReadOnly)
rpc_dict = dataset.GetMetadata("RPC")
    # return both the dataset and the RPC model
return dataset, RPCModel(rpc_dict,'geotiff')
def parse_rpc_file(rpc_file):
    # rpc_file: absolute path of the .rpc file
    # rpc_dict: dictionary of the 16 keywords in the RPC metadata domain
    # reference: http://geotiff.maptools.org/rpc_prop.html
    # https://www.osgeo.cn/gdal/development/rfc/rfc22_rpc.html
    rpc_dict = {}
    with open(rpc_file) as f:
        text = f.read()
    # RPC keywords as written in the .rpc file
    words = ['errBias', 'errRand', 'lineOffset', 'sampOffset', 'latOffset',
             'longOffset', 'heightOffset', 'lineScale', 'sampScale', 'latScale',
             'longScale', 'heightScale', 'lineNumCoef', 'lineDenCoef', 'sampNumCoef', 'sampDenCoef',]
    # corresponding RPC keywords used by GDAL
    keys = ['ERR_BIAS', 'ERR_RAND', 'LINE_OFF', 'SAMP_OFF', 'LAT_OFF', 'LONG_OFF',
            'HEIGHT_OFF', 'LINE_SCALE', 'SAMP_SCALE', 'LAT_SCALE',
            'LONG_SCALE', 'HEIGHT_SCALE', 'LINE_NUM_COEFF', 'LINE_DEN_COEFF',
            'SAMP_NUM_COEFF', 'SAMP_DEN_COEFF']
    for old, new in zip(words, keys):
        text = text.replace(old, new)
    # split on ';\n'
    text_list = text.split(';\n')
    # drop the useless header/footer lines
    text_list = text_list[3:-2]
    #
    text_list[0] = text_list[0].split('\n')[1]
    # strip tabs, newlines and spaces
    text_list = [item.strip('\t').replace('\n', '').replace(' ', '') for item in text_list]
    for item in text_list:
        # split on '='
        key, value = item.split('=')
        # strip the surrounding parentheses '()'
        if '(' in value:
            value = value.replace('(', '').replace(')', '')
        rpc_dict[key] = value
    for key in keys[:12]:
        # prepend '+' to positive values
        if not rpc_dict[key].startswith('-'):
            rpc_dict[key] = '+' + rpc_dict[key]
        # append units to the normalization terms and error flags
        if key in ['LAT_OFF', 'LONG_OFF', 'LAT_SCALE', 'LONG_SCALE']:
            rpc_dict[key] = rpc_dict[key] + ' degrees'
        if key in ['LINE_OFF', 'SAMP_OFF', 'LINE_SCALE', 'SAMP_SCALE']:
            rpc_dict[key] = rpc_dict[key] + ' pixels'
        if key in ['ERR_BIAS', 'ERR_RAND', 'HEIGHT_OFF', 'HEIGHT_SCALE']:
            rpc_dict[key] = rpc_dict[key] + ' meters'
    # handle the rational-function coefficient lists
    for key in keys[-4:]:
        values = []
        for item in rpc_dict[key].split(','):
            if not item.startswith('-'):
                values.append('+' + item)
            else:
                values.append(item)
        rpc_dict[key] = ' '.join(values)
    return rpc_dict
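# A minimal usage sketch (hypothetical paths): the keys above follow GDAL's RPC
# metadata domain, so the parsed dictionary can be attached to a GeoTIFF directly.
# ds = gdal.Open('scene.tif', gdal.GA_Update)
# ds.SetMetadata(parse_rpc_file('scene.rpc'), 'RPC')
# ds = None  # flush to disk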
def read_rpc_file(rpc_file):
"""
Read RPC from a RPC_txt file and return a RPCmodel
    Reads an RPC model directly from a standalone .rpc text file.
Args:
rpc_file: RPC sidecar file path
Returns:
dictionary read from the RPC file, or an empty dict if fail
"""
rpc = parse_rpc_file(rpc_file)
return RPCModel(rpc)
class RPCModel:
def __init__(self, d, dict_format="geotiff"):
"""
Args:
d (dict): dictionary read from a geotiff file with
rasterio.open('/path/to/file.tiff', 'r').tags(ns='RPC'),
or from the .__dict__ of an RPCModel object.
dict_format (str): format of the dictionary passed in `d`.
Either "geotiff" if read from the tags of a geotiff file,
or "rpcm" if read from the .__dict__ of an RPCModel object.
"""
if dict_format == "geotiff":
self.row_offset = float(d['LINE_OFF'][0:d['LINE_OFF'].rfind(' ')])
self.col_offset = float(d['SAMP_OFF'][0:d['SAMP_OFF'].rfind(' ')])
self.lat_offset = float(d['LAT_OFF'][0:d['LAT_OFF'].rfind(' ')])
self.lon_offset = float(d['LONG_OFF'][0:d['LONG_OFF'].rfind(' ')])
self.alt_offset = float(d['HEIGHT_OFF'][0:d['HEIGHT_OFF'].rfind(' ')])
self.row_scale = float(d['LINE_SCALE'][0:d['LINE_SCALE'].rfind(' ')])
self.col_scale = float(d['SAMP_SCALE'][0:d['SAMP_SCALE'].rfind(' ')])
self.lat_scale = float(d['LAT_SCALE'][0:d['LAT_SCALE'].rfind(' ')])
self.lon_scale = float(d['LONG_SCALE'][0:d['LONG_SCALE'].rfind(' ')])
self.alt_scale = float(d['HEIGHT_SCALE'][0:d['HEIGHT_SCALE'].rfind(' ')])
self.row_num = list(map(float, d['LINE_NUM_COEFF'].split()))
self.row_den = list(map(float, d['LINE_DEN_COEFF'].split()))
self.col_num = list(map(float, d['SAMP_NUM_COEFF'].split()))
self.col_den = list(map(float, d['SAMP_DEN_COEFF'].split()))
if 'LON_NUM_COEFF' in d:
self.lon_num = list(map(float, d['LON_NUM_COEFF'].split()))
self.lon_den = list(map(float, d['LON_DEN_COEFF'].split()))
self.lat_num = list(map(float, d['LAT_NUM_COEFF'].split()))
self.lat_den = list(map(float, d['LAT_DEN_COEFF'].split()))
elif dict_format == "rpcm":
self.__dict__ = d
else:
raise ValueError(
"dict_format '{}' not supported. "
"Should be {{'geotiff','rpcm'}}".format(dict_format)
)
def projection(self, lon, lat, alt):
"""
Convert geographic coordinates of 3D points into image coordinates.
        Forward projection: from geographic coordinates to image coordinates.
Args:
lon (float or list): longitude(s) of the input 3D point(s)
lat (float or list): latitude(s) of the input 3D point(s)
alt (float or list): altitude(s) of the input 3D point(s)
Returns:
float or list: horizontal image coordinate(s) (column index, ie x)
float or list: vertical image coordinate(s) (row index, ie y)
"""
nlon = (np.asarray(lon) - self.lon_offset) / self.lon_scale
nlat = (np.asarray(lat) - self.lat_offset) / self.lat_scale
nalt = (np.asarray(alt) - self.alt_offset) / self.alt_scale
col = apply_rfm(self.col_num, self.col_den, nlat, nlon, nalt)
row = apply_rfm(self.row_num, self.row_den, nlat, nlon, nalt)
col = col * self.col_scale + self.col_offset
row = row * self.row_scale + self.row_offset
return col, row
def localization(self, col, row, alt, return_normalized=False):
"""
Convert image coordinates plus altitude into geographic coordinates.
        Back-projection: from image coordinates to geographic coordinates.
Args:
col (float or list): x image coordinate(s) of the input point(s)
row (float or list): y image coordinate(s) of the input point(s)
alt (float or list): altitude(s) of the input point(s)
Returns:
float or list: longitude(s)
float or list: latitude(s)
"""
ncol = (np.asarray(col) - self.col_offset) / self.col_scale
nrow = (np.asarray(row) - self.row_offset) / self.row_scale
nalt = (np.asarray(alt) - self.alt_offset) / self.alt_scale
if not hasattr(self, 'lat_num'):
lon, lat = self.localization_iterative(ncol, nrow, nalt)
else:
lon = apply_rfm(self.lon_num, self.lon_den, nrow, ncol, nalt)
lat = apply_rfm(self.lat_num, self.lat_den, nrow, ncol, nalt)
if not return_normalized:
lon = lon * self.lon_scale + self.lon_offset
lat = lat * self.lat_scale + self.lat_offset
return lon, lat
def localization_iterative(self, col, row, alt):
"""
Iterative estimation of the localization function (image to ground),
for a list of image points expressed in image coordinates.
        Iterative solver used by the back-projection.
Args:
col, row: normalized image coordinates (between -1 and 1)
alt: normalized altitude (between -1 and 1) of the corresponding 3D
point
Returns:
lon, lat: normalized longitude and latitude
Raises:
MaxLocalizationIterationsError: if the while loop exceeds the max
number of iterations, which is set to 100.
"""
# target point: Xf (f for final)
Xf = np.vstack([col, row]).T
# use 3 corners of the lon, lat domain and project them into the image
# to get the first estimation of (lon, lat)
# EPS is 2 for the first iteration, then 0.1.
        lon = -col ** 0  # == -(col ** 0): a vector of -1s, the initial guess
        lat = -col ** 0
EPS = 2
x0 = apply_rfm(self.col_num, self.col_den, lat, lon, alt)
y0 = apply_rfm(self.row_num, self.row_den, lat, lon, alt)
x1 = apply_rfm(self.col_num, self.col_den, lat, lon + EPS, alt)
y1 = apply_rfm(self.row_num, self.row_den, lat, lon + EPS, alt)
x2 = apply_rfm(self.col_num, self.col_den, lat + EPS, lon, alt)
y2 = apply_rfm(self.row_num, self.row_den, lat + EPS, lon, alt)
n = 0
while not np.all((x0 - col) ** 2 + (y0 - row) ** 2 < 1e-18):
if n > 100:
raise MaxLocalizationIterationsError("Max localization iterations (100) exceeded")
X0 = np.vstack([x0, y0]).T
X1 = np.vstack([x1, y1]).T
X2 = np.vstack([x2, y2]).T
e1 = X1 - X0
e2 = X2 - X0
u = Xf - X0
# project u on the base (e1, e2): u = a1*e1 + a2*e2
# the exact computation is given by:
# M = np.vstack((e1, e2)).T
# a = np.dot(np.linalg.inv(M), u)
# but I don't know how to vectorize this.
# Assuming that e1 and e2 are orthogonal, a1 is given by
# <u, e1> / <e1, e1>
num = np.sum(np.multiply(u, e1), axis=1)
den = np.sum(np.multiply(e1, e1), axis=1)
a1 = np.divide(num, den).squeeze()
num = np.sum(np.multiply(u, e2), axis=1)
den = np.sum(np.multiply(e2, e2), axis=1)
a2 = np.divide(num, den).squeeze()
# use the coefficients a1, a2 to compute an approximation of the
# point on the gound which in turn will give us the new X0
lon += a1 * EPS
lat += a2 * EPS
# update X0, X1 and X2
EPS = .1
x0 = apply_rfm(self.col_num, self.col_den, lat, lon, alt)
y0 = apply_rfm(self.row_num, self.row_den, lat, lon, alt)
x1 = apply_rfm(self.col_num, self.col_den, lat, lon + EPS, alt)
y1 = apply_rfm(self.row_num, self.row_den, lat, lon + EPS, alt)
x2 = apply_rfm(self.col_num, self.col_den, lat + EPS, lon, alt)
y2 = apply_rfm(self.row_num, self.row_den, lat + EPS, lon, alt)
n += 1
return lon, lat
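# Round-trip sketch (hypothetical file path): localization() inverts projection()
# up to the iteration tolerance used above.
# ds, rpc = rpc_from_geotiff('scene_with_rpc.tif')
# col, row = rpc.projection(lon=120.8, lat=30.2, alt=0.0)
# lon2, lat2 = rpc.localization(col, row, alt=0.0)   # ~ (120.8, 30.2)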

133
Ortho/model_meta.xml Normal file
View File

@ -0,0 +1,133 @@
<product>
<productinfo>
<NominalResolution desc="分辨率"> </NominalResolution>
<WidthInMeters> </WidthInMeters>
<productLevel desc="产品等级"> </productLevel>
<productType>GEC</productType>
<productFormat>TIF</productFormat>
<productGentime> </productGentime>
<unit desc="单位"> </unit>
<MinValue desc="最小值"> </MinValue>
<MaxValue desc="最大值"> </MaxValue>
<NoDataValue desc="无数据值">nan</NoDataValue>
<datastructure desc="数据组织方式">BSQ</datastructure>
<imagebit> </imagebit>
<imageinfo desc="产品影像信息">
<EarthModel> </EarthModel>
<ProjectModel> </ProjectModel>
<center>
<latitude unit="degree"> </latitude>
<longitude unit="degree"> </longitude>
</center>
<corner>
<topLeft>
<latitude unit="degree"> </latitude>
<longitude unit="degree"> </longitude>
</topLeft>
<topRight>
<latitude unit="degree"> </latitude>
<longitude unit="degree"> </longitude>
</topRight>
<bottomLeft>
<latitude unit="degree"> </latitude>
<longitude unit="degree"> </longitude>
</bottomLeft>
<bottomRight>
<latitude unit="degree"> </latitude>
<longitude unit="degree"> </longitude>
</bottomRight>
</corner>
<width> </width>
<height> </height>
<widthspace desc="宽度分辨率"> </widthspace>
<heightspace desc="高度分辨率"> </heightspace>
<QualifyValue desc="质量标识">
<HH> </HH>
<HV> </HV>
<VH> </VH>
<VV> </VV>
</QualifyValue>
</imageinfo>
</productinfo>
<processinfo desc="算法处理信息">
<orthoModel desc="正射校正方法">
<orthoModelName>模拟影像法</orthoModelName>
<simulationModel>常数累加法</simulationModel>
<satalliteOrbitModel desc="卫星轨道模型">
<ModelName>四次多项式模型</ModelName>
<EarthModel>WGS84</EarthModel>
<parameter desc="四次多项式模型">
<X>
<a0> </a0>
<a1> </a1>
<a2> </a2>
<a3> </a3>
<a4> </a4>
</X>
<Y>
<b0> </b0>
<b1> </b1>
<b2> </b2>
<b3> </b3>
<b4> </b4>
</Y>
<Z>
<c0> </c0>
<c1> </c1>
<c2> </c2>
<c3> </c3>
<c4> </c4>
</Z>
<Vx>
<d0> </d0>
<d1> </d1>
<d2> </d2>
<d3> </d3>
<d4> </d4>
</Vx>
<Vy>
<e0> </e0>
<e1> </e1>
<e2> </e2>
<e3> </e3>
<e4> </e4>
</Vy>
<Vz>
<f0> </f0>
<f1> </f1>
<f2> </f2>
<f3> </f3>
<f4> </f4>
</Vz>
</parameter>
</satalliteOrbitModel>
<DEM desc="DEM的参数">
<DEMProduct desc="DEM产品的来源">NULL</DEMProduct>
<DEMDate desc="DEM对应的时间">NULL</DEMDate>
<heightspace desc="行分辨率"> </heightspace>
<widthspace desc="列分辨率"> </widthspace>
<corner desc="DEM的范围">
<topLeft>
<latitude> </latitude>
<longitude> </longitude>
</topLeft>
<topRight>
<latitude> </latitude>
<longitude> </longitude>
</topRight>
<bottomLeft>
<latitude> </latitude>
<longitude> </longitude>
</bottomLeft>
<bottomRight>
<latitude> </latitude>
<longitude> </longitude>
</bottomRight>
</corner>
</DEM>
<samplingrate desc="DEM的重采样率"> </samplingrate>
<MatchModel desc="匹配方法">标准相关匹配</MatchModel>
</orthoModel>
</processinfo>
</product>

267
Ortho/orthProcess.ipynb Normal file

File diff suppressed because one or more lines are too long

89
Ortho/packing.spec Normal file
View File

@ -0,0 +1,89 @@
# -*- mode: python ; coding: utf-8 -*-
import sys
import shutil
import os
import tarfile
# folder traversal depth (recursion limit)
sys.setrecursionlimit(5000)
block_cipher = None
####### begin: pre-packaging steps ##############
# replace ./tool under the current path with the ../tool directory
# test code
cwdpath = os.getcwd()
tool_path = ''
src = '../tool'
des = os.path.join(cwdpath, "tool")
targz_path = os.path.join(cwdpath, "tool.tar.gz")
# recreate the destination directory
if os.path.exists(des):
    if os.path.isdir(des):
        shutil.rmtree(des)
os.makedirs(des)
# pack ../tool into tool.tar.gz
dir = os.path.split(targz_path)[0]
if os.path.exists(dir) is False:
    os.makedirs(dir)
with tarfile.open(targz_path, "w:gz") as tar:
    tar.add(src, arcname=os.path.basename(src))
# unpack it next to the spec file
t = tarfile.open(targz_path)
t.extractall(path=cwdpath)
# remove the temporary archive
#os.remove(targz_path)
# derive the executable name from the *Main.py script
main_name = ''
for name in os.listdir(cwdpath):
    if 'Main.py' in name:
        main_name = name
exe_name = main_name.split('.')[0][:-4] + '-C-SAR-V2.0'
####### end: pre-packaging steps ##############
####### begin: pyinstaller arguments ##############
a = Analysis(['OrthoMain.py',
'./tool/algorithm/algtools/ScatteringAuxData.py',
'./tool/algorithm/algtools/CoordinateTransformation.py',
'./tool/algorithm/algtools/DEMJoint.py',
'./tool/algorithm/algtools/logHandler.py',
'./tool/algorithm/algtools/PreProcess.py',
'./tool/algorithm/algtools/RieveFilter.py',
'./tool/algorithm/algtools/ROIAlg.py',
'./tool/algorithm/block/blockprocess.py',
'./tool/algorithm/image/ImageHandle.py',
'./tool/algorithm/xml/AlgXmlHandle.py',
'./tool/algorithm/xml/CreatMetafile.py',
'./tool/config/ConfigeHandle.py',
'./tool/logs/logHandler.py',],
pathex=[cwdpath],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name= 'Ortho-C-SAR-V2.0',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
####### end: pyinstaller arguments #################

60
Ortho/product.xml Normal file
View File

@ -0,0 +1,60 @@
<Root>
<ProductBasicInfo>
<ProductName>正射校正</ProductName>
<ProductIdentifier>Ortho</ProductIdentifier>
<ProductLevel>LEVEL3</ProductLevel>
<ProductResolution> </ProductResolution>
<ProductDate> </ProductDate>
<ProductFormat> </ProductFormat>
<CompressionMethod> </CompressionMethod>
<ProductSize> </ProductSize>
<SpatialCoverageInformation>
<TopLeftLatitude> </TopLeftLatitude>
<TopLeftLongitude> </TopLeftLongitude>
<TopRightLatitude> </TopRightLatitude>
<TopRightLongitude> </TopRightLongitude>
<BottomRightLatitude> </BottomRightLatitude>
<BottomRightLongitude> </BottomRightLongitude>
<BottomLeftLatitude> </BottomLeftLatitude>
<BottomLeftLongitude> </BottomLeftLongitude>
<CenterLatitude> </CenterLatitude>
<CenterLongitude> </CenterLongitude>
</SpatialCoverageInformation>
<TimeCoverageInformation>
<StartTime> </StartTime>
<EndTime> </EndTime>
<CenterTime> </CenterTime>
</TimeCoverageInformation>
<CoordinateReferenceSystemInformation>
<MapProjection> </MapProjection>
<EarthEllipsoid> </EarthEllipsoid>
<ZoneNo> </ZoneNo>
</CoordinateReferenceSystemInformation>
<MetaInfo>
<Unit> </Unit>
<UnitDes> </UnitDes>
</MetaInfo>
</ProductBasicInfo>
<ProductProductionInfo>
<DataSources number="1">
<DataSource>
<Satellite> </Satellite>
<Sensor> </Sensor>
</DataSource>
</DataSources>
<ObservationGeometry>
<SatelliteAzimuth> </SatelliteAzimuth>
<SatelliteRange> </SatelliteRange>
</ObservationGeometry>
<BandSelection>1</BandSelection>
<DataSourceDescription>None</DataSourceDescription>
<DataSourceProcessingDescription>参考产品介绍PDF</DataSourceProcessingDescription>
<ProductionDate> </ProductionDate>
<AuxiliaryDataDescription> </AuxiliaryDataDescription>
</ProductProductionInfo>
<ProductPublishInfo>
<Processor>德清</Processor>
<DistributionUnit> </DistributionUnit>
<ContactInformation> </ContactInformation>
</ProductPublishInfo>
</Root>

View File

@ -0,0 +1,47 @@
''' Flat-earth (leveling-effect) phase removal.
The per-pixel imaging time is a required input, so either the original SLC must be
provided or the imaging instants must be preserved in the SLC.
Model assumptions:
1. The input must already be orthorectified, otherwise it cannot be matched against the existing DEM.
2. The development example uses GF-3; its ortho model is RPC-based, so the corresponding imaging times must be computed.
'''
import os
import numpy as np
# import gdal
import math
class DeLevelingEffect:
    ''' Flat-earth (leveling) effect removal.
    Formula:
        Phi = (4*pi/lambda_) * (r1 - r2)
        lambda_: wavelength
        pi = 3.14159265358979323846264338327950288
        r1: slant range of the master image
        r2: slant range of the slave image
    '''
    @staticmethod
    def LevelingEffect(lamda_, r1, r2, pi=3.14159265358979323846264338327950288):
        '''Compute the flat-earth phase.
        args:
            lamda_: double, wavelength
            r1: shape nx1, slant range of the master image
            r2: shape nx1, slant range of the slave image
            pi: double, circle constant
        return:
            Phi: shape nx1, flat-earth phase
        '''
        Phi = (4*pi/lamda_)*(r1-r2)
        return Phi
    @staticmethod
    def CalSlantDistance(Rs_Salatellite, Rs_SeaLevel):
        '''Compute slant ranges.
        args:
            Rs_Salatellite: nx3 (x, y, z), satellite position at the corresponding instants
            Rs_SeaLevel: nx3 (x, y, z), ground (sea-level) coordinates
        return:
            SlantDistance: nx1, slant range
        '''
        SlantDistance = np.sqrt(np.sum((Rs_Salatellite-Rs_SeaLevel)**2, axis=1)).reshape(-1, 1)
        return SlantDistance
# build the appropriate image information from the satellite orbit data
pass
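# A minimal numeric sketch of the two helpers above (illustrative values;
# C-band wavelength ~0.0556 m assumed):
# sat = np.array([[7.0e6, 0.0, 0.0]])     # satellite position (m)
# gp  = np.array([[6.371e6, 0.0, 0.0]])   # sea-level point (m)
# r1 = DeLevelingEffect.CalSlantDistance(sat, gp)
# r2 = r1 + 0.01                          # 1 cm slant-range difference
# phi = DeLevelingEffect.LevelingEffect(0.0556, r1, r2)  # ~ -2.26 rad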

File diff suppressed because it is too large

24
Ortho/test/ceshiL.py Normal file
View File

@ -0,0 +1,24 @@
"""
@Project microproduct
@File ceshiL.py
@Function test script (Lambert-W inversion check)
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import numpy as np
from scipy.special import lambertw
H_arr=np.array([1,2])
array_angle=np.array([1,0.5])
a = 0.5 * H_arr * H_arr
b = -0.5 * np.sin(array_angle)
y = 2
a1 = 2 * lambertw(-b * np.sqrt(y / a) / 2) / b
print(a1)
pass

19
Ortho/test/test.py Normal file
View File

@ -0,0 +1,19 @@
import os
import sys
import cv2
import numpy as np
img1 = cv2.imread(r'D:\MicroSAR\C-SAR\Ortho\Ortho\Temporary\dem_rcs.jpg', 0)
img2 = cv2.imread(r'D:\MicroSAR\C-SAR\Ortho\Ortho\Temporary\sar_rcs.jpg', 0)
def cv_show(name,img):
cv2.imshow(name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher(crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None,flags=2)
cv_show('img3',img3)

30
Ortho/test/test1.py Normal file
View File

@ -0,0 +1,30 @@
# standalone version: 'data' is the image under test; random data stands in here
import numpy as np
from multiprocessing import Pool

def cal(base_block, i, j):
    std = np.std(base_block)
    dist = np.max(base_block) - np.min(base_block)
    return [i, j, std, dist]

if __name__ == '__main__':
    data = np.random.rand(200, 200).astype(np.float32)  # placeholder input image
    height, width = data.shape
    base_std = np.zeros((height, width), dtype=np.float32)
    base_dist = np.zeros((height, width), dtype=np.float32)
    plist = []
    with Pool() as pool:
        for i in range(height):
            print("\r", i, "/", height, " ", end="")
            if i < 50 or i > height - 51:
                continue
            for j in range(width):
                if j < 50 or j > width - 51:
                    continue
                base_block = data[i-50:i+50, j-50:j+50]
                plist.append(pool.apply_async(cal, args=(base_block, i, j,)))
        pool.close()
        pool.join()
    for p in plist:
        [i, j, std, dist] = p.get()
        base_std[i, j] = std
        base_dist[i, j] = dist
    base_std.astype(np.float32).tofile(r"D:\MicroSAR\C-SAR\Ortho\Ortho\Temporary\HH_std.bin")
    base_dist.astype(np.float32).tofile(r"D:\MicroSAR\C-SAR\Ortho\Ortho\Temporary\HH_dist.bin")
    print(base_dist.shape, base_dist.dtype)

113
Ortho/test/testMatch.py Normal file
View File

@ -0,0 +1,113 @@
import logging
# from re import S
# from oneOrthoAuxData import OrthoAuxData
# from OrthoImage import ImageHandler
from tool.algorithm.image.ImageHandle import ImageHandler
import tarfile
# from OrthoDB import ManageAlgXML, CheckSource
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource # 导入xml文件读取与检查文件
from OrthoAlg import IndirectOrthorectification, DEMProcess,ImageMatchClass
# from logHandler import LogHandler
from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.xml.CreatMetafile import CreateMetafile
from OrthoXmlInfo import CreateDict, CreateStadardXmlFile
from osgeo import gdal, osr
import os
import glob
import gc
import datetime
import shutil
import sys
import cv2
ori_sar_path="D:\MicroWorkspace\C-SAR\Ortho\Temporary\TestSAR\GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_HH_L10003923848.jpg"
sim_sar_path="D:\MicroWorkspace\C-SAR\Ortho\Temporary\TestSim\sim_img_sum.jpg"
work_sapce_path="D:\MicroWorkspace\C-SAR\Ortho\Temporary"
'''
import matplotlib.pyplot as plt
img1 = cv2.imread(ori_sar_path, 0)
img2 = cv2.imread(sim_sar_path, 0)
def cv_show(name,img):
cv2.imshow(name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher(crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None,flags=2)
cv2.imwrite(work_sapce_path,img3)
'''
# match the real SAR image against the simulated one
def ImageMatch(ori_sar_path, sim_sar_path, work_sapce_path):
    ori_sar = gdal.Open(ori_sar_path)
    sim_sar = gdal.Open(sim_sar_path)
    # image sizes
    ori_height = ori_sar.RasterYSize
    ori_width = ori_sar.RasterXSize
    sim_height = sim_sar.RasterYSize
    sim_width = sim_sar.RasterXSize
    # block-wise matching
    ori_sar_arr = ori_sar.GetRasterBand(1).ReadAsArray(0, 0, ori_width, ori_height)  # real image
    ori_img = (255 * ori_sar_arr / np.max(ori_sar_arr)).astype(np.uint8)
    sim_sar_arr = np.log(sim_sar.GetRasterBand(1).ReadAsArray(0, 0, sim_width, sim_height) + 1)  # simulated image
    sim_img = (1 + 253 * sim_sar_arr / np.max(sim_sar_arr)).astype(np.uint8)
    # locate the real image inside the simulated one
    res = cv.matchTemplate(sim_img, ori_img, cv.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
    top_left = max_loc
    # clamp the matched window to the simulated image extent
    min_w = top_left[0] if top_left[0] > 0 else 0
    min_h = top_left[1] if top_left[1] > 0 else 0
    max_w = top_left[0] + ori_width if top_left[0] + ori_width < sim_width else sim_width
    max_h = top_left[1] + ori_height if top_left[1] + ori_height < sim_height else sim_height
    # crop
    sim_clip = sim_img[min_h:max_h, min_w:max_w]
    for i in range(0, ori_img.shape[0], 10):
        for j in range(0, ori_img.shape[1], 10):
            pass
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread(sim_sar_path,0)
img2 = img.copy()
template = cv.imread(ori_sar_path,0)
w, h = template.shape[::-1]
# all six template-matching comparison methods
methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
i=0
for meth in methods:
img = img2.copy()
method = eval(meth)
# apply template matching
res = cv.matchTemplate(img,template,method)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
# for TM_SQDIFF and TM_SQDIFF_NORMED the minimum location is the best match
if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv.rectangle(img,top_left, bottom_right, 255, 2)
cv.imwrite(os.path.join(work_sapce_path,"matchresult_{}.jpg".format(i)),img)
i=i+1

8
Ortho/test/testRPC.py Normal file
View File

@ -0,0 +1,8 @@
import geo_rpc
import os
import numpy as np
from osgeo import gdal
rpc = geo_rpc.read_rpc_file(r'D:\MicroSAR\C-SAR\Ortho\Temporary\unpack\GF3B_MYC_FSI_002233_E120.8_N30.2_20220426_L1A_VHVV_L10000030053\GF3B_MYC_FSI_002233_E120.8_N30.2_20220426_L1A_VV_L10000030053.rpb')
lon, lat = rpc.localization(19820, 14284, 0)  # args are (col, row, alt); returns (lon, lat)
print(lon, lat)

Binary file not shown.

View File

@ -0,0 +1,166 @@
#
# WMC model-fitting library
#
import cython
cimport cython  # must be imported
import numpy as np
cimport numpy as np
from libc.math cimport pi
from scipy.optimize import leastsq
import random
import logging
logger = logging.getLogger("mylog")
def WMCModel(param_arr,sample_lai,sample_soil,sample_inc,sample_sigma):
""" WMC模型 增加 归一化植被指数
Args:
param_arr (np.ndarray):
sample_lai (double):
sample_soil (double):
sample_inc (double):
sample_sigma (double): 线
Returns:
double:
"""
    # map the parameters so the model is easy to modify
    A, B, C, D, M, N = param_arr  # edit the model here
V_lai=sample_lai
#V_lai=E*sample_lai+F
exp_gamma=np.exp(-2*B*((V_lai*D+C))*(1/np.cos(sample_inc)))
sigma_soil=M*sample_soil+N
sigma_veg=A*((V_lai))*np.cos(sample_inc)
f_veg=1
result=sigma_veg*(1-exp_gamma)+sigma_soil*exp_gamma-sample_sigma
return result
def train_WMCmodel(lai_water_inc_sigma_list,params_X0,train_err_image_path,draw_flag=True):
""" 训练模型参数
Args:
lai_waiter_inc_sigma_list (list): 使
"""
def f(X):
eqs=[]
for lai_water_inc_sigma_item in lai_water_inc_sigma_list:
sample_lai=lai_water_inc_sigma_item[4]
sample_sigma=lai_water_inc_sigma_item[5] # 5: csv_sigma, 8:tiff_sigma
sample_soil=lai_water_inc_sigma_item[6]
sample_inc=lai_water_inc_sigma_item[7]
FVC=lai_water_inc_sigma_item[8]
eqs.append(WMCModel(X,sample_lai,sample_soil,sample_inc,sample_sigma))
return eqs
    X0 = params_X0  # initial guess
    logger.info(str(X0))
    h = leastsq(f, X0)
    logger.info('%s %s', str(h[0]), str(h[1]))
err_f=f(h[0])
x_arr=[lai_waiter_inc_sigma_item[4] for lai_waiter_inc_sigma_item in lai_water_inc_sigma_list]
    # order the samples by residual
    logger.info("training set:\nsamples ordered by residual\ncount:{}\nindex\tresidual\tsample".format(str(np.array(err_f).shape)))
for i in np.argsort(np.array(err_f)):
logger.info('{}\t{}\t{}'.format(i,err_f[i],str(lai_water_inc_sigma_list[i])))
logger.info("\n误差点序输出结束\n")
if draw_flag:
logger.info(err_f)
logger.info(np.where(np.abs(err_f)<10))
from matplotlib import pyplot as plt
plt.scatter(x_arr,err_f)
plt.title("equation-err")
plt.savefig(train_err_image_path,dpi=600)
plt.show()
return h[0]
def test_WMCModel(lai_waiter_inc_sigma_list,param_arr,lai_X0,test_err_image_path,draw_flag=True):
""" 测试模型训练结果
Args:
lai_waiter_inc_sigma_list (list): 使
A (_type_): A
B (_type_): B
C (_type_): C
D (_type_): D
M (_type_): M
N (_type_): N
lai_X0 (_type_):
Returns:
list: [sample_lai,err,predict]
"""
err=[]
err_f=[]
x_arr=[]
err_lai=[]
for lai_waiter_inc_sigma_item in lai_waiter_inc_sigma_list:
sample_time,sample_code,sample_lon,sample_lat,sample_lai,csv_sigma,sample_soil,sample_inc,sample_sigma=lai_waiter_inc_sigma_item
def f(X):
lai=X[0]
eqs=[WMCModel(param_arr,lai,sample_soil,sample_inc,csv_sigma)]
return eqs
X0=lai_X0
h = leastsq(f, X0)
temp_err=h[0]-sample_lai
        err_lai.append(temp_err[0])  # LAI prediction error
        err.append([sample_lai, temp_err[0], h[0][0], sample_code])
        err_f.append(f(h[0])[0])  # equation residual
x_arr.append(sample_lai)
    # order the samples by residual
    logger.info("test set:\nsamples ordered by residual\ncount:{}\nindex\tresidual\tequation residual\tsample".format(str(np.array(err_lai).shape)))
for i in np.argsort(np.array(err_lai)):
logger.info('{}\t{}\t{}\t{}'.format(i,err_lai[i],err_f[i],str(lai_waiter_inc_sigma_list[i])))
logger.info("\n误差点序输出结束\n")
if draw_flag:
from matplotlib import pyplot as plt
plt.scatter(x_arr,err_lai)
plt.title("equation-err")
plt.savefig(test_err_image_path,dpi=600)
plt.show()
return err
def processs_WMCModel(param_arr,lai_X0,sigma,inc_angle,soil_water):
if(sigma<0 ):
return np.nan
def f(X):
lai=X[0]
eqs=[WMCModel(param_arr,lai,soil_water,inc_angle,sigma )]
return eqs
h = leastsq(f, [lai_X0])
return h[0][0]
# Cython entry point: pixel-wise inversion over full rasters
cpdef np.ndarray[double,ndim=2] process_tiff(np.ndarray[double,ndim=2] sigma_tiff,
np.ndarray[double,ndim=2] inc_tiff,
np.ndarray[double,ndim=2] soil_water_tiff,
np.ndarray[double,ndim=1] param_arr,
double lai_X0):
cdef np.ndarray[double,ndim=2] result=sigma_tiff
cdef int param_arr_length=param_arr.shape[0]
cdef int height=sigma_tiff.shape[0]
cdef int width=sigma_tiff.shape[1]
cdef int i=0
cdef int j=0
cdef double temp=0
while i<height:
j=0
while j<width:
temp = processs_WMCModel(param_arr,lai_X0,sigma_tiff[i,j],inc_tiff[i,j],soil_water_tiff[i,j])
temp=temp if temp<10 and temp>=0 else np.nan
result[i,j]=temp
j=j+1
i=i+1
return result
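# A minimal pure-Python sketch of inverting the model for one pixel
# (parameter values below are illustrative, not calibrated):
# params = np.array([0.1, 0.2, 0.0, 1.0, 0.5, -0.05])  # (A, B, C, D, M, N)
# lai = processs_WMCModel(params, lai_X0=1.0, sigma=0.03,
#                         inc_angle=0.6, soil_water=0.2)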

45
Ortho/tool/LAI/setup.py Normal file
View File

@ -0,0 +1,45 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./LAIProcess') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./LAIProcess.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# command: python setup.py build_ext --inplace

View File

@ -0,0 +1,117 @@
# -*- encoding: utf-8 -*-
# code from https://blog.csdn.net/theonegis/article/details/54427906
from osgeo import gdal
from osgeo import osr
import numpy as np
def getSRSPair(dataset):
"""
获得给定数据的投影参考系和地理参考系
:param dataset: GDAL地理数据
:return: 投影参考系和地理参考系
"""
prosrs = osr.SpatialReference()
prosrs.ImportFromWkt(dataset.GetProjection())
geosrs = prosrs.CloneGeogCS()
return prosrs, geosrs
def geo2lonlat(dataset, x, y):
"""
将投影坐标转为经纬度坐标具体的投影坐标系由给定数据确定
:param dataset: GDAL地理数据
:param x: 投影坐标x
:param y: 投影坐标y
:return: 投影坐标(x, y)对应的经纬度坐标(lon, lat)
"""
prosrs, geosrs = getSRSPair(dataset)
ct = osr.CoordinateTransformation(prosrs, geosrs)
coords = ct.TransformPoint(x, y)
return coords[:2]
def lonlat2geo(dataset, lon, lat):
"""
将经纬度坐标转为投影坐标具体的投影坐标系由给定数据确定
:param dataset: GDAL地理数据
:param lon: 地理坐标lon经度
:param lat: 地理坐标lat纬度
:return: 经纬度坐标(lon, lat)对应的投影坐标
"""
prosrs, geosrs = getSRSPair(dataset)
ct = osr.CoordinateTransformation(geosrs, prosrs)
coords = ct.TransformPoint(lat, lon)
return coords[:2]
def imagexy2geo(dataset, row, col):
"""
根据GDAL的六参数模型将影像图上坐标行列号转为投影坐标或地理坐标根据具体数据的坐标系统转换
:param dataset: GDAL地理数据
:param row: 像素的行号
:param col: 像素的列号
:return: 行列号(row, col)对应的投影坐标或地理坐标(x, y)
"""
trans = dataset.GetGeoTransform()
px = trans[0] + col * trans[1] + row * trans[2]
py = trans[3] + col * trans[4] + row * trans[5]
return px, py
def geo2imagexy(dataset, x, y):
"""
根据GDAL的六 参数模型将给定的投影或地理坐标转为影像图上坐标行列号
:param dataset: GDAL地理数据
:param x: 投影或地理坐标x
:param y: 投影或地理坐标y
:return: 影坐标或地理坐标(x, y)对应的影像图上行列号(col, row)
"""
trans = dataset.GetGeoTransform()
a = np.array([[trans[1], trans[2]], [trans[4], trans[5]]])
b = np.array([x - trans[0], y - trans[3]])
return np.linalg.solve(a, b) # 使用numpy的linalg.solve进行二元一次方程的求解
def test1():
gdal.AllRegister()
tif = 'D:/DATA/testdata/GLCFCS30_E110N25.tif'
# dataset = gdal.Open(r"D:\\DATA\\雷达测试\\GaoFen3_20200528_HH_DB.tif")
dataset = gdal.Open(tif)
print('数据投影:')
print(dataset.GetProjection())
print('数据的大小(行,列):')
print('(%s %s)' % (dataset.RasterYSize, dataset.RasterXSize))
x = 793214.118
y = 2485865.527
lon = 113.84897082317516
lat = 22.453998686022448
row = 24576
col = 22540
print('图上坐标 -> 投影坐标:')
coords = imagexy2geo(dataset, row, col)
print('(%s, %s)->(%s, %s)' % (row, col, coords[0], coords[1]))
print('投影坐标 -> 图上坐标:')
coords = geo2imagexy(dataset, x, y)
col = coords[0]
row = coords[1]
print('(%s, %s)->(%s, %s)' % (x, y, coords[0], coords[1]))
print('投影坐标 -> 经纬度:')
coords = geo2lonlat(dataset, x, y)
print('(%s, %s)->(%s, %s)' % (x, y, coords[0], coords[1]))
print('经纬度 -> 投影坐标:')
coords = lonlat2geo(dataset, lon, lat)
print('(%s, %s)->(%s, %s)' % (lon, lat, coords[0], coords[1]))
coords1 = geo2lonlat(dataset, 657974.118, 2633321.527)
print(coords1)
coords2 = geo2lonlat(dataset, 793214.118, 2485865.527)
print(coords2)
pass
# if __name__ == '__main__':
#
# print('done')

View File

@ -0,0 +1,156 @@
"""
@Project microproduct
@File DEMJoint
@Function DEM mosaicking and resampling
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
from osgeo import gdal, osr
import os
import numpy as np
class DEMProcess:
"""
    DEM mosaicking and resampling
"""
def __init__(self):
pass
@staticmethod
def get_extent(fn):
'''
原文链接https://blog.csdn.net/XBR_2014/article/details/85255412
'''
ds = gdal.Open(fn)
rows = ds.RasterYSize
cols = ds.RasterXSize
# 获取图像角点坐标
gt = ds.GetGeoTransform()
minx = gt[0]
maxy = gt[3]
        maxx = gt[0] + gt[1] * cols
        miny = gt[3] + gt[5] * rows
return (minx, maxy, maxx, miny)
@staticmethod
def img_mosaic(in_files, out_dem_path):
# 通过两两比较大小,将最终符合条件的四个角点坐标保存
# 即为拼接图像的四个角点坐标
minX, maxY, maxX, minY = DEMProcess.get_extent(in_files[0])
for fn in in_files[1:]:
minx, maxy, maxx, miny = DEMProcess.get_extent(fn)
minX = min(minX, minx)
maxY = max(maxY, maxy)
maxX = max(maxX, maxx)
minY = min(minY, miny)
# 获取输出图像的行列数
in_ds = gdal.Open(in_files[0])
bands_num = in_ds.RasterCount
gt = in_ds.GetGeoTransform()
        rows = int((maxY - minY) / abs(gt[5]))
        cols = int((maxX - minX) / gt[1])
        # raster data type (fixed to UInt16 here)
        datatype = gdal.GDT_UInt16
# 创建输出图像
driver = gdal.GetDriverByName('GTiff')
out_dem = os.path.join(out_dem_path, 'mosaic0.tif')
out_ds = driver.Create(out_dem, cols, rows, bands_num, datatype)
out_ds.SetProjection(in_ds.GetProjection())
gt = list(in_ds.GetGeoTransform())
gt[0], gt[3] = minX, maxY
out_ds.SetGeoTransform(gt)
for fn in in_files:
in_ds = gdal.Open(fn)
x_size = in_ds.RasterXSize
y_size = in_ds.RasterYSize
trans = gdal.Transformer(in_ds, out_ds, [])
success, xyz = trans.TransformPoint(False, 0, 0)
x, y, z = map(int, xyz)
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray()
out_band = out_ds.GetRasterBand(i)
out_data = out_band.ReadAsArray(x, y, x_size, y_size)
data = np.maximum(data, out_data)
out_band.WriteArray(data, x, y)
del in_ds, out_band, out_ds
@staticmethod
def dem_clip(OutFilePath, DEMFilePath, SelectArea):
'''
根据选择范围裁剪DEM,并输出
agrs:
outFilePath:裁剪DEM输出地址
DEMFilePath:被裁减DEM地址
SelectArea:list [(xmin,ymax),(xmax,ymin)] 框选范围 左上角右下角
'''
DEM_ptr = gdal.Open(DEMFilePath)
DEM_GeoTransform = DEM_ptr.GetGeoTransform() # 读取影像的投影变换
DEM_InvGeoTransform = gdal.InvGeoTransform(DEM_GeoTransform)
SelectAreaArrayPoints = [gdal.ApplyGeoTransform(DEM_InvGeoTransform, p[0], p[1]) for p in SelectArea]
SelectAreaArrayPoints = list(map(lambda p: (int(p[0]), int(p[1])), SelectAreaArrayPoints)) # 确定坐标
[(ulx, uly), (brx, bry)] = SelectAreaArrayPoints
rowCount, colCount = bry - uly, brx - ulx
# 输出DEM的桌面坐标转换
Out_Transfrom = list(DEM_GeoTransform)
Out_Transfrom[0] = SelectArea[0][0]
Out_Transfrom[3] = SelectArea[0][1]
# 构建输出DEM
Bands_num = DEM_ptr.RasterCount
gtiff_driver = gdal.GetDriverByName('GTiff')
datatype = gdal.GDT_UInt16
out_dem = gtiff_driver.Create(OutFilePath, colCount, rowCount, Bands_num, datatype)
out_dem.SetProjection(DEM_ptr.GetProjection())
out_dem.SetGeoTransform(Out_Transfrom)
for i in range(1, Bands_num + 1):
data_band = DEM_ptr.GetRasterBand(i)
out_band = out_dem.GetRasterBand(i)
data = data_band.ReadAsArray(ulx, uly, colCount, rowCount)
out_band.WriteArray(data)
del out_dem
@staticmethod
def dem_resample(in_dem_path, out_dem_path):
        '''
        Mosaic and resample DEM tiles; the default CRS is WGS84.
        args:
            in_dem_path: input DEM folder
            out_dem_path: output DEM folder
        '''
# 读取文件夹中所有的DEM
dem_file_paths=[os.path.join(in_dem_path,dem_name) for dem_name in os.listdir(in_dem_path) if dem_name.find(".tif")>=0 and dem_name.find(".tif.")==-1]
spatialreference=osr.SpatialReference()
spatialreference.SetWellKnownGeogCS("WGS84") # 设置地理坐标,单位为度 degree # 设置投影坐标,单位为度 degree
spatialproj=spatialreference.ExportToWkt() # 导出投影结果
# 将DEM拼接成一张大图
mergeFile =gdal.BuildVRT(os.path.join(out_dem_path,"mergeDEM.tif"), dem_file_paths)
out_DEM=os.path.join(out_dem_path,"mosaic.tif")
gdal.Warp(out_DEM,
mergeFile,
format="GTiff",
dstSRS=spatialproj,
dstNodata=-9999,
outputType=gdal.GDT_Float32)
return out_DEM
# if __name__ == "__main__":
# DEMProcess = DEMProcess()
# in_dem_path = r'F:\大气延迟\out_dem'
# out_dem_path = r'F:\大气延迟\out_dem'
# DEMProcess.dem_resample(in_dem_path, out_dem_path)

View File

@ -0,0 +1,154 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File ScatteringAuxData.py
@Function backscatter calibration metadata
@Author SHJ
@Contact
@Date 2022/6/29
@Version 1.0.0
Revision history:
[seq] [date] [author] [change]
1 2022-6-29 石海军 1. support extracting info from both GF3 L1A meta files and orthorectified meta files
"""
import logging
from xml.etree.ElementTree import ElementTree
import math
logger = logging.getLogger("mylog")
class GF3L1AMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('processinfo').find('CalibrationConst').find(polarization).text) if root.find('processinfo').find('CalibrationConst').find(polarization).text!="NULL" else 0
return Kdb
class OrthoMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('l1aInfo').find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('l1aInfo').find('processinfo').find('CalibrationConst').find(polarization).text)
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# 获取微波中心频率
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
RadarCenterFrequency = float(root.find('sensor').find('RadarCenterFrequency').text)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# 获取微波波长单位m
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
lamda = float(root.find('sensor').find('lamda').text)
return lamda
class MetaDataHandler:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
try:
QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_QualifyValue() error!')
QualifyValue = GF3L1AMetaData.get_QualifyValue(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_QualifyValue() success!')
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
try:
Kdb = OrthoMetaData.get_Kdb(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_Kdb() error!')
Kdb = GF3L1AMetaData.get_Kdb(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_Kdb() success!')
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# 获取微波中心频率,单位GHz
RadarCenterFrequency = OrthoMetaData.get_RadarCenterFrequency(meta_file_path)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# 获取微波波长单位m
lamda = OrthoMetaData.get_lamda(meta_file_path)
return lamda
class Calibration:
def __init__(self):
pass
@staticmethod
    def get_Calibration_coefficient(meta_file_path, polarization):
        # calibration coefficients ordered as [HH, HV, VH, VV]
        calibration = [0, 0, 0, 0]
        pol_index = {'HH': 0, 'HV': 1, 'VH': 2, 'VV': 3}
        for i in polarization:
            if i in pol_index:
                quality = MetaDataHandler.get_QualifyValue(meta_file_path, i)
                kdb = MetaDataHandler.get_Kdb(meta_file_path, i)
                data_value = ((quality/32767)**2) * (10**((kdb/10)*-1))
                calibration[pol_index[i]] = math.sqrt(data_value)
        return calibration
# if __name__ == '__main__':
# A = ScatteringAuxData()
# dir = 'G:\MicroWorkspace\C-SAR\AuxSAR\GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485_geo/'
# path = dir + 'GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml'
# path1 = dir + 'OrthoProduct.meta.xml'
# t1 = A.get_QualifyValue(path, 'HH')
# t2 = A.get_Kdb(path, 'HH')
# t3 = A.get_RadarCenterFrequency(path)
# t4 = A.get_lamda(path)
# pass

View File

@ -0,0 +1,527 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File PreProcess.py
@Function coordinate conversion, CRS transformation, image clipping, reprojection, resampling
@Author LMM
@Date 2021/8/25 14:17
@Version 1.0.0
"""
from shapely.geometry import Polygon  # must be imported before gdal, otherwise errors occur
from osgeo import gdal
from osgeo import gdalconst
from osgeo import osr
from osgeo import ogr
import os
import cv2
import numpy as np
import shutil
import scipy.spatial.transform
import scipy.spatial.transform._rotation_groups  # needed so PyInstaller bundles it
import scipy.special.cython_special  # needed so PyInstaller bundles it
import shapefile
from shapely.errors import TopologicalError
from tool.algorithm.image.ImageHandle import ImageHandler
import logging
logger = logging.getLogger("mylog")
os.environ['PROJ_LIB'] = os.getcwd()
class PreProcess:
"""
    Pre-processing: co-registration of all input imagery
"""
def __init__(self):
self._ImageHandler = ImageHandler()
pass
def cal_scopes(self, processing_paras):
        # compute the ROI
        scopes = ()
        for key, value in processing_paras.items():
            if 'ori_sim' in key:
                scopes += (ImageHandler.get_scope_ori_sim(value),)
        if processing_paras['box'] != "" and processing_paras['box'] != "empty":
            scopes += self.box2scope(processing_paras['box'])
        return scopes
def cal_scopes_roi(self, processing_paras):
return self.intersect_polygon(self.cal_scopes(processing_paras))
def cut_geoimg(self,workspace_preprocessing_path, para_names_geo, processing_paras):
self.check_img_projection(workspace_preprocessing_path, para_names_geo, processing_paras)
# 计算roi
scopes = self.cal_scopes(processing_paras)
# 计算图像的轮廓,并求相交区域
intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
scopes_roi = self.cal_intersect_shp(intersect_shp_path, para_names_geo, processing_paras, scopes)
# 裁剪
# 裁剪图像:裁剪微波图像,裁剪其他图像
cutted_img_paths = self.cut_imgs(workspace_preprocessing_path, para_names_geo, processing_paras, intersect_shp_path)
return cutted_img_paths, scopes_roi
def preprocessing(self, para_names, ref_img_name, processing_paras, workspace_preprocessing_path, workspace_preprocessed_path):
# 读取每一张图像,检查图像坐标系
self.check_img_projection(workspace_preprocessing_path, para_names, processing_paras)
# 计算图像的轮廓,并求相交区域
intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
self.cal_intersect_shp(intersect_shp_path, para_names, processing_paras,
self.box2scope(processing_paras['box']))
logger.info('create intersect shp success!')
# 裁剪图像:裁剪微波图像,裁剪其他图像
cutted_img_paths = self.cut_imgs(workspace_preprocessing_path, para_names, processing_paras,
intersect_shp_path)
logger.info('cut images success!')
# 重采样:重采样到跟微波图像一致的分辨率,然后保存到临时目录
preprocessed_paras = self.resampling_img(workspace_preprocessed_path, para_names, cutted_img_paths,cutted_img_paths[ref_img_name])
# 清除预处理缓存文件
logger.info('preprocess_handle success!')
return preprocessed_paras # cutted_img_paths
def get_ref_inf(self, ref_img_path):
"""获取参考影像的图像信息"""
ref_img_path = ref_img_path
cols = ImageHandler.get_img_width(ref_img_path)
rows = ImageHandler.get_img_height(ref_img_path)
proj = ImageHandler.get_projection(ref_img_path)
geo = ImageHandler.get_geotransform(ref_img_path)
return ref_img_path, cols, rows, proj, geo
def check_img_projection(self, out_dir, para_names, processing_paras):
"""
读取每一张图像,检查图像坐标系;
将投影坐标系影像转换为地理坐标系影像(EPSG:4326)
:param para_names:需要检查的参数名称
"""
if len(para_names) == 0:
return False
for name in para_names:
proj = ImageHandler.get_projection(processing_paras[name])
keyword = proj.split("[", 2)[0]
if keyword == "PROJCS":
# 投影坐标系 转 地理坐标系
para_dir = os.path.split(processing_paras[name])
out_para = os.path.join(out_dir, para_dir[1].split(".", 1)[0] + "_EPSG4326.tif")
self.trans_epsg4326(out_para, processing_paras[name])
processing_paras[name] = out_para
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('coordinate is missing!')
def preprocessing_oh2004(self, para_names, processing_paras, workspace_preprocessing_path, workspace_preprocessed_path):
# 读取每一张图像,检查图像坐标系
self.check_img_projection(workspace_preprocessing_path, para_names, processing_paras)
# 计算图像的轮廓,并求相交区域
intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
scopes = self.cal_intersect_shp(intersect_shp_path, para_names, processing_paras,
self.box2scope(processing_paras['box']))
logger.info('create intersect shp success!')
# 裁剪图像:裁剪微波图像,裁剪其他图像
cutted_img_paths = self.cut_imgs(workspace_preprocessed_path, para_names, processing_paras,
intersect_shp_path)
logger.info('cut images success!')
# 重采样:重采样到跟微波图像一致的分辨率,然后保存到临时目录
return cutted_img_paths, scopes
@staticmethod
def lonlat2geo(lat, lon):
"""
WGS84转平面坐标
Param: lat 为WGS_1984的纬度
Param: lon 为WGS_1984的经度
输出转换后的坐标x,y
"""
dstsrs1 = osr.SpatialReference()
dstsrs1.ImportFromEPSG(32649)
dstsrs2 = osr.SpatialReference()
dstsrs2.ImportFromEPSG(4326)
ct = osr.CoordinateTransformation(dstsrs2, dstsrs1)
coords = ct.TransformPoint(lat, lon)
# print("输出转换后的坐标x,y:",coords[:2])
return coords[:2]
@staticmethod
def trans_geogcs2projcs(out_path, in_path):
"""
:param out_path:wgs84投影坐标影像保存路径
:param in_path:地理坐标影像输入路径
"""
# 创建文件
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
options = gdal.WarpOptions(format='GTiff', srcSRS='EPSG:4326', dstSRS='EPSG:32649')
gdal.Warp(out_path, in_path, options=options)
@staticmethod
def trans_projcs2geogcs(out_path, in_path ,EPSG_src=32649,EPSG_dst=4326):
"""
:param out_path:wgs84地理坐标影像输入路径
:param in_path:wgs84投影坐标影像保存路径
:param EPSG_src:原始投影系
:param EPSG_dst:目标坐标系
"""
str_EPSG_src = 'EPSG:'+ str(EPSG_src)
str_EPSG_dst = 'EPSG:'+ str(EPSG_dst)
# 创建文件
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
options = gdal.WarpOptions(format='GTiff', srcSRS=str_EPSG_src, dstSRS=str_EPSG_dst)
gdal.Warp(out_path, in_path, options=options)
@staticmethod
def trans_epsg4326(out_path, in_path):
OutTile = gdal.Warp(out_path, in_path,
dstSRS='EPSG:4326',
resampleAlg=gdalconst.GRA_Bilinear
)
OutTile = None
return True
@staticmethod
def box2scope(str_box):
roi_box = ()
if str_box == '' or str_box == 'empty':
return roi_box
box_list = [float(num) for num in list(str_box.split(';'))]
if len(box_list) == 4:
roi_box = ([[box_list[2], box_list[1]], [box_list[3], box_list[1]], [box_list[2], box_list[0]],
[box_list[3], box_list[0]]],)
return roi_box
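# Box layout inferred from the corner ordering above (top-left = [lon_min, lat_max]):
#   box = "lat_min;lat_max;lon_min;lon_max"
# e.g. box2scope("30.0;31.0;120.0;121.0")
#   -> ([[120.0, 31.0], [121.0, 31.0], [120.0, 30.0], [121.0, 30.0]],)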
def cal_intersect_shp(self, shp_path, para_names,processing_paras, add_scope =()):
"""
:param shp_path:相交区域矢量文件保存区域
:param para_names:判断相交影像的名称
:return: True or False
"""
scopes = ()
if len(add_scope) != 0:
scopes += add_scope
for name in para_names:
scope_tuple = (self._ImageHandler.get_scope(processing_paras[name]),)
scopes += scope_tuple
for n, scope in zip( range(len(scopes)), scopes):
logging.info("scope" + str(n) + ":%s", scope)
intersect_polygon = self.intersect_polygon(scopes)
if intersect_polygon is None:
logger.error('image range does not overlap!')
raise Exception('create intersect shp fail!')
logging.info("scope roi :%s", intersect_polygon)
if self.write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
raise Exception('create intersect shp fail!')
return intersect_polygon
@staticmethod
def intersect_polygon(scopes_tuple):
"""
功能说明计算多边形相交的区域坐标;注意多边形区域会转变成凸区域再求交
:param scopes_tuple: 输入多个区域坐标的tuple
:return: 多边形相交的区域坐标((x0,y0),(x1,y1),..., (xn,yn))
"""
if len(scopes_tuple) < 2:
logger.error('len(scopes_tuple) < 2')
# return # todo 修改只有单景会出现无法判断相交区域问题
try:
# python四边形对象会自动计算四个点最后四个点顺序为左上 左下 右下 右上 左上
tmp = tuple(scopes_tuple[0])
poly_intersect = Polygon(tmp).convex_hull
for i in range(len(scopes_tuple)-1):
polygon_next = Polygon(tuple(scopes_tuple[i+1])).convex_hull
if poly_intersect.intersects(polygon_next):
poly_intersect = poly_intersect.intersection(polygon_next)
else:
msg = 'Image:' + str(i) + 'range does not overlap!'
logger.error(msg)
return
return list(poly_intersect.boundary.coords)[:-1]
# except shapely.geos.TopologicalError:
except TopologicalError:
logger.error('shapely.geos.TopologicalError occurred!')
return
@staticmethod
def write_polygon_shp(out_shp_path, point_list, EPSG =32649):
"""
功能说明创建闭环的矢量文件
:param out_shp_path :矢量文件保存路径
:param point_list :装有闭环点的列表[[x0,y0],[x1,y1]...[xn,yn]]
:return: True or False
"""
# needed so that non-ASCII (e.g. Chinese) file paths work
gdal.SetConfigOption("GDAL_FILENAME_IS_UTF8", "NO")
# needed so that attribute-table fields support non-ASCII text
gdal.SetConfigOption("SHAPE_ENCODING", "")
# register all OGR drivers
ogr.RegisterAll()
# create the data source, here an ESRI shapefile
str_driver_name = "ESRI Shapefile"
o_driver = ogr.GetDriverByName(str_driver_name)
if o_driver is None:
msg = 'driver('+str_driver_name+')is invalid value'
logger.error(msg)
return False
# create the data source
if os.path.exists(out_shp_path) and os.path.isfile(out_shp_path):  # if a file with the same name exists
os.remove(out_shp_path)  # delete it
o_ds = o_driver.CreateDataSource(out_shp_path)
if o_ds is None:
msg = 'create file failed!' + out_shp_path
logger.error(msg)
return False
# create a polygon layer
srs = osr.SpatialReference()
# srs.ImportFromEPSG(32649)  # projected spatial reference (WGS84 / UTM)
srs.ImportFromEPSG(EPSG)  # spatial reference from the given EPSG code
o_layer = o_ds.CreateLayer("TestPolygon", srs, ogr.wkbPolygon)
if o_layer is None:
msg = 'create coverage failed!'
logger.error(msg)
return False
# build the attribute table
# first an integer field named FieldID
o_field_id = ogr.FieldDefn("FieldID", ogr.OFTInteger)
o_layer.CreateField(o_field_id, 1)
# then a string field named FieldName, width 100
o_field_name = ogr.FieldDefn("FieldName", ogr.OFTString)
o_field_name = ogr.FieldDefn("FieldName", ogr.OFTString)
o_field_name.SetWidth(100)
o_layer.CreateField(o_field_name, 1)
o_defn = o_layer.GetLayerDefn()
# create the polygon feature
o_feature_rectangle = ogr.Feature(o_defn)
o_feature_rectangle.SetField(0, 1)
o_feature_rectangle.SetField(1, "IntersectRegion")
# create the ring geometry
ring = ogr.Geometry(ogr.wkbLinearRing)
for i in range(len(point_list)):
ring.AddPoint(point_list[i][0], point_list[i][1])
ring.CloseRings()
# create the polygon geometry and attach the ring
geom_rect_polygon = ogr.Geometry(ogr.wkbPolygon)
geom_rect_polygon.AddGeometry(ring)
o_feature_rectangle.SetGeometry(geom_rect_polygon)
o_layer.CreateFeature(o_feature_rectangle)
o_ds.Destroy()
return True
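# A minimal usage sketch for write_polygon_shp: write a closed square as an EPSG:4326 shapefile
# (the output path is hypothetical):
# square = [[113.2, 30.1], [113.8, 30.1], [113.8, 30.5], [113.2, 30.5]]
# PreProcess.write_polygon_shp(r'D:\tmp\intersect.shp', square, 4326)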
def cut_imgs(self, out_dir, para_names, processing_paras, shp_path):
"""
使用矢量数据裁剪影像
:param para_names:需要检查的参数名称
:param shp_path裁剪的shp文件
"""
if len(para_names) == 0:
return {}
cutted_img_paths = {}
try:
for name in para_names:
input_path = processing_paras[name]
output_path = os.path.join(out_dir, name + '_cut.tif')
self.cut_img(output_path, input_path, shp_path)
cutted_img_paths.update({name: output_path})
logger.info('cut %s success!', name)
except BaseException:
logger.error('cut_img failed!')
return {}
return cutted_img_paths
@staticmethod
def cut_img(output_path, input_path, shp_path):
"""
:param output_path:剪切后的影像
:param input_path:待剪切的影像
:param shp_path:矢量数据
:return: True or False
"""
r = shapefile.Reader(shp_path)
box = r.bbox
input_dataset = gdal.Open(input_path)
gdal.Warp(output_path, input_dataset, format='GTiff', outputBounds=box, cutlineDSName=shp_path, dstNodata=-9999)
# cutlineWhere="FIELD = whatever",
# optionally you can filter your cutline (shapefile) based on attribute values
# select the no data value you like
# ds = None
# do other stuff with ds object, it is your cropped dataset. in this case we only close the dataset.
del input_dataset
return True
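# A hedged usage sketch for cut_img (all paths hypothetical): gdal.Warp clips to the shapefile's
# bounding box via outputBounds and masks with the cutline, writing -9999 outside the polygon:
# PreProcess.cut_img(r'D:\tmp\dem_cut.tif', r'D:\tmp\dem.tif', r'D:\tmp\intersect.shp')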
def resampling_img(self, out_dir, para_names, img_paths, refer_img_path):
"""
以主影像为参考对影像重采样
:param para_names:需要检查的参数名称
:param img_paths待重采样影像路径
:param refer_img_path参考影像路径
"""
if len(para_names) == 0 or len(img_paths) == 0:
return
prepro_imgs_path = {}
for name in para_names:
img_path = img_paths[name]
output_para = os.path.join(out_dir, name + '_preprocessed.tif') # + name + '_preprocessed.tif'
self.resampling_by_scale(img_path, output_para, refer_img_path)
prepro_imgs_path.update({name: output_para})
logger.info('resampling %s success!', name)
return prepro_imgs_path
@staticmethod
def resampling_by_scale(input_path, target_file, refer_img_path):
"""
按照缩放比例对影像重采样
:param input_path: GDAL地理数据路径
:param target_file: 输出影像
:param refer_img_path:参考影像
:return: True or False
"""
ref_dataset = gdal.Open(refer_img_path)
ref_cols = ref_dataset.RasterXSize  # number of columns
ref_rows = ref_dataset.RasterYSize  # number of rows
target_dataset = gdal.Open(input_path)
target_cols = target_dataset.RasterXSize  # number of columns
target_rows = target_dataset.RasterYSize  # number of rows
if(ref_cols == target_cols) and (ref_rows == target_rows):
shutil.copyfile(input_path, target_file)
return True
dataset = gdal.Open(input_path)
if dataset is None:
logger.error('resampling_by_scale:dataset is None!')
return False
band_count = dataset.RasterCount  # number of bands
if (band_count == 0) or (target_file == ""):
logger.error("resampling_by_scale:Parameters of the abnormal!")
return False
cols = dataset.RasterXSize  # number of columns
rows = dataset.RasterYSize  # number of rows
scale_x = ref_cols/cols
scale_y = ref_rows/rows
# cols = int(cols * scale)  # compute the new row/column counts
# rows = int(rows * scale)
cols = ref_cols
rows = ref_rows
geotrans = list(dataset.GetGeoTransform())
geotrans[1] = geotrans[1] / scale_x  # pixel width shrinks by the x scale factor
geotrans[5] = geotrans[5] / scale_y  # pixel height shrinks by the y scale factor
if os.path.exists(target_file) and os.path.isfile(target_file):  # if an image with the same name exists
os.remove(target_file)  # delete it
if not os.path.exists(os.path.split(target_file)[0]):
os.makedirs(os.path.split(target_file)[0])
band1 = dataset.GetRasterBand(1)
data_type = band1.DataType
target = dataset.GetDriver().Create(target_file, xsize=cols, ysize=rows, bands=band_count,
eType=data_type)
target.SetProjection(dataset.GetProjection())  # set the projection
target.SetGeoTransform(geotrans)  # set the geotransform
total = band_count + 1
for index in range(1, total):
# read the band data
data = dataset.GetRasterBand(index).ReadAsArray(buf_xsize=cols, buf_ysize=rows)
out_band = target.GetRasterBand(index)
no_data_value = dataset.GetRasterBand(index).GetNoDataValue()  # fetch the NoData value, if any
if not (no_data_value is None):
out_band.SetNoDataValue(no_data_value)
out_band.WriteArray(data)  # write the data into the new image
out_band.FlushCache()
out_band.ComputeBandStats(False)  # compute band statistics
del dataset
del target
return True
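# A minimal usage sketch for resampling_by_scale (paths hypothetical): the input raster is
# resampled to the reference image's row/column counts, or simply copied when they already match:
# PreProcess.resampling_by_scale(r'D:\tmp\ndvi.tif', r'D:\tmp\ndvi_resampled.tif', r'D:\tmp\HH.tif')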
@staticmethod
def cv_mean_filter(out_path, in_path, filter_size):
"""
:param out_path:滤波后的影像
:param in_path:滤波前的影像
:param filter_size:滤波尺寸
:return: True or False
"""
proj = ImageHandler.get_projection(in_path)
geotrans = ImageHandler.get_geotransform(in_path)
array = ImageHandler.get_band_array(in_path, 1)
array = cv2.blur(array, (filter_size, filter_size)) # 均值滤波
ImageHandler.write_img(out_path, proj, geotrans, array)
return True
@staticmethod
def check_LocalIncidenceAngle(out_tif_path, in_tif_path):
"""
将角度的无效值设置为nan把角度值转为弧度值
:param out_tif_path:处理后影像路径
:param in_tif_path:处理前影像路径
"""
proj, geo, angle = ImageHandler.read_img(in_tif_path)
angle = angle.astype(np.float32, order='C')
angle[angle == -9999] = np.nan
mean = np.nanmean(angle)
if mean > np.pi:
angle = np.deg2rad(angle)# 角度转弧度
angle[np.where(angle >= 0.5 * np.pi)] = np.nan
angle[np.where(angle < 0)] = np.nan
ImageHandler.write_img(out_tif_path, proj, geo, angle)
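# A small numpy sketch (standalone, hypothetical values) of the rule used above: data are treated
# as degrees when the mean exceeds pi, and anything outside [0, pi/2) becomes NaN:
# import numpy as np
# a = np.array([-9999., 30., 95.], dtype=np.float32)
# a[a == -9999] = np.nan
# if np.nanmean(a) > np.pi:
#     a = np.deg2rad(a)
# a[a >= 0.5 * np.pi] = np.nan
# a[a < 0] = np.nan
# print(a)  # -> [nan, ~0.5236, nan]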

View File

@ -0,0 +1,180 @@
# -*- coding: UTF-8 -*-
"""
@Project:microproduct
@File:ROIAlg.py
@Function:
@Contact:
@Author:SHJ
@Date:2021/11/17
@Version:1.0.0
"""
import logging
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
import numpy as np
logger = logging.getLogger("mylog")
class ROIAlg:
def __init__(self,):
pass
@staticmethod
def roi_process(names, processing_path, processing_paras, preprocessed_paras):
roi_paths = []
roi = ROIAlg()
for name in names:
if 'LocalIncidenceAngle' in name:
# build a mask from NaN angle values
pp.check_LocalIncidenceAngle(preprocessed_paras[name],preprocessed_paras[name])
angle_nan_mask_path = processing_path + 'angle_nan_mask.tif'
roi.trans_tif2mask(angle_nan_mask_path, preprocessed_paras[name], np.nan)
roi_paths.append(angle_nan_mask_path)
elif ("HH" in name) or ("HV" in name) or ("VH" in name) or ("VV" in name):
# 利用影像的有效范围生成MASK
tif_mask_path = processing_path + name + "_tif_mask.tif"
roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
roi_paths.append(tif_mask_path)
elif name == 'Covering':
# use the land-cover product to find the vegetated area
cover_mask_path = processing_path + "cover_mask.tif"
cover_id_list = list(processing_paras['CoveringIDs'].split(';'))
cover_id_list = [int(num) for num in cover_id_list]
roi.trans_cover2mask(cover_mask_path, preprocessed_paras[name], cover_id_list)
roi_paths.append(cover_mask_path)
elif name == "NDVI":
# 利用NDVI计算裸土范围该指数的输出值在 -1.0 和 1.0 之间,大部分表示植被量,
# 负值主要根据云、水和雪而生成
# 接近零的值则主要根据岩石和裸土而生成。
# 较低的(小于等于 0.1NDVI 值表示岩石、沙石或雪覆盖的贫瘠区域。
# 中等值0.2 至 0.3)表示灌木丛和草地
# 较高的值0.6 至 0.8)表示温带雨林和热带雨林。
ndvi_mask_path = processing_path + "ndvi_mask.tif"
ndvi_scope = list(processing_paras['NDVIScope'].split(';'))
threshold_of_ndvi_min = float(ndvi_scope[0])
threshold_of_ndvi_max = float(ndvi_scope[1])
roi.trans_tif2mask(ndvi_mask_path, preprocessed_paras[name], threshold_of_ndvi_min, threshold_of_ndvi_max)
roi_paths.append(ndvi_mask_path)
# else:
# # other feature images
# tif_mask_path = processing_path + name + "_mask.tif"
# roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
# roi_paths.append(tif_mask_path)
bare_land_mask_path = processing_path + "bare_land_mask.tif"
for roi_path in roi_paths:
roi.combine_mask(bare_land_mask_path, roi_path, bare_land_mask_path)
return bare_land_mask_path
@staticmethod
def trans_tif2mask(out_mask_path, in_tif_path, threshold_min, threshold_max = None):
"""
:param out_mask_path:mask输出路径
:param in_tif_path:输入路径
:param threshold_min:最小阈值
:param threshold_max:最大阈值
:return: True or False
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_tif_path)
geotrans = image_handler.get_geotransform(in_tif_path)
array = image_handler.get_band_array(in_tif_path, 1)
if threshold_max == None and np.isnan(threshold_min)==True:
nan = np.isnan(array)
mask = (nan.astype(int) == 0).astype(int)
mask1 = ((array == -9999).astype(int) == 0).astype(int)
mask *= mask1
image_handler.write_img(out_mask_path, proj, geotrans, mask)
else:
if threshold_min < threshold_max:
mask = ((array > threshold_min) & (array < threshold_max)).astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
elif threshold_min > threshold_max:
mask = ((array < threshold_min) & (array > threshold_max)).astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
elif threshold_max == threshold_min:
mask = ((array == threshold_min).astype(int) == 0).astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
logger.info("trans_tif2mask success, path: %s", out_mask_path)
return True
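# A hedged usage sketch tying trans_tif2mask to the NDVI thresholds described in roi_process
# above (paths hypothetical): pixels with 0.0 < NDVI < 0.1 are marked 1, everything else 0:
# ROIAlg.trans_tif2mask(r'D:\tmp\ndvi_mask.tif', r'D:\tmp\ndvi.tif', 0.0, 0.1)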
@staticmethod
def trans_cover2mask(out_mask_path, in_tif_path, cover_id_list):
"""
:param out_mask_path:mask输出路径
:param in_tif_path:输入路径
:param cover_id_list 地表覆盖类型数据的id
:return: True or False
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_tif_path)
geotrans = image_handler.get_geotransform(in_tif_path)
array = image_handler.get_band_array(in_tif_path, 1)
mask = np.zeros(array.shape, dtype=bool)
for id in cover_id_list:
mask_tmp = (array == id)
mask = mask | mask_tmp
mask = mask.astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
@staticmethod
def combine_mask(out_mask_path, in_main_mask_path, in_sub_mask_path):
"""
:param out_mask_path:输出路径
:param in_main_mask_path:主mask路径输出影像采用主mask的地理信息
:param in_sub_mask_path:副mask路径
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_main_mask_path)
geotrans = image_handler.get_geotransform(in_main_mask_path)
main_array = image_handler.get_band_array(in_main_mask_path, 1)
if image_handler.get_dataset(in_sub_mask_path) is not None:
sub_array = image_handler.get_band_array(in_sub_mask_path, 1)
main_array = main_array * sub_array
image_handler.write_img(out_mask_path, proj, geotrans, main_array)
logger.info("combine_mask success, path: %s", out_mask_path)
return True
@staticmethod
def cal_roi(out_tif_path, in_tif_path, mask_path, background_value=1):
"""
:param out_tif_path:ROI的影像
:param in_tif_path:计算ROI的影像
:param mask_path:掩模
:param background_value:无效区域设置的背景值
:return: True or False
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_tif_path)
geotrans = image_handler.get_geotransform(in_tif_path)
tif_array = image_handler.get_data(in_tif_path)  # read every band into an array
mask_array = image_handler.get_band_array(mask_path, 1)
if len(tif_array.shape) == 3:
im_bands, im_height, im_width = tif_array.shape
else:
im_bands, (im_height, im_width) = 1, tif_array.shape
if im_bands == 1:
tif_array[np.isnan(mask_array)] = background_value
tif_array[mask_array == 0] = background_value
elif im_bands>1:
for i in range(0, im_bands):
tif_array[i, :, :][np.isnan(mask_array)] = background_value
tif_array[i, :, :][mask_array == 0] = background_value
image_handler.write_img(out_tif_path, proj, geotrans, tif_array, '0')
logger.info("cal_roi success, path: %s", out_tif_path)
return True
# if __name__ == '__main__':
# dir = r'G:\MicroWorkspace\C-SAR\SoilMoisture\Temporary\processing/'
# out_tif_path = dir + 'soil_moisture_roi.tif'
# in_tif_path = dir + 'soil_moisture.tif'
# mask_path = dir + 'bare_land_mask.tif'
# background_value = np.nan
# ROIAlg.cal_roi(out_tif_path, in_tif_path, mask_path, background_value)
# pass

View File

@ -0,0 +1,57 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:sieve_filter.py
@Function:GDAL sieve (speckle) filtering
@Contact: 'https://www.osgeo.cn/gdal/api/gdal_alg.html?highlight=gdalsievefilter#'
'_CPPv415GDALSieveFilter15GDALRasterBandH15GDALRasterBandH15GDALRasterBandHiiPPc16GDALProgressFuncPv'
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import logging
from osgeo import gdal
import numpy as np
# from onestar.soilMoisture.OneMoistureImage import ImageHandler
from tool.algorithm.image.ImageHandle import ImageHandler
logger = logging.getLogger("mylog")
def gdal_sieve_filter(dst_filename, src_filename, threshold=100, connectedness=4):
"""
基于python GDAL栅格滤波
:param dst_filename: 输出滤波后的影像
:param src_filename: 输入需要处理的文件
:param threshold: 滤波的值大小
:param connectedness: 连通域, 范围4或者8
:return:
"""
# 4表示对角像素不被视为直接相邻用于多边形成员资格8表示对角像素不相邻
# connectedness = 4
gdal.AllRegister()
# print('需要处理滤波的栅格文件:{},阈值(分辨率):{}'.format(src_filename, threshold))
dataset = gdal.Open(src_filename, gdal.GA_Update)
if dataset is None:
logger.error('{}open tif fail!'.format(src_filename))
return False
# 获取需要处理的源栅格波段
src_band = dataset.GetRasterBand(1)
mask_band = src_band.GetMaskBand()
dst_band = src_band
prog_func = gdal.TermProgress_nocb
# 调用gdal滤波函数
result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
if result != 0:
return False
proj = dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
del dataset
return True
#
# if __name__ == '__main__':
# inputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25.tif'
# outputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25_sieve_filter.tif'
# flag = gdal_sieve_filter(outputfile, inputfile, threshold=100, connectedness=4)

View File

@ -0,0 +1,122 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File ScatteringAuxData.py
@Function  backscatter auxiliary data
@Author SHJ
@Contact
@Date 2022/6/29
@Version 1.0.0
Revision history:
[seq]  [date]      [author]  [change]
1      2022-6-29   石海军     1. Support extracting parameters from both GF3 L1A metadata and orthorectification metadata
"""
import logging
from xml.etree.ElementTree import ElementTree
logger = logging.getLogger("mylog")
class GF3L1AMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('processinfo').find('CalibrationConst').find(polarization).text)
return Kdb
class OrthoMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('l1aInfo').find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('l1aInfo').find('processinfo').find('CalibrationConst').find(polarization).text)
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# fetch the microwave centre frequency
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
RadarCenterFrequency = float(root.find('sensor').find('RadarCenterFrequency').text)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# fetch the microwave wavelength, in m
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
lamda = float(root.find('sensor').find('lamda').text)
return lamda
class ScatteringAuxData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
try:
QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_QualifyValue() error!')
QualifyValue = GF3L1AMetaData.get_QualifyValue(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_QualifyValue() success!')
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
try:
Kdb = OrthoMetaData.get_Kdb(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_Kdb() error!')
Kdb = GF3L1AMetaData.get_Kdb(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_Kdb() success!')
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# fetch the microwave centre frequency, in GHz
RadarCenterFrequency = OrthoMetaData.get_RadarCenterFrequency(meta_file_path)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# fetch the microwave wavelength, in m
lamda = OrthoMetaData.get_lamda(meta_file_path)
return lamda
# if __name__ == '__main__':
# A = ScatteringAuxData()
# dir = 'G:\MicroWorkspace\C-SAR\AuxSAR\GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485_geo/'
# path = dir + 'GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml'
# path1 = dir + 'OrthoProduct.meta.xml'
# t1 = A.get_QualifyValue(path, 'HH')
# t2 = A.get_Kdb(path, 'HH')
# t3 = A.get_RadarCenterFrequency(path)
# t4 = A.get_lamda(path)
# pass

View File

@ -0,0 +1,414 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File CalculateIncident.py
@Function  compute the local incidence angle
@Author LMM
@Date 2021/8/25 14:17
@Version 1.0.0
"""
import os
import numpy as np
from osgeo import gdal
from osgeo import gdalconst
import gc
import math
from xml.dom import minidom  # standard library, no install needed
class CalculateIncident:
def __init__(self):
pass
@staticmethod
def add_round(npgrid):
"""
Pad the grid with one ring of edge values and return the padded matrix
:param npgrid: DEM array
"""
ny, nx = npgrid.shape  # ny: rows, nx: columns
zbc = np.zeros((ny + 2, nx + 2))
zbc[1:-1, 1:-1] = npgrid
# four edges
zbc[0, 1:-1] = npgrid[0, :]
zbc[-1, 1:-1] = npgrid[-1, :]
zbc[1:-1, 0] = npgrid[:, 0]
zbc[1:-1, -1] = npgrid[:, -1]
# four corners
zbc[0, 0] = npgrid[0, 0]
zbc[0, -1] = npgrid[0, -1]
zbc[-1, 0] = npgrid[-1, 0]
zbc[-1, -1] = npgrid[-1, -1]
print("shape of the padded array:", zbc.shape)
return zbc
@staticmethod
def cal_dxdy(zbc, dx):
"""
计算dx,dy
paramzbc填充后的数组
paramdx dem数据像元大小
"""
we_x = ((zbc[1:-1, :-2]) - (zbc[1:-1, 2:])) / dx / 2 # WE方向
ns_y = ((zbc[2:, 1:-1]) - (zbc[:-2, 1:-1])) / dx / 2 # NS方向
print("输出Sx的数组的形状", we_x.shape, "输出Sy的数组的形状", ns_y.shape)
sx = we_x[1:-1, 1:-1]
sy = ns_y[1:-1, 1:-1]
# np.savetxt("dxdy.csv",dx,delimiter=",")
print("输出Sx2的数组的形状", sx.shape, "输出Sy2的数组的形状", sy.shape)
return sx, sy
@staticmethod
def cal_slopasp(dx, dy):
# compute slope and aspect
# slope
slope = (np.arctan(np.sqrt(dx * dx + dy * dy))) * 57.29578  # to degrees; 57.29578 = 180/math.pi
slope = slope[1:-1, 1:-1]
# aspect
aspect = np.ones([dx.shape[0], dx.shape[1]]).astype(np.float32)  # allocate the aspect array
# dx = dx.astype(np.float32)
# dy = dy.astype(np.float32)
# a1=(np.where(dx==0) and np.where(dy ==0))
# print(a1)
# aspect[a1]=-1
# a2 = (np.where(dx == 0) and np.where(dy > 0))
# aspect[a2] =0.0
# a3 = (np.where(dx == 0) and np.where(dy <0))
# aspect[a3] =180.0
# a4 = (np.where(dx > 0) and np.where(dy ==0))
# aspect[a4] =90.0
# a5 = (np.where(dx < 0) and np.where(dy ==0))
# aspect[a5] =270.0
# a6 = (np.where(dx != 0) or np.where(dy !=0))
# b=dy[a6]
# print(":", 1)
# aspect[a6] =float(math.atan2(dy[i, j], dx[i, j])) * 57.29578
# a7=np.where(aspect[a6]< 0.0)
# aspect[a7] = 90.0 - aspect[a7]
# a8=np.where(aspect[a6]> 90.0)
# aspect[a8] = 450.0- aspect[a8]
# a9 =np.where(aspect[a6] >= 0 or aspect[a6] <= 90)
# aspect[a9] =90.0 - aspect[a9]
for i in range(dx.shape[0]):
for j in range(dx.shape[1]):
x = float(dx[i, j])
y = float(dy[i, j])
if (x == 0.0) & (y == 0.0):
aspect[i, j] = -1
elif x == 0.0:
if y > 0.0:
aspect[i, j] = 0.0
else:
aspect[i, j] = 180.0
elif y == 0.0:
if x > 0.0:
aspect[i, j] = 90.0
else:
aspect[i, j] = 270.0
else:
aspect[i, j] = float(math.atan2(y, x)) * 57.29578  # atan2 result converted to degrees
if aspect[i, j] < 0.0:
aspect[i, j] = 90.0 - aspect[i, j]
elif aspect[i, j] > 90.0:
aspect[i, j] = 450.0 - aspect[i, j]
else:
aspect[i, j] = 90.0 - aspect[i, j]
print("输出aspect形状:", aspect.shape) # 3599, 3599
print("输出aspect:", aspect)
return slope, aspect
def creat_twofile(self, dem_file_path, slope_out_path, aspect_out_path):
"""
生成坡度图坡向图
param: path_file1 为输入文件tif数据的文件路径
"""
if os.path.isfile(dem_file_path):
print("高程数据文件存在")
else:
print("高程数据文件不存在")
dataset_caijian = gdal.Open(dem_file_path)
x_size = dataset_caijian.RasterXSize
y_size = dataset_caijian.RasterYSize
geo = dataset_caijian.GetGeoTransform()
pro = dataset_caijian.GetProjection()
array0 = dataset_caijian.ReadAsArray(0, 0, x_size, y_size)
print("输出dem数据的数组", array0)
zbc = self.add_round(array0)
sx, sy = self.cal_dxdy(zbc, 30)
slope, aspect = self.cal_slopasp(sx, sy)
driver = gdal.GetDriverByName("GTiff") # 创建一个数据格式
driver.Register()
newfile = driver.Create(slope_out_path, x_size, y_size, 1, gdal.GDT_Float32) # 存放路径文件名,长,宽,波段,数据类型
newfile.SetProjection(pro)
geo = [geo[0], geo[1], 0, geo[3], 0, -geo[1]]
newfile.SetGeoTransform(geo)
newfile.GetRasterBand(1).WriteArray(slope)
driver2 = gdal.GetDriverByName("GTiff") # 创建一个数据格式
driver2.Register()
newfile2 = driver2.Create(aspect_out_path, x_size, y_size, 1, gdal.GDT_Float32) # 存放路径文件名,长,宽,波段,数据类型
geo = [geo[0], geo[1], 0, geo[3], 0, -geo[1]]
newfile2.SetGeoTransform(geo)
newfile2.GetRasterBand(1).WriteArray(aspect)
@staticmethod
def resampling(input_file1, input_file2, ref_file, output_file, output_file2):
"""
采用gdal.Warp()方法进行重采样差值法为双线性插值
:param input_file1 slope path
:param input_file2 aspect path
:param ref_file: 参考图像路径
:param output_file: slope path
:param output_file2 aspect path
:return:
"""
gdal.AllRegister()
in_ds1 = gdal.Open(input_file1)
in_ds2 = gdal.Open(input_file2)
ref_ds = gdal.Open(ref_file, gdal.GA_ReadOnly)
# input image information
input_file_proj = in_ds1.GetProjection()
# inputefileTrans = in_ds1.GetGeoTransform()
reference_file_proj = ref_ds.GetProjection()
reference_file_trans = ref_ds.GetGeoTransform()
nbands = in_ds1.RasterCount
bandinputfile1 = in_ds1.GetRasterBand(1)
bandinputfile2 = in_ds2.GetRasterBand(1)
x = ref_ds.RasterXSize
y = ref_ds.RasterYSize
# create the resampled outputs (set projection and geotransform)
driver1 = gdal.GetDriverByName('GTiff')
output1 = driver1.Create(output_file, x, y, nbands, bandinputfile1.DataType)
output1.SetGeoTransform(reference_file_trans)
output1.SetProjection(reference_file_proj)
# options = gdal.WarpOptions(srcSRS=inputProj, dstSRS=referencefileProj, resampleAlg=gdalconst.GRA_Bilinear)
# resampleAlg = gdalconst.GRA_NearestNeighbour
gdal.ReprojectImage(in_ds1, output1, input_file_proj, reference_file_proj, gdalconst.GRA_Bilinear)
driver2 = gdal.GetDriverByName('GTiff')
output2 = driver2.Create(output_file2, x, y, nbands, bandinputfile2.DataType)
output2.SetGeoTransform(reference_file_trans)
output2.SetProjection(reference_file_proj)
# options = gdal.WarpOptions(srcSRS=inputProj, dstSRS=referencefileProj, resampleAlg=gdalconst.GRA_Bilinear)
# resampleAlg = gdalconst.GRA_NearestNeighbour
gdal.ReprojectImage(in_ds2, output2, input_file_proj, reference_file_proj, gdalconst.GRA_Bilinear)
@staticmethod
def getorbitparameter(xml_path):
"""
从轨道参数文件xml中获取升降轨信息影像四个角的经纬度坐标
"""
# 打开xml文档,根据路径初始化DOM
doc = minidom.parse(xml_path)
# 得到xml文档元素对象,初始化root对象
root = doc.documentElement
# 输出升降轨信息DEC降轨ASC升轨
direction = root.getElementsByTagName("Direction")[0]
# print("输出Direction的子节点列表",Direction.firstChild.data)
pd = direction.firstChild.data
imageinfo = root.getElementsByTagName("imageinfo")[0]
# topLeft latitude/longitude
top_left = imageinfo.getElementsByTagName("topLeft")[0]
latitude = top_left.getElementsByTagName("latitude")[0]
longitude = top_left.getElementsByTagName("longitude")[0]
tl_lat, tl_lon = latitude.firstChild.data, longitude.firstChild.data
# topRight latitude/longitude
top_right = imageinfo.getElementsByTagName("topRight")[0]
latitude = top_right.getElementsByTagName("latitude")[0]
longitude = top_right.getElementsByTagName("longitude")[0]
tr_lat, tr_lon = latitude.firstChild.data, longitude.firstChild.data
# bottomLeft latitude/longitude
bottom_left = imageinfo.getElementsByTagName("bottomLeft")[0]
latitude = bottom_left.getElementsByTagName("latitude")[0]
longitude = bottom_left.getElementsByTagName("longitude")[0]
bl_lat, bl_lon = latitude.firstChild.data, longitude.firstChild.data
# bottomRight latitude/longitude
bottom_right = imageinfo.getElementsByTagName("bottomRight")[0]
latitude = bottom_right.getElementsByTagName("latitude")[0]
longitude = bottom_right.getElementsByTagName("longitude")[0]
br_lat, br_lon = latitude.firstChild.data, longitude.firstChild.data
print("pd,tl_lat,tl_lon,tr_lat,tr_lon,bl_lat,bl_lon,br_lat,br_lon", pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat,
bl_lon, br_lat, br_lon)
return pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon
def get_rparademeter(self, xml_path):
"""
计算雷达视线向方向角R
"""
pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon = self.getorbitparameter(xml_path)
tl_lat = float(tl_lat) # 原来的数是带有小数点的字符串int会报错使用float
tl_lon = float(tl_lon)
# tr_lat = float(tr_lat)
# tr_lon = float(tr_lon)
bl_lat = float(bl_lat)
bl_lon = float(bl_lon)
# br_lat = float(br_lat)
# br_lon = float(br_lon)
if pd == "DEC":
# 降轨
b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
r = 270 + b
return r
# tl_lat, tl_lon = lonlat2geo(tl_lat, tl_lon)
# tr_lat, tr_lon = lonlat2geo(tr_lat, tr_lon)
# bl_lat, bl_lon = lonlat2geo(bl_lat, bl_lon)
# br_lat, br_lon = lonlat2geo(br_lat, br_lon)
# B2 = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
# R2 = 270 + B2
# print(("输出R2", R2))
if pd == "ASC":
# 升轨
b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
return b
def clau(self, pathfile1, pathfile2, pathfile3, xml_path, save_localangle_path):
"""
计算局部入射角
param: pathfile1是slope的坡度图路径
param: pathfile2是aspect的坡向图路径
param: pathfile3是入射角文件的路径
param: xml_path是轨道参数文件
r是雷达视线向方位角
"""
r = self.get_rparademeter(xml_path)
pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon = self.getorbitparameter(xml_path)
print("输出升降轨:", pd)
dataset = gdal.Open(pathfile1)
x = dataset.RasterXSize
y = dataset.RasterYSize
print("输出slope的行、列", x, y)
slope_array = dataset.ReadAsArray(0, 0, x, y)
dataset2 = gdal.Open(pathfile2)
x2 = dataset2.RasterXSize
y2 = dataset2.RasterYSize
print("输出aspect的行、列", x2, y2)
aspect_array = dataset2.ReadAsArray(0, 0, x2, y2)
dataset3 = gdal.Open(pathfile3)
x3 = dataset3.RasterXSize
y3 = dataset3.RasterYSize
geo3 = dataset3.GetGeoTransform()
pro3 = dataset3.GetProjection()
print("输出入射角文件的行、列:", x3, y3)
rushe_array = dataset3.ReadAsArray(0, 0, x3, y3)
# b0 = np.where(rushe_array > 0.00001, 0, 1)
radina_value = 0
if pd == "DEC":
# 降轨数据
# 雷达视线角-坡度角在90度到270度之间
where_0 = np.where(rushe_array == 0)
bb1 = (r-aspect_array).all() and (r-aspect_array).all()
bb2 = np.where(90 < bb1 < 270, 1, 0)
b1 = (bb1 and bb2)
# b1 = np.where(90 < ((r-aspect_array).all()) and ((r-aspect_array).all()) < 270, 1, 0)
c1 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) - np.sin(slope_array*(math.pi/180)) * np.sin(
rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
d1 = b1 * c1
# look azimuth minus aspect exactly 90 or 270 degrees
b2 = np.where((r-aspect_array == 90) | (r-aspect_array == 270), 1, 0)
d2 = b2*c1
# remaining directions
b3 = 1-b1-b2
c3 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) + np.sin(
slope_array*(math.pi/180)) * np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
d3 = b3 * c3
del b1, b2, b3, c3, c1
gc.collect()
radina_value = d1 + d2 + d3
radina_value[where_0] = 0
del d1, d2, d3
gc.collect()
if pd == "ASC":
# 升轨数据
# 坡度-雷达视线角在90度到270度之间
where_0 = np.where(rushe_array == 0)
bb1 = (r-aspect_array).all() and (r-aspect_array).all()
bb2 = np.where(90 < bb1 < 270, 1, 0)
b1 = (bb1 and bb2)
# b1 = np.where(90 < ((r-aspect_array).all()) and ((r-aspect_array).all()) < 270, 1, 0)
c1 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) + np.sin(
slope_array*(math.pi/180)) * np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
d1 = b1 * c1
# aspect minus look azimuth exactly 90 or 270 degrees
b2 = np.where((aspect_array-r == 90) | (aspect_array-r == 270), 1, 0)
d2 = b2 * c1
# remaining directions (0-90 or 270-360 degrees)
b3 = 1 - b1-b2
c3 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) - np.sin(slope_array*(math.pi/180)) *\
np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
d3 = b3 * c3
radina_value = d1 + d2 + d3
radina_value[where_0] = 0
del b1, b2, b3, c3, c1, d1, d2, d3
gc.collect()
jubu_o = 57.29578 * np.arccos(radina_value)
print("输出局部入射角", jubu_o)
driver = gdal.GetDriverByName("GTiff") # 创建一个数据格式
driver.Register()
newfile = driver.Create(save_localangle_path, x3, y3, 1, gdal.GDT_Float32) # 存放路径文件名,长,宽,波段,数据类型
newfile.SetProjection(pro3)
newfile.SetGeoTransform(geo3)
newfile.GetRasterBand(1).WriteArray(jubu_o)
def localangle(self, dem_path, incidence_angle_path, orbital_parameters_path):
"""
获取输入文件的路径
计算坡度图坡向图
计算局部入射角
"""
para_names = ["Dem", "IncidenceAngle", "OrbitalParameters", "经验A"]
if len(para_names) == 0:
return False
# 获取三个文件的路径
# print("输出三个文件路径",Dem_path,IncidenceAngle_path,OrbitalParameters_path)
# 确定坡度、坡向的输出路径,输出坡度、坡向图
slope_out_path = r"D:\MicroWorkspace\LeafAreaIndex\Temporary\UnClipslope.tif"
aspect_out_path = r"D:\MicroWorkspace\LeafAreaIndex\Temporary\UnClipaspect.tif"
print("slope_out_path的路径是", slope_out_path)
print("aspect_out_path的路径是", aspect_out_path)
self.creat_twofile(dem_path, slope_out_path, aspect_out_path)
# 根据入射角文件对坡度坡向图进行裁剪与重采样
slope_out_path2 = r"D:\MicroWorkspace\LocaLangle\Temporary\Clipslope.tif"
aspect_out_path2 = r"D:\MicroWorkspace\LocaLangle\Temporary\Clipaspect.tif"
self.resampling(slope_out_path, aspect_out_path, incidence_angle_path, slope_out_path2, aspect_out_path2)
# 输出局部入射角文件
save_localangle_path = r"D:\\MicroWorkspace\\LocaLangle\\Temporary\\\localangle.tif"
self.clau(slope_out_path2, aspect_out_path2, incidence_angle_path,
orbital_parameters_path, save_localangle_path)
# if __name__ == '__main__':
# calu_incident = CalculateIncident()
# Dem_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\dem.tif"
# IncidenceAngle_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\RSJ.tif"
# OrbitalParameters_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\" \
# "GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml"
# calu_incident.localangle(Dem_path, IncidenceAngle_path, OrbitalParameters_path)
# print('done')

View File

@ -0,0 +1,302 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:lee_filter.py
@Function:lee_filter
@Contact: https://github.com/PyRadar/pyradar
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import numpy as np
import math
from PIL import Image
import multiprocessing
from tool.algorithm.block.blockprocess import BlockProcess
import logging
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.file.fileHandle import fileHandle
from tool.algorithm.algtools.filter import lee_Filter_c as lee_Filter_c
logger = logging.getLogger("mylog")
file =fileHandle(False)
COEF_VAR_DEFAULT = 0.01
CU_DEFAULT = 0.25
import os
class Filter:
def __int__(self):
pass
@staticmethod
def assert_window_size(win_size):
"""
Asserts invalid window size.
Window size must be odd and bigger than 3.
"""
assert win_size >= 3, 'ERROR: win size must be at least 3'
if win_size % 2 == 0:
print('It is highly recommended to use odd window sizes.'
' You provided %s, an even number.' % (win_size, ))
@staticmethod
def assert_indices_in_range(width, height, xleft, xright, yup, ydown):
"""
Asserts index out of image range.
"""
# assert xleft >= 0 and xleft <= width, \
assert 0 <= xleft <= width, \
"index xleft:%s out of range (%s<= xleft < %s)" % (xleft, 0, width)
# assert xright >= 0 and xright <= width, \
assert 0 <= xright <= width, "index xright:%s out of range (%s<= xright < %s)" % (xright, 0, width)
# assert yup >= 0 and yup <= height, \
assert 0 <= yup <= height, "index yup:%s out of range. (%s<= yup < %s)" % (yup, 0, height)
# assert ydown >= 0 and ydown <= height, \
assert 0 <= ydown <= height, "index ydown:%s out of range. (%s<= ydown < %s)" % (ydown, 0, height)
@staticmethod
def weighting(window, cu=CU_DEFAULT):
"""
Computes the weighthing function for Lee filter using cu as the noise
coefficient.
"""
# cu is the noise variation coefficient
two_cu = cu * cu
# ci is the variation coefficient in the window
window_mean = window.mean()
window_std = window.std()
ci = window_std / window_mean
two_ci = ci * ci
if not two_ci: # dirty patch to avoid zero division
two_ci = COEF_VAR_DEFAULT
if cu > ci:
w_t = 0.0
else:
w_t = 1.0 - (two_cu / two_ci)
return w_t
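# A small numeric sketch of the weighting (hypothetical values): for a window with mean 10 and
# std 5, ci = 0.5 and two_ci = 0.25, so with cu = 0.25 the weight is 1 - (0.25*0.25)/0.25 = 0.75;
# the filtered pixel then becomes 0.75*pix_value + 0.25*window_mean:
# import numpy as np
# w = np.array([[5., 15.], [5., 15.]])  # mean 10, std 5
# print(Filter.weighting(w, cu=0.25))  # -> 0.75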
def lee_filter(self, in_path, out_path, win_size):
"""
Apply the Lee filter to a numpy matrix containing the image, with a window of
win_size x win_size.
"""
cu = CU_DEFAULT
self.assert_window_size(win_size)
# img = self.ImageHandler.get_band_array(img, 1)
array1 = Image.open(in_path)
img = np.array(array1)
# we process the entire img as float64 to avoid type overflow error
img = np.float64(img)
img_filtered = np.zeros_like(img)
# n, m = img.shape
# win_offset = win_size / 2
#
# for i in range(0, n):
# xleft = i - win_offset
# xright = i + win_offset
#
# if xleft < 0:
# xleft = 0
# if xright >= n:
# xright = n
#
# for j in range(0, m):
# yup = j - win_offset
# ydown = j + win_offset
#
# if yup < 0:
# yup = 0
# if ydown >= m:
# ydown = m
#
# self.assert_indices_in_range(n, m, xleft, xright, yup, ydown)
#
# pix_value = img[i, j]
#
# window = img[math.ceil(xleft):int(xright)+1, math.ceil(yup):int(ydown)+1]
# w_t = self.weighting(window, cu)
# window_mean = window.mean()
# new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
#
# if not new_pix_value > 0:
# new_pix_value = 0
# img_filtered[i, j] = round(new_pix_value)
# # return img_filtered
self.lee_filter_array(img, img_filtered, win_size)
out_image = Image.fromarray(img_filtered)
out_image.save(out_path)
print("lee_filter finish! path:" + out_path)
return True
@staticmethod
def lee_filter_array(in_arry, out_arry, win_size):
"""
Apply the Lee filter to a numpy matrix containing the image, with a window of
win_size x win_size.
"""
f = Filter()
#cu = CU_DEFAULT
f.assert_window_size(win_size)
img = in_arry
# we process the entire img as float64 to avoid type overflow error
img = np.float64(img)
img = img + 100
# signature: lee_filter_array(np.ndarray[double, ndim=2] img, np.ndarray[double, ndim=2] out_arry, int win_size)
newOUt = lee_Filter_c.lee_filter_array(img, out_arry, win_size)
newOUt = newOUt - 100
out_arry[:, :] = newOUt[:, :]
# def lee_filter_array(self, in_arry, out_arry, win_size):
# """
# Apply lee to a numpy matrix containing the image, with a window of
# win_size x win_size.
# """
# cu = CU_DEFAULT
# self.assert_window_size(win_size)
# img = in_arry
# # we process the entire img as float64 to avoid type overflow error
# img = np.float64(img)
# img = img + 100
# img_filtered = np.zeros_like(img)
# n, m = img.shape
# win_offset = win_size / 2
#
# for i in range(0, n):
# xleft = i - win_offset
# xright = i + win_offset
#
# if xleft < 0:
# xleft = 0
# if xright >= n:
# xright = n
#
# for j in range(0, m):
# yup = j - win_offset
# ydown = j + win_offset
#
# if yup < 0:
# yup = 0
# if ydown >= m:
# ydown = m
#
# self.assert_indices_in_range(n, m, xleft, xright, yup, ydown)
#
# pix_value = img[i, j]
#
# window = img[math.ceil(xleft):int(xright)+1, math.ceil(yup):int(ydown)+1]
# w_t = self.weighting(window, cu)
# window_mean = window.mean()
# new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
#
# if not new_pix_value > 0:
# new_pix_value = 0
# out_arry[i, j] = round(new_pix_value)
# out_arry = out_arry - 100
#
def lee_filter_multiprocess(self, in_paths, out_paths, win_size =3,processes_num=10):
if len(in_paths) != len(out_paths):
return False
# process in parallel
pool = multiprocessing.Pool(processes=processes_num)
pl = []
for i in range(len(in_paths)):
#self.lee_filter(in_paths[i], out_paths[i], win_size)
pl.append(pool.apply_async(self.lee_filter,(in_paths[i], out_paths[i], win_size)))
print("lee_filter runing! path:" + in_paths[i])
pool.close()
pool.join()
return True
def lee_filter_block_multiprocess(self, in_path, out_path, win_size =3):
in_name = os.path.basename(in_path)
out_name = os.path.basename(out_path)
outDir= os.path.split(out_path)[0]
# create the working folders
src_path = os.path.join(outDir, "src_img")
block_path = os.path.join(outDir, "block")
block_filtered = os.path.join(outDir, "block_filtered")
file.creat_dirs([src_path, block_path, block_filtered])
shutil.copyfile(in_path, os.path.join(src_path, in_name))
cols = ImageHandler.get_img_width(in_path)
rows = ImageHandler.get_img_height(in_path)
# split into blocks
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
bp.cut(src_path, block_path, ['tif', 'tiff'], 'tif', block_size)
logger.info('blocking tifs success!')
img_dir, img_name = bp.get_file_names(block_path, ['tif'])
dir_dict = bp.get_same_img(img_dir, img_name)
img_path_list = [value for value in dir_dict.values()][0]
processes_num = min([len(img_path_list), multiprocessing.cpu_count() - 1])
out_img_path_list =[]
for in_path in img_path_list:
suffix = bp.get_suffix(os.path.basename(in_path))
out_path = os.path.join(block_filtered, out_name.replace('.tif', suffix))
out_img_path_list.append(out_path)
self.lee_filter_multiprocess(img_path_list, out_img_path_list, win_size = win_size, processes_num=processes_num)
# process in parallel
# pool = multiprocessing.Pool(processes=processes_num)
#
# for i in range(len(hh_list)):
# block_img_path = hh_list[i]
# suffix = bp.get_suffix(os.path.basename(hh_list[i]))
# filed_block_img_path = os.path.join(block_filtered,out_name.replace('.tif',suffix))
# pool.apply_async(self.lee_filter, (block_img_path, filed_block_img_path, win_size))
# print("lee_filter runing! path:" + block_img_path)
# logger.info('total:%s, block:%s lee_filter!', len(hh_list), i)
#
# pool.close()
# pool.join()
# merge the filtered block images
bp.combine(block_filtered, cols, rows, outDir, file_type=['tif'], datetype='float32')
file.del_folder(src_path)
file.del_folder(block_path)
file.del_folder(block_filtered)
pass
def lee_process_sar(self, in_sar, out_sar, win_size, noise_var):
'''
# std::cout << "mode 12"
# std::cout << "SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var"
'''
exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 12, in_sar,
out_sar, win_size, noise_var)
print(exe_cmd)
print(os.system(exe_cmd))
print("==========================================================================")
if __name__ == '__main__':
# example 1
# path = r"I:\MicroWorkspace\product\C-SAR\LeafAreaIndex\Temporary\cai_sartif\HV_0_512_0_512.tif"
# f = Filter()
# f.lee_filter(path, path, 3)
# example 2
f = Filter()
f.lee_filter_block_multiprocess(r'I:\preprocessed\HH.tif', r'I:\preprocessed\HHf.tif')
pass

View File

@ -0,0 +1,124 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:lee_filter.py
@Function:lee_filter
@Contact: https://github.com/PyRadar/pyradar
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import os
cimport cython  # required
import numpy as np  # numpy must be declared at both the Python level...
cimport numpy as np  # ...and the C level
from libc.math cimport pi
from libc.math cimport atan as math_atan
from libc.math cimport log10 as math_log10
from libc.math cimport log as math_log
from libc.math cimport floor as math_floor
from libc.math cimport sqrt as math_sqrt
from libc.math cimport exp as math_exp
from libc.math cimport sin as math_sin
from libc.math cimport cos as math_cos
from libc.math cimport tan as math_tan
from libc.math cimport asin as math_asin
from libc.math cimport acos as math_acos
from libc.math cimport sinh as math_sinh
from libc.math cimport cosh as math_cosh
from libc.math cimport tanh as math_tanh
from libc.math cimport ceil as math_ceil
from libc.math cimport lround as math_round
cdef double COEF_VAR_DEFAULT = 0.01
cdef double CU_DEFAULT = 0.25
cdef int ceil_usr(double v):
return int(math_ceil(v))
cdef double weighting(np.ndarray[double,ndim=2] window,double cu):
"""
Computes the weighthing function for Lee filter using cu as the noise
coefficient.
"""
# cu is the noise variation coefficient
cdef double two_cu = cu * cu
# ci is the variation coefficient in the window
cdef double window_mean = window.mean()
cdef double window_std = window.std()
cdef double ci = window_std / window_mean
cdef double two_ci = ci * ci
cdef double w_t=0;
if two_ci == 0:  # dirty patch to avoid zero division
two_ci = COEF_VAR_DEFAULT
if cu > ci:
w_t = 0.0
else:
w_t = 1.0 - (two_cu / two_ci)
return w_t
cpdef np.ndarray[double,ndim=2] lee_filter_array(np.ndarray[double,ndim=2] img,np.ndarray[double,ndim=2] out_arry,int win_size):
"""
Apply lee to a numpy matrix containing the image, with a window of
win_size x win_size.
"""
# we process the entire img as float64 to avoid type overflow error
#n, m = img.shape
cdef double cu = CU_DEFAULT
cdef int i=0
cdef int j=0
cdef int xleft=0
cdef int xright=0
cdef int yup=0
cdef int ydown=0
cdef np.ndarray[double,ndim=2] window;
cdef double w_t=0;
cdef double window_mean=0;
cdef double new_pix_valu=0;
cdef int n = img.shape[0]
cdef int m=img.shape[1]
cdef int win_offset=int(win_size/2)
while i<n:
xleft=ceil_usr(i-win_offset)
xright=int(i+win_offset)
if xleft < 0:
xleft = 0
if xright >= n:
xright = n
j=0
while j<m:
yup = ceil_usr(j - win_offset)
ydown = int(j + win_offset)
if yup < 0:
yup = 0
if ydown >= m:
ydown = m
pix_value = img[i, j]
window = img[xleft:xright+1, yup:ydown+1]
w_t = weighting(window, cu)
window_mean = np.mean(window)
new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
if not new_pix_value > 0:
new_pix_value = 0
out_arry[i, j] = round(new_pix_value*100000.0)/100000.0
j=j+1
i=i+1
return out_arry
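# A hedged usage sketch (after compiling this module with the accompanying setup.py; the module
# name matches the import used in lee_filter.py): the result is written into out_arry in place
# and also returned, with values rounded to five decimals:
# import numpy as np
# from tool.algorithm.algtools.filter import lee_Filter_c
# img = np.random.rand(64, 64).astype(np.float64)
# out = np.zeros_like(img)
# lee_Filter_c.lee_filter_array(img, out, 3)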

View File

@ -0,0 +1,45 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./lee_Filter') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./lee_Filter/lee_Filter_c.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# command: python setup.py build_ext --inplace

View File

@ -0,0 +1,90 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File logHandler.py
@Function  log creation and housekeeping
@Author SHJ
@Date 2021/12/1
@Version 1.0.0
"""
import logging
import os
import time
import datetime
class LogHandler:
"""
生成日志
"""
__logger = logging.getLogger("mylog")
__format_str = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] - %(module)s.%(funcName)s "
"(%(filename)s:%(lineno)d) - %(message)s")
__log_path = None
@staticmethod
def init_log_handler(log_name):
"""
初始化日志
:param log_name: 日志保存的路径和名称
:return:
"""
path = os.getcwd()
current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
LogHandler.__log_path = os.path.join(path, log_name + current_time + ".log")
para_dir = os.path.split(LogHandler.__log_path)
if not os.path.exists(para_dir[0]):
os.makedirs(para_dir[0])
# delete log files older than seven days
LogHandler.delete_outdate_files(para_dir[0])
# method 1: plain logging
log_format = "[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s" \
" (%(filename)s:Line%(lineno)d) "
date_format = "%m/%d/%Y %H:%M:%S"
fp = logging.FileHandler(LogHandler.__log_path, encoding='utf-8')
fs = logging.StreamHandler()
logging.basicConfig(level=logging.INFO, format=log_format, datefmt=date_format, handlers=[fp, fs])
# method 2: rotating log
# LogHandler.__logger.setLevel(logging.DEBUG)
# th = handlers.TimedRotatingFileHandler(filename=LogHandler.__log_path, when='S', interval=1,
# backupCount=2, encoding='utf-8')
# th.suffix = "%Y-%m-%d-%H-%M-%S.log"
# th.setFormatter(LogHandler.__format_str)
# th.setLevel(level=logging.DEBUG)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# LogHandler.__logger.addHandler(console)
# LogHandler.__logger.addHandler(th)
@staticmethod
def delete_outdate_files(path, date_interval=7):
"""
删除目录下七天前创建的文件
"""
current_time = time.strftime("%Y-%m-%d", time.localtime(time.time()))
current_time_list = current_time.split("-")
current_time_day = datetime.datetime(int(current_time_list[0]), int(current_time_list[1]),
int(current_time_list[2]))
for root, dirs, files in os.walk(path):
for item in files:
item_format = item.split(".", 2)
if len(item_format) > 1 and item_format[1] == "log":
file_path = os.path.join(root, item)
create_time = time.strftime("%Y-%m-%d", time.localtime((os.stat(file_path)).st_mtime))
create_time_list = create_time.split("-")
create_time_day = datetime.datetime(int(create_time_list[0]), int(create_time_list[1]),
int(create_time_list[2]))
time_difference = (current_time_day - create_time_day).days
if time_difference > date_interval:
os.remove(file_path)
#
# if __name__ == "__main__":
# # eg2:
# log_handler = LogHandler()
# log_handler.init_log_handler(r"run_log\myrun1")
# logging.warning("1")
# print("done")

View File

@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 14 18:53:14 2021
@author: Dipankar
References
----------
Oh (2004): Quantitative retrieval of soil moisture content and surface roughness from multipolarized radar observations of bare soil surface. IEEE TGRS 42(3). 596-601.
"""
# ---------------------------------------------------------------------------------------
# Copyright (C) 2021 by Microwave Remote Sensing Lab, IITBombay http://www.mrslab.in
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, see http://www.gnu.org/licenses/
# ---------------------------------------------------------------------------------------
import numpy as np
#import matplotlib.pyplot as plt
## Description: Given sigma_0_vv, sigma_0_hh, and sigma_0_hv, the inverse
## model computes s, and mv
sigma0vvdB = -14.1
sigma0hhdB = -16.0
sigma0hvdB = -26.5
theta = 35. ##Incidence angle
f = 5.0 ##GHz
k = 2*np.pi*f/0.3 #calculate the wave number
theta_rad = theta*np.pi/180 #represent angle in radians
sigma_0_vv = np.power(10,(sigma0vvdB/10))  # convert to linear scale
sigma_0_hh = np.power(10,(sigma0hhdB/10))
sigma_0_hv = np.power(10,(sigma0hvdB/10))
p = sigma_0_hh / sigma_0_vv #calculate the p-ratio
q = sigma_0_hv / sigma_0_vv #calculate the q-ratio
mv0 = np.arange(0.05,0.5,0.01)  # candidate volumetric soil-moisture values (fine increments)
## First estimates s1 and mv1
ks = ((-1)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv0**0.7 * (np.cos(theta_rad))**2.2)))**0.556
err = (1 - (2.*theta_rad/np.pi)**(0.35*mv0**(-0.65)) * np.exp(-0.4 * ks**1.4))-p
abs_err = np.abs(err)
min_err = np.min(abs_err) #find the value of minimum error
mv1 = mv0[np.where(abs_err == min_err)]
ks1 = ((-1)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv1**0.7 * (np.cos(theta_rad))**2.2)))**0.556
s1 = ks1/k
## Second estimate s2 and mv2
ks2 = (np.log(1-(q/(0.095 * (0.13 + np.sin(1.5*theta_rad))**1.4))) /(-1.3))**(10./9.)
s2 = ks2/k
xx = (1-p)/np.exp(-0.4 * ks2**1.4)
if xx<=0:
mv2 =0
else:
yy = np.log(xx)/(0.35*np.log(2*theta_rad/np.pi))
mv2 = yy**(-100/65)
print(mv2,yy,np.power(yy,-100/65))
## Third estimate mv3
mv3 = ((sigma_0_hv/(1 - np.exp(-0.32 * ks2**1.8)))/(0.11 * np.cos(theta_rad)**2.2))**(1/0.7)
## weighted average s and mv-------------------------------------
sf = (s1 + 0.25*s2)/(1+0.25)
mvf = (mv1+mv2+mv3)/3
print(mv1,mv2,mv3,s1,s2)
print('Estimated rms height s (cm): ', sf*100)
print('Estimated volumetric soil moisture: ', mvf)

View File

@ -0,0 +1,128 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 14:59:54 2013
@author: Sat Kumar Tomer
@email: satkumartomer@gmail.com
@website: www.ambhas.com
"""
cimport cython  # required
import numpy as np  # numpy must be declared at both the Python level...
cimport numpy as np  # ...and the C level
from libc.math cimport pi
from scipy.optimize import fmin
cpdef np.ndarray[double,ndim=1] inverse_oh2004(double sigma0vvdB,double sigma0hhdB,double sigma0hvdB,double theta,double f):
"""
sigma0vvdB = -14.1 dB
sigma0hhdB = -16.0
sigma0hvdB = -26.5
theta = 35. ##Incidence angle
f = 5.0 ##GHz
"""
#print("--------------------------------------------------------\n")
cdef np.ndarray[double,ndim=1] result=np.ones((2))
result[0]=np.nan
result[1]=np.nan
#print("*************设置为nan****************")
#print(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f)
cdef double k = 2*3.1415926*f/0.299792458; #calculate the wave number
cdef double theta_rad = theta*3.1415926/180; #represent angle in radians
cdef double sigma_0_vv = np.power(10.,(sigma0vvdB/10.)) #%represent data in linear scale
cdef double sigma_0_hh = np.power(10.,(sigma0hhdB/10.))
cdef double sigma_0_hv = np.power(10.,(sigma0hvdB/10.))
if sigma_0_vv==0:
#print("***********sigma_0_vv==0*************")
return result
cdef double p = sigma_0_hh / sigma_0_vv; #calculate the p-ratio
cdef double q = sigma_0_hv / sigma_0_vv; #calculate the q-ratio
cdef np.ndarray[double,ndim=1] mv0 = np.arange(0.05,0.9,0.01)  # candidate volumetric soil-moisture values (fine increments)
## First estimates s1 and mv1
cdef np.ndarray[double,ndim=1] ks = ((-1.)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv0**0.7 * (np.cos(theta_rad))**2.2)))**0.556
cdef np.ndarray[double,ndim=1] err = (1. - (2.*theta_rad/np.pi)**(0.35*mv0**(-0.65)) * np.exp(-0.4 * ks**1.4))-p
cdef np.ndarray[double,ndim=1] abs_err = np.abs(err);
cdef double min_err = np.nanmin(abs_err); #find the value of minimum error
#print(np.where(abs_err == min_err)[0].shape)
if np.isnan(min_err) or np.max(np.where(abs_err == min_err)[0].shape) == 0:
#print("***************min_err==np.nan or np.max(np.where(abs_err == min_err)[0].shape)==0")
return result
cdef double mv1 = mv0[np.where(abs_err == min_err)[0][0]]
cdef double temp_ks1=1. - sigma_0_hv/(0.11 * mv1**0.7 * (np.cos(theta_rad))**2.2)
if temp_ks1<0:
#print("*********************temp_ks1<0")
return result
cdef double ks1 = ((-1)*3.125*np.log(temp_ks1))**0.556
cdef double s1 = ks1/k
## Second estimate s2 and mv2
cdef double ks2 = (np.log(1-(q/(0.095 * (0.13 + np.sin(1.5*theta_rad))**1.4))) /(-1.3))**(10./9.)
cdef double s2 = ks2/k
cdef double mv2 =0.
cdef double yy =0.
cdef double xx = (1-p)/np.exp(-0.4 * ks2**1.4)
if xx<=0:
mv2 =0.
else:
yy = np.log(xx)/(0.35*np.log(2*theta_rad/np.pi))
mv2=np.power(yy,-100.0/65)
## Third estimate mv3
cdef double mv3 = ((sigma_0_hv/(1 - np.exp(-0.32 * ks2**1.8)))/(0.11 * np.cos(theta_rad)**2.2))**(1/0.7)
## weighted average s and mv-------------------------------------
#print("q:\t",q)
#print("k:\t",k)
#print("ks1:\t",ks1)
#print("ks2:\t",ks2)
#print("theta_rad:\t",theta_rad)
cdef double sf = (s1 + 0.25*s2)/(1+0.25)
cdef double mvf = (mv1+mv2+mv3)/3
result[0]=mvf*1.0
result[1]=sf*1.0
#print("mv1:\t",mv1)
#print("mv2:\t",mv2)
#print("mv3:\t",mv3)
#print("s1:\t",s1)
#print("s2:\t",s2)
#print("Estimated volumetric soil moisture: ",result[0])
#print("Estimated rms height s (m): ",result[1])
#print("\nend\n")
return result
cpdef double lamda2freq(double lamda):
return 299792458.0/lamda
cpdef double freq2lamda(double freq):
return 299792458.0/freq
# double sigma0vvdB,double sigma0hhdB,double sigma0hvdB,double theta,double f
cpdef int retrieve_oh2004_main(int n,np.ndarray[double,ndim=1] mv,np.ndarray[double,ndim=1] h,np.ndarray[int,ndim=1] mask,np.ndarray[double,ndim=1] sigma0vvdB,np.ndarray[double,ndim=1] sigma0hhdB,np.ndarray[double,ndim=1] sigma0hvdB, np.ndarray[double,ndim=1] vh, np.ndarray[double,ndim=1] theta,double f):
cdef int i=0;
cdef np.ndarray[double,ndim=1] result;
while i<n:
if mask[i]<0.5:
mv[i]=np.nan
h[i] =np.nan
else:
#print(i)
##print(sigma0vvdB[i], sigma0hhdB[i],sigma0hvdB[i], theta[i], f)
result= inverse_oh2004(sigma0vvdB[i], sigma0hhdB[i],sigma0hvdB[i], theta[i], f)
##print(result)
mv[i]=result[0]
h[i] =result[1]
##print(mv[i],h[i])
##print(result[0],result[1])
i=i+1
return 1

View File

@ -0,0 +1,45 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./oh2004') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./oh2004/oh2004.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# command: python setup.py build_ext --inplace

View File

@ -0,0 +1,26 @@
import numpy as np
import oh2004
sigma0vvdB = -14.1
sigma0hhdB = -16.0
sigma0hvdB = -26.5
theta = 35. ##Incidence angle
f = 5.0 ##GHz
#print(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f)
#print(oh2004.inverse_oh2004(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f))
n=3
mask=np.ones((3))
mask[1]=0
mask=mask.astype(np.int32)
sigma0hhdB=np.ones((3))*sigma0hhdB
sigma0vvdB=np.ones((3))*sigma0vvdB
sigma0hvdB=np.ones((3))*sigma0hvdB
theta=np.ones((3))*theta
mv=np.zeros(3)*1.0
h=np.zeros(3)*1.0
oh2004.retrieve_oh2004_main(n,mv,h, mask,sigma0vvdB,sigma0hhdB,sigma0hvdB,sigma0hvdB, theta,f)
print(mask)
print(mv)
print(h)

View File

@ -0,0 +1,92 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:sieve_filter.py
@Function:GDAL sieve (speckle) filtering
@Contact: 'https://www.osgeo.cn/gdal/api/gdal_alg.html?highlight=gdalsievefilter#'
'_CPPv415GDALSieveFilter15GDALRasterBandH15GDALRasterBandH15GDALRasterBandHiiPPc16GDALProgressFuncPv'
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import logging
from osgeo import gdal
from tool.algorithm.image.ImageHandle import ImageHandler
logger = logging.getLogger("mylog")
def gdal_sieve_filter_test(dst_filename, src_filename, threshold=100, connectedness=8):
"""
基于python GDAL栅格滤波
:param dst_filename: 输出滤波后的影像
:param src_filename: 输入需要处理的文件
:param threshold: 滤波的值大小
:param connectedness: 连通域, 范围4或者8
:return:
"""
# 4表示对角像素不被视为直接相邻用于多边形成员资格8表示对角像素不相邻
# connectedness = 4
gdal.AllRegister()
# print('需要处理滤波的栅格文件:{},阈值(分辨率):{}'.format(src_filename, threshold))
dataset = gdal.Open(src_filename, gdal.GA_Update)
if dataset is None:
logger.error('{} open tif fail!'.format(src_filename))
return False
# fetch the source raster band to process
src_band = dataset.GetRasterBand(1)
mask_band = src_band.GetMaskBand()
dst_band = src_band
prog_func = gdal.TermProgress_nocb
# run the GDAL sieve filter
result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
if result != 0:
return False
proj = dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
del dataset
return True
def gdal_sieve_filter(dst_filename, src_filename, threshold=2, connectedness=4):
"""
基于python GDAL栅格滤波
:param dst_filename: 输出滤波后的影像
:param src_filename: 输入需要处理的文件
:param threshold: 滤波的值大小
:param connectedness: 连通域, 范围4或者8
:return:
"""
# 4表示对角像素不被视为直接相邻用于多边形成员资格8表示对角像素不相邻
# connectedness = 4
gdal.AllRegister()
# print('需要处理滤波的栅格文件:{},阈值(分辨率):{}'.format(src_filename, threshold))
dataset = gdal.Open(src_filename, gdal.GA_Update)
if dataset is None:
logger.error('{}: open tif failed!'.format(src_filename))
return False
# fetch the source band to process
src_band = dataset.GetRasterBand(1)
# SieveFilter effectively works on integer-valued data, so scale by 1000 to
# preserve three decimals; the result is scaled back down after filtering.
# Caution: the dataset is opened with GA_Update, so the scaled values are
# also written back into the source file as a side effect.
src_array = src_band.ReadAsArray(0, 0, src_band.XSize, src_band.YSize)
src_array = src_array * 1000
src_band.WriteArray(src_array)
mask_band = None
dst_band = src_band
prog_func = gdal.TermProgress_nocb
# invoke GDAL's sieve filter
result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
if result != 0:
return False
proj = dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
dst_array = dst_array / 1000
ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
del dataset
return True
if __name__ == '__main__':
inputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25.tif'
outputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25_sieve_filter.tif'
flag = gdal_sieve_filter(outputfile, inputfile, threshold=100, connectedness=4)
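# Illustrative follow-up (the paths above are sample placeholders): report
# whether the in-place sieve run completed.
print('sieve filter finished:', flag)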

View File

@ -0,0 +1,449 @@
# -*- coding: UTF-8 -*-
"""
@Project : microproduct
@File : blockprocess.py
@Function : block-wise cutting and mosaicking for tif/tiff images
@Contact : https://blog.csdn.net/qq_38308388/article/details/102978755
@Author:SHJ
@Date:2021/9/6
@Version:1.0.0
"""
from osgeo import osr, gdal
import numpy as np
import os
from PIL import Image
# import time
# from skimage import io
from tool.algorithm.image.ImageHandle import ImageHandler
class BlockProcess:
def __init__(self):
pass
@staticmethod
def get_block_size(rows, cols):
block_size = 512
if rows > 2048 and cols > 2048:
block_size = 1024
return block_size
# def get_block_size(rows, cols, block_size_config):
# block_size = 512 if block_size_config < 512 else block_size_config
# if rows > 2048 and cols > 2048:
# block_size = block_size_config
# return block_size
@staticmethod
def get_suffix(path_name):
name = path_name
suffix = '_' + name.split('_')[-4] + '_' + name.split('_')[-3] + '_' + name.split('_')[-2] + '_' + \
name.split('_')[-1]
return suffix
@staticmethod
def get_file_names(data_dir, file_type=['tif', 'tiff']):
"""
获取data_dir文件夹下file_type类型的文件路径
"""
result_dir = []
result_name = []
for maindir, subdir, file_name_list in os.walk(data_dir):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
ext = apath.split('.')[-1]
if ext in file_type:
result_dir.append(apath)
result_name.append(filename)
else:
pass
return result_dir, result_name
@staticmethod
def get_same_img(img_dir, img_name):
"""
在img_dir路径下用img_name的子图像路径集合将集合以字典输出
"""
result = {}
for idx, name in enumerate(img_name):
temp_name = ''
for idx2, item in enumerate(name.split('_')[:-4]):
if idx2 == 0:
temp_name = temp_name + item
else:
temp_name = temp_name + '_' + item
if temp_name in result:
result[temp_name].append(img_dir[idx])
else:
result[temp_name] = []
result[temp_name].append(img_dir[idx])
return result
@staticmethod
def assign_spatial_reference_byfile(src_path, dst_path):
"""
将src_path的地理信息输入到dst_path图像中
"""
src_ds = gdal.Open(src_path, gdal.GA_ReadOnly)
if src_ds is None:
return False
sr = osr.SpatialReference()
sr.ImportFromWkt(src_ds.GetProjectionRef())
geo_transform = src_ds.GetGeoTransform()
dst_ds = gdal.Open(dst_path, gdal.GA_Update)
if dst_ds is None:
return False
dst_ds.SetProjection(sr.ExportToWkt())
dst_ds.SetGeoTransform(geo_transform)
del dst_ds
del src_ds
return True
@staticmethod
def assign_spatial_reference_bypoint(row_begin, col_begin, src_proj, src_geo, img_path):
"""
将src_path的地理信息输入到dst_path图像中
"""
sr = osr.SpatialReference()
sr.ImportFromWkt(src_proj)
geo_transform = src_geo
geo_transform[0] = src_geo[0] + col_begin * src_geo[1] + row_begin * src_geo[2]
geo_transform[3] = src_geo[3] + col_begin * src_geo[4] + row_begin * src_geo[5]
dst_ds = gdal.Open(img_path, gdal.GA_Update)
if dst_ds is None:
return False
dst_ds.SetProjection(sr.ExportToWkt())
dst_ds.SetGeoTransform(geo_transform)
del dst_ds
return True
@staticmethod
def __get_band_array(filename, num):
"""
:param filename: tif路径
:param num: 波段序号
:return: 对应波段的矩阵数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
bands = dataset.GetRasterBand(num)
array = bands.ReadAsArray(0, 0, bands.XSize, bands.YSize)
del dataset
return array
@staticmethod
def get_data(filename):
"""
:param filename: tif路径
:return: 获取所有波段的数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_width = dataset.RasterXSize
im_height = dataset.RasterYSize
im_data = dataset.ReadAsArray(0, 0, im_width, im_height)
del dataset
return im_data
def get_tif_dtype(self, filename):
"""
:param filename: tif路径
:return: tif数据类型
"""
image = self.__get_band_array(filename, 1)
return image.dtype.name
def cut(self, in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=2048):
"""
:param in_dir:存放待裁剪的影像文件夹不用指定到tif文件
:param out_dir:存放裁剪结果的影像文件夹
:param file_type:待裁剪的影像文件类型tiftiffbmpjpgpng等等
:param out_type:裁剪结果影像文件类型
:param out_size:裁剪尺寸裁剪为n*n的方形
:return: True or Flase
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
data_dir_list, _ = self.get_file_names(in_dir, file_type)
count = 0
for each_dir in data_dir_list:
name_suffix = os.path.basename(each_dir)
img_name = os.path.splitext(name_suffix)[0]
# read via gdal
image = self.__get_band_array(each_dir, 1)
cut_factor_row = int(np.ceil(image.shape[0] / out_size))
cut_factor_clo = int(np.ceil(image.shape[1] / out_size))
for i in range(cut_factor_row):
for j in range(cut_factor_clo):
# For the last row/column of blocks, the (possibly fractional) block index
# is shifted back so the block ends exactly at the image edge; the last
# blocks may therefore overlap their neighbours.
if i == cut_factor_row - 1:
i = image.shape[0] / out_size - 1
if j == cut_factor_clo - 1:
j = image.shape[1] / out_size - 1
start_x = int(np.rint(i * out_size))
start_y = int(np.rint(j * out_size))
end_x = int(np.rint((i + 1) * out_size))
end_y = int(np.rint((j + 1) * out_size))
out_dir_images = os.path.join(out_dir, img_name + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
end_y) + '.' + out_type)
# + '/' + img_name \
# + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
# end_y) + '.' + out_type
# temp_image = image[start_x:end_x, start_y:end_y]
# out_image = Image.fromarray(temp_data)
# out_image = Image.fromarray(temp_image)
# out_image.save(out_dir_images)
data = ImageHandler.get_data(each_dir)
if ImageHandler.get_bands(each_dir) > 1:
temp_data = data[:,start_x:end_x, start_y:end_y]
else:
temp_data = data[start_x:end_x, start_y:end_y]
ImageHandler.write_img(out_dir_images, '', [0, 0, 0, 0, 0, 0], temp_data)
count += 1
return True
def cut_new(self, in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=2048):
"""
:param in_dir:存放待裁剪的影像文件夹不用指定到tif文件
:param out_dir:存放裁剪结果的影像文件夹
:param file_type:待裁剪的影像文件类型tiftiffbmpjpgpng等等
:param out_type:裁剪结果影像文件类型
:param out_size:裁剪尺寸裁剪为n*n的方形
:return: True or Flase
20230831修改 ----tjx
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
data_dir_list, _ = self.get_file_names(in_dir, file_type)
count = 0
for each_dir in data_dir_list:
name_suffix = os.path.basename(each_dir)
img_name = os.path.splitext(name_suffix)[0]
# read via gdal
image = self.__get_band_array(each_dir, 1)
block_x = int(np.ceil(image.shape[1] / out_size))
block_y = int(np.ceil(image.shape[0] / out_size)) # todo: revised blocking scheme
for i in range(block_y):
for j in range(block_x):
start_x = j * out_size
start_y = i * out_size
end_x = image.shape[1] if (j + 1) * out_size > image.shape[1] else (j + 1) * out_size
end_y = image.shape[0] if (i + 1) * out_size > image.shape[0] else (i + 1) * out_size
out_dir_images = os.path.join(out_dir, img_name + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
end_y) + '.' + out_type)
# print(out_dir_images)
data = ImageHandler.get_data(each_dir)
if ImageHandler.get_bands(each_dir) > 1:
# temp_data = data[:,start_x:end_x, start_y:end_y]
temp_data = data[:,start_y:end_y, start_x:end_x]
else:
# temp_data = data[start_x:end_x, start_y:end_y]
temp_data = data[start_y:end_y, start_x:end_x]
ImageHandler.write_img(out_dir_images, '', [0, 0, 0, 0, 0, 0], temp_data)
count += 1
return True
def combine(self, data_dir, w, h, out_dir, out_type='tif', file_type=['tif', 'tiff'], datetype='float16'):
"""
:param data_dir: 存放待裁剪的影像文件夹不用指定到tif文件
:param w 拼接影像的宽度
:param h 拼接影像的高度
:param out_dir: 存放裁剪结果的影像文件夹
:param out_type: 裁剪结果影像文件类型
:param file_type: 待裁剪的影像文件类型
:param datetype:数据类型 int8int16float16float32
:return: True or Flase
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_dir, img_name = self.get_file_names(data_dir, file_type)
dir_dict = self.get_same_img(img_dir, img_name)
count = 0
for key in dir_dict.keys():
temp_label = np.zeros(shape=(h, w), dtype=datetype)
dir_list = dir_dict[key]
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
# img = Image.open(item)
img = ImageHandler.get_band_array(item, 1)
img = np.array(img)
temp_label[x_start:x_end, y_start:y_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir, img_name)
ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
# label = Image.fromarray(temp_label)
# label.save(new_out_dir)
count += 1
return True
# todo 2023-09-01: blocking scheme changed; keep the merge code in sync
def combine_new(self, data_dir, w, h, out_dir, out_type='tif', file_type=['tif', 'tiff'], datetype='float16'):
"""
:param data_dir: 存放待裁剪的影像文件夹不用指定到tif文件
:param w 拼接影像的宽度
:param h 拼接影像的高度
:param out_dir: 存放裁剪结果的影像文件夹
:param out_type: 裁剪结果影像文件类型
:param file_type: 待裁剪的影像文件类型
:param datetype:数据类型 int8int16float16float32
:return: True or Flase
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_dir, img_name = self.get_file_names(data_dir, file_type)
dir_dict = self.get_same_img(img_dir, img_name)
count = 0
for key in dir_dict.keys():
dir_list = dir_dict[key]
bands = ImageHandler.get_bands(dir_list[0])
if bands > 1:
temp_label = np.zeros(shape=(bands, h, w), dtype=datetype)
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
# img = Image.open(item)
img = ImageHandler.get_band_array(item, 1)
img = np.array(img)
temp_label[:, y_start:y_end, x_start:x_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir, img_name)
ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
# label = Image.fromarray(temp_label)
# label.save(new_out_dir)
count += 1
else:
temp_label = np.zeros(shape=(h, w), dtype=datetype)
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
# img = Image.open(item)
img = ImageHandler.get_band_array(item, 1)
img = np.array(img)
temp_label[y_start:y_end, x_start:x_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir, img_name)
ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
# label = Image.fromarray(temp_label)
# label.save(new_out_dir)
count += 1
return True
def combine_Tif(self, data_dir, w, h, out_dir, proj, geo, out_type='tif', file_type=['tif', 'tiff'],
datetype='float16'):
"""
将文件夹下的tif拼接成一个大的tif
:param data_dir: 存放待裁剪的影像文件夹不用指定到tif文件
:param w 拼接影像的宽度
:param h 拼接影像的高度
:param out_dir: 存放裁剪结果的影像文件夹
:param proj: 指定投影系
:param geo: 指定变换参数
:param out_type: 裁剪结果影像文件类型
:param file_type: 待裁剪的影像文件类型
:param datetype:数据类型 int8int16float16float32
:return: True or Flase
"""
image_handler = ImageHandler()
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_dir, img_name = self.get_file_names(data_dir, file_type)
dir_dict = self.get_same_img(img_dir, img_name)
count = 0
for key in dir_dict.keys():
temp_label = np.zeros(shape=(h, w), dtype=datetype)
dir_list = dir_dict[key]
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
img = image_handler.get_data(item)
temp_label[x_start:x_end, y_start:y_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir,img_name)
image_handler.write_img(new_out_dir, proj, geo, temp_label)
count += 1
return True
# if __name__ == '__main__':
# bp = BlockProcess()
# # # cut
# data_dir = r"D:\micro\WorkSpace\LandCover\Temporary\processing\feature_tif\cut"
# out_dir = r"D:\micro\WorkSpace\LandCover\Temporary\processing\feature_tif\combine"
# file_type = ['tif']
# out_type = 'tif'
# cut_size = 1024
# #
# bp.cut_new(data_dir, out_dir, file_type, out_type, cut_size)
# # # combine
# # data_dir=r"D:\Workspace\SoilMoisture\Temporary\test"
# w= 5043
# h= 1239
# out_dirs=r"D:\BaiduNetdiskDownload\HF\cut_outs"
# # out_type='tif'
# # file_type=['tif']
# datetype = 'float'
# # src_path = r"D:\Workspace\SoilMoisture\Temporary\preprocessed\HH_preprocessed.tif"
# # datetype = bp.get_tif_dtype(src_path)
# bp.combine_new(out_dir, w, h, out_dirs, out_type, file_type, datetype)
#
# # 添加地理信息
# new_out_dir =r"D:\DATA\testdata1\combine\TEST_20200429_NDVI.tif"
# bp.assign_spatial_reference_byfile(src_path, new_out_dir)
# fn = r'D:\Workspace\SoilMoisture\Temporary\combine\soil_moisture.tif'
# product_path = r'D:\Workspace\SoilMoisture\Temporary\combine\soil_moisture_1.tif'
#
# proj, geos, img = ImageHandler.read_img(fn)
# img[img>1] = 1
# img[img<0] = 0
# ImageHandler.write_img(product_path, proj, geos, img)
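# Minimal self-contained round-trip sketch (illustrative; it uses a temporary
# directory instead of the sample paths above, and assumes ImageHandler
# accepts the empty projection / zero geotransform used throughout this
# module): a synthetic 3000x2500 raster is cut into 1024-pixel blocks and
# re-assembled onto the original grid.
if __name__ == '__main__':
    import tempfile
    tmp = tempfile.mkdtemp()
    src_dir = os.path.join(tmp, 'src')
    os.makedirs(src_dir)
    data = np.arange(3000 * 2500, dtype='float32').reshape(3000, 2500)
    ImageHandler.write_img(os.path.join(src_dir, 'demo_img.tif'), '', [0, 0, 0, 0, 0, 0], data)
    bp = BlockProcess()
    bp.cut_new(src_dir, os.path.join(tmp, 'cut'), ['tif'], 'tif', 1024)
    # merge the blocks back to the original 3000-row by 2500-column grid
    bp.combine_new(os.path.join(tmp, 'cut'), 2500, 3000, os.path.join(tmp, 'out'), 'tif', ['tif'], 'float32')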

View File

@ -0,0 +1,660 @@
"""
@Project microproduct
@File ImageHandle.py
@Function reading, format-normalizing and saving SAR imagery before and after processing
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import os
from PIL import Image
from osgeo import gdal
from osgeo import osr
import numpy as np
import cv2
import logging
import math
logger = logging.getLogger("mylog")
class ImageHandler:
"""
影像读取编辑保存
"""
def __init__(self):
pass
@staticmethod
def get_dataset(filename):
"""
:param filename: tif路径
:return: 图像句柄
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
return dataset
def get_scope(self, filename):
"""
:param filename: tif路径
:return: 图像范围
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_scope = self.cal_img_scope(dataset)
del dataset
return im_scope
@staticmethod
def get_projection(filename):
"""
:param filename: tif路径
:return: 地图投影信息
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_proj = dataset.GetProjection()
del dataset
return im_proj
@staticmethod
def get_geotransform(filename):
"""
:param filename: tif路径
:return: 从图像坐标空间也称为像素线到地理参考坐标空间投影或地理坐标的仿射变换
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
geotransform = dataset.GetGeoTransform()
del dataset
return geotransform
@staticmethod
def get_invgeotransform(filename):
"""
:param filename: tif path
:return: inverse transform, from georeferenced (projected or geographic)
coordinates back to image (pixel/line) coordinates
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
geotransform = dataset.GetGeoTransform()
geotransform=gdal.InvGeoTransform(geotransform)
del dataset
return geotransform
@staticmethod
def get_bands(filename):
"""
:param filename: tif路径
:return: 影像的波段数
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
bands = dataset.RasterCount
del dataset
return bands
@staticmethod
def geo2lonlat(dataset, x, y):
"""
将投影坐标转为经纬度坐标具体的投影坐标系由给定数据确定
:param dataset: GDAL地理数据
:param x: 投影坐标x
:param y: 投影坐标y
:return: 投影坐标(x, y)对应的经纬度坐标(lon, lat)
"""
prosrs = osr.SpatialReference()
prosrs.ImportFromWkt(dataset.GetProjection())
geosrs = prosrs.CloneGeogCS()
ct = osr.CoordinateTransformation(prosrs, geosrs)
coords = ct.TransformPoint(x, y)
return coords[:2]
@staticmethod
def get_band_array(filename, num=1):
"""
:param filename: tif路径
:param num: 波段序号
:return: 对应波段的矩阵数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
bands = dataset.GetRasterBand(num)
array = bands.ReadAsArray(0, 0, bands.XSize, bands.YSize)
# if 'int' in str(array.dtype):
# array[np.where(array == -9999)] = np.inf
# else:
# array[np.where(array < -9000.0)] = np.nan
del dataset
return array
@staticmethod
def get_data(filename):
"""
:param filename: tif路径
:return: 获取所有波段的数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_width = dataset.RasterXSize
im_height = dataset.RasterYSize
im_data = dataset.ReadAsArray(0, 0, im_width, im_height)
del dataset
return im_data
@staticmethod
def get_all_band_array(filename):
"""
大气延迟算法
将ERA-5影像所有波段存为一个数组, 波段数在第三维度 get_data->3788
:param filename 影像路径 get_all_band_array ->8837
:return: 影像数组
"""
dataset = gdal.Open(filename)
x_size = dataset.RasterXSize
y_size = dataset.RasterYSize
nums = dataset.RasterCount
array = np.zeros((y_size, x_size, nums), dtype=float)
if nums == 1:
bands_0 = dataset.GetRasterBand(1)
array = bands_0.ReadAsArray(0, 0, x_size, y_size)
else:
for i in range(0, nums):
bands = dataset.GetRasterBand(i+1)
arr = bands.ReadAsArray(0, 0, x_size, y_size)
array[:, :, i] = arr
return array
@staticmethod
def get_img_width(filename):
"""
:param filename: tif路径
:return: 影像宽度
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
width = dataset.RasterXSize
del dataset
return width
@staticmethod
def get_img_height(filename):
"""
:param filename: tif路径
:return: 影像高度
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
height = dataset.RasterYSize
del dataset
return height
@staticmethod
def read_img(filename):
"""
影像读取
:param filename:
:return:
"""
gdal.AllRegister()
img_dataset = gdal.Open(filename) # 打开文件
if img_dataset is None:
msg = 'Could not open ' + filename
logger.error(msg)
return None, None, None
im_proj = img_dataset.GetProjection() # 地图投影信息
if im_proj is None:
return None, None, None
im_geotrans = img_dataset.GetGeoTransform() # 仿射矩阵
im_width = img_dataset.RasterXSize # 栅格矩阵的行数
im_height = img_dataset.RasterYSize # 栅格矩阵的行数
im_arr = img_dataset.ReadAsArray(0, 0, im_width, im_height)
del img_dataset
return im_proj, im_geotrans, im_arr
def cal_img_scope(self, dataset):
"""
计算影像的地理坐标范围
根据GDAL的六参数模型将影像图上坐标行列号转为投影坐标或地理坐标根据具体数据的坐标系统转换
:param dataset :GDAL地理数据
:return: list[point_upleft, point_upright, point_downleft, point_downright]
"""
if dataset is None:
return None
img_geotrans = dataset.GetGeoTransform()
if img_geotrans is None:
return None
width = dataset.RasterXSize # 栅格矩阵的列数
height = dataset.RasterYSize # 栅格矩阵的行数
point_upleft = self.trans_rowcol2geo(img_geotrans, 0, 0)
point_upright = self.trans_rowcol2geo(img_geotrans, width, 0)
point_downleft = self.trans_rowcol2geo(img_geotrans, 0, height)
point_downright = self.trans_rowcol2geo(img_geotrans, width, height)
return [point_upleft, point_upright, point_downleft, point_downright]
@staticmethod
def get_scope_ori_sim(filename):
"""
计算影像的地理坐标范围
根据GDAL的六参数模型将影像图上坐标行列号转为投影坐标或地理坐标根据具体数据的坐标系统转换
:param dataset :GDAL地理数据
:return: list[point_upleft, point_upright, point_downleft, point_downright]
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
width = dataset.RasterXSize # 栅格矩阵的列数
height = dataset.RasterYSize # 栅格矩阵的行数
band1 = dataset.GetRasterBand(1)
array1 = band1.ReadAsArray(0, 0, band1.XSize, band1.YSize)
band2 = dataset.GetRasterBand(2)
array2 = band2.ReadAsArray(0, 0, band2.XSize, band2.YSize)
if array1[0, 0] < array1[0, width-1]:
point_upleft = [array1[0, 0], array2[0, 0]]
point_upright = [array1[0, width-1], array2[0, width-1]]
else:
point_upright = [array1[0, 0], array2[0, 0]]
point_upleft = [array1[0, width-1], array2[0, width-1]]
if array1[height-1, 0] < array1[height-1, width-1]:
point_downleft = [array1[height - 1, 0], array2[height - 1, 0]]
point_downright = [array1[height - 1, width - 1], array2[height - 1, width - 1]]
else:
point_downright = [array1[height - 1, 0], array2[height - 1, 0]]
point_downleft = [array1[height - 1, width - 1], array2[height - 1, width - 1]]
if array2[0, 0] < array2[height - 1, 0]:
# swap the top and bottom point pairs
tmp1 = point_upleft
point_upleft = point_downleft
point_downleft = tmp1
tmp2 = point_upright
point_upright = point_downright
point_downright = tmp2
return [point_upleft, point_upright, point_downleft, point_downright]
@staticmethod
def trans_rowcol2geo(img_geotrans,img_col, img_row):
"""
据GDAL的六参数模型仿射矩阵将影像图上坐标行列号转为投影坐标或地理坐标根据具体数据的坐标系统转换
:param img_geotrans: 仿射矩阵
:param img_col:图像纵坐标
:param img_row:图像横坐标
:return: [geo_x,geo_y]
"""
geo_x = img_geotrans[0] + img_geotrans[1] * img_col + img_geotrans[2] * img_row
geo_y = img_geotrans[3] + img_geotrans[4] * img_col + img_geotrans[5] * img_row
return [geo_x, geo_y]
@staticmethod
def write_era_into_img(filename, im_proj, im_geotrans, im_data):
"""
影像保存
:param filename:
:param im_proj:
:param im_geotrans:
:param im_data:
:return:
"""
gdal_dtypes = {
'int8': gdal.GDT_Byte,
'unit16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'unit32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
if not gdal_dtypes.get(im_data.dtype.name, None) is None:
datatype = gdal_dtypes[im_data.dtype.name]
else:
datatype = gdal.GDT_Float32
# 判读数组维数
if len(im_data.shape) == 3:
im_height, im_width, im_bands = im_data.shape # shape[0] 行数
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# 创建文件
if os.path.exists(os.path.split(filename)[0]) is False:
os.makedirs(os.path.split(filename)[0])
driver = gdal.GetDriverByName("GTiff") # 数据类型必须有,因为要计算需要多大内存空间
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
dataset.SetProjection(im_proj) # 写入投影
if im_bands == 1:
dataset.GetRasterBand(1).WriteArray(im_data) # 写入数组数据
else:
for i in range(im_bands):
dataset.GetRasterBand(i + 1).WriteArray(im_data[:, :, i])
# dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
del dataset
# 写GeoTiff文件
@staticmethod
def write_img(filename, im_proj, im_geotrans, im_data, no_data='0'):
"""
影像保存
:param filename: 保存的路径
:param im_proj:
:param im_geotrans:
:param im_data:
:param no_data: 把无效值设置为 nodata
:return:
"""
gdal_dtypes = {
'int8': gdal.GDT_Byte,
'unit16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'unit32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
if not gdal_dtypes.get(im_data.dtype.name, None) is None:
datatype = gdal_dtypes[im_data.dtype.name]
else:
datatype = gdal.GDT_Float32
flag = False
# 判读数组维数
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
flag = True
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# 创建文件
if os.path.exists(os.path.split(filename)[0]) is False:
os.makedirs(os.path.split(filename)[0])
driver = gdal.GetDriverByName("GTiff") # 数据类型必须有,因为要计算需要多大内存空间
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
dataset.SetProjection(im_proj) # 写入投影
if im_bands == 1:
# outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据
if flag:
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data[0])
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else:
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data)
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else:
for i in range(im_bands):
outband = dataset.GetRasterBand(1 + i)
outband.WriteArray(im_data[i])
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
# outRaster.GetRasterBand(i + 1).WriteArray(array[i])
del dataset
# 写GeoTiff文件
@staticmethod
def write_img_envi(filename, im_proj, im_geotrans, im_data, no_data='null'):
"""
影像保存
:param filename: 保存的路径
:param im_proj:
:param im_geotrans:
:param im_data:
:param no_data: 把无效值设置为 nodata
:return:
"""
gdal_dtypes = {
'int8': gdal.GDT_Byte,
'unit16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'unit32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
if not gdal_dtypes.get(im_data.dtype.name, None) is None:
datatype = gdal_dtypes[im_data.dtype.name]
else:
datatype = gdal.GDT_Float32
# 判读数组维数
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# 创建文件
if os.path.exists(os.path.split(filename)[0]) is False:
os.makedirs(os.path.split(filename)[0])
driver = gdal.GetDriverByName("ENVI") # 数据类型必须有,因为要计算需要多大内存空间
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
dataset.SetProjection(im_proj) # 写入投影
if im_bands == 1:
# outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data)
if no_data != 'null':
outband.SetNoDataValue(no_data)
outband.FlushCache()
else:
for i in range(im_bands):
outband = dataset.GetRasterBand(1 + i)
outband.WriteArray(im_data[i])
outband.FlushCache()
# outRaster.GetRasterBand(i + 1).WriteArray(array[i])
del dataset
@staticmethod
def write_img_rpc(filename, im_proj, im_geotrans, im_data, rpc_dict):
"""
图像中写入rpc信息
"""
# 判断栅格数据的数据类型
if 'int8' in im_data.dtype.name:
datatype = gdal.GDT_Byte
elif 'int16' in im_data.dtype.name:
datatype = gdal.GDT_Int16
else:
datatype = gdal.GDT_Float32
# 判读数组维数
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# 创建文件
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
dataset.SetProjection(im_proj) # 写入投影
# 写入RPC参数
for k in rpc_dict.keys():
dataset.SetMetadataItem(k, rpc_dict[k], 'RPC')
if im_bands == 1:
dataset.GetRasterBand(1).WriteArray(im_data) # 写入数组数据
else:
for i in range(im_bands):
dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
del dataset
def transtif2mask(self,out_tif_path, in_tif_path, threshold):
"""
:param out_tif_path:输出路径
:param in_tif_path:输入的路径
:param threshold:阈值
"""
im_proj, im_geotrans, im_arr, im_scope = self.read_img(in_tif_path)
im_arr_mask = (im_arr < threshold).astype(int)
self.write_img(out_tif_path, im_proj, im_geotrans, im_arr_mask)
def write_quick_view(self, tif_path, color_img=False, quick_view_path=None):
"""
生成快视图,默认快视图和影像同路径且同名
:param tif_path:影像路径
:param color_img:是否生成随机伪彩色图
:param quick_view_path:快视图路径
"""
if quick_view_path is None:
quick_view_path = os.path.splitext(tif_path)[0]+'.jpg'
n = self.get_bands(tif_path)
if n == 1: # single band
t_data = self.get_data(tif_path)
else: # multi-band: convert to intensity
t_data = self.get_data(tif_path)
t_data = t_data.astype(float)
t_data = np.sqrt(t_data[0] ** 2 + t_data[1] ** 2)
t_r = self.get_img_height(tif_path)
t_c = self.get_img_width(tif_path)
if t_r > 10000 or t_c > 10000:
q_r = int(t_r / 10)
q_c = int(t_c / 10)
elif 1024 < t_r < 10000 or 1024 < t_c < 10000:
if t_r > t_c:
q_r = 1024
q_c = int(t_c/t_r * 1024)
else:
q_c = 1024
q_r = int(t_r/t_c * 1024)
else:
q_r = t_r
q_c = t_c
if color_img is True:
# generate a pseudo-color image
img = np.zeros((t_r, t_c, 3), dtype=np.uint8) # (height, width, channels)
u = np.unique(t_data)
for i in u:
if i != 0:
w = np.where(t_data == i)
img[w[0], w[1], 0] = np.random.randint(0, 255) # random 0-255 value; tune the ranges for other palettes
img[w[0], w[1], 1] = np.random.randint(0, 255)
img[w[0], w[1], 2] = np.random.randint(0, 255)
img = cv2.resize(img, (q_c, q_r)) # (width, height)
cv2.imwrite(quick_view_path, img)
# cv2.imshow("result4", img)
# cv2.waitKey(0)
else:
# grayscale quick-look
min = np.percentile(t_data, 2) # np.nanmin(t_data)
max = np.percentile(t_data, 98) # np.nanmax(t_data)
t_data[np.isnan(t_data)] = max
if (max - min) < 256:
t_data = (t_data - min) / (max - min) * 255
out_img = Image.fromarray(t_data)
out_img = out_img.resize((q_c, q_r)) # resample
out_img = out_img.convert("L") # convert to grayscale
out_img.save(quick_view_path)
def limit_field(self, out_path, in_path, min_value, max_value):
"""
:param out_path:输出路径
:param in_path:主mask路径输出影像采用主mask的地理信息
:param min_value
:param max_value
"""
proj = self.get_projection(in_path)
geotrans = self.get_geotransform(in_path)
array = self.get_band_array(in_path, 1)
array[array < min_value] = min_value
array[array > max_value] = max_value
self.write_img(out_path, proj, geotrans, array)
return True
def band_merge(self, lon, lat, ori_sim):
lon_arr = self.get_data(lon)
lat_arr = self.get_data(lat)
temp = np.zeros((2, lon_arr.shape[0], lon_arr.shape[1]), dtype=float)
temp[0, :, :] = lon_arr[:, :]
temp[1, :, :] = lat_arr[:, :]
self.write_img(ori_sim, '', [0.0, 1.0, 0.0, 0.0, 0.0, 1.0], temp, '0')
def get_scopes(self, ori_sim):
ori_sim_data = self.get_data(ori_sim)
lon = ori_sim_data[0, :, :]
lat = ori_sim_data[1, :, :]
min_lon = np.nanmin(np.where((lon != 0) & ~np.isnan(lon), lon, np.inf))
max_lon = np.nanmax(np.where((lon != 0) & ~np.isnan(lon), lon, -np.inf))
min_lat = np.nanmin(np.where((lat != 0) & ~np.isnan(lat), lat, np.inf))
max_lat = np.nanmax(np.where((lat != 0) & ~np.isnan(lat), lat, -np.inf))
scopes = [[min_lon, max_lat], [max_lon, max_lat], [min_lon, min_lat], [max_lon, min_lat]]
return scopes
# if __name__ == '__main__':
# path = r'D:\BaiduNetdiskDownload\GZ\lon.rdr'
# path2 = r'D:\BaiduNetdiskDownload\GZ\lat.rdr'
# path3 = r'D:\BaiduNetdiskDownload\GZ\lon_lat.tif'
# s = ImageHandler().band_merge(path, path2, path3)
# print(s)
# pass
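# Self-contained sketch (illustrative): write a synthetic single-band raster
# with a placeholder geotransform, read it back, and confirm the round trip.
if __name__ == '__main__':
    import tempfile
    demo_path = os.path.join(tempfile.mkdtemp(), 'demo_roundtrip.tif')
    demo = np.random.rand(64, 64).astype('float32')
    ImageHandler.write_img(demo_path, '', [0.0, 1.0, 0.0, 0.0, 0.0, 1.0], demo, no_data='null')
    proj, geot, arr = ImageHandler.read_img(demo_path)
    print(arr.shape, np.allclose(arr, demo))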

View File

@ -0,0 +1,185 @@
# -*- coding: UTF-8 -*-
"""
@Project:SalinityMain.py
@File:MonteCarloSampling.py
@Function: optimal-feature selection based on Monte Carlo random sampling
@Contact:
@Author:SHJ
@Date:2021/10/19 11:30
@Version:1.0.0
"""
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns
import logging
logger = logging.getLogger("mylog")
def api_sel_feature(x_list, iter=100, alpha=0.5, ts=-0.5, iter_ratio=0.2):
"""
:para x_list: k类别的单个特征的训练样本 [X1,X2,X3,...,Xi,...,Xk],
Xi = np.array([x1,x2,x3...xn]), 第i类别的训练样本数为n
:para iter: 迭代次数
:para alpha: 调节因子
:para ts: com_sep_coef的阈值
:para iter_ratio : 迭代次数阈值
:return : True-特征与类别相关度高False-特征与类别相关度低
"""
com_sep_coef_old = cal_com_sep_coef(x_list, alpha)
# print('com_sep_coef_old:', com_sep_coef_old)
if com_sep_coef_old < ts:
return False, com_sep_coef_old
X = np.zeros(1) # x_list concatenated into one row vector X
x_len_list = [] # records each class's split position within X
num_sampler = 0 # total number of samples
t = 0
flag = 0
for x in x_list:
len_x = len(x)
if t == 0:
X = x
x_len_list.append(len_x)
else:
X = np.hstack([X, x])
x_len_list.append(x_len_list[t - 1] + len_x)
num_sampler += len_x
t += 1
x_len_list.pop()
num = int(np.ceil(num_sampler / 3))
for i in range(iter):
# generate random sample indices
randmtx = np.random.rand(1, num)
randmtx_ceil = np.ceil(randmtx * num_sampler).astype(int)
randmtx_ceil = np.sort(randmtx_ceil[0, :]) - 1
# pick the selected values, permute them, and write them back to form a new array
X_new_sel = X.copy()
X_new_sel[randmtx_ceil] = np.random.permutation(X[randmtx_ceil])
X_new_list = np.split(X_new_sel, x_len_list)
com_sep_coef_new = cal_com_sep_coef(X_new_list, alpha)
if com_sep_coef_new <= com_sep_coef_old:
flag += 1
# print('com_sep_coef_new:', com_sep_coef_new)
logger.info('flag:' + str(flag) + ', iter:' + str(iter) + ', flag/iter:' + str(int(flag)/int(iter)))
if flag > (iter * iter_ratio):
return False, com_sep_coef_old
return True, com_sep_coef_old
def cal_com_coef(x_list):
"""
:para x_list: k类别的单个特征的训练样本 [X1,X2,X3,...,Xi,...,Xk],Xi = np.array([x1,x2,x3...xn]), 第i类别的训练样本数为n
:return com_coef : 类内聚合因子Compactness Coefficient
"""
class_num = len(x_list)
coef_array = np.full((1, class_num), 0.0)
for m in range(class_num):
sample_num = len(x_list[m])
c = np.full((1, sample_num), 0.0)
for u in range(sample_num):
l = np.full((1, sample_num), x_list[m][u])
c[0, u] = np.sum(np.abs(l - x_list[m]))
coef_array[0, m] = np.sum(c) / (sample_num * (sample_num - 1))
com_coef = np.sum(coef_array) / class_num
return com_coef
def cal_sep_coef(x_list):
"""
:para x_list : k类别的单个特征的训练样本 [X1,X2,X3,...,Xi,...,Xk],Xi = np.array([x1,x2,x3...xn]), 第i类别的训练样本数为n
:return sep_coef : 类间离散度Separation Coefficient
"""
class_num = len(x_list)
coef_list = []
coef_sum = 0
for m in range(class_num):
xm = x_list[m]
l_xm = len(xm)
for n in range(class_num):
if not n == m:
xn = x_list[n]
l_xn = len(xn)
xm = np.expand_dims(xm, 1)
coef_list.append(np.sum(np.abs(xm - xn)) / (l_xm * l_xn))
for coef in coef_list:
coef_sum = coef_sum + coef
if class_num == 1 or class_num == 0:
sep_coef = coef_sum
else:
sep_coef = coef_sum / (class_num * (class_num - 1))
return sep_coef
def cal_com_sep_coef(x_list, alpha = 0.5):
"""
:para x_list: k类别的单个特征的训练样本 [X1,X2,X3,...,Xi,...,Xk],Xi = np.array([x1,x2,x3...xn]), 第i类别的训练样本数为n
:para alpha : 调节因子
:return com_sep_coef: 类内聚合度和类间离散度的因子Compactness- Separation Coeffcient
"""
if not alpha >= 0 and alpha <= 1:
raise ('input_para_alpha beyond (0,1)!')
com_coef = cal_com_coef(x_list)
sep_coef = cal_sep_coef(x_list)
com_sep_coef = alpha * com_coef - (1-alpha) * sep_coef
return com_sep_coef
def get_logistic_rand_number(num, u=0.4): # deprecated
randmtx = np.full((1, num), 0.0)
# randmtx[0,0] = np.random.rand(1, 1) # random initial value
randmtx[0, 0] = 0.5 # initial value
for i in range(1, num):
randmtx[0, i] = u * randmtx[0, i-1]*(1-randmtx[0, i-1])
randmtx = randmtx * 3 * num
randmtx_ceil = np.ceil(randmtx)
# plot the distribution of the random numbers
# randmty = np.arange(0,num,1)
# randmty = np.expand_dims( randmty, 1)
# fig, axes = plt.subplots(1, 1, figsize=(5, 5))
# axes.scatter(randmty, randmtx_ceil, alpha=.3, label='ground truth')
# axes.legend()
# plt.tight_layout()
# plt.show()
return randmtx_ceil
def test():
'''exercise the random-number helpers'''
# item insertion
# a = np.array([3.4, 2.5, 1.8, 4.7, 5.6, 2.1])
# b = np.array([2.5, 4.7, 5.6])
# c = a[[0,1]]
# a[[0,1]] = np.array([1, 1])
# random permutation (shuffle operates on an array, in place)
arr = np.arange(10)
random.shuffle(arr)
# logistic random numbers
sns.distplot(random.normal(scale=2, size=1000), hist=False, label='normal')
sns.distplot(random.logistic(loc=2, scale=0.5, size=1000), hist=False, label='logistic')
plt.show()
# plot the random numbers
randmtx = random.logistic(loc=0.5, scale=0.5, size=100)
randmtx.sort(axis=0)
randmty = np.arange(0,100,1)
randmty = np.expand_dims(randmty, 1)
fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.scatter(randmty, randmtx, alpha=.3, label='ground truth')
axes.legend()
plt.tight_layout()
plt.show()
# if __name__ == '__main__':
# 例子
# x1 = np.array([1, 1.1])
# x2 = np.array([2, 2.1, 2.2])
# x3 = np.array([3, 3.4, 3.1])
# x_list = [x1, x2, x3]
# com_sep_coef = cal_com_sep_coef(x_list, 0.5)
# flag = api_sel_feature(x_list)
# print('done')
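# Tiny numeric check (illustrative): compact, well-separated classes yield a
# strongly negative compactness-separation coefficient (about -0.71 for the
# toy data below with alpha = 0.5).
if __name__ == '__main__':
    x_list = [np.array([1.0, 1.1]), np.array([2.0, 2.1, 2.2]), np.array([3.0, 3.4, 3.1])]
    print(cal_com_sep_coef(x_list, 0.5))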

View File

@ -0,0 +1,416 @@
import sklearn # imported explicitly to fix packaging errors
import sklearn.utils # packaging workaround
import sklearn.utils._cython_blas # packaging workaround
import sklearn.utils._weight_vector # packaging workaround
import sklearn.neighbors # packaging workaround
import sklearn.neighbors._typedefs # packaging workaround
import sklearn.neighbors._partition_nodes # packaging workaround
import sklearn.neighbors._quad_tree # packaging workaround
import sklearn.tree._utils # packaging workaround
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import numpy as np
from scipy.stats import pearsonr
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.block.blockprocess import BlockProcess
import logging
import os
import glob
from PIL import Image
from tool.file.fileHandle import fileHandle
import multiprocessing
logger = logging.getLogger("mylog")
file = fileHandle()
class MachineLeaning:
"""
机器学习库
"""
def __init__(self):
pass
@staticmethod
def gene_optimal_train_set(train_data_dic, feature_tif_dir, important_threshold=0.3, correlation_threshold=0.7): # todo: revisit the feature-importance threshold
ml = MachineLeaning()
name_list = ml.get_name_list(feature_tif_dir)
X_train, Y_train = ml.gene_train_set(train_data_dic, feature_tif_dir)
optimal_feature = ml.sel_optimal_feature_set(X_train, Y_train, threshold=important_threshold)
optimal_feature = ml.remove_correlation_feature(X_train, optimal_feature, threshold=correlation_threshold)
X_train = X_train[:, optimal_feature]
logger.info('train_feature:%s', np.array(name_list)[optimal_feature])
return X_train, Y_train, optimal_feature
@ staticmethod
def sel_optimal_feature(X_train, Y_train, name_list,important_threshold=0.3, correlation_threshold=0.7):
ml = MachineLeaning()
optimal_feature = ml.sel_optimal_feature_set(X_train, Y_train, threshold=important_threshold)
optimal_feature = ml.remove_correlation_feature(X_train, optimal_feature, threshold=correlation_threshold)
X_train = X_train[:, optimal_feature]
logger.info('train_feature:%s', np.array(name_list)[optimal_feature])
return X_train, Y_train, optimal_feature
@staticmethod
def gene_test_set(feature_tif_dir, optimal_feature):
"""
生成测试集
:param feature_tif_dir : 特征影像路径字典
:param optimal_feature : 最优特征子集
:return X_test_list : 分块测试集影像路径
"""
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
cols = ImageHandler.get_img_width(in_tif_paths[0])
rows = ImageHandler.get_img_height(in_tif_paths[0])
workspace_block_tif_path = os.path.join(feature_tif_dir, 'block')
workspace_block_feature_path = os.path.join(feature_tif_dir, 'feature')
file.creat_dirs([workspace_block_tif_path, workspace_block_feature_path])
# cut the feature images into blocks
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
bp.cut(feature_tif_dir, workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
img_dir, img_name = bp.get_file_names(workspace_block_tif_path, ['tif'])
dir_dict_all = bp.get_same_img(img_dir, img_name)
# keep only the feature images in the optimal subset
dir_dict = {}
for n, key in zip(range(len(dir_dict_all)), dir_dict_all):
if n in optimal_feature:
dir_dict.update({key: dir_dict_all[key]})
logger.info('test_feature:%s', dir_dict.keys())
logger.info('blocking tifs success!')
X_test_list = []
# stack the selected features along the band dimension
for key in dir_dict:
key_name = key
block_num = len(dir_dict[key])
break
for n in range(block_num):
name = os.path.basename(dir_dict[key_name][n])
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
features_path = os.path.join(workspace_block_feature_path, "features" + suffix) # + "\\features" + suffix
X_test_list.append(features_path)
features_array = np.zeros((len(dir_dict), block_size, block_size), dtype='float32')
for m, value in zip(range(len(dir_dict)), dir_dict.values()):
features_array[m, :, :] = ImageHandler.get_band_array(value[n])
features_array[np.isnan(features_array)] = 0.0 # replace invalid values (NaN) with 0
ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
logger.info('create features matrix success!')
# file.del_folder(workspace_block_tif_path)
# file.del_folder(workspace_block_feature_path)
return X_test_list
@staticmethod
def predict_blok(clf, X_test, rows, cols, img_path, row_begin, col_begin, block_sum, n):
logger.info('total:%s,block:%s testing data !path:%s', block_sum, n, img_path)
Y_test = clf.predict(X_test)
img = Y_test.reshape(rows, cols)
out_image = Image.fromarray(img)
out_image.save(img_path)
# bp = BlockProcess()
# bp.assign_spatial_reference_bypoint(row_begin, col_begin, self.__proj, self.__geo, img_path)
# sr = osr.SpatialReference()
# sr.ImportFromWkt(self.__proj)
# geo_transform = (self.__geo[0] + col_begin * self.__geo[1] + row_begin * self.__geo[2],
# self.__geo[1],
# self.__geo[2],
# self.__geo[3] + col_begin * self.__geo[4] + row_begin * self.__geo[5],
# self.__geo[4],
# self.__geo[5]
# )
# dst_ds = gdal.Open(img_path, gdal.GA_Update)
# if dst_ds is None:
# return False
# dst_ds.SetProjection(sr.ExportToWkt())
# dst_ds.SetGeoTransform(geo_transform)
# del dst_ds
logger.info('total:%s,block:%s test data finished !path:%s', block_sum, n, img_path)
return True
@staticmethod
def predict(clf, X_test_list, out_tif_name, workspace_processing_path,rows, cols):
"""
Run block-wise prediction.
:param clf: the trained model (e.g. an SVM)
:param X_test_list: paths of the blocked test-set images
"""
ml = MachineLeaning()
# process the blocks in parallel
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
block_features_dir = X_test_list
bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name + '\\') # workspace_processing_path + out_tif_name + '\\'
file.creat_dirs([bp_cover_dir])
processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 1])
pool = multiprocessing.Pool(processes=processes_num)
for path, n in zip(block_features_dir, range(len(block_features_dir))):
name = os.path.split(path)[1]
features_array = ImageHandler.get_data(path)
X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
row_begin = int(name.split('_')[-4])
col_begin = int(name.split('_')[-2])
pool.apply_async(ml.predict_blok, (clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
pool.close()
pool.join()
# mosaic the block results
data_dir = bp_cover_dir
out_path = workspace_processing_path[0:-1]
bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
# attach georeferencing
cover_path = os.path.join(workspace_processing_path, out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
# bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
return cover_path
@staticmethod
def predict_VP(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols):
"""
Run block-wise prediction (vegetation-phenology variant).
:param clf: the trained model (e.g. an SVM)
:param X_test_list: paths of the blocked test-set images
"""
ml = MachineLeaning()
# process the blocks in parallel
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
block_features_dir = X_test_list
bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name,
'pre_result\\') # workspace_processing_path + out_tif_name + '\\'
file.creat_dirs([bp_cover_dir])
processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 1])
pool = multiprocessing.Pool(processes=processes_num)
for path, n in zip(block_features_dir, range(len(block_features_dir))):
name = os.path.split(path)[1]
features_array = ImageHandler.get_data(path)
X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \
name.split('_')[-1]
img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
row_begin = int(name.split('_')[-4])
col_begin = int(name.split('_')[-2])
pool.apply_async(ml.predict_blok, (
clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
pool.close()
pool.join()
# mosaic the block results
data_dir = bp_cover_dir
out_path = workspace_processing_path[0:-1]
bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
# attach georeferencing
cover_path = os.path.join(workspace_processing_path,
out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
# bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
return cover_path
@staticmethod
def get_name_list(feature_tif_dir):
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
name_list = []
dim = len(in_tif_paths)
for n, path in zip(range(dim), in_tif_paths):
name_list.append(str(n)+': '+os.path.split(path)[1])
logger.info('feature_list:%s', name_list)
return name_list
@staticmethod
def gene_train_set(train_data_dic, feature_tif_dir):
"""
生成训练集
:param train_data_dic : 从csv读取的训练数据
:param feature_tif_dir : 特征影像路径路径
:return X_train, Y_train : 训练数据
"""
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
dim = len(in_tif_paths)
X_train = np.empty(shape=(0, dim))
Y_train = np.empty(shape=(0, 1))
ids = train_data_dic['ids']
positions = train_data_dic['positions']
for id, points in zip(ids, positions):
# for data in train_data_list:
if points == []:
raise Exception('data is empty!')
row, col = zip(*points)
l = len(points)
X = np.empty(shape=(l, dim))
for n, tif_path in zip(range(dim), in_tif_paths):
feature_array = ImageHandler.get_data(tif_path)
feature_array[np.isnan(feature_array)] = 0 # fill invalid values (NaN) with 0
x = feature_array[row, col].T
X[:, n] = x
Y = np.full((l, 1), id)
X_train = np.vstack((X_train, X))
Y_train = np.vstack((Y_train, Y))
Y_train = Y_train.T[0, :]
logger.info("gene_train_set success!")
return X_train, Y_train
@staticmethod
def standardization(data, num=1):
# normalize the matrix to [0, num]
min = np.nanmin(data)
max = np.nanmax(data)
data[np.isnan(data)] = min # fill NaNs with the minimum value
_range = max - min
return (data - min) / _range * num
@staticmethod
def sel_optimal_feature_set(X_train, Y_train, threshold=0.01):
"""
筛选最优特征组合(极度随机树
"""
model = ExtraTreesClassifier()
max = np.max(Y_train)
if max < 0.1:
Y_train = (Y_train*10000).astype('int')
model.fit(X_train, Y_train.astype('int'))
# select the relative importance of each attribute
importances = model.feature_importances_
logger.info('importances:%s,threshold=%s', importances, threshold)
importances_resort = -np.sort(-importances) # sorted in descending order
imp_argsort = np.argsort(-importances) # indices sorted by descending importance
optimal_feature = list(imp_argsort[np.where(importances_resort > threshold)]) # filter out low-importance features
logger.info('optimal_feature:%s', optimal_feature)
if len(optimal_feature)==0:
logger.error('optimal_feature is empty')
optimal_feature = list(imp_argsort)
return optimal_feature
@staticmethod
def correlation_map(x, y):
# https://blog.csdn.net/weixin_39836726/article/details/110783640
# cc matrix based on scipy pearsonr
n_row_x = x.shape[0]
n_row_y = y.shape[0]
ccmtx_xy = np.empty((n_row_x, n_row_y))
for n in range(n_row_x):
for m in range(n_row_y):
ccmtx_xy[n, m] = pearsonr(x[n, :], y[m, :])[0]
return ccmtx_xy
@staticmethod
def remove_correlation_feature(X_train,validity_list, threshold=0.85):
"""
相关性抑制,去除相关性
:param X_train : 训练集
:param validity_list : 最优特征子集
:param threshold: 相关性阈值
:return validity_list : 最优特征子集
"""
ccmtx = MachineLeaning().correlation_map(X_train[:, validity_list].T, X_train[:, validity_list].T)
ccmtx = np.abs(ccmtx)
for r in range(len(validity_list)):
for c in range(len(validity_list)):
if c <= r:
ccmtx[r, c] = 0
logger.info('correlation_map:\n %s', ccmtx)
# for feature pairs whose correlation exceeds the threshold, drop the later feature
high_corr = np.unique(np.where(ccmtx > threshold)[1]) # indices of features to drop
validity_list = np.delete(validity_list, high_corr)
logger.info('validity_list_corr:%s', validity_list)
logger.info(validity_list)
return validity_list
@staticmethod
def gene_train_data(block_features_dir,rows,cols,block_size,measured_data_img):
# build the training set
X_train = []
Y_train = []
block_rows = int(np.ceil(rows/block_size))
block_cols = int(np.ceil(cols/block_size))
for data, n in zip(measured_data_img, range(len(measured_data_img))):
row = data[0]
col = data[1]
block_row = row//block_size
block_col = col//block_size
if block_row == block_rows-1:
part_img_row = row - (rows - block_size)
else:
part_img_row = row % block_size
if block_col == block_cols-1:
part_img_col = col - (cols-block_size)
else:
part_img_col = col % block_size
features_path = block_features_dir[block_row*block_rows + block_col]
features_array = ImageHandler().get_data(features_path)
feature = features_array[:, part_img_row, part_img_col]
if not (np.isnan(feature).any() or np.isinf(feature).any()):
X_train.append(list(feature))
Y_train.append([data[2]])
logger.info('total:%s,num:%s create train set success!', len(measured_data_img), n)
return np.array(X_train), np.array(Y_train)
@staticmethod
def trainRF(X_train, Y_train):
# random forest
logger.info('RF training')
clf = RandomForestClassifier()
clf.fit(X_train, Y_train)
return clf
@staticmethod
def trainSVM(X_train, Y_train, cost=1, kernel='rbf'):
logger.info('svm training')
clf = SVC(C=cost, cache_size=1000, class_weight='balanced', coef0=0.0, decision_function_shape='ovo',
degree=3, gamma='auto', kernel=kernel, max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=True)
clf.fit(X_train, Y_train)
return clf
@staticmethod
def vegetationPhenology_combine_feature(feature_dir,workspace_processing_path, name, rows, cols, debug =False):
ml = MachineLeaning()
path_list = list(glob.glob(os.path.join(feature_dir, '*.tif')))
# merge the per-feature rasters into one multi-band array
name_featuresPath_dic = {}
dim = len(path_list)
features_path = workspace_processing_path + name + "/"+ name +'_features.tif'
if debug== False:
features_array = np.zeros((dim, rows, cols), dtype='float16')
for m, path in zip(range(dim), path_list):
data = ImageHandler.get_data(path)
data = ml.standardization(data)
features_array[m, :, :] = data
# replace invalid values (NaN/Inf) with 0
features_array[np.isnan(features_array)] = 0.0
features_array[np.isinf(features_array)] = 0.0
ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
name_featuresPath_dic.update({name: features_path})
return name_featuresPath_dic

View File

@ -0,0 +1,491 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:AHVToPolsarpro.py
@Function: convert quad-pol imagery to PolSARpro-format T3 data
@Contact:
@Author:SHJ
@Date:2021/9/18 16:44
@Version:1.0.0
"""
import os
import numpy as np
import glob
import struct
from tool.algorithm.image.ImageHandle import ImageHandler
class AHVToPolsarpro:
"""
全极化影像转换为bin格式T3矩阵支持polsarpro处理
"""
def __init__(self, hh_hv_vh_vv_path_list=[]):
self._hh_hv_vh_vv_path_list = hh_hv_vh_vv_path_list
pass
@staticmethod
def __ahv_to_s2_veg(ahv_dir):
"""
全极化影像转S2矩阵
:param ahv_dir: 全极化影像文件夹路径
:return: 极化散射矩阵S2
"""
global s11
in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
in_tif_paths += in_tif_paths1
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if '_HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif '_HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif '_VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif '_VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path :%s', ahv_dir)
return s11, s12, s21, s22
@staticmethod
def __ahv_to_s2_soil(ahv_dir):
"""
全极化影像转S2矩阵
:param ahv_dir: 全极化影像文件夹路径
:return: 极化散射矩阵S2
"""
global s11
in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
in_tif_paths += in_tif_paths1
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path :%s', ahv_dir)
return s11, s12, s21, s22
@staticmethod
def __ahv_to_s2_list(ahv_path_list):
"""
全极化影像转S2矩阵
:param ahv_dir: 全极化影像文件夹路径
:return: 极化散射矩阵S2
"""
global s11
in_tif_paths = ahv_path_list
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path')
return s11, s12, s21, s22
@staticmethod
def __ahv_to_s2_list_2(hh_hv_vh_vv_path_list):
"""
全极化影像转S2矩阵
:param ahv_dir: 全极化影像文件夹路径
:return: 极化散射矩阵S2
"""
global s11
in_tif_paths = hh_hv_vh_vv_path_list
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path, n in zip(in_tif_paths, range(len(in_tif_paths))):
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# channels are identified by list position: 0=HH, 1=HV, 2=VH, 3=VV
if n == 0:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif n == 1:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif n == 2:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif n == 3:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path')
return s11, s12, s21, s22
@staticmethod
def __s2_to_t3(s11, s12, s21, s22):
"""
Convert the S2 matrix to the T3 coherency matrix.
:param s11: HH polarization data
:param s12: HV polarization data
:param s21: VH polarization data
:param s22: VV polarization data
:return: polarimetric coherency matrix T3
"""
HH = s11
HV = s12
VH = s21
VV = s22
# T3 = k * k^H with the Pauli scattering vector
# k = (1/sqrt(2)) * [HH + VV, HH - VV, HV + VH]^T,
# so every element carries the same 1/2 normalization.
t11 = (np.abs(HH + VV)) ** 2 / 2
t12 = (HH + VV) * np.conj(HH - VV) / 2
t13 = (HH + VV) * np.conj(HV + VH) / 2
t21 = (HH - VV) * np.conj(HH + VV) / 2
t22 = np.abs(HH - VV) ** 2 / 2
t23 = (HH - VV) * np.conj(HV + VH) / 2
t31 = (HV + VH) * np.conj(HH + VV) / 2
t32 = (HV + VH) * np.conj(HH - VV) / 2
t33 = np.abs(HV + VH) ** 2 / 2
return t11, t12, t13, t21, t22, t23, t31, t32, t33
def __t3_to_polsarpro_t3(self, out_dir, t11, t12, t13, t22, t23, t33):
"""
T3矩阵转bin格式支持 polsarpro处理
:param out_dir: 输出的文件夹路径
:param t11:
:param t12:
:param t13:
:param t22:
:param t23:
:param t33:
:return: bin格式矩阵T3和头文件
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rows = t11.shape[0]
cols = t11.shape[1]
bins_dict = {
'T11.bin': t11,
'T12_real.bin': t12.real,
'T12_imag.bin': t12.imag,
'T13_real.bin': t13.real,
'T13_imag.bin': t13.imag,
'T22.bin': t22,
'T23_real.bin': t23.real,
'T23_imag.bin': t23.imag,
'T33.bin': t33}
for name, data in bins_dict.items():
bin_path = os.path.join(out_dir, name)
self.__write_img_bin(data, bin_path) # todo: revisit how the T3 matrix is saved
# data.tofile(bin_path)
out_hdr_path = bin_path + '.hdr'
self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
self.__write_config_file(out_dir, rows, cols)
def rows(self):
"""获取影像行数"""
return self._rows
def cols(self):
"""获取影像列数"""
return self._cols
def __write_img_bin(self, im, file_path):
"""
写入影像到bin文件中保存为float32类型
:param im : 影像矩阵数据暂支持单通道影像数据
:param file_path: bin文件的完整路径
"""
with open(file_path, 'wb') as f:
self._rows = im.shape[0]
self._cols = im.shape[1]
for row in range(self._rows):
im_bin = struct.pack("f" * self._cols, *np.reshape(im[row, :], (self._cols, 1), order='F'))
f.write(im_bin)
f.close()
@staticmethod
def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
"""
写入影像的头文件
:param out_hdr_path : 头文件的路径
:param bin_path: bin文件的路径
:param rows: 影像的行数
:param cols: 影像的列数
"""
h1 = 'ENVI'
h2 = 'description = {'
h3 = 'File Imported into ENVI. }'
h4 = 'samples = ' + str(cols) # columns
h5 = 'lines = ' + str(rows) # rows
h6 = 'bands = 1 ' # number of bands
h7 = 'header offset = 0'
h8 = 'file type = ENVI Standard'
h9 = 'data type = 4' # data type (4 = float32)
h10 = 'interleave = bsq' # storage interleave
h11 = 'sensor type = Unknown'
h12 = 'byte order = 0'
h13 = 'band names = {'
h14 = bin_path + '}'
# h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
# doc = open(out_hdr_path, 'w')
# for i in range(0, 14):
# print(h[i], end='', file=doc)
# print('\n', end='', file=doc)
h = [h1, h4, h5, h6, h7, h8, h9, h10, h12]
doc = open(out_hdr_path, 'w')
for i in range(0, 9):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
    @staticmethod
    def __write_config_file(out_config_dir, rows, cols):
        """
        Write the PolSARpro config file
        :param out_config_dir: directory of the config file
        :param rows: number of image rows
        :param cols: number of image columns
        """
        h = ['Nrow', str(rows), '---------',
             'Ncol', str(cols), '---------',
             'PolarCase', 'monostatic', '---------',
             'PolarType', 'full']
        out_config_path = os.path.join(out_config_dir, 'config.txt')
        with open(out_config_path, 'w') as doc:
            for line in h:
                print(line, file=doc)
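    # For reference, the config.txt produced above looks like this for a 100 x 200 image:
    #
    #   Nrow
    #   100
    #   ---------
    #   Ncol
    #   200
    #   ---------
    #   PolarCase
    #   monostatic
    #   ---------
    #   PolarType
    #   full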
def incidence_tif2bin(self, incidence_file, out_path):
if not os.path.exists(out_path):
os.mkdir(out_path)
incidence_bin = os.path.join(out_path, 'incidence.bin')
data = ImageHandler().get_data(incidence_file)
rows = data.shape[0]
cols = data.shape[1]
self.__write_img_bin(data, incidence_bin)
if not os.path.exists(incidence_bin):
raise Exception('incidence to bin failed')
out_hdr_path = incidence_bin + '.hdr'
self.__write_bin_hdr(out_hdr_path, incidence_bin, rows, cols)
return incidence_bin
def ahv_to_polsarpro_t3_veg(self, out_file_dir, in_ahv_dir=''):
        if self._hh_hv_vh_vv_path_list == []:
s11, s12, s21, s22 = self.__ahv_to_s2_veg(in_ahv_dir)
else:
s11, s12, s21, s22 = self.__ahv_to_s2_list_2(self._hh_hv_vh_vv_path_list)
t11, t12, t13, t21, t22, t23, t31, t32, t33 = self.__s2_to_t3(
s11, s12, s21, s22)
self.__t3_to_polsarpro_t3(out_file_dir, t11, t12, t13, t22, t23, t33)
def ahv_to_polsarpro_t3_soil(self, out_file_dir, in_ahv_dir=''):
        if self._hh_hv_vh_vv_path_list == []:
s11, s12, s21, s22 = self.__ahv_to_s2_soil(in_ahv_dir)
else:
s11, s12, s21, s22 = self.__ahv_to_s2_list_2(self._hh_hv_vh_vv_path_list)
t11, t12, t13, t21, t22, t23, t31, t32, t33 = self.__s2_to_t3(
s11, s12, s21, s22)
self.__t3_to_polsarpro_t3(out_file_dir, t11, t12, t13, t22, t23, t33)
    def calibration(self, calibration_value, in_ahv_dir='', name=''):
        if name == '':
            out_dir = os.path.join(in_ahv_dir, 'calibration')
        else:
            out_dir = os.path.join(in_ahv_dir, name, 'calibration')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)  # make sure the output directory exists before writing
        flag_list = [0, 0, 0, 0]
        if self._hh_hv_vh_vv_path_list == []:  # land cover, soil salinity: scan the input directory
            in_tif_paths = list(glob.glob(os.path.join(in_ahv_dir, '*.tif')))
            in_tif_paths += list(glob.glob(os.path.join(in_ahv_dir, '*.tiff')))
            pol_tokens = ['HH', 'HV', 'VH', 'VV']
        else:  # vegetation phenology: use the explicit file list
            in_tif_paths = self._hh_hv_vh_vv_path_list
            pol_tokens = ['_HH', '_HV', '_VH', '_VV']
        for in_tif_path in in_tif_paths:
            # read the original SAR image
            proj, geotrans, data = ImageHandler.read_img(in_tif_path)
            tif_name = os.path.basename(in_tif_path)
            data_new = np.zeros(data.shape)
            # determine the polarization type and apply the matching calibration coefficient
            for idx, token in enumerate(pol_tokens):
                if token in tif_name:
                    data_new[0, :, :] = data[0, :, :] * calibration_value[idx]
                    data_new[1, :, :] = data[1, :, :] * calibration_value[idx]
                    ImageHandler.write_img(os.path.join(out_dir, tif_name), proj, geotrans, data_new)
                    flag_list[idx] = 1
                    break
        if flag_list != [1, 1, 1, 1]:
            raise Exception('calibration error!')
        self._hh_hv_vh_vv_path_list = []
        return out_dir
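    # Usage sketch (paths and coefficients are hypothetical, for illustration only):
    #
    #   atp = AHVToPolsarpro()
    #   # one linear scaling factor per polarization, ordered HH, HV, VH, VV
    #   calibrated_dir = atp.calibration([1.0, 1.2, 1.2, 0.9],
    #                                    in_ahv_dir='D:/data/GF3_scene')
    #   atp.ahv_to_polsarpro_t3_veg('D:/data/GF3_scene/psp_t3', calibrated_dir)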
if __name__ == '__main__':
    # Example 1
# atp = AHVToPolsarpro()
# ahv_path = 'D:\\DATA\\GAOFEN3\\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\\'
# # ahv_path = 'D:\\DATA\\GAOFEN3\\2598957_Paris\\'
# out_file_path = 'D:\\bintest0923\\'
# atp.ahv_to_polsarpro_t3(out_file_path, ahv_path)
    # # polarimetric decomposition to obtain the T3 matrix
# atp = AHVToPolsarpro()
# ahv_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024_RPC"
# t3_path = ahv_path + 'psp_t3\\'
# atp.ahv_to_polsarpro_t3(t3_path, ahv_path)
    # Example 2
# dir = r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\preprocessed/'
# path_list = [dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HV_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VV_preprocessed.tif']
#
#
# atp = AHVToPolsarpro(path_list)
# atp.ahv_to_polsarpro_t3(r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\processing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC/t3')
print("done")

View File

@ -0,0 +1,228 @@
"""
@Project microproduct
@File AHVToPolsarpro.PY
@Function 将四个极化数据转成S2矩阵文件
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import os
import numpy as np
import glob
import struct
from tool.algorithm.image.ImageHandle import ImageHandler
class AHVToPolsarproS2:
"""
全极化影像转换为bin格式S2矩阵支持polsarpro处理
"""
def __init__(self):
pass
@staticmethod
def __ahv_to_s2(ahv_dir):
"""
全极化影像转S2矩阵
:param ahv_dir: 全极化影像文件夹路径
:return: 极化散射矩阵S2
"""
        in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
        if in_tif_paths == []:
            in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
        s11, s12, s21, s22 = None, None, None, None
        flag_list = [0, 0, 0, 0]
        for in_tif_path in in_tif_paths:
            # read the original SAR image
            proj, geotrans, data = ImageHandler.read_img(in_tif_path)
            # determine the polarization type from the file name
            if 'HH' in os.path.basename(in_tif_path):
                data_real = data[0, :, :]  # first band (real part)
                data_imag = data[1, :, :]  # second band (imaginary part)
                s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
        if flag_list != [1, 1, 1, 1]:
            raise Exception('tif of HH or HV or VH or VV is not in path :%s' % ahv_dir)
        return s11, s12, s21, s22
def __s2_to_bin(self, out_dir, s11, s12, s21, s22):
"""
S2矩阵转bin格式支持 polsarpro处理
:param out_dir: 输出的文件夹路径
:param s11:
:param s12:
:param s21
:param s22:
:return: bin格式矩阵S2和头文件
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rows = s11.shape[0]
cols = s11.shape[1]
bins_dict = {'s11.bin': s11,
's12.bin': s12,
's21.bin': s21,
's22.bin': s22}
for name, data in bins_dict.items():
bin_path = os.path.join(out_dir, name)
self.__write_slc_img_bin(data, bin_path,name)
out_hdr_path = bin_path+'.hdr'
self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
self.__write_config_file(out_dir, rows, cols)
    @staticmethod
    def __write_slc_img_bin(im, file_path, name):
        """
        Write a complex image to a .bin file as interleaved float32 (real, imag) pairs
        :param im: image matrix (single-band complex data only for now)
        :param file_path: full path of the .bin file
        """
        with open(file_path, 'wb') as f:
            rows = im.shape[0]
            cols = im.shape[1]
            cre_im = np.zeros((rows, 2 * cols), dtype=np.float32)
            cre_im[:, ::2] = im.real  # real parts
            cre_im[:, 1::2] = im.imag  # imaginary parts
            for row in range(rows):
                cre_im_bin = struct.pack("f" * 2 * cols, *np.reshape(cre_im[row, :], (2 * cols, 1), order='F'))
                f.write(cre_im_bin)
    @staticmethod
    def read_slc_bin_to_img(bin_path):
        """
        Read .bin binary data (interleaved real/imag) back into a matrix
        :param bin_path: path of the .bin file (a config.txt must sit beside it)
        :return: image array of shape [2, rows, cols] (band 0 = real, band 1 = imag)
        """
        (bin_dir, bin_name) = os.path.split(bin_path)
        config_path = os.path.join(bin_dir, 'config.txt')
        config = open(config_path, 'r').read().split('\n', -1)
        rows = int(config[1])
        cols = int(config[4])
        bin_file = open(bin_path, 'rb')  # open the binary file
        size = os.path.getsize(bin_path)  # file size in bytes
        if size < rows * cols * 4 * 2:
            raise Exception(
                'bin size less than rows*cols*4*2! size:',
                size,
                'byte, rows:',
                rows,
                'cols:',
                cols)
        bin_data = np.zeros([rows, cols * 2], dtype=np.float32)
        img_array = np.zeros([2, rows, cols], dtype=np.float32)
        for row in range(rows):
            data = bin_file.read(4 * cols * 2)  # read one row of binary data at a time
            row_data = struct.unpack('f' * cols * 2, data)  # unpack into a row of floats
            bin_data[row, :] = row_data
        bin_file.close()
        img_array[0] = bin_data[:, ::2]  # real
        img_array[1] = bin_data[:, 1::2]  # imag
        return img_array
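    # Round-trip sketch (illustrative): __write_slc_img_bin stores each complex pixel as
    # an interleaved (real, imag) float32 pair, and read_slc_bin_to_img undoes it:
    #
    #   s = np.array([[1 + 2j, 3 - 4j]], dtype=np.complex64)   # a 1 x 2 complex image
    #   # on disk the row becomes: 1.0, 2.0, 3.0, -4.0  (four float32 values)
    #   # read_slc_bin_to_img returns shape [2, rows, cols]: band 0 = real, band 1 = imag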
    @staticmethod
    def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
        """
        Write the ENVI header file for an image
        :param out_hdr_path: path of the header file
        :param bin_path: path of the .bin file
        :param rows: number of image rows
        :param cols: number of image columns
        """
        h = ['ENVI',
             'description = {',
             'ENVI File, Created [] }',
             'samples = ' + str(cols),  # columns
             'lines = ' + str(rows),  # rows
             'bands = 1 ',  # band count
             'header offset = 0',
             'file type = ENVI Standard',
             'data type = 6',  # 6 = complex (pairs of 32-bit floats)
             'interleave = bsq',  # storage layout
             'sensor type = Unknown',
             'byte order = 0',
             'wavelength units = Unknown',
             'complex function = Power']
        with open(out_hdr_path, 'w') as doc:
            for line in h:
                print(line, file=doc)
    @staticmethod
    def __write_config_file(out_config_dir, rows, cols):
        """
        Write the PolSARpro config file
        :param out_config_dir: directory of the config file
        :param rows: number of image rows
        :param cols: number of image columns
        """
        h = ['Nrow', str(rows), '---------',
             'Ncol', str(cols), '---------',
             'PolarCase', 'bistatic',  # bistatic: HV and VH are kept as separate channels
             '---------',
             'PolarType', 'full']
        out_config_path = os.path.join(out_config_dir, 'config.txt')
        with open(out_config_path, 'w') as doc:
            for line in h:
                print(line, file=doc)
def api_ahv_to_polsarpro_s2(self, out_file_dir, in_ahv_dir):
s11, s12, s21, s22 = self.__ahv_to_s2(in_ahv_dir)
self.__s2_to_bin(out_file_dir, s11, s12, s21, s22)
# if __name__ == '__main__':
# # test()
# atp = AHVToPolsarproS2()
# ahv_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087'
# # ahv_path = 'D:\\DATA\\GAOFEN3\\2598957_Paris\\'
# out_file_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\SLC_SHJ_2'
# atp.api_ahv_to_polsarpro_s2(out_file_path, ahv_path)
# bin_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\SLC_SHJ\s11.bin'
# # data = atp.read_slc_bin_to_img(bin_path)
# print("done")

View File

@ -0,0 +1,196 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:DualPolarToPolsarproC2.py
@Function:双极化影像转成polsarpro格式C2数据
@Contact:
@Author:SHJ
@Date:2021/11/5
@Version:1.0.0
"""
import os
import numpy as np
import glob
import struct
import gc
from tool.algorithm.image.ImageHandle import ImageHandler
class DualPolarToPolsarproC2:
"""
双极化影像转换为bin格式C2矩阵支持polsarpro处理
"""
def __init__(self):
pass
@staticmethod
def __dual_polar_to_c2(dual_polar_dir):
"""
双影像转S2矩阵
:param dual_polar_dir: 双极化影像文件夹路径
:return: C2矩阵
"""
in_tif_paths = list(glob.glob(os.path.join(dual_polar_dir, '*.tif')))
if in_tif_paths == []:
in_tif_paths = list(glob.glob(os.path.join(dual_polar_dir, '*.tiff')))
s11, s22 = None, None
flag_list = [0, 0, 0, 0]
        for in_tif_path in in_tif_paths:
            # read the original SAR image
            proj, geotrans, data = ImageHandler.read_img(in_tif_path)
            # determine the polarization type from the file name
if 'HH' in os.path.basename(in_tif_path):
s11 = data[0, :, :] + 1j * data[1, :, :]
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
s22 = data[0, :, :] + 1j * data[1, :, :]
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
s22 = data[0, :, :] + 1j * data[1, :, :]
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
s11 = data[0, :, :] + 1j * data[1, :, :]
flag_list[3] = 1
else:
continue
del data
gc.collect()
        if flag_list != [1, 1, 0, 0] and flag_list != [0, 0, 1, 1]:
            raise Exception('Dual-Polarization SAR is not in path :%s' % dual_polar_dir)
        c11 = np.abs(s11) ** 2
        c12 = s11 * np.conj(s22)
        del s11
        gc.collect()
        c22 = np.abs(s22) ** 2
        return c11, c12, c22
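    # Illustrative check (not part of the pipeline): C2 is the outer product of the
    # dual-pol scattering vector k = [s11, s22] with its conjugate transpose, so
    # c11 = |s11|^2, c12 = s11 * conj(s22), c21 = conj(c12), c22 = |s22|^2; only c11,
    # c12 and c22 need to be stored because C2 is Hermitian. For one pixel:
    #
    #   import numpy as np
    #   s11, s22 = 1 + 2j, 0.5 - 1j
    #   k = np.array([s11, s22])
    #   C2 = np.outer(k, np.conj(k))
    #   assert np.isclose(C2[0, 1], s11 * np.conj(s22))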
def __c2_to_polsarpro_c2(self, out_dir, c11, c12, c22):
"""
C2矩阵转bin格式支持 polsarpro处理
:param out_dir: 输出的文件夹路径
:param c11:
:param c12:
:param c21:
:param c22:
:return: bin格式矩阵C3和头文件
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rows = c11.shape[0]
cols = c11.shape[1]
bins_dict = {
'C11.bin': c11,
'C12_real.bin': c12.real,
'C12_imag.bin': c12.imag,
'C22.bin': c22}
for name, data in bins_dict.items():
bin_path = os.path.join(out_dir, name)
self.__write_img_bin(data, bin_path)
out_hdr_path = bin_path + '.hdr'
self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
self.__write_config_file(out_dir, rows, cols)
def rows(self):
"""获取影像行数"""
return self._rows
def cols(self):
"""获取影像列数"""
return self._cols
    def __write_img_bin(self, im, file_path):
        """
        Write an image to a .bin file as float32 (single-band data only for now)
        :param im: image matrix
        :param file_path: full path of the .bin file
        """
        with open(file_path, 'wb') as f:
            self._rows = im.shape[0]
            self._cols = im.shape[1]
            for row in range(self._rows):
                im_bin = struct.pack("f" * self._cols, *np.reshape(im[row, :], (self._cols, 1), order='F'))
                f.write(im_bin)
    @staticmethod
    def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
        """
        Write the ENVI header file for an image
        :param out_hdr_path: path of the header file
        :param bin_path: path of the .bin file
        :param rows: number of image rows
        :param cols: number of image columns
        """
        name = os.path.split(bin_path)[1]
        h = ['ENVI',
             'description = {',
             'File Imported into ENVI. }',
             'samples = ' + str(cols),  # columns
             'lines = ' + str(rows),  # rows
             'bands = 1 ',  # band count
             'header offset = 0',
             'file type = ENVI Standard',
             'data type = 4',  # 4 = 32-bit float
             'interleave = bsq',  # storage layout
             'sensor type = Unknown',
             'byte order = 0',
             'band names = {',
             name + '}']
        with open(out_hdr_path, 'w') as doc:
            for line in h:
                print(line, file=doc)
    @staticmethod
    def __write_config_file(out_config_dir, rows, cols):
        """
        Write the PolSARpro config file
        :param out_config_dir: directory of the config file
        :param rows: number of image rows
        :param cols: number of image columns
        """
        h = ['Nrow', str(rows), '---------',
             'Ncol', str(cols), '---------',
             'PolarCase', 'monostatic', '---------',
             'PolarType', 'pp1']  # pp1: partial (dual) polarimetric mode
        out_config_path = os.path.join(out_config_dir, 'config.txt')
        with open(out_config_path, 'w') as doc:
            for line in h:
                print(line, file=doc)
    def api_dual_polar__to_polsarpro_c2(self, out_file_dir, dual_polar_dir):
        c11, c12, c22 = self.__dual_polar_to_c2(dual_polar_dir)
        self.__c2_to_polsarpro_c2(out_file_dir, c11, c12, c22)
# if __name__ == '__main__':
# tp = DualPolarToPolsarproC2()
# out_dic = 'E:\\3-GF3_KAS_FSI_020253_E110.8_N25.5_20200614_L1A_HHHV_L10004871459\\SLC_SHJ1'
# in_dic = 'E:\\3-GF3_KAS_FSI_020253_E110.8_N25.5_20200614_L1A_HHHV_L10004871459\\'
# # out_file_path = 'D:\\bintest0923\\'
# tp.api_dual_polar__to_polsarpro_c2(out_dic,in_dic)
# # atp.ahv_to_polsarpro_t3(out_file_path, ahv_path)
#
# print("done")

View File

@ -0,0 +1,97 @@
# -*- coding: UTF-8 -*-
"""
@Project onestar
@File GLDM.py
@Contact
scikit-image feature计算图像特征https://blog.csdn.net/lyxleft/article/details/102904909
python如何在二维图像上进行卷积https://www.xz577.com/j/281686.html
利用python的skimage计算灰度共生矩阵https://zhuanlan.zhihu.com/p/147066037
@function 计算图像灰度共生矩阵
@Author SHJ
@Date 2021/11/10 14:42
@Version 1.0.0
"""
import numpy as np
import os
from skimage.feature import greycomatrix, greycoprops
import datetime
from tool.algorithm.image.ImageHandle import ImageHandler
class GLDM:
    def __init__(self, win_size=15, step=2, levels=16, angles=[0, 45, 90, 135],
                 prop=['contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation', 'ASM']):
        self._win_size = win_size  # window size for the GLCM computation; must be odd
        self._step = step  # pixel pair distance
        self._levels = levels  # number of gray levels, e.g. 16 or 256
        self._angles = list(np.deg2rad(np.array(angles)))  # angles, converted to radians
        """
        'contrast': reflects image sharpness and the depth of texture grooves
        'dissimilarity': dissimilarity
        'homogeneity': homogeneity / inverse difference moment; measures local texture variation -- a large value means the texture varies little between regions and is locally very uniform
        'energy': sum of squared GLCM elements; reflects the uniformity of the gray-level distribution and texture coarseness
        'correlation': measures the similarity of GLCM elements along rows or columns
        'ASM': angular second moment
        """
        self._prop = prop  # texture feature names
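    # Minimal GLCM sketch (illustrative; note that scikit-image >= 0.19 renamed
    # greycomatrix/greycoprops to graycomatrix/graycoprops):
    #
    #   img = np.array([[0, 0, 1, 1],
    #                   [0, 0, 1, 1],
    #                   [0, 2, 2, 2],
    #                   [2, 2, 3, 3]], dtype=np.uint8)
    #   glcm = greycomatrix(img, distances=[1], angles=[0], levels=4,
    #                       symmetric=False, normed=True)
    #   contrast = greycoprops(glcm, 'contrast')   # shape (n_distances, n_angles)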
    def get_glcm_value(self, input):
        values_temp = []
        # build the GLCM: image matrix, distances, angles, gray levels, symmetric?, normed?
        # all four angles [0, pi/4, pi/2, 3*pi/4] are computed; a single angle also works
        glcm = greycomatrix(input, [self._step], self._angles, self._levels, symmetric=False, normed=True)
        # compute each requested texture property and average it over distances and angles
        for prop in self._prop:
            temp = greycoprops(glcm, prop)
            values_temp.append(np.mean(temp))
        return values_temp
    def get_glcm_array(self, inputs: np.ndarray, win_size):
        h, w = inputs.shape
        pad = (win_size - 1) // 2
        inputs = np.pad(inputs, pad_width=[(pad, pad), (pad, pad)], mode="constant", constant_values=0)
        glcm_array = {}
        for name in self._prop:
            glcm_array.update({name: np.zeros(shape=(h, w), dtype=np.float32)})
        for i in range(h):  # row index
            for j in range(w):  # column index
                window = inputs[i: i + win_size, j: j + win_size]
                value = self.get_glcm_value(window)
                print('i:%s,j:%s' % (i, j))
                for n, array in zip(range(len(glcm_array)), glcm_array.values()):
                    array[i, j] = value[n]
        return glcm_array
    @staticmethod
    def standardization(data, num=1):
        # normalize the matrix to [0, num]
        data[np.isnan(data)] = np.nanmin(data)  # fill NaNs with the minimum valid value (np.min would return NaN here)
        _range = np.max(data) - np.min(data)
        return (data - np.min(data)) / _range * num
    def api_get_glcm_array(self, out_dir, in_tif_path, name=''):
        ih = ImageHandler()
        proj, geotrans, array = ih.read_img(in_tif_path)
        array[np.where(array > 500000)] = 500000  # clip very large values so normalization does not squash everything towards 0
        array = self.standardization(array, self._levels - 1)  # normalize to 0 ~ self._levels-1
        array = np.uint8(array)
        glcm_array = self.get_glcm_array(array, self._win_size)
        for key, value in glcm_array.items():
            out_path = os.path.join(out_dir, name + '_' + key + '.tif')
            ih.write_img(out_path, proj, geotrans, value)
if __name__ == '__main__':
    start = datetime.datetime.now()
    gldm = GLDM(win_size=9, levels=16, step=3, angles=[0, 45, 90, 135])
    gldm.api_get_glcm_array(r'D:\glcm', r'D:\glcm\src_img.tif')
    end = datetime.datetime.now()
    msg = 'running use time: %s ' % (end - start)
    print(msg)
    # a 666 x 720 image takes roughly: running use time: 0:04:23.155424

View File

@ -0,0 +1,85 @@
import os
import glob
import numpy as np
import struct
from PIL import Image
from tool.algorithm.ml.machineLearning import MachineLeaning as ml
def read_bin_to_img(bin_path):
    """
    Read .bin binary data into a matrix
    :param bin_path: path of the .bin file (a config.txt must sit beside it)
    :return: image matrix
    """
    (bin_dir, bin_name) = os.path.split(bin_path)
    config_path = os.path.join(bin_dir, 'config.txt')
    config = open(config_path, 'r').read().split('\n', -1)
    rows = int(config[1])
    cols = int(config[4])
    bin_file = open(bin_path, 'rb')  # open the binary file
    size = os.path.getsize(bin_path)  # file size in bytes
    if size < rows * cols * 4:
        raise Exception(
            'bin size less than rows*cols*4! size:',
            size,
            'byte, rows:',
            rows,
            'cols:',
            cols)
    img = np.zeros([rows, cols], dtype=np.float32)
    for row in range(rows):
        data = bin_file.read(4 * cols)  # read one row of binary data at a time
        row_data = struct.unpack('f' * cols, data)  # unpack into a row of floats
        img[row, :] = row_data
    bin_file.close()
    return img
def write_bin_to_tif(out_tif_dir, bin_dir):
    """
    Convert the .bin files of a decomposition into tif files
    :param out_tif_dir: output directory for the tif files
    :param bin_dir: directory of the binary data (.bin plus config.txt)
    :return out_tif_path: dict of generated tif paths, keyed by feature name
    """
    bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
    out_tif_path = {}
    for in_path in bin_paths:
        name = os.path.split(in_path)[1].split('.')[0]
        out_path = os.path.join(out_tif_dir, name + '.tif')
        out_tif_path.update({name: out_path})
        if os.path.exists(os.path.split(out_path)[0]) is False:
            os.makedirs(os.path.split(out_path)[0])
        img_array = read_bin_to_img(in_path)
        img_array[np.isnan(img_array)] = 0  # fill NaNs with 0
        img_array = ml.standardization(img_array)  # normalize the data to [0, 1]
        out_image = Image.fromarray(img_array)
        out_image.save(out_path)
    return out_tif_path
def write_bin_to_tif_soil(out_tif_dir, bin_dir):
    """
    Convert the .bin files of a decomposition into tif files (soil products: no normalization)
    :param out_tif_dir: output directory for the tif files
    :param bin_dir: directory of the binary data (.bin plus config.txt)
    :return out_tif_path: dict of generated tif paths, keyed by feature name
    """
    bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
    out_tif_path = {}
    for in_path in bin_paths:
        name = os.path.split(in_path)[1].split('.')[0]
        out_path = os.path.join(out_tif_dir, name + '.tif')
        out_tif_path.update({name: out_path})
        if os.path.exists(os.path.split(out_path)[0]) is False:
            os.makedirs(os.path.split(out_path)[0])
        img_array = read_bin_to_img(in_path)
        img_array[np.isnan(img_array)] = 0  # fill NaNs with 0
        # img_array = ml.standardization(img_array)  # normalization to [0, 1] intentionally skipped here
        out_image = Image.fromarray(img_array)
        out_image.save(out_path)
    return out_tif_path

View File

@ -0,0 +1,190 @@
from tool.algorithm.algtools.MetaDataHandler import Calibration
from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro
from tool.algorithm.polsarpro.pspLeeRefinedFilterT3 import LeeRefinedFilterT3
from tool.algorithm.polsarpro.pspCloudePottierDecomposition import PspCloudePottierDecomposition
from tool.algorithm.polsarpro.pspFreemanDecomposition import PspFreemanDecomposition
from tool.algorithm.polsarpro.pspYamaguchiDecomposition import PspYamaguchiDecomposition
from tool.algorithm.polsarpro.pspTouziDecomposition import PspTouziDecomposition
from tool.algorithm.polsarpro.bin2tif import write_bin_to_tif
from tool.algorithm.polsarpro.pspHAAlphaDecomposition import PspHAAlphaDecomposition
from tool.algorithm.xml.AlgXmlHandle import InitPara
import logging
import os
import shutil
import glob
logger = logging.getLogger("mylog")
class CreateFeature:
    """
    Feature generation
    """
    def __init__(self, debug=False, exe_dir=''):
        self._debug = debug
        self._exe_dir = exe_dir
    def ahv_to_t3(self, workspace_processing_path, workspace_preprocessing_path, hh_hv_vh_vv_list, name='', FILTER_SIZE=3):
        # convert full-polarization tif data to T3 matrices in .bin format
        atp = AHVToPolsarpro(hh_hv_vh_vv_list)
        lee_filter_path = os.path.join(workspace_processing_path, name, 'lee_filter\\')
        if self._debug == False:
            t3_path = os.path.join(workspace_processing_path, name, 'psp_t3\\')
            polarization = ['HH', 'HV', 'VH', 'VV']
            if os.path.exists(workspace_preprocessing_path + name + '\\'):
                meta_xml_paths = list(glob.glob(os.path.join(workspace_preprocessing_path + name, '*.meta.xml')))
                meta_dic = InitPara.get_meta_dic_new(meta_xml_paths, name)
                calibration = Calibration.get_Calibration_coefficient(meta_dic['Origin_META'], polarization)
                tif_path = atp.calibration(calibration, workspace_preprocessing_path, name)
                atp.ahv_to_polsarpro_t3_veg(t3_path, tif_path)
            # refined Lee filtering
            leeFilter = LeeRefinedFilterT3()
            leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols(), FILTER_SIZE)
        logger.info("refine_lee filter success!")
        return lee_filter_path
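    # Pipeline sketch (paths are hypothetical): calibration -> T3 conversion -> refined
    # Lee filter, which is the order ahv_to_t3 performs internally:
    #
    #   cf = CreateFeature(debug=False, exe_dir='D:/exe')
    #   t3_filtered = cf.ahv_to_t3('D:/processing/', 'D:/preprocessing/',
    #                              hh_hv_vh_vv_list, name='GF3_scene', FILTER_SIZE=3)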
    def decompose(self, workspace_processing_path, name, t3_path, rows, cols, hh_hv_vh_vv_dic={}, FeatureInput=['Freeman', 'Yamaguchi', 'Cloude']):  # , 'Touzi'
        """
        Polarimetric decompositions: Freeman, Touzi, Yamaguchi, Cloude
        :param t3_path: T3 matrix directory
        :param rows: number of image rows
        :param cols: number of image columns
        :return: (flag, dict of output folders per decomposition)
        """
        # compute the feature combinations
        exeDir = self._exe_dir
        outFolderDic = {}
        if 'Freeman' in FeatureInput:
            # Freeman decomposition
            freemanOutDir = os.path.join(workspace_processing_path, name + '\\freeman\\')
            if self._debug == False:
                freemDecom = PspFreemanDecomposition(exeDir, t3_path, freemanOutDir)
                flag = freemDecom.api_freeman_decomposition_T3(0, 0, rows, cols)
                if not flag:
                    logger.error('FreemanDecomposition err')
                    return False, None
            outFolderDic['Freeman'] = freemanOutDir
        # Touzi decomposition
        if 'Touzi' in FeatureInput:
            touziOutDir = os.path.join(workspace_processing_path, name + '\\touzi\\')
            if not os.path.exists(touziOutDir):
                os.makedirs(touziOutDir)
            if self._debug == False:
                # Touzi decomposition is slow and its features discriminate relatively poorly
                p = PspTouziDecomposition(hh_hv_vh_vv_dic, touziOutDir)
                p.Touzi_decomposition_multiprocessing()
            outFolderDic['Touzi'] = touziOutDir
        if 'Yamaguchi' in FeatureInput:
            # Yamaguchi decomposition
            yamaguchiOutDir = os.path.join(workspace_processing_path, name + '\\yamaguchi\\')
            if self._debug == False:
                yamaguchiDecom = PspYamaguchiDecomposition(exeDir, t3_path, yamaguchiOutDir)
                flag = yamaguchiDecom.api_yamaguchi_4components_decomposition_T3(0, 0, rows, cols)
                if not flag:
                    logger.error('YamaguchiDecomposition err')
                    return False, None
            outFolderDic['Yamaguchi'] = yamaguchiOutDir
        if 'Cloude' in FeatureInput:
            # Cloude-Pottier decomposition
cloudeOutDir = os.path.join(workspace_processing_path, name + '\\cloude\\')
if self._debug == False:
cloudeDecom = PspCloudePottierDecomposition(
exeDir, t3_path, cloudeOutDir)
flag = cloudeDecom.api_h_a_alpha_decomposition_T3(
0, 0, rows, cols)
if not flag:
logger.error('CloudePottierDecomposition err')
return False, None
outFolderDic['Cloude'] = cloudeOutDir
return True, outFolderDic
def creat_h_a_alpha_features(self, t3_path, out_dir):
logger.info('ahv transform to polsarpro T3 matrix success!')
logger.info('progress bar: 20%')
h_a_alpha_decomposition_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_decomposition_T3.exe')
h_a_alpha_eigenvalue_set_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_eigenvalue_set_T3.exe')
h_a_alpha_eigenvector_set_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_eigenvector_set_T3.exe')
if self._debug == False:
haa = PspHAAlphaDecomposition(normalization=True)
haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=out_dir,
h_a_alpha_decomposition_T3_path=h_a_alpha_decomposition_T3_path ,
h_a_alpha_eigenvalue_set_T3_path=h_a_alpha_eigenvalue_set_T3_path ,
h_a_alpha_eigenvector_set_T3_path=h_a_alpha_eigenvector_set_T3_path,
polsarpro_in_dir=t3_path)
def cereat_features_dic(self,outFolderDic, feature_tif_dir):
if not os.path.exists(feature_tif_dir):
os.makedirs(feature_tif_dir)
feature_tif_paths = {}
for key in outFolderDic:
feature_bin_dic = outFolderDic[key]
if key == 'Touzi':
for path in list(glob.glob(os.path.join(feature_bin_dic, '*.tif'))):
name = os.path.split(path)[1].split('.')[0]
if self._debug == False:
shutil.copyfile(path, os.path.join(feature_tif_dir, name + '.tif')) # feature_tif_dir + '\\' + name + '.tif')
feature_tif_paths.update({name: os.path.join(feature_tif_dir, name + '.tif')}) # feature_tif_dir + '\\' + name + '.tif'
else:
feature_tif_paths.update(write_bin_to_tif(feature_tif_dir, feature_bin_dic))
return feature_tif_paths
@staticmethod
def decompose_single_tar(hh_hv_vh_vv_list, workspace_processing_path, workspace_preprocessing_path, name, exe_dir, rows, cols, FILTER_SIZE = 3, debug =False, FeatureInput=['Freeman', 'Yamaguchi', 'Cloude']):
hh_hv_vh_vv_dic = {}
hh_hv_vh_vv_dic.update({'HH': hh_hv_vh_vv_list[0]})
hh_hv_vh_vv_dic.update({'HV': hh_hv_vh_vv_list[1]})
hh_hv_vh_vv_dic.update({'VH': hh_hv_vh_vv_list[2]})
hh_hv_vh_vv_dic.update({'VV': hh_hv_vh_vv_list[3]})
t3_path = os.path.join(workspace_processing_path, name, "lee_filter") # workspace_processing_path + name + "\\lee_filter"
feature_tif_dir = os.path.join(workspace_processing_path, name, 'features') # workspace_processing_path + name + "\\features"
cfeature = CreateFeature(debug, exe_dir)
cfeature.creat_h_a_alpha_features(t3_path, feature_tif_dir)
t3_path = cfeature.ahv_to_t3(workspace_processing_path, workspace_preprocessing_path, hh_hv_vh_vv_list, name, FILTER_SIZE)
flag, outFolderDic = cfeature.decompose(workspace_processing_path, name, t3_path, rows, cols, hh_hv_vh_vv_dic, FeatureInput) # , 'Touzi'
cfeature.cereat_features_dic(outFolderDic, feature_tif_dir)
return feature_tif_dir
if __name__ == '__main__':
    # # Example 1
# exe_dir = os.getcwd()
# dir = r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\preprocessed/'
# hh_hv_vh_vv_list = [dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HV_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VV_preprocessed.tif']
#
# workspace_processing_path= r"D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\processing/"
# name= 'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC'
# hh_hv_vh_vv_dic = {}
# hh_hv_vh_vv_dic.update({'HH': hh_hv_vh_vv_list[0]})
# hh_hv_vh_vv_dic.update({'HV': hh_hv_vh_vv_list[1]})
# hh_hv_vh_vv_dic.update({'VH': hh_hv_vh_vv_list[2]})
# hh_hv_vh_vv_dic.update({'VV': hh_hv_vh_vv_list[3]})
# t3_path = workspace_processing_path + name + "\\lee_filter"
# feature_tif_dir = workspace_processing_path + name + "\\features"
#
# cfeature = CreateFeature(False, exe_dir)
#
# cfeature.creat_h_a_alpha_features(t3_path, feature_tif_dir)
#
# t3_path = cfeature.ahv_to_t3(workspace_processing_path, hh_hv_vh_vv_list, name, 3)
# flag, outFolderDic = cfeature.decompose(workspace_processing_path, name, t3_path, 997, 1227, hh_hv_vh_vv_dic, FeatureInput=['Freeman', 'Touzi', 'Yamaguchi', 'Cloude'])
#
# feature_tifs_dic = cfeature.cereat_features_dic(outFolderDic, feature_tif_dir)
pass

File diff suppressed because it is too large

View File

@ -0,0 +1,132 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspHAAlphaDecomposition.py Cloude-Pottier分解
@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
(Averaging using a sliding window)
V1.0.1:1可选分解特征2bin转tif格式
@Contact:
@Author:SHJ
@Date:2021/9/24 9:06
@Version:1.0.1
"""
import os
import shutil
import subprocess
import logging
logger = logging.getLogger("mylog")
class PspCloudePottierDecomposition:
"""
调用polsarpro4.2.0的Cloude-Pottier极化分解 h_a_alpha_decomposition_T3.exe
"""
def __init__(
self,
exeDir,
inT3Dir,
outDir,
exeDecomposeName='h_a_alpha_decomposition_T3.exe'):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
"""
self.__exeName = exeDecomposeName
self.__exeDir = exeDir
self.__inT3Dir = inT3Dir
self.__outDir = outDir
self.__DecompostFlag = False
pass
def api_h_a_alpha_decomposition_T3(
self,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=1):
"""
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin :Size of the (Nwin, Nwin) sliding window used to compute local estimates. (int)
"""
if self.__DecompostFlag:
return True
if len(self.__exeDir) == 0:
if not os.path.exists(self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeName
else:
if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(self.__inT3Dir):
logger.error('T3 Matrix check failed.')
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
alpbetdelgam = 1
Lambda = 1
alpha = 1
entropy = 1
anisotropy = 1
CombHA = 1
CombH1mA = 1
Comb1mHA = 1
Comb1mH1mA = 1
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
self.__inT3Dir,
self.__outDir,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol,
alpbetdelgam,
Lambda,
alpha,
entropy,
anisotropy,
CombHA,
CombH1mA,
Comb1mHA,
Comb1mH1mA]
cmd = " ".join(str(i) for i in para_list)
config_path = os.path.join(self.__inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
self.__DecompostFlag = True
return True
@staticmethod
def _checkT3Matrix(T3Dir):
        # check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
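# For reference (paths are hypothetical), the command assembled by
# api_h_a_alpha_decomposition_T3 above follows the PolSARpro CLI layout:
#
#   h_a_alpha_decomposition_T3.exe <inT3Dir> <outDir> <Nwin> <Off_lig> <Off_col>
#       <Sub_Nlig> <Sub_Ncol> <alpbetdelgam> <lambda> <alpha> <entropy> <anisotropy>
#       <CombHA> <CombH1mA> <Comb1mHA> <Comb1mH1mA>
#
# e.g. "D:\exe\h_a_alpha_decomposition_T3.exe D:\t3 D:\out 1 0 0 997 1227 1 1 1 1 1 1 1 1 1"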

View File

@ -0,0 +1,109 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspFreemanDecomposition.py
@Function:
@Contact:
@Author:LVY
@Date:2021/10/12 18:45
@Version:1.0.0
"""
import os
import shutil
import subprocess
import logging
logger = logging.getLogger("mylog")
class PspFreemanDecomposition:
"""
Freeman分解
"""
def __init__(
self,
exeDir,
inT3Dir,
outDir,
exeDecomposeName='freeman_decomposition_T3.exe'):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
"""
self.__exeName = exeDecomposeName
self.__exeDir = exeDir
self.__inT3Dir = inT3Dir
self.__outDir = outDir
self.__DecompostFlag = False
pass
def api_freeman_decomposition_T3(
self,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=1):
"""
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin :Size of the (Nwin, Nwin) sliding window used to compute local estimates. (int)
"""
if self.__DecompostFlag:
return True
if len(self.__exeDir) == 0:
if not os.path.exists(self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeName
else:
if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(self.__inT3Dir):
logger.error('T3 Matrix check failed.')
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
self.__inT3Dir,
self.__outDir,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = " ".join(str(i) for i in para_list)
config_path = os.path.join(self.__inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
self.__DecompostFlag = True
return True
@staticmethod
def _checkT3Matrix(T3Dir):
        # check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True

View File

@ -0,0 +1,435 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspHAAlphaDecomposition.py
@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
(Averaging using a sliding window)
V1.0.1:1可选分解特征2bin转tif格式
@Contact:
@Author:SHJ
@Date:2021/9/24 9:06
@Version:1.0.1
"""
import os
import shutil
import subprocess
import struct
import numpy as np
import glob
from PIL import Image
import logging
logger = logging.getLogger("mylog")
import multiprocessing
class PspHAAlphaDecomposition:
    """
    Run the PolSARpro 4.2.0 Cloude-Pottier polarimetric decomposition
    """
    def __init__(self, normalization=False):
        self.__normalization = normalization  # whether to normalize the outputs
        self.__res_h_a_alpha_decomposition_T3 = {}
        self.__res_h_a_alpha_eigenvalue_set_T3 = {}
        self.__res_h_a_alpha_eigenvector_set_T3 = {}
def api_creat_h_a_alpha_features_single_process(self, h_a_alpha_out_dir,
h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path,
h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir,is_trans_to_tif=True, is_read_to_dic=False):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解(h_a_alpha_decompositionh_a_alpha_eigenvalue_set h_a_alpha_eigenvector_set)
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param h_a_alpha_eigenvector_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
"""
h_a_alpha_features ={}
h_a_alpha_features.update(self.api_h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1)))
logger.info("run h_a_alpha_decomposition_T3 success!")
logger.info('progress bar: 40%')
h_a_alpha_features.update(self.api_h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)))
logger.info("run h_a_alpha_eigenvalue_set_T3 success!")
logger.info('progress bar: 60%')
h_a_alpha_features.update(self.api_h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1)))
logger.info("run h_a_alpha_eigenvector_set_T3 success!")
logger.info('progress bar: 80%')
if is_trans_to_tif:
self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
if is_read_to_dic:
h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
return h_a_alpha_features
def api_creat_h_a_alpha_features(self, h_a_alpha_out_dir,
h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path,
h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir,is_trans_to_tif=True, is_read_to_dic=False):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解(h_a_alpha_decompositionh_a_alpha_eigenvalue_set h_a_alpha_eigenvector_set)
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param h_a_alpha_eigenvector_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
"""
pool = multiprocessing.Pool(processes=3)
pl = []
logger.info("run h_a_alpha_decomposition_T3!")
pl.append(pool.apply_async(self.api_h_a_alpha_decomposition_T3, (h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1))))
logger.info("run h_a_alpha_eigenvalue_set_T3!")
pl.append(pool.apply_async(self.api_h_a_alpha_eigenvalue_set_T3, (h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))))
logger.info("run h_a_alpha_eigenvector_set_T3!")
pl.append(pool.apply_async(self.api_h_a_alpha_eigenvector_set_T3, (h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1))))
pool.close()
pool.join()
logger.info(pl)
logger.info('progress bar: 60%')
h_a_alpha_features = {}
h_a_alpha_features.update(self.__res_h_a_alpha_decomposition_T3)
logger.info("run h_a_alpha_decomposition_T3 success!")
h_a_alpha_features.update(self.__res_h_a_alpha_eigenvalue_set_T3)
logger.info("run h_a_alpha_eigenvalue_set_T3 success!")
h_a_alpha_features.update(self.__res_h_a_alpha_eigenvector_set_T3)
logger.info("run h_a_alpha_eigenvector_set_T3 success!")
if is_trans_to_tif:
self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
if is_read_to_dic:
h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
return h_a_alpha_features
def api_h_a_alpha_decomposition_T3(self, h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解H-A-Alpha分解
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param is_trans_to_tif:分解特征是否转换为tif
:param is_read_to_dic:分解特征是否以字典输出
:param *args:9个可选分解特征(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
:return : 包含分解特征的字典
"""
if not os.path.exists(h_a_alpha_out_dir):
os.makedirs(h_a_alpha_out_dir)
self.__h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, *args)
name_list = ['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
if is_trans_to_tif:
self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
if is_read_to_dic:
self.__res_h_a_alpha_decomposition_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
return self.__res_h_a_alpha_decomposition_T3
else:
return {}
def api_h_a_alpha_eigenvalue_set_T3(self, h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
"""
Cloude-Pottier eigenvalue based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvalue
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param is_trans_to_tif:分解特征是否转换为tif
:param is_read_to_dic:分解特征是否以字典输出
:param *args:9个可选分解特征(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
:return : 包含分解特征的字典
"""
if not os.path.exists(h_a_alpha_out_dir):
os.makedirs(h_a_alpha_out_dir)
self.__h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, *args)
name_list = ['anisotropy', 'anisotropy_lueneburg', 'anisotropy12', 'asymetry', 'derd', 'derd_norm', 'entropy_shannon',
'entropy_shannon_I', 'entropy_shannon_I_norm', 'entropy_shannon_norm', 'entropy_shannon_P',
'entropy_shannon_P_norm', 'l1', 'l2', 'l3', 'p1', 'p2', 'p3', 'pedestal', 'polarisation_fraction',
'rvi', 'serd', 'serd_norm']
if is_trans_to_tif:
self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
if is_read_to_dic:
self.__res_h_a_alpha_eigenvalue_set_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
return self.__res_h_a_alpha_eigenvalue_set_T3
else:
return {}
def api_h_a_alpha_eigenvector_set_T3(self, h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
"""
Cloude-Pottier eigenvector based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvector
:param h_a_alpha_eigenvector_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param is_trans_to_tif:分解特征是否转换为tif
:param is_read_to_dic:分解特征是否以字典输出
:param *args:9个可选分解特征(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
:return : 包含分解特征的字典
"""
if not os.path.exists(h_a_alpha_out_dir):
os.makedirs(h_a_alpha_out_dir)
self.__h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, *args)
name_list = ['alpha', 'alpha1', 'alpha2', 'alpha3',
'beta', 'beta1', 'beta2', 'beta3',
'delta', 'delta1', 'delta2', 'delta3',
'gamma', 'gamma1', 'gamma2', 'gamma3']
if is_trans_to_tif:
self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
if is_read_to_dic:
self.__res_h_a_alpha_eigenvector_set_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
return self.__res_h_a_alpha_eigenvector_set_T3
else:
return {}
def api_read_T3_matrix(self,polsarpro_T3_dir):
"""
读取T3矩阵转换字典
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:return : 包含T3矩阵的字典
"""
name_list = ['T11', 'T12_imag', 'T12_real',
'T22', 'T13_imag', 'T13_real',
'T33', 'T23_imag', 'T23_real']
return self.__read_haalpha(polsarpro_T3_dir, name_list)
def api_trans_T3_to_tif(self, out_tif_dir, polsarpro_T3_dir):
"""
将T3矩阵从bin格式转换为tif格式
:param out_tif_dir:保存路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
"""
name_list = ['T11', 'T12_imag', 'T12_real',
'T22', 'T13_imag', 'T13_real',
'T33', 'T23_imag', 'T23_real']
self.__write_haalpha_to_tif(out_tif_dir, polsarpro_T3_dir, name_list)
@staticmethod
def __h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, *args):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解H-A-Alpha分解
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param *args:9个可选输出变量(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
"""
        if not os.path.exists(h_a_alpha_decomposition_T3_path):
            raise Exception(h_a_alpha_decomposition_T3_path + ' does not exist!')
NwinFilter = 1
offsetRow = 0
offsetCol = 0
config_path = os.path.join(polsarpro_in_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
numRow = int(config[1])
numCol = int(config[4])
alpbetdelgam = int(args[0])
Lambda = int(args[1])
alpha = int(args[2])
entropy = int(args[3])
anisotropy = int(args[4])
CombHA = int(args[5])
CombH1mA = int(args[6])
Comb1mHA = int(args[7])
Comb1mH1mA = int(args[8])
para_list = [h_a_alpha_decomposition_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
str(alpbetdelgam), str(Lambda), str(alpha), str(entropy), str(anisotropy),
str(CombHA), str(CombH1mA), str(Comb1mHA), str(Comb1mH1mA)]
cmd = ' '.join(para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
raise Exception(result_tuple[1])
shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
@staticmethod
def __h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, *args):
"""
Cloude-Pottier eigenvalue based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvalue
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param *args:11个可选输出变量(eigen123,proba123,anisotropy,anisotropy12,asymetry,
polarisation_fraction,erd,rvi,pedestal,shannon,lueneburg),不输出0输出1
"""
if not os.path.exists(h_a_alpha_eigenvalue_set_T3_path):
raise Exception(h_a_alpha_eigenvalue_set_T3_path +' is not exists!')
NwinFilter = 1
offsetRow = 0
offsetCol = 0
config_path = os.path.join(polsarpro_in_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
numRow = int(config[1])
numCol = int(config[4])
eigen123 = int(args[0])
proba123 = int(args[1])
anisotropy = int(args[2])
anisotropy12 = int(args[3])
asymetry = int(args[4])
polarisation_fraction = int(args[5])
erd = int(args[6])
rvi = int(args[7])
pedestal = int(args[8])
shannon = int(args[9])
lueneburg = int(args[10])
para_list = [h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
str(eigen123), str(proba123), str(anisotropy), str(anisotropy12), str(asymetry),
str(polarisation_fraction), str(erd), str(rvi), str(pedestal),
str(shannon), str(lueneburg)]
cmd = ' '.join(para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
raise Exception(result_tuple[1])
shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
@staticmethod
def __h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, *args):
"""
Cloude-Pottier eigenvector based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvector
:param h_a_alpha_eigenvector_set_T3_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param *args:5个可选输出变量(alpha123,beta123,delta123,gamma123,alpbetdelgam),不输出0输出1
"""
if not os.path.exists(h_a_alpha_eigenvector_set_T3_path):
raise Exception(h_a_alpha_eigenvector_set_T3_path +' is not exists!')
NwinFilter = 1
offsetRow = 0
offsetCol = 0
config_path = os.path.join(polsarpro_in_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
numRow = int(config[1])
numCol = int(config[4])
alpha123 = int(args[0])
beta123 = int(args[1])
delta123 = int(args[2])
gamma123 = int(args[3])
alpbetdelgam = int(args[4])
para_list = [h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
str(alpha123), str(beta123), str(delta123), str(gamma123), str(alpbetdelgam)]
cmd = ' '.join(para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
raise Exception(result_tuple[1])
shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
def __read_haalpha(self, h_a_alpha_dir, name_list):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param h_a_alpha_dir : h_a_alpha二进制数据的目录,包含.bin,.config
:name_list : 需要组合的名称集合['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
:return : 包含H-A-Alpha矩阵信息的字典
"""
dir = os.path.join(h_a_alpha_dir, '*.bin')
bin_paths = list(glob.glob(dir))
haalpha_dic ={}
for name in name_list:
path = os.path.join(h_a_alpha_dir, name + '.bin')
if path in bin_paths:
img = self.__read_bin_to_img(path)
haalpha_dic.update({name: img})
return haalpha_dic
    def standardization(self, data, num=1):
        # normalize the matrix to [0, num]
        data[np.isnan(data)] = np.nanmin(data)  # fill NaNs with the minimum valid value (np.min would return NaN here)
        _range = np.max(data) - np.min(data)
        return (data - np.min(data)) / _range * num
def __write_haalpha_to_tif(self, out_tif_dir, h_a_alpha_dir, name_list):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param out_tif_dir : tif的输出路径
:param h_a_alpha_dir : h_a_alpha二进制数据的目录,包含.bin,.config
:name_list : 需要组合的名称集合['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
"""
dir = os.path.join(h_a_alpha_dir, '*.bin')
bin_paths = list(glob.glob(dir))
for name in name_list:
in_path = os.path.join(h_a_alpha_dir, name + '.bin')
out_path = os.path.join(out_tif_dir, name + '.tif')
if in_path in bin_paths:
img_array = self.__read_bin_to_img(in_path)
if self.__normalization is True:
img_array = self.standardization(img_array, num=1)
out_image = Image.fromarray(img_array)
out_image.save(out_path)
    @staticmethod
    def __read_bin_to_img(bin_path):
        """
        Read .bin binary data into a matrix
        :param bin_path: path of the .bin file (a config.txt must sit beside it)
        :return: image matrix
        """
        (bin_dir, bin_name) = os.path.split(bin_path)
        config_path = os.path.join(bin_dir, 'config.txt')
        config = open(config_path, 'r').read().split('\n', -1)
        rows = int(config[1])
        cols = int(config[4])
        bin_file = open(bin_path, 'rb')  # open the binary file
        size = os.path.getsize(bin_path)  # file size in bytes
        if size < rows * cols * 4:
            raise Exception('bin size less than rows*cols*4! size:', size, 'byte, rows:', rows, 'cols:', cols)
        img = np.zeros([rows, cols], dtype=np.float32)
        for row in range(rows):
            data = bin_file.read(4 * cols)  # read one row of binary data at a time
            row_data = struct.unpack('f' * cols, data)  # unpack into a row of floats
            img[row, :] = row_data
        bin_file.close()
        return img
# if __name__ == '__main__':
# h_a_alpha_decomposition_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_decomposition_T3.exe'
# h_a_alpha_eigenvalue_set_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_eigenvalue_set_T3.exe'
# h_a_alpha_eigenvector_set_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_eigenvector_set_T3.exe'
# polsarpro_in_dir = 'D:\\PolSARpro_v4.2.0\\in'
# haalpha_out_dir = 'D:\\PolSARpro_v4.2.0\\out'
# h_a_alpha_eigenvalue_set_T3_out = 'D:\\PolSARpro_v4.2.0\\out\\h_a_alpha_eigenvalue_set_T3'
# h_a_alpha_eigenvector_set_T3_out = 'D:\\PolSARpro_v4.2.0\\out\\h_a_alpha_eigenvector_set_T3'
#
# haa = PspHAAlphaDecomposition()
# h_a_alpha_features = haa.api_creat_h_a_alpha_features(haalpha_out_dir, h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir)
# haa = PspHAAlphaDecomposition(normalization=True)
# psp_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024_RPCpsp_t3"
# t3_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\t3"
# exe_dir = r"I:\microproduct\soilSalinity/"
# haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=t3_path,
# h_a_alpha_decomposition_T3_path= exe_dir + 'h_a_alpha_decomposition_T3.exe',
# h_a_alpha_eigenvalue_set_T3_path= exe_dir + 'h_a_alpha_eigenvalue_set_T3.exe',
# h_a_alpha_eigenvector_set_T3_path=exe_dir +'h_a_alpha_eigenvector_set_T3.exe',
# polsarpro_in_dir=psp_path)
# print('done')

View File

@ -0,0 +1,170 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspLeeRefinedFilterC2.py
@Function:
@Contact:
@Author:SHJ
@Date:2021/11/5
@Version:1.0.0
"""
import logging
import os
import shutil
import subprocess
import glob
import numpy as np
import struct
from PIL import Image
logger = logging.getLogger("mylog")
class LeeRefinedFilterC2:
    """
    Run refined Lee filtering with PolSARpro 4.2.0 lee_refined_filter_C2.exe
    """
    def __init__(self, exeFilterName='lee_refined_filter_C2.exe'):
        self.__exeName = exeFilterName
def api_lee_refined_filter_C2(
self,
exeDir,
inC2Dir,
outDir,
off_row,
off_col,
Nrow,
Ncol,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inC2Dir:C2矩阵目录
:param outDir:输出目录
:param off_row:行偏移行启始位置
:param off_col:列偏移列启始位置
:param Nrow:终止行
:param Ncol:终止列
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' not exists.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(
exeDir +
'\\' +
self.__exeName +
' not exists.')
exePath = exeDir + '\\' + self.__exeName
        # if not self._checkC2Matrix(inC2Dir):
        #     raise Exception('C2 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default if Nwin is even or negative
Nwin = 7
Off_lig = off_row
Off_col = off_col
Sub_Nlig = Nrow
Sub_Ncol = Ncol
para_list = [
exePath,
inC2Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
config_path = os.path.join(inC2Dir, 'config.txt')
if config_path != os.path.join(outDir, 'config.txt'):
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
@staticmethod
def _checkC2Matrix(C2Dir):
        # check that all C2 matrix files exist
if not os.path.exists(C2Dir):
return False
file_name_in_out = ['C11.bin', 'C12_real.bin', 'C12_imag.bin', 'C22.bin','config.txt']
for item in file_name_in_out:
if not os.path.exists(C2Dir + "\\" + item):
return False
return True
def write_bin_to_tif(self, out_tif_dir, bin_dir):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param out_tif_dir : tif的输出路径
:param bin_dir : 二进制数据的目录,包含.bin,.config
:return out_tif_path: 生成tif的路径字典
"""
bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
out_tif_path = {}
for in_path in bin_paths:
name = os.path.split(in_path)[1].split('.')[0]
out_path = os.path.join(out_tif_dir, name + '.tif')
out_tif_path.update({name: out_path})
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
img_array = self.__read_bin_to_img(in_path)
            img_array[np.isnan(img_array)] = 0  # fill NaNs with 0
            # img_array = self.standardization(img_array)  # normalization to [0, 1] intentionally skipped here
out_image = Image.fromarray(img_array)
out_image.save(out_path)
return out_tif_path
    @staticmethod
    def __read_bin_to_img(bin_path):
        """
        Read .bin binary data into a matrix
        :param bin_path: path of the .bin file (a config.txt must sit beside it)
        :return: image matrix
        """
        (bin_dir, bin_name) = os.path.split(bin_path)
        config_path = os.path.join(bin_dir, 'config.txt')
        config = open(config_path, 'r').read().split('\n', -1)
        rows = int(config[1])
        cols = int(config[4])
        bin_file = open(bin_path, 'rb')  # open the binary file
        size = os.path.getsize(bin_path)  # file size in bytes
        if size < rows * cols * 4:
            raise Exception(
                'bin size less than rows*cols*4! size:',
                size,
                'byte, rows:',
                rows,
                'cols:',
                cols)
        img = np.zeros([rows, cols], dtype=np.float32)
        for row in range(rows):
            data = bin_file.read(4 * cols)  # read one row of binary data at a time
            row_data = struct.unpack('f' * cols, data)  # unpack into a row of floats
            img[row, :] = row_data
        bin_file.close()
        return img
if __name__ == '__main__':
    tp = LeeRefinedFilterC2()
    inC2Dir = r'E:\MicroWorkspace\LandCover\HHHV1'
    outDir = r'E:\MicroWorkspace\LandCover\HHHV1_f'
    off_row = 0
    off_col = 0
    Nrow = 666
    Ncol = 746
    tp.api_lee_refined_filter_C2('', inC2Dir, outDir, off_row, off_col, Nrow, Ncol)
    tp.write_bin_to_tif(outDir, outDir)
    print('done')


@ -0,0 +1,104 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspLeeRefinedFilterT3.py
@Function: refined Lee filtering of a 3x3 coherency matrix [T3]
           (wraps the PolSARpro lee_refined_filter_T3.exe)
@Contact:
@Author:LVY
@Date:2021/10/12 9:06
@Version:1.0.0
"""
import logging
import os
import shutil
import subprocess
logger = logging.getLogger("mylog")
class LeeRefinedFilterT3:
"""
调用polsarpro4.2.0的lee_refined_filter_T3.exe做精致Lee滤波
"""
def __init__(self, exeFilterName='lee_refined_filter_T3.exe'):
self.__exeName = exeFilterName
pass
def api_lee_refined_filter_T3(
self,
exeDir,
inT3Dir,
outDir,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' not exists.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(
exeDir +
'\\' +
self.__exeName +
' not exists.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default for even or negative window sizes
            Nwin = 7
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
inT3Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
        result_tuple = subprocess.getstatusoutput(cmd)
        # note: these PolSARpro tools exit with status 1 on success, hence the != 1 test
        if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
            raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
        # verify that the T3 matrix directory holds all expected files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
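# A hypothetical usage sketch in the style of the C2 module's __main__ above
# (the paths and scene size are placeholders, not shipped test data):
# if __name__ == '__main__':
#     tp = LeeRefinedFilterT3()
#     tp.api_lee_refined_filter_T3('', r'E:\MicroWorkspace\LandCover\T3',
#                                  r'E:\MicroWorkspace\LandCover\T3_f',
#                                  0, 0, 666, 746, Nwin=7, Nlook=1)
#     print('done')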


@ -0,0 +1,393 @@
import logging
import os
import shutil
import subprocess
logger = logging.getLogger("mylog")
class SurfaceInversionDubois:
"""
调用polsarpro4.2.0的surface_inversion_dubois.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_dubois.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_dubois(
self,
exeDir,
inT3Dir,
outDir,
incidence,
rectX,
rectY,
row,
col,
frequency, # GHZ
angleFlag, # 0:deg, 1:rad
):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' not exists.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(
exeDir +
'\\' +
self.__exeName +
' not exists.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
        Off_lig = rectX
        Off_col = rectY
        Sub_Nlig = row
        Sub_Ncol = col
        dataFormat = 'T3'
        calibration_flag = 1
        calibration_coefficient = 0.0
        threshold_HHHH_VVVV = 0.0
        threshold_HVHV_VVVV = 0.0
        # build the command line against the resolved exePath rather than the bare exe name
        cmd = "{} -id {} -od {} -iodf {} -ang {} -ofr {} -ofc {} -fnr {} -fnc {} -fr {} -un {} -caf {} -cac {} -th1 {} -th2 {}".format(
            exePath, inT3Dir, outDir, dataFormat, incidence, Off_lig, Off_col, Sub_Nlig, Sub_Ncol, frequency, angleFlag,
            calibration_flag, calibration_coefficient, threshold_HHHH_VVVV, threshold_HVHV_VVVV)
logger.info('surface_inversion_dubois:{}'.format(cmd))
result = os.system(cmd)
logger.info('cmd_result:{}'.format(result))
logger.info('surface_inversion_dubois finish!')
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
# cmd = ' '.join(str(i) for i in para_list)
# config_path = os.path.join(inT3Dir, 'config.txt')
# shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
# result_tuple = subprocess.getstatusoutput(cmd)
#
# if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
# raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
        # verify that the T3 matrix directory holds all expected files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
class SurfaceInversionHisto:
"""
调用polsarpro4.2.0的surface_inversion_histo.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_histo.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_histo(
self,
exeDir,
inT3Dir,
outDir,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' not exists.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(
exeDir +
'\\' +
self.__exeName +
' not exists.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default for even or negative window sizes
            Nwin = 7
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
inT3Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
        result_tuple = subprocess.getstatusoutput(cmd)
        # note: these PolSARpro tools exit with status 1 on success, hence the != 1 test
        if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
            raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
        # verify that the T3 matrix directory holds all expected files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
class SurfaceInversionOh:
"""
调用polsarpro4.2.0的surface_inversion_oh.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_oh.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_oh(
self,
exeDir,
inT3Dir,
outDir,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' not exists.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(
exeDir +
'\\' +
self.__exeName +
' not exists.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default for even or negative window sizes
            Nwin = 7
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
inT3Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
        result_tuple = subprocess.getstatusoutput(cmd)
        # note: these PolSARpro tools exit with status 1 on success, hence the != 1 test
        if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
            raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
        # verify that the T3 matrix directory holds all expected files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
class SurfaceInversionOh2004:
"""
调用polsarpro4.2.0的surface_inversion_oh2004.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_oh2004.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_oh2004(
self,
exeDir,
inT3Dir,
outDir,
incidence,
rectY,
rectX,
row,
col,
frequency, # GHZ
angleFlag):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' not exists.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(
exeDir +
'\\' +
self.__exeName +
' not exists.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = row
Sub_Ncol = col
dataFormat = 'T3'
threshold_mv = 1.0
threshold_s = 7.0
        # build the command line against the resolved exePath rather than the bare exe name
        cmd = "{} -id {} -od {} -iodf {} -ang {} -ofr {} -ofc {} -fnr {} -fnc {} -fr {} -un {} -th1 {} -th2 {}".format(
            exePath, inT3Dir, outDir, dataFormat, incidence, Off_lig, Off_col, Sub_Nlig, Sub_Ncol, frequency, angleFlag, threshold_mv, threshold_s)
logger.info('surface_inversion_oh2004:{}'.format(cmd))
result = os.system(cmd)
logger.info('cmd_result:{}'.format(result))
logger.info('surface_inversion_oh2004 finish!')
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
# cmd = ' '.join(str(i) for i in para_list)
# result_tuple = subprocess.getstatusoutput(cmd)
# #
# if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
# raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
        # verify that the T3 matrix directory holds all expected files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
# file_name_in_out = ['T11.img', 'T12_real.img', 'T12_imag.img',
# 'T13_real.img', 'T13_imag.img', 'T22.img',
# 'T23_real.img', 'T23_imag.img', 'T33.img']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
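# A hypothetical usage sketch (placeholder paths; the incidence argument is whatever
# the exe expects for -ang, typically an incidence-angle file):
# if __name__ == '__main__':
#     oh2004 = SurfaceInversionOh2004()
#     oh2004.api_surface_inversion_oh2004('', r'E:\MicroWorkspace\T3', r'E:\MicroWorkspace\oh2004',
#                                         incidence=r'E:\MicroWorkspace\incidence.bin',
#                                         rectY=0, rectX=0, row=666, col=746,
#                                         frequency=5.4, angleFlag=0)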


@ -0,0 +1,146 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspTouziDecomposition.py
@Function:
@Contact:
@Author:LVY
@Date:2021/10/14 10:11
@Version:1.0.0
"""
import os
import logging
from tool.algorithm.polsarpro.polarizationDecomposition import ModTouzi as TouziDecomp
from osgeo import gdal
import multiprocessing
from tool.algorithm.block.blockprocess import BlockProcess
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.file.fileHandle import fileHandle
logger = logging.getLogger("mylog")
file = fileHandle(False)
class PspTouziDecomposition:
    """
    Touzi decomposition
    """
def __init__(self, inDic, outDir):
"""
:param inDic:T3矩阵目录
:param outDir:输出目录
"""
self.__inDic = inDic
self.__outDir = outDir
self.__DecompostFlag = False
if self._checkTifFileDic(self.__inDic) is False:
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
def api_Touzi_decomposition_TIF(self, Nwin = 5):
"""
:param Nwin:滤波窗口大小 3 5 7 9 11
"""
bandHH = gdal.Open(self.__inDic["HH"])
bandHV = gdal.Open(self.__inDic["HV"])
bandVH = gdal.Open(self.__inDic["VH"])
bandVV = gdal.Open(self.__inDic["VV"])
bandAll = [bandHH, bandHV, bandVH, bandVV]
decomposition = TouziDecomp(bandAll, Nwin)
decomposition.get_result(self.__outDir)
return True
def Touzi_decomposition_TIF(self,hh_path,hv_path,vh_path,vv_path,out_dir,suffix,Nwin = 5):
"""
:param Nwin:滤波窗口大小 3 5 7 9 11
"""
bandHH = gdal.Open(hh_path)
bandHV = gdal.Open(hv_path)
bandVH = gdal.Open(vh_path)
bandVV = gdal.Open(vv_path)
bandAll = [bandHH, bandHV, bandVH, bandVV]
decomposition = TouziDecomp(bandAll, Nwin)
decomposition.get_result_block(out_dir, suffix)
return True
@staticmethod
def _checkTifFileDic(inDic):
file_name_in_out = ['HH', 'VV', 'HV', 'VH']
for item in file_name_in_out:
if item in inDic:
print(inDic[item])
if not os.path.exists(os.path.join(inDic[item])):
return False
else:
return False
return True
    def Touzi_decomposition_multiprocessing(self):
        # create the working directories
        src_path = os.path.join(self.__outDir, "src_img")
        block_path = os.path.join(self.__outDir, "block")
        decomposition_path = os.path.join(self.__outDir, "feature")
        file.creat_dirs([src_path, block_path, decomposition_path])
shutil.copyfile(self.__inDic["HH"], os.path.join(src_path, "HH.tif"))
shutil.copyfile(self.__inDic["HV"], os.path.join(src_path, "HV.tif"))
shutil.copyfile(self.__inDic["VH"], os.path.join(src_path, "VH.tif"))
shutil.copyfile(self.__inDic["VV"], os.path.join(src_path, "VV.tif"))
self.__cols = ImageHandler.get_img_width(self.__inDic["HH"])
self.__rows = ImageHandler.get_img_height(self.__inDic["HH"])
        # split the rasters into blocks
bp = BlockProcess()
block_size = bp.get_block_size(self.__rows, self.__cols)
bp.cut(src_path, block_path, ['tif', 'tiff'], 'tif', block_size)
logger.info('blocking tifs success!')
img_dir, img_name = bp.get_file_names(block_path, ['tif'])
dir_dict = bp.get_same_img(img_dir, img_name)
hh_list, vv_list, hv_list, vh_list = None, None, None, None
for key in dir_dict.keys():
tmp = key.split('_', 2)[0]
if tmp == 'HH':
hh_list = dir_dict[key]
elif tmp == 'VV':
vv_list = dir_dict[key]
elif tmp == 'HV':
hv_list = dir_dict[key]
elif tmp == 'VH':
vh_list = dir_dict[key]
processes_num = min([len(hh_list), multiprocessing.cpu_count() - 1])
        # process the blocks with a multiprocessing pool
pool = multiprocessing.Pool(processes=processes_num)
for i in range(len(hh_list)):
suffix = bp.get_suffix(os.path.basename(hh_list[i]))
# self.Touzi_decomposition_TIF(hh_list[i], hv_list[i], vh_list[i], vv_list[i], block_path, suffix,5)
pool.apply_async(self.Touzi_decomposition_TIF, (hh_list[i], hv_list[i], vh_list[i], vv_list[i], decomposition_path, suffix,5))
logger.info('total:%s, block:%s touzi!', len(hh_list), i)
pool.close()
pool.join()
        # mosaic the processed blocks back into full images
bp.combine(decomposition_path, self.__cols, self.__rows, self.__outDir, file_type=['tif'], datetype='float16')
file.del_folder(src_path)
file.del_folder(block_path)
file.del_folder(decomposition_path)
pass
# if __name__ == '__main__':
# dir = {}
# dir.update({"HH":"I:\preprocessed\HH_preprocessed.tif"})
# dir.update({"HV":"I:\preprocessed\HV_preprocessed.tif"})
# dir.update({"VH":"I:\preprocessed\VH_preprocessed.tif"})
# dir.update({"VV":"I:\preprocessed\VV_preprocessed.tif"})
#
#
# p = PspTouziDecomposition(dir, "I:/preprocessed/")
# p.Touzi_decomposition_multiprocessing()
# pass


@ -0,0 +1,104 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspFreemanDecomposition.py
@Function:
@Contact:
@Author:LVY
@Date:2021/10/12 18:45
@Version:1.0.0
"""
import os
import shutil
import subprocess
import logging
logger = logging.getLogger("mylog")
class PspYamaguchiDecomposition:
"""
Yamaguchi yamaguchi_3components_decomposition_T3.exe yamaguchi_4components_decomposition_T3.exe
"""
def __init__(
self,
exeDir,
inT3Dir,
outDir,
exeDecomposeName='yamaguchi_4components_decomposition_T3.exe'):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
"""
self.__exeName = exeDecomposeName
self.__exeDir = exeDir
self.__inT3Dir = inT3Dir
self.__outDir = outDir
self.__DecompostFlag = False
pass
def api_yamaguchi_4components_decomposition_T3(
self, rectX, rectY, rectWidth, rectHeight, Nwin=1):
"""
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin :Size of the (Nwin, Nwin) sliding window used to compute local estimates. (int)
"""
if self.__DecompostFlag:
return True
if len(self.__exeDir) == 0:
if not os.path.exists(self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeName
else:
if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(self.__inT3Dir):
logger.error('T3 Matrix check failed.')
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
self.__inT3Dir,
self.__outDir,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = " ".join(str(i) for i in para_list)
config_path = os.path.join(self.__inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
        result_tuple = subprocess.getstatusoutput(cmd)
        # note: these PolSARpro tools exit with status 1 on success, hence the != 1 test
        if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
            raise Exception(result_tuple[1])
self.__DecompostFlag = True
return True
@staticmethod
def _checkT3Matrix(T3Dir):
        # verify that the T3 matrix directory holds all expected files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
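# A hypothetical usage sketch (placeholder paths):
# if __name__ == '__main__':
#     decomp = PspYamaguchiDecomposition(r'D:\PolSARpro\bin', r'E:\MicroWorkspace\T3',
#                                        r'E:\MicroWorkspace\yamaguchi')
#     decomp.api_yamaguchi_4components_decomposition_T3(0, 0, 666, 746, Nwin=3)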


@ -0,0 +1,157 @@
import os
cimport cython  # required import for Cython
import numpy as np  # numpy must be declared for both the Python and the C level
cimport numpy as np  # numpy must be declared for both the Python and the C level
from libc.math cimport pi
from scipy.interpolate import griddata
#####################
# struct definitions
####################
cdef struct Point:  # point structure
    double x
    double y
######################
# ray casting (point-in-polygon test)
######################
cdef int rayCasting(Point p, np.ndarray[double, ndim=2] poly):
    cdef double px = p.x
    cdef double py = p.y
    cdef int flag = 0
    cdef int i = 0
    cdef int l = poly.shape[0]
    cdef int j = l - 1
    cdef double sx
    cdef double sy
    cdef double tx
    cdef double ty
    cdef double x = 0
while(i<l):
sx=poly[i,0]
sy=poly[i,1]
tx=poly[j,0]
ty=poly[j,1]
        # the point coincides with a polygon vertex
        if((sx == px and sy == py) or (tx == px and ty == py)):
            return 1
        # check whether the segment endpoints lie on opposite sides of the ray
        if((sy < py and ty >= py) or (sy >= py and ty < py)):
            # x coordinate of the point on the segment at the ray's y level
            x = sx + (py - sy) * (tx - sx) / (ty - sy)
            # the point lies on a polygon edge
            if(x == px):
                return 1
            # the ray crosses a polygon edge
            if(x > px):
                flag = 0 if flag == 1 else 1
        # advance to the next edge
        j = i
        i = i + 1
    # the point is inside when the ray crosses the boundary an odd number of times
    return 1 if flag == 1 else 0
cpdef np.ndarray[double,ndim=2] insert_data(np.ndarray[double,ndim=2] ori2geo_img,np.ndarray[int , ndim=1] row_ids,np.ndarray[int,ndim=1] col_ids,np.ndarray[double,ndim=1] data):
cdef int i=0
cdef int count=row_ids.shape[0]
while i<count:
ori2geo_img[row_ids[i],col_ids[i]]=data[i]
i=i+1
return ori2geo_img
cpdef np.ndarray[double,ndim=2] cut_L1A_img(np.ndarray[double,ndim=3] ori2geo_img,np.ndarray[double,ndim=2] roi_list):
""" 根据roi 获取栅格对象
"""
cdef int height=ori2geo_img.shape[1]
cdef int width=ori2geo_img.shape[2]
cdef int i=0
cdef int j=0
cdef Point temp_p
cdef np.ndarray[double,ndim=2] mask=np.zeros((height,width),dtype=np.float64)
while i<height:
j=0
while j<width:
temp_p.x=ori2geo_img[0,i,j]
temp_p.y=ori2geo_img[1,i,j]
if rayCasting(temp_p,roi_list)==1:
mask[i,j]=1
else:
mask[i,j]=np.nan
j=j+1
i=i+1
return mask
cdef double distance_powe(Point p1,Point p2):
return (p1.x-p2.x)**2+(p1.y-p2.y)**2
cpdef np.ndarray[double, ndim=2] get_r_c(np.ndarray[double, ndim=3] ori2geo, np.ndarray[double, ndim=2] lon_lat):
    cdef int p_count = lon_lat.shape[0]
    cdef int height = ori2geo.shape[1]
    cdef int width = ori2geo.shape[2]
    cdef int i = 0
    cdef int j = 0
    cdef int c = 0
    cdef double dist = 999
    cdef double temp_dist = 0
    cdef Point p1
    cdef Point p2
    cdef int min_i = 0
    cdef int min_j = 0
    cdef np.ndarray[double, ndim=2] result = np.ones((p_count, 2)) * -1
    # bounding box of the lookup grid
    cdef double min_lon = np.min(ori2geo[0, :, :])
    cdef double max_lon = np.max(ori2geo[0, :, :])
    cdef double min_lat = np.min(ori2geo[1, :, :])
    cdef double max_lat = np.max(ori2geo[1, :, :])
    while c < p_count:
        p1.x = lon_lat[c, 0]
        p1.y = lon_lat[c, 1]
        # skip points outside the grid bounding box (advance c to avoid an endless loop)
        if min_lon > p1.x or max_lon < p1.x or p1.y < min_lat or p1.y > max_lat:
            c = c + 1
            continue
        # brute-force nearest-neighbour search over the lookup grid
        dist = 999
        i = 0
        while i < height:
            j = 0
            while j < width:
                p2.x = ori2geo[0, i, j]
                p2.y = ori2geo[1, i, j]
                temp_dist = distance_powe(p1, p2)
                if temp_dist < dist:
                    dist = temp_dist
                    min_i = i
                    min_j = j
                j = j + 1
            i = i + 1
        result[c, 0] = min_i
        result[c, 1] = min_j
        c = c + 1
    return result
# test helper
cpdef np.ndarray[double, ndim=2] Add(np.ndarray[double, ndim=2] a, double x):
    cdef double d = 0  # declaration; note Cython has no bare bool type here
    print("call succeeded")
    print(a)
    print(x)
    return a + x
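# A minimal sketch of driving the compiled module from Python, assuming the extension
# is importable as SAR_GEO after building (see the setup.py in this repo):
# import numpy as np
# import SAR_GEO
# ori2geo = np.random.rand(2, 100, 100)  # band 0: lon, band 1: lat
# roi = np.array([[0.2, 0.2], [0.8, 0.2], [0.8, 0.8], [0.2, 0.8]], dtype=np.float64)
# mask = SAR_GEO.cut_L1A_img(ori2geo, roi)  # 1 inside the polygon, NaN outside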


@ -0,0 +1,45 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./SAR_geo') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./SAR_geo/SAR_GEO.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# Command: python setup.py build_ext --inplace
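# A minimal smoke test of the built extension; the module name depends on how the
# Extension spec expands, SAR_GEO is assumed here:
# import numpy as np
# import SAR_GEO
# print(SAR_GEO.Add(np.zeros((2, 2)), 1.0))  # expect a 2x2 array of ones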


@ -0,0 +1,573 @@
from tool.algorithm.transforml1a import SAR_GEO as SAR_GEO
from tool.algorithm.image import ImageHandle
import numpy as np
import scipy
from scipy.interpolate import griddata, RegularGridInterpolator
import logging
import pyresample as pr
# interpolation helpers
from pyresample.bilinear import NumpyBilinearResampler
from pyresample import geometry
from pyresample.geometry import AreaDefinition
from osgeo import osr
import os
# os.environ['PROJ_LIB'] = r"D:\Anaconda\envs\micro\Lib\site-packages\osgeo\data\proj"
logger = logging.getLogger("mylog")
##############
# polynomial regression helpers
##############
#
def griddata_geo(points, data, lon_grid, lat_grid, method, i, end_i):
grid_data = griddata(points, data, (lon_grid, lat_grid), method=method, )
grid_data = grid_data[:, :, 0]
return [i, end_i, grid_data]
def griddataBlock(start_x, len_x, start_y, len_y, grid_data_input, grid_x, grid_y, method):
    # interpolate scattered samples located at (grid_x, grid_y) onto this block's own pixel grid
    grid_x = grid_x.reshape(-1)
    grid_y = grid_y.reshape(-1)
    grid_data_input = grid_data_input.reshape(-1)
    x_list = np.array(list(range(len_x))) + start_x
    y_list = np.array(list(range(len_y))) + start_y
    x_grid, y_grid = np.meshgrid(x_list, y_list)
    # RegularGridInterpolator expects gridded values, so the scattered samples
    # are interpolated with griddata instead
    points = np.stack([grid_x, grid_y], axis=1)
    grid_data = griddata(points, grid_data_input, (x_grid, y_grid), method=method)
    return (x_grid, y_grid, grid_data)
class polyfit2d_U:
    def __init__(self, x, y, z) -> None:
        # build the cubic design matrix and solve the normal equations for the coefficients
        X = np.ones((x.shape[0], 10))
X[:, 0] = 1
X[:, 1] = x
X[:, 2] = y
X[:, 3] = x * y
X[:, 4] = x ** 2
X[:, 5] = y ** 2
X[:, 6] = x * X[:, 5]
X[:, 7] = y * X[:, 4]
X[:, 8] = x ** 3
X[:, 9] = y ** 3
Y = z.reshape(-1, 1)
A = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), Y)
self.paras_fit = A
def fit(self, x, y):
X = np.ones((x.shape[0], 10))
X[:, 0] = 1
X[:, 1] = x
X[:, 2] = y
X[:, 3] = x * y
X[:, 4] = x ** 2
X[:, 5] = y ** 2
X[:, 6] = x * X[:, 5]
X[:, 7] = y * X[:, 4]
X[:, 8] = x ** 3
X[:, 9] = y ** 3
z = np.matmul(X, self.paras_fit)
return np.sum(z)
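# A quick self-check of the cubic surface fit on synthetic data (not pipeline data):
# x = np.random.rand(200); y = np.random.rand(200)
# z = 1 + 2 * x + 3 * y + x * y          # a surface inside the model space
# poly = polyfit2d_U(x, y, z)
# print(poly.fit(np.array([0.5]), np.array([0.5])))  # expect ~3.75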
class TransImgL1A:
def __init__(self, ori_sim_path, roi):
self._begin_r, self._begin_c, self._end_r, self._end_c = 0, 0, 0, 0
self.ori2geo_img = None
self._mask = None
self._min_lon, self._max_lon, self._min_lat, self._max_lat = 0, 0, 0, 0
self.init_trans_para(ori_sim_path, roi)
def get_roi_points(self):
rowcol = np.where(self._mask == 1)
data = [(self._begin_r + row, self._begin_c + col) for (row, col) in zip(rowcol[0], rowcol[1])]
return data
def get_lonlat_points(self):
lon = self.ori2geo_img[0, :, :][np.where(self._mask == 1)]
lat = self.ori2geo_img[1, :, :][np.where(self._mask == 1)]
data = [(row, col) for (row, col) in zip(lon, lat)]
return data
######################
    # interpolation methods
######################
    def init_trans_para(self, ori_sim_path, roi):
        """Initialize the L1A cropping parameters
        Args:
            ori_sim_path (_type_): lookup raster (band 0: lon, band 1: lat)
            roi (_type_): cropping roi polygon
        """
        ori2geo_img = ImageHandle.ImageHandler.get_data(ori_sim_path)
        point_list = np.array(roi)
        min_lon = np.nanmin(point_list[:, 0])
        max_lon = np.nanmax(point_list[:, 0])
        min_lat = np.nanmin(point_list[:, 1])
        max_lat = np.nanmax(point_list[:, 1])
        self._min_lon, self._max_lon, self._min_lat, self._max_lat = min_lon, max_lon, min_lat, max_lat
        r_c_list = np.where(
            (ori2geo_img[0, :, :] >= min_lon) & (ori2geo_img[0, :, :] <= max_lon)
            & (ori2geo_img[1, :, :] >= min_lat) & (ori2geo_img[1, :, :] <= max_lat))
        if r_c_list[0].size == 0:  # robust emptiness test instead of comparing arrays with []
            msg = 'csv_roi:' + str(roi) + ' not in box, please revise the csv data'
            print(msg)
else:
# print("csv_roi:")
# print(roi)
r_min = np.nanmin(r_c_list[0])
r_max = np.nanmax(r_c_list[0])
c_min = np.nanmin(r_c_list[1])
c_max = np.nanmax(r_c_list[1])
self.ori2geo_img = ori2geo_img[:, r_min:r_max + 1, c_min:c_max + 1]
            # invoke the compiled Cython component to compute the mask
            mask = SAR_GEO.cut_L1A_img(self.ori2geo_img.astype(np.float64), point_list)
self._begin_r = r_min
self._end_r = r_max
self._begin_c = c_min
self._end_c = c_max
self._mask = mask
    def cut_L1A(self, in_path, out_path):
        img = ImageHandle.ImageHandler.get_data(in_path)
        if len(img.shape) == 3:
            cut_img = img[:, self._begin_r:self._end_r + 1, self._begin_c:self._end_c + 1]
            cut_img[0, :, :] = cut_img[0, :, :] * self._mask
            cut_img[1, :, :] = cut_img[1, :, :] * self._mask
            ImageHandle.ImageHandler.write_img(out_path, '', [0, 0, 0, 0, 0, 0], cut_img)
        else:
            cut_img = img[self._begin_r:self._end_r + 1, self._begin_c:self._end_c + 1]
            cut_img = cut_img * self._mask  # apply the roi mask
            ImageHandle.ImageHandler.write_img(out_path, '', [0, 0, 0, 0, 0, 0], cut_img)
    def grid_interp_to_station(self, all_data, station_lon, station_lat, method='linear'):
        '''
        func: interpolate values from a regular lon/lat grid onto scattered stations, using griddata
        inputs:
            all_data: [grid_lon, grid_lat, data], i.e. [lon grid, lat grid, value grid]
            station_lon: station longitudes
            station_lat: station latitudes; a single point, a list or a 1-D array
            method: interpolation method, 'linear' by default
        '''
station_lon = np.array(station_lon).reshape(-1, 1)
station_lat = np.array(station_lat).reshape(-1, 1)
lon = all_data[0].reshape(-1, 1)
lat = all_data[1].reshape(-1, 1)
data = all_data[2].reshape(-1, 1)
points = np.concatenate([lon, lat], axis=1)
station_value = griddata(points, data, (station_lon, station_lat), method=method)
station_value = station_value[:, :, 0]
return station_value
    #####################
    # used when an ori2geo.tif lookup raster exists
    #####################
    @staticmethod
    def cut_L1a_img(src_img_path, cuted_img_path, roi):
        """Crop the L1A image
        Args:
            src_img_path (_type_): original L1A image
            cuted_img_path (_type_): output path of the cropped image
            roi (_type_): cropping roi
        """
ori2geo_img = ImageHandle.ImageHandler.get_data(src_img_path)
point_list = np.array(roi)
        # invoke the compiled Cython component to compute the mask
        mask = SAR_GEO.cut_L1A_img(ori2geo_img.astype(np.float64), point_list)
        # apply the mask to both coordinate bands
        ori2geo_img[0, :, :] = ori2geo_img[0, :, :] * mask
        ori2geo_img[1, :, :] = ori2geo_img[1, :, :] * mask
        ImageHandle.ImageHandler.write_img(cuted_img_path, '', [0, 0, 0, 0, 0, 0], ori2geo_img)
        return ori2geo_img  # also persisted as an image
    def tran_geo_to_l1a(self, geo_img_path, out_l1a_img_path, ori_sim_img_path, is_class=False):
        """Resample a cropped, georeferenced product (cover, ndvi, ...) onto the cropped L1A grid
        Args:
            geo_img_path: path of the georeferenced image
            out_l1a_img_path: output path of the image in L1A coordinates
            ori_sim_img_path: path of the cropped lookup raster
            is_class: True for categorical (class) products, which are resampled by nearest neighbour
        """
inverse_gt = ImageHandle.ImageHandler.get_invgeotransform(geo_img_path)
ori2geo_tif = ImageHandle.ImageHandler.get_data(ori_sim_img_path)
height = ImageHandle.ImageHandler.get_img_height(geo_img_path)
width = ImageHandle.ImageHandler.get_img_width(geo_img_path)
        # apply the inverse geotransform: lon/lat -> image x/y
        x = ori2geo_tif[0, :, :]
        y = ori2geo_tif[1, :, :]
        ori2geo_tif[0, :, :] = inverse_gt[0] + inverse_gt[1] * x + inverse_gt[2] * y  # x
        ori2geo_tif[1, :, :] = inverse_gt[3] + inverse_gt[4] * x + inverse_gt[5] * y  # y
del x, y
        geo_tif = ImageHandle.ImageHandler.get_data(geo_img_path)  # load the target image
        ori2geo_tif_shape = ori2geo_tif.shape  # (bands, height, width)
if is_class:
ori2geo_tif = np.round(ori2geo_tif).astype(np.int32)
mask = (ori2geo_tif[0, :, :] >= 0) & (ori2geo_tif[0, :, :] < width) & (ori2geo_tif[1, :, :] >= 0) & (
ori2geo_tif[1, :, :] < height)
ori2geo_tif[0, :, :] = ori2geo_tif[0, :, :] * mask
ori2geo_tif[1, :, :] = ori2geo_tif[1, :, :] * mask
geo_tif_shape = geo_tif.shape
geo_tif_l1a = geo_tif[ori2geo_tif[1, :, :].reshape(-1), ori2geo_tif[0, :, :].reshape(-1)].reshape(
ori2geo_tif.shape[1], ori2geo_tif.shape[2]).astype(np.float32)
del ori2geo_tif, geo_tif
one_ids = np.where(mask == False)
geo_tif_l1a[one_ids[0], one_ids[1]] = np.nan
ImageHandle.ImageHandler.write_img(out_l1a_img_path, '', [0, 0, 0, 0, 0, 0], geo_tif_l1a)
# save_temp_L1A(out_l1a_img_path,geo_tif_l1a)
return geo_tif_l1a
        else:  # numeric product: interpolate the values
mask = (ori2geo_tif[0, :, :] > 0) & (ori2geo_tif[0, :, :] < width - 1) & (ori2geo_tif[1, :, :] > 0) & (
ori2geo_tif[1, :, :] < height - 1)
one_ids = np.where(mask == 1)
x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
result_data = self.grid_interp_to_station([y.reshape(-1), x.reshape(-1), geo_tif.reshape(-1)],
ori2geo_tif[1, one_ids[0], one_ids[1]].reshape(-1),
ori2geo_tif[0, one_ids[0], one_ids[1]].reshape(-1),
method='linear').reshape(-1)
mask = mask.reshape(-1)
result_data_result = np.zeros((ori2geo_tif.shape[1], ori2geo_tif.shape[2]))
result_data_result[:, :] = np.nan
result_data_result = SAR_GEO.insert_data(result_data_result, one_ids[0].astype(np.int32),
one_ids[1].astype(np.int32), result_data)
ImageHandle.ImageHandler.write_img(out_l1a_img_path, '', [0, 0, 0, 0, 0, 0], result_data_result)
# save_temp_L1A(out_l1a_img_path,result_data_result)
return result_data_result
    def tran_lonlats_to_rowcols(self, lonlats, ori_sim_img_path):
        """
        Convert lon/lat coordinates to image row/col numbers
        lonlats: lon/lat pairs, e.g. [[120.53, 31.5], [120.61, 31.5], [120.53, 31.45], [120.61, 31.45]]
        returns row/col pairs, e.g. [[0, 0], [7000, 0], [7000, 8000], [0, 8000]]
        ori_sim_img_path: path of the cropped lookup raster
        """
ori2geo_tif = ImageHandle.ImageHandler.get_data(ori_sim_img_path)
min_lon = np.nanmin(ori2geo_tif[0, :, :])
max_lon = np.nanmax(ori2geo_tif[0, :, :])
min_lat = np.nanmin(ori2geo_tif[1, :, :])
max_lat = np.nanmax(ori2geo_tif[1, :, :])
result = []
for i in range(len(lonlats)):
p = lonlats[i]
if min_lon > p[0] or max_lon < p[0] or min_lat > p[1] or max_lat < p[1]:
result.append([-1, -1])
continue
temp_x = np.square(ori2geo_tif[0, :, :] - p[0]) + np.square(ori2geo_tif[1, :, :] - p[1])
r_c_list = []
r_c = np.argmin(temp_x)
r_c = [r_c // temp_x.shape[1], r_c % temp_x.shape[1]]
r_c_list.append([r_c[0], r_c[1], ori2geo_tif[0, r_c[0], r_c[1]], ori2geo_tif[1, r_c[0], r_c[1]]])
            # gather a small neighbourhood around the nearest cell for interpolation
            for ii in range(r_c[0] - 3, r_c[0] + 3):
                if ii < 0 or ii > temp_x.shape[0] - 1:
                    continue
                for jj in range(r_c[1] - 3, r_c[1] + 3):
                    if jj < 0 or jj > temp_x.shape[1] - 1:
                        continue
                    r_c_list.append([ii, jj, ori2geo_tif[0, ii, jj], ori2geo_tif[1, ii, jj]])
r_c_list = np.array(r_c_list)
points = r_c_list[:, 2:]
f_r = scipy.interpolate.interp2d(r_c_list[:, 2], r_c_list[:, 3], r_c_list[:, 0], kind='linear')
f_c = scipy.interpolate.interp2d(r_c_list[:, 2], r_c_list[:, 3], r_c_list[:, 1], kind='linear')
tar_get_r = f_r(p[0], p[1])[0]
tar_get_c = f_c(p[0], p[1])[0]
if tar_get_r < ori2geo_tif.shape[1] and tar_get_c < ori2geo_tif.shape[
2] and tar_get_r >= 0 and tar_get_c >= 0:
                lon_temp = ori2geo_tif[0, int(round(tar_get_r)), int(round(tar_get_c))]
                lon_lat = ori2geo_tif[1, int(round(tar_get_r)), int(round(tar_get_c))]
                # TODO: screen the result by comparing these back-projected coordinates with p
result.append([tar_get_r, tar_get_c])
else:
result.append([-1, -1])
return result
def tran_lonlats_to_L1A_rowcols(self, meas_data, ori_sim_path):
lonlats = []
data_roi = []
for data in meas_data:
lon = float(data[1])
lat = float(data[2])
if (lon > self._min_lon and lon < self._max_lon and lat > self._min_lat and lat < self._max_lat):
lonlats.append([lon, lat])
data_roi.append(data)
rowcols = self.tran_lonlats_to_rowcols(lonlats, ori_sim_path)
measdata_list = []
for data, rowcol in zip(data_roi, rowcols):
if (rowcol[0] != -1 and rowcol[1] != -1):
measdata_list.append(
[round(rowcol[0]) - self._begin_r, round(rowcol[1]) - self._begin_c, float(data[3])])
return measdata_list
    @staticmethod
    def get_radius_of_influence(lalo_step, src_meta='radar2geo', ratio=3):
        """Get radius of influence based on the lookup table resolution in lat/lon direction"""
        EARTH_RADIUS = 6378122.65  # m
        if src_meta == "geo2radar":
            # geo2radar
            radius = 100e3
        else:
            # radar2geo
            step_deg = max(np.abs(lalo_step))
            step_m = step_deg * np.pi / 180.0 * EARTH_RADIUS
            radius = step_m * ratio
        return radius
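    # Worked example (radar2geo): a 0.001-degree lookup step gives
    # step_m = 0.001 * pi / 180 * 6378122.65 m ~= 111.3 m, so with ratio=3
    # the radius of influence is roughly 334 m.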
    def interp2d_station_to_grid(self, lon, lat, data, loc_range=[18, 54, 73, 135],
                                 det_grid=1, method='linear', projCode=4326):
        # reference: https://blog.csdn.net/weixin_43718675/article/details/103497930
        '''
        func: interpolate station data onto a regular lon/lat grid
        inputs:
            lon: station longitudes
            lat: station latitudes
            data: meteorological values at the stations
            loc_range: [lat_min, lat_max, lon_min, lon_max]; the stations are interpolated onto this extent
            det_grid: spatial resolution of the output grid
            method: interpolation method, 'linear' by default
        return:
            [lon_grid, lat_grid, data_grid]
        '''
        # step 1: reshape lon, lat and data into n*1 arrays
        lon = np.array(lon).reshape(-1, 1)
        lat = np.array(lat).reshape(-1, 1)
        data = np.array(data).reshape(-1, 1)
        # step 2: define the lon/lat grid of the interpolation area
lat_min = loc_range[0] # y
lat_max = loc_range[1] # y
lon_min = loc_range[2] # x
lon_max = loc_range[3] # x
gt = [0, 0, 0, 0, 0, 0]
gt[0] = lon_min # x
gt[1] = det_grid
gt[3] = lat_max # y
gt[5] = -det_grid
lat_count = int((lat_max - lat_min) / det_grid + 1) # y
lon_count = int((lon_max - lon_min) / det_grid + 1) # x
        # delegate the resampling to pyresample
proj_osr = osr.SpatialReference()
proj_osr.ImportFromEPSG(projCode)
projection = proj_osr.ExportToPROJJSON()
# lower_left_x、lower_left_y、upper_right_x、upper_right_y
target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
source_def = geometry.SwathDefinition(lons=lon, lats=lat)
lalo_step = [det_grid, -det_grid]
radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
result = pr.bilinear.resample_bilinear(data, source_def, target_def,
radius=radius, neighbours=32,
nprocs=8, fill_value=np.nan,
epsilon=0)
#
return result
def geocoding(self, ori_geo_tif, produc_arr, pixel_delta=1, method='linear'):
# 参考链接 https://blog.csdn.net/weixin_43718675/article/details/103497930
ori_geo_tif[np.isnan(ori_geo_tif)] = -1
lon_data = ori_geo_tif[0, :, :].reshape(-1)
lat_data = ori_geo_tif[1, :, :].reshape(-1)
idx = np.where(lat_data != -1)
lat_data = lat_data[idx]
lon_data = lon_data[idx]
idx = np.where(lon_data != -1)
lat_data = lat_data[idx]
lon_data = lon_data[idx]
# ###########################################
result = self.interp2d_station_to_grid(lon_data, lat_data, produc_arr,
[self._min_lat, self._max_lat, self._min_lon, self._max_lon],
det_grid=pixel_delta, method=method)
return result
# def l1a_2_geo(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='linear'):
# ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path)
# # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path)
# l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1)
# pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001
# pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c)
#
# lon_data = ori_geo_tif[0, :, :].reshape(-1)
# lat_data = ori_geo_tif[1, :, :].reshape(-1)
# l1a_produc = l1a_produc.reshape(-1)
# idx = np.logical_not(np.isnan(lon_data))
# lat_data = lat_data[idx]
# lon_data = lon_data[idx]
# l1a_produc = l1a_produc[idx]
# idx = np.logical_not(np.isnan(lat_data))
# lat_data = lat_data[idx]
# lon_data = lon_data[idx]
# l1a_produc = l1a_produc[idx]
#
# gt = [self._min_lon, pixel_delta_x, 0.0,
# self._max_lat, 0.0, -pixel_delta_y]
# [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon]
# lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y
# lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x
#
# # 获取地理坐标系统信息,用于选取需要的地理坐标系统
# srs = osr.SpatialReference()
# srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84"
# proj = srs.ExportToWkt()
#
# projection = srs.ExportToPROJJSON()
# # lower_left_x、lower_left_y、upper_right_x、upper_right_y
# target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
# lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
# lon_data = lon_data.reshape(-1, 1)
# lat_data = lat_data.reshape(-1, 1)
# l1a_produc = l1a_produc.reshape(-1, 1)
# source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data)
# lalo_step = [pixel_delta_x, -pixel_delta_y]
# radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
# geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def,
# radius=radius, neighbours=32,
# nprocs=8, fill_value=np.nan,
# epsilon=0)
#
# ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc, np.nan)
#
# def l1a_2_geo_int(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='nearest'):
# ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path)
# # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path)
# l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1)
# pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001
# pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c)
#
# lon_data = ori_geo_tif[0, :, :].reshape(-1)
# lat_data = ori_geo_tif[1, :, :].reshape(-1)
# l1a_produc = l1a_produc.reshape(-1)
# idx = np.logical_not(np.isnan(lon_data))
# lat_data = lat_data[idx]
# lon_data = lon_data[idx]
# l1a_produc = l1a_produc[idx]
# idx = np.logical_not(np.isnan(lat_data))
# lat_data = lat_data[idx]
# lon_data = lon_data[idx]
# l1a_produc = l1a_produc[idx]
#
# gt = [self._min_lon, pixel_delta_x, 0.0,
# self._max_lat, 0.0, -pixel_delta_y]
# [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon]
# lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y
# lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x
#
# # 获取地理坐标系统信息,用于选取需要的地理坐标系统
# srs = osr.SpatialReference()
# srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84"
# proj = srs.ExportToWkt()
#
# projection = srs.ExportToPROJJSON()
# # lower_left_x、lower_left_y、upper_right_x、upper_right_y
# target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
# lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
# lon_data = lon_data.reshape(-1, 1)
# lat_data = lat_data.reshape(-1, 1)
# l1a_produc = l1a_produc.reshape(-1, 1)
# source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data)
# lalo_step = [pixel_delta_x, -pixel_delta_y]
# radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
# if method == 'linear':
# geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def,
# radius=radius, neighbours=32,
# nprocs=8, fill_value=0,
# epsilon=0)
# elif method == 'nearest':
# geo_produc = pr.kd_tree.resample_nearest(source_def, l1a_produc, target_def, epsilon=0,
# radius_of_influence=50000,
# fill_value=0, nprocs=8
# )
# geo_produc = geo_produc[:,:,0]
# ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc)
@property
def mask(self):
return self._mask
if __name__ == '__main__':
# ori_sim_path = r"I:\坐标转换\坐标转换接口\L1A数据l1a_img_path数据\RPC_ori_sim.tif"
# roi_Extend = [[120.53, 31.5], [120.61, 31.5], [120.61, 31.45], [120.53, 31.45]]
# conver_path = r"I:\坐标转换\坐标转换接口\裁剪后辅助数据geo_img_path数据\Covering_cut.tif"
# ndvi_path = r"I:\坐标转换\坐标转换接口\裁剪后辅助数据geo_img_path数据\NDVI_cut.tif"
# out_path = r"I:\坐标转换\SAR2GEO\test"
#
# tr = TransImgL1A(ori_sim_path,roi_Extend)
# tr.l1a_2_geo("I:/cut.tif", "I:/salinity.tif", "I:/salinity_geo2.tif")
ori_sim = r'D:\micro\WorkSpace\SurfaceRoughness\Temporary\preprocessed\ori_sim_preprocessed.tif'
product_tif = r'D:\micro\WorkSpace\SurfaceRoughness\Temporary\SurfaceRoughnessProduct_temp.tif'
result = r'D:\micro\WorkSpace\SurfaceRoughness\Temporary\SurfaceRoughnessProduct.tif'
method = 'linear'
"""
31.14;31.50;120.34;120.75
"""
# roi_Extend = [[102.12, 33.879], [102.327, 33.879], [102.327, 33.66], [102.12, 31.45]]
ori_sim_data = ImageHandle.ImageHandler.get_data(ori_sim)
lon = ori_sim_data[0,:,:]
lat = ori_sim_data[1,:,:]
min_lon = np.nanmin(lon)
max_lon = np.nanmax(lon)
min_lat = np.nanmin(lat)
max_lat = np.nanmax(lat)
print(np.nanmin(lon))
print(np.nanmax(lon))
print(np.nanmin(lat))
print(np.nanmax(lat))
# roi_Extend = [[min_lon, max_lat], [max_lon, max_lat], [min_lon, min_lat], [max_lon, min_lat]]
roi_Extend = [[116.17328, 43.727577], [116.652504, 43.727577], [116.652504, 44.119164], [116.17328, 44.119164]]
# roi_Extend = [[108.51960117899473, 38.192443138079895], [109.62308480328566, 38.192443138079895], [109.62308480328566, 37.69300142375064], [108.51960117899473, 37.69300142375064]]
    tr = TransImgL1A(ori_sim, roi_Extend)
    # NOTE: l1a_2_geo_int is commented out in the class above; restore it before running this script
    tr.l1a_2_geo_int(ori_sim, product_tif, result, method)
    pass
"""
import numpy as np
from pyresample import kd_tree, geometry
area_def = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD',
{'a': '6378144.0', 'b': '6356759.0',
'lat_0': '50.00', 'lat_ts': '50.00',
'lon_0': '8.00', 'proj': 'stere'},
800, 800,
[-1370912.72, -909968.64,
1029087.28, 1490031.36])
data = np.fromfunction(lambda y, x: y*x, (50, 10))
lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
result = kd_tree.resample_nearest(swath_def, data,area_def, radius_of_influence=50000, epsilon=0.5)
"""


@ -0,0 +1,730 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File AlgXmlHandle.py
@Function 算法描述文件读写和检查
@Contact https://www.cnblogs.com/feifeifeisir/p/10893127.html
@Author SHJ
@Date 2021/9/6
@Version 1.0.0
"""
import logging
from xml.etree.ElementTree import ElementTree
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.file.fileHandle import fileHandle
import os
import re
import platform
import psutil
import multiprocessing
import ctypes
logger = logging.getLogger("mylog")
import glob
class ManageAlgXML:
"""
检查和读取XML文件信息
"""
def __init__(self, xml_path):
self.in_path = xml_path
self.__tree = ElementTree()
self.__root = None
self.__alg_compt = None
self.__workspace_path = None
self.__taskID = None
self.__algorithm_name = None
self.__envs = {}
self.__input_paras = {}
self.__output_paras = {}
self.__init_flag = False
def init_xml(self):
"""
初始化XML文件
:return: True初始化成功 False 初始化失败
"""
try:
self.__tree.parse(self.in_path)
except FileNotFoundError as ex:
msg = ex + "xml_path = " + self.in_path
raise Exception(msg)
except BaseException:
raise Exception("cannot open algXMl")
self.__root = self.__tree.getroot()
if self.__root is None:
raise Exception("get root failed")
self.__alg_compt = self.__root.find("AlgCompt")
if self.__alg_compt is None:
raise Exception("get AlgCompt failed")
self.__workspace_path = self.__check_workspace_path()
if self.__workspace_path is None:
raise Exception("check workspace_path failed")
self.__taskID = self.__check_task_id()
if self.__taskID is None:
raise Exception("check taskID failed")
self.__algorithm_name = self.__check_algorithm_name()
if self.__algorithm_name is None:
raise Exception("check AlgorithmName failed")
self.__envs = self.__check_environment()
if self.__envs is None or self.__envs == {}:
raise Exception("check environment failed")
self.__input_paras = self.__check_input_para()
if self.__input_paras is None or self.__input_paras == {}:
raise Exception("check input para failed")
self.__output_paras = self.__check_output_para()
self.__init_flag = True
return True
def get_workspace_path(self):
"""
获取工作空间路径
:return: 工作空间路径 None-异常
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__workspace_path
def get_task_id(self):
"""
获取任务ID
:return: taskID None-异常
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__taskID
def get_algorithm_name(self):
"""
获取算法名
:return:
"""
if not self.__init_flag:
raise Exception("AlgorithmName is not initialized")
return self.__algorithm_name
def get_envs(self):
"""
获取运行环境要求
:return:运行环境要求 None-异常
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__envs
def get_input_paras(self):
"""
获取输入参数
:return:输入参数 None-异常
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__input_paras
def get_output_paras(self):
"""
获取输出参数
:return:输出参数 None-异常
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__output_paras
    def __check_workspace_path(self):
        """
        Check the workspace path
        :return: workspace path; None on error
        """
        workspace_note = self.__root.find("WorkSpace")
        workspace_path = str(workspace_note.text).replace("\n", "").replace(' ', '')  # strip spaces and newlines
        if workspace_path is None:
            raise Exception("'workspace_path' is None")
        if not os.path.isdir(workspace_path):
            raise Exception("'workspace_path' does not exist: %s" % workspace_path)
        if workspace_path[-1] != '\\':
            workspace_path += '\\'  # make sure the path ends with a backslash
        return workspace_path
    def __check_environment(self):
        """
        Check the runtime environment requirements in the XML file
        :return: dict of environment requirements; None on error
        """
        env_note = self.__alg_compt.find("Environment")
        is_cluster = int(env_note.find("IsCluster").text.replace("\n", "").replace(' ', ''))
        is_legal = is_cluster in [0, 1]
        if not is_legal:
            raise Exception("IsCluster is not 0 or 1")
        cluster_num = int(env_note.find("ClusterNum").text)
        is_legal = cluster_num in [0, 1, 2, 3, 4, 5, 6, 7]
        if not is_legal:
            raise Exception("cluster_num is beyond [0,1,2,3,4,5,6,7]")
        operating_system = env_note.find("OperatingSystem").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
        # is_legal = operating_system in ["Windows10", "Windows7", "WindowsXP"]
        # if not is_legal:
        #     raise Exception("OperatingSystem is beyond [Windows10, Windows7, WindowsXP]")
        cpu = env_note.find("CPU").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
        is_legal = cpu in ["单核", "双核", "3核", "4核", "6核", "8核"]
        if not is_legal:
            raise Exception("CPU is beyond [单核, 双核, 3核, 4核, 6核, 8核]")
        memory = env_note.find("Memory").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
        is_legal = memory in ["1GB", "2GB", "4GB", "6GB", "8GB", "10GB", "12GB", "16GB"]
        if not is_legal:
            raise Exception("Memory is beyond [1GB, 2GB, 4GB, 6GB, 8GB, 10GB, 12GB, 16GB]")
        storage = env_note.find("Storage").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
        is_legal = int(storage[:-2]) > 0
        if not is_legal:
            raise Exception("Storage must be > 0GB")
        network_card = env_note.find("NetworkCard").text
        band_width = env_note.find("Bandwidth").text
        gpu = env_note.find("GPU").text
        # NetworkCard / Bandwidth / GPU carry no hard requirement, so they are not validated
        envs = {"is_Cluster": is_cluster, "cluster_num": cluster_num, "operating_system": operating_system,
                "CPU": cpu, "memory": memory}
        envs.update({"Storage": storage, "network_card": network_card, "band_width": band_width, "GPU": gpu})
        return envs
def __check_input_para(self):
"""
检查XML文件中输入参数
:return: dic-输入参数 None-异常
"""
input_paras_note = self.__alg_compt.find("Inputs")
paras_num = int(input_paras_note.attrib.get("ParameterNum"))
para_list = input_paras_note.findall("Parameter")
if paras_num != len(para_list):
msg ="'ParameterNum':"+ str(paras_num) + " != number of 'Parameter':" + str(len(para_list))
logger.warning(msg)
input_paras = {}
for para in para_list:
para_name = para.find("ParaName").text.replace("\n", "").replace(' ', '') #去除空格和回车
para_chs_name = para.find("ParaChsName").text.replace("\n", "").replace(' ', '') #去除空格和回车
para_type = para.find("ParaType").text.replace("\n", "").replace(' ', '') #去除空格和回车
data_type = para.find("DataType").text.replace("\n", "").replace(' ', '') #去除空格和回车
para_value = para.find("ParaValue").text.replace("\n", "").replace(' ', '') #去除空格和回车
input_para = {"ParaName": para_name, "ParaChsName": para_chs_name, "ParaType": para_type,
"DataType": data_type, "ParaValue": para_value}
#print(para_name)
if para_type == "Value":
# max_value = para.find("MaxValue").text
# min_value = para.find("MinValue").text
# option_value = para.find("OptionValue").text.replace("\n", "").replace(' ', '') #去除空格和回车
# input_para.update({"MaxValue": max_value, "MinValue": min_value, "OptionValue": option_value})
# input_para.update({"OptionValue": option_value}) todo
pass
if para_name is None or para_type is None or para_value is None:
msg = 'there is None among para_name:' + para_name + ',para_type:' + para_type + 'or para_value:' + para_value + '!'
raise Exception(msg)
input_paras.update({para_name: input_para})
return input_paras
def __check_output_para(self):
"""
检查XML文件中输出参数
:return: dic-输出参数 None-异常
"""
output_paras_note = self.__alg_compt.find("Outputs")
paras_num = int(output_paras_note.attrib.get("ParameterNum"))
para_list = output_paras_note.findall("Parameter")
if paras_num != len(para_list):
raise Exception("'ParameterNum' != number of 'Parameter'")
output_paras = {}
return output_paras
def write_out_para(self, para_name, para_value):
"""
写入输出参数
"""
output_paras_note = self.__alg_compt.find("Outputs")
para_list = output_paras_note.findall("Parameter")
flag = False
for para in para_list:
if para.find("ParaName").text == para_name:
para.find("ParaValue").text = para_value
flag = True
if flag == False:
raise Exception('Cannot find Output Parameter:'+para_name+'!')
self.__tree.write(self.in_path, encoding="utf-8", xml_declaration=True)
def __check_task_id(self):
"""
检查任务ID
:return: taskID None-异常
"""
task_id_note = self.__root.find("TaskID")
task_id = str(task_id_note.text).replace("\n", "").replace(' ', '') #去除空格和回车
if task_id is None:
raise Exception("'TaskID' is None")
return task_id
def __check_algorithm_name(self):
algorithm_name_note = self.__alg_compt.find("AlgorithmName")
        algorithm_name = str(algorithm_name_note.text).replace("\n", "").replace(' ', '')  # strip spaces and newlines
if algorithm_name is None:
raise Exception("'AlgorithmName' is None")
return algorithm_name
class CheckSource:
"""
检查配置文件中资源的完整性和有效性
"""
def __init__(self, alg_xml_handle):
self.__alg_xml_handle = alg_xml_handle
self.imageHandler = ImageHandler()
self.__ParameterDic={}
def check_alg_xml(self):
"""
检查算法配置文件
"""
if self.__alg_xml_handle.init_xml():
logger.info('init algXML succeed')
return True
else:
raise Exception('init algXML failed')
def check_run_env(self):
"""
:return: True-正常False-异常
"""
envs = self.__alg_xml_handle.get_envs()
# 检查操作系统
local_plat = platform.platform()
local_plat_list = local_plat.split("-")
flag = envs['operating_system'] == local_plat_list[0]+local_plat_list[1]
if flag is False:
msg = 'operating_system:' + local_plat_list[0] + local_plat_list[1] + ' is not ' + envs['operating_system']
#raise Exception(msg)
# 检查电脑显存
mem = psutil.virtual_memory()
mem_total = int(round(mem.total / 1024 / 1024 / 1024, 0))
mem_free = round(mem.free / 1024 / 1024 / 1024, 0)
env_memory = envs['memory']
env_memory = int(env_memory[:-2])
if env_memory > mem_total:
msg = 'memory_total ' + str(mem_total) + ' less than'+str(env_memory) + 'GB'
# raise Exception(msg)
if env_memory >= mem_free:
msg = 'mem_free ' + str(mem_free) + 'GB less than' + str(env_memory) + 'GB'
logger.warning(msg)
# 检查CPU核数
env_cpu = envs['CPU']
if env_cpu == "单核":
env_cpu_core_num = 1
elif env_cpu == "双核":
env_cpu_core_num = 2
elif env_cpu == "三核":
env_cpu_core_num = 3
else:
env_cpu_core_num = int(env_cpu[:-1])
local_cpu_core_num = int(multiprocessing.cpu_count()/2)
if env_cpu_core_num > local_cpu_core_num:
msg = 'CPU_core_num ' + str(local_cpu_core_num) + 'core less than' + str(env_cpu_core_num) + ' core'
# raise Exception(msg)
# 检查磁盘的内存
env_storage = envs['Storage']
env_storage = int(env_storage[:-2])
workspace_path = self.__alg_xml_handle.get_workspace_path()
if not os.path.isdir(workspace_path):
raise Exception('workspace_path:%s do not exist!', workspace_path)
local_storage = self.__get_free_space_mb(workspace_path)
if env_storage > local_storage:
msg = 'workspace storage ' + str(local_storage) + 'GB less than' + envs['Storage'] +"GB"
# raise Exception(msg)
return True
@staticmethod
    def __get_free_space_mb(folder):
        """
        :param folder: path to check, e.g. 'C:\\'
        :return: free space of the folder/drive in GB
        """
        if platform.system() == 'Windows':
            free_bytes = ctypes.c_ulonglong(0)
            ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
            return free_bytes.value / 1024 / 1024 / 1024
        else:
            st = os.statvfs(folder)
            return st.f_bavail * st.f_frsize / 1024 / 1024 / 1024  # bytes -> GB, matching the Windows branch
def check_input_paras(self, input_para_names):
"""
:param input_para_names :需要检查参数的名称列表[name1,name2,...]
:return: 检测是否正常
"""
workspace_path = self.__alg_xml_handle.get_workspace_path()
input_paras = self.__alg_xml_handle.get_input_paras()
for name in input_para_names:
para = input_paras[name]
if para is None:
msg = "check para:"+name + " is failed!"+"para is None!"
raise Exception(msg)
if para['ParaType'] == 'File':
if para['DataType'] == 'tif':
                    if para['ParaValue'] != 'empty' and para['ParaValue'] != 'Empty' and para['ParaValue'] != '':
para_value_list = para['ParaValue'].split(";")
for para_value in para_value_list:
para_path = para_value
if self.__check_tif(para_path) is False:
msg = "check para:"+name + " is failed!" + "Path:" + para_path
raise Exception(msg)
if para['DataType'] == 'xml':
para_path = para['ParaValue']
if not os.path.exists(para_path):
raise Exception('para_file:%s is inexistent!', para_path)
if para['DataType'] == 'File':
para_path = para['ParaValue']
if os.path.isdir(para_path) is False:
msg = "check para:" + name + " is failed!" + "FilePath:" + para_path
raise Exception(msg)
if para["DataType"]=="ymal":
para_path = para['ParaValue']
if os.path.isfile(para_path) is False:
msg = "check para: " + name + " is failed! " + " FilePath: " + para_path
raise Exception(msg)
elif para['ParaType'] == 'Value':
if para['DataType'] == 'float' or para['DataType'] == 'int' or para['DataType'] == 'double':
if para['ParaValue'] is None:
msg = "check para:"+name + " is failed!"+"'ParaValue' is None"
raise Exception(msg)
if self.__is_number(para['ParaValue']) is False:
raise Exception("para:"+name+" is not number!")
# if (para['MaxValue'] is not None) and (self.__is_number(para['MaxValue']) is True):
# value = para['ParaValue']
# max = para['MaxValue']
# if float(value) > float(max):
# msg = "para:" + name + " > max, para:" + value + "max:" + max
# raise Exception(msg)
# if (para['MinValue'] is not None) and (self.__is_number(para['MinValue']) is True):
# value = para['ParaValue']
# min = para['MinValue']
# if float(value) < float(min):
# msg = "para:" + name + " < min, para:" + value + "min:" + min
# raise Exception(msg)
self.__ParameterDic[name] = para['ParaValue']
        return True, self.__ParameterDic
def check_output_paras(self, output_para_names):
"""
:param output_para_names :需要检查参数的名称列表[name1,name2,...]
:return: Ture or False
"""
workspace_path = self.__alg_xml_handle.get_workspace_path()
output_paras = self.__alg_xml_handle.get_output_paras()
for name in output_para_names:
para = output_paras[name]
#print(para)
if para is None:
msg = "check para:" + name + " is failed!" + "para is None!"
raise Exception(msg)
if para['ParaType'] == 'File':
if para['DataType'] == 'tif':
para_path = workspace_path + para['ParaValue']
                    para_dir = os.path.split(para_path)
                    flag_isdir = os.path.isdir(para_dir[0])
                    flag_istif = (para_dir[1].split(".", 1)[1] == "tif")
                    if not (flag_isdir and flag_istif):  # both conditions must hold
                        msg = "check para:" + name + " is failed!" + para_path + "is invalid!"
                        raise Exception(msg)
if para['DataType'] == 'File':
para_path = workspace_path + para['ParaValue']
if os.path.isdir(para_path) is False:
os.makedirs(para_path)
if os.path.isdir(para_path) is False:
msg = "check para:" + name + " is failed!" + para_path + "is invalid!"
raise Exception(msg)
return True
@staticmethod
def __is_number(str_num):
"""
:param str_num: string to check as a float or double
:return: True or False
"""
if str_num.startswith('-'):
str_num = str_num[1:]
pattern = re.compile(r'(.*)\.(.*)\.(.*)')
if pattern.match(str_num):
return False
return str_num.replace(".", "").isdigit()
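# A quick sanity sketch of the intended behavior (illustrative values, not from the source):
#   __is_number('-3.14') -> True, __is_number('42') -> True, __is_number('1.2.3') -> False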
def __check_tif(self, filename):
"""
:param filename: path of the tif file
:return: True or False
"""
if self.imageHandler.get_dataset(filename) is None:
msg = "read tif error!,finame: " + filename
raise Exception(msg)
return True
class InitPara:
def __init__(self,debug = False):
self._debug = debug
@staticmethod
def init_processing_paras(input_paras):
"""
:param input_paras: dict of input parameters; each entry holds one input product's configuration
"""
processing_paras = {}
for name in input_paras:
para = input_paras[name]
if para is None:
logger.error(name + " is None!")
return False
if para['ParaType'] == 'File':
if para['DataType'] == 'tif' or para['DataType'] == 'csv':
para_value_list = para['ParaValue'].split(";")
if len(para_value_list) == 1:
para_path = para['ParaValue']
if para_path != 'empty' and para_path != '':
processing_paras.update({name: para_path})
else:
for n, para_value in zip(range(len(para_value_list)), para_value_list):
processing_paras.update({name+str(n): para_value})
elif para['DataType'] == 'tar.gz':
paths = para['ParaValue'].split(';')
for n, path in zip(range(len(paths)), paths):
processing_paras.update({'sar_path' + str(n): path})
else:
para_path = para['ParaValue']
processing_paras.update({name: para_path})
elif para['ParaType'] == 'Value':
if para['DataType'] == 'float':
value = float(para['ParaValue'])
elif para['DataType'] == 'int':
value = int(para['ParaValue'])
else: # 默认string
value = para['ParaValue']
processing_paras.update({name: value})
elif para['ParaType'] == 'String':
value = para['ParaValue']
if value == 'empty':
continue
else:
processing_paras.update({name: value})
return processing_paras
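# Illustrative sketch of the mapping above (hypothetical paths, not shipped data): an input entry
#   {'ParaType': 'File', 'DataType': 'tar.gz', 'ParaValue': r'D:\a.tar.gz;D:\b.tar.gz'}
# ends up in processing_paras as {'sar_path0': r'D:\a.tar.gz', 'sar_path1': r'D:\b.tar.gz'}.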
# Get the tif files inside a folder
@staticmethod
def get_tif_paths(file_dir,name):
sub_dir = file_dir + name + '\\'
search_dir = sub_dir if os.path.exists(sub_dir) else file_dir
in_tif_paths = list(glob.glob(os.path.join(search_dir, '*.tif')))
# also collect .tiff files; extending with an empty list is harmless
in_tif_paths += list(glob.glob(os.path.join(search_dir, '*.tiff')))
return in_tif_paths
@staticmethod
def get_tif_paths_new(file_dir, name):
in_tif_paths = []
if os.path.exists(file_dir + name + '\\'):
in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tiff')))
if in_tif_paths1 != []:
in_tif_paths = in_tif_paths + in_tif_paths1
else:
in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(file_dir, '*.tiff')))
if len(in_tif_paths) == 0:
in_tif_paths = in_tif_paths + in_tif_paths1
return in_tif_paths
@staticmethod
def get_polarization_mode(in_tif_paths):
pol_dic = {}
pola_list = [0,0,0,0]
for in_tif_path in in_tif_paths:
# determine the polarization type from the file name
if '_HH_' in os.path.basename(in_tif_path):
pol_dic.update({'HH': in_tif_path})
pola_list[0] = 1
elif '_HV_' in os.path.basename(in_tif_path):
pol_dic.update({'HV': in_tif_path})
pola_list[1] = 1
elif '_VH_' in os.path.basename(in_tif_path):
pol_dic.update({'VH': in_tif_path})
pola_list[2] = 1
elif '_VV_' in os.path.basename(in_tif_path):
pol_dic.update({'VV': in_tif_path})
pola_list[3] = 1
elif 'LocalIncidenceAngle' in os.path.basename(in_tif_path) or 'ncidenceAngle' in os.path.basename(in_tif_path):
pol_dic.update({'LocalIncidenceAngle': in_tif_path})
elif 'inc_angle' in os.path.basename(in_tif_path):
pol_dic.update({'inc_angle': in_tif_path})
elif 'inci_Angle-ortho' in os.path.basename(in_tif_path):
pol_dic.update({'inci_Angle-ortho': in_tif_path})
elif 'LocalincidentAngle-ortho' in os.path.basename(in_tif_path):
pol_dic.update({'LocalIncidentAngle-ortho': in_tif_path})
elif 'ori_sim' in os.path.basename(in_tif_path):
pol_dic.update({'ori_sim': in_tif_path})
elif 'sim_ori' in os.path.basename(in_tif_path):
pol_dic.update({'sim_ori': in_tif_path})
pol_dic.update({'pola':pola_list})
return pol_dic
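# Illustrative sketch (hypothetical file names): for tif paths containing '_HH_' and '_HV_' the
# result is {'HH': <hh tif>, 'HV': <hv tif>, 'pola': [1, 1, 0, 0]} (pola order: HH, HV, VH, VV).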
@staticmethod
def get_meta_paths(file_dir, name):
meta_xml_paths = []
if os.path.exists(file_dir + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.meta.xml')))
else:
meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.meta.xml')))
if meta_xml_paths is None or meta_xml_paths == []:
raise Exception('there is no .meta.xml in path: ' + file_dir)
return meta_xml_paths
@staticmethod
def get_incidence_xml_paths(file_dir, name):
meta_xml_paths = []
if os.path.exists(file_dir + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.incidence.xml')))
else:
meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.incidence.xml')))
if meta_xml_paths is None or meta_xml_paths == []:
raise Exception('there is no .incidence.xml in path: ' + file_dir)
return meta_xml_paths
@staticmethod
def get_meta_dic(meta_xml_paths, name):
para_dic = {}
for meta_path in meta_xml_paths:
if name in meta_path:
para_dic.update({'META': meta_path})
if not para_dic:
raise Exception('the name of .meta.xml is wrong!')
return para_dic
@staticmethod
def get_incidence_dic(meta_xml_paths, name):
para_dic = {}
for meta_path in meta_xml_paths:
if name in meta_path:
para_dic.update({'Incidence': meta_path})
if not para_dic:
raise Exception('the name of .incidence.xml is wrong!')
return para_dic
@staticmethod
def get_meta_dic_new(meta_xml_paths, name):
para_dic = {}
for meta_path in meta_xml_paths:
if name in os.path.basename(meta_path):
para_dic.update({'META': meta_path})
else:
para_dic.update({'Origin_META': meta_path})
if not para_dic:
raise Exception('the name of .meta.xml is wrong!')
return para_dic
@staticmethod
def get_meta_dic_VP(meta_xml_paths, name):
para_dic = {}
for meta_path in meta_xml_paths:
if name in os.path.basename(meta_path):
para_dic.update({name + '_META': meta_path})
else:
para_dic.update({name + '_Origin_META': meta_path})
if not para_dic:
raise Exception('the name of .meta.xml is wrong!')
return para_dic
def get_mult_tar_gz_inf(self,tar_gz_path, workspace_preprocessing_path):
para_dic = {}
name = os.path.split(tar_gz_path)[1]
if name.endswith('.tar.gz'):
name = name[:-len('.tar.gz')]  # rstrip('.tar.gz') would strip characters, not the suffix
para_dic.update({'name': name})
file_dir = os.path.join(workspace_preprocessing_path, name + '\\')
if not self._debug:
fileHandle().de_targz(tar_gz_path, file_dir)
# metadata file dictionary
para_dic.update(InitPara.get_meta_dic_VP(InitPara.get_meta_paths(file_dir, name), name))
# tif path dictionary
pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))
parameter_path = os.path.join(file_dir, "orth_para.txt")
para_dic.update({name + "paraMeter": parameter_path})
for key, in_tif_path in pol_dic.items():
para_dic.update({name + '_' + key: in_tif_path})
return para_dic
def get_mult_tar_gz_infs(self,processing_paras, workspace_preprocessing_path):
tif_names_list = []
tar_inf_dic = {}
for key, value in processing_paras.items():
if 'sar_path' in key:
para_dic = self.get_mult_tar_gz_inf(value, workspace_preprocessing_path)
tif_names_list.append(para_dic['name'])
para_dic.pop('name')
tar_inf_dic.update(para_dic)
tar_inf_dic.update({'name_list': tif_names_list})
return tar_inf_dic


@@ -0,0 +1,65 @@
from xml.etree.ElementTree import ElementTree
import os
class DictXml:
def __init__(self, xml_path):
self.xml_path = xml_path
self.__tree = ElementTree()
self.__root = None
self.init_xml()
def init_xml(self):
self.__root = self.__tree.parse(self.xml_path)
if self.__root is None:
raise Exception("get root failed")
def get_extend(self):
productInfo = self.__root.find("imageinfo")
if productInfo is None:
raise Exception("get imageInfo failed")
corner = productInfo.find("corner")
if corner is None:
raise Exception("get corner failed")
topLeft = corner.find("topLeft")
if topLeft is None:
raise Exception("get topLeft failed")
topRight = corner.find("topRight")
if topRight is None:
raise Exception("get topRight failed")
bottomLeft = corner.find("bottomLeft")
if bottomLeft is None:
raise Exception("get bottomLeft failed")
bottomRight = corner.find("bottomRight")
if bottomRight is None:
raise Exception("get bottomRight failed")
point_upleft = [float(topLeft.find("longitude").text), float(topLeft.find("latitude").text)]
point_upright = [float(topRight.find("longitude").text), float(topRight.find("latitude").text)]
point_downleft = [float(bottomLeft.find("longitude").text), float(bottomLeft.find("latitude").text)]
point_downright = [float(bottomRight.find("longitude").text), float(bottomRight.find("latitude").text)]
scopes = [point_upleft, point_upright, point_downleft, point_downright]
point_upleft_buf = [float(topLeft.find("longitude").text) - 0.5, float(topLeft.find("latitude").text) + 0.5]
point_upright_buf = [float(topRight.find("longitude").text) + 0.5, float(topRight.find("latitude").text) + 0.5]
point_downleft_buf = [float(bottomLeft.find("longitude").text) - 0.5, float(bottomLeft.find("latitude").text) - 0.5]
point_downright_buf = [float(bottomRight.find("longitude").text) + 0.5, float(bottomRight.find("latitude").text) - 0.5]
scopes_buf = [point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf]
return scopes, scopes_buf
if __name__ == '__main__':
xml_path = r'E:\MicroWorkspace\GF3A_nanjing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422.meta.xml'
scopes, scopes_buf = DictXml(xml_path).get_extend()
print(scopes)
print(scopes_buf)
# path = r'D:\BaiduNetdiskDownload\GZ\lon.rdr'
# path2 = r'D:\BaiduNetdiskDownload\GZ\lat.rdr'
# path3 = r'D:\BaiduNetdiskDownload\GZ\lon_lat.tif'
# s = ImageHandler().band_merge(path, path2, path3)
# print(s)
# pass


@@ -0,0 +1,319 @@
"""
@Project microproduct
@File OnePlantHeight.PY
@Function main function
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
from xml.etree.ElementTree import ElementTree, Element
class CreateMetafile:
"""
生成元文件案例
"""
def __init__(self, input_image_path, input_para_file, an_li_path, path):
"""
input_image_path: 影像头文件
input_para_file: 配置文件
an_li_path案例路径
path保存路径
"""
self.input_image_path = input_image_path
self.input_para_file = input_para_file
self.an_li_path= an_li_path
self.path = path
pass
def create_xml(self):
"""
Read the meta file and keep only the part from the header down to the "sensor" node;
return the position of the "sensor" node.
"""
tree = ElementTree()
tree.parse(self.input_image_path)  # image header file
root = tree.getroot()
# 1. keep only the part from the header down to the "sensor" node
element_trees = list(root)
count = 0
count_01=1
for element in element_trees:
count = count+1
if element.tag == "sensor":
element.tail = "\n\n\t"
count_01 = count-1
for i in range(0, len(element_trees)):
if i > count_01:
root.remove(element_trees[i])
# 2. keep only the "satellite", "orbitType", "attiType", "Direction", "ReceiveTime" and "sensor" nodes
element_trees2 = list(root)
for i in element_trees2:
if i.tag not in ["satellite", "orbitType", "attiType", "Direction", "ReceiveTime", "sensor"]:
root.remove(i)
# 3. locate the "sensor" node and return its position
count2 = 0
count2_01=1
element_trees3 = list(root)
for element in element_trees3:
count2 = count2+1
if element.tag == "sensor":
element.tail = "\n\n\t"
count2_01 = count2-1
tree.write(self.path, encoding="utf-8", xml_declaration=True)
return count2_01
@staticmethod
def create_node(tag, property_map, content):
"""
fun: create a new node
:param tag: node tag
:param property_map: dict of attributes and their values
:param content: text content inside the node
:return: the new node
"""
element = Element(tag, property_map)
element.text = content
element.tail = "\n\t"
return element
def add_standard_xml(self, num):
"""
Write the template content directly into the metadata file
"""
tree = ElementTree()
tree.parse(self.path)  # generated metadata file
root = tree.getroot()
tree2 = ElementTree()
tree2.parse(self.an_li_path)  # template file
root2 = tree2.getroot()
productinfo = root2.find("productinfo")
root.insert(num + 1, productinfo)
processinfo = root2.find("processinfo")
root.insert(num + 2, processinfo)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def add_img_xml(self, num,SrcImageName):
"""添加影像信息"""
tree = ElementTree()
tree.parse(self.path)
root = tree.getroot()
a = self.create_node("SrcImageName", {"desc": "原始影像名称"}, SrcImageName)
root.insert(num+1, a)
# root.append(a)
b = self.create_node("AlgCompt", {"desc": "算法信息"}, "\n\t\t")
b.tail = "\n\n\t"
# root.append(b)
root.insert(num+2, b)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def add_info_xml(self):
"""
Copy these nodes from the configuration file into the metadata file: "AlgorithmName", "ChsName",
"AlgorithmDesc", "Version", "AlgorithmClass", "AlgorithmLevel", "AlgoirthmID", "Author"
"""
tree = ElementTree()
tree.parse(self.input_para_file)  # configuration file
root = tree.getroot()
tree2 = ElementTree()
tree2.parse(self.path)
root2 = tree2.getroot()
AlgCompt = root2.find("AlgCompt")
a = root.find("AlgCompt")
element_trees = list(a)
for element in element_trees:
if element.tag in ["AlgorithmName", "ChsName", "AlgorithmDesc", "Version",
"AlgorithmClass", "AlgorithmLevel", "AlgoirthmID", "Author"]:
element.tail = "\n\t\t"
AlgCompt.append(element)
if element.tag == "Author":
element.tail = "\n\t"
tree2.write(self.path, encoding="utf-8", xml_declaration=True)
def add_class_info(self, type_id_name, type_id_parent=None):
"""
In the metadata file:
1. remove the productinfo-productType node
2. insert land-cover class information
Output example:
<Class1>
<parent_id>1</parent_id>
<id>101</id>
<covernm>cropland</covernm>
</Class1>
<Class2>
<parent_id>5</parent_id>
<id>502</id>
<covernm>grassland</covernm>
</Class2>
"""
tree = ElementTree()
tree.parse(self.path)  # generated metadata file
root = tree.getroot()
productinfo = root.find("productinfo")
# element_trees = list(productinfo)
# for element in element_trees:
#     if element.tag == "productType":
#         productinfo.remove(element)  # remove "productType"
productinfo.find("productConsumeTime").tail = "\n\t\t" # 定位到productConsumeTime设置好位置
b = self.create_node("LandCoverClass", {}, "\n\t\t\t")
b.tail = "\n\t\t"
productinfo_count=0
for i in list(productinfo):
productinfo_count=productinfo_count+1
if i.tag=="productConsumeTime":
break
productinfo.insert(productinfo_count, b)
# productinfo.insert(num, b)  # insert LandCoverClass
class_num = 1
for key, value in type_id_name.items():
LandCoverClass = productinfo.find("LandCoverClass")
name="Class"+str(class_num)
# name = "Class"
c = self.create_node(name, {}, "\n\t\t\t\t")
if class_num!=(len(type_id_name.keys())):
c.tail = "\n\t\t\t"
else:
c.tail = "\n\t\t"
LandCoverClass.append(c)  # append the ClassN node to LandCoverClass
# LandCoverClass.find("Class")[num].tail = "\n\t\t\t"
aaa=LandCoverClass.find(name)
if type_id_parent is not None:
parent_id = self.create_node("parent_id", {}, type_id_parent[key])
parent_id.tail="\n\t\t\t\t"
LandCoverClass.find(name).append(parent_id)
id = self.create_node("id", {}, str(key))
id.tail = "\n\t\t\t\t"
LandCoverClass.find(name).append(id)
covernm = self.create_node("covernm", {}, value)
covernm.tail = "\n\t\t\t"
LandCoverClass.find(name).append(covernm)
class_num=class_num+1
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def rewrite_name(self):
"""
Rename the class nodes:
Class1 -> Class
Class2 -> Class
"""
tree = ElementTree()
tree.parse(self.path)  # generated metadata file
root = tree.getroot()
productinfo = root.find("productinfo")
LandCoverClass=productinfo.find("LandCoverClass")
element_trees = list(LandCoverClass)
for element in element_trees:
element.tag="Class"
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def OrthoInsertNode(self):
"""正射算法专用,插入节点<l1aInfo>"""
tree = ElementTree()
tree.parse(self.path)  # generated metadata file
root = tree.getroot()
# 插入节点<l1aInfo>
count2 = 0
count2_01=1
element_trees3 = list(root)
for element in element_trees3:
count2 = count2+1
if element.tag == "sensor":
element.tail = "\n\n\t"
count2_01 = count2-1
b = self.create_node("l1aInfo", {}, "\n\t\t")
b.tail = "\n\n\t"
root.insert(count2_01+1, b)
# 查询节点位置<l1aInfo>
node_l1aInfo=root.find("l1aInfo")
img_tree = ElementTree()
img_tree.parse(self.input_image_path)  # image header file
img_root = img_tree.getroot()
node_imageinfo = img_root.find("imageinfo")
node_processinfo=img_root.find("processinfo")
ele_node_imageinfo = list(node_imageinfo)
ele_node_processinfo= list(node_processinfo)
for i in ele_node_imageinfo:
if i.tag == "QualifyValue":
i.tail = "\n\t\t"
node_l1aInfo.append(i)
for j in ele_node_processinfo:
if j.tag == "CalibrationConst":
j.tail = "\n\t" #后一个节点的位置
node_l1aInfo.append(j)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def process(self,SrcImageName):
"""
Called when land cover is not involved
"""
if self.input_image_path is None:
import xml.etree.cElementTree as ET
product = ET.Element("product") # 根节点tag= "product"
product.text = "\n\t"
tree = ET.ElementTree(product)
tree.write(self.path)
count = 0
count_2 = -1
else:
count = self.create_xml()
count_2 = count
self.add_standard_xml(count)
self.add_img_xml(count_2, SrcImageName)
self.add_info_xml()
def process2(self, type_id_name, type_id_parent,SrcImageName):
"""
Called when land cover is involved, e.g.
type_id_name = {"101": "cropland", "502": "grassland"}
type_id_parent = {"101": "1", "502": "5"}
"""
count = self.create_xml()
self.add_standard_xml(count)
self.add_img_xml(count,SrcImageName)
self.add_info_xml()
self.add_class_info(type_id_name, type_id_parent)
self.rewrite_name()
def process3(self,SrcImageName):
"""
Called by the orthorectification workflow
"""
if self.input_image_path is None:
import xml.etree.cElementTree as ET
product = ET.Element("product") # 根节点tag= "product"
product.text = "\n\t"
tree = ET.ElementTree(product)
tree.write(self.path)
count = 0
else:
count = self.create_xml()
self.add_standard_xml(count)
self.add_img_xml(count, SrcImageName)
self.add_info_xml()
self.OrthoInsertNode()


@@ -0,0 +1,205 @@
from xml.etree.ElementTree import ElementTree, Element
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from osgeo import gdal
import numpy as np
import datetime
import os
import glob
os.environ['PROJ_LIB'] = r"E:\soft\Anaconda\envs\micro\Lib\site-packages\osgeo\data\proj"
class CreateMetaDict:
def __init__(self, image_path, origin_xml, pack_path, out_path1, out_path2):
self.ImageHandler = ImageHandler()
self.image_path = image_path
self.origin_xml = origin_xml
self.pack_path = pack_path
self.file_size = self.get_file_size()
self.out_path1 = out_path1
self.out_path2 = out_path2
self.timeDict = self.get_productTime()
pass
def calu_nature(self):
"""
Collect the information required by the productinfo node into a dict
image_path: image path
image_pair: polarization pairs in the input package, hh,hv,vh,vv = 1111
out_path1: output path of the geographic-to-projected transform
out_path2: output path of the projected-to-geographic transform
"""
para_dict = {}
proj = self.ImageHandler.get_projection(self.image_path)  # if the output image uses a projected CRS, convert it to geographic first
keyword = proj.split("[", 2)[0]
if keyword == "GEOGCS":  # already a geographic CRS: nothing to do
pass
elif keyword == "PROJCS":
pp.trans_projcs2geogcs(self.out_path2, self.image_path)
image_path = self.out_path2
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('image projection is missing!')
pp.trans_geogcs2projcs(self.out_path1, self.image_path)  # reproject: geographic to projected coordinates
imageinfo_widthspace = self.ImageHandler.get_geotransform(self.out_path1)[1]  # resolution after projection
# imageinfo_heightspace = -self.ImageHandler.get_geotransform(out_path1)[5]  # resolution after projection
# para_dict.update({"imageinfo_widthspace": imageinfo_widthspace})
# para_dict.update({"imageinfo_heightspace": imageinfo_heightspace})
para_dict.update({"imageinfo_ProductResolution": imageinfo_widthspace})
para_dict.update({"imageinfo_ProductFormat": "GEOTIFF"})
para_dict.update({"imageinfo_CompressionMethod": "None"})
para_dict.update({"imageinfo_ProductSize": str(self.file_size) + "MB"}) #todo 产品总大小
get_scope = self.ImageHandler.get_scope(self.image_path)
point_upleft, point_upright, point_downleft, point_downright = get_scope[0], get_scope[1], get_scope[2], get_scope[3]
para_dict.update({"SpatialCoverageInformation_TopLeftLatitude": point_upleft[1]})
para_dict.update({"SpatialCoverageInformation_TopLeftLongitude": point_upleft[0]})
para_dict.update({"SpatialCoverageInformation_TopRightLatitude": point_upright[1]})
para_dict.update({"SpatialCoverageInformation_TopRightLongitude": point_upright[0]})
para_dict.update({"SpatialCoverageInformation_BottomLeftLatitude": point_downleft[1]})
para_dict.update({"SpatialCoverageInformation_BottomLeftLongitude": point_downleft[0]})
para_dict.update({"SpatialCoverageInformation_BottomRightLatitude": point_downright[1]})
para_dict.update({"SpatialCoverageInformation_BottomRightLongitude": point_downright[0]})
longitude_max = np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).max()
longitude_min = np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).min()
latitude_max = np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).max()
latitude_min = np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).min()
imageinfo_center_latitude = (latitude_max + latitude_min) / 2
imageinfo_center_longitude = (longitude_max + longitude_min) / 2
para_dict.update({"SpatialCoverageInformation_CenterLatitude": imageinfo_center_latitude})
para_dict.update({"SpatialCoverageInformation_CenterLongitude": imageinfo_center_longitude})
para_dict.update({"TimeCoverageInformation_StartTime": self.timeDict.get("startTime")})
para_dict.update({"TimeCoverageInformation_CenterTime": self.timeDict.get("centerTime")})
para_dict.update({"TimeCoverageInformation_EndTime": self.timeDict.get("endTime")})
para_dict.update({"CoordinateReferenceSystemInformation_EarthEllipsoid": "WGS84"})
para_dict.update({"CoordinateReferenceSystemInformation_MapProjection": "UTM"})
para_dict.update({"CoordinateReferenceSystemInformation_ZoneNo": "None"})
para_dict.update({"MetaInfo_Unit": "none"}) # 设置单位
para_dict.update({"MetaInfo_UnitDes": "无量纲"}) # 设置单位
# fill in the ProductProductionInfo section
data_name = os.path.basename(self.image_path)
strs = data_name.split("_")
para_dict.update({"DataSources_DataSource_Satellite": strs[0]})
para_dict.update({"DataSources_DataSource_Sensor": strs[0]})
para_dict.update({"ObservationGeometry_SatelliteAzimuth": "None"})
para_dict.update({"ObservationGeometry_SatelliteRange": "None"})
para_dict.update({"ProductProductionInfo_BandSelection": "1"})
para_dict.update({"ProductProductionInfo_DataSourceDescription": "None"})
para_dict.update({"ProductProductionInfo_DataSourceProcessingDescription": "参考产品介绍PDF"})
productGentime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
para_dict.update({"ProductProductionInfo_ProductionDate": productGentime})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": ""})
# para_dict.update({"ProductPublishInfo_Processor": "德清院"}) # 生产者
# para_dict.update({"ProductPublishInfo_DistributionUnit": "none"}) # 分发单位
# para_dict.update({"ProductPublishInfo_ContactInformation": "none"}) # 联系信息
return para_dict
def get_productTime(self):
time_dict = {}
tree = ElementTree()
tree.parse(self.origin_xml)
root = tree.getroot()
platform = root.find("platform")
if platform is None:
centerTime = " "
else:
centerTime = platform.find("CenterTime").text.split(".")[0]
productInfo = root.find("imageinfo")
imagingTime = productInfo.find("imagingTime")
if imagingTime is None:
startTime = " "
endTime = " "
else:
startTime = imagingTime.find("start").text.split(".")[0]
endTime = imagingTime.find("end").text.split(".")[0]
time_dict.update({"startTime": startTime})
time_dict.update({"centerTime": centerTime})
time_dict.update({"endTime": endTime})
return time_dict
def get_file_size(self):
in_tif_paths = list(glob.glob(os.path.join(self.pack_path, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(self.pack_path, '*.tiff')))
in_tif_paths += in_tif_paths1
size = 0
for file in in_tif_paths:
fsize = os.path.getsize(file)  # size in bytes
size += fsize
return round(size / float(1024*1024), 2)
class CreateProductXml:
def __init__(self, par_dict, model_path, xml_path):
self.par_dict = par_dict
self.xml_path = xml_path
shutil.copy(model_path, xml_path)
pass
def create_standard_xml(self):
"""将字典中的信息写入到copy的xml文件中"""
tree = ElementTree()
tree.parse(self.xml_path)  # copied product xml
root = tree.getroot()
productinfo = root.find("ProductBasicInfo")
for key, value in self.par_dict.items():
if key.split("_")[0] == "imageinfo":
productinfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "SpatialCoverageInformation":
imageinfo = productinfo.find("SpatialCoverageInformation")
imageinfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "TimeCoverageInformation":
timeInfo = productinfo.find("TimeCoverageInformation")
timeInfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "CoordinateReferenceSystemInformation":
geoInfo = productinfo.find("CoordinateReferenceSystemInformation")
geoInfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "MetaInfo":
metaInfo = productinfo.find("MetaInfo")
metaInfo.find(key.split("_")[1]).text = str(value)
ProductProductionInfo = root.find("ProductProductionInfo")  # source data information
for key, value in self.par_dict.items():
if key.split("_")[0] == "DataSources":
dataSources = ProductProductionInfo.find("DataSources")
dataSource = dataSources.find("DataSource")
dataSource.find(key.split("_")[2]).text = str(value)
elif key.split("_")[0] == "ObservationGeometry":
ObservationGeometry = ProductProductionInfo.find("ObservationGeometry")
ObservationGeometry.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "ProductProductionInfo":
ProductProductionInfo.find(key.split("_")[1]).text = str(value)
# ProductPublishInfo = root.find("ProductPublishInfo")  # publisher information
# for key, value in self.par_dict.items():
# if key.split("_")[0] == "ProductPublishInfo":
# ProductPublishInfo.find(key.split("_")[1]).text = str(value)
tree.write(self.xml_path, encoding="utf-8", xml_declaration=True)
if __name__ == '__main__':
image_path = r'D:\Micro\WorkSpace\test\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1B_h_h_L10000073024_db_RD_geo.tif'
origin_xml = r'D:\Micro\WorkSpace\Ortho\Temporary\package\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024.meta.xml'
tem_folder = r'D:\Micro\WorkSpace\test'
pack_path = r'D:\Micro\WorkSpace\Ortho\Temporary\package'
out_dem_path1 = os.path.join(tem_folder, "trans_dem_geo_projcs.tif")
out_dem_path2 = os.path.join(tem_folder, "trans_dem_projcs_geo.tif")
para_dict = CreateMetaDict(image_path, origin_xml, pack_path, out_dem_path1, out_dem_path2).calu_nature()
model_path = r'D:\Project\microproduct\Ortho\product.xml'
xml_path = r'D:\Micro\WorkSpace\test\test.xml'
CreateProductXml(para_dict, model_path, xml_path).create_standard_xml()


@@ -0,0 +1,48 @@
# -*- coding: UTF-8 -*-
"""
@Project onestar
@File ConfigeHandle.py
@Contact: https://blog.csdn.net/songlh1234/article/details/83316468
@Author SHJ
@Date 2021/11/23 16:57
@Version 1.0.0
"""
import os
import configparser
class Config:
"""读写初始化配置文件"""
def __init__(self):
pass
@staticmethod
def get(para_name, option='config', config_name='config.ini'):
config = configparser.ConfigParser()
config_path = os.path.join(os.getcwd(), config_name)
config.read(config_path, encoding='utf-8')
config.sections()
exe_name = config.get(option, para_name)
return exe_name
def get_list(self, para_name, option='config', config_name='config.ini'):
config = configparser.ConfigParser()
config_path = os.path.join(os.getcwd(), config_name)
config.read(config_path, encoding='utf-8')
config.sections()
str_name = config.get(option, para_name)
# strip spaces and newlines
str_name = str(str_name).replace("\n", "").replace(' ', '')
# split into a list
name_list = str_name.split(',')
return name_list
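# A minimal config.ini sketch matching the readers above (key names taken from the commented
# demo below; values are placeholders, not the shipped configuration):
# [config]
# exe_name = Ortho_C_SAR_V2.1.exe
# debug = False
# cover_threshold = 10
# ndvi_threshold = 0.3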
if __name__ == '__main__':
# c = Config()
# a = c.get('exe_name')
# b = bool(c.get('debug'))
# d = int(c.get('cover_threshold'))
# f = float(c.get('ndvi_threshold'))
print('done')

265
Ortho/tool/csv/csvHandle.py Normal file

@@ -0,0 +1,265 @@
# -*- coding: UTF-8 -*-
"""
@Project : microproduct
@File : csvHandle.py
@Function : read and write csv files
@Contact :
@Author:SHJ
@Date:2022/11/6
@Version:1.0.0
"""
import random
import csv
import logging
import numpy as np
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.CoordinateTransformation import geo2imagexy
from tool.algorithm.transforml1a.transHandle import TransImgL1A
logger = logging.getLogger("mylog")
class csvHandle:
def __init__(self, row=0, col=0):
self.imageHandler = ImageHandler()
self.row = row
self.col = col
self.img_flag = False
if row != 0 and col != 0:
self.roi_img = np.zeros((row, col), dtype=float)
self.img_flag = True
def get_roi_img(self):
if self.img_flag:
self.roi_img[self.roi_img == 0] = np.nan
return self.roi_img
else:
return np.array([])
@staticmethod
def readcsv(csv_path):
reader = csv.reader(open(csv_path, newline=''))
csv_list = []
for line_data in reader:
csv_list.append(line_data)
return csv_list[1:]
def trans_measuredata(self, meas_data, tif_path):
file_name = tif_path
dataset = self.imageHandler.get_dataset(file_name)
rows = self.imageHandler.get_img_height(file_name)
cols = self.imageHandler.get_img_width(file_name)
measdata_list = []
logger.info('[MEASURE DATA]')
for data in meas_data:
lon = float(data[1])
lat = float(data[2])
coord = geo2imagexy(dataset, lon, lat)
row = round(coord[1])
col = round(coord[0])
if row >= 0 and row <= rows and col >= 0 and col <= cols:
measdata_list.append([row, col, float(data[3])])
logger.info([row, col, float(data[3])])
else:
logger.warning("measure data: %s is beyond tif scope !", data)
pass
return measdata_list
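# Row layout assumed by the indexing above (a sketch, not a documented schema):
#   meas_data row = [id, lon, lat, value], e.g. ['0', '118.91', '31.46', '0.35']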
def write_roi_img_data(self, points, type_id):
if self.img_flag:
for p in points:
r = p[0]
c = p[1]
if r < self.row and c < self.col:
self.roi_img[r, c] = type_id
def trans_landCover_measuredata(self, meas_data, cuted_ori_sim_path, max_train_num =100000):
"""
Collect all points inside the polygon regions and split them into training and test data
:param meas_data: measured data read from the csv
"""
type_data = {}
n = 1
train_data_list = []
for data in meas_data:
for d in data:
if d == '':
raise Exception('there are empty data!', data)
type_id = int(data[1])
type_name = data[2]
if type_id not in type_data.keys():
train_data_list.append([n, type_id, type_name, []])
type_data.update({type_id: type_name})
n += 1
pointList = self.__roiPolygonAnalysis(data[3])
for points in pointList:
roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
if tr._mask is not None:
points = tr.get_roi_points()
for train_data in train_data_list:
if train_data[1] == type_id:
train_data[3] += points
self.write_roi_img_data(points, type_id)
if train_data[3] == []:
raise Exception('there are empty data!', train_data)
if len(train_data_list) <= 1:
raise Exception('there is only one label type!', train_data_list)
for train_data in train_data_list:
logger.info(str(train_data[0]) + "," + str(train_data[2]) + "," + "num:" + str(len(train_data[3])))
max_num = max_train_num
if (len(train_data[3]) > max_num):
logger.info("max number =" + str(max_num) + ", random select" + str(max_num) + " point as train data!")
train_data[3] = random.sample(train_data[3], max_num)
return train_data_list
def trans_landCover_measuredata_dic(self, meas_data, cuted_ori_sim_path,max_train_num=100000):
train_data_list = self.trans_landCover_measuredata(meas_data, cuted_ori_sim_path,max_train_num)
return self.trans_landCover_list2dic(train_data_list)
@staticmethod
def trans_landCover_list2dic(train_data_list):
ids = []
class_ids = []
ch_names = []
positions = []
for data in train_data_list:
ids.append(data[0])
class_ids.append(data[1])
ch_names.append(data[2])
positions.append(data[3])
train_data_dic = {}
train_data_dic.update({"ids": ids})
train_data_dic.update({"class_ids": class_ids})
train_data_dic.update({"ch_names": ch_names})
train_data_dic.update({"positions": positions})
return train_data_dic
@staticmethod
def __roiPolygonAnalysis(roiStr):
"""
Convert a csv POLYGON string into arrays of points
:param roiStr: POLYGON (WKT-style) string
:return pointList: list holding the polygons
"""
pointList = []
strContent = roiStr.replace("POLYGON", "")
# parse the contour string into a 2-D array
bracketsList = []
strTemp = ''
strList = []
for c in strContent:
if c == '(':
bracketsList.append(c)
continue
elif c == ')':
if len(bracketsList) > 0:
bracketsList.pop(0)
if len(strTemp) > 0:
strList.append(strTemp)
strTemp = ''
else:
strTemp += c
for item in strList:
if len(item) == 0:
continue
pTempList = item.split(',')
pList = []
for row in pTempList:
cells = row.split(' ')
if len(cells) != 2:
continue
point = [float(cells[0]), float(cells[1])]
pList.append(point)
pointList.append(pList)
return pointList
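# Illustrative sketch (hypothetical coordinates):
#   __roiPolygonAnalysis('POLYGON((118.91 31.46,118.92 31.46,118.92 31.47))')
#   -> [[[118.91, 31.46], [118.92, 31.46], [118.92, 31.47]]]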
def class_landcover_list(self, csv_path):
"""
Output the first three columns of the csv table
"""
reader = csv.reader(open(csv_path, newline=''))
class_list=[]
type_id_name = {}
type_id_parent = {}
for line_data in reader:
class_list.append(line_data)  # class_list has four columns
for data in class_list[1:]:
type_parent= data[0]
type_id = int(data[1])
type_name = data[2]
if type_id not in type_id_name.keys():
type_id_name.update({type_id: type_name})
type_id_parent.update({type_id: type_parent})
return type_id_name, type_id_parent
def trans_VegePhenology_measdata_dic(self, meas_data, cuted_ori_sim_path):
"""
Collect all points inside the polygon regions and split them into training and test data
:param meas_data: measured data read from the csv
"""
train_data = []
test_data = []
type_data = {}
for data in meas_data:
data_use_type = data[0]
sar_img_name = data[1]
name = sar_img_name
if name.endswith('.tar.gz'):
name = name[:-len('.tar.gz')]  # rstrip('.tar.gz') would strip characters, not the suffix
if data_use_type == 'train':
phenology_id = int(data[2])
phenology_name = data[3]
if phenology_id not in type_data.keys():
type_data.update({phenology_id: phenology_name})
else:
phenology_id = -1
pointList = self.__roiPolygonAnalysis(data[4])
l1a_points = []
for points in pointList:
roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
l1a_points = tr.get_roi_points()
# l1a_points = tr.get_lonlat_points()
if data_use_type == 'train':
train_data.append([name, phenology_id, l1a_points, type_data[phenology_id]])
elif data_use_type == 'test':
test_data.append([name, phenology_id, l1a_points])
type_map = []
for n, id in zip(range(len(type_data)), type_data):
type_map.append([n + 1, id, type_data[id]])
return train_data, test_data, type_map
@staticmethod
def vegePhenology_class_list(csv_path):
"""
Output the first three columns of the csv table
"""
reader = csv.reader(open(csv_path, newline=''))
class_list=[]
type_id_name = {}
for line_data in reader:
class_list.append(line_data)  # class_list has four columns
for data in class_list[1:]:
type_id = data[2]
type_name = data[3]
if type_id not in type_id_name.keys():
if type_id.strip() != "":
type_id_name.update({type_id: type_name})
return type_id_name
# if __name__ == '__main__':
# csvh = csvHandle()
# csv_path = r"I:\preprocessed\VegetationPhenologyMeasureData_E118.9_N31.4.csv"
# data = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(csv_path),r"I:\preprocessed\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_ori_sim_preprocessed.tif")
# pass


@@ -0,0 +1,88 @@
# -*- coding: UTF-8 -*-
"""
@Project : microproduct
@File : fileHandle.py
@Function : file creation, deletion, extraction and packaging
@Contact :
@Author:SHJ
@Date:2022/11/6
@Version:1.0.0
"""
import os
import tarfile
import shutil
class fileHandle:
def __init__(self, debug_mode=False):
self.__debug_mode = debug_mode
def creat_dirs(self, path_list):
"""
Create the folders in path_list
"""
for path in path_list:
if os.path.exists(path):
if self.__debug_mode is True:
continue
self.del_folder(path)
os.makedirs(path)
else:
os.makedirs(path)
def del_folder(self, dic):
"""
Delete an entire folder
"""
if self.__debug_mode is True:
return
if os.path.isdir(dic):
shutil.rmtree(dic)
def del_file(self, path_data):
"""
Delete files only; folders are kept
"""
for i in os.listdir(path_data):  # os.listdir returns the relative names of everything in the directory
file_data = path_data + '\\' + i  # absolute path of each entry in the folder
if os.path.isfile(file_data) is True:  # if it is a file, delete it; if it is a folder, recurse via del_file
os.remove(file_data)
else:
self.del_file(file_data)
@staticmethod
def make_targz(output_filename, source_dir):
"""
Pack an entire root directory at once (empty subdirectories are included).
To pack without compressing, change the "w:gz" parameter to "w:" or "w".
:param output_filename: full path of the output archive, e.g. 'E:\test.tar.gz'
:param source_dir: root directory to pack, e.g. 'E:\testFile\' packs the folder's contents, 'E:\testFile' packs the folder itself
"""
dir = os.path.split(output_filename)[0]
if os.path.exists(dir) is False:
os.makedirs(dir)
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
@staticmethod
def de_targz(tar_gz_path, file_dir):
if os.path.exists(file_dir) is False:
os.makedirs(file_dir)
# extract the archive
t = tarfile.open(tar_gz_path)
t.extractall(path=file_dir)
@staticmethod
def copyfile2dir(srcfile, dir):  # copy a file into a directory
if not os.path.isfile(srcfile):
print("%s not exist!" % srcfile)
else:
fpath, fname = os.path.split(srcfile)  # split into path and file name
if not os.path.exists(dir):
os.makedirs(dir)  # create the directory
shutil.copy(srcfile, os.path.join(dir, fname))  # copy the file into the directory
# if __name__ == '__main__':
# file = fileHandle()
# file.del_folder("I:\preprocessed")
# pass
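# Usage sketch (hypothetical paths, mirroring the docstrings above):
# fh = fileHandle()
# fh.make_targz(r'E:\test.tar.gz', r'E:\testFile')  # pack a folder into a .tar.gz
# fh.de_targz(r'E:\test.tar.gz', r'E:\unpacked')    # extract it again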


@@ -0,0 +1,90 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File logHandler.py
@Author SHJ
@Date 2021/9/6
@Version 1.0.0
"""
import logging
import os
# from logging import handlers
import time
import datetime
class LogHandler:
"""
Generate log files
"""
__logger = logging.getLogger("mylog")
__format_str = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] - %(module)s.%(funcName)s "
"(%(filename)s:%(lineno)d) - %(message)s")
__log_path = None
@staticmethod
def init_log_handler(log_name):
"""
Initialize logging
:param log_name: path and name prefix of the saved log
:return:
"""
path = os.getcwd()
current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
LogHandler.__log_path = os.path.join(path, log_name + current_time + ".log")
para_dir = os.path.split(LogHandler.__log_path)
if not os.path.exists(para_dir[0]):
os.makedirs(para_dir[0])
# delete files older than seven days
LogHandler.delete_outdate_files(para_dir[0], 7)
# method 1: plain logging
LOG_FORMAT = "[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s" \
" (%(filename)s:Line%(lineno)d) "
DATE_FORMAT = "%m/%d/%Y %H:%M:%S"
fp = logging.FileHandler(LogHandler.__log_path, encoding='utf-8')
fs = logging.StreamHandler()
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT, handlers=[fp, fs])  # apply the configuration
# method 2: rotating log
# LogHandler.__logger.setLevel(logging.DEBUG)
# th = handlers.TimedRotatingFileHandler(filename=LogHandler.__log_path, when='S', interval=1,
# backupCount=2, encoding='utf-8')
# th.suffix = "%Y-%m-%d-%H-%M-%S.log"
# th.setFormatter(LogHandler.__format_str)
# th.setLevel(level=logging.DEBUG)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# LogHandler.__logger.addHandler(console)
# LogHandler.__logger.addHandler(th)
@staticmethod
def delete_outdate_files(path, date_interval=7):
"""
Delete log files created more than date_interval days ago under the given path
"""
current_time = time.strftime("%Y-%m-%d", time.localtime(time.time()))
current_timeList = current_time.split("-")
current_time_day = datetime.datetime(int(current_timeList[0]), int(current_timeList[1]),
int(current_timeList[2]))
for root, dirs, files in os.walk(path):
for item in files:
if item.endswith(".log"):
file_path = os.path.join(root, item)
create_time = time.strftime("%Y-%m-%d", time.localtime((os.stat(file_path)).st_mtime))
create_time_list = create_time.split("-")
create_time_day = datetime.datetime(int(create_time_list[0]), int(create_time_list[1]),
int(create_time_list[2]))
time_difference = (current_time_day - create_time_day).days
if time_difference > date_interval:
os.remove(file_path)
#
# if __name__ == "__main__":
# # eg2:
# log_handler = LogHandler()
# log_handler.init_log_handler(r"run_log\myrun1")
# logging.warning("1")
# print("done")

47
Ortho/tool/newimage.csv Normal file

@@ -0,0 +1,47 @@
sar_img_name,phenology_id,phenology_name,roi_polygon
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,10,water,"POLYGON((118.91799748 31.46893509,118.91762055 31.46674878,118.9210883 31.46637183,118.9210883 31.46855814,118.91799748 31.46893509))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,10,water,"POLYGON((118.90864966 31.46388396,118.90861196 31.46384627,118.90857427 31.46380857,118.90608654 31.46188613,118.90608654 31.46181074,118.90940351 31.46015216,118.91155201 31.46199921,118.90864966 31.46388396))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,10,water,"POLYGON((118.91898928 31.45778718,118.91893038 31.45478336,118.91893038 31.45472446,118.91898928 31.45472446,118.9246432 31.45472446,118.9247021 31.45472446,118.92499657 31.45872956,118.91898928 31.45778718))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89543794 31.46174102,118.89452125 31.4583153,118.8948831 31.45807405,118.89599277 31.46171689,118.89543794 31.46174102))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.8940629 31.45653006,118.89280848 31.45312847,118.8932427 31.45308022,118.89326683 31.45308022,118.89442475 31.45636119,118.8940629 31.45653006))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89132626 31.44844806,118.89046628 31.4453566,118.89079168 31.44540309,118.89081492 31.44542633,118.89083817 31.44544958,118.89086141 31.44547282,118.89172139 31.44840157,118.89132626 31.44844806))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89028034 31.4441944,118.89016413 31.44256732,118.89044304 31.44256732,118.89046628 31.44259056,118.89048953 31.4426138,118.89051277 31.44266029,118.8907452 31.44472901,118.89028034 31.4441944))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89000143 31.44193973,118.88997819 31.44193973,118.88997819 31.44189324,118.88923442 31.43399026,118.8894436 31.43403675,118.89018737 31.44191648,118.89000143 31.44193973))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.88897526 31.42114936,118.8888637 31.4205915,118.8888637 31.4205729,118.89468368 31.41977331,118.89470227 31.41977331,118.89472087 31.41977331,118.89499978 31.42042414,118.88897526 31.42114936))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.88025459 31.42230226,118.88010583 31.42153986,118.88594441 31.42088902,118.885963 31.42088902,118.8866138 31.42155845,118.88025459 31.42230226))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.87595933 31.42280433,118.87577339 31.42222788,118.87977114 31.42178159,118.87999427 31.42228367,118.87595933 31.42280433))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89870564 31.41809418,118.89826403 31.41748983,118.89826403 31.41746659,118.89828727 31.41744335,118.90193638 31.41511894,118.90261042 31.41551409,118.89870564 31.41809418))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.86530883 31.42443948,118.86630754 31.42325911,118.87384328 31.42257814,118.87388867 31.42257814,118.87397946 31.42253274,118.87411565 31.42253274,118.87493278 31.42307752,118.87488738 31.42312292,118.87479659 31.42312292,118.86530883 31.42443948))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.91830432 31.38444815,118.91828107 31.3844249,118.91828107 31.38440166,118.9183508 31.38433193,118.91958267 31.3826816,118.91960591 31.3826816,118.9223718 31.38421571,118.9223718 31.38423895,118.9223718 31.38426219,118.92146533 31.38628443,118.91830432 31.38444815))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93304021 31.37921823,118.93396992 31.37812576,118.93399316 31.37812576,118.93401641 31.37812576,118.93536449 31.37919499,118.93464396 31.38033395,118.93304021 31.37921823))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93629419 31.37882309,118.93627095 31.37882309,118.93627095 31.37879984,118.93701472 31.37756791,118.93752606 31.37780035,118.93752606 31.37782359,118.93666608 31.37905553,118.93629419 31.37882309))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93680553 31.38465734,118.9372239 31.38396002,118.93817686 31.38454112,118.9382001 31.38454112,118.93822334 31.38456437,118.93810713 31.38533142,118.93808389 31.38535467,118.93680553 31.38465734))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94384808 31.38312765,118.94420137 31.38271856,118.94421997 31.38271856,118.94423856 31.38271856,118.94505671 31.38325782,118.94453607 31.38387146,118.94384808 31.38312765))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94066848 31.37855322,118.94096598 31.37806975,118.94096598 31.37805115,118.94096598 31.37803256,118.94096598 31.37801396,118.94096598 31.37799537,118.94223039 31.37881356,118.94163538 31.37946439,118.94161678 31.37946439,118.94066848 31.37855322))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93942267 31.3813797,118.9396272 31.38108218,118.9396458 31.38108218,118.94048254 31.38154706,118.94022222 31.38204913,118.93942267 31.3813797))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94446169 31.38115656,118.94479639 31.38069168,118.94576329 31.38136111,118.94546578 31.38193756,118.94446169 31.38115656))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94968666 31.38693968,118.94968666 31.38690249,118.9498912 31.38660496,118.94990979 31.38660496,118.95063496 31.38701406,118.95033746 31.38740456,118.94968666 31.38693968))"
GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.95310799 31.37931563,118.95310799 31.37929703,118.9537216 31.37885075,118.95374019 31.37883215,118.95379598 31.37879496,118.95463271 31.37931563,118.95418645 31.3798363,118.95310799 31.37931563))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,10,water,"POLYGON((118.91799748 31.46893509,118.91762055 31.46674878,118.9210883 31.46637183,118.9210883 31.46855814,118.91799748 31.46893509))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,10,water,"POLYGON((118.90864966 31.46388396,118.90861196 31.46384627,118.90857427 31.46380857,118.90608654 31.46188613,118.90608654 31.46181074,118.90940351 31.46015216,118.91155201 31.46199921,118.90864966 31.46388396))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,10,water,"POLYGON((118.91898928 31.45778718,118.91893038 31.45478336,118.91893038 31.45472446,118.91898928 31.45472446,118.9246432 31.45472446,118.9247021 31.45472446,118.92499657 31.45872956,118.91898928 31.45778718))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89543794 31.46174102,118.89452125 31.4583153,118.8948831 31.45807405,118.89599277 31.46171689,118.89543794 31.46174102))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.8940629 31.45653006,118.89280848 31.45312847,118.8932427 31.45308022,118.89326683 31.45308022,118.89442475 31.45636119,118.8940629 31.45653006))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89132626 31.44844806,118.89046628 31.4453566,118.89079168 31.44540309,118.89081492 31.44542633,118.89083817 31.44544958,118.89086141 31.44547282,118.89172139 31.44840157,118.89132626 31.44844806))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89028034 31.4441944,118.89016413 31.44256732,118.89044304 31.44256732,118.89046628 31.44259056,118.89048953 31.4426138,118.89051277 31.44266029,118.8907452 31.44472901,118.89028034 31.4441944))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89000143 31.44193973,118.88997819 31.44193973,118.88997819 31.44189324,118.88923442 31.43399026,118.8894436 31.43403675,118.89018737 31.44191648,118.89000143 31.44193973))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.88897526 31.42114936,118.8888637 31.4205915,118.8888637 31.4205729,118.89468368 31.41977331,118.89470227 31.41977331,118.89472087 31.41977331,118.89499978 31.42042414,118.88897526 31.42114936))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.88025459 31.42230226,118.88010583 31.42153986,118.88594441 31.42088902,118.885963 31.42088902,118.8866138 31.42155845,118.88025459 31.42230226))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.87595933 31.42280433,118.87577339 31.42222788,118.87977114 31.42178159,118.87999427 31.42228367,118.87595933 31.42280433))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89870564 31.41809418,118.89826403 31.41748983,118.89826403 31.41746659,118.89828727 31.41744335,118.90193638 31.41511894,118.90261042 31.41551409,118.89870564 31.41809418))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.86530883 31.42443948,118.86630754 31.42325911,118.87384328 31.42257814,118.87388867 31.42257814,118.87397946 31.42253274,118.87411565 31.42253274,118.87493278 31.42307752,118.87488738 31.42312292,118.87479659 31.42312292,118.86530883 31.42443948))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.91830432 31.38444815,118.91828107 31.3844249,118.91828107 31.38440166,118.9183508 31.38433193,118.91958267 31.3826816,118.91960591 31.3826816,118.9223718 31.38421571,118.9223718 31.38423895,118.9223718 31.38426219,118.92146533 31.38628443,118.91830432 31.38444815))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93304021 31.37921823,118.93396992 31.37812576,118.93399316 31.37812576,118.93401641 31.37812576,118.93536449 31.37919499,118.93464396 31.38033395,118.93304021 31.37921823))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93629419 31.37882309,118.93627095 31.37882309,118.93627095 31.37879984,118.93701472 31.37756791,118.93752606 31.37780035,118.93752606 31.37782359,118.93666608 31.37905553,118.93629419 31.37882309))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93680553 31.38465734,118.9372239 31.38396002,118.93817686 31.38454112,118.9382001 31.38454112,118.93822334 31.38456437,118.93810713 31.38533142,118.93808389 31.38535467,118.93680553 31.38465734))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94384808 31.38312765,118.94420137 31.38271856,118.94421997 31.38271856,118.94423856 31.38271856,118.94505671 31.38325782,118.94453607 31.38387146,118.94384808 31.38312765))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94066848 31.37855322,118.94096598 31.37806975,118.94096598 31.37805115,118.94096598 31.37803256,118.94096598 31.37801396,118.94096598 31.37799537,118.94223039 31.37881356,118.94163538 31.37946439,118.94161678 31.37946439,118.94066848 31.37855322))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93942267 31.3813797,118.9396272 31.38108218,118.9396458 31.38108218,118.94048254 31.38154706,118.94022222 31.38204913,118.93942267 31.3813797))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94446169 31.38115656,118.94479639 31.38069168,118.94576329 31.38136111,118.94546578 31.38193756,118.94446169 31.38115656))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94968666 31.38693968,118.94968666 31.38690249,118.9498912 31.38660496,118.94990979 31.38660496,118.95063496 31.38701406,118.95033746 31.38740456,118.94968666 31.38693968))"
GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.95310799 31.37931563,118.95310799 31.37929703,118.9537216 31.37885075,118.95374019 31.37883215,118.95379598 31.37879496,118.95463271 31.37931563,118.95418645 31.3798363,118.95310799 31.37931563))"
34 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 20 build POLYGON((118.88025459 31.42230226,118.88010583 31.42153986,118.88594441 31.42088902,118.885963 31.42088902,118.8866138 31.42155845,118.88025459 31.42230226))
35 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 20 build POLYGON((118.87595933 31.42280433,118.87577339 31.42222788,118.87977114 31.42178159,118.87999427 31.42228367,118.87595933 31.42280433))
36 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 20 build POLYGON((118.89870564 31.41809418,118.89826403 31.41748983,118.89826403 31.41746659,118.89828727 31.41744335,118.90193638 31.41511894,118.90261042 31.41551409,118.89870564 31.41809418))
37 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 20 build POLYGON((118.86530883 31.42443948,118.86630754 31.42325911,118.87384328 31.42257814,118.87388867 31.42257814,118.87397946 31.42253274,118.87411565 31.42253274,118.87493278 31.42307752,118.87488738 31.42312292,118.87479659 31.42312292,118.86530883 31.42443948))
38 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.91830432 31.38444815,118.91828107 31.3844249,118.91828107 31.38440166,118.9183508 31.38433193,118.91958267 31.3826816,118.91960591 31.3826816,118.9223718 31.38421571,118.9223718 31.38423895,118.9223718 31.38426219,118.92146533 31.38628443,118.91830432 31.38444815))
39 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.93304021 31.37921823,118.93396992 31.37812576,118.93399316 31.37812576,118.93401641 31.37812576,118.93536449 31.37919499,118.93464396 31.38033395,118.93304021 31.37921823))
40 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.93629419 31.37882309,118.93627095 31.37882309,118.93627095 31.37879984,118.93701472 31.37756791,118.93752606 31.37780035,118.93752606 31.37782359,118.93666608 31.37905553,118.93629419 31.37882309))
41 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.93680553 31.38465734,118.9372239 31.38396002,118.93817686 31.38454112,118.9382001 31.38454112,118.93822334 31.38456437,118.93810713 31.38533142,118.93808389 31.38535467,118.93680553 31.38465734))
42 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.94384808 31.38312765,118.94420137 31.38271856,118.94421997 31.38271856,118.94423856 31.38271856,118.94505671 31.38325782,118.94453607 31.38387146,118.94384808 31.38312765))
43 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.94066848 31.37855322,118.94096598 31.37806975,118.94096598 31.37805115,118.94096598 31.37803256,118.94096598 31.37801396,118.94096598 31.37799537,118.94223039 31.37881356,118.94163538 31.37946439,118.94161678 31.37946439,118.94066848 31.37855322))
44 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.93942267 31.3813797,118.9396272 31.38108218,118.9396458 31.38108218,118.94048254 31.38154706,118.94022222 31.38204913,118.93942267 31.3813797))
45 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.94446169 31.38115656,118.94479639 31.38069168,118.94576329 31.38136111,118.94546578 31.38193756,118.94446169 31.38115656))
46 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.94968666 31.38693968,118.94968666 31.38690249,118.9498912 31.38660496,118.94990979 31.38660496,118.95063496 31.38701406,118.95033746 31.38740456,118.94968666 31.38693968))
47 GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho 30 road POLYGON((118.95310799 31.37931563,118.95310799 31.37929703,118.9537216 31.37885075,118.95374019 31.37883215,118.95379598 31.37879496,118.95463271 31.37931563,118.95418645 31.3798363,118.95310799 31.37931563))

View File

@ -0,0 +1,84 @@
#encoding=utf-8
import xml.etree.ElementTree as ET
import pandas as pd
import csv
def xml2csv(xmlpath):
tree_obj = ET.parse(xmlpath)
    # Get every Element matching the "Region" tag
    list_Region = tree_obj.findall("Region")
    for Region in list_Region:
        # The class of each polygon (phenology_name) lives in the tag, e.g. <Region name="water" color="255,0,0">
        Region_dict = Region.attrib
        phenology_name = Region_dict.get("name")
        print(phenology_name)
        list_GeometryDef = Region.findall("GeometryDef")
        list_Polygon = list_GeometryDef[0].findall("Polygon")  # polygons belonging to this class
        for polygon in list_Polygon:
            # Read the <Coordinates> text of each polygon; note it is space-separated, unlike the CSV
            Coordinates_list = polygon.find('.//Coordinates').text.strip().split()
            # In the CSV the geometry is stored as POLYGON((119.035 31.51,119.035 31.50,119.033 31.50))
            print("value")
# Append one row to the CSV
def csvfile(csvpath,data):
with open(csvpath, 'a', newline='') as file:
        # create a CSV writer
writer = csv.writer(file)
# data example
#data = ["This", "is", "a", "Test"]
writer.writerow(data)
# Define the structure of the data
# example header: student_header = ['name', 'age', 'major', 'minor']
def csvcreateTitile(csvpath,data):
# 1. Open a new CSV file
    with open(csvpath, 'w', newline='') as file:
# 2. Create a CSV writer
writer = csv.writer(file)
# 3. Write data to the file
writer.writerow(data)
# Convert the coordinate pairs in a flat list into a WKT POLYGON string
def createcsv_roi_polygon(coordinates):
coord_str = ','.join([f'{coordinates[i]} {coordinates[i + 1]}' for i in range(0, len(coordinates), 2)])
    # build the final POLYGON string
polygon_str = f'POLYGON(({coord_str}))'
print(polygon_str)
return polygon_str
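# A hedged illustration with hypothetical values: the flat list
# ['119.035', '31.51', '119.035', '31.50'] is paired element-wise and becomes
# 'POLYGON((119.035 31.51,119.035 31.50))'.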
if __name__ == '__main__':
xmlpath = r"E:\MicroWorkspace\GF3A_nanjing\input-ortho\Vp_test.xml"
tree_obj = ET.parse(xmlpath)
csv_header = ['sar_img_name', 'phenology_id', 'phenology_name', 'roi_polygon']
csvpath = r".\newimage.csv"
# csvcreateTitile(csvpath,csv_header)
csvfile(csvpath,csv_header)
    # Get every Element matching the "Region" tag
list_Region = tree_obj.findall("Region")
name_list = {}
count = 10
for Region in list_Region:
        # The class of each polygon (phenology_name) lives in the tag, e.g. <Region name="water" color="255,0,0">
Region_dict = Region.attrib
phenology_name = Region_dict.get("name")
if not phenology_name in name_list.keys():
name_list.update({phenology_name: count})
count += 10
print(phenology_name)
# list_GeometryDef = Region.findall("GeometryDef")
        list_Polygon = Region.findall(".//Polygon")  # polygons belonging to this class
        for polygon in list_Polygon:
            # Read the <Coordinates> text of each polygon; note it is space-separated, unlike the CSV
            Coordinates_list = polygon.find('.//Coordinates').text.strip().split()
            # Write each polygon and its class id to the .csv
            polygon_str = createcsv_roi_polygon(Coordinates_list)
            # Stored in the CSV as POLYGON((119.035 31.51,119.035 31.50,119.033 31.50))
            data = ['0', name_list.get(phenology_name), phenology_name, polygon_str]
            csvfile(csvpath, data)

4
Ortho/打包命令.txt Normal file
View File

@ -0,0 +1,4 @@
cd E:\0test\oneOrtho
Package as a one-folder bundle: pyinstaller -D packing.spec
pyinstaller -F --add-data "D:/Anaconda/envs/micro/Lib/site-packages/dask/dask.yaml;./dask" --add-data "D:/Anaconda/envs/micro/Lib/site-packages/distributed/distributed.yaml;./distributed" --hidden-import pyproj._compat

View File

@ -0,0 +1,85 @@
<?xml version='1.0' encoding='utf-8'?>
<Root>
<TaskID>CSAR_202107275419_0001-0</TaskID>
<WorkSpace>D:\micro\SWork\</WorkSpace>
<AlgCompt>
<DataTransModel>File</DataTransModel>
<Artificial>ElementAlg</Artificial>
<AlgorithmName>BackScattering-C-SAR-V2.1 </AlgorithmName>
<DllName>BackScattering-C-SAR-V2.1.exe</DllName>
<ChsName>后向散射系数</ChsName>
<AlgorithmDesc>微波卫星3-5级产品生产模型</AlgorithmDesc>
<AlgorithmAlias>BackScattering-C-SAR-V2.1-1</AlgorithmAlias>
<Version>1.0</Version>
<AlgorithmClass>辐射类产品_后向散射系数</AlgorithmClass>
<AlgorithmLevel>1</AlgorithmLevel>
<AlgoirthmID>BackScattering_中科卫星应用德清研究院_2.1</AlgoirthmID>
<Author>中科卫星应用德清研究院</Author>
<Type>景-算法</Type>
<InputTestFilePath />
<InputTestFileName />
<OutputTestFilePath />
<OutputTestFileName />
<jdkVersion>1.8</jdkVersion>
<algDevlanguage>python</algDevlanguage>
<Environment>
<IsCluster>0</IsCluster>
<ClusterNum>0</ClusterNum>
<OperatingSystem>Windows10</OperatingSystem>
<CPU>双核</CPU>
<Memory>4GB</Memory>
<Storage>20GB</Storage>
<NetworkCard>无需求</NetworkCard>
<Bandwidth>无需求</Bandwidth>
<GPU>无需求</GPU>
</Environment>
<Utility Satellite="GF3" Sensor="MSS" Resolution="1" />
<Inputs ParameterNum="2">
<Parameter>
<ParaName>SLC</ParaName>
<ParaChsName>SLC影像文件</ParaChsName>
<Description>正射校正后的SLC影像文件路径</Description>
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\MicroWorkspace\S_SAR\HJ2E_KSC_STRIP_003375_E100.4_N26.4_20230522_SLC_HHHV_L10000057057.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
<InputType>Satellite</InputType>
<InputNum>1</InputNum>
<DateFrom>GF3B</DateFrom>
</Parameter>
<Parameter>
<ParaName>DEM</ParaName>
<ParaChsName>DEM数字高程影像</ParaChsName>
<Description>30m分辨率DEM数字高程影像</Description>
<ParaType>File</ParaType>
<DataType>File</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\MicroWorkspace\S_SAR\dem</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>True</EnMultipleChoice>
<Control>File</Control>
<InputType>DEM</InputType>
<InputNum>0</InputNum>
<DateFrom>DEM</DateFrom>
</Parameter>
</Inputs>
<Outputs ParameterNum="1">
<Parameter>
<ParaName>BackScatteringProduct</ParaName>
<ParaChsName>后向散射系数产品</ParaChsName>
<Description>后向散射系数产品</Description>
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>D:\micro\SWork\BackScattering\Output\HJ2E_KSC_STRIP_003375_E100.4_N26.4_20230522_SLC_HHHV_L10000057057-cal.tar.gz</ParaValue>
<MaxValue>DEFAULT</MaxValue>
<MinValue>DEFAULT</MinValue>
<OptionValue>DEFAULT</OptionValue>
<NoDataValue>DEFAULT</NoDataValue>
</Parameter>
</Outputs>
</AlgCompt>
</Root>

View File

@ -0,0 +1,81 @@
<?xml version='1.0' encoding='utf-8'?>
<Root>
<TaskID>CSAR_202107275419_0001-0</TaskID>
<WorkSpace>D:\micro\WorkSpace\</WorkSpace>
<AlgCompt>
<DataTransModel>File</DataTransModel>
<Artificial>ElementAlg</Artificial>
<AlgorithmName>BackScattering-C-SAR-V2.2 </AlgorithmName>
<DllName>BackScattering-C-SAR-V2.2.exe</DllName>
<ChsName>后向散射系数</ChsName>
<AlgorithmDesc>微波卫星3-5级产品生产模型</AlgorithmDesc>
<AlgorithmAlias>BackScattering-C-SAR-V2.2-1</AlgorithmAlias>
<Version>2.2</Version>
<AlgorithmClass>辐射类产品_后向散射系数</AlgorithmClass>
<AlgorithmLevel>3</AlgorithmLevel>
<AlgoirthmID>BackScattering_中科卫星应用德清研究院_2.2</AlgoirthmID>
<Author>中科卫星应用德清研究院</Author>
<Type>景-算法</Type>
<InputTestFilePath />
<InputTestFileName />
<OutputTestFilePath />
<OutputTestFileName />
<jdkVersion>1.8</jdkVersion>
<algDevlanguage>python</algDevlanguage>
<Environment>
<IsCluster>0</IsCluster>
<ClusterNum>0</ClusterNum>
<OperatingSystem>Windows10</OperatingSystem>
<CPU>双核</CPU>
<Memory>4GB</Memory>
<Storage>20GB</Storage>
<NetworkCard>无需求</NetworkCard>
<Bandwidth>无需求</Bandwidth>
<GPU>无需求</GPU>
</Environment>
<Utility Satellite="GF3" Sensor="MSS" Resolution="1" />
<Inputs ParameterNum="2">
<Parameter>
<ParaName>SLC</ParaName>
<ParaChsName>SLC影像文件</ParaChsName>
<Description>正射校正后的SLC影像文件路径</Description>
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\MicroWorkspace\Micro\neimenggu\GF3_MDJ_QPSI_031847_E116.4_N43.9_20220828_L1A_AHV_L10006708819.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
<InputType>Satellite</InputType>
<InputNum>1</InputNum>
<DateFrom>GF3B</DateFrom>
</Parameter>
<Parameter>
<ParaName>DEM</ParaName>
<ParaChsName>DEM数字高程影像</ParaChsName>
<Description>30m分辨率DEM数字高程影像</Description>
<ParaType>File</ParaType>
<DataType>tif</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\MicroWorkspace\Micro\neimenggu\dem.tif</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>True</EnMultipleChoice>
<Control>File</Control>
<InputType>DEM</InputType>
<InputNum>0</InputNum>
<DateFrom>DEM</DateFrom>
</Parameter>
</Inputs>
<Outputs ParameterNum="1">
<Parameter>
<ParaName>BackScatteringProduct</ParaName>
<ParaChsName>后向散射系数产品</ParaChsName>
<Description>后向散射系数产品</Description>
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>D:\micro\WorkSpace\BackScattering\Output\GF3_MDJ_QPSI_031847_E116.4_N43.9_20220828_L1A_AHV_L10006708819-cal.tar.gz</ParaValue>
</Parameter>
</Outputs>
</AlgCompt>
</Root>

View File

@ -0,0 +1,10 @@
# -*- coding: UTF-8 -*-
# Define the [config] section
[config]
###### 1 - Basic algorithm parameters ######
# Algorithm name; sets the names of temporary files in the temporary workspace and of the log
exe_name = BackScattering
# Debug mode keeps the temporary workspace from being deleted. True: debug on; False: debug off
debug = False

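For reference, a minimal sketch of reading this [config] section with Python's standard configparser; the repository actually goes through its own tool.config.ConfigeHandle wrapper, so treat the calls below as illustrative only:

import configparser

parser = configparser.ConfigParser()
parser.read('config.ini', encoding='utf-8')
exe_name = parser.get('config', 'exe_name')      # -> 'BackScattering'
debug = parser.getboolean('config', 'debug')     # -> False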
View File

@ -0,0 +1,96 @@
SatelliteOribit: # orbit reference epoch
StartTime:
Value:
'2017-09-15 01:55:21.0000'
    format: # time format
"%Y-%m-%d %H:%M:%S.%f"
ReferenceSpheroid:
  Re: # semi-major axis
    6378137
  Rp: # semi-minor axis
    6356752.3142451795
we:
0.000072292115
GPS: # GPS orbit nodes
GPSNode_Path:
['product','GPS','GPSParam']
  NodeInfomation_Name: # [time, x position, y position, z position, x velocity, y velocity, z velocity]
['TimeStamp', 'xPosition', 'yPosition', 'zPosition', 'xVelocity', 'yVelocity', 'zVelocity']
Time_format:
"%Y-%m-%d %H:%M:%S.%f"
imageinfo: # image information
ImageBox:
NodePath:
['product','imageinfo','corner']
NodeName:
['topLeft','topRight','bottomLeft','bottomRight']
latLon:
["latitude","longitude"]
ImageWidthSpace:
NodePath:
['product','imageinfo','widthspace']
ImageHeightSpace:
NodePath:
['product','imageinfo','heightspace']
  ImageWidth: # image width
    NodePath:
      ['product','imageinfo','width']
  ImageHeight: # image height
    NodePath:
      ['product','imageinfo','height']
  StartImageTime: # imaging start time
NodePath:
['product','imageinfo','imagingTime',start]
Format:
"%Y-%m-%d %H:%M:%S.%f"
  EndImageTime: # imaging end time
NodePath:
['product','imageinfo','imagingTime',end]
Format:
"%Y-%m-%d %H:%M:%S.%f"
  CenterImageTime: # center-pixel time
NodePath:
['product','platform','CenterTime']
Format:
"%Y-%m-%d %H:%M:%S.%f"
  CenterImagePositon: # center pixel; usable as the iteration starting point
NodePath:
['product','imageinfo','center']
Value:
['latitude','longitude']
  NearRange: # near slant range
NodePath:
['product','imageinfo','nearRange']
  DopplerCentroidCoefficients: # Doppler centroid coefficients
NodePath:
['product','processinfo','DopplerCentroidCoefficients']
DopplerCentroidCoefficients_Name:
['d0','d1','d2','d3','d4']
DopplerParametersReferenceTime:
NodePath:
['product','processinfo',"DopplerParametersReferenceTime"]
ReferenceRange:
NodePath:
['product','imageinfo','refRange']
incidenceAngle: # incidence angle
  NearRange: # near-range incidence angle
    NodePath:
      ['product','processinfo','incidenceAngleNearRange']
  FarRange: # far-range incidence angle
    NodePath:
      ['product','processinfo','incidenceAngleFarRange']
sensor:
  PRF: # pulse repetition frequency
NodePath:
['product','imageinfo','eqvPRF']
  bandWidth: # signal bandwidth, used to compute the range resolution
NodePath:
['product','sensor','waveParams','wave','bandWidth']
  lambda: # wavelength
NodePath:
['product','sensor','lamda']
LightSpeed:
299792458

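The NodePath entries above are child-tag paths into the product metadata XML. A minimal sketch of how such a path might be resolved with ElementTree; the repository's own reader may differ, and the file name below is hypothetical:

import xml.etree.ElementTree as ET

def resolve_node(root, node_path):
    # Walk the child tags listed in node_path, skipping the root tag itself.
    node = root
    for tag in node_path[1:]:
        node = node.find(tag)
        if node is None:
            raise KeyError('missing node: ' + tag)
    return node

# root = ET.parse('product.meta.xml').getroot()  # root tag is 'product'
# near_range = float(resolve_node(root, ['product', 'imageinfo', 'nearRange']).text)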
View File

@ -0,0 +1,60 @@
<Root>
<ProductBasicInfo>
<ProductName>后向散射系数</ProductName>
<ProductIdentifier>BackScattering</ProductIdentifier>
<ProductLevel>LEVEL3</ProductLevel>
<ProductResolution> </ProductResolution>
<ProductDate> </ProductDate>
<ProductFormat> </ProductFormat>
<CompressionMethod> </CompressionMethod>
<ProductSize> </ProductSize>
<SpatialCoverageInformation>
<TopLeftLatitude> </TopLeftLatitude>
<TopLeftLongitude> </TopLeftLongitude>
<TopRightLatitude> </TopRightLatitude>
<TopRightLongitude> </TopRightLongitude>
<BottomRightLatitude> </BottomRightLatitude>
<BottomRightLongitude> </BottomRightLongitude>
<BottomLeftLatitude> </BottomLeftLatitude>
<BottomLeftLongitude> </BottomLeftLongitude>
<CenterLatitude> </CenterLatitude>
<CenterLongitude> </CenterLongitude>
</SpatialCoverageInformation>
<TimeCoverageInformation>
<StartTime> </StartTime>
<EndTime> </EndTime>
<CenterTime> </CenterTime>
</TimeCoverageInformation>
<CoordinateReferenceSystemInformation>
<MapProjection> </MapProjection>
<EarthEllipsoid> </EarthEllipsoid>
<ZoneNo> </ZoneNo>
</CoordinateReferenceSystemInformation>
<MetaInfo>
<Unit> </Unit>
<UnitDes> </UnitDes>
</MetaInfo>
</ProductBasicInfo>
<ProductProductionInfo>
<DataSources number="1">
<DataSource>
<Satellite> </Satellite>
<Sensor> </Sensor>
</DataSource>
</DataSources>
<ObservationGeometry>
<SatelliteAzimuth> </SatelliteAzimuth>
<SatelliteRange> </SatelliteRange>
</ObservationGeometry>
<BandSelection>1</BandSelection>
<DataSourceDescription>None</DataSourceDescription>
<DataSourceProcessingDescription>参考产品介绍PDF</DataSourceProcessingDescription>
<ProductionDate> </ProductionDate>
<AuxiliaryDataDescription> </AuxiliaryDataDescription>
</ProductProductionInfo>
<ProductPublishInfo>
<Processor>德清</Processor>
<DistributionUnit> </DistributionUnit>
<ContactInformation> </ContactInformation>
</ProductPublishInfo>
</Root>

File diff suppressed because it is too large

View File

@ -0,0 +1,482 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File BackScatteringMain.py
@Function Backscattering coefficient calculation
@Author KHZ
@Contact
@Date 2021/9/1
@Version 1.0.0
"""
import logging
from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from BackScatteringAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2
from BackScatteringAlg import ScatteringAlg as alg
from tool.config.ConfigeHandle import Config as cf
import os
import glob
import datetime
import shutil
import tarfile
import sys
if cf.get('debug') == 'True':
DEBUG = True
else:
DEBUG = False
EXE_NAME = 'BackScattering'
LogHandler.init_log_handler('run_log\\' + EXE_NAME)
logger = logging.getLogger("mylog")
env_str = os.path.split(os.path.realpath(__file__))[0]
os.environ['PROJ_LIB'] = env_str
class ScatteringMain:
"""
    Main class for backscattering coefficient production
"""
def __init__(self, alg_xml_path):
self.alg_xml_path = alg_xml_path
self.imageHandler = ImageHandler()
self.__alg_xml_handler = ManageAlgXML(alg_xml_path)
self.__check_handler = CheckSource(self.__alg_xml_handler)
self.__workspace_path = None
self.__task_id = None
self.__input_paras = {}
self.__output_paras = {}
self.__in_processing_paras = {}
self.__out_para = None
self.__preprocessed_paras = {}
self.__tif_names_list = []
self.__feature_name_list = []
def check_source(self):
"""
        Check that the algorithm configuration files, imagery and auxiliary files are all present
"""
env_str = os.getcwd()
logger.info("sysdir: %s", env_str)
if self.__check_handler.check_alg_xml() is False:
return False
if self.__check_handler.check_run_env() is False:
return False
input_para_names = ["SLC","DEM"]
if self.__check_handler.check_input_paras(input_para_names) is False:
return False
self.__workspace_path = self.__alg_xml_handler.get_workspace_path()
self.__create_work_space()
self.__task_id = self.__alg_xml_handler.get_task_id()
self.__input_paras = self.__alg_xml_handler.get_input_paras()
self.__in_processing_paras = self.__init_processing_paras(self.__input_paras)
SrcImageName = os.path.split(self.__input_paras["SLC"]['ParaValue'])[1].split('.tar.gz')[0]
AlgorithmName = self.__alg_xml_handler.get_algorithm_name()
TaskId = self.__alg_xml_handler.get_task_id()
result_name = SrcImageName + "-cal.tar.gz"
# out_name = os.path.splitext(os.path.splitext(os.path.basename(self.__input_paras['SLC']['ParaValue']))[0])[0]
# self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', "BackScatteringProduct.tar.gz")
self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', result_name)
        self.__alg_xml_handler.write_out_para("BackScatteringProduct", self.__out_para)  # write the output parameter
logger.info('check_source success!')
logger.info('progress bar: 30%')
return True
def __init_processing_paras(self, names):
"""
        :param names: list of input parameter names; each entry maps to one input product's configuration
"""
processing_paras = {}
for name in names:
para = self.__input_paras[name]
if para is None:
                logger.error(name + " is None!")
return False
if para['ParaType'] == 'File':
if para['DataType'] == 'File':
processing_paras.update({name: para['ParaValue']})
# if para['DataType'] == 'csv':
# para_value_list = para['ParaValue'].split(";")
# if len(para_value_list) == 1:
# para_path = para['ParaValue']
# processing_paras.update({name: para_path})
# else:
# for n, para_value in zip(range(len(para_value_list)), para_value_list):
# processing_paras.update({'feature'+str(n): para_value})
# self.__feature_name_list.append('feature'+str(n))
elif para['DataType'] == 'tar.gz':
paths = para['ParaValue'].split(';')
for path in paths:
tar_gz_dic = self.__dec_tar_gz(path, self.__workspace_preprocessing_path)
processing_paras.update(tar_gz_dic)
                elif para['DataType'] == 'tif' or para['DataType'] == 'tiff':  # added: DEM data passed as absolute file path(s)
if para['ParaValue'] != 'empty' and para['ParaValue'] != 'Empty' and para['ParaValue'] != '':
para_path_list = para['ParaValue'].split(";")
if len(para_path_list) != 0:
dem_path = os.path.join(self.__workspace_origin_path, para['ParaName'])
if os.path.exists(dem_path) is False:
os.mkdir(dem_path)
for file_path in para_path_list:
tif_name = os.path.basename(file_path)
shutil.copy(file_path, os.path.join(dem_path, tif_name))
para_path = os.path.join(self.__workspace_origin_path, para['ParaName'])
processing_paras.update({name: para_path})
else:
para_path = para['ParaValue']
processing_paras.update({name: para_path})
elif para['ParaType'] == 'Value':
if para['DataType'] == 'float':
value = float(para['ParaValue'])
elif para['DataType'] == 'int':
value = int(para['ParaValue'])
                else:  # default: string
value = para['ParaValue']
processing_paras.update({name: value})
return processing_paras
def __dec_tar_gz(self, tar_gz_path, out_dir):
"""
        Decompress a .tar.gz scene image archive
        :param tar_gz_path: path of the .tar.gz file
        :param out_dir: output directory
        :return para_dic: paths of the full-polarization images
"""
        # create the extraction folder
name = os.path.split(tar_gz_path)[1].rstrip('.tar.gz')
file_dir = os.path.join(out_dir, name + '\\')
if os.path.exists(file_dir) is False:
os.makedirs(file_dir)
        # extract the archive
t = tarfile.open(tar_gz_path)
t.extractall(path=file_dir)
        # collect the files inside the folder
para_dic = {}
# if os.path.exists(file_dir + name + '\\'):
# meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.meta.xml')))
# para_dic.update({'SLC': file_dir + name})
# else:
# meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.meta.xml')))
# para_dic.update({'SLC': file_dir})
if os.path.exists(file_dir + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.xml')))
para_dic.update({'SLC': file_dir + name})
else:
meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.xml')))
para_dic.update({'SLC': file_dir})
if meta_xml_paths == []:
            raise Exception('no .meta.xml found in path: ', file_dir + '\\')
para_dic.update({'META': meta_xml_paths[0]})
self.image_meta_xml = meta_xml_paths
return para_dic
def __create_work_space(self):
"""
        Delete any existing workspace folders and create fresh ones
"""
self.__workspace_preprocessing_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "preprocessing") # self.__workspace_path + EXE_NAME + r"\Temporary\preprocessing""\\"
self.__workspace_preprocessed_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "preprocessed") # self.__workspace_path + EXE_NAME + r"\Temporary\preprocessed""\\"
self.__workspace_processing_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary","processing\\") #self.__workspace_path + EXE_NAME + r"\Temporary\processing""\\"
self.__workspace_origin_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "origin")
path_list = [self.__workspace_preprocessing_path, self.__workspace_preprocessed_path,
self.__workspace_processing_path, self.__workspace_origin_path]
for path in path_list:
if os.path.exists(path):
if DEBUG is True:
continue
self.del_floder(path)
os.makedirs(path)
else:
os.makedirs(path)
logger.info('create new workspace success!')
def del_file(self, path_data):
"""
        Delete files only, not folders
"""
if DEBUG is True:
return
        for i in os.listdir(path_data):  # everything in the directory, as relative names
            file_data = os.path.join(path_data, i)  # absolute path of each entry
            if os.path.isfile(file_data) is True:  # delete files; recurse into folders via del_file
os.remove(file_data)
else:
self.del_file(file_data)
@staticmethod
def del_floder(dic):
"""
        Delete an entire folder tree
"""
if DEBUG is True:
return
if os.path.isdir(dic):
shutil.rmtree(dic)
@staticmethod
def make_targz(output_filename, source_dir):
"""
一次性打包整个根目录空子目录会被打包
如果只打包不压缩"w:gz"参数改为"w:""w"即可
:param output_filename:输出压缩包的完整路径eg:'E:\test.tar.gz'
:param source_dir:需要打包的跟目录eg: 'E:\testFfile\'打包文件夹里面的所有文件,'E:\testFfile'打包文件夹
"""
dir = os.path.split(output_filename)[0]
if os.path.exists(dir) is False:
os.makedirs(dir)
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def del_temp_workspace(self):
"""
        Delete the temporary workspace
"""
if DEBUG is True:
return
path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary") # self.__workspace_path + EXE_NAME + r"\Temporary"
if os.path.exists(path):
self.del_floder(path)
def process_sim_ori(self, ori_sim, sim_ori):
scopes = ()
scopes += (ImageHandler.get_scope_ori_sim(ori_sim),)
intersect_polygon = pp().intersect_polygon(scopes)
if intersect_polygon is None:
raise Exception('create intersect shp fail!')
shp_path = os.path.join(self.__workspace_preprocessing_path, 'IntersectPolygon.shp')
if pp().write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
raise Exception('create intersect shp fail!')
sim_ori_process = os.path.join(self.__workspace_preprocessing_path, 'sim_ori_process.tif')
pp().cut_img(sim_ori_process, sim_ori, shp_path)
return sim_ori_process
def process_handle(self,start):
in_tif_paths = list(glob.glob(os.path.join(self.__in_processing_paras['SLC'], '*.tif')))
if in_tif_paths == []:
in_tif_paths = list(glob.glob(os.path.join(self.__in_processing_paras['SLC'], '*.tiff')))
hh_flag, hv_flag, vh_flag, vv_flag, angle_flag = 0, 0, 0, 0, 0
for in_tif_path in in_tif_paths:
if 'HH' in os.path.basename(in_tif_path):
hh_flag = 1
elif 'HV' in os.path.basename(in_tif_path):
hv_flag = 1
elif 'VH' in os.path.basename(in_tif_path):
vh_flag = 1
elif 'VV' in os.path.basename(in_tif_path):
vv_flag = 1
self.processinfo=[hh_flag, hv_flag, vh_flag, vv_flag]
ref_tif_path = ""
meta_file_path = self.__in_processing_paras['META']
rpc_path=None
#######################
# RD
######################
        # 2.2 generate the local incidence angle
path2 = env_str
print("path2:" + path2)
Orthorectification = IndirectOrthorectification(os.path.join(path2,"config.yaml"))
        Orthorectification.IndirectOrthorectification(self.__in_processing_paras["SLC"], self.__workspace_processing_path)  # change 1
        # 2.3 write outputs
        # 3 RD processing
        # merge DEM tiles
Orth_Slc=[]
in_dem_path = self.__in_processing_paras['DEM']
        meta_file_path = self.__in_processing_paras['META']  # path of the .meta file
out_dem_path = self.__workspace_preprocessing_path
        dem_merged_path = DEMProcess.dem_merged(in_dem_path, meta_file_path, out_dem_path)  # produces TestDEM\mergedDEM_VRT.tif
in_slc_path=None
for slc_path in in_tif_paths:
if slc_path.find(".tiff")>0 and (slc_path.find("_HH_")>0 or slc_path.find("_VV_")>0):
in_slc_path=slc_path
break
        # after obtaining the correction model
        Orthorectification.preCaldem_sar_rc(dem_merged_path,in_slc_path,self.__workspace_preprocessing_path,self.__workspace_processing_path.replace("\\","\\\\"))  # coarse screening of the coordinate range
logger.info('progress bar: 40%')
# clip_dem_reample_path=os.path.join(self.__workspace_preprocessing_path, "SAR_dem.tiff")
# infooption=gdal.InfoOptions("-json")
# clip_dem_tif_info=gdal.Info(clip_dem_reample_path,options=infooption)
# dem_merged_info=gdal.Info(dem_merged_path,options=infooption)
# sampling_f=clip_dem_tif_info['size'][0]/dem_merged_info['size'][0]
        # handle the RD outputs
        out_dir_path = self.__workspace_processing_path.replace("\\", "\\\\")
        this_outSpace_path = out_dir_path
        this_out_dem_slantRange_path = os.path.join(out_dir_path, "dem_slantRange.tiff")  # terrain slant range
        this_out_plant_slantRange_path = os.path.join(out_dir_path, "flat_slantRange.tiff")  # flat-earth slant range
        # remove any stale results from a previous run
if(os.path.exists(this_out_dem_slantRange_path)):
os.remove(this_out_dem_slantRange_path)
if(os.path.exists(this_out_plant_slantRange_path)):
os.remove(this_out_plant_slantRange_path)
        this_out_dem_rc_path = os.path.join(out_dir_path, "WGS_SAR_map.tiff")  # lat/lon to row/col mapping
if(os.path.exists(this_out_dem_rc_path)):
os.remove(this_out_dem_rc_path)
this_out_sar_sim_path = out_dir_path + "\\" + "sar_sim.tiff"
if (os.path.exists(this_out_sar_sim_path)):
os.remove(this_out_sar_sim_path)
        this_out_sar_sim_wgs_path = out_dir_path + "\\" + "sar_sim_wgs.tiff"  # lat/lon to row/col mapping
if (os.path.exists(this_out_sar_sim_wgs_path)):
os.remove(this_out_sar_sim_wgs_path)
        this_out_incidence_path = os.path.join(out_dir_path, "incidentAngle.tiff")  # incidence angle
        this_out_localIncidenct_path = os.path.join(out_dir_path, "localIncidentAngle.tiff")  # local incidence angle
if(os.path.exists(this_out_incidence_path)):
shutil.move(this_out_incidence_path,os.path.join(out_dir_path, "inc_angle.tif")) # out_dir_path + "\\" + "inc_angle.tif")
if(os.path.exists(this_out_localIncidenct_path)):
shutil.move(this_out_localIncidenct_path, os.path.join(out_dir_path, "LocalIncidenceAngle.tif")) # out_dir_path + "\\" + "LocalIncidenceAngle.tif")
        this_out_inc_angle_rpc_path = os.path.join(out_dir_path, "RD_incidentAngle.tiff")  # RD incidence angle
        this_out_local_inc_angle_rpc_path = os.path.join(out_dir_path, "RD_localIncidentAngle.tiff")  # RD local incidence angle
if(os.path.exists(this_out_inc_angle_rpc_path)):
os.remove(this_out_inc_angle_rpc_path)
if(os.path.exists(this_out_local_inc_angle_rpc_path)):
os.remove(this_out_local_inc_angle_rpc_path)
        this_out_ori_sim_tiff = os.path.join(out_dir_path, "RD_ori_sim.tif")  # lat/lon to row/col mapping (RD)
this_in_rpc_lon_lat_path = this_out_ori_sim_tiff
this_out_sim_ori_tiff = os.path.join(out_dir_path, "RD_sim_ori.tif")
this_in_rpc_x_y_path = this_out_sim_ori_tiff
this_in_rpc_x_y_path_pro = self.process_sim_ori(this_in_rpc_lon_lat_path, this_in_rpc_x_y_path)
parameter_path = os.path.join(self.__workspace_processing_path, "orth_para.txt")
for in_tif_path in in_tif_paths:
# out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_lin.tif"
out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_lin.tif"
if ('HH' in os.path.basename(in_tif_path)) or ('HV' in os.path.basename(in_tif_path)) or ('VH' in os.path.basename(in_tif_path)) or ('VV' in os.path.basename(in_tif_path)):
alg.sar_backscattering_coef(in_tif_path, meta_file_path, out_tif_path)
                # build the RPC
                # look for an RPC file
rpc_path=in_tif_path.replace(".tiff",".rpc") if os.path.exists(in_tif_path.replace(".tiff",".rpc")) else in_tif_path.replace(".tiff",".rpb")
if not os.path.exists(rpc_path):
logger.error('rpc not found!')
# db->地理编码
# lin_tif_path = os.path.join(self.__workspace_processing_path,
# os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif"
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path,
# out_tif_path,
# lin_tif_path)
# 线性->地理编码->db
lin_tif_path=os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-lin_geo.tif"
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path_pro,
# out_tif_path,
# lin_tif_path)
Orthorectification.calInterpolation_bil_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path_pro,
out_tif_path,
lin_tif_path)
tempout_tif_path = os.path.join(self.__workspace_processing_path,
os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif"
                alg.lin_to_db(lin_tif_path, tempout_tif_path)  # convert linear values back to dB
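                # assumption based on the name: lin_to_db applies dB = 10 * log10(linear); see BackScatteringAlg for the actual formula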
                # move the RPC
#rpc_correction(in_tif_path,rpc_path,out_tif_path,dem_tif_file = None)
# Orthorectification.inter_Range2Geo(this_in_rpc_lon_lat_path,out_tif_path,tempout_tif_path,Orthorectification.heightspace)
self.imageHandler.write_quick_view(tempout_tif_path, color_img=False)
# self.imageHandler.write_quick_view(lin_tif_path, color_img=False)
else:
shutil.copy(in_tif_path,self.__workspace_processing_path)
ref_tif_path = tempout_tif_path
# ref_tif_path = lin_tif_path
            # build the row/col mapping table
#out_rpc_rc_path = os.path.join(self.__workspace_processing_path,"RPC_ori_sim.tif")
#getRCImageRC(in_tif_path,out_rpc_rc_path,rpc_path)
logger.info('progress bar: 90%')
if(os.path.exists(this_in_rpc_lon_lat_path)):
os.remove(this_in_rpc_lon_lat_path)
if (os.path.exists(this_in_rpc_x_y_path)):
os.remove(this_in_rpc_x_y_path)
# out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.split(meta_file_path)[1].rstrip('.meta.xml') + '_DB.meta.xml')
out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.basename(meta_file_path))
shutil.copy(meta_file_path, out_mate_file_path)
if ref_tif_path != "":
# xml_path = "./model_meta.xml"
tem_folder = os.path.join(self.__workspace_path, EXE_NAME, "Temporary")
image_path = ref_tif_path
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
# par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start)
# model_xml_path = os.path.join(tem_folder, "creat_standa5rd.meta.xml") # 输出xml路径
# CreateStadardXmlFile(xml_path, par_dict, model_xml_path).create_standard_xml()
#
SrcImageName = os.path.split(self.__input_paras["SLC"]['ParaValue'])[1].split('.tar.gz')[0]
# meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-cal.meta.xml")
# CreateMetafile(self.image_meta_xml[0], self.alg_xml_path, model_xml_path,meta_xml_path).process(SrcImageName)
model_path = "./product.xml"
meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-cal.meta.xml")
para_dict = CreateMetaDict(image_path, self.__in_processing_paras['META'], self.__workspace_processing_path,
out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": "后向散射系数"})
para_dict.update({"imageinfo_ProductIdentifier": "BackScattering"})
para_dict.update({"imageinfo_ProductLevel": "LEVEL3"})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "DEM"})
            para_dict.update({"MetaInfo_UnitDes": "DB"})  # set the unit
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
self.make_targz(self.__out_para, self.__workspace_processing_path)
logger.info('process_handle finished!')
logger.info('progress bar: 100%')
return True
if __name__ == '__main__':
start = datetime.datetime.now()
try:
if len(sys.argv)<2:
xml_path = 'BackScattering.xml'
else:
xml_path = sys.argv[1]
ScatteringMain = ScatteringMain(xml_path)
if not ScatteringMain.check_source():
raise Exception('check_source() failed!')
if not ScatteringMain.process_handle(start):
raise Exception('process_handle() failed!')
logger.info('successful production of backscattering products!')
except Exception:
logger.exception("run-time error!")
finally:
ScatteringMain.del_temp_workspace()
# pass
end = datetime.datetime.now()
msg = 'running use time: %s ' % (end - start)
logger.info(msg)

View File

@ -0,0 +1,33 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['BackScatteringMain.py'],
pathex=['D:\\estar-proj\\microproduct\\backScattering'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='BackScatteringMain',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )

View File

@ -0,0 +1,208 @@
"""
@Project microproduct
@File BackScatteringXmlInfo.PY
@Function Main routine
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import os
from xml.etree.ElementTree import ElementTree, Element
import xml.dom.minidom
from lxml import etree
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from osgeo import gdal
import numpy as np
import datetime
from PIL import Image
class CreateDict:
    """Build an attribute dictionary from the image information"""
def __init__(self, image_path, image_pair, out_path1, out_path2):
self.ImageHandler = ImageHandler()
self.para_dict = {}
self.image_path = image_path
self.out_path = out_path1
self.out_path2 = out_path2
self.image_pair = image_pair
pass
    def calu_nature(self,start):
        """Populate the attribute dictionary"""
imageinfo_width=self.ImageHandler.get_img_width(self.image_path)
self.para_dict.update({"imageinfo_width":imageinfo_width})
imageinfo_height=self.ImageHandler.get_img_height(self.image_path)
self.para_dict.update({"imageinfo_height":imageinfo_height})
# imageinfo_EarthModel=self.ImageHandler.get_projection(self.image_path).split("SPHEROID[", 2)[1].split(",", 2)[0]
# self.para_dict.update({"imageinfo_EarthModel":imageinfo_EarthModel.replace('"','')})
self.para_dict.update({"imageinfo_EarthModel": "WGS84"})
# imageinfo_ProjectModel = self.ImageHandler.get_projection(self.image_path).split("DATUM[", 2)[1].split(",", 2)[0]
# self.para_dict.update({"imageinfo_ProjectModel":imageinfo_ProjectModel.replace('"','')})
self.para_dict.update({"imageinfo_ProjectModel": "UTM"})
        proj = self.ImageHandler.get_projection(self.image_path)  # if the output image is in a projected CRS, convert to geographic first
        keyword = proj.split("[", 2)[0]  # if it is already geographic, pass
if keyword == "GEOGCS":
pass
elif keyword == "PROJCS":
pp.trans_projcs2geogcs(self.out_path2, self.image_path)
self.image_path = self.out_path2
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('image projection is missing!')
        pp.trans_geogcs2projcs(self.out_path, self.image_path)  # project coordinates: geographic to planar
        imageinfo_widthspace = self.ImageHandler.get_geotransform(self.out_path)[1]  # resolution after projection
        imageinfo_heightspace = -self.ImageHandler.get_geotransform(self.out_path)[5]  # resolution after projection
self.para_dict.update({"imageinfo_widthspace":imageinfo_widthspace})
self.para_dict.update({"imageinfo_heightspace":imageinfo_heightspace})
self.para_dict.update({"imageinfo_NearincidenceAngle": "0"})
self.para_dict.update({"imageinfo_FarIncidenceAngle": "30"})
        self.para_dict.update({"imageinfo_Azimuth": "1"})  # TODO: field newly added to the result xml
self.para_dict.update({"NominalResolution":imageinfo_widthspace})
        WidthInMeters = imageinfo_width*imageinfo_widthspace  # projected resolution × width
self.para_dict.update({"WidthInMeters":WidthInMeters})
# image_array = self.ImageHandler.get_band_array(self.image_path)
# a2 = np.where(np.isnan(image_array), 999999, image_array)
# MinValue = np.min(a2)
# a3 = np.where(np.isnan(image_array), -999999, image_array)
# MaxValue = np.max(a3)
#
# self.para_dict.update({"MaxValue":MaxValue})
# self.para_dict.update({"MinValue":MinValue})
get_scope = self.ImageHandler.get_scope(self.image_path)
point_upleft, point_upright, point_downleft, point_downright=get_scope[0], get_scope[1], get_scope[2], get_scope[3]
self.para_dict.update({"imageinfo_corner_topLeft_latitude": point_upleft[1]})
self.para_dict.update({"imageinfo_corner_topLeft_longitude": point_upleft[0]})
self.para_dict.update({"imageinfo_corner_topRight_latitude": point_upright[1]})
self.para_dict.update({"imageinfo_corner_topRight_longitude": point_upright[0]})
self.para_dict.update({"imageinfo_corner_bottomLeft_latitude": point_downleft[1]})
self.para_dict.update({"imageinfo_corner_bottomLeft_longitude": point_downleft[0]})
self.para_dict.update({"imageinfo_corner_bottomRight_latitude": point_downright[1]})
self.para_dict.update({"imageinfo_corner_bottomRight_longitude": point_downright[0]})
longitude_max=np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).max()
longitude_min=np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).min()
latitude_max=np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).max()
latitude_min=np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).min()
imageinfo_center_latitude=(latitude_max+latitude_min)/2
imageinfo_center_longitude=(longitude_max+longitude_min)/2
self.para_dict.update({"imageinfo_center_latitude": imageinfo_center_latitude})
self.para_dict.update({"imageinfo_center_longitude": imageinfo_center_longitude})
        self.para_dict.update({"productType": "GTC"})  # set the product type
self.para_dict.update({"productFormat": "TIF"})
productGentime = datetime.datetime.now()
self.para_dict.update({"productGentime": productGentime})
msg=productGentime-start
self.para_dict.update({"productConsumeTime": msg})
        self.para_dict.update({"unit": "none"})  # set the unit
        self.para_dict.update({"NoDataValue": "nan"})
        self.para_dict.update({"productLevel": "1"})  # set the product level
image_array = self.ImageHandler.get_band_array(self.image_path)
try:
gdal_dtypes = {
'int8': gdal.GDT_Byte,
                'uint16': gdal.GDT_UInt16,
                'int16': gdal.GDT_Int16,
                'uint32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
bit_dtypes = {
'int8': 8,
                'uint16': 16,
                'int16': 16,
                'uint32': 32,
'int32': 32,
'float32': 32,
'float64': 64,
}
if not gdal_dtypes.get(image_array.dtype.name, None) is None:
bit_num = str(bit_dtypes[image_array.dtype.name])
datatype=bit_num+"bit"
else:
datatype = str(32) + "bit"
# datatype = str(gdal.GDT_Float32)+"bit"
self.para_dict.update({"imagebit": datatype})
except Exception:
self.para_dict.update({"imagebit": "None"})
HH, HV, VH ,VV= self.image_pair[0],self.image_pair[1],self.image_pair[2],self.image_pair[3]
if HH == 0:
HH = "delete"
else:
HH = "NULL"
self.para_dict.update({"imageinfo_QualifyValue_HH": HH})
if HV==0:
HV = "delete"
else:
HV = "NULL"
self.para_dict.update({"imageinfo_QualifyValue_HV": HV})
if VH==0:
VH = "delete"
else:
VH = "NULL"
self.para_dict.update({"imageinfo_QualifyValue_VH": VH})
if VV==0:
VV = "delete"
else:
VV = "NULL"
self.para_dict.update({"imageinfo_QualifyValue_VV": VV})
return self.para_dict
class CreateStadardXmlFile:
    """Read the attribute values from the dictionary and generate a standard xml file"""
def __init__(self, xml_path, par_dict, path):
"""
        par_dict: attribute dictionary
        path: output path of the xml copy
"""
self.par_dict = par_dict
self.path = path
shutil.copy(xml_path, path)
pass
    def create_standard_xml(self):
        """Write the dictionary contents into the copied xml file"""
tree = ElementTree()
tree.parse(self.path) # 影像头文件
root = tree.getroot()
productinfo = root.find("productinfo")
for key, value in self.par_dict.items():
if key.split("_")[0] != "imageinfo":
productinfo.find(key).text = str(value)
elif key.split("_")[0] == "imageinfo":
imageinfo = productinfo.find("imageinfo")
                if key.split("_")[1] in ["EarthModel", "ProjectModel", "width", "height", "widthspace", "heightspace",
                                         "NearincidenceAngle", "FarIncidenceAngle", "Azimuth"]:  # TODO: newly added xml fields
imageinfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[1] == "center":
center = imageinfo.find("center")
center.find(key.split("_")[2]).text = str(value)
elif key.split("_")[1] == "corner":
corner = imageinfo.find("corner")
corner.find(key.split("_")[2]).find(key.split("_")[3]).text = str(value)
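                    # e.g. key "imageinfo_corner_topLeft_latitude" walks <imageinfo><corner><topLeft><latitude>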
elif key.split("_")[1] == "QualifyValue":
QualifyValue = imageinfo.find("QualifyValue")
if value =="delete":
element_QualifyValue = list(QualifyValue)
for i in element_QualifyValue:
if i.tag == key.split("_")[2]:
QualifyValue.remove(i)
else:
QualifyValue.find(key.split("_")[2]).text = str(value)
pass
processinfo = root.find("processinfo")
tree.write(self.path, encoding="utf-8", xml_declaration=True)

View File

@ -0,0 +1,415 @@
# Auxiliary-data processing class for 1 m orthorectification
import time
import math
import numpy as np
from osgeo import gdal
from xml.etree.ElementTree import ElementTree
from scipy.optimize import leastsq
class OrthoAuxData:
def __init__(self):
pass
@staticmethod
def time_stamp(tm):
        parts = tm.split(':')
        sec = math.ceil(float(parts[2]))
        tm1 = parts[0] + ':' + parts[1] + ':' + str(sec)
        tmArr = time.strptime(tm1, "%Y-%m-%d %H:%M:%S")
        # tmArr = time.strptime(tm1, "%Y-%m-%d %H:%M:%S.%f")
        ts = float(time.mktime(tmArr))  # convert to a Unix timestamp
return ts
@staticmethod
def read_meta(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
T = []
Xs = []
Ys = []
Zs = []
Vsx = []
Vsy = []
Vsz = []
GPS_data = root.find('GPS')
for child in GPS_data:
Xs.append(float(child.find('xPosition').text))
Ys.append(float(child.find('yPosition').text))
Zs.append(float(child.find('zPosition').text))
Vsx.append(float(child.find('xVelocity').text))
Vsy.append(float(child.find('yVelocity').text))
Vsz.append(float(child.find('zVelocity').text))
tm = child.find('TimeStamp').text
ts = OrthoAuxData.time_stamp(tm)
T.append(ts)
meta_data = [Xs, Ys, Zs, Vsx, Vsy, Vsz]
return T, meta_data
@staticmethod
def read_control_points(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
center = imageinfo.find('center')
corner = imageinfo.find('corner')
ctrl_pts = [[] for i in range(2)]
ctrl_pts[0].append(float(center.find('longitude').text))
ctrl_pts[1].append(float(center.find('latitude').text))
for child in corner:
ctrl_pts[0].append(float(child.find('longitude').text))
ctrl_pts[1].append(float(child.find('latitude').text))
return ctrl_pts
@staticmethod
def read_dem(dem_resampled_path, flag=1):
in_ds = gdal.Open(dem_resampled_path)
gt = list(in_ds.GetGeoTransform())
bands_num = in_ds.RasterCount
x_size = in_ds.RasterXSize
y_size = in_ds.RasterYSize
        pstn_arr = np.zeros([y_size, x_size, 3], dtype=float)  # np.float was removed in newer NumPy versions
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray(0, 0, x_size, y_size)
for y in range(y_size):
for x in range(x_size):
longitude = gt[0] + x * gt[1]
latitude = gt[3] + y * gt[5]
altitude = data[y, x]
if flag == 1:
pstn = OrthoAuxData.LLA2XYZ(longitude, latitude, altitude)
else:
pstn = [longitude, latitude, altitude]
pstn_arr[y, x, 0] = pstn[0]
pstn_arr[y, x, 1] = pstn[1]
pstn_arr[y, x, 2] = pstn[2]
del in_ds, data
return pstn_arr
@staticmethod
def read_demM(dem_resampled_path, part_cnt, r_cnt, c_cnt, flag=1):
in_ds = gdal.Open(dem_resampled_path)
gt = list(in_ds.GetGeoTransform())
bands_num = in_ds.RasterCount
x_size = in_ds.RasterXSize // part_cnt
y_size = in_ds.RasterYSize // part_cnt
x = [[i] * y_size for i in range(x_size)]
y = [[i] * x_size for i in range(y_size)]
x = np.array(x)
x = x.T
y = np.array(y)
x_off = c_cnt * x_size
y_off = r_cnt * y_size
gt[0] = gt[0] + c_cnt * x_size * gt[1]
gt[3] = gt[3] + r_cnt * y_size * gt[5]
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray(x_off, y_off, x_size, y_size)
altitude = data / 255 * 1024
longitude = gt[0] + x * gt[1]
latitude = gt[3] + y * gt[5]
if flag == 1:
pstn = OrthoAuxData.LLA2XYZM(longitude, latitude, altitude)
else:
pstn = [longitude, latitude, altitude]
del in_ds, data
return pstn
@staticmethod
def read_dem_row(dem_resampled_path, p, flag=1):
in_ds = gdal.Open(dem_resampled_path)
gt = list(in_ds.GetGeoTransform())
bands_num = in_ds.RasterCount
x_size = in_ds.RasterXSize
y_size = in_ds.RasterYSize
x = [[i] for i in range(x_size)]
x = np.array(x)
x = x.T
y = np.ones((1, x_size)) * p
x_off = 0
y_off = p
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray(x_off, y_off, x_size, 1)
altitude = data
longitude = gt[0] + x * gt[1]
latitude = gt[3] + y * gt[5]
if flag == 1:
pstn = OrthoAuxData.LLA2XYZM(longitude, latitude, altitude)
else:
pstn = [longitude, latitude, altitude]
del in_ds, data
return pstn
@staticmethod
def orbit_fitting(time_array, meta_data):
        # solve for the orbit parameters by least squares
T0 = (time_array[0] + time_array[len(time_array)-1]) / 2
t = []
for i in range(len(time_array)):
t.append(time_array[i]-T0)
def func(p, x):
w3, w2, w1, w0 = p
return w3*x**3 + w2*x**2 + w1*x + w0
def error(p, x, y):
return func(p, x) - y
orbital_paras = []
for j in range(len(meta_data)):
p0 = [1, 2, 3, 4]
x = np.array(t)
y = np.array(meta_data[j])
Para = leastsq(error, p0, args=(x, y))
orbital_paras.append(Para[0])
print(Para[0], Para[1])
return orbital_paras, T0
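    # The model fitted above is a cubic in time about the mid-epoch T0:
    #   x(t) = w3*(t - T0)**3 + w2*(t - T0)**2 + w1*(t - T0) + w0
    # Hypothetical usage: T, meta = OrthoAuxData.read_meta(meta_path)
    #                     paras, T0 = OrthoAuxData.orbit_fitting(T, meta)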
@staticmethod
def get_PRF(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
sensor = root.find('sensor')
waveParams = sensor.find('waveParams')
PRF = float(waveParams.find('wave').find('prf').text)
return PRF
@staticmethod
def get_delta_R(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
sensor = root.find('sensor')
pulseWidth = float(sensor.find('waveParams').find('wave').find('pulseWidth').text)
bandWidth = float(sensor.find('waveParams').find('wave').find('bandWidth').text)
c = 299792458
delta_R = c / (1000000 * 2 * bandWidth)
return delta_R
@staticmethod
def get_doppler_rate_coef(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
processinfo = root.find('processinfo')
doppler = processinfo.find('DopplerRateValuesCoefficients')
t0 = float(processinfo.find('DopplerParametersReferenceTime').text)
r0 = float(doppler.find('r0').text)
r1 = float(doppler.find('r1').text)
r2 = float(doppler.find('r2').text)
r3 = float(doppler.find('r3').text)
r4 = float(doppler.find('r4').text)
return t0, np.array([r0, r1, r2, r3, r4]).reshape(5, 1)
@staticmethod
def get_doppler_center_coef(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
processinfo = root.find('processinfo')
doppler = processinfo.find('DopplerCentroidCoefficients')
b0 = float(doppler.find('d0').text)
b1 = float(doppler.find('d1').text)
b2 = float(doppler.find('d2').text)
return b0, b1, b2
@staticmethod
def get_lamda(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
sensor = root.find('sensor')
λ = float(sensor.find('lamda').text)
return λ
@staticmethod
def get_t0(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
tm = imageinfo.find('imagingTime').find('start').text
t0 = OrthoAuxData.time_stamp(tm)
return t0
@staticmethod
def get_start_and_end_time(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
tm0 = imageinfo.find('imagingTime').find('start').text
tm1 = imageinfo.find('imagingTime').find('end').text
starttime = OrthoAuxData.time_stamp(tm0)
endtime = OrthoAuxData.time_stamp(tm1)
return starttime, endtime
@staticmethod
def get_width_and_height(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
width = int(imageinfo.find('width').text)
height = int(imageinfo.find('height').text)
return width, height
@staticmethod
def get_R0(meta_file_path):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
imageinfo = root.find('imageinfo')
R0 = float(imageinfo.find('nearRange').text)
return R0
@staticmethod
def get_h():
h = 6.6
return h
@staticmethod
def LLA2XYZ(longitude, latitude, altitude):
'''
        Convert geodetic lon/lat/alt to Earth-fixed Cartesian XYZ in WGS-84
'''
        # sines and cosines of latitude and longitude
cosLat = math.cos(latitude * math.pi / 180)
sinLat = math.sin(latitude * math.pi / 180)
cosLon = math.cos(longitude * math.pi / 180)
sinLon = math.sin(longitude * math.pi / 180)
        # WGS84 ellipsoid parameters
        rad = 6378137.0  # mean equatorial radius
        f = 1.0/298.257224  # WGS84 flattening
C = 1.0/math.sqrt(cosLat*cosLat + (1-f)*(1-f)*sinLat*sinLat)
S = (1-f)*(1-f)*C
h = altitude
        # compute the XYZ coordinates
X = (rad * C + h) * cosLat * cosLon
Y = (rad * C + h) * cosLat * sinLon
Z = (rad * S + h) * sinLat
# return np.array([X, Y, Z]).reshape(1,3)
return [X, Y, Z]
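    # Hedged example (values approximate): LLA2XYZ(116.0, 40.0, 50.0) gives roughly
    # [-2.14e6, 4.40e6, 4.08e6] metres; XYZ2LLA below performs the inverse.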
@staticmethod
def LLA2XYZM(longitude, latitude, altitude):
        # sines and cosines of latitude and longitude
cosLat = np.cos(latitude * math.pi / 180).reshape(-1,1)
sinLat = np.sin(latitude * math.pi / 180).reshape(-1,1)
cosLon = np.cos(longitude * math.pi / 180).reshape(-1,1)
sinLon = np.sin(longitude * math.pi / 180).reshape(-1,1)
        # WGS84 ellipsoid parameters
        rad = 6378137.0  # mean equatorial radius
        f = 1.0/298.257224  # WGS84 flattening
C = 1.0/(np.sqrt(cosLat*cosLat + (1-f)*(1-f)*sinLat*sinLat)).reshape(-1,1)
S = (1-f)*(1-f)*C
h = altitude.reshape(-1,1)
        # compute the XYZ coordinates
X = (rad * C + h) * cosLat * cosLon
Y = (rad * C + h) * cosLat * sinLon
Z = (rad * S + h) * sinLat
return [X, Y, Z]
@staticmethod
def XYZ2LLA(X, Y, Z):
        ''' Convert Earth-fixed Cartesian coordinates to geodetic lon/lat/alt
        Valid for the WGS84 ellipsoid
args:
x,y,z
return:
lat,long,altitude
'''
        # WGS84 ellipsoid parameters
        a = 6378137.0  # semi-major axis
        b = 6356752.314245  # semi-minor axis
ea = np.sqrt((a ** 2 - b ** 2) / a ** 2)
eb = np.sqrt((a ** 2 - b ** 2) / b ** 2)
p = np.sqrt(X ** 2 + Y ** 2)
theta = np.arctan2(Z * a, p * b)
        # compute longitude, latitude and altitude
longitude = np.arctan2(Y, X)
latitude = np.arctan2(Z + eb ** 2 * b * np.sin(theta) ** 3, p - ea ** 2 * a * np.cos(theta) ** 3)
N = a / np.sqrt(1 - ea ** 2 * np.sin(latitude) ** 2)
altitude = p / np.cos(latitude) - N
# return np.array([np.degrees(latitude), np.degrees(longitude), altitude])
return [np.degrees(longitude), np.degrees(latitude), altitude]
@staticmethod
def XYZ2LLAM(X, Y, Z):
        ''' Convert Earth-fixed Cartesian coordinates to geodetic lon/lat/alt
        Valid for the WGS84 ellipsoid
args:
x,y,z
return:
lat,long,altitude
'''
        # WGS84 ellipsoid parameters
        a = 6378137.0  # semi-major axis
        b = 6356752.314245  # semi-minor axis
ea = np.sqrt((a ** 2 - b ** 2) / a ** 2)
eb = np.sqrt((a ** 2 - b ** 2) / b ** 2)
p = np.sqrt(X ** 2 + Y ** 2)
theta = np.arctan2(Z * a, p * b)
        # compute longitude, latitude and altitude
longitude = np.arctan2(Y, X)
latitude = np.arctan2(Z + eb ** 2 * b * np.sin(theta) ** 3, p - ea ** 2 * a * np.cos(theta) ** 3)
N = a / np.sqrt(1 - ea ** 2 * np.sin(latitude) ** 2)
altitude = p / np.cos(latitude) - N
# return np.array([np.degrees(latitude), np.degrees(longitude), altitude])
return [np.degrees(longitude), np.degrees(latitude), altitude]
@staticmethod
def world2Pixel(geoMatrix, x, y):
"""
        Compute the pixel position of a geographic coordinate from a GDAL geotransform (gdal.GetGeoTransform())
"""
ulx = geoMatrix[0]
uly = geoMatrix[3]
xDist = geoMatrix[1]
yDist = geoMatrix[5]
rtnX = geoMatrix[2]
rtnY = geoMatrix[4]
pixel = int((x - ulx) / xDist)
line = int((uly - y) / abs(yDist))
return pixel, line
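    # Hedged example: with geoMatrix = (100.0, 0.001, 0, 40.0, 0, -0.001), the point
    # (100.0025, 39.9985) maps to pixel=2, line=1 (integer truncation).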
@staticmethod
def sar_intensity_synthesis(in_sar_tif, out_sar_tif):
        # read metadata of the SLC-format SAR image
in_ds = gdal.Open(in_sar_tif)
bands_num = in_ds.RasterCount
rows = in_ds.RasterYSize
columns = in_ds.RasterXSize
proj = in_ds.GetProjection()
geotrans = in_ds.GetGeoTransform()
        # create the output SAR intensity image
gtiff_driver = gdal.GetDriverByName('GTiff')
out_ds = gtiff_driver.Create(out_sar_tif, columns, rows, 1)
out_ds.SetProjection(proj)
out_ds.SetGeoTransform(geotrans)
        # write the SAR intensity image
in_data1 = in_ds.GetRasterBand(1).ReadAsArray(0, 0, columns, rows)
in_data1 = in_data1/10
in_data1 = np.power(10, in_data1)
in_data2 = in_ds.GetRasterBand(2).ReadAsArray(0, 0, columns, rows)
in_data2 = in_data2 / 10
in_data2 = np.power(10, in_data2)
out_data = np.sqrt(in_data1**2 + in_data2**2)
out_ds.GetRasterBand(1).WriteArray(out_data)
del in_ds, out_ds

Binary file not shown.

File diff suppressed because it is too large

View File

@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity type="win32" name="BackScatteringMain" processorArchitecture="amd64" version="1.0.0.0"/>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.Windows.Common-Controls" language="*" processorArchitecture="*" version="6.0.0.0" publicKeyToken="6595b64144ccf1df"/>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1"/>
</dependentAssembly>
</dependency>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
</assembly>

File diff suppressed because it is too large

Binary file not shown.

File diff suppressed because it is too large

Binary file not shown.

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff