Read valid data when the folder contains XML files

dev
cuiyyyu 2024-01-15 10:09:08 +08:00
parent 32267348fe
commit b94755f574
2218 changed files with 86 additions and 775866 deletions

View File

@ -2,6 +2,5 @@
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
<mapping directory="$PROJECT_DIR$/model_tool/process_lai" vcs="Git" />
</component>
</project>

View File

@ -1,7 +1,7 @@
<?xml version='1.0' encoding='utf-8'?>
<Root>
<TaskID>CSAR_202107275419_0001-0</TaskID>
<WorkSpace>D:\micro\WorkSpace\</WorkSpace>
<WorkSpace>F:\C-SAR\</WorkSpace>
<AlgCompt>
<DataTransModel>File</DataTransModel>
<Artificial>ElementAlg</Artificial>
@ -45,7 +45,7 @@
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\VegetationPhenology-likun\lijiang\GF3B_KSC_QPSI_007906_E100.2_N27.0_20230525_L1A_AHV_L10000190531.tar.gz</ParaValue>
<ParaValue>F:\C-SAR\GF3C_MYC_FSI_005874_E100.0_N26.5_20230519_L1A_HHHV_L10000148115.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@ -60,7 +60,7 @@
<ParaType>File</ParaType>
<DataType>File</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\VegetationPhenology-likun\lijiang\dem</ParaValue>
<ParaValue>F:\C-SAR\ns_dem</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>True</EnMultipleChoice>
<Control>File</Control>
@ -92,7 +92,7 @@
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>D:\micro\WorkSpace\Ortho\Output\GF3B_KSC_QPSI_007906_E100.2_N27.0_20230525_L1A_AHV_L10000190531-ortho.tar.gz</ParaValue>
<ParaValue>F:\C-SAR\Ortho\Output\GF3C_MYC_FSI_005874_E100.0_N26.5_20230519_L1A_HHHV_L10000148115-ortho.tar.gz</ParaValue>
<MaxValue>DEFAULT</MaxValue>
<MinValue>DEFAULT</MinValue>
<OptionValue>DEFAULT</OptionValue>

View File

@ -1,7 +1,7 @@
<?xml version='1.0' encoding='utf-8'?>
<Root>
<TaskID>CSAR_202107275419_0001-0</TaskID>
<WorkSpace>D:\micro\WorkSpace\</WorkSpace>
<WorkSpace>E:\Result_GF3\</WorkSpace>
<AlgCompt>
<DataTransModel>File</DataTransModel>
<Artificial>ElementAlg</Artificial>
@ -40,9 +40,9 @@
<ParaChsName>主影像</ParaChsName>
<Description>SAR image product after geometric and terrain correction</Description>
<ParaType>File</ParaType>
<DataType>zip</DataType>
<DataType>tar.gz</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\download\cls\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686.tar.gz</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@ -55,9 +55,9 @@
<ParaChsName>辅影像</ParaChsName>
<Description>SAR image product after geometric and terrain correction</Description>
<ParaType>File</ParaType>
<DataType>zip</DataType>
<DataType>tar.gz</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\download\cls\GF3_KAS_FSI_002034_E113.4_N34.7_20161228_L1A_HHHV_L10002077539.tar.gz</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\GF3_KAS_FSI_002034_E113.4_N34.7_20161228_L1A_HHHV_L10002077539.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@ -72,9 +72,9 @@
pressure_level, time, Geopotential, Relative humidity and Temperature. When DataType is file, ParaValue
holds a folder path, and the folder must contain three meteorological parameters in tif format: relative humidity, temperature and geopotential</Description>
<ParaType>File</ParaType>
<DataType>nc</DataType>
<DataType>zip</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\ERA5_N33_N36_E112_E115_20161228_22.nc</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\ERA5_N33_N36_E112_E115_20161228_22.zip</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@ -89,9 +89,9 @@
pressure_level, time, Geopotential, Relative humidity and Temperature. When DataType is file, ParaValue
holds a folder path, and the folder must contain three meteorological parameters in tif format: relative humidity, temperature and geopotential</Description>
<ParaType>File</ParaType>
<DataType>nc</DataType>
<DataType>zip</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\ERA5_N33_N36_E112_E115_20161129_22.nc</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\ERA5_N33_N36_E112_E115_20161129_22.zip</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@ -104,9 +104,9 @@
<ParaChsName>高程数据路径</ParaChsName>
<Description>Elevation data. Source: 30 m ASTGTM2; format: tif. Note: the latitude/longitude extent of the data must be whole degrees</Description>
<ParaType>File</ParaType>
<DataType>File</DataType>
<DataType>zip</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\dem</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\ASTGTM2_N34E113_dem.zip;E:\GF3Data\AtmophericDealy\ASTGTM2_N34E114_dem.zip;E:\GF3Data\AtmophericDealy\ASTGTM2_N35E113_dem.zip;E:\GF3Data\AtmophericDealy\ASTGTM2_N35E114_dem.zip</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@ -138,7 +138,7 @@
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\WorkSpace\AtmosphericDelay\Output\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686-IADC.tar.gz</ParaValue>
<ParaValue>E:\Result_GF3\AtmosphericDelay\Output\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686-IADC.tar.gz</ParaValue>
</Parameter>
</Outputs>
</AlgCompt>
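A note on the two ERA5 inputs above: the commit switches their DataType from nc to zip, but the Description still requires the NetCDF data to carry the variables longitude, latitude, pressure_level, time, Geopotential, Relative humidity and Temperature. A minimal sketch of such a variable check, assuming the netCDF4 package and a hypothetical era5_path (ERA5 files commonly store the last three variables under the short names z, r and t):

from netCDF4 import Dataset

REQUIRED = ['longitude', 'latitude', 'pressure_level', 'time', 'z', 'r', 't']

def check_era5_variables(era5_path):
    # returns the variable names missing from the file; an empty list means it is usable
    ds = Dataset(era5_path)
    missing = [v for v in REQUIRED if v not in ds.variables]
    ds.close()
    return missing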

View File

@ -13,6 +13,7 @@ from pykrige import OrdinaryKriging # force-import the package
from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from tool.algorithm.block.blockprocess import BlockProcess
from tool.algorithm.transforml1a.transHandle import TransImgL1A, TransImgL1A_ori
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource # XML reading and validation helpers
from tool.algorithm.xml.CreatMetafile import CreateMetafile
@ -38,6 +39,7 @@ from autorun import auto_run_main
import gc
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.algorithm.block.blockprocess import BlockProcess
EXE_NAME = cf.get('exe_name')
@ -102,7 +104,7 @@ class AtmosphericMain:
self.__workspace_tem_dir_path = self.__workspace_path + EXE_NAME + "\\Temporary\\"
self.__create_work_space()
self.__input_paras = self.__alg_xml_handler.get_input_paras() # read data names, types and paths from the input section
self.__processing_paras = self.__init_processing_paras(self.__input_paras) # returns {file name: path}
self.__processing_paras = self.__init_processing_paras(self.__input_paras, self.__workspace_preprocessed_path) # returns {file name: path}
SrcImageName = os.path.split(self.__input_paras["MasterSarData"]['ParaValue'])[1].split('.tar.gz')[0]
result_name = SrcImageName + tar + ".tar.gz"
self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', result_name)
@ -113,7 +115,10 @@ class AtmosphericMain:
for name in name_list:
if self.__input_paras[name]["DataType"] in ["file", "File"]:
self.NcHandle.copy_tif(self.__processing_paras[name], self.__workspace_preprocessing_path, name)
elif self.__input_paras[name]["DataType"] == "nc":
# elif self.__input_paras[name]["DataType"] == "nc":
# self.NcHandle.check_nc(self.__processing_paras[name]) # validate the meteorological data
# self.get_prodataset(name) # read the meteorological data and save it as tif
elif "nc" in self.__input_paras[name]["ParaName"].lower():
self.NcHandle.check_nc(self.__processing_paras[name]) # validate the meteorological data
self.get_prodataset(name) # read the meteorological data and save it as tif
@ -121,7 +126,7 @@ class AtmosphericMain:
logger.info('progress bar :10')
return True
def __init_processing_paras(self, names):
def __init_processing_paras(self, names, out_path):
"""
param: names: list of dicts, each dict holding the configuration of one input product
"""
@ -133,9 +138,24 @@
logger.error(name + " is None!")
return False
para_path = para['ParaValue']
# if para['ParaType'] == 'File':
# if para['DataType'] == "nc":
# processing_paras.update({name: para_path})
# elif para['DataType'] == 'file':
# if name in ["MasterNC", "AuxiliaryNC"]:
# processing_paras.update({name: para_path})
if para['ParaType'] == 'File':
if para['DataType'] == "nc":
processing_paras.update({name: para_path})
if para['DataType'] == "zip":
para_value_list = para['ParaValue'].split(";") # split the ';'-separated archive list
if len(para_value_list) == 1:
para_path = para['ParaValue']
if para_path != 'empty' and para_path != '':
file_path = BlockProcess.unzip_file(para_path, out_path)
processing_paras.update({name: file_path})
else:
for n, para_value_zip in enumerate(para_value_list):
file_path = BlockProcess.unzip_file(para_value_zip, out_path)
processing_paras.update({name+str(n): file_path})
elif para['DataType'] == 'file':
if name in ["MasterNC", "AuxiliaryNC"]:
processing_paras.update({name: para_path})
@ -172,8 +192,9 @@ class AtmosphericMain:
if os.path.exists(dem_path) is False:
os.mkdir(dem_path)
for file_path in para_path_list:
tif_name = os.path.basename(file_path)
shutil.copy(file_path, os.path.join(dem_path, tif_name))
BlockProcess.unzip_file(file_path, dem_path)
# tif_name = os.path.basename(temp_file_path)
# shutil.copy(file_path, os.path.join(dem_path, tif_name))
para_path = os.path.join(self.__workspace_origin_path, para['ParaName'])
processing_paras.update({'DEM': para_path})
# # unzip the DEM into the target folder
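The zip branch added above splits ParaValue on ';' and hands each archive to BlockProcess.unzip_file. A standalone sketch of that unpack-and-register step, assuming unzip_file extracts an archive under out_path and returns the extracted folder (the helper below is a hypothetical stand-in, not the project's implementation):

import os
import tarfile
import zipfile

def unzip_file(archive_path, out_path):
    # assumed behavior of BlockProcess.unzip_file: extract and return the new folder
    name = os.path.basename(archive_path).split('.')[0]
    target = os.path.join(out_path, name)
    if archive_path.endswith('.zip'):
        with zipfile.ZipFile(archive_path) as zf:
            zf.extractall(target)
    else:  # .tar.gz and friends
        with tarfile.open(archive_path) as tf:
            tf.extractall(target)
    return target

def register_zip_inputs(name, para_value, out_path, processing_paras):
    # one archive keeps the plain name; a ';'-separated list gets index suffixes
    parts = para_value.split(';')
    if len(parts) == 1:
        if parts[0] not in ('', 'empty'):
            processing_paras[name] = unzip_file(parts[0], out_path)
    else:
        for n, part in enumerate(parts):
            processing_paras[name + str(n)] = unzip_file(part, out_path)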

View File

@ -1,166 +0,0 @@
#
# library of model computations
#
import cython
cimport cython # must be cimported
import numpy as np
cimport numpy as np
from libc.math cimport pi
from scipy.optimize import leastsq
import random
import logging
logger = logging.getLogger("mylog")
def WMCModel(param_arr,sample_lai,sample_soil,sample_inc,sample_sigma):
""" WMC model, with a normalized vegetation index term added
Args:
param_arr (np.ndarray): model parameters [A, B, C, D, M, N]
sample_lai (double): LAI sample
sample_soil (double): soil-moisture sample
sample_inc (double): incidence-angle sample (radians)
sample_sigma (double): backscatter sample (linear power)
Returns:
double: model residual for this sample
"""
# map the parameters so the model is easy to modify
A,B,C,D,M,N=param_arr # change the model here
V_lai=sample_lai
#V_lai=E*sample_lai+F
exp_gamma=np.exp(-2*B*((V_lai*D+C))*(1/np.cos(sample_inc)))
sigma_soil=M*sample_soil+N
sigma_veg=A*((V_lai))*np.cos(sample_inc)
f_veg=1
result=sigma_veg*(1-exp_gamma)+sigma_soil*exp_gamma-sample_sigma
return result
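In formula form, the residual returned above is the water-cloud model written with the code's parameter mapping (a restatement of the code, not an authoritative reference), where V_{lai} = sample_lai, W = sample_soil, \theta = sample_inc and \sigma_{obs} = sample_sigma:

\gamma = \exp\left(-\frac{2B\,(D\,V_{lai}+C)}{\cos\theta}\right), \qquad \sigma_{soil} = M\,W + N, \qquad \sigma_{veg} = A\,V_{lai}\cos\theta

\mathrm{result} = \sigma_{veg}\,(1-\gamma) + \sigma_{soil}\,\gamma - \sigma_{obs}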
def train_WMCmodel(lai_water_inc_sigma_list,params_X0,train_err_image_path,draw_flag=True):
""" fit the model parameters
Args:
lai_water_inc_sigma_list (list): training samples
"""
def f(X):
eqs=[]
for lai_water_inc_sigma_item in lai_water_inc_sigma_list:
sample_lai=lai_water_inc_sigma_item[4]
sample_sigma=lai_water_inc_sigma_item[5] # 5: csv_sigma, 8:tiff_sigma
sample_soil=lai_water_inc_sigma_item[6]
sample_inc=lai_water_inc_sigma_item[7]
FVC=lai_water_inc_sigma_item[8]
eqs.append(WMCModel(X,sample_lai,sample_soil,sample_inc,sample_sigma))
return eqs
X0 = params_X0 # initial parameter values
logger.info(str(X0))
h = leastsq(f, X0)
logger.info('%s %s', h[0], h[1])
err_f=f(h[0])
x_arr=[lai_waiter_inc_sigma_item[4] for lai_waiter_inc_sigma_item in lai_water_inc_sigma_list]
# sort the samples by error magnitude
logger.info("training set:\npoints ordered by error\ncount:{}\nindex\terror\t sample info".format(str(np.array(err_f).shape)))
for i in np.argsort(np.array(err_f)):
logger.info('{}\t{}\t{}'.format(i,err_f[i],str(lai_water_inc_sigma_list[i])))
logger.info("\nend of the error-ordered listing\n")
if draw_flag:
logger.info(err_f)
logger.info(np.where(np.abs(err_f)<10))
from matplotlib import pyplot as plt
plt.scatter(x_arr,err_f)
plt.title("equation-err")
plt.savefig(train_err_image_path,dpi=600)
plt.show()
return h[0]
def test_WMCModel(lai_waiter_inc_sigma_list,param_arr,lai_X0,test_err_image_path,draw_flag=True):
""" evaluate the trained model
Args:
lai_waiter_inc_sigma_list (list): test samples
param_arr (np.ndarray): fitted parameters [A, B, C, D, M, N]
lai_X0 (double): initial LAI guess for the per-sample inversion
Returns:
list: [sample_lai,err,predict]
"""
err=[]
err_f=[]
x_arr=[]
err_lai=[]
for lai_waiter_inc_sigma_item in lai_waiter_inc_sigma_list:
sample_time,sample_code,sample_lon,sample_lat,sample_lai,csv_sigma,sample_soil,sample_inc,sample_sigma=lai_waiter_inc_sigma_item
def f(X):
lai=X[0]
eqs=[WMCModel(param_arr,lai,sample_soil,sample_inc,csv_sigma)]
return eqs
X0=lai_X0
h = leastsq(f, X0)
temp_err=h[0]-sample_lai
err_lai.append(temp_err[0]) # LAI prediction error
err.append([sample_lai,temp_err[0],h[0][0],sample_code])
err_f.append(f(h[0])[0]) # equation residual
x_arr.append(sample_lai)
# sort the samples by error magnitude
logger.info("test set:\npoints ordered by error\ncount:{}\nindex\terror\t equation residual\tsample info".format(str(np.array(err_lai).shape)))
for i in np.argsort(np.array(err_lai)):
logger.info('{}\t{}\t{}\t{}'.format(i,err_lai[i],err_f[i],str(lai_waiter_inc_sigma_list[i])))
logger.info("\nend of the error-ordered listing\n")
if draw_flag:
from matplotlib import pyplot as plt
plt.scatter(x_arr,err_lai)
plt.title("equation-err")
plt.savefig(test_err_image_path,dpi=600)
plt.show()
return err
def processs_WMCModel(param_arr,lai_X0,sigma,inc_angle,soil_water):
if(sigma<0 ):
return np.nan
def f(X):
lai=X[0]
eqs=[WMCModel(param_arr,lai,soil_water,inc_angle,sigma )]
return eqs
h = leastsq(f, [lai_X0])
return h[0][0]
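A small usage sketch of the per-pixel inversion above; the parameter values are made up purely for illustration:

import numpy as np

# hypothetical fitted parameters [A, B, C, D, M, N]
params = np.array([0.12, 0.30, 0.01, 0.05, 1.8, -0.3])
# lai_X0=1.0, sigma=0.08 (linear), inc_angle=0.6 rad, soil_water=0.25
lai = processs_WMCModel(params, 1.0, 0.08, 0.6, 0.25)
print(lai)  # np.nan whenever sigma < 0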
# entry point of the Cython extension
cpdef np.ndarray[double,ndim=2] process_tiff(np.ndarray[double,ndim=2] sigma_tiff,
np.ndarray[double,ndim=2] inc_tiff,
np.ndarray[double,ndim=2] soil_water_tiff,
np.ndarray[double,ndim=1] param_arr,
double lai_X0):
cdef np.ndarray[double,ndim=2] result=sigma_tiff
cdef int param_arr_length=param_arr.shape[0]
cdef int height=sigma_tiff.shape[0]
cdef int width=sigma_tiff.shape[1]
cdef int i=0
cdef int j=0
cdef double temp=0
while i<height:
j=0
while j<width:
temp = processs_WMCModel(param_arr,lai_X0,sigma_tiff[i,j],inc_tiff[i,j],soil_water_tiff[i,j])
temp=temp if temp<10 and temp>=0 else np.nan
result[i,j]=temp
j=j+1
i=i+1
return result

File diff suppressed because it is too large

View File

@ -1,45 +0,0 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./LAIProcess') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./LAIProcess.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# command: python setup.py build_ext --inplace

View File

@ -1,117 +0,0 @@
# -*- encoding: utf-8 -*-
# code from https://blog.csdn.net/theonegis/article/details/54427906
from osgeo import gdal
from osgeo import osr
import numpy as np
def getSRSPair(dataset):
"""
get the projected and geographic reference systems of a dataset
:param dataset: GDAL dataset
:return: projected SRS, geographic SRS
"""
prosrs = osr.SpatialReference()
prosrs.ImportFromWkt(dataset.GetProjection())
geosrs = prosrs.CloneGeogCS()
return prosrs, geosrs
def geo2lonlat(dataset, x, y):
"""
convert projected coordinates to lon/lat; the projection is taken from the dataset
:param dataset: GDAL dataset
:param x: projected x
:param y: projected y
:return: the (lon, lat) corresponding to the projected point (x, y)
"""
prosrs, geosrs = getSRSPair(dataset)
ct = osr.CoordinateTransformation(prosrs, geosrs)
coords = ct.TransformPoint(x, y)
return coords[:2]
def lonlat2geo(dataset, lon, lat):
"""
convert lon/lat to projected coordinates; the projection is taken from the dataset
:param dataset: GDAL dataset
:param lon: longitude
:param lat: latitude
:return: the projected coordinates corresponding to (lon, lat)
"""
prosrs, geosrs = getSRSPair(dataset)
ct = osr.CoordinateTransformation(geosrs, prosrs)
coords = ct.TransformPoint(lat, lon)
return coords[:2]
def imagexy2geo(dataset, row, col):
"""
convert image (row, col) to projected or geographic coordinates via GDAL's six-parameter model
:param dataset: GDAL dataset
:param row: pixel row
:param col: pixel column
:return: the projected or geographic coordinates (x, y) of (row, col)
"""
trans = dataset.GetGeoTransform()
px = trans[0] + col * trans[1] + row * trans[2]
py = trans[3] + col * trans[4] + row * trans[5]
return px, py
def geo2imagexy(dataset, x, y):
"""
convert projected or geographic coordinates to image coordinates via GDAL's six-parameter model
:param dataset: GDAL dataset
:param x: projected or geographic x
:param y: projected or geographic y
:return: the image (col, row) of (x, y)
"""
trans = dataset.GetGeoTransform()
a = np.array([[trans[1], trans[2]], [trans[4], trans[5]]])
b = np.array([x - trans[0], y - trans[3]])
return np.linalg.solve(a, b) # solve the 2x2 linear system with numpy.linalg.solve
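imagexy2geo and geo2imagexy above are mutual inverses under GDAL's six-parameter affine model. Writing the geotransform as (t_0, \dots, t_5):

x = t_0 + \mathrm{col}\cdot t_1 + \mathrm{row}\cdot t_2, \qquad y = t_3 + \mathrm{col}\cdot t_4 + \mathrm{row}\cdot t_5

so geo2imagexy recovers (col, row) by solving

\begin{pmatrix} t_1 & t_2 \\ t_4 & t_5 \end{pmatrix}\begin{pmatrix}\mathrm{col}\\ \mathrm{row}\end{pmatrix} = \begin{pmatrix} x - t_0 \\ y - t_3 \end{pmatrix}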
def test1():
gdal.AllRegister()
tif = 'D:/DATA/testdata/GLCFCS30_E110N25.tif'
# dataset = gdal.Open(r"D:\\DATA\\雷达测试\\GaoFen3_20200528_HH_DB.tif")
dataset = gdal.Open(tif)
print('projection:')
print(dataset.GetProjection())
print('raster size (rows, cols):')
print('(%s %s)' % (dataset.RasterYSize, dataset.RasterXSize))
x = 793214.118
y = 2485865.527
lon = 113.84897082317516
lat = 22.453998686022448
row = 24576
col = 22540
print('image coords -> projected coords:')
coords = imagexy2geo(dataset, row, col)
print('(%s, %s)->(%s, %s)' % (row, col, coords[0], coords[1]))
print('projected coords -> image coords:')
coords = geo2imagexy(dataset, x, y)
col = coords[0]
row = coords[1]
print('(%s, %s)->(%s, %s)' % (x, y, coords[0], coords[1]))
print('projected coords -> lon/lat:')
coords = geo2lonlat(dataset, x, y)
print('(%s, %s)->(%s, %s)' % (x, y, coords[0], coords[1]))
print('lon/lat -> projected coords:')
coords = lonlat2geo(dataset, lon, lat)
print('(%s, %s)->(%s, %s)' % (lon, lat, coords[0], coords[1]))
coords1 = geo2lonlat(dataset, 657974.118, 2633321.527)
print(coords1)
coords2 = geo2lonlat(dataset, 793214.118, 2485865.527)
print(coords2)
pass
# if __name__ == '__main__':
#
# print('done')

View File

@ -1,156 +0,0 @@
"""
@Project microproduct
@File DEMJoint
@Function main routines
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
from osgeo import gdal, osr
import os
import numpy as np
class DEMProcess:
"""
DEM mosaicking and resampling
"""
def __init__(self):
pass
@staticmethod
def get_extent(fn):
'''
source: https://blog.csdn.net/XBR_2014/article/details/85255412
'''
ds = gdal.Open(fn)
rows = ds.RasterYSize
cols = ds.RasterXSize
# get the image corner coordinates
gt = ds.GetGeoTransform()
minx = gt[0]
maxy = gt[3]
maxx = gt[0] + gt[1] * cols # the x extent spans the columns
miny = gt[3] + gt[5] * rows # the y extent spans the rows (gt[5] < 0)
return (minx, maxy, maxx, miny)
@staticmethod
def img_mosaic(in_files, out_dem_path):
# compare the extents pairwise to obtain the four corner
# coordinates of the mosaicked image
minX, maxY, maxX, minY = DEMProcess.get_extent(in_files[0])
for fn in in_files[1:]:
minx, maxy, maxx, miny = DEMProcess.get_extent(fn)
minX = min(minX, minx)
maxY = max(maxY, maxy)
maxX = max(maxX, maxx)
minY = min(minY, miny)
# compute the output image size
in_ds = gdal.Open(in_files[0])
bands_num = in_ds.RasterCount
gt = in_ds.GetGeoTransform()
rows = int((maxY - minY) / abs(gt[5])) # rows follow the y extent
cols = int((maxX - minX) / gt[1]) # columns follow the x extent
# raster data type (fixed to UInt16 here)
datatype = gdal.GDT_UInt16
# create the output image
driver = gdal.GetDriverByName('GTiff')
out_dem = os.path.join(out_dem_path, 'mosaic0.tif')
out_ds = driver.Create(out_dem, cols, rows, bands_num, datatype)
out_ds.SetProjection(in_ds.GetProjection())
gt = list(in_ds.GetGeoTransform())
gt[0], gt[3] = minX, maxY
out_ds.SetGeoTransform(gt)
for fn in in_files:
in_ds = gdal.Open(fn)
x_size = in_ds.RasterXSize
y_size = in_ds.RasterYSize
trans = gdal.Transformer(in_ds, out_ds, [])
success, xyz = trans.TransformPoint(False, 0, 0)
x, y, z = map(int, xyz)
for i in range(1, bands_num + 1):
data = in_ds.GetRasterBand(i).ReadAsArray()
out_band = out_ds.GetRasterBand(i)
out_data = out_band.ReadAsArray(x, y, x_size, y_size)
data = np.maximum(data, out_data)
out_band.WriteArray(data, x, y)
del in_ds, out_band, out_ds
@staticmethod
def dem_clip(OutFilePath, DEMFilePath, SelectArea):
'''
clip the DEM to the selected area and write the result
args:
OutFilePath: output path of the clipped DEM
DEMFilePath: input DEM path
SelectArea: list [(xmin,ymax),(xmax,ymin)], top-left and bottom-right of the selection box
'''
DEM_ptr = gdal.Open(DEMFilePath)
DEM_GeoTransform = DEM_ptr.GetGeoTransform() # read the geotransform
DEM_InvGeoTransform = gdal.InvGeoTransform(DEM_GeoTransform)
SelectAreaArrayPoints = [gdal.ApplyGeoTransform(DEM_InvGeoTransform, p[0], p[1]) for p in SelectArea]
SelectAreaArrayPoints = list(map(lambda p: (int(p[0]), int(p[1])), SelectAreaArrayPoints)) # to integer pixel coordinates
[(ulx, uly), (brx, bry)] = SelectAreaArrayPoints
rowCount, colCount = bry - uly, brx - ulx
# geotransform of the output DEM
Out_Transfrom = list(DEM_GeoTransform)
Out_Transfrom[0] = SelectArea[0][0]
Out_Transfrom[3] = SelectArea[0][1]
# build the output DEM
Bands_num = DEM_ptr.RasterCount
gtiff_driver = gdal.GetDriverByName('GTiff')
datatype = gdal.GDT_UInt16
out_dem = gtiff_driver.Create(OutFilePath, colCount, rowCount, Bands_num, datatype)
out_dem.SetProjection(DEM_ptr.GetProjection())
out_dem.SetGeoTransform(Out_Transfrom)
for i in range(1, Bands_num + 1):
data_band = DEM_ptr.GetRasterBand(i)
out_band = out_dem.GetRasterBand(i)
data = data_band.ReadAsArray(ulx, uly, colCount, rowCount)
out_band.WriteArray(data)
del out_dem
@staticmethod
def dem_resample(in_dem_path, out_dem_path):
'''
DEM resampling; the default CRS is WGS84
args:
in_dem_path: input DEM folder path
out_dem_path: output DEM folder path
'''
# collect every DEM tif in the folder
dem_file_paths=[os.path.join(in_dem_path,dem_name) for dem_name in os.listdir(in_dem_path) if dem_name.find(".tif")>=0 and dem_name.find(".tif.")==-1]
spatialreference=osr.SpatialReference()
spatialreference.SetWellKnownGeogCS("WGS84") # geographic coordinates in degrees
spatialproj=spatialreference.ExportToWkt() # export the CRS as WKT
# mosaic the DEMs into one virtual image
mergeFile =gdal.BuildVRT(os.path.join(out_dem_path,"mergeDEM.tif"), dem_file_paths)
out_DEM=os.path.join(out_dem_path,"mosaic.tif")
gdal.Warp(out_DEM,
mergeFile,
format="GTiff",
dstSRS=spatialproj,
dstNodata=-9999,
outputType=gdal.GDT_Float32)
return out_DEM
# if __name__ == "__main__":
# DEMProcess = DEMProcess()
# in_dem_path = r'F:\大气延迟\out_dem'
# out_dem_path = r'F:\大气延迟\out_dem'
# DEMProcess.dem_resample(in_dem_path, out_dem_path)

View File

@ -1,154 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File ScatteringAuxData.py
@Function backscatter auxiliary data
@Author SHJ
@Contact
@Date 2022/6/29
@Version 1.0.0
Revision history:
[seq] [date] [author] [change]
1 2022-6-29 Shi Haijun 1. support extracting info from both GF3 meta files and ortho-corrected meta files
"""
import logging
from xml.etree.ElementTree import ElementTree
import math
logger = logging.getLogger("mylog")
class GF3L1AMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('processinfo').find('CalibrationConst').find(polarization).text) if root.find('processinfo').find('CalibrationConst').find(polarization).text!="NULL" else 0
return Kdb
class OrthoMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('l1aInfo').find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('l1aInfo').find('processinfo').find('CalibrationConst').find(polarization).text)
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# get the radar center frequency
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
RadarCenterFrequency = float(root.find('sensor').find('RadarCenterFrequency').text)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# get the radar wavelength in meters
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
lamda = float(root.find('sensor').find('lamda').text)
return lamda
class MetaDataHandler:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
try:
QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_QualifyValue() error!')
QualifyValue = GF3L1AMetaData.get_QualifyValue(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_QualifyValue() success!')
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
try:
Kdb = OrthoMetaData.get_Kdb(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_Kdb() error!')
Kdb = GF3L1AMetaData.get_Kdb(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_Kdb() success!')
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# get the radar center frequency in GHz
RadarCenterFrequency = OrthoMetaData.get_RadarCenterFrequency(meta_file_path)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# get the radar wavelength in meters
lamda = OrthoMetaData.get_lamda(meta_file_path)
return lamda
class Calibration:
def __init__(self):
pass
@staticmethod
def get_Calibration_coefficient(meta_file_path, polarization):
# calibration coefficients ordered as [HH, HV, VH, VV]
calibration = [0, 0, 0, 0]
pol_index = {'HH': 0, 'HV': 1, 'VH': 2, 'VV': 3}
for i in polarization:
if i in pol_index:
quality = MetaDataHandler.get_QualifyValue(meta_file_path, i)
kdb = MetaDataHandler.get_Kdb(meta_file_path, i)
data_value = ((quality/32767)**2) * (10**((kdb/10)*-1))
calibration[pol_index[i]] = math.sqrt(data_value)
return calibration
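In formula form, the loop above computes, for each polarization p read from the meta file (a transcription of the code):

c_p = \sqrt{\left(\frac{\mathrm{QualifyValue}_p}{32767}\right)^{2} \cdot 10^{-K_{db,p}/10}}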
# if __name__ == '__main__':
# A = ScatteringAuxData()
# dir = 'G:\MicroWorkspace\C-SAR\AuxSAR\GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485_geo/'
# path = dir + 'GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml'
# path1 = dir + 'OrthoProduct.meta.xml'
# t1 = A.get_QualifyValue(path, 'HH')
# t2 = A.get_Kdb(path, 'HH')
# t3 = A.get_RadarCenterFrequency(path)
# t4 = A.get_lamda(path)
# pass

View File

@ -1,528 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File PreProcess.py
@Function coordinate transforms, CRS conversion, image clipping, reprojection and resampling
@Author LMM
@Date 2021/8/25 14:17
@Version 1.0.0
"""
from shapely.geometry import Polygon # the gdal import must come after this line, otherwise errors occur
from osgeo import gdal
from osgeo import gdalconst
from osgeo import osr
from osgeo import ogr
import os
import cv2
import numpy as np
import shutil
import scipy.spatial.transform
import scipy.spatial.transform._rotation_groups # needed to avoid packaging errors
import scipy.special.cython_special # needed to avoid packaging errors
import shapefile
from shapely.errors import TopologicalError
from tool.algorithm.image.ImageHandle import ImageHandler
import logging
logger = logging.getLogger("mylog")
#
# os.environ['PROJ_LIB'] = os.getcwd()
class PreProcess:
"""
preprocessing: co-register all images
"""
def __init__(self):
self._ImageHandler = ImageHandler()
pass
def cal_scopes(self, processing_paras):
# compute the ROI scopes
scopes = ()
for key, value in processing_paras.items():
if 'ori_sim' in key:
scopes += (ImageHandler.get_scope_ori_sim(value),)
if processing_paras['box'] != "" and processing_paras['box'] != "empty": # both checks must hold
scopes += self.box2scope(processing_paras['box'])
return scopes
def cal_scopes_roi(self, processing_paras):
return self.intersect_polygon(self.cal_scopes(processing_paras))
def cut_geoimg(self,workspace_preprocessing_path, para_names_geo, processing_paras):
# print(os.environ['PROJ_LIB'])
self.check_img_projection(workspace_preprocessing_path, para_names_geo, processing_paras)
# compute the ROI
scopes = self.cal_scopes(processing_paras)
# compute the image footprints and their intersection
intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
scopes_roi = self.cal_intersect_shp(intersect_shp_path, para_names_geo, processing_paras, scopes)
# clip the images: the microwave image and the other images
cutted_img_paths = self.cut_imgs(workspace_preprocessing_path, para_names_geo, processing_paras, intersect_shp_path)
return cutted_img_paths, scopes_roi
def preprocessing(self, para_names, ref_img_name, processing_paras, workspace_preprocessing_path, workspace_preprocessed_path):
# read each image and check its coordinate system
self.check_img_projection(workspace_preprocessing_path, para_names, processing_paras)
# compute the image footprints and their intersection
intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
self.cal_intersect_shp(intersect_shp_path, para_names, processing_paras,
self.box2scope(processing_paras['box']))
logger.info('create intersect shp success!')
# clip the images: the microwave image and the other images
cutted_img_paths = self.cut_imgs(workspace_preprocessing_path, para_names, processing_paras,
intersect_shp_path)
logger.info('cut images success!')
# resample to the microwave image resolution and save to the temporary directory
preprocessed_paras = self.resampling_img(workspace_preprocessed_path, para_names, cutted_img_paths,cutted_img_paths[ref_img_name])
# the preprocessing cache can be cleared here
logger.info('preprocess_handle success!')
return preprocessed_paras # cutted_img_paths
def get_ref_inf(self, ref_img_path):
"""get the image information of the reference image"""
cols = ImageHandler.get_img_width(ref_img_path)
rows = ImageHandler.get_img_height(ref_img_path)
proj = ImageHandler.get_projection(ref_img_path)
geo = ImageHandler.get_geotransform(ref_img_path)
return ref_img_path, cols, rows, proj, geo
def check_img_projection(self, out_dir, para_names, processing_paras):
"""
read each image and check its coordinate system;
convert projected-CRS images to the geographic CRS (EPSG:4326)
:param para_names: names of the parameters to check
"""
if len(para_names) == 0:
return False
for name in para_names:
proj = ImageHandler.get_projection(processing_paras[name])
keyword = proj.split("[", 2)[0]
if keyword == "PROJCS":
# projected CRS -> geographic CRS
para_dir = os.path.split(processing_paras[name])
out_para = os.path.join(out_dir, para_dir[1].split(".", 1)[0] + "_EPSG4326.tif")
self.trans_epsg4326(out_para, processing_paras[name])
processing_paras[name] = out_para
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('coordinate is missing!')
def preprocessing_oh2004(self, para_names, processing_paras, workspace_preprocessing_path, workspace_preprocessed_path):
# read each image and check its coordinate system
self.check_img_projection(workspace_preprocessing_path, para_names, processing_paras)
# compute the image footprints and their intersection
intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
scopes = self.cal_intersect_shp(intersect_shp_path, para_names, processing_paras,
self.box2scope(processing_paras['box']))
logger.info('create intersect shp success!')
# clip the images: the microwave image and the other images
cutted_img_paths = self.cut_imgs(workspace_preprocessed_path, para_names, processing_paras,
intersect_shp_path)
logger.info('cut images success!')
# resampling to the microwave image resolution is left to the caller; return the clipped images
return cutted_img_paths, scopes
@staticmethod
def lonlat2geo(lat, lon):
"""
convert WGS84 lon/lat to planar (projected) coordinates
Param: lat is the WGS_1984 latitude
Param: lon is the WGS_1984 longitude
returns the converted coordinates x, y
"""
dstsrs1 = osr.SpatialReference()
dstsrs1.ImportFromEPSG(32649)
dstsrs2 = osr.SpatialReference()
dstsrs2.ImportFromEPSG(4326)
ct = osr.CoordinateTransformation(dstsrs2, dstsrs1)
coords = ct.TransformPoint(lat, lon)
# print("converted coordinates x, y:", coords[:2])
return coords[:2]
@staticmethod
def trans_geogcs2projcs(out_path, in_path):
"""
:param out_path: output path of the WGS84 projected image
:param in_path: input path of the geographic-coordinate image
"""
# create the output directory
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
options = gdal.WarpOptions(format='GTiff', srcSRS='EPSG:4326', dstSRS='EPSG:32649')
gdal.Warp(out_path, in_path, options=options)
@staticmethod
def trans_projcs2geogcs(out_path, in_path):
"""
:param out_path: output path of the WGS84 geographic image
:param in_path: input path of the projected image
"""
# create the output directory
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
options = gdal.WarpOptions(format='GTiff', srcSRS='EPSG:32649', dstSRS='EPSG:4326')
gdal.Warp(out_path, in_path, options=options)
@staticmethod
def trans_projcs2geogcs(out_path, in_path ,EPSG_src=32649,EPSG_dst=4326):
"""
:param out_path: output path of the WGS84 geographic image
:param in_path: input path of the projected image
:param EPSG_src: source EPSG code
:param EPSG_dst: target EPSG code
"""
str_EPSG_src = 'EPSG:'+ str(EPSG_src)
str_EPSG_dst = 'EPSG:'+ str(EPSG_dst)
# create the output directory
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
options = gdal.WarpOptions(format='GTiff', srcSRS=str_EPSG_src, dstSRS=str_EPSG_dst)
gdal.Warp(out_path, in_path, options=options)
@staticmethod
def trans_epsg4326(out_path, in_path):
OutTile = gdal.Warp(out_path, in_path,
dstSRS='EPSG:4326',
resampleAlg=gdalconst.GRA_Bilinear
)
OutTile = None
return True
@staticmethod
def box2scope(str_box):
roi_box = ()
if str_box == '' or str_box == 'empty':
return roi_box
box_list = [float(num) for num in list(str_box.split(';'))]
if len(box_list) == 4:
roi_box = ([[box_list[2], box_list[1]], [box_list[3], box_list[1]], [box_list[2], box_list[0]],
[box_list[3], box_list[0]]],)
return roi_box
def cal_intersect_shp(self, shp_path, para_names,processing_paras, add_scope =()):
"""
:param shp_path: where to save the intersection shapefile
:param para_names: names of the images to intersect
:return: True or False
"""
scopes = ()
if len(add_scope) != 0:
scopes += add_scope
for name in para_names:
scope_tuple = (self._ImageHandler.get_scope(processing_paras[name]),)
scopes += scope_tuple
for n, scope in enumerate(scopes):
logging.info("scope" + str(n) + ":%s", scope)
intersect_polygon = self.intersect_polygon(scopes)
if intersect_polygon is None:
logger.error('image range does not overlap!')
raise Exception('create intersect shp fail!')
logging.info("scope roi :%s", intersect_polygon)
if self.write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
raise Exception('create intersect shp fail!')
return intersect_polygon
@staticmethod
def intersect_polygon(scopes_tuple):
"""
compute the intersection of several regions; note each region is replaced by its convex hull before intersecting
:param scopes_tuple: tuple of region coordinate lists
:return: intersection coordinates ((x0,y0),(x1,y1),..., (xn,yn))
"""
if len(scopes_tuple) < 2:
logger.error('len(scopes_tuple) < 2')
# return # todo: fixed the case where a single scene made the intersection undeterminable
try:
# shapely computes the quadrilateral's corners automatically; the final order is top-left, bottom-left, bottom-right, top-right, top-left
tmp = tuple(scopes_tuple[0])
poly_intersect = Polygon(tmp).convex_hull
for i in range(len(scopes_tuple)-1):
polygon_next = Polygon(tuple(scopes_tuple[i+1])).convex_hull
if poly_intersect.intersects(polygon_next):
poly_intersect = poly_intersect.intersection(polygon_next)
else:
msg = 'Image:' + str(i) + 'range does not overlap!'
logger.error(msg)
return
return list(poly_intersect.boundary.coords)[:-1]
# except shapely.geos.TopologicalError:
except TopologicalError:
logger.error('shapely.geos.TopologicalError occurred!')
return
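A minimal sketch of the convex-hull intersection used above, with two made-up overlapping footprints:

from shapely.geometry import Polygon

a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]).convex_hull
b = Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]).convex_hull
if a.intersects(b):
    roi = a.intersection(b)
    print(list(roi.boundary.coords)[:-1])  # corners of the shared unit square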
@staticmethod
def write_polygon_shp(out_shp_path, point_list, EPSG =32649):
"""
create a closed-ring vector file
:param out_shp_path: where to save the shapefile
:param point_list: list of ring points [[x0,y0],[x1,y1]...[xn,yn]]
:return: True or False
"""
# support non-ASCII paths
gdal.SetConfigOption("GDAL_FILENAME_IS_UTF8", "NO")
# let attribute-table fields hold non-ASCII text
gdal.SetConfigOption("SHAPE_ENCODING", "")
# register all drivers
ogr.RegisterAll()
# create the data source, here an ESRI shapefile
str_driver_name = "ESRI Shapefile"
o_driver = ogr.GetDriverByName(str_driver_name)
if o_driver is None:
msg = 'driver('+str_driver_name+')is invalid value'
logger.error(msg)
return False
# create the data source
if os.path.exists(out_shp_path) and os.path.isfile(out_shp_path): # if a file of the same name exists
os.remove(out_shp_path) # delete it
o_ds = o_driver.CreateDataSource(out_shp_path)
if o_ds is None:
msg = 'create file failed!' + out_shp_path
logger.error(msg)
return False
# create a polygon layer
srs = osr.SpatialReference()
#srs.ImportFromEPSG(32649) # projected CRS, WGS84 / UTM zone 49N
srs.ImportFromEPSG(EPSG) # geographic CRS by EPSG code
o_layer = o_ds.CreateLayer("TestPolygon", srs, ogr.wkbPolygon)
if o_layer is None:
msg = 'create coverage failed!'
logger.error(msg)
return False
# build the attribute table
# an integer field named FieldID
o_field_id = ogr.FieldDefn("FieldID", ogr.OFTInteger)
o_layer.CreateField(o_field_id, 1)
# a string field named FieldName, width 100
o_field_name = ogr.FieldDefn("FieldName", ogr.OFTString)
o_field_name.SetWidth(100)
o_layer.CreateField(o_field_name, 1)
o_defn = o_layer.GetLayerDefn()
# create the rectangle feature
o_feature_rectangle = ogr.Feature(o_defn)
o_feature_rectangle.SetField(0, 1)
o_feature_rectangle.SetField(1, "IntersectRegion")
# create the ring geometry
ring = ogr.Geometry(ogr.wkbLinearRing)
for i in range(len(point_list)):
ring.AddPoint(point_list[i][0], point_list[i][1])
ring.CloseRings()
# wrap the ring in a polygon geometry
geom_rect_polygon = ogr.Geometry(ogr.wkbPolygon)
geom_rect_polygon.AddGeometry(ring)
o_feature_rectangle.SetGeometry(geom_rect_polygon)
o_layer.CreateFeature(o_feature_rectangle)
o_ds.Destroy()
return True
def cut_imgs(self, out_dir, para_names, processing_paras, shp_path):
"""
clip images with vector data
:param para_names: names of the parameters to clip
:param shp_path: shapefile used for clipping
"""
if len(para_names) == 0:
return {}
cutted_img_paths = {}
try:
for name in para_names:
input_path = processing_paras[name]
output_path = os.path.join(out_dir, name + '_cut.tif')
self.cut_img(output_path, input_path, shp_path)
cutted_img_paths.update({name: output_path})
logger.info('cut %s success!', name)
except BaseException:
logger.error('cut_img failed!')
return {}
return cutted_img_paths
@staticmethod
def cut_img(output_path, input_path, shp_path):
"""
:param output_path: clipped image
:param input_path: image to clip
:param shp_path: vector data
:return: True or False
"""
r = shapefile.Reader(shp_path)
box = r.bbox
input_dataset = gdal.Open(input_path)
gdal.Warp(output_path, input_dataset, format='GTiff', outputBounds=box, cutlineDSName=shp_path, dstNodata=-9999)
# cutlineWhere="FIELD = whatever",
# optionally you can filter your cutline (shapefile) based on attribute values
# select the no data value you like
# ds = None
# do other stuff with ds object, it is your cropped dataset. in this case we only close the dataset.
del input_dataset
return True
def resampling_img(self, out_dir, para_names, img_paths, refer_img_path):
"""
resample images onto the master image grid
:param para_names: names of the parameters to resample
:param img_paths: paths of the images to resample
:param refer_img_path: path of the reference image
"""
if len(para_names) == 0 or len(img_paths) == 0:
return
prepro_imgs_path = {}
for name in para_names:
img_path = img_paths[name]
output_para = os.path.join(out_dir, name + '_preprocessed.tif') # + name + '_preprocessed.tif'
self.resampling_by_scale(img_path, output_para, refer_img_path)
prepro_imgs_path.update({name: output_para})
logger.info('resampling %s success!', name)
return prepro_imgs_path
@staticmethod
def resampling_by_scale(input_path, target_file, refer_img_path):
"""
resample an image by the scale factor implied by the reference image
:param input_path: GDAL dataset path
:param target_file: output image
:param refer_img_path: reference image
:return: True or False
"""
ref_dataset = gdal.Open(refer_img_path)
ref_cols = ref_dataset.RasterXSize # columns
ref_rows = ref_dataset.RasterYSize # rows
target_dataset = gdal.Open(input_path)
target_cols = target_dataset.RasterXSize # columns
target_rows = target_dataset.RasterYSize # rows
if(ref_cols == target_cols) and (ref_rows == target_rows):
shutil.copyfile(input_path, target_file)
return True
dataset = gdal.Open(input_path)
if dataset is None:
logger.error('resampling_by_scale:dataset is None!')
return False
band_count = dataset.RasterCount # number of bands
if (band_count == 0) or (target_file == ""):
logger.error("resampling_by_scale: abnormal parameters!")
return False
cols = dataset.RasterXSize # columns
rows = dataset.RasterYSize # rows
scale_x = ref_cols/cols
scale_y = ref_rows/rows
# rows = dataset.RasterYSize
# cols = int(cols * scale) # compute the new size
# rows = int(rows * scale)
cols = ref_cols
rows = ref_rows
geotrans = list(dataset.GetGeoTransform())
geotrans[1] = geotrans[1] / scale_x # pixel width scaled by 1/scale_x
geotrans[5] = geotrans[5] / scale_y # pixel height scaled by 1/scale_y
if os.path.exists(target_file) and os.path.isfile(target_file): # if a file of the same name exists
os.remove(target_file) # delete it
if not os.path.exists(os.path.split(target_file)[0]):
os.makedirs(os.path.split(target_file)[0])
band1 = dataset.GetRasterBand(1)
data_type = band1.DataType
target = dataset.GetDriver().Create(target_file, xsize=cols, ysize=rows, bands=band_count,
eType=data_type)
target.SetProjection(dataset.GetProjection()) # set the projection
target.SetGeoTransform(geotrans) # set the geotransform
total = band_count + 1
for index in range(1, total):
# read the band data
data = dataset.GetRasterBand(index).ReadAsArray(buf_xsize=cols, buf_ysize=rows)
out_band = target.GetRasterBand(index)
no_data_value = dataset.GetRasterBand(index).GetNoDataValue() # nodata value, if any
if not (no_data_value is None):
out_band.SetNoDataValue(no_data_value)
out_band.WriteArray(data) # write the data into the new image
out_band.FlushCache()
out_band.ComputeBandStats(False) # compute band statistics
del dataset
del target
return True
@staticmethod
def cv_mean_filter(out_path, in_path, filter_size):
"""
:param out_path: filtered image
:param in_path: image to filter
:param filter_size: kernel size
:return: True or False
"""
proj = ImageHandler.get_projection(in_path)
geotrans = ImageHandler.get_geotransform(in_path)
array = ImageHandler.get_band_array(in_path, 1)
array = cv2.blur(array, (filter_size, filter_size)) # mean filter
ImageHandler.write_img(out_path, proj, geotrans, array)
return True
@staticmethod
def check_LocalIncidenceAngle(out_tif_path, in_tif_path):
"""
set invalid angle values to NaN and convert degree values to radians
:param out_tif_path: output image path
:param in_tif_path: input image path
"""
proj, geo, angle = ImageHandler.read_img(in_tif_path)
angle = angle.astype(np.float32, order='C')
angle[angle == -9999] = np.nan
mean = np.nanmean(angle)
if mean > np.pi:
angle = np.deg2rad(angle) # degrees to radians
angle[np.where(angle >= 0.5 * np.pi)] = np.nan
angle[np.where(angle < 0)] = np.nan
ImageHandler.write_img(out_tif_path, proj, geo, angle)

View File

@ -1,236 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:microproduct
@File:ROIAlg.py
@Function:
@Contact:
@Author:SHJ
@Date:2021/11/17
@Version:1.0.0
"""
import logging
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
import numpy as np
logger = logging.getLogger("mylog")
class ROIAlg:
def __init__(self,):
pass
@staticmethod
def roi_process(names, processing_path, processing_paras, preprocessed_paras):
roi_paths = []
roi = ROIAlg()
for name in names:
if 'LocalIncidenceAngle' in name:
# build a mask from NaN angle values
pp.check_LocalIncidenceAngle(preprocessed_paras[name],preprocessed_paras[name])
angle_nan_mask_path = processing_path + 'angle_nan_mask.tif'
roi.trans_tif2mask(angle_nan_mask_path, preprocessed_paras[name], np.nan)
roi_paths.append(angle_nan_mask_path)
elif ("HH" in name) or ("HV" in name) or ("VH" in name) or ("VV" in name):
# build a mask from the image's valid extent
tif_mask_path = processing_path + name + "_tif_mask.tif"
roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
roi_paths.append(tif_mask_path)
elif name == 'Covering':
# vegetation-cover extent from the cover product
cover_mask_path = processing_path + "cover_mask.tif"
if processing_paras['CoveringIDs'] == 'empty':
cover_data = ImageHandler.get_data(preprocessed_paras[name])
cover_id_list = list(np.unique(cover_data))
else:
cover_id_list = list(processing_paras['CoveringIDs'].split(';'))
cover_id_list = [int(num) for num in cover_id_list]
roi.trans_cover2mask(cover_mask_path, preprocessed_paras[name], cover_id_list)
roi_paths.append(cover_mask_path)
elif name == "NDVI":
# use NDVI to delimit bare soil. The index ranges from -1.0 to 1.0; most positive values indicate vegetation,
# negative values are produced mainly by clouds, water and snow,
# and values near zero mainly by rock and bare soil.
# Low NDVI values (<= 0.1) indicate barren areas of rock, sand or snow cover.
# Moderate values (0.2 to 0.3) indicate shrubs and grassland,
# higher values (0.6 to 0.8) temperate and tropical rainforest.
ndvi_mask_path = processing_path + "ndvi_mask.tif"
ndvi_scope = list(processing_paras['NDVIScope'].split(';'))
threshold_of_ndvi_min = float(ndvi_scope[0])
threshold_of_ndvi_max = float(ndvi_scope[1])
roi.trans_tif2mask(ndvi_mask_path, preprocessed_paras[name], threshold_of_ndvi_min, threshold_of_ndvi_max)
roi_paths.append(ndvi_mask_path)
# else:
# # other feature images
# tif_mask_path = processing_path + name + "_mask.tif"
# roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
# roi_paths.append(tif_mask_path)
bare_land_mask_path = processing_path + "bare_land_mask.tif"
for roi_path in roi_paths:
roi.combine_mask(bare_land_mask_path, roi_path, bare_land_mask_path)
return bare_land_mask_path
@staticmethod
def roi_process_VP(names, processing_path, processing_paras, preprocessed_paras, file_name):
roi_paths = []
roi = ROIAlg()
for name in names:
if 'LocalIncidenceAngle' in name:
# build a mask from NaN angle values
pp.check_LocalIncidenceAngle(preprocessed_paras[name], preprocessed_paras[name])
angle_nan_mask_path = processing_path + 'angle_nan_mask.tif'
roi.trans_tif2mask(angle_nan_mask_path, preprocessed_paras[name], np.nan)
roi_paths.append(angle_nan_mask_path)
elif ("HH" in name) or ("HV" in name) or ("VH" in name) or ("VV" in name):
# build a mask from the image's valid extent
tif_mask_path = processing_path + name + "_tif_mask.tif"
roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
roi_paths.append(tif_mask_path)
elif name == 'Covering':
# vegetation-cover extent from the cover product
cover_mask_path = processing_path + "cover_mask.tif"
if processing_paras['CoveringIDs'] == 'empty':
cover_data = ImageHandler.get_data(preprocessed_paras[name])
cover_id_list = list(np.unique(cover_data))
else:
cover_id_list = list(processing_paras['CoveringIDs'].split(';'))
cover_id_list = [int(num) for num in cover_id_list]
roi.trans_cover2mask(cover_mask_path, preprocessed_paras[file_name + '_' + name], cover_id_list)
roi_paths.append(cover_mask_path)
elif name == "NDVI":
# use NDVI to delimit bare soil. The index ranges from -1.0 to 1.0; most positive values indicate vegetation,
# negative values are produced mainly by clouds, water and snow,
# and values near zero mainly by rock and bare soil.
# Low NDVI values (<= 0.1) indicate barren areas of rock, sand or snow cover.
# Moderate values (0.2 to 0.3) indicate shrubs and grassland,
# higher values (0.6 to 0.8) temperate and tropical rainforest.
ndvi_mask_path = processing_path + "ndvi_mask.tif"
ndvi_scope = list(processing_paras['NDVIScope'].split(';'))
threshold_of_ndvi_min = float(ndvi_scope[0])
threshold_of_ndvi_max = float(ndvi_scope[1])
roi.trans_tif2mask(ndvi_mask_path, preprocessed_paras[name], threshold_of_ndvi_min,
threshold_of_ndvi_max)
roi_paths.append(ndvi_mask_path)
# else:
# # other feature images
# tif_mask_path = processing_path + name + "_mask.tif"
# roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
# roi_paths.append(tif_mask_path)
bare_land_mask_path = processing_path + "bare_land_mask.tif"
for roi_path in roi_paths:
roi.combine_mask(bare_land_mask_path, roi_path, bare_land_mask_path)
return bare_land_mask_path
@staticmethod
def trans_tif2mask(out_mask_path, in_tif_path, threshold_min, threshold_max = None):
"""
:param out_mask_path: output mask path
:param in_tif_path: input path
:param threshold_min: lower threshold
:param threshold_max: upper threshold
:return: True or False
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_tif_path)
geotrans = image_handler.get_geotransform(in_tif_path)
array = image_handler.get_band_array(in_tif_path, 1)
if threshold_max == None and np.isnan(threshold_min)==True:
nan = np.isnan(array)
mask = (nan.astype(int) == 0).astype(int)
mask1 = ((array == -9999).astype(int) == 0).astype(int)
mask *= mask1
image_handler.write_img(out_mask_path, proj, geotrans, mask)
else:
if threshold_min < threshold_max:
mask = ((array > threshold_min) & (array < threshold_max)).astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
elif threshold_min > threshold_max:
mask = ((array < threshold_min) & (array > threshold_max)).astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
elif threshold_max == threshold_min:
mask = ((array == threshold_min).astype(int) == 0).astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
logger.info("trans_tif2mask success, path: %s", out_mask_path)
return True
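A usage sketch of the two threshold modes above; the paths are hypothetical:

import numpy as np

roi = ROIAlg()
# valid-data mask: pixels that are neither NaN nor -9999 become 1
roi.trans_tif2mask('valid_mask.tif', 'HH_backscatter.tif', np.nan)
# range mask: 1 where 0.0 < NDVI < 0.3
roi.trans_tif2mask('ndvi_mask.tif', 'ndvi.tif', 0.0, 0.3)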
@staticmethod
def trans_cover2mask(out_mask_path, in_tif_path, cover_id_list):
"""
:param out_mask_path: output mask path
:param in_tif_path: input path
:param cover_id_list: IDs of the land-cover classes
:return: True or False
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_tif_path)
geotrans = image_handler.get_geotransform(in_tif_path)
array = image_handler.get_band_array(in_tif_path, 1)
mask = np.zeros(array.shape, dtype=bool)
for id in cover_id_list:
mask_tmp = (array == id)
mask = mask | mask_tmp
mask = mask.astype(int)
image_handler.write_img(out_mask_path, proj, geotrans, mask)
@staticmethod
def combine_mask(out_mask_path, in_main_mask_path, in_sub_mask_path):
"""
:param out_mask_path: output path
:param in_main_mask_path: main mask path; the output uses its geo-information
:param in_sub_mask_path: secondary mask path
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_main_mask_path)
geotrans = image_handler.get_geotransform(in_main_mask_path)
main_array = image_handler.get_band_array(in_main_mask_path, 1)
if image_handler.get_dataset(in_sub_mask_path) != None:
sub_array = image_handler.get_band_array(in_sub_mask_path, 1)
main_array = main_array * sub_array
image_handler.write_img(out_mask_path, proj, geotrans, main_array)
logger.info("combine_mask success, path: %s", out_mask_path)
return True
@staticmethod
def cal_roi(out_tif_path, in_tif_path, mask_path, background_value=1):
"""
:param out_tif_path: ROI output image
:param in_tif_path: image to apply the ROI to
:param mask_path: mask
:param background_value: background value for invalid areas
:return: True or False
"""
image_handler = ImageHandler()
proj = image_handler.get_projection(in_tif_path)
geotrans = image_handler.get_geotransform(in_tif_path)
tif_array = image_handler.get_data(in_tif_path) # read all bands' pixel values into an array
mask_array = image_handler.get_band_array(mask_path, 1)
if len(tif_array.shape) == 3:
im_bands, im_height, im_width = tif_array.shape
else:
im_bands, (im_height, im_width) = 1, tif_array.shape
if im_bands == 1:
tif_array[np.isnan(mask_array)] = background_value
tif_array[mask_array == 0] = background_value
elif im_bands>1:
for i in range(0, im_bands):
tif_array[i, :, :][np.isnan(mask_array)] = background_value
tif_array[i, :, :][mask_array == 0] = background_value
image_handler.write_img(out_tif_path, proj, geotrans, tif_array, '-9999')
logger.info("cal_roi success, path: %s", out_tif_path)
return True
# if __name__ == '__main__':
# dir = r'G:\MicroWorkspace\C-SAR\SoilMoisture\Temporary\processing/'
# out_tif_path = dir + 'soil_moisture_roi.tif'
# in_tif_path = dir + 'soil_moisture.tif'
# mask_path = dir + 'bare_land_mask.tif'
# background_value = np.nan
# ROIAlg.cal_roi(out_tif_path, in_tif_path, mask_path, background_value)
# pass

View File

@ -1,57 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:sieve_filter.py
@Function:GDAL sieve (speckle) filtering
@Contact: 'https://www.osgeo.cn/gdal/api/gdal_alg.html?highlight=gdalsievefilter#'
'_CPPv415GDALSieveFilter15GDALRasterBandH15GDALRasterBandH15GDALRasterBandHiiPPc16GDALProgressFuncPv'
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import logging
from osgeo import gdal
import numpy as np
# from onestar.soilMoisture.OneMoistureImage import ImageHandler
from tool.algorithm.image.ImageHandle import ImageHandler
logger = logging.getLogger("mylog")
def gdal_sieve_filter(dst_filename, src_filename, threshold=100, connectedness=4):
"""
raster sieve filtering based on Python GDAL
:param dst_filename: output filtered image
:param src_filename: input file to process
:param threshold: minimum polygon size, in pixels
:param connectedness: connectivity, 4 or 8
:return:
"""
# 4: diagonal pixels do not count as connected for polygon membership; 8: they do
# connectedness = 4
gdal.AllRegister()
# print('raster file to sieve-filter: {}, threshold (resolution): {}'.format(src_filename, threshold))
dataset = gdal.Open(src_filename, gdal.GA_Update)
if dataset is None:
logger.error('{} open tif fail!'.format(src_filename))
return False
# source raster band to process
src_band = dataset.GetRasterBand(1)
mask_band = src_band.GetMaskBand()
dst_band = src_band
prog_func = gdal.TermProgress_nocb
# run the GDAL sieve filter
result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
if result != 0:
return False
proj = dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
del dataset
return True
#
# if __name__ == '__main__':
# inputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25.tif'
# outputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25_sieve_filter.tif'
# flag = gdal_sieve_filter(outputfile, inputfile, threshold=100, connectedness=4)

View File

@ -1,122 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File ScatteringAuxData.py
@Function backscatter auxiliary data
@Author SHJ
@Contact
@Date 2022/6/29
@Version 1.0.0
Revision history:
[seq] [date] [author] [change]
1 2022-6-29 Shi Haijun 1. support extracting info from both GF3 meta files and ortho-corrected meta files
"""
import logging
from xml.etree.ElementTree import ElementTree
logger = logging.getLogger("mylog")
class GF3L1AMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('processinfo').find('CalibrationConst').find(polarization).text)
return Kdb
class OrthoMetaData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
QualifyValue = float(root.find('l1aInfo').find('imageinfo').find('QualifyValue').find(polarization).text)
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
Kdb = float(root.find('l1aInfo').find('processinfo').find('CalibrationConst').find(polarization).text)
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# get the radar center frequency
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
RadarCenterFrequency = float(root.find('sensor').find('RadarCenterFrequency').text)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# get the radar wavelength in meters
tree = ElementTree()
tree.parse(meta_file_path)
root = tree.getroot()
lamda = float(root.find('sensor').find('lamda').text)
return lamda
class ScatteringAuxData:
def __init__(self):
pass
@staticmethod
def get_QualifyValue(meta_file_path, polarization):
try:
QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_QualifyValue() error!')
QualifyValue = GF3L1AMetaData.get_QualifyValue(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_QualifyValue() success!')
return QualifyValue
@staticmethod
def get_Kdb(meta_file_path, polarization):
try:
Kdb = OrthoMetaData.get_Kdb(meta_file_path, polarization)
except Exception:
logger.warning('OrthoMetaData.get_Kdb() error!')
Kdb = GF3L1AMetaData.get_Kdb(meta_file_path, polarization)
logger.info('GF3L1AMetaData.get_Kdb() success!')
return Kdb
@staticmethod
def get_RadarCenterFrequency(meta_file_path):
# get the radar center frequency in GHz
RadarCenterFrequency = OrthoMetaData.get_RadarCenterFrequency(meta_file_path)
return RadarCenterFrequency
@staticmethod
def get_lamda(meta_file_path):
# get the radar wavelength in meters
lamda = OrthoMetaData.get_lamda(meta_file_path)
return lamda
# if __name__ == '__main__':
# A = ScatteringAuxData()
# dir = 'G:\MicroWorkspace\C-SAR\AuxSAR\GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485_geo/'
# path = dir + 'GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml'
# path1 = dir + 'OrthoProduct.meta.xml'
# t1 = A.get_QualifyValue(path, 'HH')
# t2 = A.get_Kdb(path, 'HH')
# t3 = A.get_RadarCenterFrequency(path)
# t4 = A.get_lamda(path)
# pass

View File

@ -1,414 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File CalculateIncident.py
@Function compute the local incidence angle
@Author LMM
@Date 2021/8/25 14:17
@Version 1.0.0
"""
import os
import numpy as np
from osgeo import gdal
from osgeo import gdalconst
import gc
import math
from xml.dom import minidom # part of the standard library, no install needed
class CalculateIncident:
def __init__(self):
pass
@staticmethod
def add_round(npgrid):
"""
pad the edges with a one-pixel ring and return the padded matrix
param: npgrid: DEM array
"""
ny, nx = npgrid.shape # ny: rows, nx: cols
zbc = np.zeros((ny + 2, nx + 2))
zbc[1:-1, 1:-1] = npgrid
# four edges
zbc[0, 1:-1] = npgrid[0, :]
zbc[-1, 1:-1] = npgrid[-1, :]
zbc[1:-1, 0] = npgrid[:, 0]
zbc[1:-1, -1] = npgrid[:, -1]
# corners
zbc[0, 0] = npgrid[0, 0]
zbc[0, -1] = npgrid[0, -1]
zbc[-1, 0] = npgrid[-1, 0]
zbc[-1, -1] = npgrid[-1, -1]
print("padded array shape:", zbc.shape)
return zbc
@staticmethod
def cal_dxdy(zbc, dx):
"""
compute dx, dy
param: zbc: padded array
param: dx: DEM pixel size
"""
we_x = ((zbc[1:-1, :-2]) - (zbc[1:-1, 2:])) / dx / 2 # west-east direction
ns_y = ((zbc[2:, 1:-1]) - (zbc[:-2, 1:-1])) / dx / 2 # north-south direction
print("Sx shape:", we_x.shape, "Sy shape:", ns_y.shape)
sx = we_x[1:-1, 1:-1]
sy = ns_y[1:-1, 1:-1]
# np.savetxt("dxdy.csv",dx,delimiter=",")
print("Sx2 shape:", sx.shape, "Sy2 shape:", sy.shape)
return sx, sy
@staticmethod
def cal_slopasp(dx, dy):
# compute slope and aspect
# slope
slope = (np.arctan(np.sqrt(dx * dx + dy * dy))) * 57.29578 # to degrees; 57.29578 = 180/math.pi
slope = slope[1:-1, 1:-1]
# aspect
aspect = np.ones([dx.shape[0], dx.shape[1]]).astype(np.float32) # placeholder array of ones
# dx = dx.astype(np.float32)
# dy = dy.astype(np.float32)
# a1=(np.where(dx==0) and np.where(dy ==0))
# print(a1)
# aspect[a1]=-1
# a2 = (np.where(dx == 0) and np.where(dy > 0))
# aspect[a2] =0.0
# a3 = (np.where(dx == 0) and np.where(dy <0))
# aspect[a3] =180.0
# a4 = (np.where(dx > 0) and np.where(dy ==0))
# aspect[a4] =90.0
# a5 = (np.where(dx < 0) and np.where(dy ==0))
# aspect[a5] =270.0
# a6 = (np.where(dx != 0) or np.where(dy !=0))
# b=dy[a6]
# print(":", 1)
# aspect[a6] =float(math.atan2(dy[i, j], dx[i, j])) * 57.29578
# a7=np.where(aspect[a6]< 0.0)
# aspect[a7] = 90.0 - aspect[a7]
# a8=np.where(aspect[a6]> 90.0)
# aspect[a8] = 450.0- aspect[a8]
# a9 =np.where(aspect[a6] >= 0 or aspect[a6] <= 90)
# aspect[a9] =90.0 - aspect[a9]
for i in range(dx.shape[0]):
for j in range(dx.shape[1]):
x = float(dx[i, j])
y = float(dy[i, j])
if (x == 0.0) & (y == 0.0):
aspect[i, j] = -1
elif x == 0.0:
if y > 0.0:
aspect[i, j] = 0.0
else:
aspect[i, j] = 180.0
elif y == 0.0:
if x > 0.0:
aspect[i, j] = 90.0
else:
aspect[i, j] = 270.0
else:
                    aspect[i, j] = float(math.atan2(y, x)) * 57.29578  # atan2 returns radians in (-pi, pi]
if aspect[i, j] < 0.0:
aspect[i, j] = 90.0 - aspect[i, j]
elif aspect[i, j] > 90.0:
aspect[i, j] = 450.0 - aspect[i, j]
else:
aspect[i, j] = 90.0 - aspect[i, j]
print("输出aspect形状:", aspect.shape) # 3599, 3599
print("输出aspect:", aspect)
return slope, aspect
def creat_twofile(self, dem_file_path, slope_out_path, aspect_out_path):
"""
生成坡度图坡向图
param: path_file1 为输入文件tif数据的文件路径
"""
if os.path.isfile(dem_file_path):
print("高程数据文件存在")
else:
print("高程数据文件不存在")
dataset_caijian = gdal.Open(dem_file_path)
x_size = dataset_caijian.RasterXSize
y_size = dataset_caijian.RasterYSize
geo = dataset_caijian.GetGeoTransform()
pro = dataset_caijian.GetProjection()
array0 = dataset_caijian.ReadAsArray(0, 0, x_size, y_size)
print("输出dem数据的数组", array0)
zbc = self.add_round(array0)
sx, sy = self.cal_dxdy(zbc, 30)
slope, aspect = self.cal_slopasp(sx, sy)
        driver = gdal.GetDriverByName("GTiff")
        driver.Register()
        # Create(path, width, height, bands, dtype)
        newfile = driver.Create(slope_out_path, x_size, y_size, 1, gdal.GDT_Float32)
        newfile.SetProjection(pro)
        geo = [geo[0], geo[1], 0, geo[3], 0, -geo[1]]  # north-up transform; square pixels assumed
        newfile.SetGeoTransform(geo)
        newfile.GetRasterBand(1).WriteArray(slope)
        driver2 = gdal.GetDriverByName("GTiff")
        driver2.Register()
        newfile2 = driver2.Create(aspect_out_path, x_size, y_size, 1, gdal.GDT_Float32)
        newfile2.SetProjection(pro)  # was missing: the aspect raster also needs the projection
        newfile2.SetGeoTransform(geo)
        newfile2.GetRasterBand(1).WriteArray(aspect)
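    # Note: a one-call alternative for the two rasters above is GDAL's built-in
    # DEM tooling (a sketch, not the method used here; assumes GDAL >= 2.1):
    #   gdal.DEMProcessing(slope_out_path, dem_file_path, 'slope')
    #   gdal.DEMProcessing(aspect_out_path, dem_file_path, 'aspect')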
@staticmethod
def resampling(input_file1, input_file2, ref_file, output_file, output_file2):
"""
采用gdal.Warp()方法进行重采样差值法为双线性插值
:param input_file1 slope path
:param input_file2 aspect path
:param ref_file: 参考图像路径
:param output_file: slope path
:param output_file2 aspect path
:return:
"""
gdal.AllRegister()
in_ds1 = gdal.Open(input_file1)
in_ds2 = gdal.Open(input_file2)
ref_ds = gdal.Open(ref_file, gdal.GA_ReadOnly)
        # read the input image info
input_file_proj = in_ds1.GetProjection()
# inputefileTrans = in_ds1.GetGeoTransform()
reference_file_proj = ref_ds.GetProjection()
reference_file_trans = ref_ds.GetGeoTransform()
nbands = in_ds1.RasterCount
bandinputfile1 = in_ds1.GetRasterBand(1)
bandinputfile2 = in_ds2.GetRasterBand(1)
x = ref_ds.RasterXSize
y = ref_ds.RasterYSize
        # create the resampled outputs (set projection and geotransform)
driver1 = gdal.GetDriverByName('GTiff')
output1 = driver1.Create(output_file, x, y, nbands, bandinputfile1.DataType)
output1.SetGeoTransform(reference_file_trans)
output1.SetProjection(reference_file_proj)
# options = gdal.WarpOptions(srcSRS=inputProj, dstSRS=referencefileProj, resampleAlg=gdalconst.GRA_Bilinear)
# resampleAlg = gdalconst.GRA_NearestNeighbour
gdal.ReprojectImage(in_ds1, output1, input_file_proj, reference_file_proj, gdalconst.GRA_Bilinear)
driver2 = gdal.GetDriverByName('GTiff')
output2 = driver2.Create(output_file2, x, y, nbands, bandinputfile2.DataType)
output2.SetGeoTransform(reference_file_trans)
output2.SetProjection(reference_file_proj)
gdal.ReprojectImage(in_ds2, output2, input_file_proj, reference_file_proj, gdalconst.GRA_Bilinear)
@staticmethod
def getorbitparameter(xml_path):
"""
从轨道参数文件xml中获取升降轨信息影像四个角的经纬度坐标
"""
        # parse the XML document into a DOM
        doc = minidom.parse(xml_path)
        # root element
        root = doc.documentElement
        # orbit direction: DEC = descending, ASC = ascending
        direction = root.getElementsByTagName("Direction")[0]
        pd = direction.firstChild.data
imageinfo = root.getElementsByTagName("imageinfo")[0]
        # topLeft lat/lon
        top_left = imageinfo.getElementsByTagName("topLeft")[0]
        latitude = top_left.getElementsByTagName("latitude")[0]
        longitude = top_left.getElementsByTagName("longitude")[0]
        tl_lat, tl_lon = latitude.firstChild.data, longitude.firstChild.data
        # topRight lat/lon
        top_right = imageinfo.getElementsByTagName("topRight")[0]
        latitude = top_right.getElementsByTagName("latitude")[0]
        longitude = top_right.getElementsByTagName("longitude")[0]
        tr_lat, tr_lon = latitude.firstChild.data, longitude.firstChild.data
        # bottomLeft lat/lon
        bottom_left = imageinfo.getElementsByTagName("bottomLeft")[0]
        latitude = bottom_left.getElementsByTagName("latitude")[0]
        longitude = bottom_left.getElementsByTagName("longitude")[0]
        bl_lat, bl_lon = latitude.firstChild.data, longitude.firstChild.data
        # bottomRight lat/lon
        bottom_right = imageinfo.getElementsByTagName("bottomRight")[0]
        latitude = bottom_right.getElementsByTagName("latitude")[0]
        longitude = bottom_right.getElementsByTagName("longitude")[0]
        br_lat, br_lon = latitude.firstChild.data, longitude.firstChild.data
print("pd,tl_lat,tl_lon,tr_lat,tr_lon,bl_lat,bl_lon,br_lat,br_lon", pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat,
bl_lon, br_lat, br_lon)
return pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon
def get_rparademeter(self, xml_path):
"""
计算雷达视线向方向角R
"""
pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon = self.getorbitparameter(xml_path)
        tl_lat = float(tl_lat)  # the values are decimal strings, so int() would fail; use float()
tl_lon = float(tl_lon)
# tr_lat = float(tr_lat)
# tr_lon = float(tr_lon)
bl_lat = float(bl_lat)
bl_lon = float(bl_lon)
# br_lat = float(br_lat)
# br_lon = float(br_lon)
if pd == "DEC":
# 降轨
b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
r = 270 + b
return r
# tl_lat, tl_lon = lonlat2geo(tl_lat, tl_lon)
# tr_lat, tr_lon = lonlat2geo(tr_lat, tr_lon)
# bl_lat, bl_lon = lonlat2geo(bl_lat, bl_lon)
# br_lat, br_lon = lonlat2geo(br_lat, br_lon)
# B2 = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
# R2 = 270 + B2
# print(("输出R2", R2))
if pd == "ASC":
# 升轨
b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
return b
def clau(self, pathfile1, pathfile2, pathfile3, xml_path, save_localangle_path):
"""
计算局部入射角
param: pathfile1是slope的坡度图路径
param: pathfile2是aspect的坡向图路径
param: pathfile3是入射角文件的路径
param: xml_path是轨道参数文件
r是雷达视线向方位角
"""
r = self.get_rparademeter(xml_path)
pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon = self.getorbitparameter(xml_path)
print("输出升降轨:", pd)
dataset = gdal.Open(pathfile1)
x = dataset.RasterXSize
y = dataset.RasterYSize
print("输出slope的行、列", x, y)
slope_array = dataset.ReadAsArray(0, 0, x, y)
dataset2 = gdal.Open(pathfile2)
x2 = dataset2.RasterXSize
y2 = dataset2.RasterYSize
print("输出aspect的行、列", x2, y2)
aspect_array = dataset2.ReadAsArray(0, 0, x2, y2)
dataset3 = gdal.Open(pathfile3)
x3 = dataset3.RasterXSize
y3 = dataset3.RasterYSize
geo3 = dataset3.GetGeoTransform()
pro3 = dataset3.GetProjection()
print("输出入射角文件的行、列:", x3, y3)
rushe_array = dataset3.ReadAsArray(0, 0, x3, y3)
        radina_value = 0
        if pd == "DEC":
            # descending pass
            where_0 = np.where(rushe_array == 0)
            # look azimuth minus aspect, wrapped into [0, 360)
            diff = (r - aspect_array) % 360
            # case 1: 90 < diff < 270
            b1 = np.where((diff > 90) & (diff < 270), 1, 0)
            c1 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) - np.sin(slope_array*(math.pi/180)) * np.sin(
                rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
            d1 = b1 * c1
            # case 2: diff == 90 or diff == 270 (the cosine term vanishes)
            b2 = np.where((diff == 90) | (diff == 270), 1, 0)
            d2 = b2 * c1
            # case 3: remaining directions (0-90 or 270-360 degrees)
            b3 = 1 - b1 - b2
            c3 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) + np.sin(
                slope_array*(math.pi/180)) * np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
            d3 = b3 * c3
            del b1, b2, b3, c3, c1
            gc.collect()
            radina_value = d1 + d2 + d3
            radina_value[where_0] = 0
            del d1, d2, d3
            gc.collect()
gc.collect()
if pd == "ASC":
# 升轨数据
# 坡度-雷达视线角在90度到270度之间
where_0 = np.where(rushe_array == 0)
bb1 = (r-aspect_array).all() and (r-aspect_array).all()
bb2 = np.where(90 < bb1 < 270, 1, 0)
b1 = (bb1 and bb2)
# b1 = np.where(90 < ((r-aspect_array).all()) and ((r-aspect_array).all()) < 270, 1, 0)
c1 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) + np.sin(
slope_array*(math.pi/180)) * np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
d1 = b1 * c1
# 坡度-雷达视线角=90或=270时
b2 = np.where((aspect_array-r == 90) | (aspect_array-r == 270), 1, 0)
d2 = b2 * c1
# 坡度-雷达视线角在0-90度或270-360度之间
b3 = 1 - b1-b2
c3 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) - np.sin(slope_array*(math.pi/180)) *\
np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
d3 = b3 * c3
radina_value = d1 + d2 + d3
radina_value[where_0] = 0
del b1, b2, b3, c3, c1, d1, d2, d3
gc.collect()
jubu_o = 57.29578 * np.arccos(radina_value)
print("输出局部入射角", jubu_o)
        driver = gdal.GetDriverByName("GTiff")
        driver.Register()
        newfile = driver.Create(save_localangle_path, x3, y3, 1, gdal.GDT_Float32)  # Create(path, width, height, bands, dtype)
newfile.SetProjection(pro3)
newfile.SetGeoTransform(geo3)
newfile.GetRasterBand(1).WriteArray(jubu_o)
def localangle(self, dem_path, incidence_angle_path, orbital_parameters_path):
"""
获取输入文件的路径
计算坡度图坡向图
计算局部入射角
"""
para_names = ["Dem", "IncidenceAngle", "OrbitalParameters", "经验A"]
if len(para_names) == 0:
return False
# 获取三个文件的路径
# print("输出三个文件路径",Dem_path,IncidenceAngle_path,OrbitalParameters_path)
# 确定坡度、坡向的输出路径,输出坡度、坡向图
slope_out_path = r"D:\MicroWorkspace\LeafAreaIndex\Temporary\UnClipslope.tif"
aspect_out_path = r"D:\MicroWorkspace\LeafAreaIndex\Temporary\UnClipaspect.tif"
print("slope_out_path的路径是", slope_out_path)
print("aspect_out_path的路径是", aspect_out_path)
self.creat_twofile(dem_path, slope_out_path, aspect_out_path)
        # crop and resample the slope/aspect rasters against the incidence-angle file
        slope_out_path2 = r"D:\MicroWorkspace\LocaLangle\Temporary\Clipslope.tif"
        aspect_out_path2 = r"D:\MicroWorkspace\LocaLangle\Temporary\Clipaspect.tif"
        self.resampling(slope_out_path, aspect_out_path, incidence_angle_path, slope_out_path2, aspect_out_path2)
        # write the local-incidence-angle output
        save_localangle_path = r"D:\MicroWorkspace\LocaLangle\Temporary\localangle.tif"
self.clau(slope_out_path2, aspect_out_path2, incidence_angle_path,
orbital_parameters_path, save_localangle_path)
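# For reference, the branch selection in clau() implements the relation
# cos(theta_loc) = cos(theta)*cos(slope) -/+ sin(theta)*sin(slope)*cos(R - aspect),
# with the sign chosen by the look-direction/aspect quadrant. A compact sketch of
# the single-expression form (an illustration, not the production path above;
# assumes degree-valued numpy arrays):
#   def local_incidence_deg(theta, slope, aspect, r):
#       t, s, d = np.radians(theta), np.radians(slope), np.radians(r - aspect)
#       return np.degrees(np.arccos(np.cos(t) * np.cos(s) -
#                                   np.sin(t) * np.sin(s) * np.cos(d)))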
# if __name__ == '__main__':
# calu_incident = CalculateIncident()
# Dem_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\dem.tif"
# IncidenceAngle_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\RSJ.tif"
# OrbitalParameters_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\" \
# "GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml"
# calu_incident.localangle(Dem_path, IncidenceAngle_path, OrbitalParameters_path)
# print('done')

View File

@ -1,302 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:lee_filter.py
@Function:lee_filter
@Contact: https://github.com/PyRadar/pyradar
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import numpy as np
import math
from PIL import Image
import multiprocessing
from tool.algorithm.block.blockprocess import BlockProcess
import logging
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.file.fileHandle import fileHandle
from tool.algorithm.algtools.filter import lee_Filter_c as lee_Filter_c
logger = logging.getLogger("mylog")
file = fileHandle(False)
COEF_VAR_DEFAULT = 0.01
CU_DEFAULT = 0.25
import os
class Filter:
def __int__(self):
pass
@staticmethod
def assert_window_size(win_size):
"""
Asserts invalid window size.
Window size must be odd and bigger than 3.
"""
assert win_size >= 3, 'ERROR: win size must be at least 3'
if win_size % 2 == 0:
print('It is highly recommended to user odd window sizes.'
'You provided %s, an even number.' % (win_size, ))
@staticmethod
def assert_indices_in_range(width, height, xleft, xright, yup, ydown):
"""
Asserts index out of image range.
"""
        assert 0 <= xleft <= width, \
            "index xleft:%s out of range (%s<= xleft < %s)" % (xleft, 0, width)
        assert 0 <= xright <= width, "index xright:%s out of range (%s<= xright < %s)" % (xright, 0, width)
        assert 0 <= yup <= height, "index yup:%s out of range. (%s<= yup < %s)" % (yup, 0, height)
        assert 0 <= ydown <= height, "index ydown:%s out of range. (%s<= ydown < %s)" % (ydown, 0, height)
@staticmethod
def weighting(window, cu=CU_DEFAULT):
"""
        Computes the weighting function for the Lee filter using cu as the noise
coefficient.
"""
# cu is the noise variation coefficient
two_cu = cu * cu
# ci is the variation coefficient in the window
window_mean = window.mean()
window_std = window.std()
ci = window_std / window_mean
two_ci = ci * ci
if not two_ci: # dirty patch to avoid zero division
two_ci = COEF_VAR_DEFAULT
if cu > ci:
w_t = 0.0
else:
w_t = 1.0 - (two_cu / two_ci)
return w_t
def lee_filter(self, in_path, out_path, win_size):
"""
        Apply the Lee filter to a numpy matrix containing the image, using a
        window of win_size x win_size.
"""
cu = CU_DEFAULT
self.assert_window_size(win_size)
# img = self.ImageHandler.get_band_array(img, 1)
array1 = Image.open(in_path)
img = np.array(array1)
# we process the entire img as float64 to avoid type overflow error
img = np.float64(img)
img_filtered = np.zeros_like(img)
self.lee_filter_array(img, img_filtered, win_size)
out_image = Image.fromarray(img_filtered)
out_image.save(out_path)
print("lee_filter finish! path:" + out_path)
return True
@staticmethod
def lee_filter_array(in_arry, out_arry, win_size):
"""
Apply lee to a numpy matrix containing the image, with a window of
win_size x win_size.
"""
f = Filter()
#cu = CU_DEFAULT
f.assert_window_size(win_size)
img = in_arry
# we process the entire img as float64 to avoid type overflow error
img = np.float64(img)
        img = img + 100  # offset so the filter only sees positive values
        new_out = lee_Filter_c.lee_filter_array(img, out_arry, win_size)
        new_out = new_out - 100  # remove the offset
        out_arry[:, :] = new_out[:, :]
    def lee_filter_multiprocess(self, in_paths, out_paths, win_size=3, processes_num=10):
        if len(in_paths) != len(out_paths):
            return False
        # process in parallel
        pool = multiprocessing.Pool(processes=processes_num)
        pl = []
        for i in range(len(in_paths)):
            pl.append(pool.apply_async(self.lee_filter, (in_paths[i], out_paths[i], win_size)))
            print("lee_filter running! path:" + in_paths[i])
        pool.close()
        pool.join()
        return True
    def lee_filter_block_multiprocess(self, in_path, out_path, win_size=3):
        in_name = os.path.basename(in_path)
        out_name = os.path.basename(out_path)
        outDir = os.path.split(out_path)[0]
        # create the working folders
        src_path = os.path.join(outDir, "src_img")
        block_path = os.path.join(outDir, "block")
        block_filtered = os.path.join(outDir, "block_filtered")
        file.creat_dirs([src_path, block_path, block_filtered])
shutil.copyfile(in_path, os.path.join(src_path, in_name))
cols = ImageHandler.get_img_width(in_path)
rows = ImageHandler.get_img_height(in_path)
        # split into blocks
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
bp.cut(src_path, block_path, ['tif', 'tiff'], 'tif', block_size)
logger.info('blocking tifs success!')
img_dir, img_name = bp.get_file_names(block_path, ['tif'])
dir_dict = bp.get_same_img(img_dir, img_name)
img_path_list = [value for value in dir_dict.values()][0]
processes_num = min([len(img_path_list), multiprocessing.cpu_count() - 1])
out_img_path_list =[]
for in_path in img_path_list:
suffix = bp.get_suffix(os.path.basename(in_path))
out_path = os.path.join(block_filtered, out_name.replace('.tif', suffix))
out_img_path_list.append(out_path)
self.lee_filter_multiprocess(img_path_list, out_img_path_list, win_size = win_size, processes_num=processes_num)
        # merge the filtered blocks
        bp.combine(block_filtered, cols, rows, outDir, file_type=['tif'], datetype='float32')
file.del_folder(src_path)
file.del_folder(block_path)
file.del_folder(block_filtered)
pass
    def lee_process_sar(self, in_sar, out_sar, win_size, noise_var):
        '''
        Invoke SIMOrthoProgram.exe in mode 12:
        SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var
        '''
exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 12, in_sar,
out_sar, win_size, noise_var)
print(exe_cmd)
print(os.system(exe_cmd))
print("==========================================================================")
if __name__ == '__main__':
    # example 1
    # path = r"I:\MicroWorkspace\product\C-SAR\LeafAreaIndex\Temporary\cai_sartif\HV_0_512_0_512.tif"
    # f = Filter()
    # f.lee_filter(path, path, 3)
    # example 2
    f = Filter()
    f.lee_filter_block_multiprocess(r'I:\preprocessed\HH.tif', r'I:\preprocessed\HHf.tif')
    pass
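# For reference, an equivalent fully vectorized Lee filter can be built on
# scipy.ndimage.uniform_filter instead of the block/multiprocess path above.
# A minimal sketch (assumes scipy is available; same weighting as Filter.weighting):
#   from scipy.ndimage import uniform_filter
#   def lee_filter_sketch(img, win_size=3, cu=CU_DEFAULT):
#       img = np.float64(img)
#       mean = uniform_filter(img, win_size)            # local mean
#       mean_sq = uniform_filter(img * img, win_size)   # local mean of squares
#       var = np.maximum(mean_sq - mean * mean, 0.0)    # local variance
#       ci = np.sqrt(var) / np.where(mean == 0, COEF_VAR_DEFAULT, mean)
#       w = np.where(ci <= cu, 0.0, 1.0 - (cu * cu) / np.maximum(ci * ci, COEF_VAR_DEFAULT))
#       return img * w + mean * (1.0 - w)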

View File

@ -1,124 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:lee_filter.py
@Function:lee_filter
@Contact: https://github.com/PyRadar/pyradar
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import os
cimport cython  # cimport is required
import numpy as np  # numpy must be declared for both the Python and C sides
cimport numpy as np  # C-level numpy API
from libc.math cimport pi
from libc.math cimport atan as math_atan
from libc.math cimport log10 as math_log10
from libc.math cimport log as math_log
from libc.math cimport floor as math_floor
from libc.math cimport sqrt as math_sqrt
from libc.math cimport exp as math_exp
from libc.math cimport sin as math_sin
from libc.math cimport cos as math_cos
from libc.math cimport tan as math_tan
from libc.math cimport asin as math_asin
from libc.math cimport acos as math_acos
from libc.math cimport sinh as math_sinh
from libc.math cimport cosh as math_cosh
from libc.math cimport tanh as math_tanh
from libc.math cimport ceil as math_ceil
from libc.math cimport lround as math_round
cdef double COEF_VAR_DEFAULT = 0.01
cdef double CU_DEFAULT = 0.25
cdef int ceil_usr(double v):
return int(math_ceil(v))
cdef double weighting(np.ndarray[double,ndim=2] window,double cu):
"""
    Computes the weighting function for the Lee filter using cu as the noise
coefficient.
"""
# cu is the noise variation coefficient
cdef double two_cu = cu * cu
# ci is the variation coefficient in the window
cdef double window_mean = window.mean()
cdef double window_std = window.std()
cdef double ci = window_std / window_mean
cdef double two_ci = ci * ci
    cdef double w_t = 0
    if two_ci == 0:  # dirty patch to avoid zero division
        two_ci = COEF_VAR_DEFAULT
if cu > ci:
w_t = 0.0
else:
w_t = 1.0 - (two_cu / two_ci)
return w_t
cpdef np.ndarray[double,ndim=2] lee_filter_array(np.ndarray[double,ndim=2] img,np.ndarray[double,ndim=2] out_arry,int win_size):
"""
    Apply the Lee filter to a numpy matrix containing the image, using a
    window of win_size x win_size.
"""
# we process the entire img as float64 to avoid type overflow error
#n, m = img.shape
cdef double cu = CU_DEFAULT
cdef int i=0
cdef int j=0
cdef int xleft=0
cdef int xright=0
cdef int yup=0
cdef int ydown=0
cdef np.ndarray[double,ndim=2] window;
cdef double w_t=0;
cdef double window_mean=0;
cdef double new_pix_valu=0;
cdef int n = img.shape[0]
cdef int m=img.shape[1]
cdef int win_offset=int(win_size/2)
while i<n:
xleft=ceil_usr(i-win_offset)
xright=int(i+win_offset)
if xleft < 0:
xleft = 0
if xright >= n:
xright = n
j=0
while j<m:
            yup = ceil_usr(j - win_offset)
            if yup < 0:
                yup = 0
            ydown = int(j + win_offset)
            if ydown >= m:
                ydown = m
pix_value = img[i, j]
window = img[xleft:xright+1, yup:ydown+1]
w_t = weighting(window, cu)
window_mean = np.mean(window)
new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
if not new_pix_value > 0:
new_pix_value = 0
out_arry[i, j] = round(new_pix_value*100000.0)/100000.0
j=j+1
i=i+1
return out_arry

View File

@ -1,45 +0,0 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./lee_Filter') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./lee_Filter/lee_Filter_c.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# Build command: python setup.py build_ext --inplace
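# After an in-place build the compiled extension can be imported and called
# directly; a hypothetical smoke test (module layout assumed from the Extension
# entry above):
#   import numpy as np
#   from lee_Filter import lee_Filter_c
#   img = np.random.rand(64, 64) + 1.0
#   out = np.zeros_like(img)
#   lee_Filter_c.lee_filter_array(img, out, 3)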

View File

@ -1,108 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File logHandler.py
@Function Log creation and housekeeping
@Author SHJ
@Date 2021/12/1
@Version 1.0.0
"""
import logging
import os
import time
import datetime
import colorlog
class LogHandler:
"""
    Create the application logger
"""
__logger = logging.getLogger("mylog")
__format_str = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] - %(module)s.%(funcName)s "
"(%(filename)s:%(lineno)d) - %(message)s")
__log_path = None
__log_colors_config = {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
@staticmethod
def init_log_handler(log_name):
"""
初始化日志
:param log_name: 日志保存的路径和名称
:return:
"""
path = os.getcwd()
current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
LogHandler.__log_path = os.path.join(path, log_name + current_time + ".log")
para_dir = os.path.split(LogHandler.__log_path)
if not os.path.exists(para_dir[0]):
os.makedirs(para_dir[0])
        # delete files older than seven days
        LogHandler.delete_outdate_files(para_dir[0])
        # Option 1: plain logging
log_format = "[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s" \
" (%(filename)s:Line%(lineno)d) "
date_format = "%m/%d/%Y %H:%M:%S"
formatter = colorlog.ColoredFormatter(
"%(log_color)s[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s"
" (%(filename)s:Line%(lineno)d) ",
log_colors=LogHandler.__log_colors_config)
fp = logging.FileHandler(LogHandler.__log_path, encoding='utf-8')
fs = logging.StreamHandler()
fs.setFormatter(formatter)
        # logging.basicConfig(level=logging.INFO, format=log_format, datefmt=date_format, handlers=[fp, fs])
        logging.basicConfig(level=logging.INFO, datefmt=date_format, handlers=[fp, fs])
        # Option 2: rotating log files
# LogHandler.__logger.setLevel(logging.DEBUG)
# th = handlers.TimedRotatingFileHandler(filename=LogHandler.__log_path, when='S', interval=1,
# backupCount=2, encoding='utf-8')
# th.suffix = "%Y-%m-%d-%H-%M-%S.log"
# th.setFormatter(LogHandler.__format_str)
# th.setLevel(level=logging.DEBUG)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# LogHandler.__logger.addHandler(console)
# LogHandler.__logger.addHandler(th)
@staticmethod
def delete_outdate_files(path, date_interval=7):
"""
删除目录下七天前创建的文件
"""
current_time = time.strftime("%Y-%m-%d", time.localtime(time.time()))
current_time_list = current_time.split("-")
current_time_day = datetime.datetime(int(current_time_list[0]), int(current_time_list[1]),
int(current_time_list[2]))
for root, dirs, files in os.walk(path):
for item in files:
                if item.endswith(".log"):
file_path = os.path.join(root, item)
create_time = time.strftime("%Y-%m-%d", time.localtime((os.stat(file_path)).st_mtime))
create_time_list = create_time.split("-")
create_time_day = datetime.datetime(int(create_time_list[0]), int(create_time_list[1]),
int(create_time_list[2]))
time_difference = (current_time_day - create_time_day).days
if time_difference > date_interval:
os.remove(file_path)
#
# if __name__ == "__main__":
# # eg2:
# log_handler = LogHandler()
# log_handler.init_log_handler(r"run_log\myrun1")
# logging.warning("1")
# print("done")

View File

@ -1,90 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 14 18:53:14 2021
@author: Dipankar
References
----------
Oh (2004): Quantitative retrieval of soil moisture content and surface roughness from multipolarized radar observations of bare soil surface. IEEE TGRS 42(3). 596-601.
"""
# ---------------------------------------------------------------------------------------
# Copyright (C) 2021 by Microwave Remote Sensing Lab, IITBombay http://www.mrslab.in
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, see http://www.gnu.org/licenses/
# ---------------------------------------------------------------------------------------
import numpy as np
#import matplotlib.pyplot as plt
## Description: Given sigma_0_vv, sigma_0_hh, and sigma_0_hv, the inverse
## model computes s, and mv
sigma0vvdB = -14.1
sigma0hhdB = -16.0
sigma0hvdB = -26.5
theta = 35. ##Incidence angle
f = 5.0 ##GHz
k = 2*np.pi*f/0.3 #calculate the wave number
theta_rad = theta*np.pi/180 #represent angle in radians
sigma_0_vv = np.power(10,(sigma0vvdB/10)) # represent data in linear scale
sigma_0_hh = np.power(10,(sigma0hhdB/10))
sigma_0_hv = np.power(10,(sigma0hvdB/10))
p = sigma_0_hh / sigma_0_vv #calculate the p-ratio
q = sigma_0_hv / sigma_0_vv #calculate the q-ratio
mv0 = np.arange(0.05,0.5,0.01) # search range for volumetric soil moisture mv (fine increments)
## First estimates s1 and mv1
ks = ((-1)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv0**0.7 * (np.cos(theta_rad))**2.2)))**0.556
err = (1 - (2.*theta_rad/np.pi)**(0.35*mv0**(-0.65)) * np.exp(-0.4 * ks**1.4))-p
abs_err = np.abs(err)
min_err = np.min(abs_err) #find the value of minimum error
mv1 = mv0[np.where(abs_err == min_err)]
ks1 = ((-1)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv1**0.7 * (np.cos(theta_rad))**2.2)))**0.556
s1 = ks1/k
## Second estimate s2 and mv2
ks2 = (np.log(1-(q/(0.095 * (0.13 + np.sin(1.5*theta_rad))**1.4))) /(-1.3))**(10./9.)
s2 = ks2/k
xx = (1-p)/np.exp(-0.4 * ks2**1.4)
if xx<=0:
mv2 =0
else:
yy = np.log(xx)/(0.35*np.log(2*theta_rad/np.pi))
mv2 = yy**(-100/65)
print(mv2,yy,np.power(yy,-100/65))
## Third estimate mv3
mv3 = ((sigma_0_hv/(1 - np.exp(-0.32 * ks2**1.8)))/(0.11 * np.cos(theta_rad)**2.2))**(1/0.7)
## weighted average s and mv-------------------------------------
sf = (s1 + 0.25*s2)/(1+0.25)
mvf = (mv1+mv2+mv3)/3
print(mv1,mv2,mv3,s1,s2)
print('Estimated rms height s (cm): ', sf*100)
print('Estimated volumetric soil moisture: ', mvf)
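# Summary of the inversion above (descriptive, mirrors the code): (1) grid-search
# mv0 against the p-ratio model to obtain mv1 and ks1; (2) invert the q-ratio
# model in closed form for ks2, giving s2 and mv2; (3) re-invert the HV channel
# with ks2 for mv3; finally average: s = (s1 + 0.25*s2)/1.25, mv = (mv1+mv2+mv3)/3.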

View File

@ -1,128 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 14:59:54 2013
@author: Sat Kumar Tomer
@email: satkumartomer@gmail.com
@website: www.ambhas.com
"""
cimport cython  # cimport is required
import numpy as np  # numpy must be declared for both the Python and C sides
cimport numpy as np  # C-level numpy API
from libc.math cimport pi
from scipy.optimize import fmin
cpdef np.ndarray[double,ndim=1] inverse_oh2004(double sigma0vvdB,double sigma0hhdB,double sigma0hvdB,double theta,double f):
"""
sigma0vvdB = -14.1 dB
sigma0hhdB = -16.0
sigma0hvdB = -26.5
theta = 35. ##Incidence angle
f = 5.0 ##GHz
"""
#print("--------------------------------------------------------\n")
cdef np.ndarray[double,ndim=1] result=np.ones((2))
result[0]=np.nan
result[1]=np.nan
#print("*************设置为nan****************")
#print(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f)
cdef double k = 2*3.1415926*f/0.299792458; #calculate the wave number
cdef double theta_rad = theta*3.1415926/180; #represent angle in radians
cdef double sigma_0_vv = np.power(10.,(sigma0vvdB/10.)) #%represent data in linear scale
cdef double sigma_0_hh = np.power(10.,(sigma0hhdB/10.))
cdef double sigma_0_hv = np.power(10.,(sigma0hvdB/10.))
if sigma_0_vv==0:
#print("***********sigma_0_vv==0*************")
return result
cdef double p = sigma_0_hh / sigma_0_vv; #calculate the p-ratio
cdef double q = sigma_0_hv / sigma_0_vv; #calculate the q-ratio
    cdef np.ndarray[double,ndim=1] mv0 = np.arange(0.05,0.9,0.01) # search range for volumetric soil moisture mv (fine increments)
## First estimates s1 and mv1
cdef np.ndarray[double,ndim=1] ks = ((-1.)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv0**0.7 * (np.cos(theta_rad))**2.2)))**0.556
cdef np.ndarray[double,ndim=1] err = (1. - (2.*theta_rad/np.pi)**(0.35*mv0**(-0.65)) * np.exp(-0.4 * ks**1.4))-p
cdef np.ndarray[double,ndim=1] abs_err = np.abs(err);
cdef double min_err = np.nanmin(abs_err); #find the value of minimum error
#print(np.where(abs_err == min_err)[0].shape)
    if np.isnan(min_err) or np.max(np.where(abs_err == min_err)[0].shape) == 0:
        # no valid minimum found
        return result
cdef double mv1 = mv0[np.where(abs_err == min_err)[0][0]]
cdef double temp_ks1=1. - sigma_0_hv/(0.11 * mv1**0.7 * (np.cos(theta_rad))**2.2)
if temp_ks1<0:
#print("*********************temp_ks1<0")
return result
cdef double ks1 = ((-1)*3.125*np.log(temp_ks1))**0.556
cdef double s1 = ks1/k
## Second estimate s2 and mv2
cdef double ks2 = (np.log(1-(q/(0.095 * (0.13 + np.sin(1.5*theta_rad))**1.4))) /(-1.3))**(10./9.)
cdef double s2 = ks2/k
cdef double mv2 =0.
cdef double yy =0.
cdef double xx = (1-p)/np.exp(-0.4 * ks2**1.4)
if xx<=0:
mv2 =0.
else:
yy = np.log(xx)/(0.35*np.log(2*theta_rad/np.pi))
mv2=np.power(yy,-100.0/65)
## Third estimate mv3
cdef double mv3 = ((sigma_0_hv/(1 - np.exp(-0.32 * ks2**1.8)))/(0.11 * np.cos(theta_rad)**2.2))**(1/0.7)
## weighted average s and mv-------------------------------------
#print("q:\t",q)
#print("k:\t",k)
#print("ks1:\t",ks1)
#print("ks2:\t",ks2)
#print("theta_rad:\t",theta_rad)
cdef double sf = (s1 + 0.25*s2)/(1+0.25)
cdef double mvf = (mv1+mv2+mv3)/3
result[0]=mvf*1.0
result[1]=sf*1.0
#print("mv1:\t",mv1)
#print("mv2:\t",mv2)
#print("mv3:\t",mv3)
#print("s1:\t",s1)
#print("s2:\t",s2)
#print("Estimated volumetric soil moisture: ",result[0])
#print("Estimated rms height s (m): ",result[1])
#print("\nend\n")
return result
cpdef double lamda2freq(double lamda):
return 299792458.0/lamda
cpdef double freq2lamda(double freq):
return 299792458.0/freq
# double sigma0vvdB,double sigma0hhdB,double sigma0hvdB,double theta,double f
cpdef int retrieve_oh2004_main(int n,np.ndarray[double,ndim=1] mv,np.ndarray[double,ndim=1] h,np.ndarray[int,ndim=1] mask,np.ndarray[double,ndim=1] sigma0vvdB,np.ndarray[double,ndim=1] sigma0hhdB,np.ndarray[double,ndim=1] sigma0hvdB, np.ndarray[double,ndim=1] vh, np.ndarray[double,ndim=1] theta,double f):
cdef int i=0;
cdef np.ndarray[double,ndim=1] result;
while i<n:
if mask[i]<0.5:
mv[i]=np.nan
h[i] =np.nan
else:
#print(i)
##print(sigma0vvdB[i], sigma0hhdB[i],sigma0hvdB[i], theta[i], f)
result= inverse_oh2004(sigma0vvdB[i], sigma0hhdB[i],sigma0hvdB[i], theta[i], f)
##print(result)
mv[i]=result[0]
h[i] =result[1]
##print(mv[i],h[i])
##print(result[0],result[1])
i=i+1
return 1

View File

@ -1,45 +0,0 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./oh2004') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./oh2004/oh2004.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# Build command: python setup.py build_ext --inplace

View File

@ -1,26 +0,0 @@
import numpy as np
import oh2004
sigma0vvdB = -14.1
sigma0hhdB = -16.0
sigma0hvdB = -26.5
theta = 35. ##Incidence angle
f = 5.0 ##GHz
#print(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f)
#print(oh2004.inverse_oh2004(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f))
n=3
mask=np.ones((3))
mask[1]=0
mask=mask.astype(np.int32)
sigma0hhdB=np.ones((3))*sigma0hhdB
sigma0vvdB=np.ones((3))*sigma0vvdB
sigma0hvdB=np.ones((3))*sigma0hvdB
theta=np.ones((3))*theta
mv=np.zeros(3)*1.0
h=np.zeros(3)*1.0
oh2004.retrieve_oh2004_main(n,mv,h, mask,sigma0vvdB,sigma0hhdB,sigma0hvdB,sigma0hvdB, theta,f)
print(mask)
print(mv)
print(h)

View File

@ -1,92 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:sieve_filter.py
@Function:GDAL sieve (speckle) filtering
@Contact: 'https://www.osgeo.cn/gdal/api/gdal_alg.html?highlight=gdalsievefilter#'
'_CPPv415GDALSieveFilter15GDALRasterBandH15GDALRasterBandH15GDALRasterBandHiiPPc16GDALProgressFuncPv'
@Author:SHJ
@Date:2021/8/30 8:42
@Version:1.0.0
"""
import logging
from osgeo import gdal
from tool.algorithm.image.ImageHandle import ImageHandler
logger = logging.getLogger("mylog")
def gdal_sieve_filter_test(dst_filename, src_filename, threshold=100, connectedness=8):
"""
基于python GDAL栅格滤波
:param dst_filename: 输出滤波后的影像
:param src_filename: 输入需要处理的文件
:param threshold: 滤波的值大小
:param connectedness: 连通域, 范围4或者8
:return:
"""
# 4表示对角像素不被视为直接相邻用于多边形成员资格8表示对角像素不相邻
# connectedness = 4
gdal.AllRegister()
    # print('raster to filter: {}, threshold: {}'.format(src_filename, threshold))
dataset = gdal.Open(src_filename, gdal.GA_Update)
if dataset is None:
        logger.error('{} open tif fail!'.format(src_filename))
return False
    # get the source band to process
src_band = dataset.GetRasterBand(1)
mask_band = src_band.GetMaskBand()
dst_band = src_band
prog_func = gdal.TermProgress_nocb
    # run the GDAL sieve filter
result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
if result != 0:
return False
proj = dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
del dataset
return True
def gdal_sieve_filter(dst_filename, src_filename, threshold=2, connectedness=4):
"""
基于python GDAL栅格滤波
:param dst_filename: 输出滤波后的影像
:param src_filename: 输入需要处理的文件
:param threshold: 滤波的值大小
:param connectedness: 连通域, 范围4或者8
:return:
"""
# 4表示对角像素不被视为直接相邻用于多边形成员资格8表示对角像素不相邻
# connectedness = 4
gdal.AllRegister()
# print('需要处理滤波的栅格文件:{},阈值(分辨率):{}'.format(src_filename, threshold))
dataset = gdal.Open(src_filename, gdal.GA_Update)
if dataset is None:
        logger.error('{} open tif fail!'.format(src_filename))
return False
    # get the source band to process
    src_band = dataset.GetRasterBand(1)
    # GDALSieveFilter only handles integer classes, so scale floats by 1000 first
    src_array = src_band.ReadAsArray(0, 0, src_band.XSize, src_band.YSize)
    src_array = src_array * 1000
    src_band.WriteArray(src_array)
mask_band = None
dst_band = src_band
prog_func = gdal.TermProgress_nocb
    # run the GDAL sieve filter
result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
if result != 0:
return False
proj = dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
dst_array = dst_array / 1000
ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
del dataset
return True
if __name__ == '__main__':
inputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25.tif'
outputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25_sieve_filter.tif'
flag = gdal_sieve_filter(outputfile, inputfile, threshold=100, connectedness=4)
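# Usage note: SieveFilter removes connected raster polygons smaller than
# `threshold` pixels. For float-valued rasters, gdal_sieve_filter() above scales
# by 1000 and back because GDALSieveFilter operates on integer class values, so
# results come out quantized to three decimal places.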

View File

@ -1,449 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project : microproduct
@File : blockprocess.py
@Function : block-wise splitting and mosaicking for tif/tiff images
@Contact : https://blog.csdn.net/qq_38308388/article/details/102978755
@Author:SHJ
@Date:2021/9/6
@Version:1.0.0
"""
from osgeo import osr, gdal
import numpy as np
import os
from PIL import Image
# import time
# from skimage import io
from tool.algorithm.image.ImageHandle import ImageHandler
class BlockProcess:
def __init__(self):
pass
@staticmethod
def get_block_size(rows, cols):
block_size = 512
if rows > 2048 and cols > 2048:
block_size = 1024
return block_size
# def get_block_size(rows, cols, block_size_config):
# block_size = 512 if block_size_config < 512 else block_size_config
# if rows > 2048 and cols > 2048:
# block_size = block_size_config
# return block_size
@staticmethod
def get_suffix(path_name):
name = path_name
suffix = '_' + name.split('_')[-4] + '_' + name.split('_')[-3] + '_' + name.split('_')[-2] + '_' + \
name.split('_')[-1]
return suffix
@staticmethod
def get_file_names(data_dir, file_type=['tif', 'tiff']):
"""
获取data_dir文件夹下file_type类型的文件路径
"""
result_dir = []
result_name = []
for maindir, subdir, file_name_list in os.walk(data_dir):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
ext = apath.split('.')[-1]
if ext in file_type:
result_dir.append(apath)
result_name.append(filename)
else:
pass
return result_dir, result_name
@staticmethod
def get_same_img(img_dir, img_name):
"""
在img_dir路径下用img_name的子图像路径集合将集合以字典输出
"""
result = {}
for idx, name in enumerate(img_name):
temp_name = ''
for idx2, item in enumerate(name.split('_')[:-4]):
if idx2 == 0:
temp_name = temp_name + item
else:
temp_name = temp_name + '_' + item
if temp_name in result:
result[temp_name].append(img_dir[idx])
else:
result[temp_name] = []
result[temp_name].append(img_dir[idx])
return result
@staticmethod
def assign_spatial_reference_byfile(src_path, dst_path):
"""
将src_path的地理信息输入到dst_path图像中
"""
src_ds = gdal.Open(src_path, gdal.GA_ReadOnly)
if src_ds is None:
return False
sr = osr.SpatialReference()
sr.ImportFromWkt(src_ds.GetProjectionRef())
geo_transform = src_ds.GetGeoTransform()
dst_ds = gdal.Open(dst_path, gdal.GA_Update)
if dst_ds is None:
return False
dst_ds.SetProjection(sr.ExportToWkt())
dst_ds.SetGeoTransform(geo_transform)
del dst_ds
del src_ds
return True
@staticmethod
def assign_spatial_reference_bypoint(row_begin, col_begin, src_proj, src_geo, img_path):
"""
将src_path的地理信息输入到dst_path图像中
"""
sr = osr.SpatialReference()
sr.ImportFromWkt(src_proj)
geo_transform = src_geo
geo_transform[0] = src_geo[0] + col_begin * src_geo[1] + row_begin * src_geo[2]
geo_transform[3] = src_geo[3] + col_begin * src_geo[4] + row_begin * src_geo[5]
dst_ds = gdal.Open(img_path, gdal.GA_Update)
if dst_ds is None:
return False
dst_ds.SetProjection(sr.ExportToWkt())
dst_ds.SetGeoTransform(geo_transform)
del dst_ds
return True
@staticmethod
def __get_band_array(filename, num):
"""
:param filename: tif路径
:param num: 波段序号
:return: 对应波段的矩阵数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
bands = dataset.GetRasterBand(num)
array = bands.ReadAsArray(0, 0, bands.XSize, bands.YSize)
del dataset
return array
@staticmethod
def get_data(filename):
"""
:param filename: tif路径
:return: 获取所有波段的数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_width = dataset.RasterXSize
im_height = dataset.RasterYSize
im_data = dataset.ReadAsArray(0, 0, im_width, im_height)
del dataset
return im_data
def get_tif_dtype(self, filename):
"""
:param filename: tif路径
:return: tif数据类型
"""
image = self.__get_band_array(filename, 1)
return image.dtype.name
def cut(self, in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=2048):
"""
:param in_dir:存放待裁剪的影像文件夹不用指定到tif文件
:param out_dir:存放裁剪结果的影像文件夹
:param file_type:待裁剪的影像文件类型tiftiffbmpjpgpng等等
:param out_type:裁剪结果影像文件类型
:param out_size:裁剪尺寸裁剪为n*n的方形
:return: True or Flase
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
data_dir_list, _ = self.get_file_names(in_dir, file_type)
count = 0
for each_dir in data_dir_list:
name_suffix = os.path.basename(each_dir)
img_name = os.path.splitext(name_suffix)[0]
            # read via GDAL
image = self.__get_band_array(each_dir, 1)
cut_factor_row = int(np.ceil(image.shape[0] / out_size))
cut_factor_clo = int(np.ceil(image.shape[1] / out_size))
for i in range(cut_factor_row):
for j in range(cut_factor_clo):
                    # shift the final block so it ends exactly at the image edge
                    if i == cut_factor_row - 1:
                        i = image.shape[0] / out_size - 1
                    if j == cut_factor_clo - 1:
                        j = image.shape[1] / out_size - 1
start_x = int(np.rint(i * out_size))
start_y = int(np.rint(j * out_size))
end_x = int(np.rint((i + 1) * out_size))
end_y = int(np.rint((j + 1) * out_size))
out_dir_images = os.path.join(out_dir, img_name + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
end_y) + '.' + out_type)
data = ImageHandler.get_data(each_dir)
if ImageHandler.get_bands(each_dir) > 1:
temp_data = data[:,start_x:end_x, start_y:end_y]
else:
temp_data = data[start_x:end_x, start_y:end_y]
ImageHandler.write_img(out_dir_images, '', [0, 0, 0, 0, 0, 0], temp_data)
count += 1
return True
def cut_new(self, in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=2048):
"""
:param in_dir:存放待裁剪的影像文件夹不用指定到tif文件
:param out_dir:存放裁剪结果的影像文件夹
:param file_type:待裁剪的影像文件类型tiftiffbmpjpgpng等等
:param out_type:裁剪结果影像文件类型
:param out_size:裁剪尺寸裁剪为n*n的方形
:return: True or Flase
20230831修改 ----tjx
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
data_dir_list, _ = self.get_file_names(in_dir, file_type)
count = 0
for each_dir in data_dir_list:
name_suffix = os.path.basename(each_dir)
img_name = os.path.splitext(name_suffix)[0]
            # read via GDAL
image = self.__get_band_array(each_dir, 1)
block_x = int(np.ceil(image.shape[1] / out_size))
            block_y = int(np.ceil(image.shape[0] / out_size))  # TODO: revise the blocking
for i in range(block_y):
for j in range(block_x):
start_x = j * out_size
start_y = i * out_size
end_x = image.shape[1] if (j + 1) * out_size > image.shape[1] else (j + 1) * out_size
end_y = image.shape[0] if (i + 1) * out_size > image.shape[0] else (i + 1) * out_size
out_dir_images = os.path.join(out_dir, img_name + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
end_y) + '.' + out_type)
# print(out_dir_images)
data = ImageHandler.get_data(each_dir)
if ImageHandler.get_bands(each_dir) > 1:
# temp_data = data[:,start_x:end_x, start_y:end_y]
temp_data = data[:,start_y:end_y, start_x:end_x]
else:
# temp_data = data[start_x:end_x, start_y:end_y]
temp_data = data[start_y:end_y, start_x:end_x]
ImageHandler.write_img(out_dir_images, '', [0, 0, 0, 0, 0, 0], temp_data)
count += 1
return True
def combine(self, data_dir, w, h, out_dir, out_type='tif', file_type=['tif', 'tiff'], datetype='float16'):
"""
:param data_dir: 存放待裁剪的影像文件夹不用指定到tif文件
:param w 拼接影像的宽度
:param h 拼接影像的高度
:param out_dir: 存放裁剪结果的影像文件夹
:param out_type: 裁剪结果影像文件类型
:param file_type: 待裁剪的影像文件类型
:param datetype:数据类型 int8int16float16float32
:return: True or Flase
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_dir, img_name = self.get_file_names(data_dir, file_type)
dir_dict = self.get_same_img(img_dir, img_name)
count = 0
for key in dir_dict.keys():
temp_label = np.zeros(shape=(h, w), dtype=datetype)
dir_list = dir_dict[key]
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
# img = Image.open(item)
img = ImageHandler.get_band_array(item, 1)
img = np.array(img)
temp_label[x_start:x_end, y_start:y_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir, img_name)
ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
# label = Image.fromarray(temp_label)
# label.save(new_out_dir)
count += 1
return True
    # TODO 2023-09-01: blocking revised; combine updated to match
def combine_new(self, data_dir, w, h, out_dir, out_type='tif', file_type=['tif', 'tiff'], datetype='float16'):
"""
:param data_dir: 存放待裁剪的影像文件夹不用指定到tif文件
:param w 拼接影像的宽度
:param h 拼接影像的高度
:param out_dir: 存放裁剪结果的影像文件夹
:param out_type: 裁剪结果影像文件类型
:param file_type: 待裁剪的影像文件类型
:param datetype:数据类型 int8int16float16float32
:return: True or Flase
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_dir, img_name = self.get_file_names(data_dir, file_type)
dir_dict = self.get_same_img(img_dir, img_name)
count = 0
for key in dir_dict.keys():
dir_list = dir_dict[key]
bands = ImageHandler.get_bands(dir_list[0])
if bands > 1:
temp_label = np.zeros(shape=(bands, h, w), dtype=datetype)
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
# img = Image.open(item)
img = ImageHandler.get_band_array(item, 1)
img = np.array(img)
temp_label[:, y_start:y_end, x_start:x_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir, img_name)
ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
# label = Image.fromarray(temp_label)
# label.save(new_out_dir)
count += 1
else:
temp_label = np.zeros(shape=(h, w), dtype=datetype)
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
# img = Image.open(item)
img = ImageHandler.get_band_array(item, 1)
img = np.array(img)
temp_label[y_start:y_end, x_start:x_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir, img_name)
ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
# label = Image.fromarray(temp_label)
# label.save(new_out_dir)
count += 1
return True
def combine_Tif(self, data_dir, w, h, out_dir, proj, geo, out_type='tif', file_type=['tif', 'tiff'],
datetype='float16'):
"""
将文件夹下的tif拼接成一个大的tif
:param data_dir: 存放待裁剪的影像文件夹不用指定到tif文件
:param w 拼接影像的宽度
:param h 拼接影像的高度
:param out_dir: 存放裁剪结果的影像文件夹
:param proj: 指定投影系
:param geo: 指定变换参数
:param out_type: 裁剪结果影像文件类型
:param file_type: 待裁剪的影像文件类型
:param datetype:数据类型 int8int16float16float32
:return: True or Flase
"""
image_handler = ImageHandler()
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_dir, img_name = self.get_file_names(data_dir, file_type)
dir_dict = self.get_same_img(img_dir, img_name)
count = 0
for key in dir_dict.keys():
temp_label = np.zeros(shape=(h, w), dtype=datetype)
dir_list = dir_dict[key]
for item in dir_list:
name_split = item.split('_')
x_start = int(name_split[-4])
x_end = int(name_split[-3])
y_start = int(name_split[-2])
y_end = int(name_split[-1].split('.')[0])
img = image_handler.get_data(item)
temp_label[x_start:x_end, y_start:y_end] = img
img_name = key + '.' + out_type
new_out_dir = os.path.join(out_dir,img_name)
image_handler.write_img(new_out_dir, proj, geo, temp_label)
count += 1
return True
# if __name__ == '__main__':
# bp = BlockProcess()
# # # cut
# data_dir = r"D:\micro\WorkSpace\LandCover\Temporary\processing\feature_tif\cut"
# out_dir = r"D:\micro\WorkSpace\LandCover\Temporary\processing\feature_tif\combine"
# file_type = ['tif']
# out_type = 'tif'
# cut_size = 1024
# #
# bp.cut_new(data_dir, out_dir, file_type, out_type, cut_size)
# # # combine
# # data_dir=r"D:\Workspace\SoilMoisture\Temporary\test"
# w= 5043
# h= 1239
# out_dirs=r"D:\BaiduNetdiskDownload\HF\cut_outs"
# # out_type='tif'
# # file_type=['tif']
# datetype = 'float'
# # src_path = r"D:\Workspace\SoilMoisture\Temporary\preprocessed\HH_preprocessed.tif"
# # datetype = bp.get_tif_dtype(src_path)
# bp.combine_new(out_dir, w, h, out_dirs, out_type, file_type, datetype)
#
# # 添加地理信息
# new_out_dir =r"D:\DATA\testdata1\combine\TEST_20200429_NDVI.tif"
# bp.assign_spatial_reference_byfile(src_path, new_out_dir)
# fn = r'D:\Workspace\SoilMoisture\Temporary\combine\soil_moisture.tif'
# product_path = r'D:\Workspace\SoilMoisture\Temporary\combine\soil_moisture_1.tif'
#
# proj, geos, img = ImageHandler.read_img(fn)
# img[img>1] = 1
# img[img<0] = 0
# ImageHandler.write_img(product_path, proj, geos, img)
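# A minimal cut/combine round trip (hypothetical paths; the 5043 x 1239 size is
# taken from the commented example above). cut_new() names blocks
# <img>_<x0>_<x1>_<y0>_<y1>.tif (x = column range, y = row range), and
# combine_new() re-places each block from that suffix:
#   bp = BlockProcess()
#   bp.cut_new(r'D:\in', r'D:\blocks', ['tif'], 'tif', 1024)
#   bp.combine_new(r'D:\blocks', 5043, 1239, r'D:\out', 'tif', ['tif'], 'float32')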

View File

@ -1,730 +0,0 @@
"""
@Project microproduct
@File ImageHandle.py
@Function  Read the SAR data to be processed, standardize its format, and save results after processing
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import os
from PIL import Image
import time
from osgeo import gdal
from osgeo import osr
import numpy as np
import cv2
import logging
import math
logger = logging.getLogger("mylog")
class ImageHandler:
"""
    Image reading, editing and saving
"""
def __init__(self):
pass
@staticmethod
def get_dataset(filename):
"""
        :param filename: GeoTIFF path
        :return: dataset handle
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
return dataset
def get_scope(self, filename):
"""
        :param filename: GeoTIFF path
        :return: image extent
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_scope = self.cal_img_scope(dataset)
del dataset
return im_scope
@staticmethod
def get_projection(filename):
"""
        :param filename: GeoTIFF path
        :return: map projection info
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_proj = dataset.GetProjection()
del dataset
return im_proj
@staticmethod
def get_geotransform(filename):
"""
        :param filename: GeoTIFF path
        :return: affine transform from image space (pixel/line) to georeferenced space (projected or geographic coordinates)
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
geotransform = dataset.GetGeoTransform()
del dataset
return geotransform
    @staticmethod
    def get_invgeotransform(filename):
        """
        :param filename: GeoTIFF path
        :return: inverse transform, from georeferenced space back to image space
        """
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
geotransform = dataset.GetGeoTransform()
geotransform=gdal.InvGeoTransform(geotransform)
del dataset
return geotransform
@staticmethod
def get_bands(filename):
"""
        :param filename: GeoTIFF path
        :return: number of bands
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
bands = dataset.RasterCount
del dataset
return bands
@staticmethod
def geo2lonlat(dataset, x, y):
"""
将投影坐标转为经纬度坐标具体的投影坐标系由给定数据确定
:param dataset: GDAL地理数据
:param x: 投影坐标x
:param y: 投影坐标y
:return: 投影坐标(x, y)对应的经纬度坐标(lon, lat)
"""
prosrs = osr.SpatialReference()
prosrs.ImportFromWkt(dataset.GetProjection())
geosrs = prosrs.CloneGeogCS()
ct = osr.CoordinateTransformation(prosrs, geosrs)
coords = ct.TransformPoint(x, y)
return coords[:2]
@staticmethod
def get_band_array(filename, num=1):
"""
:param filename: tif路径
:param num: 波段序号
:return: 对应波段的矩阵数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
bands = dataset.GetRasterBand(num)
array = bands.ReadAsArray(0, 0, bands.XSize, bands.YSize)
del dataset
return array
@staticmethod
def get_data(filename):
"""
:param filename: tif路径
:return: 获取所有波段的数据
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
im_width = dataset.RasterXSize
im_height = dataset.RasterYSize
im_data = dataset.ReadAsArray(0, 0, im_width, im_height)
del dataset
return im_data
@staticmethod
def get_all_band_array(filename):
"""
大气延迟算法
将ERA-5影像所有波段存为一个数组, 波段数在第三维度 get_data->3788
:param filename 影像路径 get_all_band_array ->8837
:return: 影像数组
"""
dataset = gdal.Open(filename)
x_size = dataset.RasterXSize
y_size = dataset.RasterYSize
nums = dataset.RasterCount
array = np.zeros((y_size, x_size, nums), dtype=float)
if nums == 1:
bands_0 = dataset.GetRasterBand(1)
array = bands_0.ReadAsArray(0, 0, x_size, y_size)
else:
for i in range(0, nums):
bands = dataset.GetRasterBand(i+1)
arr = bands.ReadAsArray(0, 0, x_size, y_size)
array[:, :, i] = arr
return array
@staticmethod
def get_img_width(filename):
"""
:param filename: tif路径
:return: 影像宽度
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
width = dataset.RasterXSize
del dataset
return width
@staticmethod
def get_img_height(filename):
"""
:param filename: tif路径
:return: 影像高度
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
height = dataset.RasterYSize
del dataset
return height
@staticmethod
def read_img(filename):
"""
影像读取
:param filename:
:return:
"""
gdal.AllRegister()
img_dataset = gdal.Open(filename) # 打开文件
if img_dataset is None:
msg = 'Could not open ' + filename
logger.error(msg)
return None, None, None
im_proj = img_dataset.GetProjection() # 地图投影信息
if im_proj is None:
return None, None, None
im_geotrans = img_dataset.GetGeoTransform() # 仿射矩阵
im_width = img_dataset.RasterXSize # 栅格矩阵的行数
im_height = img_dataset.RasterYSize # 栅格矩阵的行数
im_arr = img_dataset.ReadAsArray(0, 0, im_width, im_height)
del img_dataset
return im_proj, im_geotrans, im_arr
def cal_img_scope(self, dataset):
"""
计算影像的地理坐标范围
根据GDAL的六参数模型将影像图上坐标行列号转为投影坐标或地理坐标根据具体数据的坐标系统转换
:param dataset :GDAL地理数据
:return: list[point_upleft, point_upright, point_downleft, point_downright]
"""
if dataset is None:
return None
img_geotrans = dataset.GetGeoTransform()
if img_geotrans is None:
return None
        width = dataset.RasterXSize  # number of raster columns
        height = dataset.RasterYSize  # number of raster rows
point_upleft = self.trans_rowcol2geo(img_geotrans, 0, 0)
point_upright = self.trans_rowcol2geo(img_geotrans, width, 0)
point_downleft = self.trans_rowcol2geo(img_geotrans, 0, height)
point_downright = self.trans_rowcol2geo(img_geotrans, width, height)
return [point_upleft, point_upright, point_downleft, point_downright]
@staticmethod
def get_scope_ori_sim(filename):
"""
计算影像的地理坐标范围
根据GDAL的六参数模型将影像图上坐标行列号转为投影坐标或地理坐标根据具体数据的坐标系统转换
:param dataset :GDAL地理数据
:return: list[point_upleft, point_upright, point_downleft, point_downright]
"""
gdal.AllRegister()
dataset = gdal.Open(filename)
if dataset is None:
return None
width = dataset.RasterXSize  # number of columns
height = dataset.RasterYSize  # number of rows
band1 = dataset.GetRasterBand(1)
array1 = band1.ReadAsArray(0, 0, band1.XSize, band1.YSize)
band2 = dataset.GetRasterBand(2)
array2 = band2.ReadAsArray(0, 0, band2.XSize, band2.YSize)
if array1[0, 0] < array1[0, width-1]:
point_upleft = [array1[0, 0], array2[0, 0]]
point_upright = [array1[0, width-1], array2[0, width-1]]
else:
point_upright = [array1[0, 0], array2[0, 0]]
point_upleft = [array1[0, width-1], array2[0, width-1]]
if array1[height-1, 0] < array1[height-1, width-1]:
point_downleft = [array1[height - 1, 0], array2[height - 1, 0]]
point_downright = [array1[height - 1, width - 1], array2[height - 1, width - 1]]
else:
point_downright = [array1[height - 1, 0], array2[height - 1, 0]]
point_downleft = [array1[height - 1, width - 1], array2[height - 1, width - 1]]
if array2[0, 0] < array2[height - 1, 0]:
# swap top and bottom
point_upleft, point_downleft = point_downleft, point_upleft
point_upright, point_downright = point_downright, point_upright
return [point_upleft, point_upright, point_downleft, point_downright]
@staticmethod
def trans_rowcol2geo(img_geotrans, img_col, img_row):
"""
Uses GDAL's six-parameter affine model to convert image (row, col) coordinates to projected
or geographic coordinates (depending on the dataset's coordinate system)
:param img_geotrans: affine geotransform
:param img_col: image column index
:param img_row: image row index
:return: [geo_x, geo_y]
"""
geo_x = img_geotrans[0] + img_geotrans[1] * img_col + img_geotrans[2] * img_row
geo_y = img_geotrans[3] + img_geotrans[4] * img_col + img_geotrans[5] * img_row
return [geo_x, geo_y]
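# Worked example with a hypothetical geotransform gt = [100.0, 0.001, 0, 30.0, 0, -0.001]
# (north-up image, 0.001-degree pixels):
# ImageHandler.trans_rowcol2geo([100.0, 0.001, 0, 30.0, 0, -0.001], 200, 100)
# returns [100.0 + 0.001*200, 30.0 - 0.001*100] = [100.2, 29.9].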
@staticmethod
def write_era_into_img(filename, im_proj, im_geotrans, im_data):
"""
影像保存
:param filename:
:param im_proj:
:param im_geotrans:
:param im_data:
:return:
"""
gdal_dtypes = {
'int8': gdal.GDT_Byte,
'uint16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'uint32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
if gdal_dtypes.get(im_data.dtype.name) is not None:
datatype = gdal_dtypes[im_data.dtype.name]
else:
datatype = gdal.GDT_Float32
# determine array dimensionality
if len(im_data.shape) == 3:
im_height, im_width, im_bands = im_data.shape  # shape[0] is the row count
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# create the output file
if os.path.exists(os.path.split(filename)[0]) is False:
os.makedirs(os.path.split(filename)[0])
driver = gdal.GetDriverByName("GTiff")  # the data type is required so the needed memory can be computed
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans)  # write the affine geotransform
dataset.SetProjection(im_proj)  # write the projection
if im_bands == 1:
dataset.GetRasterBand(1).WriteArray(im_data)  # write the array data
else:
for i in range(im_bands):
dataset.GetRasterBand(i + 1).WriteArray(im_data[:, :, i])
# dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
del dataset
# write a GeoTIFF file
@staticmethod
def lat_lon_to_pixel(raster_dataset_path, location):
"""From zacharybears.com/using-python-to-translate-latlon-locations-to-pixels-on-a-geotiff/."""
gdal.AllRegister()
raster_dataset = gdal.Open(raster_dataset_path)
if raster_dataset is None:
return None
ds = raster_dataset
gt = ds.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
srs_lat_lon = srs.CloneGeogCS()
ct = osr.CoordinateTransformation(srs_lat_lon, srs)
new_location = [None, None]
# Change the point locations into the GeoTransform space
(new_location[1], new_location[0], holder) = ct.TransformPoint(location[1], location[0])
# Translate the x and y coordinates into pixel values
Xp = new_location[0]
Yp = new_location[1]
dGeoTrans = gt
dTemp = dGeoTrans[1] * dGeoTrans[5] - dGeoTrans[2] * dGeoTrans[4]
Xpixel = (dGeoTrans[5] * (Xp - dGeoTrans[0]) - dGeoTrans[2] * (Yp - dGeoTrans[3])) / dTemp
Yline = (dGeoTrans[1] * (Yp - dGeoTrans[3]) - dGeoTrans[4] * (Xp - dGeoTrans[0])) / dTemp
del raster_dataset
return (Xpixel, Yline)
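# The two formulas above are the closed-form inverse of the affine part of the geotransform:
# solving [Xp - gt[0]; Yp - gt[3]] = [[gt[1], gt[2]], [gt[4], gt[5]]] @ [col; row] by Cramer's
# rule, where dTemp = gt[1]*gt[5] - gt[2]*gt[4] is the determinant.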
@staticmethod
def write_img(filename, im_proj, im_geotrans, im_data, no_data='0'):
"""
影像保存
:param filename: 保存的路径
:param im_proj:
:param im_geotrans:
:param im_data:
:param no_data: 把无效值设置为 nodata
:return:
"""
gdal_dtypes = {
'int8': gdal.GDT_Byte,
'uint16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'uint32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
if gdal_dtypes.get(im_data.dtype.name) is not None:
datatype = gdal_dtypes[im_data.dtype.name]
else:
datatype = gdal.GDT_Float32
flag = False
# determine array dimensionality
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
flag = True
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# create the output file
if os.path.exists(os.path.split(filename)[0]) is False:
os.makedirs(os.path.split(filename)[0])
driver = gdal.GetDriverByName("GTiff")  # the data type is required so the needed memory can be computed
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans)  # write the affine geotransform
dataset.SetProjection(im_proj)  # write the projection
if im_bands == 1:
# outRaster.GetRasterBand(1).WriteArray(array)  # write the array data
if flag:
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data[0])
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else:
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data)
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else:
for i in range(im_bands):
outband = dataset.GetRasterBand(1 + i)
outband.WriteArray(im_data[i])
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
# outRaster.GetRasterBand(i + 1).WriteArray(array[i])
del dataset
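# Minimal usage sketch (hypothetical values): write a single-band float32 array with a unit
# geotransform, no projection, and 0 registered as the nodata value.
# import numpy as np
# arr = np.linspace(0, 1, 10000, dtype='float32').reshape(100, 100)
# ImageHandler.write_img(r'D:\example\demo.tif', '', [0.0, 1.0, 0.0, 0.0, 0.0, 1.0], arr, no_data='0')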
# write a GeoTIFF file
@staticmethod
def write_img_envi(filename, im_proj, im_geotrans, im_data, no_data='null'):
"""
影像保存
:param filename: 保存的路径
:param im_proj:
:param im_geotrans:
:param im_data:
:param no_data: 把无效值设置为 nodata
:return:
"""
gdal_dtypes = {
'int8': gdal.GDT_Byte,
'uint16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'uint32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
}
if gdal_dtypes.get(im_data.dtype.name) is not None:
datatype = gdal_dtypes[im_data.dtype.name]
else:
datatype = gdal.GDT_Float32
# determine array dimensionality
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# create the output file
if os.path.exists(os.path.split(filename)[0]) is False:
os.makedirs(os.path.split(filename)[0])
driver = gdal.GetDriverByName("ENVI")  # the data type is required so the needed memory can be computed
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans)  # write the affine geotransform
dataset.SetProjection(im_proj)  # write the projection
if im_bands == 1:
# outRaster.GetRasterBand(1).WriteArray(array)  # write the array data
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data)
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else:
for i in range(im_bands):
outband = dataset.GetRasterBand(1 + i)
outband.WriteArray(im_data[i])
outband.FlushCache()
# outRaster.GetRasterBand(i + 1).WriteArray(array[i])
del dataset
@staticmethod
def write_img_rpc(filename, im_proj, im_geotrans, im_data, rpc_dict):
"""
图像中写入rpc信息
"""
# 判断栅格数据的数据类型
if 'int8' in im_data.dtype.name:
datatype = gdal.GDT_Byte
elif 'int16' in im_data.dtype.name:
datatype = gdal.GDT_Int16
else:
datatype = gdal.GDT_Float32
# determine array dimensionality
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
else:
im_bands, (im_height, im_width) = 1, im_data.shape
# create the output file
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
dataset.SetGeoTransform(im_geotrans)  # write the affine geotransform
dataset.SetProjection(im_proj)  # write the projection
# write the RPC parameters
for k in rpc_dict.keys():
dataset.SetMetadataItem(k, rpc_dict[k], 'RPC')
if im_bands == 1:
dataset.GetRasterBand(1).WriteArray(im_data)  # write the array data
else:
for i in range(im_bands):
dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
del dataset
def transtif2mask(self, out_tif_path, in_tif_path, threshold):
"""
:param out_tif_path: output path
:param in_tif_path: input path
:param threshold: threshold value
"""
im_proj, im_geotrans, im_arr = self.read_img(in_tif_path)
im_arr_mask = (im_arr < threshold).astype(int)
self.write_img(out_tif_path, im_proj, im_geotrans, im_arr_mask)
def write_quick_view(self, tif_path, color_img=False, quick_view_path=None):
"""
生成快视图,默认快视图和影像同路径且同名
:param tif_path:影像路径
:param color_img:是否生成随机伪彩色图
:param quick_view_path:快视图路径
"""
if quick_view_path is None:
quick_view_path = os.path.splitext(tif_path)[0]+'.jpg'
n = self.get_bands(tif_path)
if n == 1:  # single band
t_data = self.get_data(tif_path)
else:  # multi-band: convert to intensity
t_data = self.get_data(tif_path)
t_data = t_data.astype(float)
t_data = np.sqrt(t_data[0] ** 2 + t_data[1] ** 2)
t_r = self.get_img_height(tif_path)
t_c = self.get_img_width(tif_path)
if t_r > 10000 or t_c > 10000:
q_r = int(t_r / 10)
q_c = int(t_c / 10)
elif 1024 < t_r < 10000 or 1024 < t_c < 10000:
if t_r > t_c:
q_r = 1024
q_c = int(t_c/t_r * 1024)
else:
q_c = 1024
q_r = int(t_r/t_c * 1024)
else:
q_r = t_r
q_c = t_c
if color_img is True:
# generate a random pseudo-color image
img = np.zeros((t_r, t_c, 3), dtype=np.uint8)  # (height, width, channels)
u = np.unique(t_data)
for i in u:
if i != 0:
w = np.where(t_data == i)
img[w[0], w[1], 0] = np.random.randint(0, 255)  # random value in 0-255; tune the range for a different palette
img[w[0], w[1], 1] = np.random.randint(0, 255)
img[w[0], w[1], 2] = np.random.randint(0, 255)
img = cv2.resize(img, (q_c, q_r))  # (width, height)
cv2.imwrite(quick_view_path, img)
# cv2.imshow("result4", img)
# cv2.waitKey(0)
else:
# grayscale image
min = np.percentile(t_data, 2)  # np.nanmin(t_data)
max = np.percentile(t_data, 98)  # np.nanmax(t_data)
t_data[np.isnan(t_data)] = max
if (max - min) < 256:
t_data = (t_data - min) / (max - min) * 255
out_img = Image.fromarray(t_data)
out_img = out_img.resize((q_c, q_r))  # resample
out_img = out_img.convert("L")  # convert to grayscale
out_img.save(quick_view_path)
def limit_field(self, out_path, in_path, min_value, max_value):
"""
:param out_path: output path
:param in_path: path of the master mask; the output image reuses the master mask's geo-referencing
:param min_value:
:param max_value:
"""
proj = self.get_projection(in_path)
geotrans = self.get_geotransform(in_path)
array = self.get_band_array(in_path, 1)
array[array < min_value] = min_value
array[array > max_value] = max_value
self.write_img(out_path, proj, geotrans, array)
return True
def band_merge(self, lon, lat, ori_sim):
lon_arr = self.get_data(lon)
lat_arr = self.get_data(lat)
temp = np.zeros((2, lon_arr.shape[0], lon_arr.shape[1]), dtype=float)
temp[0, :, :] = lon_arr[:, :]
temp[1, :, :] = lat_arr[:, :]
self.write_img(ori_sim, '', [0.0, 1.0, 0.0, 0.0, 0.0, 1.0], temp, '0')
def get_scopes(self, ori_sim):
ori_sim_data = self.get_data(ori_sim)
lon = ori_sim_data[0, :, :]
lat = ori_sim_data[1, :, :]
min_lon = np.nanmin(np.where((lon != 0) & ~np.isnan(lon), lon, np.inf))
max_lon = np.nanmax(np.where((lon != 0) & ~np.isnan(lon), lon, -np.inf))
min_lat = np.nanmin(np.where((lat != 0) & ~np.isnan(lat), lat, np.inf))
max_lat = np.nanmax(np.where((lat != 0) & ~np.isnan(lat), lat, -np.inf))
scopes = [[min_lon, max_lat], [max_lon, max_lat], [min_lon, min_lat], [max_lon, min_lat]]
return scopes
@staticmethod
def dem_merged(in_dem_path, out_dem_path):
'''
DEM mosaicking helper; the default coordinate system is WGS84.
args:
in_dem_path: input DEM folder path
out_dem_path: output DEM folder path
'''
# collect every DEM tile in the folder
dem_file_paths = [os.path.join(in_dem_path, dem_name) for dem_name in os.listdir(in_dem_path) if
dem_name.find(".tif") >= 0 and dem_name.find(".tif.") == -1]
spatialreference = osr.SpatialReference()
spatialreference.SetWellKnownGeogCS("WGS84")  # geographic coordinates in degrees
spatialproj = spatialreference.ExportToWkt()  # export the spatial reference as WKT
# mosaic the DEM tiles into one image
mergeFile = gdal.BuildVRT(os.path.join(out_dem_path, "mergedDEM_VRT.tif"), dem_file_paths)
out_DEM = os.path.join(out_dem_path, "mergedDEM.tif")
gdal.Warp(out_DEM,
mergeFile,
format="GTiff",
dstSRS=spatialproj,
dstNodata=-9999,
outputType=gdal.GDT_Float32)
time.sleep(3)
# gdal.CloseDir(out_DEM)
return out_DEM
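# Usage sketch (hypothetical folders): mosaics every *.tif tile under in_dem_path into a
# single WGS84 GeoTIFF and returns the path of the merged file.
# merged_dem = ImageHandler.dem_merged(r'D:\example\dem_tiles', r'D:\example\dem_out')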
if __name__ == '__main__':
fn = r'D:\micro\WorkSpace\LandCover\Temporary\preprocessing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1B_h_h_L10003515422-ortho.tif'
a = ImageHandler.get_scope_n(fn)
print(a)
# path = r'D:\BaiduNetdiskDownload\GZ\lon.rdr'
# path2 = r'D:\BaiduNetdiskDownload\GZ\lat.rdr'
# path3 = r'D:\BaiduNetdiskDownload\GZ\lon_lat.tif'
# s = ImageHandler().band_merge(path, path2, path3)
# print(s)
# pass

View File

@ -1,185 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:SalinityMain.py
@File:MonteCarloSampling.py
@Function:基于蒙特卡洛随机抽样的最优特征选择算法
@Contact:
@Author:SHJ
@Date:2021/10/19 11:30
@Version:1.0.0
"""
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns
import logging
logger = logging.getLogger("mylog")
def api_sel_feature(x_list, iter=100, alpha=0.5, ts=-0.5, iter_ratio=0.2):
"""
:param x_list: training samples of one feature for k classes, [X1,X2,X3,...,Xi,...,Xk],
Xi = np.array([x1,x2,x3...xn]), where class i has n training samples
:param iter: number of iterations
:param alpha: weighting factor
:param ts: threshold on com_sep_coef
:param iter_ratio: threshold on the accepted-iteration ratio
:return: True - the feature is strongly class-discriminative; False - it is not
"""
com_sep_coef_old = cal_com_sep_coef(x_list, alpha)
# print('com_sep_coef_old:', com_sep_coef_old)
if com_sep_coef_old < ts:
return False, com_sep_coef_old
X = np.zeros(1)  # x_list concatenated into one row vector X
x_len_list = []  # records the split position of each class in X
num_sampler = 0  # total number of samples
t = 0
flag = 0
for x in x_list:
len_x = len(x)
if t == 0:
X = x
x_len_list.append(len_x)
else:
X = np.hstack([X, x])
x_len_list.append(x_len_list[t - 1] + len_x)
num_sampler += len_x
t += 1
x_len_list.pop()
num = int(np.ceil(num_sampler / 3))
for i in range(iter):
# draw a random array
randmtx = np.random.rand(1, num)
randmtx_ceil = np.ceil(randmtx * num_sampler).astype(int)
randmtx_ceil = np.sort(randmtx_ceil[0, :]) - 1
# permute the randomly selected values and write them back to form a new array
X_new_sel = X.copy()
X_new_sel[randmtx_ceil] = np.random.permutation(X[randmtx_ceil])
X_new_list = np.split(X_new_sel, x_len_list)
com_sep_coef_new = cal_com_sep_coef(X_new_list, alpha)
if com_sep_coef_new <= com_sep_coef_old:
flag += 1
# print('com_sep_coef_new:', com_sep_coef_new)
logger.info('flag:' + str(flag) + ', iter:' + str(iter) + ', flag/iter:' + str(int(flag)/int(iter)))
if flag > (iter * iter_ratio):
return False, com_sep_coef_old
return True, com_sep_coef_old
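# Usage sketch (hypothetical toy samples of one scalar feature for three classes):
# x1 = np.array([1.0, 1.1, 0.9])
# x2 = np.array([2.0, 2.1, 2.2])
# x3 = np.array([3.0, 3.4, 3.1])
# keep, coef = api_sel_feature([x1, x2, x3], iter=100, alpha=0.5, ts=-0.5)
# 'keep' reports whether the feature passes the Monte-Carlo permutation test; 'coef' is its
# compactness-separation coefficient.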
def cal_com_coef(x_list):
"""
:para x_list: k类别的单个特征的训练样本 [X1,X2,X3,...,Xi,...,Xk],Xi = np.array([x1,x2,x3...xn]), 第i类别的训练样本数为n
:return com_coef : 类内聚合因子Compactness Coefficient
"""
class_num = len(x_list)
coef_array = np.full((1, class_num), 0.0)
for m in range(class_num):
sample_num = len(x_list[m])
c = np.full((1, sample_num), 0.0)
for u in range(sample_num):
l = np.full((1, sample_num), x_list[m][u])
c[0, u] = np.sum(np.abs(l - x_list[m]))
coef_array[0, m] = np.sum(c) / (sample_num * (sample_num - 1))
com_coef = np.sum(coef_array) / class_num
return com_coef
def cal_sep_coef(x_list):
"""
:para x_list : k类别的单个特征的训练样本 [X1,X2,X3,...,Xi,...,Xk],Xi = np.array([x1,x2,x3...xn]), 第i类别的训练样本数为n
:return sep_coef : 类间离散度Separation Coefficient
"""
class_num = len(x_list)
coef_list = []
coef_sum = 0
for m in range(class_num):
xm = x_list[m]
l_xm = len(xm)
for n in range(class_num):
if not n == m:
xn = x_list[n]
l_xn = len(xn)
xm = np.expand_dims(xm, 1)
coef_list.append(np.sum(np.abs(xm - xn)) / (l_xm * l_xn))
for coef in coef_list:
coef_sum = coef_sum + coef
if class_num == 1 or class_num == 0:
sep_coef = coef_sum
else:
sep_coef = coef_sum / (class_num * (class_num - 1))
return sep_coef
def cal_com_sep_coef(x_list, alpha=0.5):
"""
:param x_list: training samples of one feature for k classes, [X1,X2,X3,...,Xi,...,Xk], Xi = np.array([x1,x2,x3...xn]), where class i has n training samples
:param alpha: weighting factor
:return com_sep_coef: Compactness-Separation Coefficient combining intra-class compactness and inter-class dispersion
"""
if not (0 <= alpha <= 1):
raise ValueError('input_para_alpha beyond [0,1]!')
com_coef = cal_com_coef(x_list)
sep_coef = cal_sep_coef(x_list)
com_sep_coef = alpha * com_coef - (1-alpha) * sep_coef
return com_sep_coef
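# Worked example: for x1 = np.array([0, 1]) and x2 = np.array([10, 11]) the intra-class
# compactness is 1.0 (mean absolute pairwise distance within each class) and the inter-class
# separation is 10.0, so cal_com_sep_coef([x1, x2], alpha=0.5) = 0.5*1.0 - 0.5*10.0 = -4.5;
# smaller values mean more compact classes relative to their separation.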
def get_logistic_rand_number(num, u=0.4):  # deprecated
randmtx = np.full((1, num), 0.0)
# randmtx[0,0] = np.random.rand(1, 1)  # random initial value
randmtx[0, 0] = 0.5  # fixed initial value
for i in range(1, num):
randmtx[0, i] = u * randmtx[0, i-1]*(1-randmtx[0, i-1])
randmtx = randmtx * 3 * num
randmtx_ceil = np.ceil(randmtx)
# plot the random-number distribution
# randmty = np.arange(0,num,1)
# randmty = np.expand_dims( randmty, 1)
# fig, axes = plt.subplots(1, 1, figsize=(5, 5))
# axes.scatter(randmty, randmtx_ceil, alpha=.3, label='ground truth')
# axes.legend()
# plt.tight_layout()
# plt.show()
return randmtx_ceil
def test():
'''test random-number generation'''
# insertion
# a = np.array([3.4, 2.5, 1.8, 4.7, 5.6, 2.1])
# b = np.array([2.5, 4.7, 5.6])
# c = a[[0,1]]
# a[[0,1]] = np.array([1, 1])
# random permutation (shuffle operates in place on an array argument)
random.shuffle(np.arange(10))
# logistic random numbers
sns.distplot(random.normal(scale=2, size=1000), hist=False, label='normal')
sns.distplot(random.logistic(loc=2, scale=0.5, size=1000), hist=False, label='logistic')
plt.show()
# plot the random numbers
randmtx = random.logistic(loc=0.5, scale=0.5, size=100)
randmtx.sort(axis=0)
randmty = np.arange(0,100,1)
randmty = np.expand_dims(randmty, 1)
fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.scatter(randmty, randmtx, alpha=.3, label='ground truth')
axes.legend()
plt.tight_layout()
plt.show()
# if __name__ == '__main__':
# example
# x1 = np.array([1, 1.1])
# x2 = np.array([2, 2.1, 2.2])
# x3 = np.array([3, 3.4, 3.1])
# x_list = [x1, x2, x3]
# com_sep_coef = cal_com_sep_coef(x_list, 0.5)
# flag = api_sel_feature(x_list)
# print('done')

View File

@ -1,433 +0,0 @@
import sklearn  # needed to work around packaging (PyInstaller) errors
import sklearn.utils  # needed to work around packaging errors
import sklearn.utils._cython_blas  # needed to work around packaging errors
import sklearn.utils._weight_vector  # needed to work around packaging errors
import sklearn.neighbors  # needed to work around packaging errors
import sklearn.neighbors._typedefs  # needed to work around packaging errors
import sklearn.neighbors._partition_nodes  # needed to work around packaging errors
import sklearn.neighbors._quad_tree  # needed to work around packaging errors
import sklearn.tree._utils  # needed to work around packaging errors
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import numpy as np
from scipy.stats import pearsonr
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.block.blockprocess import BlockProcess
import logging
import os
import glob
from PIL import Image
from tool.file.fileHandle import fileHandle
import multiprocessing
logger = logging.getLogger("mylog")
file = fileHandle()
class MachineLeaning:
"""
Machine learning utilities
"""
def __init__(self):
pass
@staticmethod
def gene_optimal_train_set(train_data_dic, feature_tif_dir, important_threshold=0.3, correlation_threshold=0.7):  # todo: revisit the feature-importance threshold
ml = MachineLeaning()
name_list = ml.get_name_list(feature_tif_dir)
X_train, Y_train = ml.gene_train_set(train_data_dic, feature_tif_dir)
optimal_feature = ml.sel_optimal_feature_set(X_train, Y_train, threshold=important_threshold)
optimal_feature = ml.remove_correlation_feature(X_train, optimal_feature, threshold=correlation_threshold)
X_train = X_train[:, optimal_feature]
logger.info('train_feature:%s', np.array(name_list)[optimal_feature])
return X_train, Y_train, optimal_feature
@ staticmethod
def sel_optimal_feature(X_train, Y_train, name_list,important_threshold=0.3, correlation_threshold=0.7):
ml = MachineLeaning()
optimal_feature = ml.sel_optimal_feature_set(X_train, Y_train, threshold=important_threshold)
optimal_feature = ml.remove_correlation_feature(X_train, optimal_feature, threshold=correlation_threshold)
X_train = X_train[:, optimal_feature]
logger.info('train_feature:%s', np.array(name_list)[optimal_feature])
return X_train, Y_train, optimal_feature
@staticmethod
def gene_test_set(feature_tif_dir, optimal_feature):
"""
生成测试集
:param feature_tif_dir : 特征影像路径字典
:param optimal_feature : 最优特征子集
:return X_test_list : 分块测试集影像路径
"""
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
cols = ImageHandler.get_img_width(in_tif_paths[0])
rows = ImageHandler.get_img_height(in_tif_paths[0])
workspace_block_tif_path = os.path.join(feature_tif_dir, 'block')
workspace_block_feature_path = os.path.join(feature_tif_dir, 'feature')
file.creat_dirs([workspace_block_tif_path, workspace_block_feature_path])
# split the feature images into blocks
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
bp.cut(feature_tif_dir, workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
img_dir, img_name = bp.get_file_names(workspace_block_tif_path, ['tif'])
dir_dict_all = bp.get_same_img(img_dir, img_name)
# keep only the feature images in the optimal feature subset
dir_dict = {}
for n, key in zip(range(len(dir_dict_all)), dir_dict_all):
if n in optimal_feature:
dir_dict.update({key: dir_dict_all[key]})
logger.info('test_feature:%s', dir_dict.keys())
logger.info('blocking tifs success!')
X_test_list = []
# stack the feature dimensions
for key in dir_dict:
key_name = key
block_num = len(dir_dict[key])
break
for n in range(block_num):
name = os.path.basename(dir_dict[key_name][n])
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
features_path = os.path.join(workspace_block_feature_path, "features" + suffix) # + "\\features" + suffix
X_test_list.append(features_path)
features_array = np.zeros((len(dir_dict), block_size, block_size), dtype='float32')
for m, value in zip(range(len(dir_dict)), dir_dict.values()):
features_array[m, :, :] = ImageHandler.get_band_array(value[n])
features_array[np.isnan(features_array)] = 0.0  # replace NaN with 0
ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
logger.info('create features matrix success!')
# file.del_folder(workspace_block_tif_path)
# file.del_folder(workspace_block_feature_path)
return X_test_list
@staticmethod
def predict_blok(clf, X_test, rows, cols, img_path, row_begin, col_begin, block_sum, n):
logger.info('total:%s,block:%s testing data !path:%s', block_sum, n, img_path)
Y_test = clf.predict(X_test)
img = Y_test.reshape(rows, cols)
out_image = Image.fromarray(img)
out_image.save(img_path)
# bp = BlockProcess()
# bp.assign_spatial_reference_bypoint(row_begin, col_begin, self.__proj, self.__geo, img_path)
# sr = osr.SpatialReference()
# sr.ImportFromWkt(self.__proj)
# geo_transform = (self.__geo[0] + col_begin * self.__geo[1] + row_begin * self.__geo[2],
# self.__geo[1],
# self.__geo[2],
# self.__geo[3] + col_begin * self.__geo[4] + row_begin * self.__geo[5],
# self.__geo[4],
# self.__geo[5]
# )
# dst_ds = gdal.Open(img_path, gdal.GA_Update)
# if dst_ds is None:
# return False
# dst_ds.SetProjection(sr.ExportToWkt())
# dst_ds.SetGeoTransform(geo_transform)
# del dst_ds
if not os.path.exists(img_path):
logger.error('total:%s,block:%s test data failed !path:%s', block_sum, n, img_path)
logger.info('total:%s,block:%s test data finished !path:%s', block_sum, n, img_path)
return True
@staticmethod
def predict(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols):
"""
Predict over the block-wise test set
:param clf: trained classifier (e.g. an SVM model)
:param X_test_list: paths of the block-wise test-set images
:return cover_path: path of the merged prediction image
"""
ml = MachineLeaning()
# process the blocks with multiprocessing
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
block_features_dir = X_test_list
bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name + '\\') # workspace_processing_path + out_tif_name + '\\'
file.creat_dirs([bp_cover_dir])
processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 1])
pool = multiprocessing.Pool(processes=processes_num)
for path, n in zip(block_features_dir, range(len(block_features_dir))):
name = os.path.split(path)[1]
# features_array = ImageHandler.get_data(path)
band = ImageHandler.get_bands(path)
if band == 1:
features_array = np.zeros((1, 1024, 1024), dtype=float)
feature_array = ImageHandler.get_data(path)
features_array[0, :, :] = feature_array
else:
features_array = ImageHandler.get_data(path)
X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
row_begin = int(name.split('_')[-4])
col_begin = int(name.split('_')[-2])
pool.apply_async(ml.predict_blok, (clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
pool.close()
pool.join()
# merge the block images
data_dir = bp_cover_dir
out_path = workspace_processing_path[0:-1]
bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
# attach geo-referencing
cover_path = os.path.join(workspace_processing_path, out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
# bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
return cover_path
@staticmethod
def predict_VP(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols):
"""
Predict over the block-wise test set (vegetation phenology variant)
:param clf: trained classifier (e.g. an SVM model)
:param X_test_list: paths of the block-wise test-set images
:return cover_path: path of the merged prediction image
"""
ml = MachineLeaning()
# process the blocks with multiprocessing
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
block_features_dir = X_test_list
bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name,
'pre_result\\') # workspace_processing_path + out_tif_name + '\\'
file.creat_dirs([bp_cover_dir])
processes_num = min([len(block_features_dir), max(1, multiprocessing.cpu_count() - 7)])
pool = multiprocessing.Pool(processes=processes_num)
for path, n in zip(block_features_dir, range(len(block_features_dir))):
name = os.path.split(path)[1]
band = ImageHandler.get_bands(path)
if band == 1:
features_array = np.zeros((1, 1024, 1024), dtype=float)
feature_array = ImageHandler.get_data(path)
features_array[0, :, :] = feature_array
else:
features_array = ImageHandler.get_data(path)
X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \
name.split('_')[-1]
img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
row_begin = int(name.split('_')[-4])
col_begin = int(name.split('_')[-2])
pool.apply_async(ml.predict_blok, (clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
# ml.predict_blok(clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n)
pool.close()
pool.join()
del pool
# merge the block images
data_dir = bp_cover_dir
out_path = workspace_processing_path[0:-1]
bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
# attach geo-referencing
cover_path = os.path.join(workspace_processing_path,
out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
# bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
return cover_path
@staticmethod
def get_name_list(feature_tif_dir):
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
name_list = []
dim = len(in_tif_paths)
for n, path in zip(range(dim), in_tif_paths):
name_list.append(str(n)+': '+os.path.split(path)[1])
logger.info('feature_list:%s', name_list)
return name_list
@staticmethod
def gene_train_set(train_data_dic, feature_tif_dir):
"""
Generate the training set
:param train_data_dic: training data read from csv
:param feature_tif_dir: directory of feature images
:return X_train, Y_train: training data
"""
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
dim = len(in_tif_paths)
X_train = np.empty(shape=(0, dim))
Y_train = np.empty(shape=(0, 1))
ids = train_data_dic['ids']
positions = train_data_dic['positions']
for id, points in zip(ids, positions):
# for data in train_data_list:
if points == []:
raise Exception('data is empty!')
row, col = zip(*points)
l = len(points)
X = np.empty(shape=(l, dim))
for n, tif_path in zip(range(dim), in_tif_paths):
feature_array = ImageHandler.get_data(tif_path)
feature_array[np.isnan(feature_array)] = 0  # fill NaN with 0
x = feature_array[row, col].T
X[:, n] = x
Y = np.full((l, 1), id)
X_train = np.vstack((X_train, X))
Y_train = np.vstack((Y_train, Y))
Y_train = Y_train.T[0, :]
logger.info("gene_train_set success!")
return X_train, Y_train
@staticmethod
def standardization(data, num=1):
# min-max normalize the matrix to [0, num]
min = np.nanmin(data)
max = np.nanmax(data)
data[np.isnan(data)] = min  # fill NaN with the minimum
_range = max - min
return (data - min) / _range * num
@staticmethod
def sel_optimal_feature_set(X_train, Y_train, threshold=0.01):
"""
筛选最优特征组合(极度随机树
"""
model = ExtraTreesClassifier()
max = np.max(Y_train)
if max < 0.1:
Y_train = (Y_train*10000).astype('int')
model.fit(X_train, Y_train.astype('int'))
# select the relative importance of each attribute
importances = model.feature_importances_
logger.info('importances:%s,threshold=%s', importances, threshold)
importances_resort = -np.sort(-importances)  # sort descending
imp_argsort = np.argsort(-importances)  # indices in descending order
optimal_feature = list(imp_argsort[np.where(importances_resort > threshold)])  # drop low-importance features
logger.info('optimal_feature:%s', optimal_feature)
if len(optimal_feature)==0:
logger.error('optimal_feature is empty')
optimal_feature = list(imp_argsort)
return optimal_feature
@staticmethod
def correlation_map(x, y):
# https://blog.csdn.net/weixin_39836726/article/details/110783640
# cc matrix based on scipy pearsonr
n_row_x = x.shape[0]
n_row_y = x.shape[0]
ccmtx_xy = np.empty((n_row_x, n_row_y))
for n in range(n_row_x):
for m in range(n_row_y):
ccmtx_xy[n, m] = pearsonr(x[n, :], y[m, :])[0]
return ccmtx_xy
@staticmethod
def remove_correlation_feature(X_train,validity_list, threshold=0.85):
"""
相关性抑制,去除相关性
:param X_train : 训练集
:param validity_list : 最优特征子集
:param threshold: 相关性阈值
:return validity_list : 最优特征子集
"""
ccmtx = MachineLeaning().correlation_map(X_train[:, validity_list].T, X_train[:, validity_list].T)
ccmtx = np.abs(ccmtx)
for r in range(len(validity_list)):
for c in range(len(validity_list)):
if c <= r:
ccmtx[r, c] = 0
logger.info('correlation_map:\n %s', ccmtx)
# for feature pairs whose correlation exceeds the threshold, drop the feature with the larger com_sep_coef
high_corr = np.unique(np.where(ccmtx > threshold)[1]) # 删除的特征序号
validity_list = np.delete(validity_list, high_corr)
logger.info('validity_list_corr:%s', validity_list)
logger.info(validity_list)
return validity_list
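# Usage sketch (hypothetical data): feature 3 is made an almost exact copy of feature 0, so
# it should be dropped from the index list at the default 0.85 threshold.
# X = np.random.rand(50, 4)
# X[:, 3] = X[:, 0] * 0.99 + 0.01
# kept = MachineLeaning.remove_correlation_feature(X, np.array([0, 1, 2, 3]), threshold=0.85)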
@staticmethod
def gene_train_data(block_features_dir, rows, cols, block_size, measured_data_img):
# generate the training set
X_train = []
Y_train = []
block_rows = int(np.ceil(rows/block_size))
block_cols = int(np.ceil(cols/block_size))
for data, n in zip(measured_data_img, range(len(measured_data_img))):
row = data[0]
col = data[1]
block_row = row//block_size
block_col = col//block_size
if block_row == block_rows-1:
part_img_row = row - (rows - block_size)
else:
part_img_row = row % block_size
if block_col == block_cols-1:
part_img_col = col - (cols-block_size)
else:
part_img_col = col % block_size
features_path = block_features_dir[block_row * block_cols + block_col]  # block index, assuming row-major block ordering
features_array = ImageHandler().get_data(features_path)
feature = features_array[:, part_img_row, part_img_col]
if not (np.isnan(feature).any() or np.isinf(feature).any()):
X_train.append(list(feature))
Y_train.append([data[2]])
logger.info('total:%s,num:%s create train set success!', len(measured_data_img), n)
return np.array(X_train), np.array(Y_train)
@staticmethod
def trainRF(X_train, Y_train):
# random forest
logger.info('RF training')
clf = RandomForestClassifier()
clf.fit(X_train, Y_train)
return clf
@staticmethod
def trainSVM(X_train, Y_train, cost=1, kernel='rbf'):
logger.info('svm training')
clf = SVC(C=cost, kernel=kernel, decision_function_shape='ovo', cache_size=1000,
class_weight='balanced', gamma='auto', tol=0.001, verbose=True)
clf.fit(X_train, Y_train)
return clf
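# Usage sketch (hypothetical arrays): X_train has shape (n_samples, n_features) and Y_train is
# a 1-D label vector; 'cost' maps to the SVC C parameter and 'kernel' to its kernel.
# clf = MachineLeaning.trainSVM(X_train, Y_train, cost=1, kernel='rbf')
# labels = clf.predict(X_test)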
@staticmethod
def vegetationPhenology_combine_feature(feature_dir, workspace_processing_path, name, rows, cols, debug=False):
ml = MachineLeaning()
path_list = list(glob.glob(os.path.join(feature_dir, '*.tif')))
# merge the feature matrices into one multi-band array
name_featuresPath_dic = {}
dim = len(path_list)
features_path = os.path.join(workspace_processing_path, name, name + '_features.tif')
if debug is False:
features_array = np.zeros((dim, rows, cols), dtype='float16')
for m, path in zip(range(dim), path_list):
data = ImageHandler.get_data(path)
data = ml.standardization(data)
features_array[m, :, :] = data
# replace NaN and inf with 0
features_array[np.isnan(features_array)] = 0.0
features_array[np.isinf(features_array)] = 0.0
ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
name_featuresPath_dic.update({name: features_path})
return name_featuresPath_dic

View File

@ -1,491 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:AHVToPolsarpro.py
@Function: convert quad-pol imagery to PolSARpro-format T3 data
@Contact:
@Author:SHJ
@Date:2021/9/18 16:44
@Version:1.0.0
"""
import os
import numpy as np
import glob
import struct
from tool.algorithm.image.ImageHandle import ImageHandler
class AHVToPolsarpro:
"""
Convert quad-pol imagery to a bin-format T3 matrix for PolSARpro processing
"""
def __init__(self, hh_hv_vh_vv_path_list=[]):
self._hh_hv_vh_vv_path_list = hh_hv_vh_vv_path_list
pass
@staticmethod
def __ahv_to_s2_veg(ahv_dir):
"""
Convert quad-pol imagery to an S2 scattering matrix
:param ahv_dir: folder of quad-pol images
:return: polarimetric scattering matrix S2
"""
in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
in_tif_paths += in_tif_paths1
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name (suffix _HH/_HV/_VH/_VV)
if '_HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif '_HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif '_VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif '_VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path :%s', ahv_dir)
return s11, s12, s21, s22
@staticmethod
def __ahv_to_s2_soil(ahv_dir):
"""
Convert quad-pol imagery to an S2 scattering matrix
:param ahv_dir: folder of quad-pol images
:return: polarimetric scattering matrix S2
"""
in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
in_tif_paths += in_tif_paths1
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path :%s', ahv_dir)
return s11, s12, s21, s22
@staticmethod
def __ahv_to_s2_list(ahv_path_list):
"""
Convert quad-pol imagery to an S2 scattering matrix
:param ahv_path_list: list of quad-pol image paths
:return: polarimetric scattering matrix S2
"""
in_tif_paths = ahv_path_list
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path')
return s11, s12, s21, s22
@staticmethod
def __ahv_to_s2_list_2(hh_hv_vh_vv_path_list):
"""
Convert quad-pol imagery to an S2 scattering matrix
:param hh_hv_vh_vv_path_list: image paths in fixed order [HH, HV, VH, VV]
:return: polarimetric scattering matrix S2
"""
in_tif_paths = hh_hv_vh_vv_path_list
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path, n in zip(in_tif_paths, range(len(in_tif_paths))):
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization by list position
if n == 0:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif n == 1:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif n == 2:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif n == 3:
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('HH or HV or VH or VV is not in path')
return s11, s12, s21, s22
@staticmethod
def __s2_to_t3(s11, s12, s21, s22):
"""
Convert the S2 matrix to a T3 coherency matrix
:param s11: HH polarization data
:param s12: HV polarization data
:param s21: VH polarization data
:param s22: VV polarization data
:return: polarimetric coherency matrix T3
"""
HH = s11
HV = s12
VH = s21
VV = s22
t11 = (np.abs(HH + VV)) ** 2 / 2
t12 = (HH + VV) * np.conj(HH - VV) / 2
t13 = (HH + VV) * np.conj(HV + VH)
t21 = (HH - VV) * np.conj(HH + VV) / 2
t22 = np.abs(HH - VV) ** 2 / 2
t23 = (HH - VV) * np.conj(HV + VH)
t31 = (HV + VH) * np.conj(HH + VV)
t32 = (HV + VH) * np.conj(HH - VV)
t33 = 2 * np.abs(HV + VH) ** 2
return t11, t12, t13, t21, t22, t23, t31, t32, t33
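# Note: with the Pauli scattering vector k = [HH+VV, HH-VV, HV+VH] / sqrt(2), the textbook
# coherency matrix T = k * k^H carries a factor 1/2 on every element (e.g.
# t13 = (HH+VV)*conj(HV+VH)/2 and t33 = |HV+VH|^2/2). The scaling used above (no 1/2 on the
# t13/t23/t31/t32 terms and a factor 2 on t33) is this project's convention and is kept as-is.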
def __t3_to_polsarpro_t3(self, out_dir, t11, t12, t13, t22, t23, t33):
"""
Convert the T3 matrix to bin format for PolSARpro processing
:param out_dir: output folder path
:param t11:
:param t12:
:param t13:
:param t22:
:param t23:
:param t33:
:return: bin-format T3 matrix plus header files
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rows = t11.shape[0]
cols = t11.shape[1]
bins_dict = {
'T11.bin': t11,
'T12_real.bin': t12.real,
'T12_imag.bin': t12.imag,
'T13_real.bin': t13.real,
'T13_imag.bin': t13.imag,
'T22.bin': t22,
'T23_real.bin': t23.real,
'T23_imag.bin': t23.imag,
'T33.bin': t33}
for name, data in bins_dict.items():
bin_path = os.path.join(out_dir, name)
self.__write_img_bin(data, bin_path)  # todo: revisit how the T3 matrix is saved
# data.tofile(bin_path)
out_hdr_path = bin_path + '.hdr'
self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
self.__write_config_file(out_dir, rows, cols)
def rows(self):
"""number of image rows"""
return self._rows
def cols(self):
"""number of image columns"""
return self._cols
def __write_img_bin(self, im, file_path):
"""
Write an image into a bin file as float32
:param im: image matrix (single-channel data only for now)
:param file_path: full path of the bin file
"""
with open(file_path, 'wb') as f:
self._rows = im.shape[0]
self._cols = im.shape[1]
for row in range(self._rows):
im_bin = struct.pack("f" * self._cols, *np.reshape(im[row, :], (self._cols, 1), order='F'))
f.write(im_bin)
f.close()
@staticmethod
def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
"""
Write the image header file
:param out_hdr_path: header file path
:param bin_path: bin file path
:param rows: number of image rows
:param cols: number of image columns
"""
h1 = 'ENVI'
h2 = 'description = {'
h3 = 'File Imported into ENVI. }'
h4 = 'samples = ' + str(cols)  # columns
h5 = 'lines = ' + str(rows)  # rows
h6 = 'bands = 1 '  # number of bands
h7 = 'header offset = 0'
h8 = 'file type = ENVI Standard'
h9 = 'data type = 4'  # data format (float32)
h10 = 'interleave = bsq'  # storage layout
h11 = 'sensor type = Unknown'
h12 = 'byte order = 0'
h13 = 'band names = {'
h14 = bin_path + '}'
# h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
# doc = open(out_hdr_path, 'w')
# for i in range(0, 14):
# print(h[i], end='', file=doc)
# print('\n', end='', file=doc)
h = [h1, h4, h5, h6, h7, h8, h9, h10, h12]
doc = open(out_hdr_path, 'w')
for i in range(0, 9):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
@staticmethod
def __write_config_file(out_config_dir, rows, cols):
"""
Write the PolSARpro config file
:param out_config_dir: config file directory
:param rows: number of image rows
:param cols: number of image columns
"""
h1 = 'Nrow'
h2 = str(rows)
h3 = '---------'
h4 = 'Ncol'
h5 = str(cols)
h6 = '---------'
h7 = 'PolarCase'
h8 = 'monostatic'
h9 = '---------'
h10 = 'PolarType'
h11 = 'full'
h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]
out_config_path = os.path.join(out_config_dir, 'config.txt')
doc = open(out_config_path, 'w')
for i in range(0, 11):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
def incidence_tif2bin(self, incidence_file, out_path):
if not os.path.exists(out_path):
os.mkdir(out_path)
incidence_bin = os.path.join(out_path, 'incidence.bin')
data = ImageHandler().get_data(incidence_file)
rows = data.shape[0]
cols = data.shape[1]
self.__write_img_bin(data, incidence_bin)
if not os.path.exists(incidence_bin):
raise Exception('incidence to bin failed')
out_hdr_path = incidence_bin + '.hdr'
self.__write_bin_hdr(out_hdr_path, incidence_bin, rows, cols)
return incidence_bin
def ahv_to_polsarpro_t3_veg(self, out_file_dir, in_ahv_dir=''):
if self._hh_hv_vh_vv_path_list == [] :
s11, s12, s21, s22 = self.__ahv_to_s2_veg(in_ahv_dir)
else:
s11, s12, s21, s22 = self.__ahv_to_s2_list_2(self._hh_hv_vh_vv_path_list)
t11, t12, t13, t21, t22, t23, t31, t32, t33 = self.__s2_to_t3(
s11, s12, s21, s22)
self.__t3_to_polsarpro_t3(out_file_dir, t11, t12, t13, t22, t23, t33)
def ahv_to_polsarpro_t3_soil(self, out_file_dir, in_ahv_dir=''):
if self._hh_hv_vh_vv_path_list == [] :
s11, s12, s21, s22 = self.__ahv_to_s2_soil(in_ahv_dir)
else:
s11, s12, s21, s22 = self.__ahv_to_s2_list_2(self._hh_hv_vh_vv_path_list)
t11, t12, t13, t21, t22, t23, t31, t32, t33 = self.__s2_to_t3(
s11, s12, s21, s22)
self.__t3_to_polsarpro_t3(out_file_dir, t11, t12, t13, t22, t23, t33)
def calibration(self, calibration_value, in_ahv_dir='', name=''):
if name == '':
out_dir = os.path.join(in_ahv_dir, 'calibration')
else:
out_dir = os.path.join(in_ahv_dir, name, 'calibration')
flag_list = [0, 0, 0, 0]
if self._hh_hv_vh_vv_path_list == []:  # land cover / soil salinity inputs
in_tif_paths = list(glob.glob(os.path.join(in_ahv_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(in_ahv_dir, '*.tiff')))
in_tif_paths += in_tif_paths1
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
name = os.path.basename(in_tif_path)
data_new = np.zeros(data.shape)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[0]
data_new[1, :, :] = data[1, :, :] * calibration_value[0]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[1]
data_new[1, :, :] = data[1, :, :] * calibration_value[1]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[2]
data_new[1, :, :] = data[1, :, :] * calibration_value[2]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[3]
data_new[1, :, :] = data[1, :, :] * calibration_value[3]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[3] = 1
if not flag_list == [1, 1, 1, 1]:
raise Exception('calibration error! ')
else:
for in_tif_path in self._hh_hv_vh_vv_path_list:  # vegetation phenology inputs
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
name = os.path.basename(in_tif_path)
data_new = np.zeros(data.shape)
# determine the polarization from the file name
data_new[0, :, :] = data[0, :, :] * calibration_value[0]
data_new[1, :, :] = data[1, :, :] * calibration_value[0]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[0] = 1
elif '_HV' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[1]
data_new[1, :, :] = data[1, :, :] * calibration_value[1]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[1] = 1
elif '_VH' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[2]
data_new[1, :, :] = data[1, :, :] * calibration_value[2]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[2] = 1
elif '_VV' in os.path.basename(in_tif_path):
data_new[0, :, :] = data[0, :, :] * calibration_value[3]
data_new[1, :, :] = data[1, :, :] * calibration_value[3]
ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
flag_list[3] = 1
if not flag_list == [1, 1, 1, 1]:
raise Exception('calibration error! ')
self._hh_hv_vh_vv_path_list = []
return out_dir
if __name__ == '__main__':
# example 1
# atp = AHVToPolsarpro()
# ahv_path = 'D:\\DATA\\GAOFEN3\\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\\'
# # ahv_path = 'D:\\DATA\\GAOFEN3\\2598957_Paris\\'
# out_file_path = 'D:\\bintest0923\\'
# atp.ahv_to_polsarpro_t3(out_file_path, ahv_path)
# # polarimetric decomposition to a T3 matrix
# atp = AHVToPolsarpro()
# ahv_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024_RPC"
# t3_path = ahv_path + 'psp_t3\\'
# atp.ahv_to_polsarpro_t3(t3_path, ahv_path)
# example 2
# dir = r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\preprocessed/'
# path_list = [dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HV_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VV_preprocessed.tif']
#
#
# atp = AHVToPolsarpro(path_list)
# atp.ahv_to_polsarpro_t3(r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\processing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC/t3')
print("done")

View File

@ -1,228 +0,0 @@
"""
@Project microproduct
@File AHVToPolsarpro.PY
@Function 将四个极化数据转成S2矩阵文件
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
import os
import numpy as np
import glob
import struct
from tool.algorithm.image.ImageHandle import ImageHandler
class AHVToPolsarproS2:
"""
Convert quad-pol imagery to a bin-format S2 matrix for PolSARpro processing
"""
def __init__(self):
pass
@staticmethod
def __ahv_to_s2(ahv_dir):
"""
Convert quad-pol imagery to an S2 scattering matrix
:param ahv_dir: folder of quad-pol images
:return: polarimetric scattering matrix S2
"""
in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
if in_tif_paths == []:
in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
s11, s12, s21, s22 = None, None, None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]  # first band (real part)
data_imag = data[1, :, :]  # second band (imaginary part)
s11 = data_real + 1j * data_imag
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s12 = data_real + 1j * data_imag
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s21 = data_real + 1j * data_imag
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
data_real = data[0, :, :]
data_imag = data[1, :, :]
s22 = data_real + 1j * data_imag
flag_list[3] = 1
else:
continue
if not flag_list == [1, 1, 1, 1]:
raise Exception('tif of HH or HV or VH or VV is not in path :%s', ahv_dir)
return s11, s12, s21, s22
def __s2_to_bin(self, out_dir, s11, s12, s21, s22):
"""
Convert the S2 matrix to bin format for PolSARpro processing
:param out_dir: output folder path
:param s11:
:param s12:
:param s21:
:param s22:
:return: bin-format S2 matrix plus header files
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rows = s11.shape[0]
cols = s11.shape[1]
bins_dict = {'s11.bin': s11,
's12.bin': s12,
's21.bin': s21,
's22.bin': s22}
for name, data in bins_dict.items():
bin_path = os.path.join(out_dir, name)
self.__write_slc_img_bin(data, bin_path,name)
out_hdr_path = bin_path+'.hdr'
self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
self.__write_config_file(out_dir, rows, cols)
@staticmethod
def __write_slc_img_bin(im, file_path, name):
"""
Write a complex image into a bin file as interleaved float32 (real, imag, real, ...)
:param im: image matrix (single-channel complex data only for now)
:param file_path: full path of the bin file
"""
with open(file_path, 'wb') as f:
rows = im.shape[0]
cols = im.shape[1]
cre_im = np.zeros((rows, 2*cols), dtype=float)
cre_im[:, ::2] = im.real  # store the real part
cre_im[:, 1::2] = im.imag  # store the imaginary part
for row in range(rows):
cre_im_bin = struct.pack("f" * 2*cols, *np.reshape(cre_im[row, :], (2*cols, 1), order='F'))
f.write(cre_im_bin)
f.close()
@staticmethod
def read_slc_bin_to_img(bin_path):
"""
Read bin-format binary data and return it as a matrix
:param bin_path: path of the bin file (config.txt must sit beside it)
:return: image array
"""
(bin_dir, bin_name) = os.path.split(bin_path)
config_path = os.path.join(bin_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
rows = int(config[1])
cols = int(config[4])
bin_file = open(bin_path, 'rb')  # open the binary file
size = os.path.getsize(bin_path)  # file size in bytes
if size < rows * cols * 4 * 2:
raise Exception(
'bin size less than rows*cols*4*2! size:',
size,
'byte, rows:',
rows,
'cols:',
cols)
bin_data = np.zeros([rows, cols*2], dtype=np.float32)
img_array = np.zeros([2,rows, cols], dtype=np.float32)
for row in range(rows):
data = bin_file.read(4 * cols * 2)  # read one row of binary data at a time
row_data = struct.unpack('f' * cols*2, data)  # unpack into one row of floats
bin_data[row, :] = row_data
bin_file.close()
img_array[0] = bin_data[:, ::2] # real
img_array[1] = bin_data[:, 1::2] # imag
return img_array
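# Round-trip sketch (hypothetical folder): api_ahv_to_polsarpro_s2 writes s11/s12/s21/s22 as
# interleaved real/imag float32 .bin files plus config.txt, and read_slc_bin_to_img reads one
# back as a (2, rows, cols) array with band 0 = real part and band 1 = imaginary part.
# img = AHVToPolsarproS2.read_slc_bin_to_img(r'D:\example\S2\s11.bin')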
@staticmethod
def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
"""
Write the image header file
:param out_hdr_path: header file path
:param bin_path: bin file path
:param rows: number of image rows
:param cols: number of image columns
"""
h1 = 'ENVI'
h2 = 'description = {'
h3 = 'ENVI File, Created [] }'
h4 = 'samples = ' + str(cols)  # columns
h5 = 'lines = ' + str(rows)  # rows
h6 = 'bands = 1 '  # number of bands
h7 = 'header offset = 0'
h8 = 'file type = ENVI Standard'
h9 = 'data type = 6'  # data format, 6 = complex
h10 = 'interleave = bsq'  # storage layout
h11 = 'sensor type = Unknown'
h12 = 'byte order = 0'
h13 = 'wavelength units = Unknown'
h14 = 'complex function = Power'
h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
doc = open(out_hdr_path, 'w')
for i in range(0, 14):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
@staticmethod
def __write_config_file(out_config_dir, rows, cols):
"""
Write the PolSARpro config file
:param out_config_dir: config file directory
:param rows: number of image rows
:param cols: number of image columns
"""
h1 = 'Nrow'
h2 = str(rows)
h3 = '---------'
h4 = 'Ncol'
h5 = str(cols)
h6 = '---------'
h7 = 'PolarCase'
# h8 = 'monostatic'
h8 = 'bistatic'
h9 = '---------'
h10 = 'PolarType'
h11 = 'full'
h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]
out_config_path = os.path.join(out_config_dir, 'config.txt')
doc = open(out_config_path, 'w')
for i in range(0, 11):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
def api_ahv_to_polsarpro_s2(self, out_file_dir, in_ahv_dir):
s11, s12, s21, s22 = self.__ahv_to_s2(in_ahv_dir)
self.__s2_to_bin(out_file_dir, s11, s12, s21, s22)
# if __name__ == '__main__':
# # test()
# atp = AHVToPolsarproS2()
# ahv_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087'
# # ahv_path = 'D:\\DATA\\GAOFEN3\\2598957_Paris\\'
# out_file_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\SLC_SHJ_2'
# atp.api_ahv_to_polsarpro_s2(out_file_path, ahv_path)
# bin_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\SLC_SHJ\s11.bin'
# # data = atp.read_slc_bin_to_img(bin_path)
# print("done")

View File

@ -1,196 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:DualPolarToPolsarproC2.py
@Function: convert dual-pol imagery to PolSARpro-format C2 data
@Contact:
@Author:SHJ
@Date:2021/11/5
@Version:1.0.0
"""
import os
import numpy as np
import glob
import struct
import gc
from tool.algorithm.image.ImageHandle import ImageHandler
class DualPolarToPolsarproC2:
"""
Convert dual-pol imagery to a bin-format C2 matrix for PolSARpro processing
"""
def __init__(self):
pass
@staticmethod
def __dual_polar_to_c2(dual_polar_dir):
"""
Convert dual-pol imagery to a C2 matrix
:param dual_polar_dir: folder of dual-pol images
:return: C2 matrix
"""
in_tif_paths = list(glob.glob(os.path.join(dual_polar_dir, '*.tif')))
if in_tif_paths == []:
in_tif_paths = list(glob.glob(os.path.join(dual_polar_dir, '*.tiff')))
s11, s22 = None, None
flag_list = [0, 0, 0, 0]
for in_tif_path in in_tif_paths:
# read the original SAR image
proj, geotrans, data = ImageHandler.read_img(in_tif_path)
# determine the polarization from the file name
if 'HH' in os.path.basename(in_tif_path):
s11 = data[0, :, :] + 1j * data[1, :, :]
flag_list[0] = 1
elif 'HV' in os.path.basename(in_tif_path):
s22 = data[0, :, :] + 1j * data[1, :, :]
flag_list[1] = 1
elif 'VH' in os.path.basename(in_tif_path):
s22 = data[0, :, :] + 1j * data[1, :, :]
flag_list[2] = 1
elif 'VV' in os.path.basename(in_tif_path):
s11 = data[0, :, :] + 1j * data[1, :, :]
flag_list[3] = 1
else:
continue
del data
gc.collect()
if flag_list != [1, 1, 0, 0] and flag_list != [0, 0, 1, 1] :
raise Exception('Dual-Polarization SAR is not in path :%s',in_tif_path)
c11 = np.abs(s11) ** 2
c12 = s11 * np.conj(s22)
del s11
gc.collect()
c22 = np.abs(s22) ** 2
return c11, c12, c22
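# For the dual-pol scattering vector k = [s11, s22]^T this is the 2x2 covariance matrix
# C2 = k * k^H, stored through its three independent terms:
# C11 = |s11|^2, C12 = s11 * conj(s22) (complex), C22 = |s22|^2.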
def __c2_to_polsarpro_c2(self, out_dir, c11, c12, c22):
"""
Convert the C2 matrix to bin format for PolSARpro processing
:param out_dir: output folder path
:param c11:
:param c12:
:param c22:
:return: bin-format C2 matrix plus header files
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rows = c11.shape[0]
cols = c11.shape[1]
bins_dict = {
'C11.bin': c11,
'C12_real.bin': c12.real,
'C12_imag.bin': c12.imag,
'C22.bin': c22}
for name, data in bins_dict.items():
bin_path = os.path.join(out_dir, name)
self.__write_img_bin(data, bin_path)
out_hdr_path = bin_path + '.hdr'
self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
self.__write_config_file(out_dir, rows, cols)
def rows(self):
"""number of image rows"""
return self._rows
def cols(self):
"""number of image columns"""
return self._cols
def __write_img_bin(self, im, file_path):
"""
Write an image into a bin file as float32
:param im: image matrix (single-channel data only for now)
:param file_path: full path of the bin file
"""
with open(file_path, 'wb') as f:
self._rows = im.shape[0]
self._cols = im.shape[1]
for row in range(self._rows):
im_bin = struct.pack("f" * self._cols, *np.reshape(im[row, :], (self._cols, 1), order='F'))
f.write(im_bin)
f.close()
@staticmethod
def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
"""
Write the image header file
:param out_hdr_path: header file path
:param bin_path: bin file path
:param rows: number of image rows
:param cols: number of image columns
"""
name = os.path.split(bin_path)[1]
h1 = 'ENVI'
h2 = 'description = {'
h3 = 'File Imported into ENVI. }'
h4 = 'samples = ' + str(cols)  # columns
h5 = 'lines = ' + str(rows)  # rows
h6 = 'bands = 1 '  # number of bands
h7 = 'header offset = 0'
h8 = 'file type = ENVI Standard'
h9 = 'data type = 4'  # data format (float32)
h10 = 'interleave = bsq'  # storage layout
h11 = 'sensor type = Unknown'
h12 = 'byte order = 0'
h13 = 'band names = {'
h14 = name + '}'
h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
doc = open(out_hdr_path, 'w')
for i in range(0, 14):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
@staticmethod
def __write_config_file(out_config_dir, rows, cols):
"""
Write the PolSARpro config file
:param out_config_dir: config file directory
:param rows: number of image rows
:param cols: number of image columns
"""
h1 = 'Nrow'
h2 = str(rows)
h3 = '---------'
h4 = 'Ncol'
h5 = str(cols)
h6 = '---------'
h7 = 'PolarCase'
h8 = 'monostatic'
h9 = '---------'
h10 = 'PolarType'
h11 = 'pp1'
h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]
out_config_path = os.path.join(out_config_dir, 'config.txt')
doc = open(out_config_path, 'w')
for i in range(0, 11):
print(h[i], end='', file=doc)
print('\n', end='', file=doc)
doc.close()
def api_dual_polar__to_polsarpro_c2(self, out_file_dir, dual_polar_dir):
c11, c12, c22 = self.__dual_polar_to_c2(dual_polar_dir)
self.__c2_to_polsarpro_c2(out_file_dir,c11, c12, c22)
# if __name__ == '__main__':
# tp = DualPolarToPolsarproC2()
# out_dic = 'E:\\3-GF3_KAS_FSI_020253_E110.8_N25.5_20200614_L1A_HHHV_L10004871459\\SLC_SHJ1'
# in_dic = 'E:\\3-GF3_KAS_FSI_020253_E110.8_N25.5_20200614_L1A_HHHV_L10004871459\\'
# # out_file_path = 'D:\\bintest0923\\'
# tp.api_dual_polar__to_polsarpro_c2(out_dic,in_dic)
# # atp.ahv_to_polsarpro_t3(out_file_path, ahv_path)
#
# print("done")

View File

@ -1,97 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project onestar
@File GLDM.py
@Contact
scikit-image feature computation: https://blog.csdn.net/lyxleft/article/details/102904909
2-D convolution over images in Python: https://www.xz577.com/j/281686.html
Grey-level co-occurrence matrices with skimage: https://zhuanlan.zhihu.com/p/147066037
@function compute grey-level co-occurrence matrix (GLCM) texture features of an image
@Author SHJ
@Date 2021/11/10 14:42
@Version 1.0.0
"""
import numpy as np
import os
from skimage.feature import greycomatrix, greycoprops
import datetime
from tool.algorithm.image.ImageHandle import ImageHandler
class GLDM:
def __init__(self, win_size=15, step=2, levels=16, angles=[0, 45, 90, 135],
prop=['contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation', 'ASM']):
self._win_size = win_size  # GLCM window size, must be odd
self._step = step  # pixel-pair distance
self._levels = levels  # number of gray levels, e.g. 16 or 256
self._angles = list(np.deg2rad(np.array(angles)))  # angles, converted to radians
"""
'contrast': reflects image sharpness and the depth of texture grooves
'dissimilarity': dissimilarity
'homogeneity': homogeneity / inverse difference moment; measures local texture variation; a large value means the texture is locally very uniform
'energy': sum of squared GLCM elements; reflects the uniformity of the gray-level distribution and texture coarseness
'correlation': measures the similarity of GLCM elements along rows or columns
'ASM': angular second moment
"""
self._prop = prop  # names of the texture features
def get_glcm_value(self, input):
values_temp = []
# build the gray-level co-occurrence matrix
# greycomatrix args: image, distances, angles, gray levels, symmetric, normed
# all four angles [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4] are computed; a single angle also works
glcm = greycomatrix(input, [self._step], self._angles, self._levels, symmetric=False, normed=True)
# compute each texture descriptor from the GLCM
for prop in self._prop:
temp = greycoprops(glcm, prop)
values_temp.append(np.mean(temp))
return values_temp
def get_glcm_array(self,inputs: np.ndarray, win_size):
h, w = inputs.shape
pad = (win_size - 1) // 2
inputs = np.pad(inputs, pad_width=[(pad, pad), (pad, pad)], mode="constant", constant_values=0)
glcm_array ={}
for name in self._prop:
glcm_array.update({name:np.zeros(shape=(h, w),dtype=np.float32)})
for i in range(h):  # row index
for j in range(w):  # column index
window = inputs[i: i + win_size, j: j + win_size]
value = self.get_glcm_value(window)
print('i:%s, j:%s' % (i, j))
for n, array in zip(range(len(glcm_array)), glcm_array.values()):
array[i, j] = value[n]
return glcm_array
@staticmethod
def standardization(data, num=1):
# normalize the matrix to [0, num]
data[np.isnan(data)] = np.nanmin(data)  # fill NaN with the minimum valid value
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range * num
def api_get_glcm_array(self, out_dir, in_tif_path, name=''):
ih = ImageHandler()
proj, geotrans, array = ih.read_img(in_tif_path)
array[np.where(array > 500000)] = 500000  # clip very large values so normalization does not squeeze most pixels toward 0
array = self.standardization(array, self._levels - 1)  # normalize to 0 ~ self._levels-1
array = np.uint8(array)
glcm_array = self.get_glcm_array(array, self._win_size)
for key,value in glcm_array.items():
out_path = os.path.join(out_dir,name+'_'+key+'.tif')
ih.write_img(out_path, proj, geotrans,value)
if __name__ == '__main__':
start = datetime.datetime.now()
gldm = GLDM(win_size=9, levels=16, step=3, angles=[0, 45, 90, 135])
gldm.api_get_glcm_array(r'D:\glcm', r'D:\glcm\src_img.tif')
end = datetime.datetime.now()
msg = 'running use time: %s ' % (end - start)
print(msg)
# a 666*720 image took: running use time: 0:04:23.155424
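# Compatibility note: scikit-image 0.19 renamed greycomatrix/greycoprops to
# graycomatrix/graycoprops, and the grey* spellings were removed in later
# releases. A small import shim, assuming nothing else about the environment:
try:
    from skimage.feature import graycomatrix as greycomatrix, graycoprops as greycoprops
except ImportError:  # older scikit-image still ships the grey* spelling
    from skimage.feature import greycomatrix, greycoprops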

@@ -1,85 +0,0 @@
import os
import glob
import numpy as np
import struct
from PIL import Image
from tool.algorithm.ml.machineLearning import MachineLeaning as ml
def read_bin_to_img(bin_path):
"""
读取bin格式二进制数据输出为矩阵
:param bin_path : bin文件的路径包含.bin,.config
:return : 矩阵信息
"""
(bin_dir, bin_name) = os.path.split(bin_path)
config_path = os.path.join(bin_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
rows = int(config[1])
cols = int(config[4])
bin_file = open(bin_path, 'rb')  # open the binary file
size = os.path.getsize(bin_path)  # file size in bytes
if size < rows * cols * 4:
raise Exception(
'bin size less than rows*cols*4! size:',
size,
'byte, rows:',
rows,
'cols:',
cols)
img = np.zeros([rows, cols], dtype=np.float32)
for row in range(rows):
data = bin_file.read(4 * cols)  # read one row of binary data at a time
row_data = struct.unpack('f' * cols, data)  # unpack into one row of floats
img[row, :] = row_data
bin_file.close()
return img
def write_bin_to_tif(out_tif_dir, bin_dir):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param out_tif_dir : tif的输出路径
:param bin_dir : 二进制数据的目录,包含.bin,.config
:return out_tif_path: 生成tif的路径字典
"""
bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
out_tif_path = {}
for in_path in bin_paths:
name = os.path.split(in_path)[1].split('.')[0]
out_path = os.path.join(out_tif_dir, name + '.tif')
out_tif_path.update({name: out_path})
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
img_array = read_bin_to_img(in_path)
img_array[np.isnan(img_array)] = 0  # fill NaN with 0
img_array = ml.standardization(img_array)  # normalize to [0, 1]
out_image = Image.fromarray(img_array)
out_image.save(out_path)
return out_tif_path
def write_bin_to_tif_soil(out_tif_dir, bin_dir):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param out_tif_dir : tif的输出路径
:param bin_dir : 二进制数据的目录,包含.bin,.config
:return out_tif_path: 生成tif的路径字典
"""
bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
out_tif_path = {}
for in_path in bin_paths:
name = os.path.split(in_path)[1].split('.')[0]
out_path = os.path.join(out_tif_dir, name + '.tif')
out_tif_path.update({name: out_path})
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
img_array = read_bin_to_img(in_path)
img_array[np.isnan(img_array)] = 0  # fill NaN with 0
# img_array = ml.standardization(img_array)  # normalization intentionally skipped here
out_image = Image.fromarray(img_array)
out_image.save(out_path)
return out_tif_path
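# A minimal usage sketch, assuming a PolSARpro output directory that contains
# *.bin files plus config.txt (both paths below are hypothetical):
if __name__ == '__main__':
    tif_paths = write_bin_to_tif(r'F:\psp_out\tif', r'F:\psp_out')
    for feature_name, tif_path in tif_paths.items():
        print(feature_name, '->', tif_path)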

@@ -1,190 +0,0 @@
from tool.algorithm.algtools.MetaDataHandler import Calibration
from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro
from tool.algorithm.polsarpro.pspLeeRefinedFilterT3 import LeeRefinedFilterT3
from tool.algorithm.polsarpro.pspCloudePottierDecomposition import PspCloudePottierDecomposition
from tool.algorithm.polsarpro.pspFreemanDecomposition import PspFreemanDecomposition
from tool.algorithm.polsarpro.pspYamaguchiDecomposition import PspYamaguchiDecomposition
from tool.algorithm.polsarpro.pspTouziDecomposition import PspTouziDecomposition
from tool.algorithm.polsarpro.bin2tif import write_bin_to_tif
from tool.algorithm.polsarpro.pspHAAlphaDecomposition import PspHAAlphaDecomposition
from tool.algorithm.xml.AlgXmlHandle import InitPara
import logging
import os
import shutil
import glob
logger = logging.getLogger("mylog")
class CreateFeature:
"""
生产特征
"""
def __init__(self, debug = False, exe_dir = ''):
self._debug = debug
self._exe_dir = exe_dir
pass
def ahv_to_t3(self, workspace_processing_path, workspace_preprocessing_path, hh_hv_vh_vv_list, name='', FILTER_SIZE=3):
# convert the full-polarization tifs into a bin-format T3 matrix
atp = AHVToPolsarpro(hh_hv_vh_vv_list)
lee_filter_path = os.path.join(workspace_processing_path, name, 'lee_filter\\')
if not self._debug:
t3_path = os.path.join(workspace_processing_path, name, 'psp_t3\\')
polarization = ['HH', 'HV', 'VH', 'VV']
if os.path.exists(workspace_preprocessing_path + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(workspace_preprocessing_path + name, '*.meta.xml')))
meta_dic = InitPara.get_meta_dic_new(meta_xml_paths, name)
calibration = Calibration.get_Calibration_coefficient(meta_dic['Origin_META'], polarization)
tif_path = atp.calibration(calibration, workspace_preprocessing_path, name)
atp.ahv_to_polsarpro_t3_veg(t3_path, tif_path)
# refined Lee filtering
leeFilter = LeeRefinedFilterT3()
leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols(), FILTER_SIZE)
logger.info("refine_lee filter success!")
return lee_filter_path
def decompose(self, workspace_processing_path, name, t3_path, rows, cols, hh_hv_vh_vv_dic={}, FeatureInput=['Freeman', 'Yamaguchi', 'Cloude']):
"""
Polarimetric decompositions: Freeman, Touzi, Yamaguchi, Cloude
:param t3_path: path of the T3 data
:param rows: number of image rows
:param cols: number of image columns
"""
# compute the selected feature set
exeDir = self._exe_dir
outFolderDic = {}
if 'Freeman' in FeatureInput:
# Freeman decomposition
freemanOutDir = os.path.join(workspace_processing_path, name + '\\freeman\\')
if not self._debug:
freemDecom = PspFreemanDecomposition(exeDir, t3_path, freemanOutDir)
flag = freemDecom.api_freeman_decomposition_T3(0, 0, rows, cols)
if not flag:
logger.error('FreemanDecomposition err')
return False, None
outFolderDic['Freeman'] = freemanOutDir
if 'Touzi' in FeatureInput:
# Touzi decomposition
touziOutDir = os.path.join(workspace_processing_path, name + '\\touzi\\')
if not os.path.exists(touziOutDir):
os.makedirs(touziOutDir)
if not self._debug:
# the Touzi decomposition is slow and its features are comparatively weak descriptors
p = PspTouziDecomposition(hh_hv_vh_vv_dic, touziOutDir)
p.Touzi_decomposition_multiprocessing()
outFolderDic['Touzi'] = touziOutDir
if 'Yamaguchi' in FeatureInput:
# Yamaguchi decomposition
yamaguchiOutDir = os.path.join(workspace_processing_path, name + '\\yamaguchi\\')
if not self._debug:
yamaguchiDecom = PspYamaguchiDecomposition(exeDir, t3_path, yamaguchiOutDir)
flag = yamaguchiDecom.api_yamaguchi_4components_decomposition_T3(0, 0, rows, cols)
if not flag:
logger.error('YamaguchiDecomposition err')
return False, None
outFolderDic['Yamaguchi'] = yamaguchiOutDir
if 'Cloude' in FeatureInput:
# Cloude-Pottier decomposition
cloudeOutDir = os.path.join(workspace_processing_path, name + '\\cloude\\')
if not self._debug:
cloudeDecom = PspCloudePottierDecomposition(
exeDir, t3_path, cloudeOutDir)
flag = cloudeDecom.api_h_a_alpha_decomposition_T3(
0, 0, rows, cols)
if not flag:
logger.error('CloudePottierDecomposition err')
return False, None
outFolderDic['Cloude'] = cloudeOutDir
return True, outFolderDic
def creat_h_a_alpha_features(self, t3_path, out_dir):
logger.info('ahv transform to polsarpro T3 matrix success!')
logger.info('progress bar: 20%')
h_a_alpha_decomposition_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_decomposition_T3.exe')
h_a_alpha_eigenvalue_set_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_eigenvalue_set_T3.exe')
h_a_alpha_eigenvector_set_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_eigenvector_set_T3.exe')
if not self._debug:
haa = PspHAAlphaDecomposition(normalization=True)
haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=out_dir,
h_a_alpha_decomposition_T3_path=h_a_alpha_decomposition_T3_path ,
h_a_alpha_eigenvalue_set_T3_path=h_a_alpha_eigenvalue_set_T3_path ,
h_a_alpha_eigenvector_set_T3_path=h_a_alpha_eigenvector_set_T3_path,
polsarpro_in_dir=t3_path)
def cereat_features_dic(self, outFolderDic, feature_tif_dir):
if not os.path.exists(feature_tif_dir):
os.makedirs(feature_tif_dir)
feature_tif_paths = {}
for key in outFolderDic:
feature_bin_dic = outFolderDic[key]
if key == 'Touzi':
for path in list(glob.glob(os.path.join(feature_bin_dic, '*.tif'))):
name = os.path.split(path)[1].split('.')[0]
if not self._debug:
shutil.copyfile(path, os.path.join(feature_tif_dir, name + '.tif'))
feature_tif_paths.update({name: os.path.join(feature_tif_dir, name + '.tif')})
else:
feature_tif_paths.update(write_bin_to_tif(feature_tif_dir, feature_bin_dic))
return feature_tif_paths
@staticmethod
def decompose_single_tar(hh_hv_vh_vv_list, workspace_processing_path, workspace_preprocessing_path, name, exe_dir, rows, cols, FILTER_SIZE=3, debug=False, FeatureInput=['Freeman', 'Yamaguchi', 'Cloude']):
hh_hv_vh_vv_dic = {'HH': hh_hv_vh_vv_list[0], 'HV': hh_hv_vh_vv_list[1], 'VH': hh_hv_vh_vv_list[2], 'VV': hh_hv_vh_vv_list[3]}
feature_tif_dir = os.path.join(workspace_processing_path, name, 'features')
cfeature = CreateFeature(debug, exe_dir)
# build the Lee-filtered T3 matrix first; the H-A-Alpha features are computed from it
t3_path = cfeature.ahv_to_t3(workspace_processing_path, workspace_preprocessing_path, hh_hv_vh_vv_list, name, FILTER_SIZE)
cfeature.creat_h_a_alpha_features(t3_path, feature_tif_dir)
flag, outFolderDic = cfeature.decompose(workspace_processing_path, name, t3_path, rows, cols, hh_hv_vh_vv_dic, FeatureInput)
cfeature.cereat_features_dic(outFolderDic, feature_tif_dir)
return feature_tif_dir
if __name__ == '__main__':
# # 实例1
# exe_dir = os.getcwd()
# dir = r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\preprocessed/'
# hh_hv_vh_vv_list = [dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HV_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VH_preprocessed.tif',
# dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VV_preprocessed.tif']
#
# workspace_processing_path= r"D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\processing/"
# name= 'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC'
# hh_hv_vh_vv_dic = {}
# hh_hv_vh_vv_dic.update({'HH': hh_hv_vh_vv_list[0]})
# hh_hv_vh_vv_dic.update({'HV': hh_hv_vh_vv_list[1]})
# hh_hv_vh_vv_dic.update({'VH': hh_hv_vh_vv_list[2]})
# hh_hv_vh_vv_dic.update({'VV': hh_hv_vh_vv_list[3]})
# t3_path = workspace_processing_path + name + "\\lee_filter"
# feature_tif_dir = workspace_processing_path + name + "\\features"
#
# cfeature = CreateFeature(False, exe_dir)
#
# cfeature.creat_h_a_alpha_features(t3_path, feature_tif_dir)
#
# t3_path = cfeature.ahv_to_t3(workspace_processing_path, hh_hv_vh_vv_list, name, 3)
# flag, outFolderDic = cfeature.decompose(workspace_processing_path, name, t3_path, 997, 1227, hh_hv_vh_vv_dic, FeatureInput=['Freeman', 'Touzi', 'Yamaguchi', 'Cloude'])
#
# feature_tifs_dic = cfeature.cereat_features_dic(outFolderDic, feature_tif_dir)
pass

@@ -1,132 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspHAAlphaDecomposition.py Cloude-Pottier分解
@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
(Averaging using a sliding window)
V1.0.1:1可选分解特征2bin转tif格式
@Contact:
@Author:SHJ
@Date:2021/9/24 9:06
@Version:1.0.1
"""
import os
import shutil
import subprocess
import logging
logger = logging.getLogger("mylog")
class PspCloudePottierDecomposition:
"""
调用polsarpro4.2.0的Cloude-Pottier极化分解 h_a_alpha_decomposition_T3.exe
"""
def __init__(
self,
exeDir,
inT3Dir,
outDir,
exeDecomposeName='h_a_alpha_decomposition_T3.exe'):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
"""
self.__exeName = exeDecomposeName
self.__exeDir = exeDir
self.__inT3Dir = inT3Dir
self.__outDir = outDir
self.__DecompostFlag = False
pass
def api_h_a_alpha_decomposition_T3(
self,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=1):
"""
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin :Size of the (Nwin, Nwin) sliding window used to compute local estimates. (int)
"""
if self.__DecompostFlag:
return True
if len(self.__exeDir) == 0:
if not os.path.exists(self.__exeName):
logger.error(self.__exeName + ' does not exist.')
return False
exePath = self.__exeName
else:
if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
logger.error(self.__exeName + ' does not exist.')
return False
exePath = self.__exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(self.__inT3Dir):
logger.error('T3 Matrix check failed.')
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
alpbetdelgam = 1
Lambda = 1
alpha = 1
entropy = 1
anisotropy = 1
CombHA = 1
CombH1mA = 1
Comb1mHA = 1
Comb1mH1mA = 1
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
self.__inT3Dir,
self.__outDir,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol,
alpbetdelgam,
Lambda,
alpha,
entropy,
anisotropy,
CombHA,
CombH1mA,
Comb1mHA,
Comb1mH1mA]
cmd = " ".join(str(i) for i in para_list)
config_path = os.path.join(self.__inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
self.__DecompostFlag = True
return True
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
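# A minimal usage sketch; the PolSARpro exe directory, T3 input directory,
# output directory and the 1000x1200 extent below are hypothetical:
if __name__ == '__main__':
    decomp = PspCloudePottierDecomposition(
        r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
        r'E:\data\psp_t3',
        r'E:\data\cloude_out')
    ok = decomp.api_h_a_alpha_decomposition_T3(0, 0, 1000, 1200)
    print('decomposition ok:', ok)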

@@ -1,109 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspFreemanDecomposition.py
@Function:
@Contact:
@Author:LVY
@Date:2021/10/12 18:45
@Version:1.0.0
"""
import os
import shutil
import subprocess
import logging
logger = logging.getLogger("mylog")
class PspFreemanDecomposition:
"""
Freeman分解
"""
def __init__(
self,
exeDir,
inT3Dir,
outDir,
exeDecomposeName='freeman_decomposition_T3.exe'):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
"""
self.__exeName = exeDecomposeName
self.__exeDir = exeDir
self.__inT3Dir = inT3Dir
self.__outDir = outDir
self.__DecompostFlag = False
pass
def api_freeman_decomposition_T3(
self,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=1):
"""
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin :Size of the (Nwin, Nwin) sliding window used to compute local estimates. (int)
"""
if self.__DecompostFlag:
return True
if len(self.__exeDir) == 0:
if not os.path.exists(self.__exeName):
logger.error(self.__exeName + ' does not exist.')
return False
exePath = self.__exeName
else:
if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
logger.error(self.__exeName + ' does not exist.')
return False
exePath = self.__exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(self.__inT3Dir):
logger.error('T3 Matrix check failed.')
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
self.__inT3Dir,
self.__outDir,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = " ".join(str(i) for i in para_list)
config_path = os.path.join(self.__inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
self.__DecompostFlag = True
return True
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True

@@ -1,435 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspHAAlphaDecomposition.py
@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
(Averaging using a sliding window)
V1.0.1:1可选分解特征2bin转tif格式
@Contact:
@Author:SHJ
@Date:2021/9/24 9:06
@Version:1.0.1
"""
import os
import shutil
import subprocess
import struct
import numpy as np
import glob
from PIL import Image
import logging
logger = logging.getLogger("mylog")
import multiprocessing
class PspHAAlphaDecomposition:
"""
调用polsarpro4.2.0的Cloude-Pottier极化分解
"""
def __init__(self,normalization = False):
self.__normalization = normalization #是否做归一化
self.__res_h_a_alpha_decomposition_T3 = {}
self.__res_h_a_alpha_eigenvalue_set_T3 = {}
self.__res_h_a_alpha_eigenvector_set_T3 = {}
pass
def api_creat_h_a_alpha_features_single_process(self, h_a_alpha_out_dir,
h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path,
h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir,is_trans_to_tif=True, is_read_to_dic=False):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解(h_a_alpha_decompositionh_a_alpha_eigenvalue_set h_a_alpha_eigenvector_set)
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param h_a_alpha_eigenvector_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
"""
h_a_alpha_features ={}
h_a_alpha_features.update(self.api_h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1)))
logger.info("run h_a_alpha_decomposition_T3 success!")
logger.info('progress bar: 40%')
h_a_alpha_features.update(self.api_h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)))
logger.info("run h_a_alpha_eigenvalue_set_T3 success!")
logger.info('progress bar: 60%')
h_a_alpha_features.update(self.api_h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1)))
logger.info("run h_a_alpha_eigenvector_set_T3 success!")
logger.info('progress bar: 80%')
if is_trans_to_tif:
self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
if is_read_to_dic:
h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
return h_a_alpha_features
def api_creat_h_a_alpha_features(self, h_a_alpha_out_dir,
h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path,
h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir,is_trans_to_tif=True, is_read_to_dic=False):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解(h_a_alpha_decompositionh_a_alpha_eigenvalue_set h_a_alpha_eigenvector_set)
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param h_a_alpha_eigenvector_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
"""
pool = multiprocessing.Pool(processes=3)
pl = []
logger.info("run h_a_alpha_decomposition_T3!")
pl.append(pool.apply_async(self.api_h_a_alpha_decomposition_T3, (h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1))))
logger.info("run h_a_alpha_eigenvalue_set_T3!")
pl.append(pool.apply_async(self.api_h_a_alpha_eigenvalue_set_T3, (h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))))
logger.info("run h_a_alpha_eigenvector_set_T3!")
pl.append(pool.apply_async(self.api_h_a_alpha_eigenvector_set_T3, (h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1))))
pool.close()
pool.join()
logger.info(pl)
logger.info('progress bar: 60%')
h_a_alpha_features = {}
h_a_alpha_features.update(self.__res_h_a_alpha_decomposition_T3)
logger.info("run h_a_alpha_decomposition_T3 success!")
h_a_alpha_features.update(self.__res_h_a_alpha_eigenvalue_set_T3)
logger.info("run h_a_alpha_eigenvalue_set_T3 success!")
h_a_alpha_features.update(self.__res_h_a_alpha_eigenvector_set_T3)
logger.info("run h_a_alpha_eigenvector_set_T3 success!")
if is_trans_to_tif:
self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
if is_read_to_dic:
h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
return h_a_alpha_features
def api_h_a_alpha_decomposition_T3(self, h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解H-A-Alpha分解
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param is_trans_to_tif:分解特征是否转换为tif
:param is_read_to_dic:分解特征是否以字典输出
:param *args:9个可选分解特征(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
:return : 包含分解特征的字典
"""
if not os.path.exists(h_a_alpha_out_dir):
os.makedirs(h_a_alpha_out_dir)
self.__h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, *args)
name_list = ['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
if is_trans_to_tif:
self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
if is_read_to_dic:
self.__res_h_a_alpha_decomposition_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
return self.__res_h_a_alpha_decomposition_T3
else:
return {}
def api_h_a_alpha_eigenvalue_set_T3(self, h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
"""
Cloude-Pottier eigenvalue based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvalue
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param is_trans_to_tif:分解特征是否转换为tif
:param is_read_to_dic:分解特征是否以字典输出
:param *args:9个可选分解特征(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
:return : 包含分解特征的字典
"""
if not os.path.exists(h_a_alpha_out_dir):
os.makedirs(h_a_alpha_out_dir)
self.__h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, *args)
name_list = ['anisotropy', 'anisotropy_lueneburg', 'anisotropy12', 'asymetry', 'derd', 'derd_norm', 'entropy_shannon',
'entropy_shannon_I', 'entropy_shannon_I_norm', 'entropy_shannon_norm', 'entropy_shannon_P',
'entropy_shannon_P_norm', 'l1', 'l2', 'l3', 'p1', 'p2', 'p3', 'pedestal', 'polarisation_fraction',
'rvi', 'serd', 'serd_norm']
if is_trans_to_tif:
self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
if is_read_to_dic:
self.__res_h_a_alpha_eigenvalue_set_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
return self.__res_h_a_alpha_eigenvalue_set_T3
else:
return {}
def api_h_a_alpha_eigenvector_set_T3(self, h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
"""
Cloude-Pottier eigenvector based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvector
:param h_a_alpha_eigenvector_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param is_trans_to_tif:分解特征是否转换为tif
:param is_read_to_dic:分解特征是否以字典输出
:param *args:9个可选分解特征(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
:return : 包含分解特征的字典
"""
if not os.path.exists(h_a_alpha_out_dir):
os.makedirs(h_a_alpha_out_dir)
self.__h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, *args)
name_list = ['alpha', 'alpha1', 'alpha2', 'alpha3',
'beta', 'beta1', 'beta2', 'beta3',
'delta', 'delta1', 'delta2', 'delta3',
'gamma', 'gamma1', 'gamma2', 'gamma3']
if is_trans_to_tif:
self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
if is_read_to_dic:
self.__res_h_a_alpha_eigenvector_set_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
return self.__res_h_a_alpha_eigenvector_set_T3
else:
return {}
def api_read_T3_matrix(self,polsarpro_T3_dir):
"""
读取T3矩阵转换字典
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:return : 包含T3矩阵的字典
"""
name_list = ['T11', 'T12_imag', 'T12_real',
'T22', 'T13_imag', 'T13_real',
'T33', 'T23_imag', 'T23_real']
return self.__read_haalpha(polsarpro_T3_dir, name_list)
def api_trans_T3_to_tif(self, out_tif_dir, polsarpro_T3_dir):
"""
将T3矩阵从bin格式转换为tif格式
:param out_tif_dir:保存路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
"""
name_list = ['T11', 'T12_imag', 'T12_real',
'T22', 'T13_imag', 'T13_real',
'T33', 'T23_imag', 'T23_real']
self.__write_haalpha_to_tif(out_tif_dir, polsarpro_T3_dir, name_list)
@staticmethod
def __h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, *args):
"""
对porsarpro格式T3矩阵做Cloude-Pottier分解H-A-Alpha分解
:param h_a_alpha_out_dir : 输出h_a_alpha二进制数据的目录
:param h_a_alpha_decomposition_T3_path: haalphadecompositionT3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param *args:9个可选输出变量(alpbetdelgam,Lambda,alpha,entropy,anisotropy,
CombHA,CombH1mA,Comb1mHA,Comb1mH1mA),不输出:0输出:1
"""
if not os.path.exists(h_a_alpha_decomposition_T3_path):
raise Exception(h_a_alpha_decomposition_T3_path + ' does not exist!')
NwinFilter = 1
offsetRow = 0
offsetCol = 0
config_path = os.path.join(polsarpro_in_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
numRow = int(config[1])
numCol = int(config[4])
alpbetdelgam = int(args[0])
Lambda = int(args[1])
alpha = int(args[2])
entropy = int(args[3])
anisotropy = int(args[4])
CombHA = int(args[5])
CombH1mA = int(args[6])
Comb1mHA = int(args[7])
Comb1mH1mA = int(args[8])
para_list = [h_a_alpha_decomposition_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
str(alpbetdelgam), str(Lambda), str(alpha), str(entropy), str(anisotropy),
str(CombHA), str(CombH1mA), str(Comb1mHA), str(Comb1mH1mA)]
cmd = ' '.join(para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
raise Exception(result_tuple[1])
shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
@staticmethod
def __h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, *args):
"""
Cloude-Pottier eigenvalue based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvalue
:param h_a_alpha_eigenvalue_set_T3_path: h_a_alpha_eigenvalue_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param *args:11个可选输出变量(eigen123,proba123,anisotropy,anisotropy12,asymetry,
polarisation_fraction,erd,rvi,pedestal,shannon,lueneburg),不输出0输出1
"""
if not os.path.exists(h_a_alpha_eigenvalue_set_T3_path):
raise Exception(h_a_alpha_eigenvalue_set_T3_path + ' does not exist!')
NwinFilter = 1
offsetRow = 0
offsetCol = 0
config_path = os.path.join(polsarpro_in_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
numRow = int(config[1])
numCol = int(config[4])
eigen123 = int(args[0])
proba123 = int(args[1])
anisotropy = int(args[2])
anisotropy12 = int(args[3])
asymetry = int(args[4])
polarisation_fraction = int(args[5])
erd = int(args[6])
rvi = int(args[7])
pedestal = int(args[8])
shannon = int(args[9])
lueneburg = int(args[10])
para_list = [h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
str(eigen123), str(proba123), str(anisotropy), str(anisotropy12), str(asymetry),
str(polarisation_fraction), str(erd), str(rvi), str(pedestal),
str(shannon), str(lueneburg)]
cmd = ' '.join(para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
raise Exception(result_tuple[1])
shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
@staticmethod
def __h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, *args):
"""
Cloude-Pottier eigenvector based decomposition of a coherency matrix
:param h_a_alpha_out_dir : Cloude-Pottier eigenvector
:param h_a_alpha_eigenvector_set_T3_set_T3_path: h_a_alpha_eigenvector_set_T3.exe路径
:param polsarpro_in_dir:输入porsarpro格式T3矩阵目录包含.bin,.config
:param *args:5个可选输出变量(alpha123,beta123,delta123,gamma123,alpbetdelgam),不输出0输出1
"""
if not os.path.exists(h_a_alpha_eigenvector_set_T3_path):
raise Exception(h_a_alpha_eigenvector_set_T3_path + ' does not exist!')
NwinFilter = 1
offsetRow = 0
offsetCol = 0
config_path = os.path.join(polsarpro_in_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
numRow = int(config[1])
numCol = int(config[4])
alpha123 = int(args[0])
beta123 = int(args[1])
delta123 = int(args[2])
gamma123 = int(args[3])
alpbetdelgam = int(args[4])
para_list = [h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
str(alpha123), str(beta123), str(delta123), str(gamma123), str(alpbetdelgam)]
cmd = ' '.join(para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
raise Exception(result_tuple[1])
shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
def __read_haalpha(self, h_a_alpha_dir, name_list):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param h_a_alpha_dir : h_a_alpha二进制数据的目录,包含.bin,.config
:name_list : 需要组合的名称集合['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
:return : 包含H-A-Alpha矩阵信息的字典
"""
dir = os.path.join(h_a_alpha_dir, '*.bin')
bin_paths = list(glob.glob(dir))
haalpha_dic ={}
for name in name_list:
path = os.path.join(h_a_alpha_dir, name + '.bin')
if path in bin_paths:
img = self.__read_bin_to_img(path)
haalpha_dic.update({name: img})
return haalpha_dic
def standardization(self, data, num=1):
# normalize the matrix to [0, num]
data[np.isnan(data)] = np.nanmin(data)  # fill NaN with the minimum valid value
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range * num
def __write_haalpha_to_tif(self, out_tif_dir, h_a_alpha_dir, name_list):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param out_tif_dir : tif的输出路径
:param h_a_alpha_dir : h_a_alpha二进制数据的目录,包含.bin,.config
:name_list : 需要组合的名称集合['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
"""
dir = os.path.join(h_a_alpha_dir, '*.bin')
bin_paths = list(glob.glob(dir))
for name in name_list:
in_path = os.path.join(h_a_alpha_dir, name + '.bin')
out_path = os.path.join(out_tif_dir, name + '.tif')
if in_path in bin_paths:
img_array = self.__read_bin_to_img(in_path)
if self.__normalization is True:
img_array = self.standardization(img_array, num=1)
out_image = Image.fromarray(img_array)
out_image.save(out_path)
@staticmethod
def __read_bin_to_img(bin_path):
"""
读取bin格式二进制数据输出为矩阵
:param bin_path : bin文件的路径包含.bin,.config
:return : 矩阵信息
"""
(bin_dir, bin_name) = os.path.split(bin_path)
config_path = os.path.join(bin_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
rows = int(config[1])
cols = int(config[4])
bin_file = open(bin_path, 'rb')  # open the binary file
size = os.path.getsize(bin_path)  # file size in bytes
if size < rows*cols*4:
raise Exception('bin size less than rows*cols*4! size:', size, 'byte, rows:', rows, 'cols:', cols)
img = np.zeros([rows, cols], dtype=np.float32)
for row in range(rows):
data = bin_file.read(4 * cols)  # read one row of binary data at a time
row_data = struct.unpack('f' * cols, data)  # unpack into one row of floats
img[row, :] = row_data
bin_file.close()
return img
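# Note: because the bin layout is plain row-major float32, an equivalent and much
# faster read is possible with numpy.fromfile (a sketch, same config conventions):
# img = np.fromfile(bin_path, dtype=np.float32, count=rows * cols).reshape(rows, cols)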
# if __name__ == '__main__':
# h_a_alpha_decomposition_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_decomposition_T3.exe'
# h_a_alpha_eigenvalue_set_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_eigenvalue_set_T3.exe'
# h_a_alpha_eigenvector_set_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_eigenvector_set_T3.exe'
# polsarpro_in_dir = 'D:\\PolSARpro_v4.2.0\\in'
# haalpha_out_dir = 'D:\\PolSARpro_v4.2.0\\out'
# h_a_alpha_eigenvalue_set_T3_out = 'D:\\PolSARpro_v4.2.0\\out\\h_a_alpha_eigenvalue_set_T3'
# h_a_alpha_eigenvector_set_T3_out = 'D:\\PolSARpro_v4.2.0\\out\\h_a_alpha_eigenvector_set_T3'
#
# haa = PspHAAlphaDecomposition()
# h_a_alpha_features = haa.api_creat_h_a_alpha_features(haalpha_out_dir, h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir)
# haa = PspHAAlphaDecomposition(normalization=True)
# psp_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024_RPCpsp_t3"
# t3_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\t3"
# exe_dir = r"I:\microproduct\soilSalinity/"
# haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=t3_path,
# h_a_alpha_decomposition_T3_path= exe_dir + 'h_a_alpha_decomposition_T3.exe',
# h_a_alpha_eigenvalue_set_T3_path= exe_dir + 'h_a_alpha_eigenvalue_set_T3.exe',
# h_a_alpha_eigenvector_set_T3_path=exe_dir +'h_a_alpha_eigenvector_set_T3.exe',
# polsarpro_in_dir=psp_path)
# print('done')

@@ -1,170 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspLeeRefinedFilterC2.py
@Function:
@Contact:
@Author:SHJ
@Date:2021/11/5
@Version:1.0.0
"""
import logging
import os
import shutil
import subprocess
import glob
import numpy as np
import struct
from PIL import Image
logger = logging.getLogger("mylog")
class LeeRefinedFilterC2:
"""
调用polsarpro4.2.0的lee_refined_filter_C2.exe做精致Lee滤波
"""
def __init__(self, exeFilterName='lee_refined_filter_C2.exe'):
self.__exeName = exeFilterName
pass
def api_lee_refined_filter_C2(
self,
exeDir,
inC2Dir,
outDir,
off_row,
off_col,
Nrow,
Ncol,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inC2Dir:C2矩阵目录
:param outDir:输出目录
:param off_row:行偏移行启始位置
:param off_col:列偏移列启始位置
:param Nrow:终止行
:param Ncol:终止列
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' does not exist.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(exeDir + '\\' + self.__exeName + ' does not exist.')
exePath = exeDir + '\\' + self.__exeName
# if not self._checkT3Matrix(inT3Dir):
# raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default window size for even or negative values
Nwin = 7
Off_lig = off_row
Off_col = off_col
Sub_Nlig = Nrow
Sub_Ncol = Ncol
para_list = [
exePath,
inC2Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
config_path = os.path.join(inC2Dir, 'config.txt')
if config_path != os.path.join(outDir, 'config.txt'):
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
@staticmethod
def _checkC2Matrix(C2Dir):
# check that all C2 matrix files exist
if not os.path.exists(C2Dir):
return False
file_name_in_out = ['C11.bin', 'C12_real.bin', 'C12_imag.bin', 'C22.bin','config.txt']
for item in file_name_in_out:
if not os.path.exists(C2Dir + "\\" + item):
return False
return True
def write_bin_to_tif(self, out_tif_dir, bin_dir):
"""
读取H-A-Alpha分解二进制数据输出为矩阵格式的字典
:param out_tif_dir : tif的输出路径
:param bin_dir : 二进制数据的目录,包含.bin,.config
:return out_tif_path: 生成tif的路径字典
"""
bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
out_tif_path = {}
for in_path in bin_paths:
name = os.path.split(in_path)[1].split('.')[0]
out_path = os.path.join(out_tif_dir, name + '.tif')
out_tif_path.update({name: out_path})
if os.path.exists(os.path.split(out_path)[0]) is False:
os.makedirs(os.path.split(out_path)[0])
img_array = self.__read_bin_to_img(in_path)
img_array[np.isnan(img_array)] = 0  # fill NaN with 0
# img_array = self.standardization(img_array)  # optional normalization to [0, 1]
out_image = Image.fromarray(img_array)
out_image.save(out_path)
return out_tif_path
@staticmethod
def __read_bin_to_img(bin_path):
"""
读取bin格式二进制数据输出为矩阵
:param bin_path : bin文件的路径包含.bin,.config
:return : 矩阵信息
"""
(bin_dir, bin_name) = os.path.split(bin_path)
config_path = os.path.join(bin_dir, 'config.txt')
config = open(config_path, 'r').read().split('\n', -1)
rows = int(config[1])
cols = int(config[4])
bin_file = open(bin_path, 'rb')  # open the binary file
size = os.path.getsize(bin_path)  # file size in bytes
if size < rows * cols * 4:
raise Exception(
'bin size less than rows*cols*4! size:',
size,
'byte, rows:',
rows,
'cols:',
cols)
img = np.zeros([rows, cols], dtype=np.float32)
for row in range(rows):
data = bin_file.read(4 * cols)  # read one row of binary data at a time
row_data = struct.unpack('f' * cols, data)  # unpack into one row of floats
img[row, :] = row_data
bin_file.close()
return img
if __name__ == '__main__':
tp = LeeRefinedFilterC2()
inC2Dir=r'E:\MicroWorkspace\LandCover\HHHV1'
outDir =r'E:\MicroWorkspace\LandCover\HHHV1_f'
off_row = 0
off_col = 0
Nrow = 666
Ncol = 746
tp.api_lee_refined_filter_C2( '',inC2Dir,outDir,off_row,off_col,Nrow,Ncol)
tp.write_bin_to_tif(outDir,outDir)
print('done')

@@ -1,104 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspLeeRefinedFilterT3.py
@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
(Averaging using a sliding window)
@Contact:
@Author:LVY
@Date:2021/10/12 9:06
@Version:1.0.0
"""
import logging
import os
import shutil
import subprocess
logger = logging.getLogger("mylog")
class LeeRefinedFilterT3:
"""
调用polsarpro4.2.0的lee_refined_filter_T3.exe做精致Lee滤波
"""
def __init__(self, exeFilterName='lee_refined_filter_T3.exe'):
self.__exeName = exeFilterName
pass
def api_lee_refined_filter_T3(
self,
exeDir,
inT3Dir,
outDir,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' does not exist.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(exeDir + '\\' + self.__exeName + ' does not exist.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default window size for even or negative values
Nwin = 7
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
inT3Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
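# A minimal usage sketch mirroring the C2 demo above; the input/output paths and
# the 1000x1200 extent are hypothetical (the empty first argument resolves the
# exe from the current working directory):
if __name__ == '__main__':
    lee = LeeRefinedFilterT3()
    lee.api_lee_refined_filter_T3('', r'E:\data\psp_t3', r'E:\data\psp_t3_lee',
                                  0, 0, 1000, 1200, Nwin=7, Nlook=1)
    print('done')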

@@ -1,393 +0,0 @@
import logging
import os
import shutil
import subprocess
logger = logging.getLogger("mylog")
class SurfaceInversionDubois:
"""
调用polsarpro4.2.0的surface_inversion_dubois.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_dubois.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_dubois(
self,
exeDir,
inT3Dir,
outDir,
incidence,
rectX,
rectY,
row,
col,
frequency, # GHZ
angleFlag, # 0:deg, 1:rad
):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' does not exist.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(exeDir + '\\' + self.__exeName + ' does not exist.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = row
Sub_Ncol = col
dataFormat = 'T3'
calibration_flag = 1
calibration_coefficient = 0.0
threshold_HHHH_VVVV = 0.0
threshold_HVHV_VVVV = 0.0
para_list = [
exePath,
inT3Dir,
outDir,
dataFormat,
incidence,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol,
frequency, # GHZ
angleFlag,
]
cmd = "surface_inversion_dubois.exe -id {} -od {} -iodf {} -ang {} -ofr {} -ofc {} -fnr {} -fnc {} -fr {} -un {} -caf {} -cac {} -th1 {} -th2 {}".format(
inT3Dir, outDir, dataFormat, incidence, Off_lig, Off_col, Sub_Nlig, Sub_Ncol, frequency, angleFlag,
calibration_flag, calibration_coefficient, threshold_HHHH_VVVV, threshold_HVHV_VVVV)
logger.info('surface_inversion_dubois:{}'.format(cmd))
result = os.system(cmd)
logger.info('cmd_result:{}'.format(result))
logger.info('surface_inversion_dubois finish!')
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
# cmd = ' '.join(str(i) for i in para_list)
# config_path = os.path.join(inT3Dir, 'config.txt')
# shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
# result_tuple = subprocess.getstatusoutput(cmd)
#
# if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
# raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
class SurfaceInversionHisto:
"""
调用polsarpro4.2.0的surface_inversion_histo.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_histo.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_histo(
self,
exeDir,
inT3Dir,
outDir,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' does not exist.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(exeDir + '\\' + self.__exeName + ' does not exist.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default window size for even or negative values
Nwin = 7
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
inT3Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
class SurfaceInversionOh:
"""
调用polsarpro4.2.0的surface_inversion_oh.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_oh.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_oh(
self,
exeDir,
inT3Dir,
outDir,
rectX,
rectY,
rectWidth,
rectHeight,
Nwin=7,
Nlook=1):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' does not exist.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(exeDir + '\\' + self.__exeName + ' does not exist.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default window size for even or negative values
Nwin = 7
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
inT3Dir,
outDir,
Nlook,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = ' '.join(str(i) for i in para_list)
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
class SurfaceInversionOh2004:
"""
调用polsarpro4.2.0的surface_inversion_oh2004.exe做土壤水分反演
"""
def __init__(self, exeFilterName='surface_inversion_oh2004.exe'):
self.__exeName = exeFilterName
pass
def api_surface_inversion_oh2004(
self,
exeDir,
inT3Dir,
outDir,
incidence,
rectY,
rectX,
row,
col,
frequency, # GHZ
angleFlag):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin:滤波窗口大小 3 5 7 9 11
:param Nlook:一般是1
"""
if len(exeDir) == 0:
if not os.path.exists(self.__exeName):
raise Exception(self.__exeName + ' does not exist.')
exePath = self.__exeName
else:
if not os.path.exists(exeDir + '\\' + self.__exeName):
raise Exception(exeDir + '\\' + self.__exeName + ' does not exist.')
exePath = exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(inT3Dir):
raise Exception('T3 Matrix check failed.')
if not os.path.exists(outDir):
os.makedirs(outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = row
Sub_Ncol = col
dataFormat = 'T3'
threshold_mv = 1.0
threshold_s = 7.0
para_list = [
exePath,
inT3Dir,
outDir,
dataFormat,
incidence,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol,
frequency, # GHZ
angleFlag,
threshold_mv,
threshold_s]
cmd = "surface_inversion_oh2004.exe -id {} -od {} -iodf {} -ang {} -ofr {} -ofc {} -fnr {} -fnc {} -fr {} -un {} -th1 {} -th2 {}".format(
inT3Dir, outDir, dataFormat, incidence, Off_lig, Off_col, Sub_Nlig, Sub_Ncol, frequency, angleFlag, threshold_mv, threshold_s)
logger.info('surface_inversion_oh2004:{}'.format(cmd))
result = os.system(cmd)
logger.info('cmd_result:{}'.format(result))
logger.info('surface_inversion_oh2004 finish!')
config_path = os.path.join(inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
# cmd = ' '.join(str(i) for i in para_list)
# result_tuple = subprocess.getstatusoutput(cmd)
# #
# if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
# raise Exception(result_tuple[1])
@staticmethod
def _checkT3Matrix(T3Dir):
# check that all T3 matrix files exist
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
# file_name_in_out = ['T11.img', 'T12_real.img', 'T12_imag.img',
# 'T13_real.img', 'T13_imag.img', 'T22.img',
# 'T23_real.img', 'T23_imag.img', 'T33.img']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
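# A minimal usage sketch for the Oh 2004 inversion; the exe location, data paths,
# incidence-angle file, 1000x1200 extent and 5.4 GHz frequency are hypothetical:
if __name__ == '__main__':
    inv = SurfaceInversionOh2004()
    inv.api_surface_inversion_oh2004(
        r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
        r'E:\data\psp_t3', r'E:\data\oh2004_out',
        r'E:\data\incidence.bin',
        0, 0, 1000, 1200,
        5.4,  # frequency in GHz
        1)    # angle unit flag: radians
    print('done')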

@@ -1,146 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspTouziDecomposition.py
@Function:
@Contact:
@Author:LVY
@Date:2021/10/14 10:11
@Version:1.0.0
"""
import os
import logging
from tool.algorithm.polsarpro.polarizationDecomposition import ModTouzi as TouziDecomp
from osgeo import gdal
import multiprocessing
from tool.algorithm.block.blockprocess import BlockProcess
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.file.fileHandle import fileHandle
logger = logging.getLogger("mylog")
file =fileHandle(False)
class PspTouziDecomposition:
"""
Touzi分解
"""
def __init__(self, inDic, outDir):
"""
:param inDic:T3矩阵目录
:param outDir:输出目录
"""
self.__inDic = inDic
self.__outDir = outDir
self.__DecompostFlag = False
if self._checkTifFileDic(self.__inDic) is False:
raise Exception('HH/HV/VH/VV tif paths are missing or do not exist')
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
def api_Touzi_decomposition_TIF(self, Nwin = 5):
"""
:param Nwin:滤波窗口大小 3 5 7 9 11
"""
bandHH = gdal.Open(self.__inDic["HH"])
bandHV = gdal.Open(self.__inDic["HV"])
bandVH = gdal.Open(self.__inDic["VH"])
bandVV = gdal.Open(self.__inDic["VV"])
bandAll = [bandHH, bandHV, bandVH, bandVV]
decomposition = TouziDecomp(bandAll, Nwin)
decomposition.get_result(self.__outDir)
return True
def Touzi_decomposition_TIF(self,hh_path,hv_path,vh_path,vv_path,out_dir,suffix,Nwin = 5):
"""
:param Nwin:滤波窗口大小 3 5 7 9 11
"""
bandHH = gdal.Open(hh_path)
bandHV = gdal.Open(hv_path)
bandVH = gdal.Open(vh_path)
bandVV = gdal.Open(vv_path)
bandAll = [bandHH, bandHV, bandVH, bandVV]
decomposition = TouziDecomp(bandAll, Nwin)
decomposition.get_result_block(out_dir, suffix)
return True
@staticmethod
def _checkTifFileDic(inDic):
file_name_in_out = ['HH', 'VV', 'HV', 'VH']
for item in file_name_in_out:
if item in inDic:
print(inDic[item])
if not os.path.exists(os.path.join(inDic[item])):
return False
else:
return False
return True
def Touzi_decomposition_multiprocessing(self):
# create the working directories
src_path = os.path.join(self.__outDir, "src_img")
block_path = os.path.join(self.__outDir, "block")
decomposition_path = os.path.join(self.__outDir, "feature")
file.creat_dirs([src_path, block_path, decomposition_path])
shutil.copyfile(self.__inDic["HH"], os.path.join(src_path, "HH.tif"))
shutil.copyfile(self.__inDic["HV"], os.path.join(src_path, "HV.tif"))
shutil.copyfile(self.__inDic["VH"], os.path.join(src_path, "VH.tif"))
shutil.copyfile(self.__inDic["VV"], os.path.join(src_path, "VV.tif"))
self.__cols = ImageHandler.get_img_width(self.__inDic["HH"])
self.__rows = ImageHandler.get_img_height(self.__inDic["HH"])
# split into blocks
bp = BlockProcess()
block_size = bp.get_block_size(self.__rows, self.__cols)
bp.cut(src_path, block_path, ['tif', 'tiff'], 'tif', block_size)
logger.info('blocking tifs success!')
img_dir, img_name = bp.get_file_names(block_path, ['tif'])
dir_dict = bp.get_same_img(img_dir, img_name)
hh_list, vv_list, hv_list, vh_list = None, None, None, None
for key in dir_dict.keys():
tmp = key.split('_', 2)[0]
if tmp == 'HH':
hh_list = dir_dict[key]
elif tmp == 'VV':
vv_list = dir_dict[key]
elif tmp == 'HV':
hv_list = dir_dict[key]
elif tmp == 'VH':
vh_list = dir_dict[key]
processes_num = min([len(hh_list), multiprocessing.cpu_count() - 1])
# run the per-block decomposition in parallel
pool = multiprocessing.Pool(processes=processes_num)
for i in range(len(hh_list)):
suffix = bp.get_suffix(os.path.basename(hh_list[i]))
pool.apply_async(self.Touzi_decomposition_TIF, (hh_list[i], hv_list[i], vh_list[i], vv_list[i], decomposition_path, suffix, 5))
logger.info('total:%s, block:%s touzi!', len(hh_list), i)
pool.close()
pool.join()
# merge the processed blocks back into one image
bp.combine(decomposition_path, self.__cols, self.__rows, self.__outDir, file_type=['tif'], datetype='float16')
file.del_folder(src_path)
file.del_folder(block_path)
file.del_folder(decomposition_path)
pass
# if __name__ == '__main__':
# dir = {}
# dir.update({"HH":"I:\preprocessed\HH_preprocessed.tif"})
# dir.update({"HV":"I:\preprocessed\HV_preprocessed.tif"})
# dir.update({"VH":"I:\preprocessed\VH_preprocessed.tif"})
# dir.update({"VV":"I:\preprocessed\VV_preprocessed.tif"})
#
#
# p = PspTouziDecomposition(dir, "I:/preprocessed/")
# p.Touzi_decomposition_multiprocessing()
# pass

@@ -1,104 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project:__init__.py
@File:pspFreemanDecomposition.py
@Function:
@Contact:
@Author:LVY
@Date:2021/10/12 18:45
@Version:1.0.0
"""
import os
import shutil
import subprocess
import logging
logger = logging.getLogger("mylog")
class PspYamaguchiDecomposition:
"""
Yamaguchi yamaguchi_3components_decomposition_T3.exe yamaguchi_4components_decomposition_T3.exe
"""
def __init__(
self,
exeDir,
inT3Dir,
outDir,
exeDecomposeName='yamaguchi_4components_decomposition_T3.exe'):
"""
:param exeDir:exe所在目录
:param inT3Dir:T3矩阵目录
:param outDir:输出目录
"""
self.__exeName = exeDecomposeName
self.__exeDir = exeDir
self.__inT3Dir = inT3Dir
self.__outDir = outDir
self.__DecompostFlag = False
pass
def api_yamaguchi_4components_decomposition_T3(
self, rectX, rectY, rectWidth, rectHeight, Nwin=1):
"""
:param rectX:有效区域x
:param rectY:有效区域y
:param rectWidth:有效区域宽
:param rectHeight:有效区域高
:param Nwin :Size of the (Nwin, Nwin) sliding window used to compute local estimates. (int)
"""
if self.__DecompostFlag:
return True
if len(self.__exeDir) == 0:
if not os.path.exists(self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeName
else:
if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
logger.error(self.__exeName + ' not exists.')
return False
exePath = self.__exeDir + '\\' + self.__exeName
if not self._checkT3Matrix(self.__inT3Dir):
logger.error('T3 Matrix check failed.')
return False
if not os.path.exists(self.__outDir):
os.makedirs(self.__outDir)
Off_lig = rectX
Off_col = rectY
Sub_Nlig = rectWidth
Sub_Ncol = rectHeight
para_list = [
exePath,
self.__inT3Dir,
self.__outDir,
Nwin,
Off_lig,
Off_col,
Sub_Nlig,
Sub_Ncol]
cmd = " ".join(str(i) for i in para_list)
config_path = os.path.join(self.__inT3Dir, 'config.txt')
shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
result_tuple = subprocess.getstatusoutput(cmd)
if result_tuple[0] != 0 or result_tuple[1].find('error') != -1:  # getstatusoutput returns exit status 0 on success
raise Exception(result_tuple[1])
self.__DecompostFlag = True
return True
@staticmethod
def _checkT3Matrix(T3Dir):
# check the T3 matrix files
if not os.path.exists(T3Dir):
return False
file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
'T13_real.bin', 'T13_imag.bin', 'T22.bin',
'T23_real.bin', 'T23_imag.bin', 'T33.bin']
for item in file_name_in_out:
if not os.path.exists(T3Dir + "\\" + item):
return False
return True
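For reference, a hedged usage sketch of the wrapper above. All paths are hypothetical, and the exe names appear to come from the PolSARpro toolbox that this class shells out to:

# decompose a 1024x1024 region with a 3x3 estimation window
decomp = PspYamaguchiDecomposition(r"D:\PolSARpro\bin",   # hypothetical exe directory
                                   r"D:\work\T3",         # T3 matrix directory with config.txt
                                   r"D:\work\yamaguchi")  # output directory
if decomp.api_yamaguchi_4components_decomposition_T3(0, 0, 1024, 1024, Nwin=3):
    print("decomposition finished")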

View File

@ -1,194 +0,0 @@
import os
cimport cython  # required import for Cython
import numpy as np  # numpy must be declared once for Python-level types ...
cimport numpy as np  # ... and once for C-level types
from libc.math cimport pi,ceil,floor
from scipy.interpolate import griddata
#####################
# struct definitions
####################
cdef struct Point:  # point struct
double x
double y
######################
# ray-casting point-in-polygon test
######################
cdef int rayCasting(Point p,np.ndarray[double,ndim=2] poly):
cdef double px = p.x
cdef double py = p.y
cdef int flag = 0
cdef int i=0
cdef int l=poly.shape[0]
cdef int j=l-1
cdef double sx
cdef double sy
cdef double tx
cdef double ty
cdef double x=0
while(i<l):
sx=poly[i,0]
sy=poly[i,1]
tx=poly[j,0]
ty=poly[j,1]
# the point coincides with a polygon vertex
if((sx == px and sy == py) or (tx == px and ty == py)):
return 1
# check whether the two segment endpoints lie on opposite sides of the ray
if((sy < py and ty >= py) or (sy >= py and ty < py)) :
# x coordinate of the point on the segment whose y equals the ray's y
x = sx + (py - sy) * (tx - sx) / (ty - sy)
# the point lies on a polygon edge
if(x == px):
return 1
# the ray crosses a polygon edge
if(x > px):
flag = 0 if flag==1 else 1
# advance the loop
j=i
i=i+1
# the point is inside the polygon when the ray crosses its boundary an odd number of times
return 1 if flag==1 else 0
cpdef np.ndarray[double,ndim=2] insert_data(np.ndarray[double,ndim=2] ori2geo_img,np.ndarray[int , ndim=1] row_ids,np.ndarray[int,ndim=1] col_ids,np.ndarray[double,ndim=1] data):
cdef int i=0
cdef int count=row_ids.shape[0]
while i<count:
ori2geo_img[row_ids[i],col_ids[i]]=data[i]
i=i+1
return ori2geo_img
cpdef np.ndarray[double,ndim=2] cut_L1A_img(np.ndarray[double,ndim=3] ori2geo_img,np.ndarray[double,ndim=2] roi_list):
""" 根据roi 获取栅格对象
"""
cdef int height=ori2geo_img.shape[1]
cdef int width=ori2geo_img.shape[2]
cdef int i=0
cdef int j=0
cdef Point temp_p
cdef np.ndarray[double,ndim=2] mask=np.zeros((height,width),dtype=np.float64)
while i<height:
j=0
while j<width:
temp_p.x=ori2geo_img[0,i,j] # temp_p
temp_p.y=ori2geo_img[1,i,j] # temp_p
if rayCasting(temp_p,roi_list)==1:
mask[i,j]=1
else:
mask[i,j]=np.nan
j=j+1
i=i+1
return mask
cpdef np.ndarray[double,ndim=2] gereratorMask(np.ndarray[double,ndim=1] rlist,np.ndarray[double,ndim=1] clist,np.ndarray[double,ndim=2] mask):
cdef int rcount=rlist.shape[0]
cdef int ccount=clist.shape[0]
cdef int count=rcount if rcount<ccount else ccount
cdef int i=0
cdef int j=0
cdef int temp_row=0
cdef int temp_col=0
cdef int height=mask.shape[0]
cdef int width=mask.shape[1]
while i<count:
# 1
temp_row=int(ceil(rlist[i]))
temp_col=int(ceil(clist[i]))
if temp_row>=0 and temp_col>=0 and temp_row<height and temp_col<width:
mask[temp_row,temp_col]=1
# 2
temp_row=int(floor(rlist[i]))
temp_col=int(ceil(clist[i]))
if temp_row>=0 and temp_col>=0 and temp_row<height and temp_col<width:
mask[temp_row,temp_col]=1
# 3
temp_row=int(ceil(rlist[i]))
temp_col=int(floor(clist[i]))
if temp_row>=0 and temp_col>=0 and temp_row<height and temp_col<width:
mask[temp_row,temp_col]=1
# 4
temp_row=int(floor(rlist[i]))
temp_col=int(floor(clist[i]))
if temp_row>=0 and temp_col>=0 and temp_row<height and temp_col<width:
mask[temp_row,temp_col]=1
i=i+1
return mask
cdef double distance_powe(Point p1,Point p2):
return (p1.x-p2.x)**2+(p1.y-p2.y)**2
cpdef np.ndarray[double,ndim=2] get_r_c(np.ndarray[double,ndim=3] ori2geo,np.ndarray[double,ndim=2] lon_lat):
cdef int p_count=lon_lat.shape[0]
cdef int height=ori2geo.shape[1]
cdef int width=ori2geo.shape[2]
cdef int i=0
cdef int j=0
cdef int c=0
cdef double dist=999
cdef double temp_dist=0
cdef Point p1
cdef Point p2
cdef int min_i
cdef int min_j
cdef np.ndarray[double,ndim=2] result=np.ones((p_count,2))*-1
# extent of the geolocation grid
cdef double min_lon=np.min(ori2geo[0,:,:])
cdef double max_lon=np.max(ori2geo[0,:,:])
cdef double min_lat=np.min(ori2geo[1,:,:])
cdef double max_lat=np.max(ori2geo[1,:,:])
while c<p_count:
p1.x=lon_lat[c,0]
p1.y=lon_lat[c,1]
# skip points outside the grid extent; their row/col stay -1
if min_lon>p1.x or max_lon<p1.x or p1.y<min_lat or p1.y>max_lat:
c=c+1
continue
# brute-force search for the nearest grid cell
dist=999
i=0
while i<height:
j=0
while j<width:
p2.x=ori2geo[0,i,j]
p2.y=ori2geo[1,i,j]
temp_dist=distance_powe(p1,p2)
if temp_dist<dist:
dist=temp_dist
min_i=i
min_j=j
j=j+1
i=i+1
result[c,0]=min_i
result[c,1]=min_j
c=c+1
return result
# test routine
cpdef np.ndarray[double,ndim=2] Add(np.ndarray[double,ndim=2] a,double x):
cdef double d=0  # example declaration; note that Cython has no separate bool type
print("call succeeded")
print(a)
print(x)
return a+x
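The rayCasting routine above is the classic even-odd test: shoot a horizontal ray to the right of the point and count boundary crossings. A pure-Python sketch of the same logic, useful for sanity-checking the compiled version (the square polygon is an illustrative assumption):

def ray_casting(px, py, poly):
    # poly: list of (x, y) vertices; returns 1 if (px, py) is inside or on the boundary
    flag = False
    j = len(poly) - 1
    for i in range(len(poly)):
        sx, sy = poly[i]
        tx, ty = poly[j]
        if (sx, sy) == (px, py) or (tx, ty) == (px, py):
            return 1  # on a vertex
        if (sy < py) != (ty < py):  # the edge straddles the ray's y coordinate
            x = sx + (py - sy) * (tx - sx) / (ty - sy)
            if x == px:
                return 1  # on an edge
            if x > px:
                flag = not flag
        j = i
    return 1 if flag else 0

square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
print(ray_casting(0.5, 0.5, square))  # 1: inside
print(ray_casting(1.5, 0.5, square))  # 0: outside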

View File

@ -1,45 +0,0 @@
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
from pathlib import Path
import shutil
class MyBuildExt(build_ext):
def run(self):
build_ext.run(self)
build_dir = Path(self.build_lib)
root_dir = Path(__file__).parent
target_dir = build_dir if not self.inplace else root_dir
self.copy_file(Path('./SAR_geo') / '__init__.py', root_dir, target_dir)
#self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
def copy_file(self, path, source_dir, destination_dir):
if not (source_dir / path).exists():
return
shutil.copyfile(str(source_dir / path), str(destination_dir / path))
setup(
name="MyModule",
ext_modules=cythonize(
[
#Extension("pkg1.*", ["root/pkg1/*.py"]),
Extension("pkg2.*", ["./SAR_geo/SAR_GEO.pyx"]),
#Extension("1.*", ["root/*.py"])
],
build_dir="build",
compiler_directives=dict(
always_allow_keywords=True
)),
cmdclass=dict(
build_ext=MyBuildExt
),
packages=[],
include_dirs=[numpy.get_include()],
)
# command: python setup.py build_ext --inplace
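Assuming the in-place build succeeds, the compiled extension can be smoke-tested from Python. The import path below follows the SAR_geo package name used in the Extension and is an assumption:

# build first with: python setup.py build_ext --inplace
import numpy as np
from SAR_geo import SAR_GEO  # hypothetical import path for the built module

a = np.ones((2, 2), dtype=np.float64)
print(SAR_GEO.Add(a, 1.5))  # the test routine defined at the end of SAR_GEO.pyx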

View File

@ -1,730 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File AlgXmlHandle.py
@Function read, write and check the algorithm description file
@Contact https://www.cnblogs.com/feifeifeisir/p/10893127.html
@Author SHJ
@Date 2021/9/6
@Version 1.0.0
"""
import logging
from xml.etree.ElementTree import ElementTree
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.file.fileHandle import fileHandle
import os
import re
import platform
import psutil
import multiprocessing
import ctypes
logger = logging.getLogger("mylog")
import glob
class ManageAlgXML:
"""
Check and read the algorithm task XML
"""
def __init__(self, xml_path):
self.in_path = xml_path
self.__tree = ElementTree()
self.__root = None
self.__alg_compt = None
self.__workspace_path = None
self.__taskID = None
self.__algorithm_name = None
self.__envs = {}
self.__input_paras = {}
self.__output_paras = {}
self.__init_flag = False
def init_xml(self):
"""
Initialise from the XML file
:return: True on success; raises an exception on failure
"""
try:
self.__tree.parse(self.in_path)
except FileNotFoundError as ex:
msg = str(ex) + " xml_path = " + self.in_path
raise Exception(msg)
except BaseException:
raise Exception("cannot open algXMl")
self.__root = self.__tree.getroot()
if self.__root is None:
raise Exception("get root failed")
self.__alg_compt = self.__root.find("AlgCompt")
if self.__alg_compt is None:
raise Exception("get AlgCompt failed")
self.__workspace_path = self.__check_workspace_path()
if self.__workspace_path is None:
raise Exception("check workspace_path failed")
self.__taskID = self.__check_task_id()
if self.__taskID is None:
raise Exception("check taskID failed")
self.__algorithm_name = self.__check_algorithm_name()
if self.__algorithm_name is None:
raise Exception("check AlgorithmName failed")
self.__envs = self.__check_environment()
if self.__envs is None or self.__envs == {}:
raise Exception("check environment failed")
self.__input_paras = self.__check_input_para()
if self.__input_paras is None or self.__input_paras == {}:
raise Exception("check input para failed")
self.__output_paras = self.__check_output_para()
self.__init_flag = True
return True
def get_workspace_path(self):
"""
Get the workspace path
:return: workspace path; raises on error
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__workspace_path
def get_task_id(self):
"""
Get the task ID
:return: taskID; raises on error
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__taskID
def get_algorithm_name(self):
"""
Get the algorithm name
:return:
"""
if not self.__init_flag:
raise Exception("AlgorithmName is not initialized")
return self.__algorithm_name
def get_envs(self):
"""
Get the runtime environment requirements
:return: requirements dict; raises on error
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__envs
def get_input_paras(self):
"""
Get the input parameters
:return: input parameter dict; raises on error
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__input_paras
def get_output_paras(self):
"""
Get the output parameters
:return: output parameter dict; raises on error
"""
if not self.__init_flag:
raise Exception("XML is not initialized")
return self.__output_paras
def __check_workspace_path(self):
"""
Check the workspace path
:return: workspace path; raises on error
"""
workspace_note = self.__root.find("WorkSpace")
workspace_path = str(workspace_note.text).replace("\n", "").replace(' ', '')  # strip spaces and newlines
if workspace_path is None:
raise Exception("'workspace_path' is None")
if not os.path.isdir(workspace_path):
raise Exception("'workspace_path' does not exist: %s" % workspace_path)
if workspace_path[-1] != '\\':
workspace_path += "\\"
return workspace_path
def __check_environment(self):
"""
Check the runtime environment requirements in the XML
:return: dict of requirements; raises on error
"""
env_note = self.__alg_compt.find("Environment")
is_cluster = int(env_note.find("IsCluster").text.replace("\n", "").replace(' ', ''))
is_legal = is_cluster in [0, 1]
if not is_legal:
raise Exception("IsCluster is not 0 or 1")
cluster_num = int(env_note.find("ClusterNum").text)
is_legal = cluster_num in [0, 1, 2, 3, 4, 5, 6, 7]
if not is_legal:
raise Exception("cluster_num is beyond [0,1,2,3,4,5,6,7]")
operating_system = env_note.find("OperatingSystem").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
# is_legal = operating_system in ["Windows10", "Windows7", "WindowsXP"]
# if not is_legal:
#     raise Exception("OperatingSystem is beyond [Windows10, Windows7, WindowsXP]")
cpu = env_note.find("CPU").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
is_legal = cpu in ["单核", "双核", "3核", "4核", "6核", "8核"]
if not is_legal:
raise Exception("CPU is beyond [单核, 双核, 3核, 4核, 6核, 8核]")
memory = env_note.find("Memory").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
is_legal = memory in ["1GB", "2GB", "4GB", "6GB", "8GB", "10GB", "12GB", "16GB"]
if not is_legal:
raise Exception("Memory is beyond [1GB, 2GB, 4GB, 6GB, 8GB, 10GB, 12GB, 16GB]")
storage = env_note.find("Storage").text.replace("\n", "").replace(' ', '')  # strip spaces and newlines
is_legal = int(storage[:-2]) > 0
if not is_legal:
raise Exception("Storage must be greater than 0GB")
network_card = env_note.find("NetworkCard").text
# is_legal = network_card in ["无需求"]
# if not is_legal:
# # 输出异常
# return
band_width = env_note.find("Bandwidth").text
# is_legal = band_width in ["无需求"]
# if not is_legal:
# # 输出异常
# return
gpu = env_note.find("GPU").text
# is_legal = GPU in ["无需求"]
# if not is_legal:
# # 输出异常
# return
envs = {"is_Cluster": is_cluster, "cluster_num": cluster_num, "operating_system": operating_system,
"CPU": cpu, "memory": memory}
envs.update({"Storage": storage, "network_card": network_card, "band_width": band_width, "GPU": gpu})
return envs
def __check_input_para(self):
"""
Check the input parameters in the XML
:return: dict of input parameters; raises on error
"""
input_paras_note = self.__alg_compt.find("Inputs")
paras_num = int(input_paras_note.attrib.get("ParameterNum"))
para_list = input_paras_note.findall("Parameter")
if paras_num != len(para_list):
msg ="'ParameterNum':"+ str(paras_num) + " != number of 'Parameter':" + str(len(para_list))
logger.warning(msg)
input_paras = {}
for para in para_list:
para_name = para.find("ParaName").text.replace("\n", "").replace(' ', '') #去除空格和回车
para_chs_name = para.find("ParaChsName").text.replace("\n", "").replace(' ', '') #去除空格和回车
para_type = para.find("ParaType").text.replace("\n", "").replace(' ', '') #去除空格和回车
data_type = para.find("DataType").text.replace("\n", "").replace(' ', '') #去除空格和回车
para_value = para.find("ParaValue").text.replace("\n", "").replace(' ', '') #去除空格和回车
input_para = {"ParaName": para_name, "ParaChsName": para_chs_name, "ParaType": para_type,
"DataType": data_type, "ParaValue": para_value}
#print(para_name)
if para_type == "Value":
# max_value = para.find("MaxValue").text
# min_value = para.find("MinValue").text
# option_value = para.find("OptionValue").text.replace("\n", "").replace(' ', '') #去除空格和回车
# input_para.update({"MaxValue": max_value, "MinValue": min_value, "OptionValue": option_value})
# input_para.update({"OptionValue": option_value}) todo
pass
if para_name is None or para_type is None or para_value is None:
msg = 'there is None among para_name:%s, para_type:%s or para_value:%s!' % (para_name, para_type, para_value)
raise Exception(msg)
input_paras.update({para_name: input_para})
return input_paras
def __check_output_para(self):
"""
Check the output parameters in the XML
:return: dict of output parameters; raises on error
"""
output_paras_note = self.__alg_compt.find("Outputs")
paras_num = int(output_paras_note.attrib.get("ParameterNum"))
para_list = output_paras_note.findall("Parameter")
if paras_num != len(para_list):
raise Exception("'ParameterNum' != number of 'Parameter'")
output_paras = {}
return output_paras
def write_out_para(self, para_name, para_value):
"""
Write an output parameter value back into the XML
"""
output_paras_note = self.__alg_compt.find("Outputs")
para_list = output_paras_note.findall("Parameter")
flag = False
for para in para_list:
if para.find("ParaName").text == para_name:
para.find("ParaValue").text = para_value
flag = True
if not flag:
raise Exception('Cannot find Output Parameter:'+para_name+'!')
self.__tree.write(self.in_path, encoding="utf-8", xml_declaration=True)
def __check_task_id(self):
"""
Check the task ID
:return: taskID; raises on error
"""
task_id_note = self.__root.find("TaskID")
task_id = str(task_id_note.text).replace("\n", "").replace(' ', '')  # strip spaces and newlines
if task_id is None:
raise Exception("'TaskID' is None")
return task_id
def __check_algorithm_name(self):
algorithm_name_note = self.__alg_compt.find("AlgorithmName")
algorithm_name = str(algorithm_name_note.text).replace("\n", "").replace(' ', '')  # strip spaces and newlines
if algorithm_name is None:
raise Exception("'AlgorithmName' is None")
return algorithm_name
class CheckSource:
"""
Check the integrity and validity of the resources in the configuration file
"""
def __init__(self, alg_xml_handle):
self.__alg_xml_handle = alg_xml_handle
self.imageHandler = ImageHandler()
self.__ParameterDic={}
def check_alg_xml(self):
"""
Check the algorithm configuration file
"""
if self.__alg_xml_handle.init_xml():
logger.info('init algXML succeed')
return True
else:
raise Exception('init algXML failed')
def check_run_env(self):
"""
:return: True if the environment is acceptable; raises on error
"""
envs = self.__alg_xml_handle.get_envs()
# check the operating system
local_plat = platform.platform()
local_plat_list = local_plat.split("-")
flag = envs['operating_system'] == local_plat_list[0]+local_plat_list[1]
if flag is False:
msg = 'operating_system:' + local_plat_list[0] + local_plat_list[1] + ' is not ' + envs['operating_system']
#raise Exception(msg)
# check the system memory
mem = psutil.virtual_memory()
mem_total = int(round(mem.total / 1024 / 1024 / 1024, 0))
mem_free = round(mem.free / 1024 / 1024 / 1024, 0)
env_memory = envs['memory']
env_memory = int(env_memory[:-2])
if env_memory > mem_total:
msg = 'memory_total ' + str(mem_total) + 'GB less than ' + str(env_memory) + 'GB'
# raise Exception(msg)
if env_memory >= mem_free:
msg = 'mem_free ' + str(mem_free) + 'GB less than ' + str(env_memory) + 'GB'
logger.warning(msg)
# check the number of CPU cores
env_cpu = envs['CPU']
if env_cpu == "单核":
env_cpu_core_num = 1
elif env_cpu == "双核":
env_cpu_core_num = 2
elif env_cpu == "三核":
env_cpu_core_num = 3
else:
env_cpu_core_num = int(env_cpu[:-1])
local_cpu_core_num = int(multiprocessing.cpu_count()/2)
if env_cpu_core_num > local_cpu_core_num:
msg = 'CPU_core_num ' + str(local_cpu_core_num) + ' cores less than ' + str(env_cpu_core_num) + ' cores'
# raise Exception(msg)
# check the free disk space
env_storage = envs['Storage']
env_storage = int(env_storage[:-2])
workspace_path = self.__alg_xml_handle.get_workspace_path()
if not os.path.isdir(workspace_path):
raise Exception('workspace_path:%s does not exist!' % workspace_path)
local_storage = self.__get_free_space_mb(workspace_path)
if env_storage > local_storage:
msg = 'workspace storage ' + str(local_storage) + 'GB less than ' + envs['Storage'] + 'GB'
# raise Exception(msg)
return True
@staticmethod
def __get_free_space_mb(folder):
"""
:param folder: path to check, e.g. 'C:\\'
:return: free space on the folder's drive (GB)
"""
if platform.system() == 'Windows':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
return free_bytes.value / 1024 / 1024 / 1024
else:
st = os.statvfs(folder)
return st.f_bavail * st.f_frsize / 1024 / 1024 / 1024  # bytes -> GB
def check_input_paras(self, input_para_names):
"""
:param input_para_names: list of parameter names to check [name1, name2, ...]
:return: True plus the parameter dict; raises on error
"""
workspace_path = self.__alg_xml_handle.get_workspace_path()
input_paras = self.__alg_xml_handle.get_input_paras()
for name in input_para_names:
para = input_paras[name]
if para is None:
msg = "check para:"+name + " is failed!"+"para is None!"
raise Exception(msg)
if para['ParaType'] == 'File':
if para['DataType'] == 'tif':
if para['ParaValue'] != 'empty' and para['ParaValue'] != 'Empty'and para['ParaValue'] != '':
para_value_list = para['ParaValue'].split(";")
for para_value in para_value_list:
para_path = para_value
if self.__check_tif(para_path) is False:
msg = "check para:"+name + " is failed!" + "Path:" + para_path
raise Exception(msg)
if para['DataType'] == 'xml':
para_path = para['ParaValue']
if not os.path.exists(para_path):
raise Exception('para_file:%s does not exist!' % para_path)
if para['DataType'] == 'File':
para_path = para['ParaValue']
if os.path.isdir(para_path) is False:
msg = "check para:" + name + " is failed!" + "FilePath:" + para_path
raise Exception(msg)
if para["DataType"]=="ymal":
para_path = para['ParaValue']
if os.path.isfile(para_path) is False:
msg = "check para: " + name + " is failed! " + " FilePath: " + para_path
raise Exception(msg)
elif para['ParaType'] == 'Value':
if para['DataType'] == 'float' or para['DataType'] == 'int' or para['DataType'] == 'double':
if para['ParaValue'] is None:
msg = "check para:"+name + " is failed!"+"'ParaValue' is None"
raise Exception(msg)
if self.__is_number(para['ParaValue']) is False:
raise Exception("para:"+name+" is not number!")
# if (para['MaxValue'] is not None) and (self.__is_number(para['MaxValue']) is True):
# value = para['ParaValue']
# max = para['MaxValue']
# if float(value) > float(max):
# msg = "para:" + name + " > max, para:" + value + "max:" + max
# raise Exception(msg)
# if (para['MinValue'] is not None) and (self.__is_number(para['MinValue']) is True):
# value = para['ParaValue']
# min = para['MinValue']
# if float(value) < float(min):
# msg = "para:" + name + " < min, para:" + value + "min:" + min
# raise Exception(msg)
self.__ParameterDic[name] = para['ParaValue']
__workspace_path = workspace_path
__input_paras = input_paras
return True, self.__ParameterDic
def check_output_paras(self, output_para_names):
"""
:param output_para_names: list of parameter names to check [name1, name2, ...]
:return: True or False
"""
workspace_path = self.__alg_xml_handle.get_workspace_path()
output_paras = self.__alg_xml_handle.get_output_paras()
for name in output_para_names:
para = output_paras[name]
#print(para)
if para is None:
msg = "check para:" + name + " is failed!" + "para is None!"
raise Exception(msg)
if para['ParaType'] == 'File':
if para['DataType'] == 'tif':
para_path = workspace_path + para['ParaValue']
para_dir = os.path.split(para_path)
flag_isdir = os.path.isdir(para_dir[0])
flag_istif = (para_dir[1].split(".", 1)[1] == "tif")
if not (flag_isdir and flag_istif):
msg = "check para:" + name + " is failed!" + para_path + "is invalid!"
raise Exception(msg)
if para['DataType'] == 'File':
para_path = workspace_path + para['ParaValue']
if os.path.isdir(para_path) is False:
os.makedirs(para_path)
if os.path.isdir(para_path) is False:
msg = "check para:" + name + " is failed!" + para_path + "is invalid!"
raise Exception(msg)
return True
@staticmethod
def __is_number(str_num):
"""
:param str_num: check whether the string is a float or double
:return: True or False
"""
if str_num[0] == '-':
str_num = str_num[1:]
pattern = re.compile(r'(.*)\.(.*)\.(.*)')
if pattern.match(str_num):
return False
return str_num.replace(".", "").isdigit()
def __check_tif(self, filename):
"""
:param filename: file path
:return: True or False
"""
if self.imageHandler.get_dataset(filename) is None:
msg = "read tif error!,finame: " + filename
raise Exception(msg)
return True
class InitPara:
def __init__(self,debug = False):
self._debug = debug
@staticmethod
def init_processing_paras(input_paras):
"""
:param input_paras: dict of parameter configurations, one entry per input product
"""
processing_paras = {}
for name in input_paras:
para = input_paras[name]
if para is None:
logger.error(name + "is None!")
return False
if para['ParaType'] == 'File':
if para['DataType'] == 'tif' or para['DataType'] == 'csv':
para_value_list = para['ParaValue'].split(";")
if len(para_value_list) == 1:
para_path = para['ParaValue']
if para_path != 'empty' and para_path != '':
processing_paras.update({name: para_path})
else:
for n, para_value in zip(range(len(para_value_list)), para_value_list):
processing_paras.update({name+str(n): para_value})
elif para['DataType'] == 'tar.gz':
paths = para['ParaValue'].split(';')
for n, path in zip(range(len(paths)), paths):
processing_paras.update({'sar_path' + str(n): path})
else:
para_path = para['ParaValue']
processing_paras.update({name: para_path})
elif para['ParaType'] == 'Value':
if para['DataType'] == 'float':
value = float(para['ParaValue'])
elif para['DataType'] == 'int':
value = int(para['ParaValue'])
else:  # default: string
value = para['ParaValue']
processing_paras.update({name: value})
elif para['ParaType'] == 'String':
value = para['ParaValue']
if value == 'empty':
continue
else:
processing_paras.update({name: value})
return processing_paras
# collect the files inside a folder
@staticmethod
def get_tif_paths(file_dir,name):
in_tif_paths = []
if os.path.exists(file_dir + name + '\\'):
in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tiff')))
if in_tif_paths1 != []:
in_tif_paths = in_tif_paths + in_tif_paths1
else:
in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(file_dir, '*.tiff')))
if in_tif_paths1 != []:
in_tif_paths = in_tif_paths + in_tif_paths1
return in_tif_paths
@staticmethod
def get_tif_paths_new(file_dir, name):
in_tif_paths = []
if os.path.exists(file_dir + name + '\\'):
in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tiff')))
if in_tif_paths1 != []:
in_tif_paths = in_tif_paths + in_tif_paths1
else:
in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(file_dir, '*.tiff')))
if len(in_tif_paths) == 0:
in_tif_paths = in_tif_paths + in_tif_paths1
return in_tif_paths
@staticmethod
def get_polarization_mode(in_tif_paths):
pol_dic = {}
pola_list = [0,0,0,0]
for in_tif_path in in_tif_paths:
# identify the polarization type from the file name
if '_HH_' in os.path.basename(in_tif_path):
pol_dic.update({'HH': in_tif_path})
pola_list[0] = 1
elif '_HV_' in os.path.basename(in_tif_path):
pol_dic.update({'HV': in_tif_path})
pola_list[1] = 1
elif '_VH_' in os.path.basename(in_tif_path):
pol_dic.update({'VH': in_tif_path})
pola_list[2] = 1
elif '_VV_' in os.path.basename(in_tif_path):
pol_dic.update({'VV': in_tif_path})
pola_list[3] = 1
elif 'LocalIncidenceAngle' in os.path.basename(in_tif_path) or 'ncidenceAngle' in os.path.basename(in_tif_path):
pol_dic.update({'LocalIncidenceAngle': in_tif_path})
elif 'inc_angle' in os.path.basename(in_tif_path):
pol_dic.update({'inc_angle': in_tif_path})
elif 'inci_Angle-ortho' in os.path.basename(in_tif_path):
pol_dic.update({'inci_Angle-ortho': in_tif_path})
elif 'LocalincidentAngle-ortho' in os.path.basename(in_tif_path):
pol_dic.update({'LocalIncidentAngle-ortho': in_tif_path})
elif 'ori_sim' in os.path.basename(in_tif_path):
pol_dic.update({'ori_sim': in_tif_path})
elif 'sim_ori' in os.path.basename(in_tif_path):
pol_dic.update({'sim_ori': in_tif_path})
pol_dic.update({'pola':pola_list})
return pol_dic
@staticmethod
def get_meta_paths(file_dir, name):
meta_xml_paths = []
if os.path.exists(file_dir + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.meta.xml')))
else:
meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.meta.xml')))
if meta_xml_paths is None or meta_xml_paths == []:
raise Exception('there is no .meta.xml in path: ' + file_dir)
return meta_xml_paths
@staticmethod
def get_incidence_xml_paths(file_dir, name):
meta_xml_paths = []
if os.path.exists(file_dir + name + '\\'):
meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.incidence.xml')))
else:
meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.incidence.xml')))
if meta_xml_paths is None or meta_xml_paths == []:
raise Exception('there is no .incidence.xml in path: ' + file_dir)
return meta_xml_paths
@staticmethod
def get_meta_dic(meta_xml_paths, name):
para_dic = {}
for mete_path in meta_xml_paths:
if name in mete_path:
para_dic.update({'META': mete_path})
if not para_dic:
raise Exception('the name of the .meta.xml file is wrong!')
return para_dic
@staticmethod
def get_incidence_dic(meta_xml_paths, name):
para_dic = {}
for mete_path in meta_xml_paths:
if name in mete_path:
para_dic.update({'Incidence': mete_path})
if not para_dic:
raise Exception('the name of the .incidence.xml file is wrong!')
return para_dic
@staticmethod
def get_meta_dic_new(meta_xml_paths, name):
para_dic = {}
for mete_path in meta_xml_paths:
if name in os.path.basename(mete_path):
para_dic.update({'META': mete_path})
else:
para_dic.update({'Origin_META': mete_path})
if not para_dic:
raise Exception('the name of the .meta.xml file is wrong!')
return para_dic
@staticmethod
def get_meta_dic_VP(meta_xml_paths, name):
para_dic = {}
for mete_path in meta_xml_paths:
if name in os.path.basename(mete_path):
para_dic.update({name + '_META': mete_path})
else:
para_dic.update({name + '_Origin_META': mete_path})
if not para_dic:
raise Exception('the name of the .meta.xml file is wrong!')
return para_dic
def get_mult_tar_gz_inf(self,tar_gz_path, workspace_preprocessing_path):
para_dic = {}
name = os.path.split(tar_gz_path)[1]
if name.endswith('.tar.gz'):
name = name[:-len('.tar.gz')]  # strip the archive suffix
para_dic.update({'name': name})
file_dir = os.path.join(workspace_preprocessing_path, name + '\\')
if self._debug == False:
fileHandle().de_targz(tar_gz_path, file_dir)
# metadata file dict
para_dic.update(InitPara.get_meta_dic_VP(InitPara.get_meta_paths(file_dir, name), name))
# tif path dict
pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))
parameter_path = os.path.join(file_dir, "orth_para.txt")
para_dic.update({name + "paraMeter": parameter_path})
for key, in_tif_path in pol_dic.items():
para_dic.update({name + '_' + key: in_tif_path})
return para_dic
def get_mult_tar_gz_infs(self,processing_paras, workspace_preprocessing_path):
tif_names_list = []
tar_inf_dic = {}
for key, value in processing_paras.items():
if 'sar_path' in key:
para_dic = self.get_mult_tar_gz_inf(value, workspace_preprocessing_path)
tif_names_list.append(para_dic['name'])
para_dic.pop('name')
tar_inf_dic.update(para_dic)
tar_inf_dic.update({'name_list': tif_names_list})
return tar_inf_dic
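Taken together, a typical driver for this module parses the task XML, validates the environment and parameters, and flattens the inputs into a processing dict. A hedged sketch follows; the XML path and the parameter names "MainImg", "DEM" and "Product" are hypothetical stand-ins for the <Parameter> entries of a real task file:

handler = ManageAlgXML(r"D:\work\task.xml")          # hypothetical task XML
checker = CheckSource(handler)
checker.check_alg_xml()                              # parses and validates the XML
checker.check_run_env()                              # OS / memory / CPU / disk checks
ok, para_dic = checker.check_input_paras(["MainImg", "DEM"])
processing = InitPara.init_processing_paras(handler.get_input_paras())
handler.write_out_para("Product", r"Output\result.tar.gz")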

View File

@ -1,135 +0,0 @@
from xml.etree.ElementTree import ElementTree
import os
class DictXml:
def __init__(self, xml_path):
self.xml_path = xml_path
self.__tree = ElementTree()
self.__root = None
self.init_xml()
def init_xml(self):
self.__root = self.__tree.parse(self.xml_path)
if self.__root is None:
raise Exception("get root failed")
def get_extend(self):
productInfo = self.__root.find("imageinfo")
if productInfo is None:
raise Exception("get imageInfo failed")
corner = productInfo.find("corner")
if corner is None:
raise Exception("get corner failed")
topLeft = corner.find("topLeft")
if topLeft is None:
raise Exception("get topLeft failed")
topRight = corner.find("topRight")
if topRight is None:
raise Exception("get topRight failed")
bottomLeft = corner.find("bottomLeft")
if bottomLeft is None:
raise Exception("get bottomLeft failed")
bottomRight = corner.find("bottomRight")
if bottomRight is None:
raise Exception("get bottomRight failed")
point_upleft = [float(topLeft.find("longitude").text), float(topLeft.find("latitude").text)]
point_upright = [float(topRight.find("longitude").text), float(topRight.find("latitude").text)]
point_downleft = [float(bottomLeft.find("longitude").text), float(bottomLeft.find("latitude").text)]
point_downright = [float(bottomRight.find("longitude").text), float(bottomRight.find("latitude").text)]
scopes = [point_upleft, point_upright, point_downleft, point_downright]
point_upleft_buf = [float(topLeft.find("longitude").text) - 0.5, float(topLeft.find("latitude").text) + 0.5]
point_upright_buf = [float(topRight.find("longitude").text) + 0.5, float(topRight.find("latitude").text) + 0.5]
point_downleft_buf = [float(bottomLeft.find("longitude").text) - 0.5,
float(bottomLeft.find("latitude").text) - 0.5]
point_downright_buf = [float(bottomRight.find("longitude").text) + 0.5,
float(bottomRight.find("latitude").text) - 0.5]
scopes_buf = [point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf]
return scopes, scopes_buf  # callers unpack both the extent and the buffered extent
class xml_extend:
def __init__(self, xml_path):
self.xml_path = xml_path
self.__tree = ElementTree()
self.__root = None
self.init_xml()
def init_xml(self):
self.__root = self.__tree.parse(self.xml_path)
if self.__root is None:
raise Exception("get root failed")
def get_extend(self):
ProductBasicInfo = self.__root.find("ProductBasicInfo")
if ProductBasicInfo is None:
raise Exception("get ProductBasicInfo failed")
SpatialCoverageInformation = ProductBasicInfo.find("SpatialCoverageInformation")
if SpatialCoverageInformation is None:
raise Exception("get SpatialCoverageInformation failed")
TopLeftLongitude = SpatialCoverageInformation.find("TopLeftLongitude")
if TopLeftLongitude is None:
raise Exception("get TopLeftLongitude failed")
TopLeftLatitude = SpatialCoverageInformation.find("TopLeftLatitude")
if TopLeftLatitude is None:
raise Exception("get TopLeftLatitude failed")
TopRightLongitude = SpatialCoverageInformation.find("TopRightLongitude")
if TopRightLongitude is None:
raise Exception("get TopRightLongitude failed")
TopRightLatitude = SpatialCoverageInformation.find("TopRightLatitude")
if TopRightLatitude is None:
raise Exception("get TopRightLatitude failed")
BottomRightLongitude = SpatialCoverageInformation.find("BottomRightLongitude")
if BottomRightLongitude is None:
raise Exception("get BottomRightLongitude failed")
BottomRightLatitude = SpatialCoverageInformation.find("BottomRightLatitude")
if BottomRightLatitude is None:
raise Exception("get BottomRightLatitude failed")
BottomLeftLongitude = SpatialCoverageInformation.find("BottomLeftLongitude")
if BottomLeftLongitude is None:
raise Exception("get BottomLeftLongitude failed")
BottomLeftLatitude = SpatialCoverageInformation.find("BottomLeftLatitude")
if BottomLeftLatitude is None:
raise Exception("get BottomLeftLatitude failed")
point_upleft = [float(TopLeftLongitude.text), float(TopLeftLatitude.text)]
point_upright = [float(TopRightLongitude.text), float(TopRightLatitude.text)]
point_downleft = [float(BottomLeftLongitude.text), float(BottomLeftLatitude.text)]
point_downright = [float(BottomRightLongitude.text), float(BottomRightLatitude.text)]
scopes = [point_upleft, point_upright, point_downleft, point_downright]
point_upleft_buf = [float(TopLeftLongitude.text) - 0.5, float(TopLeftLatitude.text) + 0.5]
point_upright_buf = [float(TopRightLongitude.text) + 0.5, float(TopRightLatitude.text) + 0.5]
point_downleft_buf = [float(BottomLeftLongitude.text) - 0.5, float(BottomLeftLatitude.text) - 0.5]
point_downright_buf = [float(BottomRightLongitude.text) + 0.5, float(BottomRightLatitude.text) - 0.5]
scopes_buf = [point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf]
return scopes
if __name__ == '__main__':
xml_path = r'E:\MicroWorkspace\GF3A_nanjing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422.meta.xml'
scopes, scopes_buf = DictXml(xml_path).get_extend()
print(scopes)
print(scopes_buf)
# path = r'D:\BaiduNetdiskDownload\GZ\lon.rdr'
# path2 = r'D:\BaiduNetdiskDownload\GZ\lat.rdr'
# path3 = r'D:\BaiduNetdiskDownload\GZ\lon_lat.tif'
# s = ImageHandler().band_merge(path, path2, path3)
# print(s)
# pass

View File

@ -1,319 +0,0 @@
"""
@Project microproduct
@File OnePlantHeight.PY
@Function 主函数
@Author LMM
@Date 2021/10/19 14:39
@Version 1.0.0
"""
from xml.etree.ElementTree import ElementTree, Element
class CreateMetafile:
"""
Generate a product metadata file from a template
"""
def __init__(self, input_image_path, input_para_file, an_li_path, path):
"""
input_image_path: source image metadata file
input_para_file: algorithm configuration file
an_li_path: template (example) file path
path: output path
"""
self.input_image_path = input_image_path
self.input_para_file = input_para_file
self.an_li_path= an_li_path
self.path = path
pass
def create_xml(self):
"""
Read the metadata file, keep only the part from the header down to the sensor node,
and return the position of the sensor node
"""
tree = ElementTree()
tree.parse(self.input_image_path) # 影像头文件
root = tree.getroot()
# 1. keep only the part from the header down to the "sensor" node
element_trees = list(root)
count = 0
count_01=1
for element in element_trees:
count = count+1
if element.tag == "sensor":
element.tail = "\n\n\t"
count_01 = count-1
for i in range(0, len(element_trees)):
if i > count_01:
root.remove(element_trees[i])
# 2. keep only the "satellite", "orbitType", "attiType", "Direction", "ReceiveTime" and "sensor" nodes
element_trees2 = list(root)
for i in element_trees2:
if i.tag not in ["satellite", "orbitType", "attiType", "Direction", "ReceiveTime", "sensor"]:
root.remove(i)
# 3. locate the "sensor" node and return its position
count2 = 0
count2_01=1
element_trees3 = list(root)
for element in element_trees3:
count2 = count2+1
if element.tag == "sensor":
element.tail = "\n\n\t"
count2_01 = count2-1
tree.write(self.path, encoding="utf-8", xml_declaration=True)
return count2_01
@staticmethod
def create_node(tag, property_map, content):
"""
fun: create a new node
para: tag: node tag
para: property_map: dict of attributes and their values
para: content: text content inside the node
para: return: the new node
"""
element = Element(tag, property_map)
element.text = content
element.tail = "\n\t"
return element
def add_standard_xml(self, num):
"""
Write the template sections directly into the metadata file
"""
tree = ElementTree()
tree.parse(self.path)  # the metadata file being generated
root = tree.getroot()
tree2 = ElementTree()
tree2.parse(self.an_li_path)  # the template file
root2 = tree2.getroot()
productinfo = root2.find("productinfo")
root.insert(num + 1, productinfo)
processinfo = root2.find("processinfo")
root.insert(num + 2, processinfo)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def add_img_xml(self, num,SrcImageName):
"""添加影像信息"""
tree = ElementTree()
tree.parse(self.path)
root = tree.getroot()
a = self.create_node("SrcImageName", {"desc": "原始影像名称"}, SrcImageName)
root.insert(num+1, a)
# root.append(a)
b = self.create_node("AlgCompt", {"desc": "算法信息"}, "\n\t\t")
b.tail = "\n\n\t"
# root.append(b)
root.insert(num+2, b)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def add_info_xml(self):
"""
向元文件中添加配置文件的部分节"AlgorithmName", "ChsName", "AlgorithmDesc", "Version",
"AlgorithmClass", "AlgorithmLevel", "AlgoirthmID", "Author"
"""
tree = ElementTree()
tree.parse(self.input_para_file) # 配置文件
root = tree.getroot()
tree2 = ElementTree()
tree2.parse(self.path)
root2 = tree2.getroot()
AlgCompt = root2.find("AlgCompt")
a = root.find("AlgCompt")
element_trees = list(a)
for element in element_trees:
if element.tag in ["AlgorithmName", "ChsName", "AlgorithmDesc", "Version",
"AlgorithmClass", "AlgorithmLevel", "AlgoirthmID", "Author"]:
element.tail = "\n\t\t"
AlgCompt.append(element)
if element.tag == "Author":
element.tail = "\n\t"
tree2.write(self.path, encoding="utf-8", xml_declaration=True)
def add_class_info(self, type_id_name, type_id_parent=None):
"""
In the metadata file:
1. remove the productinfo-productType node
2. insert the land-cover class information, producing e.g.
<Class1>
<parent_id>1</parent_id>
<id>101</id>
<covernm>耕地</covernm>
</Class1>
<Class2>
<parent_id>5</parent_id>
<id>502</id>
<covernm>草地</covernm>
</Class2>
"""
tree = ElementTree()
tree.parse(self.path) # 配置文件
root = tree.getroot()
productinfo = root.find("productinfo")
# element_trees = list(productinfo)
# for element in element_trees:
# if element.tag == "productType":
# productinfo.remove(element) # 移除 "productType"
productinfo.find("productConsumeTime").tail = "\n\t\t" # 定位到productConsumeTime设置好位置
b = self.create_node("LandCoverClass", {}, "\n\t\t\t")
b.tail = "\n\t\t"
productinfo_count=0
for i in list(productinfo):
productinfo_count=productinfo_count+1
if i.tag=="productConsumeTime":
break
productinfo.insert(productinfo_count, b)
# productinfo.insert(num, b) # 插入LandCoverClass
class_num = 1
for key, value in type_id_name.items():
LandCoverClass = productinfo.find("LandCoverClass")
name="Class"+str(class_num)
# name = "Class"
c = self.create_node(name, {}, "\n\t\t\t\t")
if class_num!=(len(type_id_name.keys())):
c.tail = "\n\t\t\t"
else:
c.tail = "\n\t\t"
LandCoverClass.append(c)  # append the ClassN node
# LandCoverClass.find("Class")[num].tail = "\n\t\t\t"
aaa=LandCoverClass.find(name)
if type_id_parent is not None:
parent_id = self.create_node("parent_id", {}, type_id_parent[key])
parent_id.tail="\n\t\t\t\t"
LandCoverClass.find(name).append(parent_id)
id = self.create_node("id", {}, str(key))
id.tail = "\n\t\t\t\t"
LandCoverClass.find(name).append(id)
covernm = self.create_node("covernm", {}, value)
covernm.tail = "\n\t\t\t"
LandCoverClass.find(name).append(covernm)
class_num=class_num+1
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def rewrite_name(self):
"""
Rename the class nodes:
Class1 -> Class
Class2 -> Class
"""
tree = ElementTree()
tree.parse(self.path) # 配置文件
root = tree.getroot()
productinfo = root.find("productinfo")
LandCoverClass=productinfo.find("LandCoverClass")
element_trees = list(LandCoverClass)
for element in element_trees:
element.tag="Class"
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def OrthoInsertNode(self):
"""正射算法专用,插入节点<l1aInfo>"""
tree = ElementTree()
tree.parse(self.path)  # the metadata file being generated
root = tree.getroot()
# insert the <l1aInfo> node
count2 = 0
count2_01=1
element_trees3 = list(root)
for element in element_trees3:
count2 = count2+1
if element.tag == "sensor":
element.tail = "\n\n\t"
count2_01 = count2-1
b = self.create_node("l1aInfo", {}, "\n\t\t")
b.tail = "\n\n\t"
root.insert(count2_01+1, b)
# locate the <l1aInfo> node
node_l1aInfo=root.find("l1aInfo")
img_tree = ElementTree()
img_tree.parse(self.input_image_path)  # the source image metadata file
img_root = img_tree.getroot()
node_imageinfo = img_root.find("imageinfo")
node_processinfo=img_root.find("processinfo")
ele_node_imageinfo = list(node_imageinfo)
ele_node_processinfo= list(node_processinfo)
for i in ele_node_imageinfo:
if i.tag == "QualifyValue":
i.tail = "\n\t\t"
node_l1aInfo.append(i)
for j in ele_node_processinfo:
if j.tag == "CalibrationConst":
j.tail = "\n\t" #后一个节点的位置
node_l1aInfo.append(j)
tree.write(self.path, encoding="utf-8", xml_declaration=True)
def process(self,SrcImageName):
"""
Call this when land cover is not involved
"""
if self.input_image_path is None:
import xml.etree.cElementTree as ET
product = ET.Element("product") # 根节点tag= "product"
product.text = "\n\t"
tree = ET.ElementTree(product)
tree.write(self.path)
count = 0
count_2 = -1
else:
count = self.create_xml()
count_2 = count
self.add_standard_xml(count)
self.add_img_xml(count_2, SrcImageName)
self.add_info_xml()
def process2(self, type_id_name, type_id_parent,SrcImageName):
"""
Call this when land cover is involved, e.g.
type_id_name={"101":"耕地","502":"草地"}
type_id_parent={"101":"1","502":"5"}
"""
count = self.create_xml()
self.add_standard_xml(count)
self.add_img_xml(count,SrcImageName)
self.add_info_xml()
self.add_class_info(type_id_name, type_id_parent)
self.rewrite_name()
def process3(self,SrcImageName):
"""
Called by the ortho-rectification algorithm
"""
if self.input_image_path is None:
import xml.etree.cElementTree as ET
product = ET.Element("product") # 根节点tag= "product"
product.text = "\n\t"
tree = ET.ElementTree(product)
tree.write(self.path)
count = 0
else:
count = self.create_xml()
self.add_standard_xml(count)
self.add_img_xml(count, SrcImageName)
self.add_info_xml()
self.OrthoInsertNode()
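A hedged usage sketch of the class above; the paths are hypothetical, and the class-id dicts follow the format given in the process2 docstring:

meta = CreateMetafile(r"D:\work\scene.meta.xml",    # source image metadata
                      r"D:\work\alg_config.xml",    # algorithm configuration
                      r"D:\work\template.xml",      # template metadata
                      r"D:\work\product.meta.xml")  # output path
meta.process2({"101": "耕地", "502": "草地"}, {"101": "1", "502": "5"}, "GF3_scene.tif")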

View File

@ -1,245 +0,0 @@
import json
from xml.etree.ElementTree import ElementTree, Element
import shutil
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from osgeo import gdal
import numpy as np
import datetime
import os
import glob
import xmltodict
# os.environ['PROJ_LIB'] = r"E:\soft\Anaconda\envs\micro\Lib\site-packages\osgeo\data\proj"
class CreateMetaDict:
def __init__(self, image_path, origin_xml, pack_path, out_path1, out_path2):
self.ImageHandler = ImageHandler()
self.image_path = image_path
self.origin_xml = origin_xml
self.pack_path = pack_path
self.file_size = self.get_file_size()
self.out_path1 = out_path1
self.out_path2 = out_path2
self.timeDict = self.get_productTime()
pass
def calu_nature(self):
"""
Collect the values needed by the productinfo node into a dict
image_path: image path
image_pair: polarisation pairs in the input package, hh,hv,vh,vv=1111
out_path1: output path of the geographic-to-projected conversion
out_path2: output path of the projected-to-geographic conversion
"""
para_dict = {}
proj = self.ImageHandler.get_projection(self.image_path)  # if the image is in a projected CRS, convert it to geographic first
keyword = proj.split("[", 2)[0]
if keyword == "GEOGCS":
image_path = self.image_path  # already geographic
elif keyword == "PROJCS":
pp.trans_projcs2geogcs(self.out_path2, self.image_path)
image_path = self.out_path2
elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
raise Exception('image projection is missing!')
pp.trans_geogcs2projcs(self.out_path1, image_path)  # reproject the geographic image to a projected CRS
imageinfo_widthspace = self.ImageHandler.get_geotransform(self.out_path1)[1]  # resolution after projection
# imageinfo_heightspace = -self.ImageHandler.get_geotransform(out_path1)[5] # 投影后的分辨率
# para_dict.update({"imageinfo_widthspace": imageinfo_widthspace})
# para_dict.update({"imageinfo_heightspace": imageinfo_heightspace})
para_dict.update({"imageinfo_ProductResolution": imageinfo_widthspace})
para_dict.update({"imageinfo_ProductFormat": "GEOTIFF"})
para_dict.update({"imageinfo_CompressionMethod": "None"})
para_dict.update({"imageinfo_ProductSize": str(self.file_size) + "MB"}) #todo 产品总大小
get_scope = self.ImageHandler.get_scope(image_path)  # corner coordinates from the geographic image
point_upleft, point_upright, point_downleft, point_downright = get_scope[0], get_scope[1], get_scope[2], get_scope[3]
para_dict.update({"SpatialCoverageInformation_TopLeftLatitude": point_upleft[1]})
para_dict.update({"SpatialCoverageInformation_TopLeftLongitude": point_upleft[0]})
para_dict.update({"SpatialCoverageInformation_TopRightLatitude": point_upright[1]})
para_dict.update({"SpatialCoverageInformation_TopRightLongitude": point_upright[0]})
para_dict.update({"SpatialCoverageInformation_BottomLeftLatitude": point_downleft[1]})
para_dict.update({"SpatialCoverageInformation_BottomLeftLongitude": point_downleft[0]})
para_dict.update({"SpatialCoverageInformation_BottomRightLatitude": point_downright[1]})
para_dict.update({"SpatialCoverageInformation_BottomRightLongitude": point_downright[0]})
longitude_max = np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).max()
longitude_min = np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).min()
latitude_max = np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).max()
latitude_min = np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).min()
imageinfo_center_latitude = (latitude_max + latitude_min) / 2
imageinfo_center_longitude = (longitude_max + longitude_min) / 2
para_dict.update({"SpatialCoverageInformation_CenterLatitude": imageinfo_center_latitude})
para_dict.update({"SpatialCoverageInformation_CenterLongitude": imageinfo_center_longitude})
para_dict.update({"TimeCoverageInformation_StartTime": self.timeDict.get("startTime")})
para_dict.update({"TimeCoverageInformation_CenterTime": self.timeDict.get("centerTime")})
para_dict.update({"TimeCoverageInformation_EndTime": self.timeDict.get("endTime")})
para_dict.update({"CoordinateReferenceSystemInformation_EarthEllipsoid": "WGS84"})
para_dict.update({"CoordinateReferenceSystemInformation_MapProjection": "UTM"})
para_dict.update({"CoordinateReferenceSystemInformation_ZoneNo": "None"})
para_dict.update({"MetaInfo_Unit": "none"}) # 设置单位
para_dict.update({"MetaInfo_UnitDes": "无量纲"}) # 设置单位
# fill in the ProductProductionInfo section
data_name = os.path.basename(self.image_path)
strs = data_name.split("_")
para_dict.update({"DataSources_DataSource_Satellite": strs[0]})
para_dict.update({"DataSources_DataSource_Sensor": strs[0]})
para_dict.update({"ObservationGeometry_SatelliteAzimuth": "None"})
para_dict.update({"ObservationGeometry_SatelliteRange": "None"})
para_dict.update({"ProductProductionInfo_BandSelection": "1"})
para_dict.update({"ProductProductionInfo_DataSourceDescription": "None"})
para_dict.update({"ProductProductionInfo_DataSourceProcessingDescription": "参考产品介绍PDF"})
productGentime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
para_dict.update({"ProductProductionInfo_ProductionDate": productGentime})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": ""})
# para_dict.update({"ProductPublishInfo_Processor": "德清院"}) # 生产者
# para_dict.update({"ProductPublishInfo_DistributionUnit": "none"}) # 分发单位
# para_dict.update({"ProductPublishInfo_ContactInformation": "none"}) # 联系信息
return para_dict
def get_productTime(self):
time_dict = {}
tree = ElementTree()
tree.parse(self.origin_xml)
root = tree.getroot()
platform = root.find("platform")
if platform is None:
centerTime = " "
else:
centerTime = platform.find("CenterTime").text.split(".")[0]
productInfo = root.find("imageinfo")
imagingTime = productInfo.find("imagingTime")
if imagingTime is None:
startTime = " "
endTime = " "
else:
startTime = imagingTime.find("start").text.split(".")[0]
endTime = imagingTime.find("end").text.split(".")[0]
time_dict.update({"startTime": startTime})
time_dict.update({"centerTime": centerTime})
time_dict.update({"endTime": endTime})
return time_dict
def get_file_size(self):
in_tif_paths = list(glob.glob(os.path.join(self.pack_path, '*.tif')))
in_tif_paths1 = list(glob.glob(os.path.join(self.pack_path, '*.tiff')))
in_tif_paths += in_tif_paths1
size = 0
for file in in_tif_paths:
fsize = os.path.getsize(file)  # size in bytes
size += fsize
return round(size / float(1024*1024), 2)
class CreateProductXml:
def __init__(self, par_dict, model_path, xml_path):
self.par_dict = par_dict
self.xml_path = xml_path
shutil.copy(model_path, xml_path)
pass
def create_standard_xml(self):
"""将字典中的信息写入到copy的xml文件中"""
tree = ElementTree()
tree.parse(self.xml_path) # 影像头文件
root = tree.getroot()
productinfo = root.find("ProductBasicInfo")
for key, value in self.par_dict.items():
if key.split("_")[0] == "imageinfo":
productinfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "SpatialCoverageInformation":
imageinfo = productinfo.find("SpatialCoverageInformation")
imageinfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "TimeCoverageInformation":
timeInfo = productinfo.find("TimeCoverageInformation")
timeInfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "CoordinateReferenceSystemInformation":
geoInfo = productinfo.find("CoordinateReferenceSystemInformation")
geoInfo.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "MetaInfo":
metaInfo = productinfo.find("MetaInfo")
metaInfo.find(key.split("_")[1]).text = str(value)
ProductProductionInfo = root.find("ProductProductionInfo")  # source data information
for key, value in self.par_dict.items():
if key.split("_")[0] == "DataSources":
dataSources = ProductProductionInfo.find("DataSources")
dataSource = dataSources.find("DataSource")
dataSource.find(key.split("_")[2]).text = str(value)
elif key.split("_")[0] == "ObservationGeometry":
ObservationGeometry = ProductProductionInfo.find("ObservationGeometry")
ObservationGeometry.find(key.split("_")[1]).text = str(value)
elif key.split("_")[0] == "ProductProductionInfo":
ProductProductionInfo.find(key.split("_")[1]).text = str(value)
# ProductPublishInfo = root.find("ProductPublishInfo") # 发布者信息
# for key, value in self.par_dict.items():
# if key.split("_")[0] == "ProductPublishInfo":
# ProductPublishInfo.find(key.split("_")[1]).text = str(value)
tree.write(self.xml_path, encoding="utf-8", xml_declaration=True)
class OrthoAzimuth:
@staticmethod
def FindInfomationFromJson(HeaderFile_dom_json, node_path_list):
"""
Walk the JSON dict along the given node path and return that node
"""
result_node = HeaderFile_dom_json
for nodename in node_path_list:
result_node = result_node[nodename]
return result_node
@staticmethod
def get_Azimuth_incidence(Azimuth_path):
Azimuth_incidence = 0
if not os.path.exists(Azimuth_path):
return Azimuth_incidence
with open(Azimuth_path) as f:
Azimuth_incidence = f.readline()
return Azimuth_incidence
@staticmethod
def read_Azimuth_incidence(xml_path):
# tree = ElementTree()
# tree.parse(xml_path)
# root = tree.getroot()
# Azimuth_incidence = float(root.find('ProductProductionInfo').find('ObservationGeometry').find('SatelliteAzimuth').text)
# return Azimuth_incidence
with open(xml_path, 'r', encoding='utf-8') as fp:
HeaderFile_dom_str = fp.read()
HeaderFile_dom = xmltodict.parse(HeaderFile_dom_str)  # convert the XML into a JSON-style dict
HeaderFile_dom_json = json.loads(json.dumps(HeaderFile_dom))
node_path_list = ['Root', 'ProductProductionInfo', 'ObservationGeometry', 'SatelliteAzimuth']
Azimuth_incidence = OrthoAzimuth.FindInfomationFromJson(HeaderFile_dom_json, node_path_list)
return Azimuth_incidence
if __name__ == '__main__':
image_path = r'D:\Micro\WorkSpace\test\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1B_h_h_L10000073024_db_RD_geo.tif'
origin_xml = r'D:\Micro\WorkSpace\Ortho\Temporary\package\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024.meta.xml'
tem_folder = r'D:\Micro\WorkSpace\test'
pack_path = r'D:\Micro\WorkSpace\Ortho\Temporary\package'
out_dem_path1 = os.path.join(tem_folder, "trans_dem_geo_projcs.tif")
out_dem_path2 = os.path.join(tem_folder, "trans_dem_projcs_geo.tif")
para_dict = CreateMetaDict(image_path, origin_xml, pack_path, out_dem_path1, out_dem_path2).calu_nature()
model_path = r'D:\Project\microproduct\Ortho\product.xml'
xml_path = r'D:\Micro\WorkSpace\test\test.xml'
CreateProductXml(para_dict, model_path, xml_path).create_standard_xml()

View File

@ -1,48 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project onestar
@File ConfigeHandle.py
@Contact: https://blog.csdn.net/songlh1234/article/details/83316468
@Author SHJ
@Date 2021/11/23 16:57
@Version 1.0.0
"""
import os
import configparser
class Config:
"""读写初始化配置文件"""
def __init__(self):
pass
@staticmethod
def get(para_name, option='config', config_name='config.ini'):
config = configparser.ConfigParser()
config_path = os.path.join(os.getcwd(), config_name)
config.read(config_path, encoding='utf-8')
config.sections()
exe_name = config.get(option, para_name)
return exe_name
def get_list(self, para_name, option='config', config_name='config.ini'):
config = configparser.ConfigParser()
config_path = os.path.join(os.getcwd(), config_name)
config.read(config_path, encoding='utf-8')
config.sections()
str_name = config.get(option, para_name)
# strip spaces and newlines
str_name = str(str_name).replace("\n", "").replace(' ', '')
# split into a list
name_list = str_name.split(',')
return name_list
if __name__ == '__main__':
# c = Config()
# a = c.get('exe_name')
# b = bool(c.get('debug'))
# d = int(c.get('cover_threshold'))
# f = float(c.get('ndvi_threshold'))
print('done')
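A hedged usage sketch of Config; the option names and config.ini contents below are hypothetical, and get() reads config.ini from the current working directory:

# config.ini
# [config]
# exe_name = decompose.exe
# pol_list = HH,HV,VV

exe_name = Config.get('exe_name')
pols = Config().get_list('pol_list')   # -> ['HH', 'HV', 'VV']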

View File

@ -1,265 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project : microproduct
@File : csvHandle.py
@Function : read and write csv files
@Contact :
@Author:SHJ
@Date:2022/11/6
@Version:1.0.0
"""
import random
import csv
import logging
import numpy as np
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.CoordinateTransformation import geo2imagexy
from tool.algorithm.transforml1a.transHandle import TransImgL1A
logger = logging.getLogger("mylog")
class csvHandle:
def __init__(self, row=0, col=0):
self.imageHandler = ImageHandler()
self.row = row
self.col = col
self.img_flag = False
if row != 0 and col != 0:
self.roi_img = np.zeros((row, col), dtype=float)
self.img_flag = True
def get_roi_img(self):
if self.img_flag:
self.roi_img[self.roi_img == 0] = np.nan
return self.roi_img
else:
return np.array([])
@staticmethod
def readcsv(csv_path):
reader = csv.reader(open(csv_path, newline=''))
csv_list = []
for line_data in reader:
csv_list.append(line_data)
return csv_list[1:]
def trans_measuredata(self, meas_data, tif_path):
file_name = tif_path
dataset = self.imageHandler.get_dataset(file_name)
rows = self.imageHandler.get_img_height(file_name)
cols = self.imageHandler.get_img_width(file_name)
measdata_list = []
logger.info('[MEASURE DATA]')
for data in meas_data:
lon = float(data[1])
lat = float(data[2])
coord = geo2imagexy(dataset, lon, lat)
row = round(coord[1])
col = round(coord[0])
if row >= 0 and row < rows and col >= 0 and col < cols:
measdata_list.append([row, col, float(data[3])])
logger.info([row, col, float(data[3])])
else:
logger.warning("measure data: %s is beyond tif scope !", data)
pass
return measdata_list
def write_roi_img_data(self, points, type_id):
if self.img_flag:
for p in points:
r = p[0]
c = p[1]
if r < self.row and c < self.col:
self.roi_img[r, c] = type_id
def trans_landCover_measuredata(self, meas_data, cuted_ori_sim_path, max_train_num =100000):
"""
Collect every point inside the measured polygons as labelled training samples
:para meas_data: measured data read from the csv
"""
type_data = {}
n = 1
train_data_list = []
for data in meas_data:
for d in data:
if d == '':
raise Exception('there are empty data!', data)
type_id = int(data[1])
type_name = data[2]
if type_id not in type_data.keys():
train_data_list.append([n, type_id, type_name, []])
type_data.update({type_id: type_name})
n += 1
pointList = self.__roiPolygonAnalysis(data[3])
for points in pointList:
roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
if tr._mask is not None:
points = tr.get_roi_points()
for train_data in train_data_list:
if train_data[1] == type_id:
train_data[3] += points
self.write_roi_img_data(points, type_id)
if train_data[3] == [] :
raise Exception('there are empty data!', train_data)
if len(train_data_list) <= 1:
raise Exception('there is only one label type!', train_data_list)
for train_data in train_data_list:
logger.info(str(train_data[0]) + "," + str(train_data[2]) + "," + "num:" + str(len(train_data[3])))
max_num = max_train_num
if (len(train_data[3]) > max_num):
logger.info("max number =" + str(max_num) + ", random select" + str(max_num) + " point as train data!")
train_data[3] = random.sample(train_data[3], max_num)
return train_data_list
def trans_landCover_measuredata_dic(self, meas_data, cuted_ori_sim_path,max_train_num=100000):
train_data_list = self.trans_landCover_measuredata(meas_data, cuted_ori_sim_path,max_train_num)
return self.trans_landCover_list2dic(train_data_list)
@staticmethod
def trans_landCover_list2dic(train_data_list):
ids = []
class_ids = []
ch_names = []
positions = []
for data in train_data_list:
ids.append(data[0])
class_ids.append(data[1])
ch_names.append(data[2])
positions.append(data[3])
train_data_dic = {}
train_data_dic.update({"ids": ids})
train_data_dic.update({"class_ids": class_ids})
train_data_dic.update({"ch_names": ch_names})
train_data_dic.update({"positions": positions})
return train_data_dic
@staticmethod
def __roiPolygonAnalysis(roiStr):
"""
Convert the csv POLYGON string into point arrays
:para roiStr: polygon string
:return pointList: list of polygons
"""
pointList = []
strContent = roiStr.replace("POLYGON", "")
# parse the outline string into a 2-D array
bracketsList = []
strTemp = ''
strList = []
for c in strContent:
if c == '(':
bracketsList.append(c)
continue
elif c == ')':
if len(bracketsList) > 0:
bracketsList.pop(0)
if len(strTemp) > 0:
strList.append(strTemp)
strTemp = ''
else:
strTemp += c
for item in strList:
if len(item) == 0:
continue
pTempList = item.split(',')
pList = []
for row in pTempList:
cells = row.split(' ')
if len(cells) != 2:
continue
point = [float(cells[0]), float(cells[1])]
pList.append(point)
pointList.append(pList)
return pointList
def class_landcover_list(self, csv_path):
"""
Build id-to-name and id-to-parent mappings from the first three columns of the csv table
"""
reader = csv.reader(open(csv_path, newline=''))
class_list=[]
type_id_name = {}
type_id_parent = {}
for line_data in reader:
class_list.append(line_data)  # class_list has four columns
for data in class_list[1:]:
type_parent = data[0]
type_id = int(data[1])
type_name = data[2]
if type_id not in type_id_name.keys():
type_id_name.update({type_id: type_name})
type_id_parent.update({type_id: type_parent})
return type_id_name, type_id_parent
def trans_VegePhenology_measdata_dic(self, meas_data, cuted_ori_sim_path):
"""
Collect every point inside the polygon regions and split them into training and test sets
:param meas_data: measured data read from the csv
"""
train_data = []
test_data = []
type_data = {}
for data in meas_data:
data_use_type = data[0]
sar_img_name = data[1]
# str.rstrip strips a set of characters, not a suffix, so remove '.tar.gz' explicitly
name = sar_img_name[:-len('.tar.gz')] if sar_img_name.endswith('.tar.gz') else sar_img_name
if data_use_type == 'train':
phenology_id = int(data[2])
phenology_name = data[3]
if phenology_id not in type_data.keys():
type_data.update({phenology_id: phenology_name})
else:
phenology_id = -1
pointList = self.__roiPolygonAnalysis(data[4])
l1a_points = []
for points in pointList:
roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
l1a_points = tr.get_roi_points()
# l1a_points = tr.get_lonlat_points()
if data_use_type == 'train':
train_data.append([name, phenology_id, l1a_points, type_data[phenology_id]])
elif data_use_type == 'test':
test_data.append([name, phenology_id, l1a_points])
type_map = []
for n, type_id in enumerate(type_data):
    type_map.append([n + 1, type_id, type_data[type_id]])
return train_data, test_data, type_map
@staticmethod
def vegePhenology_class_list(csv_path):
"""
Build a type-id-to-name mapping from the csv table
"""
reader = csv.reader(open(csv_path, newline=''))
class_list=[]
type_id_name = {}
for line_data in reader:
class_list.append(line_data)  # class_list has four columns
for data in class_list[1:]:
type_id = data[2]
type_name = data[3]
if type_id not in type_id_name.keys():
if type_id.strip() != "":
type_id_name.update({type_id: type_name})
return type_id_name
# if __name__ == '__main__':
# csvh = csvHandle()
# csv_path = r"I:\preprocessed\VegetationPhenologyMeasureData_E118.9_N31.4.csv"
# data = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(csv_path),r"I:\preprocessed\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_ori_sim_preprocessed.tif")
# pass
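For reference, a minimal sketch of how the helpers above fit together, mirroring the commented-out example (csvHandle is assumed to be the enclosing class; every path below is a placeholder):

# Hypothetical driver for the csv helpers above; paths are placeholders.
csvh = csvHandle()
meas = csvh.readcsv(r"D:\data\LandCoverMeasureData.csv")  # rows after the header
train = csvh.trans_landCover_measuredata_dic(
    meas, r"D:\data\ori_sim_preprocessed.tif", max_train_num=50000)
print(train["class_ids"], [len(p) for p in train["positions"]])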

@@ -1,88 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project : microproduct
@File : fileHandle.py
@Function : file creation, deletion, extraction and packaging
@Contact :
@Author:SHJ
@Date:2022/11/6
@Version:1.0.0
"""
import os
import tarfile
import shutil
class fileHandle:
def __init__(self, debug_mode=False):
self.__debug_mode = debug_mode
def creat_dirs(self, path_list):
"""
Create folders
"""
for path in path_list:
if os.path.exists(path):
if self.__debug_mode is True:
continue
self.del_folder(path)
os.makedirs(path)
else:
os.makedirs(path)
def del_folder(self, dic):
"""
Delete an entire folder tree
"""
if self.__debug_mode is True:
return
if os.path.isdir(dic):
shutil.rmtree(dic)
def del_file(self, path_data):
"""
Delete only files; folders are kept
"""
for i in os.listdir(path_data):  # everything directly inside the directory
    file_data = os.path.join(path_data, i)  # absolute path of the entry
    if os.path.isfile(file_data) is True:  # delete files; recurse into sub-folders
os.remove(file_data)
else:
self.del_file(file_data)
@staticmethod
def make_targz(output_filename, source_dir):
"""
Pack an entire root directory in one go (empty sub-directories are included).
To pack without compressing, change the "w:gz" argument to "w:" or "w".
:param output_filename: full path of the output archive, e.g. 'E:\test.tar.gz'
:param source_dir: root directory to pack, e.g. 'E:\testFfile\' packs everything inside the folder, 'E:\testFfile' packs the folder itself
"""
dir = os.path.split(output_filename)[0]
if os.path.exists(dir) is False:
os.makedirs(dir)
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
@staticmethod
def de_targz(tar_gz_path, file_dir):
# str.rstrip strips a set of characters, not a suffix, so remove '.tar.gz' explicitly
name = os.path.split(tar_gz_path)[1]
name = name[:-len('.tar.gz')] if name.endswith('.tar.gz') else name
if os.path.exists(file_dir) is False:
os.makedirs(file_dir)
# extract
t = tarfile.open(tar_gz_path)
t.extractall(path=file_dir)
@staticmethod
def copyfile2dir(srcfile, dir):  # copy a file into a directory
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
    else:
        fpath, fname = os.path.split(srcfile)  # split off path and file name
        if not os.path.exists(dir):
            os.makedirs(dir)  # create the target directory
        shutil.copy(srcfile, os.path.join(dir, fname))  # os.path.join supplies the missing separator
# if __name__ == '__main__':
# file = fileHandle()
# file.del_folder("I:\preprocessed")
# pass
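A quick round trip with the helper above (all paths are placeholders):

# Hypothetical usage of fileHandle; every path is a placeholder.
fh = fileHandle()
fh.creat_dirs([r"D:\work\tmp"])  # recreate a clean working folder
fh.make_targz(r"D:\work\out\pkg.tar.gz", r"D:\work\tmp")  # pack it
fh.de_targz(r"D:\work\out\pkg.tar.gz", r"D:\work\unpacked")  # unpack it again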

@@ -1,90 +0,0 @@
# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File logHandler.py
@Author SHJ
@Date 2021/9/6
@Version 1.0.0
"""
import logging
import os
# from logging import handlers
import time
import datetime
class LogHandler:
"""
Generate log files
"""
__logger = logging.getLogger("mylog")
__format_str = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] - %(module)s.%(funcName)s "
"(%(filename)s:%(lineno)d) - %(message)s")
__log_path = None
@staticmethod
def init_log_handler(log_name):
"""
Initialize the logger
:param log_name: path and name for the saved log file
:return:
"""
path = os.getcwd()
current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
LogHandler.__log_path = os.path.join(path, log_name + current_time + ".log")
para_dir = os.path.split(LogHandler.__log_path)
if not os.path.exists(para_dir[0]):
os.makedirs(para_dir[0])
# delete files older than seven days
LogHandler.delete_outdate_files(para_dir[0], 7)
# Method 1: plain logging
LOG_FORMAT = "[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s" \
" (%(filename)s:Line%(lineno)d) "
DATE_FORMAT = "%m/%d/%Y %H:%M:%S"
fp = logging.FileHandler(LogHandler.__log_path, encoding='utf-8')
fs = logging.StreamHandler()
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT, handlers=[fp, fs])  # apply the config
# Method 2: rotating log handler (kept for reference)
# LogHandler.__logger.setLevel(logging.DEBUG)
# th = handlers.TimedRotatingFileHandler(filename=LogHandler.__log_path, when='S', interval=1,
# backupCount=2, encoding='utf-8')
# th.suffix = "%Y-%m-%d-%H-%M-%S.log"
# th.setFormatter(LogHandler.__format_str)
# th.setLevel(level=logging.DEBUG)
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# LogHandler.__logger.addHandler(console)
# LogHandler.__logger.addHandler(th)
@staticmethod
def delete_outdate_files(path, date_interval=7):
"""
Delete log files in the directory modified more than date_interval days ago
"""
current_time = time.strftime("%Y-%m-%d", time.localtime(time.time()))
current_timeList = current_time.split("-")
current_time_day = datetime.datetime(int(current_timeList[0]), int(current_timeList[1]),
int(current_timeList[2]))
for root, dirs, files in os.walk(path):
for item in files:
item_format = item.split(".", 2)
if item_format[1] == "log":
file_path = os.path.join(root, item)
create_time = time.strftime("%Y-%m-%d", time.localtime((os.stat(file_path)).st_mtime))
create_time_list = create_time.split("-")
create_time_day = datetime.datetime(int(create_time_list[0]), int(create_time_list[1]),
int(create_time_list[2]))
time_difference = (current_time_day - create_time_day).days
if time_difference > date_interval:
os.remove(file_path)
#
# if __name__ == "__main__":
# # eg2:
# log_handler = LogHandler()
# log_handler.init_log_handler(r"run_log\myrun1")
# logging.warning("1")
# print("done")

@@ -57,7 +57,7 @@
<ParaType>File</ParaType>
<DataType>zip</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>E:\GF3Data\backscatter\ASTGTM2_N43E116_dem.zip</ParaValue>
<ParaValue>E:\GF3Data\backscatter\ASTGTM2_N43E116_dem.zip;E:\GF3Data\backscatter\ASTGTM2_N43E117_dem.zip;E:\GF3Data\backscatter\ASTGTM2_N44E116_dem.zip;E:\GF3Data\backscatter\ASTGTM2_N44E117_dem.zip</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>True</EnMultipleChoice>
<Control>File</Control>

@@ -1,7 +1,7 @@
<?xml version='1.0' encoding='utf-8'?>
<Root>
<TaskID>CSAR_202107275419_0001-0</TaskID>
<WorkSpace>D:\micro\WorkSpace\</WorkSpace>
<WorkSpace>E:\Result_GF3\</WorkSpace>
<AlgCompt>
<DataTransModel>File</DataTransModel>
<Artificial>ElementAlg</Artificial>
@@ -51,10 +51,10 @@
<ParaChsName>SAR影像文件夹路径</ParaChsName>
<Description>哨兵1号数据存放的文件夹</Description>
<ParaType>File</ParaType>
<DataType>zip</DataType>
<DataType>tar.gz</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\download\cls\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686.tar.gz;
D:\micro\microproduct_depdence\GF3-Deformation\download\cls\GF3_KAS_FSI_002034_E113.4_N34.7_20161228_L1A_HHHV_L10002077539.tar.gz</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686.tar.gz;
E:\GF3Data\AtmophericDealy\GF3_KAS_FSI_002034_E113.4_N34.7_20161228_L1A_HHHV_L10002077539.tar.gz</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@@ -82,9 +82,9 @@
<ParaChsName>高程数据路径</ParaChsName>
<Description>高程数据数据。数据来源:30米 ASTGTM2, 数据格式tif。备注数据的经纬度范围必须是整数</Description>
<ParaType>File</ParaType>
<DataType>File</DataType>
<DataType>zip</DataType>
<ParaSource>Man</ParaSource>
<ParaValue>D:\micro\microproduct_depdence\GF3-Deformation\dem</ParaValue>
<ParaValue>E:\GF3Data\AtmophericDealy\ASTGTM2_N34E113_dem.zip;E:\GF3Data\AtmophericDealy\ASTGTM2_N34E114_dem.zip;E:\GF3Data\AtmophericDealy\ASTGTM2_N35E113_dem.zip;E:\GF3Data\AtmophericDealy\ASTGTM2_N35E114_dem.zip</ParaValue>
<EnModification>True</EnModification>
<EnMultipleChoice>False</EnMultipleChoice>
<Control>File</Control>
@@ -116,7 +116,7 @@
<ParaType>File</ParaType>
<DataType>tar.gz</DataType>
<ParaSource>Cal</ParaSource>
<ParaValue>D:\micro\WorkSpace\Deformation\Output\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686-DF.tar.gz</ParaValue>
<ParaValue>E:\Result_GF3\Deformation\Output\GF3_SAY_FSI_001614_E113.2_N34.5_20161129_L1A_HHHV_L10002015686-DF.tar.gz</ParaValue>
</Parameter>
</Outputs>
</AlgCompt>

@@ -29,7 +29,7 @@ from logHandler import LogHandler
from ConfigeHandle import Config as cf
import sklearn.neighbors._typedefs
import sklearn.neighbors._partition_nodes
from tool.algorithm.block.blockprocess import BlockProcess
EXE_NAME = cf.get('exe_name')
@@ -148,8 +148,9 @@ class DeformationMain:
if os.path.exists(dem_path) is False:
os.mkdir(dem_path)
for file_path in para_path_list:
tif_name = os.path.basename(file_path)
shutil.copy(file_path, os.path.join(dem_path, tif_name))
BlockProcess.unzip_file(file_path, dem_path)
# tif_name = os.path.basename(file_path)
# shutil.copy(file_path, os.path.join(dem_path, tif_name))
para_path = os.path.join(self.__workspace_origin_path, para['ParaName'])
processing_paras.update({'dem': para_path})
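The hunk above swaps a plain copy of the DEM zip archives for in-place extraction, so dem_path now holds unpacked tifs instead of the archives themselves. The real BlockProcess.unzip_file is not shown in this diff; a minimal sketch of a helper with that shape, using only the standard library, might be:

import os
import zipfile

def unzip_file(zip_path, out_dir):
    # Assumed behaviour of BlockProcess.unzip_file: extract one archive into out_dir.
    os.makedirs(out_dir, exist_ok=True)
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(out_dir)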

@@ -1,396 +0,0 @@
"""Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"_markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in {"attlist", "linktype", "link", "element"}:
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in {"if", "else", "endif"}:
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
match = None
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in {"attlist", "element", "entity", "notation"}:
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan a name token and the new position and the token, or
# return -1 if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass

@@ -1,44 +0,0 @@
AUTHORS
=======
PGP key fingerprints are enclosed in parentheses.
* Alex Gaynor <alex.gaynor@gmail.com> (E27D 4AA0 1651 72CB C5D2 AF2B 125F 5C67 DFE9 4084)
* Hynek Schlawack <hs@ox.cx> (C2A0 4F86 ACE2 8ADC F817 DBB7 AE25 3622 7F69 F181)
* Donald Stufft <donald@stufft.io>
* Laurens Van Houtven <_@lvh.io> (D9DC 4315 772F 8E91 DD22 B153 DFD1 3DF7 A8DD 569B)
* Christian Heimes <christian@python.org>
* Paul Kehrer <paul.l.kehrer@gmail.com> (05FD 9FA1 6CF7 5735 0D91 A560 235A E5F1 29F9 ED98)
* Jarret Raim <jarito@gmail.com>
* Alex Stapleton <alexs@prol.etari.at> (A1C7 E50B 66DE 39ED C847 9665 8E3C 20D1 9BD9 5C4C)
* David Reid <dreid@dreid.org> (0F83 CC87 B32F 482B C726 B58A 9FBF D8F4 DA89 6D74)
* Matthew Lefkowitz <glyph@twistedmatrix.com> (06AB F638 E878 CD29 1264 18AB 7EC2 8125 0FBC 4A07)
* Konstantinos Koukopoulos <koukopoulos@gmail.com> (D6BD 52B6 8C99 A91C E2C8 934D 3300 566B 3A46 726E)
* Stephen Holsapple <sholsapp@gmail.com>
* Terry Chia <terrycwk1994@gmail.com>
* Matthew Iversen <matt@notevencode.com> (2F04 3DCC D6E6 D5AC D262 2E0B C046 E8A8 7452 2973)
* Mohammed Attia <skeuomorf@gmail.com>
* Michael Hart <michael.hart1994@gmail.com>
* Mark Adams <mark@markadams.me> (A18A 7DD3 283C CF2A B0CE FE0E C7A0 5E3F C972 098C)
* Gregory Haynes <greg@greghaynes.net> (6FB6 44BF 9FD0 EBA2 1CE9 471F B08F 42F9 0DC6 599F)
* Chelsea Winfree <chelsea.winfree@gmail.com>
* Steven Buss <steven.buss@gmail.com> (1FB9 2EC1 CF93 DFD6 B47F F583 B1A5 6C22 290D A4C3)
* Andre Caron <andre.l.caron@gmail.com>
* Jiangge Zhang <tonyseek@gmail.com> (BBEC 782B 015F 71B1 5FF7 EACA 1A8C AA98 255F 5000)
* Major Hayden <major@mhtx.net> (1BF9 9264 9596 0033 698C 252B 7370 51E0 C101 1FB1)
* Phoebe Queen <foibey@gmail.com> (10D4 7741 AB65 50F4 B264 3888 DA40 201A 072B C1FA)
* Google Inc.
* Amaury Forgeot d'Arc <amauryfa@google.com>
* Dirkjan Ochtman <dirkjan@ochtman.nl> (25BB BAC1 13C1 BFD5 AA59 4A4C 9F96 B929 3038 0381)
* Maximilian Hils <max@maximilianhils.com>
* Simo Sorce <simo@redhat.com>
* Thomas Sileo <t@a4.io>
* Fraser Tweedale <ftweedal@redhat.com>
* Ofek Lev <ofekmeister@gmail.com> (FFB6 B92B 30B1 7848 546E 9912 972F E913 DAD5 A46E)
* Erik Daguerre <fallenwolf@wolfthefallen.com>
* Aviv Palivoda <palaviv@gmail.com>
* Chris Wolfe <chriswwolfe@gmail.com>
* Jeremy Lainé <jeremy.laine@m4x.org>
* Denis Gladkikh <denis@gladkikh.email>
* John Pacific <me@johnpacific.com> (2CF6 0381 B5EF 29B7 D48C 2020 7BB9 71A0 E891 44D9)
* Marti Raudsepp <marti@juffo.org>

@@ -1,6 +0,0 @@
This software is made available under the terms of *either* of the licenses
found in LICENSE.APACHE or LICENSE.BSD. Contributions to cryptography are made
under the terms of *both* these licenses.
The code used in the OS random engine is derived from CPython, and is licensed
under the terms of the PSF License Agreement.

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,27 +0,0 @@
Copyright (c) Individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of PyCA Cryptography nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,41 +0,0 @@
1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
the Individual or Organization ("Licensee") accessing and otherwise using Python
2.7.12 software in source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python 2.7.12 alone or in any derivative
version, provided, however, that PSF's License Agreement and PSF's notice of
copyright, i.e., "Copyright © 2001-2016 Python Software Foundation; All Rights
Reserved" are retained in Python 2.7.12 alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates Python 2.7.12 or any part thereof, and wants to make the
derivative work available to others as provided herein, then Licensee hereby
agrees to include in any such work a brief summary of the changes made to Python
2.7.12.
4. PSF is making Python 2.7.12 available to Licensee on an "AS IS" basis.
PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
USE OF PYTHON 2.7.12 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.12
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.12, OR ANY DERIVATIVE
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material breach of
its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any relationship
of agency, partnership, or joint venture between PSF and Licensee. This License
Agreement does not grant permission to use PSF trademarks or trade name in a
trademark sense to endorse or promote products or services of Licensee, or any
third party.
8. By copying, installing or otherwise using Python 2.7.12, Licensee agrees
to be bound by the terms and conditions of this License Agreement.

@@ -1,136 +0,0 @@
Metadata-Version: 2.1
Name: cryptography
Version: 3.3.2
Summary: cryptography is a package which provides cryptographic recipes and primitives to Python developers.
Home-page: https://github.com/pyca/cryptography
Author: The cryptography developers
Author-email: cryptography-dev@python.org
License: BSD or Apache License, Version 2.0
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: License :: OSI Approved :: BSD License
Classifier: Natural Language :: English
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: POSIX
Classifier: Operating System :: POSIX :: BSD
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: Microsoft :: Windows
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Security :: Cryptography
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*
Description-Content-Type: text/x-rst
License-File: LICENSE
License-File: LICENSE.APACHE
License-File: LICENSE.BSD
License-File: LICENSE.PSF
License-File: AUTHORS.rst
Requires-Dist: six (>=1.4.1)
Requires-Dist: cffi (>=1.12)
Requires-Dist: enum34 ; python_version < '3'
Requires-Dist: ipaddress ; python_version < '3'
Provides-Extra: docs
Requires-Dist: sphinx (!=1.8.0,!=3.1.0,!=3.1.1,>=1.6.5) ; extra == 'docs'
Requires-Dist: sphinx-rtd-theme ; extra == 'docs'
Provides-Extra: docstest
Requires-Dist: doc8 ; extra == 'docstest'
Requires-Dist: pyenchant (>=1.6.11) ; extra == 'docstest'
Requires-Dist: twine (>=1.12.0) ; extra == 'docstest'
Requires-Dist: sphinxcontrib-spelling (>=4.0.1) ; extra == 'docstest'
Provides-Extra: pep8test
Requires-Dist: black ; extra == 'pep8test'
Requires-Dist: flake8 ; extra == 'pep8test'
Requires-Dist: flake8-import-order ; extra == 'pep8test'
Requires-Dist: pep8-naming ; extra == 'pep8test'
Provides-Extra: ssh
Requires-Dist: bcrypt (>=3.1.5) ; extra == 'ssh'
Provides-Extra: test
Requires-Dist: pytest (!=3.9.0,!=3.9.1,!=3.9.2,>=3.6.0) ; extra == 'test'
Requires-Dist: pretend ; extra == 'test'
Requires-Dist: iso8601 ; extra == 'test'
Requires-Dist: pytz ; extra == 'test'
Requires-Dist: hypothesis (!=3.79.2,>=1.11.4) ; extra == 'test'
pyca/cryptography
=================
.. image:: https://img.shields.io/pypi/v/cryptography.svg
:target: https://pypi.org/project/cryptography/
:alt: Latest Version
.. image:: https://readthedocs.org/projects/cryptography/badge/?version=latest
:target: https://cryptography.io
:alt: Latest Docs
.. image:: https://github.com/pyca/cryptography/workflows/CI/badge.svg?branch=master
:target: https://github.com/pyca/cryptography/actions?query=workflow%3ACI+branch%3Amaster
.. image:: https://codecov.io/github/pyca/cryptography/coverage.svg?branch=master
:target: https://codecov.io/github/pyca/cryptography?branch=master
``cryptography`` is a package which provides cryptographic recipes and
primitives to Python developers. Our goal is for it to be your "cryptographic
standard library". It supports Python 2.7, Python 3.6+, and PyPy 5.4+.
``cryptography`` includes both high level recipes and low level interfaces to
common cryptographic algorithms such as symmetric ciphers, message digests, and
key derivation functions. For example, to encrypt something with
``cryptography``'s high level symmetric encryption recipe:
.. code-block:: pycon
>>> from cryptography.fernet import Fernet
>>> # Put this somewhere safe!
>>> key = Fernet.generate_key()
>>> f = Fernet(key)
>>> token = f.encrypt(b"A really secret message. Not for prying eyes.")
>>> token
'...'
>>> f.decrypt(token)
'A really secret message. Not for prying eyes.'
You can find more information in the `documentation`_.
You can install ``cryptography`` with:
.. code-block:: console
$ pip install cryptography
For full details see `the installation documentation`_.
Discussion
~~~~~~~~~~
If you run into bugs, you can file them in our `issue tracker`_.
We maintain a `cryptography-dev`_ mailing list for development discussion.
You can also join ``#cryptography-dev`` on Freenode to ask questions or get
involved.
Security
~~~~~~~~
Need to report a security issue? Please consult our `security reporting`_
documentation.
.. _`documentation`: https://cryptography.io/
.. _`the installation documentation`: https://cryptography.io/en/latest/installation.html
.. _`issue tracker`: https://github.com/pyca/cryptography/issues
.. _`cryptography-dev`: https://mail.python.org/mailman/listinfo/cryptography-dev
.. _`security reporting`: https://cryptography.io/en/latest/security.html

@@ -1,97 +0,0 @@
cryptography-3.3.2.dist-info/AUTHORS.rst,sha256=MoKTlP6yOmnLC_KXarHVQP0sItBk11dtZ7LzV0VhNB0,2475
cryptography-3.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
cryptography-3.3.2.dist-info/LICENSE,sha256=Q9rSzHUqtyHNmp827OcPtTq3cTVR8tPYaU2OjFoG1uI,323
cryptography-3.3.2.dist-info/LICENSE.APACHE,sha256=qsc7MUj20dcRHbyjIJn2jSbGRMaBOuHk8F9leaomY_4,11360
cryptography-3.3.2.dist-info/LICENSE.BSD,sha256=YCxMdILeZHndLpeTzaJ15eY9dz2s0eymiSMqtwCPtPs,1532
cryptography-3.3.2.dist-info/LICENSE.PSF,sha256=aT7ApmKzn5laTyUrA6YiKUVHDBtvEsoCkY5O_g32S58,2415
cryptography-3.3.2.dist-info/METADATA,sha256=633UPUD3ojepCqe83P99A1w51VeAscaDTg3CS1ov7bw,5190
cryptography-3.3.2.dist-info/RECORD,,
cryptography-3.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
cryptography-3.3.2.dist-info/WHEEL,sha256=qN4i5kDbMd9ITQn9KMACG44Nnrayu7b_5zsXEXpun_M,110
cryptography-3.3.2.dist-info/direct_url.json,sha256=rJ9-4Dc7CpzxT6pTLmKzAHjQup8UFVRSwnlP_eoguio,174
cryptography-3.3.2.dist-info/top_level.txt,sha256=rR2wh6A6juD02TBZNJqqonh8x9UP9Sa5Z9Hl1pCPCiM,31
cryptography/__about__.py,sha256=x2f7Chx8oX2tr7vddOVqh8E-cMot6emZefrup1uQY6Y,835
cryptography/__init__.py,sha256=lJ5HUOGCKi9r-XG4Y3qXq9dhCFv8RqwZKZgkQjQLboA,964
cryptography/exceptions.py,sha256=NPtDqIq1lsQ1Gb1BXkjsGIvbMrWMaKCaT8epiSgi010,1259
cryptography/fernet.py,sha256=sg5RNOCKx9BrPV6wIfyXB9sDWJcw9-GPcPgN4lVmr8w,5980
cryptography/hazmat/__init__.py,sha256=hEPNQw8dgjIPIn42qaLwXNRLCyTGNZeSvkQb57DPhbs,483
cryptography/hazmat/_der.py,sha256=NkwxQBcrR_KMAZCM3WKidXgx8CHFVU5iBnoFIrhQMQs,5205
cryptography/hazmat/_oid.py,sha256=3L1KLxAsQJJoy15ZCl0T4I-PU-DVvzGS-ZTdS-PNy14,2432
cryptography/hazmat/backends/__init__.py,sha256=EEhjIZgqApO7coGuybLXyaEaWIHcdg8oC0i2vxQ4RSI,616
cryptography/hazmat/backends/interfaces.py,sha256=GXySHrpGLgeTrjUgxOYtK6viaphO1dDKAOA95JFj_pM,10770
cryptography/hazmat/backends/openssl/__init__.py,sha256=k4DMe228_hTuB2kY3Lwk62JdI3EmCd7VkV01zJm57ps,336
cryptography/hazmat/backends/openssl/aead.py,sha256=ljOSkI7NXgXi9OyfHjm9J07m3EVHFNm9kfHAIogSWtc,5765
cryptography/hazmat/backends/openssl/backend.py,sha256=CwITPFn7F3Bjxr_W6xFJcIIv-MLlQLJFdcnso8SS_U0,106372
cryptography/hazmat/backends/openssl/ciphers.py,sha256=aDTU8pMDjl2N3AKcYZO2jXpeqW9mV5rIOp0guPpKlp4,8608
cryptography/hazmat/backends/openssl/cmac.py,sha256=n34WXNXt-r0trp207u0cSKwGMth8qEiEs2jjgmHNtWE,2855
cryptography/hazmat/backends/openssl/decode_asn1.py,sha256=BS2Y-4ZudWl-CB_fZ0YqVYIOQrnv7ziOhjpo-QIq8_o,32332
cryptography/hazmat/backends/openssl/dh.py,sha256=1fZn8one2aSla85LIe6vXbf0qoLTDS-B7tYMcrJshnY,10239
cryptography/hazmat/backends/openssl/dsa.py,sha256=Cp1w1Z6J_PEW-Qd2RAzfC04MU9YxqYOaef57f_QVpYI,10036
cryptography/hazmat/backends/openssl/ec.py,sha256=c3DUb_AZ215f9AaAHyOKqBoNUSd6sFbUIDMbLrbLuLA,12071
cryptography/hazmat/backends/openssl/ed25519.py,sha256=fInLppwHZnYgwkQQ5MdsOCux_y3kfW-290EbGn-0bKE,5618
cryptography/hazmat/backends/openssl/ed448.py,sha256=Wp7dkPjb2Tyjzguh1bHwzXItMPqJq_A9-D7zCwHqnc8,5574
cryptography/hazmat/backends/openssl/encode_asn1.py,sha256=5tQmLfLEyKTm3Eg_GfhGJcPtuQ0Ef2OdbsgxVC2fcMc,24075
cryptography/hazmat/backends/openssl/hashes.py,sha256=n6XJwCI-2OU6FndiatFbE_Pgb3f1NoVuHwpCMW0z340,3117
cryptography/hazmat/backends/openssl/hmac.py,sha256=D_YcF2OiLSfrWtA7fksLiKWcaVh-G1igpqNHuM5l62c,2933
cryptography/hazmat/backends/openssl/ocsp.py,sha256=NEGrc30GfPBLbjnt-K3K48-dZK2dEyQa2oCyv7-laMs,14028
cryptography/hazmat/backends/openssl/poly1305.py,sha256=LiovW4SvSUhWA109IkLlw4nnokmF9m24V4pGxqoPmMI,2393
cryptography/hazmat/backends/openssl/rsa.py,sha256=hcBFzZ51LA2UJfVGf00xul5nWLeT-9Sz7ufCKls195w,19577
cryptography/hazmat/backends/openssl/utils.py,sha256=-JMyOgOlplSWL5zTu_3-vl5_gE1FBK3ew6n0Zs35QYo,2348
cryptography/hazmat/backends/openssl/x25519.py,sha256=-MNAPGS_DZ37-skSn17-gIakFLoJmuNx8PlC8s2-00g,4488
cryptography/hazmat/backends/openssl/x448.py,sha256=5WH3Rw7kZGLS3EDDVzjrYriAG-tzUnyWetyqMYTiEhA,4011
cryptography/hazmat/backends/openssl/x509.py,sha256=EMN9qSPW1BVZ1VAOHzgi8oO8idI8iOb0wrWjdrr5FpI,21620
cryptography/hazmat/bindings/__init__.py,sha256=0wGw2OF9R7fHX7NWENCmrsYigbXHU2ojgn-N4Rkjs9U,246
cryptography/hazmat/bindings/_openssl.abi3.dll,sha256=iUsiW2tZHOzE7jp78OHA2-g1hAV7oYvFWiRkXhEuEww,2366796
cryptography/hazmat/bindings/_padding.abi3.dll,sha256=arMD6x2xM0At_NxI28-TvHmvSmTPmGYhE_eEaiG3xw8,77631
cryptography/hazmat/bindings/openssl/__init__.py,sha256=0wGw2OF9R7fHX7NWENCmrsYigbXHU2ojgn-N4Rkjs9U,246
cryptography/hazmat/bindings/openssl/_conditional.py,sha256=6-EwpZeSqbLNRPhzsXFPTO498wLGaDXW-LvkqiJm4vQ,8291
cryptography/hazmat/bindings/openssl/binding.py,sha256=yT5e2JrzANd6FG__us6aj9ocb48EnNJK61cKwrpeM08,5816
cryptography/hazmat/primitives/__init__.py,sha256=0wGw2OF9R7fHX7NWENCmrsYigbXHU2ojgn-N4Rkjs9U,246
cryptography/hazmat/primitives/asymmetric/__init__.py,sha256=WhUn3tGxoLAxGAsZHElJ2aOILXSh55AZi04MBudYmQA,1020
cryptography/hazmat/primitives/asymmetric/dh.py,sha256=kuyPcccLeOYy4OuGkegEyqMSzRo-QyjlUw463jzfrGs,5859
cryptography/hazmat/primitives/asymmetric/dsa.py,sha256=XuE2mUXl-fXi2q7w22qKyiCTFUz-852cFTwV4WOUQgw,7181
cryptography/hazmat/primitives/asymmetric/ec.py,sha256=2rorlIEXHGkLnI8bbeFKMRr-gJfEipuJigQDQh4xk7w,14006
cryptography/hazmat/primitives/asymmetric/ed25519.py,sha256=rfImUQH-PcTliuxiF864aSww7dQCWVwZgjPPbDXiGlI,2401
cryptography/hazmat/primitives/asymmetric/ed448.py,sha256=JyrEHwYF_Ftj_E60t-Gmvm3CGnQSxVbasptZBW84eBk,2328
cryptography/hazmat/primitives/asymmetric/padding.py,sha256=2pPqBu4dGERtFPHnPRTZ0iRO_XY9hr9RTwlTcr_J5bw,2250
cryptography/hazmat/primitives/asymmetric/rsa.py,sha256=MgxdkA8PWlXGt2lMPpnV9QYYvQnYTFjb0RtJRDjnlfU,10672
cryptography/hazmat/primitives/asymmetric/utils.py,sha256=w2lQIcKrFvS9D_Ekt7qWed39TXM6hueg72FFrfwIo58,1201
cryptography/hazmat/primitives/asymmetric/x25519.py,sha256=vrN1jcO6sjbQrc7auIlf2aEvcH3P17cKUuaVXxaTvxI,2277
cryptography/hazmat/primitives/asymmetric/x448.py,sha256=u3v-L1IJIG2RyLVTh7FMkXh_Y-oVb3HdEj5b1c-JlKk,2255
cryptography/hazmat/primitives/ciphers/__init__.py,sha256=mi4yR3Fxc4-Au3yX4PyhFNaiFn0yywZKiTzecdI77EI,647
cryptography/hazmat/primitives/ciphers/aead.py,sha256=lXgZOxlbxtBp1k7KmlqgiN_Xu6yPsJE_DNJLwsgm0o0,6134
cryptography/hazmat/primitives/ciphers/algorithms.py,sha256=GKFIhvOoqsYscjjP7onl8XnAmOa-kSQ6jiMMS2zeGBM,4225
cryptography/hazmat/primitives/ciphers/base.py,sha256=vceN5l7yxLWmNTptlzC3gmfFY-K_ANKk4HdNl2Ptz2k,7253
cryptography/hazmat/primitives/ciphers/modes.py,sha256=-0VTtHN3kKO_Jyc_iLAgp8bqtsXJY5V2F__Bkr6nvtM,6805
cryptography/hazmat/primitives/cmac.py,sha256=eJpysDFbc7W6OiplzWKWrL4owy30Cq6Nsao8mzapqbE,2130
cryptography/hazmat/primitives/constant_time.py,sha256=_x4mrHW-9ihfgY89BwhATFiIuG2_1l-HMkCxmOUkydM,430
cryptography/hazmat/primitives/hashes.py,sha256=dzL1QcEFj4eElzczo8QmuOeooZ96EFwBy3c-6cpew0w,6315
cryptography/hazmat/primitives/hmac.py,sha256=AYzTQMDiruKmZKKLR6ceVjX5yQ3mpciWIx__tpNLyr4,2306
cryptography/hazmat/primitives/kdf/__init__.py,sha256=nod5HjPswjZr8wFp6Tsu6en9blHYF3khgXI5R0zIcnM,771
cryptography/hazmat/primitives/kdf/concatkdf.py,sha256=gW-xAU6sPE6aZhg_G9ucZ5b_uctSbPcfSpHyyt7Q8MA,4095
cryptography/hazmat/primitives/kdf/hkdf.py,sha256=SJJQzeQ9OH0t3tUdUq2GT6IQXv9oPLDjulT7wnLTkMg,3598
cryptography/hazmat/primitives/kdf/kbkdf.py,sha256=awf7zessT-amokp2VBdyW8TWrDnmTXGzHHX4scBO9Uc,5100
cryptography/hazmat/primitives/kdf/pbkdf2.py,sha256=RYexIlGomzUEU-_QQXTW81rdY5YVZB30XrfnJq8NsIU,2220
cryptography/hazmat/primitives/kdf/scrypt.py,sha256=C0C3m-gEnlLlAVxzRFdzx1mfDuWs_BkZDoSV2hfahfk,2268
cryptography/hazmat/primitives/kdf/x963kdf.py,sha256=26-b_ckyUYiqbWM9mZ7FEWbuvR7eTLksIeWQeW1TJ04,2407
cryptography/hazmat/primitives/keywrap.py,sha256=fF-HA5ETz9RH8s8LB94uDoWRLPvwPkYAC5_Kylej6sA,5730
cryptography/hazmat/primitives/padding.py,sha256=zeJmjPfX8Cx_gqO45FDBNe8iN2trPr0ULyBsz1Kmyu4,6173
cryptography/hazmat/primitives/poly1305.py,sha256=NNC1WYiYQGNJ8mblkaHRxBm1PLdaKRzkILocsYH5zgY,1679
cryptography/hazmat/primitives/serialization/__init__.py,sha256=eLzmqoHgVlPK1aTGiEfpaIrUf9mX5PRrM7IHEc8FeQU,1132
cryptography/hazmat/primitives/serialization/base.py,sha256=ZSzV-5zl2Bt_mmihcPqieBC6UjMSryUaehgExvjZksg,2249
cryptography/hazmat/primitives/serialization/pkcs12.py,sha256=oJxangAtSSsniXfguLaoPgejVchs-VpCTBdWSW4rF54,1853
cryptography/hazmat/primitives/serialization/pkcs7.py,sha256=vGlw_2R4VeLWtoRxkfz8fMLE5i_CCdaY9bEtYMV62rk,4625
cryptography/hazmat/primitives/serialization/ssh.py,sha256=a_FKWuqpHO-RzUBEoBWS5q7WyMZwS56MD92Wr6j3KBA,21682
cryptography/hazmat/primitives/twofactor/__init__.py,sha256=BWrm3DKDoAa281E7U_nzz8v44OmAiXmlIycFcsehwfE,288
cryptography/hazmat/primitives/twofactor/hotp.py,sha256=2uCTCTHMFmWL9kOjA890F0CVrljsvOjJYISKBup7GyI,2679
cryptography/hazmat/primitives/twofactor/totp.py,sha256=iJRTxPNWPdsTQHePgSE6KGdRNURTv188VNqpyvBwvBY,1780
cryptography/hazmat/primitives/twofactor/utils.py,sha256=ZKZSOL2cLsGCsSNfx3kYlYt91A4bcU1w9up2EL1hwaA,982
cryptography/utils.py,sha256=QpZgLOABfeaDciPlrF-W8giJiOL2AzU6Ajjq6h6WkzY,4745
cryptography/x509/__init__.py,sha256=1juFH-nvLS7kU0x52VMN7pN6s7H55Y86NqUszaBhhi4,7699
cryptography/x509/base.py,sha256=burWvWUouPiPzmPUzNZUzEe64gR-WMkNyiDpjYCvEc8,26409
cryptography/x509/certificate_transparency.py,sha256=eJ9lrITdyMn4XsrcVdrTaFVI_RR7mX_VzMZyiaEpbps,1000
cryptography/x509/extensions.py,sha256=HOwYCKAy-4qK5eWWYB4UnJejC9Ru3FBQMsLXodasR9Y,52924
cryptography/x509/general_name.py,sha256=nNIG--rJ-TzREkhEq727Fe3tjvxVflW7iPIMjJs6LrI,7942
cryptography/x509/name.py,sha256=j2khdee8jQBkbZd4RV60ji8V0ZngbsB07i5cnflDBPk,8291
cryptography/x509/ocsp.py,sha256=nr5Bk3B_b9LaG-1njEmo0f_smAg2B6CU5Wr6wMr81MI,13245
cryptography/x509/oid.py,sha256=Wp6Y4WMrFa7vsUmV4tbMvPPAl0Iiu4QxQ7on2np94QU,12594

@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.0)
Root-Is-Purelib: false
Tag: cp39-cp39-cygwin_3_3_3_x86_64

@@ -1 +0,0 @@
{"archive_info": {}, "url": "file:///pub/devel/python/python-cryptography/python-cryptography-3.3.2-1.x86_64/build/dist/cryptography-3.3.2-cp39-cp39-cygwin_3_3_3_x86_64.whl"}

@@ -1,3 +0,0 @@
_openssl
_padding
cryptography

@@ -1,180 +0,0 @@
'''
OpenCV Python binary extension loader
'''
import os
import importlib
import sys
__all__ = []
try:
import numpy
import numpy.core.multiarray
except ImportError:
print('OpenCV bindings requires "numpy" package.')
print('Install it via command:')
print(' pip install numpy')
raise
# TODO
# is_x64 = sys.maxsize > 2**32
def __load_extra_py_code_for_module(base, name, enable_debug_print=False):
module_name = "{}.{}".format(__name__, name)
export_module_name = "{}.{}".format(base, name)
native_module = sys.modules.pop(module_name, None)
try:
py_module = importlib.import_module(module_name)
except ImportError as err:
if enable_debug_print:
print("Can't load Python code for module:", module_name,
". Reason:", err)
# Extension doesn't contain extra py code
return False
if not hasattr(base, name):
setattr(sys.modules[base], name, py_module)
sys.modules[export_module_name] = py_module
# If it is C extension module it is already loaded by cv2 package
if native_module:
setattr(py_module, "_native", native_module)
for k, v in filter(lambda kv: not hasattr(py_module, kv[0]),
native_module.__dict__.items()):
if enable_debug_print: print(' symbol: {} = {}'.format(k, v))
setattr(py_module, k, v)
return True
def __collect_extra_submodules(enable_debug_print=False):
def modules_filter(module):
return all((
# module is not internal
not module.startswith("_"),
# it is not a file
os.path.isdir(os.path.join(_extra_submodules_init_path, module))
))
if sys.version_info[0] < 3:
if enable_debug_print:
print("Extra submodules is loaded only for Python 3")
return []
__INIT_FILE_PATH = os.path.abspath(__file__)
_extra_submodules_init_path = os.path.dirname(__INIT_FILE_PATH)
return filter(modules_filter, os.listdir(_extra_submodules_init_path))
def bootstrap():
import sys
import copy
save_sys_path = copy.copy(sys.path)
if hasattr(sys, 'OpenCV_LOADER'):
print(sys.path)
raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.')
sys.OpenCV_LOADER = True
DEBUG = False
if hasattr(sys, 'OpenCV_LOADER_DEBUG'):
DEBUG = True
import platform
if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system())))
LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
PYTHON_EXTENSIONS_PATHS = []
BINARIES_PATHS = []
g_vars = globals()
l_vars = locals()
if sys.version_info[:2] < (3, 0):
from . load_config_py2 import exec_file_wrapper
else:
from . load_config_py3 import exec_file_wrapper
def load_first_config(fnames, required=True):
for fname in fnames:
fpath = os.path.join(LOADER_DIR, fname)
if not os.path.exists(fpath):
if DEBUG: print('OpenCV loader: config not found, skip: {}'.format(fpath))
continue
if DEBUG: print('OpenCV loader: loading config: {}'.format(fpath))
exec_file_wrapper(fpath, g_vars, l_vars)
return True
if required:
raise ImportError('OpenCV loader: missing configuration file: {}. Check OpenCV installation.'.format(fnames))
load_first_config(['config.py'], True)
load_first_config([
'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]),
'config-{}.py'.format(sys.version_info[0])
], True)
if DEBUG: print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS'])))
if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS'])))
applySysPathWorkaround = False
if hasattr(sys, 'OpenCV_REPLACE_SYS_PATH_0'):
applySysPathWorkaround = True
else:
try:
BASE_DIR = os.path.dirname(LOADER_DIR)
if sys.path[0] == BASE_DIR or os.path.realpath(sys.path[0]) == BASE_DIR:
applySysPathWorkaround = True
except:
if DEBUG: print('OpenCV loader: exception during checking workaround for sys.path[0]')
pass # applySysPathWorkaround is False
for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']):
sys.path.insert(1 if not applySysPathWorkaround else 0, p)
if os.name == 'nt':
if sys.version_info[:2] >= (3, 8): # https://github.com/python/cpython/pull/12302
for p in l_vars['BINARIES_PATHS']:
try:
os.add_dll_directory(p)
except Exception as e:
if DEBUG: print('Failed os.add_dll_directory(): '+ str(e))
pass
os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '')
if DEBUG: print('OpenCV loader: PATH={}'.format(str(os.environ['PATH'])))
else:
# amending of LD_LIBRARY_PATH works for sub-processes only
os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '')
if DEBUG: print("Relink everything from native cv2 module to cv2 package")
py_module = sys.modules.pop("cv2")
native_module = importlib.import_module("cv2")
sys.modules["cv2"] = py_module
setattr(py_module, "_native", native_module)
for item_name, item in filter(lambda kv: kv[0] not in ("__file__", "__loader__", "__spec__",
"__name__", "__package__"),
native_module.__dict__.items()):
if item_name not in g_vars:
g_vars[item_name] = item
sys.path = save_sys_path # multiprocessing should start from bootstrap code (https://github.com/opencv/opencv/issues/18502)
try:
del sys.OpenCV_LOADER
except Exception as e:
if DEBUG:
print("Exception during delete OpenCV_LOADER:", e)
if DEBUG: print('OpenCV loader: binary extension... OK')
for submodule in __collect_extra_submodules(DEBUG):
if __load_extra_py_code_for_module("cv2", submodule, DEBUG):
if DEBUG: print("Extra Python code for", submodule, "is loaded")
if DEBUG: print('OpenCV loader: DONE')
bootstrap()

@@ -1,3 +0,0 @@
PYTHON_EXTENSIONS_PATHS = [
os.path.join(LOADER_DIR, 'python-3.9')
] + PYTHON_EXTENSIONS_PATHS

@@ -1,5 +0,0 @@
import os
BINARIES_PATHS = [
os.path.join(os.path.join(LOADER_DIR, '../../../../'), 'lib')
] + BINARIES_PATHS

@@ -1,6 +0,0 @@
# flake8: noqa
import sys
if sys.version_info[:2] < (3, 0):
def exec_file_wrapper(fpath, g_vars, l_vars):
execfile(fpath, g_vars, l_vars)

@@ -1,9 +0,0 @@
# flake8: noqa
import os
import sys
if sys.version_info[:2] >= (3, 0):
def exec_file_wrapper(fpath, g_vars, l_vars):
with open(fpath) as f:
code = compile(f.read(), os.path.basename(fpath), 'exec')
exec(code, g_vars, l_vars)

@@ -1,33 +0,0 @@
__all__ = []
import sys
import numpy as np
import cv2 as cv
# NumPy documentation: https://numpy.org/doc/stable/user/basics.subclassing.html
class Mat(np.ndarray):
'''
cv.Mat wrapper for numpy array.
Stores extra metadata information how to interpret and process of numpy array for underlying C++ code.
'''
def __new__(cls, arr, **kwargs):
obj = arr.view(Mat)
return obj
def __init__(self, arr, **kwargs):
self.wrap_channels = kwargs.pop('wrap_channels', getattr(arr, 'wrap_channels', False))
if len(kwargs) > 0:
raise TypeError('Unknown parameters: {}'.format(repr(kwargs)))
def __array_finalize__(self, obj):
if obj is None:
return
self.wrap_channels = getattr(obj, 'wrap_channels', None)
Mat.__module__ = cv.__name__
cv.Mat = Mat
cv._registerMatType(Mat)

@@ -1 +0,0 @@
from .version import get_ocv_version

@@ -1,5 +0,0 @@
import cv2
def get_ocv_version():
return getattr(cv2, "__version__", "unavailable")

@@ -1,14 +0,0 @@
from collections import namedtuple
import cv2
NativeMethodPatchedResult = namedtuple("NativeMethodPatchedResult",
("py", "native"))
def testOverwriteNativeMethod(arg):
return NativeMethodPatchedResult(
arg + 1,
cv2.utils._native.testOverwriteNativeMethod(arg)
)

Some files were not shown because too many files have changed in this diff.