1.更新土壤类算法lee滤波模块,更新地理编码为双线性插值,更新分类算法处理流程,修改正射流程,线性->

dev
tian jiax 2023-10-30 09:33:41 +08:00
parent ded2843ceb
commit edf1899e8c
22 changed files with 3676 additions and 364 deletions

View File

@ -100,10 +100,27 @@ class ScatteringAlg:
coef_arr[np.isinf(coef_arr)] = -9999 coef_arr[np.isinf(coef_arr)] = -9999
coef_arr[where_9999_0] = -9999 coef_arr[where_9999_0] = -9999
coef_arr[where_9999_1] = -9999 coef_arr[where_9999_1] = -9999
# 输出的SAR后向散射系数产品 ## 输出的SAR后向散射系数产品
ImageHandler.write_img(out_sar_tif, proj, geotrans, coef_arr, -9999) # ImageHandler.write_img(out_sar_tif, proj, geotrans, coef_arr, 0)
tif_array = np.power(10.0, coef_arr / 10.0) # dB --> 线性值 后向散射系数
tif_array[np.isnan(tif_array)] = 0
tif_array[np.isinf(tif_array)] = 0
tif_array[where_9999_0] = 0
tif_array[where_9999_1] = 0
ImageHandler.write_img(out_sar_tif, proj, geotrans, tif_array, 0)
return True return True
@staticmethod
def lin_to_db(lin_path, db_path):
    """Convert a linear-scale backscatter GeoTIFF to decibels (dB).

    Reads *lin_path*, computes ``10 * log10(value)`` per pixel and writes
    the result to *db_path* with -9999 declared as the nodata value.

    Args:
        lin_path: path to the input GeoTIFF holding linear backscatter values.
        db_path: output path for the dB-scaled GeoTIFF.
    """
    proj, geotrans, in_data = ImageHandler.read_img(lin_path)
    # Zero/negative linear values produce -inf/nan under log10; silence the
    # warnings here and map those pixels to nodata below.
    with np.errstate(divide='ignore', invalid='ignore'):
        db_arr = 10 * np.log10(in_data)
    # Bug fix: the nodata value written to the file is -9999, so invalid
    # pixels must actually carry -9999 instead of nan/inf (previously these
    # masking lines were commented out, leaving the file inconsistent).
    db_arr[np.isnan(db_arr)] = -9999
    db_arr[np.isinf(db_arr)] = -9999
    ImageHandler.write_img(db_path, proj, geotrans, db_arr, -9999)
@ -1156,6 +1173,8 @@ class Orthorectification(object):
# 12、PRF # 12、PRF
HeaderInformation_json['PRF'] = float( HeaderInformation_json['PRF'] = float(
FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['PRF']['NodePath'])) FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['PRF']['NodePath']))
HeaderInformation_json['Fs'] = float(
FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['Fs']['NodePath']))
# 13、中心时间 # 13、中心时间
HeaderInformation_json['ImageInformation']['CenterTime'] = datetime.datetime.strptime( HeaderInformation_json['ImageInformation']['CenterTime'] = datetime.datetime.strptime(
FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['CenterImageTime']['NodePath']), FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['CenterImageTime']['NodePath']),
@ -1178,6 +1197,7 @@ class Orthorectification(object):
self.heightspace=HeaderInformation_json['ImageInformation']['ImageHeightSpace'] self.heightspace=HeaderInformation_json['ImageInformation']['ImageHeightSpace']
self.refrange=HeaderInformation_json['ImageInformation']['refRange'] self.refrange=HeaderInformation_json['ImageInformation']['refRange']
self.nearrange=HeaderInformation_json['ImageInformation']['NearRange'] self.nearrange=HeaderInformation_json['ImageInformation']['NearRange']
self.Fs = HeaderInformation_json['Fs']*1e6 # Mhz
return HeaderInformation_json return HeaderInformation_json
pass pass
@ -1440,7 +1460,9 @@ class IndirectOrthorectification(Orthorectification):
fp.write("{}\n".format(self.header_info['ImageInformation']['StartTime'])) fp.write("{}\n".format(self.header_info['ImageInformation']['StartTime']))
fp.write("{}\n".format(self.header_info['PRF'])) fp.write("{}\n".format(self.header_info['PRF']))
fp.write("{}\n".format(self.refrange)) fp.write("{}\n".format(self.refrange))
fp.write("{}\n".format(self.widthspace)) fp.write("{}\n".format(self.Fs))
fp.write("{}\n".format(self.header_info['ImageInformation']['DopplerParametersReferenceTime']))
#fp.write("{}\n".format(self.widthspace))
# 多普勒系数 # 多普勒系数
fp.write("{}\n".format(len(self.header_info['ImageInformation']['DopplerCentroidCoefficients']))) fp.write("{}\n".format(len(self.header_info['ImageInformation']['DopplerCentroidCoefficients'])))
@ -1474,6 +1496,8 @@ class IndirectOrthorectification(Orthorectification):
fp.write("{}".format(startTime.tm_mday)) fp.write("{}".format(startTime.tm_mday))
self.paramterFile_path=outparameter_path self.paramterFile_path=outparameter_path
def IndirectOrthorectification(self, FilePath_str,workspace_dir): def IndirectOrthorectification(self, FilePath_str,workspace_dir):
""" """
正射校正组件 正射校正组件
@ -1601,6 +1625,32 @@ class IndirectOrthorectification(Orthorectification):
print(exe_cmd) print(exe_cmd)
print(os.system(exe_cmd)) print(os.system(exe_cmd))
print("==========================================================================") print("==========================================================================")
def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar):
    """Geocode a SAR image with bilinear interpolation via SIMOrthoProgram (mode 11).

    External tool invocation:
        SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path
    """
    tool_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
    cmd_template = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}"
    command = cmd_template.format(tool_path, 11, parameter_path, dem_rc, in_sar, out_sar)
    print(command)
    # os.system echoes the tool's exit status; the command string sets
    # PROJ_LIB so the bundled PROJ data is found by the executable.
    print(os.system(command))
    print("==========================================================================")
def lee_process_sar(self, in_sar, out_sar, win_size, noise_var):
    """Apply the external Lee speckle filter via SIMOrthoProgram (mode 12).

    External tool invocation:
        SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var
    """
    tool_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
    cmd_template = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}"
    command = cmd_template.format(tool_path, 12, in_sar, out_sar, win_size, noise_var)
    print(command)
    # Echo the shell exit status for logging/diagnostics.
    print(os.system(command))
    print("==========================================================================")
def getPowerTif(self,in_ori_path,out_power_path): def getPowerTif(self,in_ori_path,out_power_path):
''' '''

View File

@ -11,6 +11,7 @@
import logging import logging
from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.algorithm.algtools.PreProcess import PreProcess as pp
import tarfile import tarfile
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource # 导入xml文件读取与检查文件 from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource # 导入xml文件读取与检查文件
from OrthoAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2 from OrthoAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2
@ -126,7 +127,8 @@ class OrthoMain:
def check_source(self): def check_source(self):
""" """
检查算法相关的配置文件图像辅助文件是否齐全 检查算法相关的配置文件
辅助文件是否齐全
""" """
if self.__check_handler.check_alg_xml() is False: if self.__check_handler.check_alg_xml() is False:
return False return False
@ -332,7 +334,7 @@ class OrthoMain:
para_dic.update({name1: file_dir}) # {SLC: file_path} para_dic.update({name1: file_dir}) # {SLC: file_path}
# 获取文件夹内的文件 # 获取文件夹内的文件
hh_flag, hv_flag, vh_flag, vv_flag ,dh_flag= 0, 0, 0, 0 ,0 # hh_flag, hv_flag, vh_flag, vv_flag, dh_flag = 0, 0, 0, 0, 0 #
if os.path.exists(file_dir + name + '\\'): if os.path.exists(file_dir + name + '\\'):
in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif'))) in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
if in_tif_paths == []: if in_tif_paths == []:
@ -492,6 +494,22 @@ class OrthoMain:
left_up_lon = 0 left_up_lon = 0
left_up_lat = 0 left_up_lat = 0
def process_sim_ori(self, ori_sim, sim_ori):
    """Clip *sim_ori* to the geographic footprint of *ori_sim*.

    Derives the scope polygon of ori_sim, writes it as a WGS84 (EPSG:4326)
    shapefile in the temporary workspace, cuts sim_ori with that shapefile
    and returns the path of the clipped raster.

    Raises:
        Exception: when the intersect polygon cannot be built or the
            shapefile cannot be written.
    """
    footprint = (ImageHandler.get_scope_ori_sim(ori_sim),)
    polygon = pp().intersect_polygon(footprint)
    if polygon is None:
        raise Exception('create intersect shp fail!')
    shp_path = os.path.join(self.__workspace_Temporary_path, 'IntersectPolygon.shp')
    if pp().write_polygon_shp(shp_path, polygon, 4326) is False:
        raise Exception('create intersect shp fail!')
    clipped_path = os.path.join(self.__workspace_Temporary_path, 'sim_ori_process.tif')
    pp().cut_img(clipped_path, sim_ori, shp_path)
    return clipped_path
def RD_process_handle(self): def RD_process_handle(self):
# RPC # RPC
@ -519,7 +537,7 @@ class OrthoMain:
# 3 处理RD # 3 处理RD
in_slc_path=None in_slc_path=None
for slc_path in os.listdir(slc_paths): for slc_path in os.listdir(slc_paths):
if slc_path.find(".tiff")>0 and (slc_path.find("_HH_")>0 or slc_path.find("_VV_")>0 ): if slc_path.find(".tiff")>0 and (slc_path.find("_HH_")>0 or slc_path.find("_VV_")>0 or slc_path.find("_DH_")>0):
in_slc_path=os.path.join(slc_paths,slc_path) in_slc_path=os.path.join(slc_paths,slc_path)
break break
# 获取校正模型后 # 获取校正模型后
@ -548,6 +566,15 @@ class OrthoMain:
if(os.path.exists(this_out_dem_rc_path)): if(os.path.exists(this_out_dem_rc_path)):
os.remove(this_out_dem_rc_path) os.remove(this_out_dem_rc_path)
this_out_sar_sim_path = out_dir_path + "\\" + "sar_sim.tiff"
if (os.path.exists(this_out_sar_sim_path)):
os.remove(this_out_sar_sim_path)
this_out_sar_sim_wgs_path = out_dir_path + "\\" + "sar_sim_wgs.tiff" # // 经纬度与行列号映射
if (os.path.exists(this_out_sar_sim_wgs_path)):
os.remove(this_out_sar_sim_wgs_path)
this_out_incidence_path = out_dir_path + "\\" + "incidentAngle.tiff"#// 入射角 this_out_incidence_path = out_dir_path + "\\" + "incidentAngle.tiff"#// 入射角
this_out_localIncidenct_path = out_dir_path + "\\" + "localincidentAngle.tiff"#// 局地入射角 this_out_localIncidenct_path = out_dir_path + "\\" + "localincidentAngle.tiff"#// 局地入射角
this_out_inc_angle_rpc_path = out_dir_path + "\\" + "RD_incidentAngle.tiff"#// 局地入射角 this_out_inc_angle_rpc_path = out_dir_path + "\\" + "RD_incidentAngle.tiff"#// 局地入射角
@ -568,28 +595,47 @@ class OrthoMain:
this_out_ori_sim_tiff = out_dir_path + "\\" + "RD_ori_sim.tif"#// 局地入射角 this_out_ori_sim_tiff = out_dir_path + "\\" + "RD_ori_sim.tif"#// 局地入射角
if (os.path.exists(this_out_ori_sim_tiff)): if (os.path.exists(this_out_ori_sim_tiff)):
shutil.move(this_out_ori_sim_tiff, out_dir_path + "\\" + "ori_sim-ortho.tif") shutil.move(this_out_ori_sim_tiff, out_dir_path + "\\" + "ori_sim-ortho.tif")
this_in_rpc_lon_lat_path = this_out_ori_sim_tiff
this_out_sim_ori_tiff = out_dir_path + "\\" + "RD_sim_ori.tif" # // 局地入射角
if (os.path.exists(this_out_sim_ori_tiff)):
shutil.move(this_out_sim_ori_tiff, out_dir_path + "\\" + "sim_ori-ortho.tif")
# GTC 入射角 # GTC 入射角
GTC_rc_path=os.path.join(self.__workspace_package_path,"ori_sim-ortho.tif") GTC_rc_path=os.path.join(self.__workspace_package_path,"ori_sim-ortho.tif")
GTC_out_path=self.__workspace_package_path GTC_out_path=self.__workspace_package_path
parameter_path = os.path.join(self.__workspace_package_path, "orth_para.txt") parameter_path = os.path.join(self.__workspace_package_path, "orth_para.txt")
dem_rc = os.path.join(self.__workspace_Temporary_path, "dem_rc.tiff") this_in_rpc_lon_lat_path = os.path.join(self.__workspace_package_path, "ori_sim-ortho.tif")
dem_rc = os.path.join(self.__workspace_package_path, "sim_ori-ortho.tif")
dem_rc_pro = self.process_sim_ori(this_in_rpc_lon_lat_path, dem_rc)
shutil.move(dem_rc_pro, dem_rc)
in_tif_paths = list(glob.glob(os.path.join(slc_paths, '*.tiff'))) in_tif_paths = list(glob.glob(os.path.join(slc_paths, '*.tiff')))
for in_tif_path in in_tif_paths: for in_tif_path in in_tif_paths:
out_sar_path = os.path.join(GTC_out_path, os.path.split(in_tif_path)[1]) out_sar_path = os.path.join(GTC_out_path, os.path.split(in_tif_path)[1])
slc_path_temp=os.path.join(slc_paths,in_tif_path) slc_path_temp=os.path.join(slc_paths,in_tif_path)
out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","_db.tif").replace("L1A","L1B")).replace("HH","h_h").replace("HV","h_v").replace("VH","v_h").replace("VV","v_v") out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","-lin.tif").replace("L1A","L1B")).replace("HH","h_h").replace("HV","h_v").replace("VH","v_h").replace("VV","v_v").replace("DH","d_h")
# out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","_db.tif")) # out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","_db.tif"))
alg.sar_backscattering_coef(slc_path_temp,self.__in_processing_paras['META'],out_power_path) alg.sar_backscattering_coef(slc_path_temp,self.__in_processing_paras['META'],out_power_path)
temp_slc_path=os.path.join(self.__workspace_package_path, os.path.basename(out_power_path))
temp_slc_path=temp_slc_path.replace("_db.tif","-ortho.tif") lin_tif_path = os.path.join(self.__workspace_Temporary_path,
os.path.basename(out_power_path).split('-')[0] + "-lin_geo.tif")
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc,
# out_power_path,
# lin_tif_path)
Orthorectification.calInterpolation_bil_Wgs84_rc_sar_sigma(parameter_path, dem_rc,
out_power_path,
lin_tif_path)
tempout_tif_path = os.path.join(self.__workspace_package_path, os.path.basename(lin_tif_path).split('-')[0] + "-ortho.tif")
alg.lin_to_db(lin_tif_path, tempout_tif_path) # 线性值转回DB值
# temp_slc_path=os.path.join(self.__workspace_package_path, os.path.basename(out_power_path))
# temp_slc_path=temp_slc_path.replace("_db.tif","-ortho.tif")
#inter_Range2Geo(self,lon_lat_path , data_tiff , grid_path , space) #inter_Range2Geo(self,lon_lat_path , data_tiff , grid_path , space)
# Orthorectification.inter_Range2Geo(GTC_rc_path,out_power_path,temp_slc_path,Orthorectification.heightspace) # Orthorectification.inter_Range2Geo(GTC_rc_path,out_power_path,temp_slc_path,Orthorectification.heightspace)
Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_power_path, temp_slc_path) # Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_power_path, temp_slc_path) #
break # break
#Orth_Slc.append(temp_slc_path) #Orth_Slc.append(temp_slc_path)
# power_list.append(out_power_path) # power_list.append(out_power_path)
@ -619,7 +665,7 @@ class OrthoMain:
# 生成元文件案例 # 生成元文件案例
# xml_path = "./model_meta.xml" # xml_path = "./model_meta.xml"
tem_folder=self.__workspace_path + EXE_NAME + r"\Temporary""\\" tem_folder=self.__workspace_path + EXE_NAME + r"\Temporary""\\"
image_path=temp_slc_path# os.path.join(self.__workspace_package_path, "OrthoMapTable.tif") image_path=tempout_tif_path# os.path.join(self.__workspace_package_path, "OrthoMapTable.tif")
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
# par_dict = CreateDict().calu_nature(image_path, self.processinfo, out_path1, out_path2) # par_dict = CreateDict().calu_nature(image_path, self.processinfo, out_path1, out_path2)
@ -639,10 +685,20 @@ class OrthoMain:
meta_xml_path = os.path.join(self.__workspace_package_path, os.path.basename(self.__out_para).replace(".tar.gz",".meta.xml")) meta_xml_path = os.path.join(self.__workspace_package_path, os.path.basename(self.__out_para).replace(".tar.gz",".meta.xml"))
para_dict = CreateMetaDict(image_path, self.__in_processing_paras['META'], self.__workspace_package_path, out_path1, out_path2).calu_nature() para_dict = CreateMetaDict(image_path, self.__in_processing_paras['META'], self.__workspace_package_path, out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": '正射校正'})
para_dict.update({"imageinfo_ProductIdentifier": 'Ortho'})
para_dict.update({"imageinfo_ProductLevel": '3A'})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "DEM"}) para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "DEM"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
# 生成压缩包 # 生成压缩包
logger.info('progress bar :94') logger.info('progress bar :94')
logger.info('start make targz..') logger.info('start make targz..')
@ -678,7 +734,7 @@ if __name__ == '__main__':
except Exception: except Exception:
logger.exception("run-time error!") logger.exception("run-time error!")
finally: finally:
# OrthoMain.del_temp_workspace() OrthoMain.del_temp_workspace()
pass pass
end = datetime.datetime.now() end = datetime.datetime.now()
logger.info('running use time: %s ' % (end - start)) logger.info('running use time: %s ' % (end - start))

File diff suppressed because it is too large Load Diff

View File

@ -9,16 +9,13 @@
@Version 1.0.0 @Version 1.0.0
""" """
import logging import logging
# from BackScatteringAlg import ScatteringAlg as alg
# from BackScatteringAlg import rpc_correction,getRCImageRC
from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource
from tool.algorithm.xml.CreatMetafile import CreateMetafile
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from BackScatteringXmlInfo import CreateDict, CreateStadardXmlFile
from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.image.ImageHandle import ImageHandler
from OrthoAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2 from tool.algorithm.algtools.PreProcess import PreProcess as pp
from OrthoAlg import ScatteringAlg as alg from BackScatteringAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2
from BackScatteringAlg import ScatteringAlg as alg
from tool.config.ConfigeHandle import Config as cf from tool.config.ConfigeHandle import Config as cf
import os import os
import glob import glob
@ -243,6 +240,21 @@ class ScatteringMain:
if os.path.exists(path): if os.path.exists(path):
self.del_floder(path) self.del_floder(path)
def process_sim_ori(self, ori_sim, sim_ori):
    """Clip *sim_ori* to the geographic footprint of *ori_sim*.

    Builds the scope polygon of ori_sim, writes it to a WGS84 (EPSG:4326)
    shapefile in the preprocessing workspace, then cuts sim_ori with it.

    Returns:
        Path of the clipped raster ('sim_ori_process.tif').

    Raises:
        Exception: if the intersect polygon or shapefile creation fails.
    """
    footprint = (ImageHandler.get_scope_ori_sim(ori_sim),)
    polygon = pp().intersect_polygon(footprint)
    if polygon is None:
        raise Exception('create intersect shp fail!')
    shp_path = os.path.join(self.__workspace_preprocessing_path, 'IntersectPolygon.shp')
    if pp().write_polygon_shp(shp_path, polygon, 4326) is False:
        raise Exception('create intersect shp fail!')
    clipped_path = os.path.join(self.__workspace_preprocessing_path, 'sim_ori_process.tif')
    pp().cut_img(clipped_path, sim_ori, shp_path)
    return clipped_path
def process_handle(self,start): def process_handle(self,start):
in_tif_paths = list(glob.glob(os.path.join(self.__in_processing_paras['SLC'], '*.tif'))) in_tif_paths = list(glob.glob(os.path.join(self.__in_processing_paras['SLC'], '*.tif')))
if in_tif_paths == []: if in_tif_paths == []:
@ -311,7 +323,16 @@ class ScatteringMain:
this_out_dem_rc_path = os.path.join(out_dir_path, "WGS_SAR_map.tiff") # out_dir_path + "\\" + "WGS_SAR_map.tiff"#// 经纬度与行列号映射 this_out_dem_rc_path = os.path.join(out_dir_path, "WGS_SAR_map.tiff") # out_dir_path + "\\" + "WGS_SAR_map.tiff"#// 经纬度与行列号映射
if(os.path.exists(this_out_dem_rc_path)): if(os.path.exists(this_out_dem_rc_path)):
os.remove(this_out_dem_rc_path) os.remove(this_out_dem_rc_path)
this_out_sar_sim_path = out_dir_path + "\\" + "sar_sim.tiff"
if (os.path.exists(this_out_sar_sim_path)):
os.remove(this_out_sar_sim_path)
this_out_sar_sim_wgs_path = out_dir_path + "\\" + "sar_sim_wgs.tiff" # // 经纬度与行列号映射
if (os.path.exists(this_out_sar_sim_wgs_path)):
os.remove(this_out_sar_sim_wgs_path)
this_out_incidence_path = os.path.join(out_dir_path, "incidentAngle.tiff") # out_dir_path + "\\" + "incidentAngle.tiff"#// 入射角 this_out_incidence_path = os.path.join(out_dir_path, "incidentAngle.tiff") # out_dir_path + "\\" + "incidentAngle.tiff"#// 入射角
this_out_localIncidenct_path = os.path.join(out_dir_path, "localIncidentAngle.tiff") # out_dir_path + "\\" + "localIncidentAngle.tiff"#// 局地入射角 this_out_localIncidenct_path = os.path.join(out_dir_path, "localIncidentAngle.tiff") # out_dir_path + "\\" + "localIncidentAngle.tiff"#// 局地入射角
if(os.path.exists(this_out_incidence_path)): if(os.path.exists(this_out_incidence_path)):
@ -329,11 +350,17 @@ class ScatteringMain:
this_out_ori_sim_tiff = os.path.join(out_dir_path, "RD_ori_sim.tif") # out_dir_path + "\\" + "RD_ori_sim.tif"#// 局地入射角 this_out_ori_sim_tiff = os.path.join(out_dir_path, "RD_ori_sim.tif") # out_dir_path + "\\" + "RD_ori_sim.tif"#// 局地入射角
this_in_rpc_lon_lat_path = this_out_ori_sim_tiff this_in_rpc_lon_lat_path = this_out_ori_sim_tiff
this_out_sim_ori_tiff = os.path.join(out_dir_path, "RD_sim_ori.tif")
this_in_rpc_x_y_path = this_out_sim_ori_tiff
this_in_rpc_x_y_path_pro = self.process_sim_ori(this_in_rpc_lon_lat_path, this_in_rpc_x_y_path)
parameter_path = os.path.join(self.__workspace_processing_path, "orth_para.txt") parameter_path = os.path.join(self.__workspace_processing_path, "orth_para.txt")
dem_rc = os.path.join(self.__workspace_preprocessing_path, "dem_rc.tiff")
for in_tif_path in in_tif_paths: for in_tif_path in in_tif_paths:
out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_DB.tif" # out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_lin.tif"
out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_lin.tif"
if ('HH' in os.path.basename(in_tif_path)) or ('HV' in os.path.basename(in_tif_path)) or ('VH' in os.path.basename(in_tif_path)) or ('VV' in os.path.basename(in_tif_path)): if ('HH' in os.path.basename(in_tif_path)) or ('HV' in os.path.basename(in_tif_path)) or ('VH' in os.path.basename(in_tif_path)) or ('VV' in os.path.basename(in_tif_path)):
alg.sar_backscattering_coef(in_tif_path, meta_file_path, out_tif_path) alg.sar_backscattering_coef(in_tif_path, meta_file_path, out_tif_path)
# 构建RPC # 构建RPC
@ -341,26 +368,46 @@ class ScatteringMain:
rpc_path=in_tif_path.replace(".tiff",".rpc") if os.path.exists(in_tif_path.replace(".tiff",".rpc")) else in_tif_path.replace(".tiff",".rpb") rpc_path=in_tif_path.replace(".tiff",".rpc") if os.path.exists(in_tif_path.replace(".tiff",".rpc")) else in_tif_path.replace(".tiff",".rpb")
if not os.path.exists(rpc_path): if not os.path.exists(rpc_path):
logger.error('rpc not found!') logger.error('rpc not found!')
# tempout_tif_path=os.path.join(self.__workspace_processing_path,os.path.splitext(os.path.basename(in_tif_path))[0]).replace("_L1A_","_L4_")+ r".tif"
tempout_tif_path=os.path.join(self.__workspace_processing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif" # db->地理编码
# lin_tif_path = os.path.join(self.__workspace_processing_path,
# os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif"
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path,
# out_tif_path,
# lin_tif_path)
# 线性->地理编码->db
lin_tif_path=os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-lin_geo.tif"
# Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path_pro,
# out_tif_path,
# lin_tif_path)
Orthorectification.calInterpolation_bil_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path_pro,
out_tif_path,
lin_tif_path)
tempout_tif_path = os.path.join(self.__workspace_processing_path,
os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif"
alg.lin_to_db(lin_tif_path, tempout_tif_path) #线性值转回DB值
# 移动RPC # 移动RPC
#rpc_correction(in_tif_path,rpc_path,out_tif_path,dem_tif_file = None) #rpc_correction(in_tif_path,rpc_path,out_tif_path,dem_tif_file = None)
# Orthorectification.inter_Range2Geo(this_in_rpc_lon_lat_path,out_tif_path,tempout_tif_path,Orthorectification.heightspace) # Orthorectification.inter_Range2Geo(this_in_rpc_lon_lat_path,out_tif_path,tempout_tif_path,Orthorectification.heightspace)
Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_tif_path,
tempout_tif_path)
#shutil.move(rpc_path,out_tif_path.replace(".tiff",".rpc"))
self.imageHandler.write_quick_view(tempout_tif_path, color_img=False) self.imageHandler.write_quick_view(tempout_tif_path, color_img=False)
# self.imageHandler.write_quick_view(lin_tif_path, color_img=False)
else: else:
shutil.copy(in_tif_path,self.__workspace_processing_path) shutil.copy(in_tif_path,self.__workspace_processing_path)
ref_tif_path = tempout_tif_path ref_tif_path = tempout_tif_path
# ref_tif_path = lin_tif_path
# 构建行列号映射表 # 构建行列号映射表
#out_rpc_rc_path = os.path.join(self.__workspace_processing_path,"RPC_ori_sim.tif") #out_rpc_rc_path = os.path.join(self.__workspace_processing_path,"RPC_ori_sim.tif")
#getRCImageRC(in_tif_path,out_rpc_rc_path,rpc_path) #getRCImageRC(in_tif_path,out_rpc_rc_path,rpc_path)
logger.info('progress bar: 90%') logger.info('progress bar: 90%')
if(os.path.exists(this_in_rpc_lon_lat_path)): if(os.path.exists(this_in_rpc_lon_lat_path)):
os.remove(this_in_rpc_lon_lat_path) os.remove(this_in_rpc_lon_lat_path)
# out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.split(meta_file_path)[1].rstrip('.meta.xml') + '_DB.meta.xml') if (os.path.exists(this_in_rpc_x_y_path)):
os.remove(this_in_rpc_x_y_path)
# out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.split(meta_file_path)[1].rstrip('.meta.xml') + '_DB.meta.xml')
out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.basename(meta_file_path)) out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.basename(meta_file_path))
shutil.copy(meta_file_path, out_mate_file_path) shutil.copy(meta_file_path, out_mate_file_path)

View File

@ -21,9 +21,10 @@ logger = logging.getLogger("mylog")
class LandCoverMeasCsv: class LandCoverMeasCsv:
"""读取地表覆盖标记数据""" """读取地表覆盖标记数据"""
def __init__(self, csv_path, preprocessed_paras): def __init__(self, csv_path, preprocessed_paras, max_tran__num_per_class):
self.__csv_path = csv_path self.__csv_path = csv_path
self.__preprocessed_paras = preprocessed_paras self.__preprocessed_paras = preprocessed_paras
self.__max_tran__num_per_class = max_tran__num_per_class
def api_read_measure(self): def api_read_measure(self):
""" """
@ -123,9 +124,10 @@ class LandCoverMeasCsv:
for train_data in train_data_list: for train_data in train_data_list:
logger.info(str(train_data[0]) + "," + str(train_data[2]) +"," + "num:" + str(len(train_data[3]))) logger.info(str(train_data[0]) + "," + str(train_data[2]) +"," + "num:" + str(len(train_data[3])))
logger.info("max number = 100000, random select 100000 point as train data!") max_num = self.__max_tran__num_per_class
if(len(train_data[3]) > 100000): logger.info("max number =" + str(max_num) + ", random select" + str(max_num) + " point as train data!")
train_data[3] = random.sample(train_data[3], 100000) if (len(train_data[3]) > max_num):
train_data[3] = random.sample(train_data[3], max_num)
return train_data_list return train_data_list

View File

@ -21,6 +21,7 @@ import multiprocessing
import pyproj._compat import pyproj._compat
# 导入PreProcess模块要在其他包含gdal库的模块前面不然剪切影像会报错详见https://blog.csdn.net/u014656611/article/details/106450006 # 导入PreProcess模块要在其他包含gdal库的模块前面不然剪切影像会报错详见https://blog.csdn.net/u014656611/article/details/106450006
from tool.algorithm.algtools.PreProcess import PreProcess as pp from tool.algorithm.algtools.PreProcess import PreProcess as pp
from LandCoverAuxData import LandCoverMeasCsv
from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro
from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.ROIAlg import ROIAlg as alg from tool.algorithm.algtools.ROIAlg import ROIAlg as alg
@ -129,6 +130,8 @@ class LandCoverMain:
para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name))
# tif路径字典 # tif路径字典
para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))) para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)))
parameter_path = os.path.join(file_dir, "orth_para.txt")
para_dic.update({"paraMeter": parameter_path})
return para_dic return para_dic
def __create_work_space(self): def __create_work_space(self):
@ -162,10 +165,10 @@ class LandCoverMain:
""" """
预处理 预处理
""" """
para_names_geo = [] para_names_geo = ['sim_ori']
for key in self.__processing_paras.keys(): # for key in self.__processing_paras.keys():
if "FeatureMap" in key: # if "FeatureMap" in key:
para_names_geo.append(key) # para_names_geo.append(key)
self.__feature_name_list = para_names_geo self.__feature_name_list = para_names_geo
p = pp() p = pp()
@ -181,10 +184,12 @@ class LandCoverMain:
self.__preprocessed_paras.update({name: out_path}) self.__preprocessed_paras.update({name: out_path})
logger.info('preprocess_handle success!') logger.info('preprocess_handle success!')
for name in para_names_geo: # for name in para_names_geo:
l1a_path = os.path.join(self.__workspace_preprocessed_path, name+".tif") # l1a_path = os.path.join(self.__workspace_preprocessed_path, name+".tif")
self._tr.tran_geo_to_l1a(self.__preprocessed_paras[name], l1a_path, self.__preprocessed_paras['ori_sim'], is_class=False) # self._tr.tran_geo_to_l1a(self.__preprocessed_paras[name], l1a_path, self.__preprocessed_paras['ori_sim'], is_class=False)
self.__preprocessed_paras[name] = l1a_path # self.__preprocessed_paras[name] = l1a_path
self.__cols_geo = self.imageHandler.get_img_width(self.__preprocessed_paras['sim_ori'])
self.__rows_geo = self.imageHandler.get_img_height(self.__preprocessed_paras['sim_ori'])
self.__cols = self.imageHandler.get_img_width(self.__preprocessed_paras['HH']) self.__cols = self.imageHandler.get_img_width(self.__preprocessed_paras['HH'])
self.__rows = self.imageHandler.get_img_height(self.__preprocessed_paras['HH']) self.__rows = self.imageHandler.get_img_height(self.__preprocessed_paras['HH'])
@ -330,7 +335,7 @@ class LandCoverMain:
block_size = bp.get_block_size(self.__rows, self.__cols) block_size = bp.get_block_size(self.__rows, self.__cols)
self.__block_size = block_size self.__block_size = block_size
bp.cut(feature_tif_dic, self.__workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size) bp.cut_new(feature_tif_dic, self.__workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
img_dir, img_name = bp.get_file_names(self.__workspace_block_tif_path, ['tif']) img_dir, img_name = bp.get_file_names(self.__workspace_block_tif_path, ['tif'])
dir_dict_all = bp.get_same_img(img_dir, img_name) dir_dict_all = bp.get_same_img(img_dir, img_name)
@ -423,7 +428,7 @@ class LandCoverMain:
# 合并影像 # 合并影像
data_dir = bp_cover_dir data_dir = bp_cover_dir
out_path = self.__workspace_processing_path[0:-1] out_path = self.__workspace_processing_path[0:-1]
bp.combine( bp.combine_new(
data_dir, data_dir,
self.__cols, self.__cols,
self.__rows, self.__rows,
@ -456,16 +461,45 @@ class LandCoverMain:
logger.info('progress bar: 30%') logger.info('progress bar: 30%')
return lee_filter_path return lee_filter_path
def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar):
    """Geocode a SAR raster with bilinear interpolation (SIMOrthoProgram mode 11).

    External tool invocation:
        SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path
    """
    ortho_exe = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
    full_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(
        ortho_exe, 11, parameter_path, dem_rc, in_sar, out_sar)
    print(full_cmd)
    # Print the shell exit status for diagnostics.
    print(os.system(full_cmd))
    print("==========================================================================")
def features_geo(self, features_path):
dir = os.path.join(self.__workspace_processing_path, 'features_geo\\')
if not os.path.exists(dir):
os.mkdir(dir)
for key, file in zip(features_path, features_path.values()):
name = key + '_geo.tif'
out_path = os.path.join(dir, name)
self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
self.__preprocessed_paras['sim_ori'], file, out_path)
return dir
def process_handle(self, start): def process_handle(self, start):
""" """
算法主处理函数 算法主处理函数
""" """
hh_geo_path = self.__workspace_processing_path + "hh_geo.tif"
self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
self.__preprocessed_paras['sim_ori'],
self.__preprocessed_paras['HH'], hh_geo_path)
# 读取实测值,获取多边形区域内所有的点,分为训练集数据和测试集数据 # 读取实测值,获取多边形区域内所有的点,分为训练集数据和测试集数据
# train_data_list = csvh.trans_landCover_measuredata(csvh.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim']) pm = LandCoverMeasCsv(self.__processing_paras['LabelData'], hh_geo_path, MAX_TRAN_NUM)
csvh_roi = csvHandle(self.__rows, self.__cols) train_data_list = pm.api_read_measure()
train_data_dic = csvh_roi.trans_landCover_measuredata_dic(csvh_roi.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim'], MAX_TRAN_NUM) train_data_dic = csvh.trans_landCover_list2dic(train_data_list)
csvh_roi = csvHandle(self.__rows_geo, self.__cols_geo)
# train_data_dic = csvh_roi.trans_landCover_measuredata_dic(csvh_roi.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim'], MAX_TRAN_NUM)
label_img = csvh_roi.get_roi_img() label_img = csvh_roi.get_roi_img()
if(len(label_img) != 0): if(len(label_img) != 0):
self.imageHandler.write_img(os.path.join(self.__workspace_processing_path, "label_img.tif"),"",[0,0,0,0,0,0],label_img) self.imageHandler.write_img(os.path.join(self.__workspace_processing_path, "label_img.tif"),"",[0,0,0,0,0,0],label_img)
@ -487,17 +521,19 @@ class LandCoverMain:
feature_tif_paths.update(write_bin_to_tif(self.__feature_tif_dir, feature_bin_dic)) feature_tif_paths.update(write_bin_to_tif(self.__feature_tif_dir, feature_bin_dic))
logging.info("feature_tif_paths:%s",feature_tif_paths) logging.info("feature_tif_paths:%s",feature_tif_paths)
# 对所有特征进行地理编码
feature_geo = self.features_geo(feature_tif_paths)
# 新添加的特征做归一化 # 新添加的特征做归一化
for name in self.__feature_name_list: # for name in self.__feature_name_list:
proj, geo, arr = self.imageHandler.read_img(self.__preprocessed_paras[name]) # proj, geo, arr = self.imageHandler.read_img(self.__preprocessed_paras[name])
arr = ml.standardization(arr) # arr = ml.standardization(arr)
self.imageHandler.write_img(os.path.join(self.__feature_tif_dir, name+".tif"), proj, geo, arr) # self.imageHandler.write_img(os.path.join(self.__feature_tif_dir, name+".tif"), proj, geo, arr)
logger.info("decompose feature success!") logger.info("decompose feature success!")
logger.info('progress bar: 50%') logger.info('progress bar: 50%')
# 生成最优特征子集训练集 # 生成最优特征子集训练集
X_train, Y_train, optimal_feature = ml.gene_optimal_train_set(train_data_dic, self.__feature_tif_dir, 0.07, 0.85) X_train, Y_train, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_geo, 0.07, 0.85)
# 训练模型 # 训练模型
cost = self.__processing_paras["Cost"] cost = self.__processing_paras["Cost"]
@ -517,10 +553,11 @@ class LandCoverMain:
# logger.info('progress bar: 60%') # logger.info('progress bar: 60%')
# 生成测试集 # 生成测试集
X_test_path_list = ml.gene_test_set(self.__feature_tif_dir, optimal_feature) # X_test_path_list = ml.gene_test_set(self.__feature_tif_dir, optimal_feature)
X_test_path_list = ml.gene_test_set(feature_geo, optimal_feature)
# 预测 # 预测
logger.info('testing') logger.info('testing')
cover_path = ml.predict(clf, X_test_path_list, EXE_NAME, self.__workspace_processing_path, self.__rows, self.__cols) cover_path = ml.predict(clf, X_test_path_list, EXE_NAME, self.__workspace_processing_path, self.__rows_geo, self.__cols_geo)
logger.info('test success!') logger.info('test success!')
logger.info('progress bar: 95%') logger.info('progress bar: 95%')
@ -537,14 +574,18 @@ class LandCoverMain:
cover_data = np.int16(cover_data) cover_data = np.int16(cover_data)
# cover_path = cover_path.replace('.tif', '_geo.tif') # cover_path = cover_path.replace('.tif', '_geo.tif')
cover_geo_path = cover_path.replace('.tif', '_geo.tif') cover_geo_path = cover_path.replace('.tif', '_geo.tif')
self.imageHandler.write_img(cover_path, proj, geo, cover_data) self.imageHandler.write_img(cover_geo_path, proj, geo, cover_data)
# l1a图像坐标转换地理坐标 # l1a图像坐标转换地理坐标
self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], cover_path, cover_geo_path, 'nearest') # self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
# self.__preprocessed_paras['sim_ori'], cover_path, cover_geo_path)
# # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], cover_path, cover_geo_path, 'nearest')
proj, geo, cover_data_geo = self.imageHandler.read_img(cover_geo_path) proj, geo, cover_data_geo = self.imageHandler.read_img(cover_geo_path)
hh_geo_path = self.__workspace_processing_path + "hh_geo.tif"
self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], self.__preprocessed_paras['HH'], hh_geo_path) proj, geo, cover_data_hh = self.imageHandler.read_img(hh_geo_path)
# self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], self.__preprocessed_paras['HH'], hh_geo_path)
roi_img = self.imageHandler.get_band_array(self.create_roi(hh_geo_path)) roi_img = self.imageHandler.get_band_array(self.create_roi(hh_geo_path))
# 获取影像roi区域 # 获取影像roi区域
@ -588,11 +629,18 @@ class LandCoverMain:
out_path1, out_path2).calu_nature() out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": "地表覆盖类型"}) para_dict.update({"imageinfo_ProductName": "地表覆盖类型"})
para_dict.update({"imageinfo_ProductIdentifier": "LandCover"}) para_dict.update({"imageinfo_ProductIdentifier": "LandCover"})
para_dict.update({"imageinfo_ProductLevel": "LEVEL5"}) para_dict.update({"imageinfo_ProductLevel": "4"})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "LabelData"}) para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "LabelData"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
if __name__ == '__main__': if __name__ == '__main__':
multiprocessing.freeze_support() #解决打包与运行错误 multiprocessing.freeze_support() #解决打包与运行错误
start = datetime.datetime.now() start = datetime.datetime.now()

View File

@ -10,6 +10,8 @@
[修改序列] [修改日期] [修改者] [修改内容] [修改序列] [修改日期] [修改者] [修改内容]
1 2022-6-27 李明明 1.增加配置文件config.ini; 2.修复快速图全黑的问题; 3.内部处理使用地理坐标系(4326) 1 2022-6-27 李明明 1.增加配置文件config.ini; 2.修复快速图全黑的问题; 3.内部处理使用地理坐标系(4326)
""" """
from osgeo import gdalconst
from tool.algorithm.algtools.PreProcess import PreProcess as pp # 此行放在下面会报错,最好放在上面 from tool.algorithm.algtools.PreProcess import PreProcess as pp # 此行放在下面会报错,最好放在上面
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara # 导入xml文件读取与检查文件 from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara # 导入xml文件读取与检查文件
from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.algtools.logHandler import LogHandler
@ -31,10 +33,13 @@ import numpy as np
import scipy.spatial.transform # 用于解决打包错误 import scipy.spatial.transform # 用于解决打包错误
import scipy.spatial.transform._rotation_groups # 用于解决打包错误 import scipy.spatial.transform._rotation_groups # 用于解决打包错误
import scipy.special.cython_special # 用于解决打包错误 import scipy.special.cython_special # 用于解决打包错误
import pyproj._compat
from scipy.interpolate import griddata from scipy.interpolate import griddata
import sys import sys
import multiprocessing import multiprocessing
from tool.file.fileHandle import fileHandle from tool.file.fileHandle import fileHandle
from sample_process import read_sample_csv,combine_sample_attr,ReprojectImages2,read_tiff,check_sample,split_sample_list
from tool.LAI.LAIProcess import train_WMCmodel,test_WMCModel,process_tiff
cover_id_list = [] cover_id_list = []
threshold_of_ndvi_min = 0 threshold_of_ndvi_min = 0
@ -122,7 +127,7 @@ class LeafIndexMain:
file_dir = os.path.join(self.__workspace_preprocessing_path, name + '\\') file_dir = os.path.join(self.__workspace_preprocessing_path, name + '\\')
file.de_targz(tar_gz_path, file_dir) file.de_targz(tar_gz_path, file_dir)
# 元文件字典 # 元文件字典
para_dic.update(InitPara.get_meta_dic(InitPara.get_meta_paths(file_dir, name), name)) para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name))
# tif路径字典 # tif路径字典
pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)) pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))
flag_list = [0, 0] flag_list = [0, 0]
@ -197,7 +202,7 @@ class LeafIndexMain:
""" """
预处理 预处理
""" """
para_names = [self.__sar_tif_name, 'LocalIncidenceAngle', "NDVI", "surface_coverage"] para_names = [self.__sar_tif_name, 'LocalIncidenceAngle', "NDVI", "surface_coverage", 'soilMeasured']
ref_img_name = self.__sar_tif_name ref_img_name = self.__sar_tif_name
p = pp() p = pp()
self.__preprocessed_paras = p.preprocessing(para_names, ref_img_name, self.__preprocessed_paras = p.preprocessing(para_names, ref_img_name,
@ -310,14 +315,125 @@ class LeafIndexMain:
logger.info('create soil_moisture image success!') logger.info('create soil_moisture image success!')
logger.info('progress bar: 40%') logger.info('progress bar: 40%')
def cal_empirical_parameters(self):
work_path = self.__workspace_path + EXE_NAME + "\\Temporary\\empirical""\\"
# b. 结果工作
result_dir_path = self.__workspace_path + EXE_NAME + "\\Temporary\\empirical_result""\\"
path_list = [work_path, result_dir_path]
file.creat_dirs(path_list)
# 1. 后向散射系数 dB
sigma_path = self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif'
# 2. 局地入射角
incident_angle_path = self.__workspace_maskcai_localang_path + "LocalIncidenceAngle_preproed.tif"
# 3. 样本csv地址
lai_csv_path = self.__processing_paras['laiMeasuredData']
# 4. NDVI影像地址 -- 修正模型
NDVI_tiff_path = self.__preprocessed_paras["NDVI"]
# 5. 土壤含水量影像地址
soil_water_tiff_path = self.__preprocessed_paras['soilMeasured']
# 6. 土壤含水量样本地址
soil_water_csv_path = r""
# 7. 选择土壤含水量影像
soil_water = 'tiff'
# 8. 输出图片
train_err_image_path = os.path.join(result_dir_path, "train_image.png")
NDVI_min = -1 # 完全裸土对应的 NDVI 值
NDVI_max = 1 # 完全植被覆盖对应的 NDVI 值
# 临时变量
soil_tiff_resample_path = os.path.join(work_path, "soil_water.tiff") # 与 后向散射系数同样分辨率的 土壤水分影像
NDVI_tiff_resample_path = os.path.join(work_path, 'NDVI.tiff') # 与 后向散射系数产品同样分辨率的 NDVI影像
incident_angle_resample_path = os.path.join(work_path, "localincangle.tiff")
# 读取数据
lai_sample = read_sample_csv(lai_csv_path) # 读取样本数据
sigma_tiff = read_tiff(sigma_path) # 读取后向散射系数
incident_angle = read_tiff(incident_angle_path) # 读取局地入射角
# 对于土壤水分、NDVI做重采样
ReprojectImages2(soil_water_tiff_path, sigma_path, soil_tiff_resample_path, resampleAlg=gdalconst.GRA_Bilinear)
ReprojectImages2(NDVI_tiff_path, sigma_path, NDVI_tiff_resample_path, resampleAlg=gdalconst.GRA_Bilinear)
ReprojectImages2(incident_angle_path, sigma_path, incident_angle_resample_path,
resampleAlg=gdalconst.GRA_NearestNeighbour)
soil_water_tiff = read_tiff(soil_tiff_resample_path) # 读取土壤含水量影像
NDVI_tiff = read_tiff(NDVI_tiff_resample_path) # 引入NDVI
incident_angle = read_tiff(incident_angle_resample_path) # 读取局地入射角
# 处理归一化植被指数
F_VEG = (NDVI_tiff['data'] - NDVI_min) / (NDVI_max - NDVI_min) # 处理得到植被覆盖度
soil_water_tiff['data'] = soil_water_tiff['data'] / 100.0 # 转换为百分比
incident_angle['data'] = incident_angle['data'] * np.pi / 180.0 # 转换为弧度值
sigma_tiff['data'] = np.power(10, (sigma_tiff['data'] / 10)) # 转换为线性值
# float32 转 float64
soil_water_tiff['data'] = soil_water_tiff['data'].astype(np.float64)
incident_angle['data'] = incident_angle['data'].astype(np.float64)
sigma_tiff['data'] = sigma_tiff['data'].astype(np.float64)
# 将土壤水分与lai样本之间进行关联
lai_water_sample = [] # ['日期', '样方编号', '经度', '纬度', 'LAI','土壤含水量']
if soil_water == 'tiff':
lai_water_sample = combine_sample_attr(lai_sample, soil_water_tiff)
pass
else: # 这个暂时没有考虑
pass
# 将入射角、后向散射系数与lai样本之间进行关联
lai_water_inc_list = combine_sample_attr(lai_water_sample,
incident_angle) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角']
lai_waiter_inc_sigma_list = combine_sample_attr(lai_water_inc_list,
sigma_tiff) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数']
# lai_waiter_inc_sigma_NDVI_list=combine_sample_attr(lai_waiter_inc_sigma_list,NDVI_tiff) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数','NDVI']
lai_waiter_inc_sigma_list = check_sample(
lai_waiter_inc_sigma_list) # 清理样本 ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数']
# lai_waiter_inc_sigma_NDVI_list=check_sample(lai_waiter_inc_sigma_NDVI_list) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数','NDVI']
# 数据集筛选
lai_waiter_inc_sigma_list_result = []
# 筛选保留的数据集
logger.info("保留得数据集如下")
for i in range(len(lai_waiter_inc_sigma_list)):
if i in []:
continue
logger.info(str(lai_waiter_inc_sigma_list[i]))
lai_waiter_inc_sigma_list_result.append(lai_waiter_inc_sigma_list[i])
lai_waiter_inc_sigma_list = lai_waiter_inc_sigma_list_result
# [sample_train,sample_test]=split_sample_list(lai_waiter_inc_sigma_list,0.6) # step 1 切分数据集
[sample_train, sample_test] = [lai_waiter_inc_sigma_list[:], lai_waiter_inc_sigma_list[:]] # step 1 切分数据集
logger.info("训练模型")
a = self.__processing_paras["A"]
b = self.__processing_paras["B"]
c = self.__processing_paras["C"]
d = self.__processing_paras["D"]
params_X0 = [a, b, c, d, 0.771, -0.028]
params_arr = train_WMCmodel(sample_train, params_X0, train_err_image_path, False)
logging.info("模型初值:\t{}".format(str(params_X0)))
logging.info("训练得到的模型系数:\t{}".format(str(params_arr)))
self.__processing_paras.update({"A": params_arr[0]})
self.__processing_paras.update({"B": params_arr[1]})
self.__processing_paras.update({"C": params_arr[2]})
self.__processing_paras.update({"D": params_arr[3]})
def block_process(self,start): def block_process(self,start):
""" """
生成叶面积指数产品 生成叶面积指数产品
""" """
# 生成土壤水分影像 # 生成土壤水分影像
self.create_soil_moisture_tif() # self.create_soil_moisture_tif()
shutil.copyfile(self.__preprocessed_paras[self.__sar_tif_name], self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif') if os.path.exists(self.__preprocessed_paras['soilMeasured']):
soil_new = os.path.join(self.__workspace_maskcai_SoilMoi_path, 'soil_moisture.tif')
shutil.copy(self.__preprocessed_paras['soilMeasured'], soil_new)
lee_path = os.path.join(self.__workspace_preprocessed_path, self.__sar_tif_name + '.tif')
Filter().lee_process_sar(self.__preprocessed_paras[self.__sar_tif_name], lee_path, 3, 0.25)
shutil.copyfile(lee_path, self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif')
# shutil.copyfile(self.__preprocessed_paras[self.__sar_tif_name], self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif')
logger.info('progress bar: 50%') logger.info('progress bar: 50%')
# 模型训练得到经验系数
self.cal_empirical_parameters()
block_size = self.BlockProcess.get_block_size(self.__rows, self.__cols) block_size = self.BlockProcess.get_block_size(self.__rows, self.__cols)
@ -334,7 +450,7 @@ class LeafIndexMain:
processes_num = min([len(in_tif_paths), multiprocessing_num]) processes_num = min([len(in_tif_paths), multiprocessing_num])
Filter().lee_filter_multiprocess(in_tif_paths, in_tif_paths, FILTER_SISE, processes_num) # Filter().lee_filter_multiprocess(in_tif_paths, in_tif_paths, FILTER_SISE, processes_num)
pool = multiprocessing.Pool(processes=processes_num) pool = multiprocessing.Pool(processes=processes_num)
pl = [] pl = []
@ -408,16 +524,23 @@ class LeafIndexMain:
model_path = "./product.xml" model_path = "./product.xml"
meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-lef.meta.xml") meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-lef.meta.xml")
para_dict = CreateMetaDict(image_path, self.__processing_paras['META'], self.__workspace_processing_path, para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__workspace_processing_path,
out_path1, out_path2).calu_nature() out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": "叶面积指数"}) para_dict.update({"imageinfo_ProductName": "叶面积指数"})
para_dict.update({"imageinfo_ProductIdentifier": "LeafAreaIndex"}) para_dict.update({"imageinfo_ProductIdentifier": "LeafAreaIndex"})
para_dict.update({"imageinfo_ProductLevel": "LEVEL3"}) para_dict.update({"imageinfo_ProductLevel": "5"})
para_dict.update({"ProductProductionInfo_BandSelection": "1"}) para_dict.update({"ProductProductionInfo_BandSelection": "1"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "MeasuredData,NDVI,LandCover"}) para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "MeasuredData,NDVI,LandCover"})
para_dict.update({"MetaInfo_Unit": "None"}) # 设置单位 para_dict.update({"MetaInfo_Unit": "None"}) # 设置单位
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
# 文件夹打包 # 文件夹打包
file.make_targz(self.__out_para, self.__product_dic) file.make_targz(self.__out_para, self.__product_dic)
logger.info('process_handle success!') logger.info('process_handle success!')

View File

@ -17,6 +17,7 @@ from tool.algorithm.algtools.logHandler import LogHandler
from SoilMoistureALg import MoistureAlg as alg from SoilMoistureALg import MoistureAlg as alg
from tool.algorithm.block.blockprocess import BlockProcess from tool.algorithm.block.blockprocess import BlockProcess
from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.config.ConfigeHandle import Config as cf from tool.config.ConfigeHandle import Config as cf
from tool.algorithm.xml.CreatMetafile import CreateMetafile from tool.algorithm.xml.CreatMetafile import CreateMetafile
from tool.algorithm.algtools.ROIAlg import ROIAlg as roi from tool.algorithm.algtools.ROIAlg import ROIAlg as roi
@ -225,6 +226,16 @@ class MoistureMain:
# 计算ROI区域 # 计算ROI区域
bare_land_mask_path = self.create_roi() bare_land_mask_path = self.create_roi()
logger.info('progress bar: 50%') logger.info('progress bar: 50%')
para_names = ['HH', 'VV', 'VH', 'HV']
for i in para_names:
if os.path.exists(self.__preprocessed_paras[i]):
lee_path = os.path.join(self.__workspace_preprocessed_path, os.path.basename(self.__preprocessed_paras[i]).split(".")[0] + '_lee.tif')
Filter().lee_process_sar(self.__preprocessed_paras[i], lee_path, 3, 0.25)
logger.info('lee process finish: ' + self.__preprocessed_paras[i])
os.remove(self.__preprocessed_paras[i])
self.__preprocessed_paras.update({i: lee_path})
# 分块 # 分块
bp = BlockProcess() bp = BlockProcess()
# block_size = bp.get_block_size(self.__rows, self.__cols,block_size_config) # block_size = bp.get_block_size(self.__rows, self.__cols,block_size_config)
@ -261,11 +272,11 @@ class MoistureMain:
return False return False
processes_num = min([len(angle_list), multiprocessing_num, multiprocessing.cpu_count() - 1]) processes_num = min([len(angle_list), multiprocessing_num, multiprocessing.cpu_count() - 1])
f = Filter() # f = Filter()
f.lee_filter_multiprocess(hh_list, hh_list, FILTER_SIZE, processes_num) # f.lee_filter_multiprocess(hh_list, hh_list, FILTER_SIZE, processes_num)
f.lee_filter_multiprocess(vv_list, vv_list, FILTER_SIZE, processes_num) # f.lee_filter_multiprocess(vv_list, vv_list, FILTER_SIZE, processes_num)
f.lee_filter_multiprocess(vh_list, vh_list, FILTER_SIZE, processes_num) # f.lee_filter_multiprocess(vh_list, vh_list, FILTER_SIZE, processes_num)
f.lee_filter_multiprocess(hv_list, hv_list, FILTER_SIZE, processes_num) # f.lee_filter_multiprocess(hv_list, hv_list, FILTER_SIZE, processes_num)
# 开启多进程处理 # 开启多进程处理
pool = multiprocessing.Pool(processes=processes_num) pool = multiprocessing.Pool(processes=processes_num)
pl = [] pl = []
@ -326,7 +337,9 @@ class MoistureMain:
bp.assign_spatial_reference_byfile(self.__preprocessed_paras['HH'], soil_moisture_path) bp.assign_spatial_reference_byfile(self.__preprocessed_paras['HH'], soil_moisture_path)
# 生成roi区域 # 生成roi区域
product_path = self.__product_dic + 'SoilMoistureProduct.tif' SrcImageName = os.path.split(self.__input_paras["DualPolarSAR"]['ParaValue'])[1].split('.tar.gz')[
0] + '-SMC.tif'
product_path = os.path.join(self.__product_dic, SrcImageName)
roi.cal_roi(product_path, soil_moisture_path, bare_land_mask_path, background_value=np.nan) roi.cal_roi(product_path, soil_moisture_path, bare_land_mask_path, background_value=np.nan)
logger.info('cal soil_moisture success!') logger.info('cal soil_moisture success!')
@ -345,25 +358,43 @@ class MoistureMain:
image_path = product_path image_path = product_path
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start)
model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径
id_min = 0 # id_min = 0
id_max = 1000 # id_max = 1000
threshold_of_ndvi_min = 0 # threshold_of_ndvi_min = 0
threshold_of_ndvi_max = 1 # threshold_of_ndvi_max = 1
set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max] # set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max]
CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml() # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start)
# CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml()
SrcImagePath = self.__input_paras["DualPolarSAR"]['ParaValue'] SrcImagePath = self.__input_paras["DualPolarSAR"]['ParaValue']
paths = SrcImagePath.split(';') paths = SrcImagePath.split(';')
SrcImageName=os.path.split(paths[0])[1].split('.tar.gz')[0] SrcImageName=os.path.split(paths[0])[1].split('.tar.gz')[0]
if len(paths) >= 2: # if len(paths) >= 2:
for i in range(1, len(paths)): # for i in range(1, len(paths)):
SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0] # SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0]
meta_xml_path = self.__product_dic+EXE_NAME+"Product.meta.xml" # meta_xml_path = self.__product_dic+EXE_NAME+"Product.meta.xml"
CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName) # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName)
model_path = "./product.xml"
meta_xml_path = os.path.join(self.__product_dic, SrcImageName + "-SMC.meta.xml")
para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__product_dic,
out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": "土壤水分产品"})
para_dict.update({"imageinfo_ProductIdentifier": "SoilMositure"})
para_dict.update({"imageinfo_ProductLevel": "5A"})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
# 文件夹打包 # 文件夹打包
file.make_targz(self.__out_para, self.__product_dic) file.make_targz(self.__out_para, self.__product_dic)
@ -398,8 +429,8 @@ if __name__ == '__main__':
except Exception: except Exception:
logger.exception('run-time error!') logger.exception('run-time error!')
finally: finally:
main_handler.del_temp_workspace() # main_handler.del_temp_workspace()
pass
end = datetime.datetime.now() end = datetime.datetime.now()
msg = 'running use time: %s ' % (end - start) msg = 'running use time: %s ' % (end - start)
logger.info(msg) logger.info(msg)

View File

@ -113,6 +113,8 @@ class MoistureMain:
# 元文件字典 # 元文件字典
# para_dic.update(InitPara.get_meta_dic(InitPara.get_meta_paths(file_dir, name), name)) # para_dic.update(InitPara.get_meta_dic(InitPara.get_meta_paths(file_dir, name), name))
para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name))
parameter_path = os.path.join(file_dir, "orth_para.txt")
para_dic.update({"paraMeter": parameter_path})
# tif路径字典 # tif路径字典
pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)) pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))
@ -136,6 +138,8 @@ class MoistureMain:
para_dic.update({'inc_angle': in_tif_path}) para_dic.update({'inc_angle': in_tif_path})
elif 'ori_sim' == key: elif 'ori_sim' == key:
para_dic.update({'ori_sim': in_tif_path}) para_dic.update({'ori_sim': in_tif_path})
elif 'sim_ori' == key:
para_dic.update({'sim_ori': in_tif_path})
elif 'LocalIncidenceAngle' == key: elif 'LocalIncidenceAngle' == key:
para_dic.update({'LocalIncidenceAngle': in_tif_path}) para_dic.update({'LocalIncidenceAngle': in_tif_path})
elif 'inci_Angle-ortho' == key: elif 'inci_Angle-ortho' == key:
@ -173,13 +177,13 @@ class MoistureMain:
# self.__preprocessed_paras, scopes_roi = p.preprocessing_oh2004(para_names, self.__processing_paras, # self.__preprocessed_paras, scopes_roi = p.preprocessing_oh2004(para_names, self.__processing_paras,
# self.__workspace_preprocessing_path, self.__workspace_preprocessed_path) # self.__workspace_preprocessing_path, self.__workspace_preprocessed_path)
para_names_geo = ['Covering', 'NDVI'] para_names_geo = ['Covering', 'NDVI', 'inc_angle', 'sim_ori']
p = pp() p = pp()
cutted_img_paths, scopes_roi = p.cut_geoimg(self.__workspace_preprocessing_path, para_names_geo, cutted_img_paths, scopes_roi = p.cut_geoimg(self.__workspace_preprocessing_path, para_names_geo,
self.__processing_paras) self.__processing_paras)
self.__preprocessed_paras.update(cutted_img_paths) self.__preprocessed_paras.update(cutted_img_paths)
para_names_l1a = ["HH", "VV", "HV", "VH", 'inci_Angle-ortho', 'ori_sim'] para_names_l1a = ["HH", "VV", "HV", "VH", 'ori_sim'] #'inci_Angle-ortho',
self._tr = TransImgL1A(self.__processing_paras['ori_sim'], scopes_roi) self._tr = TransImgL1A(self.__processing_paras['ori_sim'], scopes_roi)
for name in para_names_l1a: for name in para_names_l1a:
@ -267,6 +271,18 @@ class MoistureMain:
print(os.system(exe_cmd)) print(os.system(exe_cmd))
print("==========================================================================") print("==========================================================================")
def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar):
'''
# std::cout << "mode 11";
# std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path";
'''
exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path,
dem_rc, in_sar, out_sar)
print(exe_cmd)
print(os.system(exe_cmd))
print("==========================================================================")
def process_handle(self,start): def process_handle(self,start):
""" """
算法主处理函数 算法主处理函数
@ -275,7 +291,7 @@ class MoistureMain:
tem_folder = self.__workspace_path + EXE_NAME + r"\Temporary""\\" tem_folder = self.__workspace_path + EXE_NAME + r"\Temporary""\\"
soilOh2004 = SoilMoistureTool(self.__workspace_preprocessed_path, self.__workspace_processing_path, self.__cols, soilOh2004 = SoilMoistureTool(self.__workspace_preprocessed_path, self.__workspace_processing_path, self.__cols,
self.__rows, self.__preprocessed_paras['inci_Angle-ortho'], self.__processing_paras['Origin_META']) self.__rows, self.__preprocessed_paras['inc_angle'], self.__processing_paras['Origin_META'])
result = soilOh2004.soil_oh2004() result = soilOh2004.soil_oh2004()
logger.info('progress bar: 80%') logger.info('progress bar: 80%')
@ -288,7 +304,11 @@ class MoistureMain:
product_geo_path = os.path.join(tem_folder, 'SoilMoistureProduct_geo.tif') product_geo_path = os.path.join(tem_folder, 'SoilMoistureProduct_geo.tif')
space = self.imageHandler.get_geotransform(self.__preprocessed_paras['HH']) space = self.imageHandler.get_geotransform(self.__preprocessed_paras['HH'])
self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'],product_temp_path, product_geo_path, pixelspace)
self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
self.__preprocessed_paras['sim_ori'], product_temp_path,
product_geo_path)
# self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'],product_temp_path, product_geo_path, pixelspace)
# self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], product_temp_path, product_geo_path, 'linear') # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], product_temp_path, product_geo_path, 'linear')
# #
@ -300,7 +320,10 @@ class MoistureMain:
bare_land_mask_path = roi().roi_process(para_names, self.__workspace_processing_path + "/roi/", bare_land_mask_path = roi().roi_process(para_names, self.__workspace_processing_path + "/roi/",
self.__processing_paras, self.__preprocessed_paras) self.__processing_paras, self.__preprocessed_paras)
product_path = os.path.join(self.__product_dic, 'SoilMoistureProduct.tif') SrcImageName = os.path.split(self.__input_paras["DualPolarSAR"]['ParaValue'])[1].split('.tar.gz')[
0] + '-Soil.tif'
product_path = os.path.join(self.__product_dic, SrcImageName)
# product_path = os.path.join(self.__product_dic, 'SoilMoistureProduct.tif')
# 获取影像roi区域 # 获取影像roi区域
roi.cal_roi(product_path, product_geo_path, bare_land_mask_path, background_value=np.nan) roi.cal_roi(product_path, product_geo_path, bare_land_mask_path, background_value=np.nan)
@ -366,8 +389,8 @@ if __name__ == '__main__':
except Exception: except Exception:
logger.exception('run-time error!') logger.exception('run-time error!')
finally: finally:
main_handler.del_temp_workspace() # main_handler.del_temp_workspace()
# pass pass
end = datetime.datetime.now() end = datetime.datetime.now()
msg = 'running use time: %s ' % (end - start) msg = 'running use time: %s ' % (end - start)
logger.info(msg) logger.info(msg)

View File

@ -46,11 +46,11 @@ class SoilMoistureTool:
atp.ahv_to_polsarpro_t3_soil(t3_path, tif_path) atp.ahv_to_polsarpro_t3_soil(t3_path, tif_path)
# Lee滤波 # Lee滤波
# leeFilter = LeeRefinedFilterT3() leeFilter = LeeRefinedFilterT3()
# lee_filter_path = os.path.join(self.__workspace_processing_path, lee_filter_path = os.path.join(self.__workspace_processing_path,
# 'lee_filter\\') 'lee_filter\\')
#
# leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols()) leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols())
# logger.info("refine_lee filter success!") # logger.info("refine_lee filter success!")
# logging.info("refine_lee filter success!") # logging.info("refine_lee filter success!")
return t3_path return t3_path

View File

@ -11,14 +11,17 @@
1 2022-6-27 石海军 1.增加配置文件config.ini; 2.内部处理使用地理坐标系(4326) 1 2022-6-27 石海军 1.增加配置文件config.ini; 2.内部处理使用地理坐标系(4326)
""" """
import logging import logging
import shutil
from tool.algorithm.algtools.MetaDataHandler import Calibration from tool.algorithm.algtools.MetaDataHandler import Calibration
from tool.algorithm.algtools.PreProcess import PreProcess as pp from tool.algorithm.algtools.PreProcess import PreProcess as pp
from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.polsarpro.pspLeeRefinedFilterT3 import LeeRefinedFilterT3
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara
from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.algtools.ROIAlg import ROIAlg as roi from tool.algorithm.algtools.ROIAlg import ROIAlg as roi
from tool.algorithm.block.blockprocess import BlockProcess from tool.algorithm.block.blockprocess import BlockProcess
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.file.fileHandle import fileHandle from tool.file.fileHandle import fileHandle
# from AHVToPolsarpro import AHVToPolsarpro # from AHVToPolsarpro import AHVToPolsarpro
from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro
@ -115,6 +118,8 @@ class SalinityMain:
para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name))
# tif路径字典 # tif路径字典
para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))) para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)))
parameter_path = os.path.join(file_dir, "orth_para.txt")
para_dic.update({"paraMeter": parameter_path})
return para_dic return para_dic
def __create_work_space(self): def __create_work_space(self):
@ -148,7 +153,7 @@ class SalinityMain:
""" """
预处理 预处理
""" """
para_names_geo = ["Covering", "NDVI"] para_names_geo = ["Covering", "NDVI", 'sim_ori']
p = pp() p = pp()
p.check_img_projection(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras) p.check_img_projection(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras)
#计算roi #计算roi
@ -209,13 +214,22 @@ class SalinityMain:
logger.info('ahv transform to polsarpro T3 matrix success!') logger.info('ahv transform to polsarpro T3 matrix success!')
logger.info('progress bar: 20%') logger.info('progress bar: 20%')
# Lee滤波
leeFilter = LeeRefinedFilterT3()
lee_filter_path = os.path.join(self.__workspace_processing_path,
'lee_filter\\')
leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols())
logger.info('Refined_lee process success!')
haa = PspHAAlphaDecomposition(normalization=True) haa = PspHAAlphaDecomposition(normalization=True)
haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=out_dir, haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=out_dir,
h_a_alpha_decomposition_T3_path='h_a_alpha_decomposition_T3.exe' , h_a_alpha_decomposition_T3_path='h_a_alpha_decomposition_T3.exe' ,
h_a_alpha_eigenvalue_set_T3_path='h_a_alpha_eigenvalue_set_T3.exe' , h_a_alpha_eigenvalue_set_T3_path='h_a_alpha_eigenvalue_set_T3.exe' ,
h_a_alpha_eigenvector_set_T3_path='h_a_alpha_eigenvector_set_T3.exe', h_a_alpha_eigenvector_set_T3_path='h_a_alpha_eigenvector_set_T3.exe',
polsarpro_in_dir=t3_path) polsarpro_in_dir=lee_filter_path)
def create_meta_file(self, product_path): def create_meta_file(self, product_path):
xml_path = "./model_meta.xml" xml_path = "./model_meta.xml"
@ -223,30 +237,46 @@ class SalinityMain:
image_path = product_path image_path = product_path
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
par_dict = CreateDict(image_path, [1, 1, 1, 1], out_path1, out_path2).calu_nature(start) # par_dict = CreateDict(image_path, [1, 1, 1, 1], out_path1, out_path2).calu_nature(start)
model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 # model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径
CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml() # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml()
# 文件夹打包 # 文件夹打包
SrcImagePath = self.__input_paras["AHV"]['ParaValue'] SrcImagePath = self.__input_paras["AHV"]['ParaValue']
paths = SrcImagePath.split(';') paths = SrcImagePath.split(';')
SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0] SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0]
if len(paths) >= 2: # if len(paths) >= 2:
for i in range(1, len(paths)): # for i in range(1, len(paths)):
SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0] # SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0]
meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml" # meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml"
CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process( # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(
SrcImageName) # SrcImageName)
model_path = "./product.xml"
meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-Salinity.meta.xml")
def inter_Range2Geo(self, lon_lat_path, data_tiff, grid_path, space): para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__workspace_processing_path,
out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": "土壤盐碱度"})
para_dict.update({"imageinfo_ProductIdentifier": "SoilSalinity"})
para_dict.update({"imageinfo_ProductLevel": "5A"})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar):
''' '''
# std::cout << "mode 10"; # std::cout << "mode 11";
# std::cout << "SIMOrthoProgram.exe 10 lon_lat_path data_tiff grid_path space"; # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path";
''' '''
exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 10, exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path,
lon_lat_path, data_tiff, dem_rc, in_sar, out_sar)
grid_path, space)
print(exe_cmd) print(exe_cmd)
print(os.system(exe_cmd)) print(os.system(exe_cmd))
print("==========================================================================") print("==========================================================================")
@ -293,6 +323,22 @@ class SalinityMain:
self.imageHandler.write_img(features_path, "", [0, 0, 1, 0, 0, 1], features_array) self.imageHandler.write_img(features_path, "", [0, 0, 1, 0, 0, 1], features_array)
logger.info('create features matrix success!') logger.info('create features matrix success!')
# for n in range(block_num):
# name = os.path.basename(dir_dict[key_name][n])
# suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \
# name.split('_')[-1]
# features_path = self.__workspace_block_tif_processed_path + "features\\features" + suffix
# row = self.imageHandler.get_img_height(dir_dict[key_name][n])
# col = self.imageHandler.get_img_width(dir_dict[key_name][n])
# features_array = np.zeros((len(dir_dict), row, col), dtype='float32')
# for m, value in zip(range(len(dir_dict)), dir_dict.values()):
# features_array[m, :, :] = self.imageHandler.get_band_array(value[n], 1)
# # 异常值转为0
# features_array[np.isnan(features_array)] = 0.0
# features_array[np.isinf(features_array)] = 0.0
# self.imageHandler.write_img(features_path, "", [0, 0, 1, 0, 0, 1], features_array)
# logger.info('create features matrix success!')
# 生成训练集 # 生成训练集
block_features_dir, block_features_name = bp.get_file_names(self.__workspace_block_tif_processed_path + 'features\\', ['tif']) block_features_dir, block_features_name = bp.get_file_names(self.__workspace_block_tif_processed_path + 'features\\', ['tif'])
@ -344,15 +390,17 @@ class SalinityMain:
# l1a图像坐标转换地理坐标 # l1a图像坐标转换地理坐标
salinity_path = self.__workspace_processing_path + "salinity.tif" salinity_path = self.__workspace_processing_path + "salinity.tif"
salinity_geo_path = self.__workspace_processing_path + "salinity_geo.tif" SrcImageName = os.path.split(self.__input_paras["AHV"]['ParaValue'])[1].split('.tar.gz')[0] + '-Salinity.tif'
salinity_geo_path = os.path.join(self.__workspace_processing_path, SrcImageName)
self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path, pixelspace) self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'], self.__preprocessed_paras['sim_ori'], salinity_path, salinity_geo_path)
# self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path, pixelspace)
# self._tr.l1a_2_geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path) # self._tr.l1a_2_geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path)
self.resampleImgs(salinity_geo_path) self.resampleImgs(salinity_geo_path)
# 生成roi区域 # 生成roi区域
product_path = self.__product_dic + 'SoilSalinityProduct.tif' product_path = os.path.join(self.__product_dic, SrcImageName)
roi.cal_roi(product_path, salinity_geo_path, self.create_roi(), background_value=np.nan) roi.cal_roi(product_path, salinity_geo_path, self.create_roi(), background_value=np.nan)
# 生成快视图 # 生成快视图

View File

@ -22,6 +22,7 @@ from tool.algorithm.algtools.logHandler import LogHandler
from SurfaceRoughnessAlg import MoistureAlg as alg from SurfaceRoughnessAlg import MoistureAlg as alg
from tool.algorithm.block.blockprocess import BlockProcess from tool.algorithm.block.blockprocess import BlockProcess
from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.config.ConfigeHandle import Config as cf from tool.config.ConfigeHandle import Config as cf
from tool.algorithm.xml.CreatMetafile import CreateMetafile from tool.algorithm.xml.CreatMetafile import CreateMetafile
from tool.algorithm.algtools.ROIAlg import ROIAlg as roi from tool.algorithm.algtools.ROIAlg import ROIAlg as roi
@ -319,26 +320,43 @@ class MoistureMain:
image_path = product_path image_path = product_path
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start)
model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 # model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径
#
id_min = 0 # id_min = 0
id_max = 1000 # id_max = 1000
threshold_of_ndvi_min = 0 # threshold_of_ndvi_min = 0
threshold_of_ndvi_max = 1 # threshold_of_ndvi_max = 1
set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max] # set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max]
CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml() # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml()
#
SrcImagePath = self.__input_paras["DualPolarSAR"]['ParaValue'] SrcImagePath = self.__input_paras["DualPolarSAR"]['ParaValue']
paths = SrcImagePath.split(';') paths = SrcImagePath.split(';')
SrcImageName=os.path.split(paths[0])[1].split('.tar.gz')[0] SrcImageName=os.path.split(paths[0])[1].split('.tar.gz')[0]
if len(paths) >= 2: # if len(paths) >= 2:
for i in range(1, len(paths)): # for i in range(1, len(paths)):
SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0] # SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0]
meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml" # meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml"
CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName) # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName)
# 文件夹打包 # 文件夹打包
model_path = "./product.xml"
meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-Roughness.meta.xml")
para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__workspace_processing_path,
out_path1, out_path2).calu_nature()
para_dict.update({"imageinfo_ProductName": "地表粗糙度"})
para_dict.update({"imageinfo_ProductIdentifier": "SurfaceRoughness"})
para_dict.update({"imageinfo_ProductLevel": "5A"})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
file.make_targz(self.__out_para, self.__product_dic) file.make_targz(self.__out_para, self.__product_dic)
logger.info('process_handle success!') logger.info('process_handle success!')
logger.info('progress bar: 100%') logger.info('progress bar: 100%')
@ -371,7 +389,7 @@ if __name__ == '__main__':
except Exception: except Exception:
logger.exception('run-time error!') logger.exception('run-time error!')
finally: finally:
# main_handler.del_temp_workspace() main_handler.del_temp_workspace()
pass pass
end = datetime.datetime.now() end = datetime.datetime.now()
msg = 'running use time: %s ' % (end - start) msg = 'running use time: %s ' % (end - start)

View File

@ -166,7 +166,7 @@ class ROIAlg:
for i in range(0, im_bands): for i in range(0, im_bands):
tif_array[i, :, :][np.isnan(mask_array)] = background_value tif_array[i, :, :][np.isnan(mask_array)] = background_value
tif_array[i, :, :][mask_array == 0] = background_value tif_array[i, :, :][mask_array == 0] = background_value
image_handler.write_img(out_tif_path, proj, geotrans, tif_array) image_handler.write_img(out_tif_path, proj, geotrans, tif_array, '0')
logger.info("cal_roi success, path: %s", out_tif_path) logger.info("cal_roi success, path: %s", out_tif_path)
return True return True

View File

@ -277,6 +277,19 @@ class Filter:
file.del_folder(block_filtered) file.del_folder(block_filtered)
pass pass
def lee_process_sar(self, in_sar, out_sar, win_size, noise_var):
'''
# std::cout << "mode 12"
# std::cout << "SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var"
'''
exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 12, in_sar,
out_sar, win_size, noise_var)
print(exe_cmd)
print(os.system(exe_cmd))
print("==========================================================================")
if __name__ == '__main__': if __name__ == '__main__':
# 示例1 # 示例1
# path = r"I:\MicroWorkspace\product\C-SAR\LeafAreaIndex\Temporary\cai_sartif\HV_0_512_0_512.tif" # path = r"I:\MicroWorkspace\product\C-SAR\LeafAreaIndex\Temporary\cai_sartif\HV_0_512_0_512.tif"

View File

@ -376,7 +376,7 @@ class ImageHandler:
# 写GeoTiff文件 # 写GeoTiff文件
@staticmethod @staticmethod
def write_img(filename, im_proj, im_geotrans, im_data, no_data='null'): def write_img(filename, im_proj, im_geotrans, im_data, no_data='0'):
""" """
影像保存 影像保存
:param filename: 保存的路径 :param filename: 保存的路径
@ -400,10 +400,11 @@ class ImageHandler:
datatype = gdal_dtypes[im_data.dtype.name] datatype = gdal_dtypes[im_data.dtype.name]
else: else:
datatype = gdal.GDT_Float32 datatype = gdal.GDT_Float32
flag = False
# 判读数组维数 # 判读数组维数
if len(im_data.shape) == 3: if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape im_bands, im_height, im_width = im_data.shape
flag = True
else: else:
im_bands, (im_height, im_width) = 1, im_data.shape im_bands, (im_height, im_width) = 1, im_data.shape
@ -420,11 +421,18 @@ class ImageHandler:
if im_bands == 1: if im_bands == 1:
# outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据 # outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据
outband = dataset.GetRasterBand(1) if flag:
outband.WriteArray(im_data) outband = dataset.GetRasterBand(1)
if no_data != 'null': outband.WriteArray(im_data[0])
outband.SetNoDataValue(np.double(no_data)) if no_data != 'null':
outband.FlushCache() outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else:
outband = dataset.GetRasterBand(1)
outband.WriteArray(im_data)
if no_data != 'null':
outband.SetNoDataValue(np.double(no_data))
outband.FlushCache()
else: else:
for i in range(im_bands): for i in range(im_bands):
outband = dataset.GetRasterBand(1 + i) outband = dataset.GetRasterBand(1 + i)

View File

@ -128,6 +128,8 @@ class MachineLeaning:
# dst_ds.SetProjection(sr.ExportToWkt()) # dst_ds.SetProjection(sr.ExportToWkt())
# dst_ds.SetGeoTransform(geo_transform) # dst_ds.SetGeoTransform(geo_transform)
# del dst_ds # del dst_ds
if not os.path.exists(img_path):
logger.error('total:%s,block:%s test data failed !path:%s', block_sum, n, img_path)
logger.info('total:%s,block:%s test data finished !path:%s', block_sum, n, img_path) logger.info('total:%s,block:%s test data finished !path:%s', block_sum, n, img_path)
return True return True
@ -152,7 +154,14 @@ class MachineLeaning:
for path, n in zip(block_features_dir, range(len(block_features_dir))): for path, n in zip(block_features_dir, range(len(block_features_dir))):
name = os.path.split(path)[1] name = os.path.split(path)[1]
features_array = ImageHandler.get_data(path) # features_array = ImageHandler.get_data(path)
band = ImageHandler.get_bands(path)
if band == 1:
features_array = np.zeros((1, 1024, 1024), dtype=float)
feature_array = ImageHandler.get_data(path)
features_array[0, :, :] = feature_array
else:
features_array = ImageHandler.get_data(path)
X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
@ -175,6 +184,62 @@ class MachineLeaning:
# bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path) # bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
return cover_path return cover_path
@staticmethod
def predict_VP(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols):
"""
预测数据
:param clf : svm模型
:return X_test_list: 分块测试集影像路径
"""
ml = MachineLeaning()
# 开启多进程处理
bp = BlockProcess()
block_size = bp.get_block_size(rows, cols)
block_features_dir = X_test_list
bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name,
'pre_result\\') # workspace_processing_path + out_tif_name + '\\'
file.creat_dirs([bp_cover_dir])
processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 7])
pool = multiprocessing.Pool(processes=processes_num)
for path, n in zip(block_features_dir, range(len(block_features_dir))):
name = os.path.split(path)[1]
band = ImageHandler.get_bands(path)
if band == 1:
features_array = np.zeros((1, 1024, 1024), dtype=float)
feature_array = ImageHandler.get_data(path)
features_array[0, :, :] = feature_array
else:
features_array = ImageHandler.get_data(path)
X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \
name.split('_')[-1]
img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
row_begin = int(name.split('_')[-4])
col_begin = int(name.split('_')[-2])
pool.apply_async(ml.predict_blok, (clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
# ml.predict_blok(clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n)
pool.close()
pool.join()
del pool
# 合并影像
data_dir = bp_cover_dir
out_path = workspace_processing_path[0:-1]
bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
# 添加地理信息
cover_path = os.path.join(workspace_processing_path,
out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
# bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
return cover_path
@staticmethod @staticmethod
def get_name_list(feature_tif_dir): def get_name_list(feature_tif_dir):
in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif'))) in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))

View File

@ -84,6 +84,7 @@ class polyfit2d_U:
class TransImgL1A: class TransImgL1A:
def __init__(self, ori_sim_path, roi): def __init__(self, ori_sim_path, roi):
self._begin_r, self._begin_c, self._end_r, self._end_c = 0, 0, 0, 0 self._begin_r, self._begin_c, self._end_r, self._end_c = 0, 0, 0, 0
self.ori2geo_img = None
self._mask = None self._mask = None
self._min_lon, self._max_lon, self._min_lat, self._max_lat = 0, 0, 0, 0 self._min_lon, self._max_lon, self._min_lat, self._max_lat = 0, 0, 0, 0
self.init_trans_para(ori_sim_path, roi) self.init_trans_para(ori_sim_path, roi)
@ -93,6 +94,13 @@ class TransImgL1A:
data = [(self._begin_r + row, self._begin_c + col) for (row, col) in zip(rowcol[0], rowcol[1])] data = [(self._begin_r + row, self._begin_c + col) for (row, col) in zip(rowcol[0], rowcol[1])]
return data return data
def get_lonlat_points(self):
lon = self.ori2geo_img[0, :, :][np.where(self._mask == 1)]
lat = self.ori2geo_img[1, :, :][np.where(self._mask == 1)]
data = [(row, col) for (row, col) in zip(lon, lat)]
return data
###################### ######################
# 插值方法 # 插值方法
###################### ######################
@ -125,10 +133,10 @@ class TransImgL1A:
r_max = np.nanmax(r_c_list[0]) r_max = np.nanmax(r_c_list[0])
c_min = np.nanmin(r_c_list[1]) c_min = np.nanmin(r_c_list[1])
c_max = np.nanmax(r_c_list[1]) c_max = np.nanmax(r_c_list[1])
ori2geo_img = ori2geo_img[:, r_min:r_max + 1, c_min:c_max + 1] self.ori2geo_img = ori2geo_img[:, r_min:r_max + 1, c_min:c_max + 1]
# 开始调用组件 计算 # 开始调用组件 计算
mask = SAR_GEO.cut_L1A_img(ori2geo_img.astype(np.float64), point_list) mask = SAR_GEO.cut_L1A_img(self.ori2geo_img.astype(np.float64), point_list)
self._begin_r = r_min self._begin_r = r_min
self._end_r = r_max self._end_r = r_max
self._begin_c = c_min self._begin_c = c_min
@ -405,105 +413,105 @@ class TransImgL1A:
det_grid=pixel_delta, method=method) det_grid=pixel_delta, method=method)
return result return result
def l1a_2_geo(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='linear'): # def l1a_2_geo(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='linear'):
ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path) # ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path)
# l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path) # # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path)
l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1) # l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1)
pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001 # pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001
pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c) # pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c)
#
lon_data = ori_geo_tif[0, :, :].reshape(-1) # lon_data = ori_geo_tif[0, :, :].reshape(-1)
lat_data = ori_geo_tif[1, :, :].reshape(-1) # lat_data = ori_geo_tif[1, :, :].reshape(-1)
l1a_produc = l1a_produc.reshape(-1) # l1a_produc = l1a_produc.reshape(-1)
idx = np.logical_not(np.isnan(lon_data)) # idx = np.logical_not(np.isnan(lon_data))
lat_data = lat_data[idx] # lat_data = lat_data[idx]
lon_data = lon_data[idx] # lon_data = lon_data[idx]
l1a_produc = l1a_produc[idx] # l1a_produc = l1a_produc[idx]
idx = np.logical_not(np.isnan(lat_data)) # idx = np.logical_not(np.isnan(lat_data))
lat_data = lat_data[idx] # lat_data = lat_data[idx]
lon_data = lon_data[idx] # lon_data = lon_data[idx]
l1a_produc = l1a_produc[idx] # l1a_produc = l1a_produc[idx]
#
gt = [self._min_lon, pixel_delta_x, 0.0, # gt = [self._min_lon, pixel_delta_x, 0.0,
self._max_lat, 0.0, -pixel_delta_y] # self._max_lat, 0.0, -pixel_delta_y]
[lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon] # [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon]
lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y # lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y
lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x # lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x
#
# 获取地理坐标系统信息,用于选取需要的地理坐标系统 # # 获取地理坐标系统信息,用于选取需要的地理坐标系统
srs = osr.SpatialReference() # srs = osr.SpatialReference()
srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84" # srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84"
proj = srs.ExportToWkt() # proj = srs.ExportToWkt()
#
projection = srs.ExportToPROJJSON() # projection = srs.ExportToPROJJSON()
# lower_left_x、lower_left_y、upper_right_x、upper_right_y # # lower_left_x、lower_left_y、upper_right_x、upper_right_y
target_def = AreaDefinition("id1", "WGS84", "proj_id", projection, # target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max]) # lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
lon_data = lon_data.reshape(-1, 1) # lon_data = lon_data.reshape(-1, 1)
lat_data = lat_data.reshape(-1, 1) # lat_data = lat_data.reshape(-1, 1)
l1a_produc = l1a_produc.reshape(-1, 1) # l1a_produc = l1a_produc.reshape(-1, 1)
source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data) # source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data)
lalo_step = [pixel_delta_x, -pixel_delta_y] # lalo_step = [pixel_delta_x, -pixel_delta_y]
radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo') # radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def, # geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def,
radius=radius, neighbours=32, # radius=radius, neighbours=32,
nprocs=8, fill_value=np.nan, # nprocs=8, fill_value=np.nan,
epsilon=0) # epsilon=0)
#
ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc, np.nan) # ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc, np.nan)
#
def l1a_2_geo_int(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='nearest'): # def l1a_2_geo_int(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='nearest'):
ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path) # ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path)
# l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path) # # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path)
l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1) # l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1)
pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001 # pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001
pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c) # pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c)
#
lon_data = ori_geo_tif[0, :, :].reshape(-1) # lon_data = ori_geo_tif[0, :, :].reshape(-1)
lat_data = ori_geo_tif[1, :, :].reshape(-1) # lat_data = ori_geo_tif[1, :, :].reshape(-1)
l1a_produc = l1a_produc.reshape(-1) # l1a_produc = l1a_produc.reshape(-1)
idx = np.logical_not(np.isnan(lon_data)) # idx = np.logical_not(np.isnan(lon_data))
lat_data = lat_data[idx] # lat_data = lat_data[idx]
lon_data = lon_data[idx] # lon_data = lon_data[idx]
l1a_produc = l1a_produc[idx] # l1a_produc = l1a_produc[idx]
idx = np.logical_not(np.isnan(lat_data)) # idx = np.logical_not(np.isnan(lat_data))
lat_data = lat_data[idx] # lat_data = lat_data[idx]
lon_data = lon_data[idx] # lon_data = lon_data[idx]
l1a_produc = l1a_produc[idx] # l1a_produc = l1a_produc[idx]
#
gt = [self._min_lon, pixel_delta_x, 0.0, # gt = [self._min_lon, pixel_delta_x, 0.0,
self._max_lat, 0.0, -pixel_delta_y] # self._max_lat, 0.0, -pixel_delta_y]
[lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon] # [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon]
lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y # lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y
lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x # lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x
#
# 获取地理坐标系统信息,用于选取需要的地理坐标系统 # # 获取地理坐标系统信息,用于选取需要的地理坐标系统
srs = osr.SpatialReference() # srs = osr.SpatialReference()
srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84" # srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84"
proj = srs.ExportToWkt() # proj = srs.ExportToWkt()
#
projection = srs.ExportToPROJJSON() # projection = srs.ExportToPROJJSON()
# lower_left_x、lower_left_y、upper_right_x、upper_right_y # # lower_left_x、lower_left_y、upper_right_x、upper_right_y
target_def = AreaDefinition("id1", "WGS84", "proj_id", projection, # target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max]) # lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
lon_data = lon_data.reshape(-1, 1) # lon_data = lon_data.reshape(-1, 1)
lat_data = lat_data.reshape(-1, 1) # lat_data = lat_data.reshape(-1, 1)
l1a_produc = l1a_produc.reshape(-1, 1) # l1a_produc = l1a_produc.reshape(-1, 1)
source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data) # source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data)
lalo_step = [pixel_delta_x, -pixel_delta_y] # lalo_step = [pixel_delta_x, -pixel_delta_y]
radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo') # radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
if method == 'linear': # if method == 'linear':
geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def, # geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def,
radius=radius, neighbours=32, # radius=radius, neighbours=32,
nprocs=8, fill_value=0, # nprocs=8, fill_value=0,
epsilon=0) # epsilon=0)
elif method == 'nearest': # elif method == 'nearest':
geo_produc = pr.kd_tree.resample_nearest(source_def, l1a_produc, target_def, epsilon=0, # geo_produc = pr.kd_tree.resample_nearest(source_def, l1a_produc, target_def, epsilon=0,
radius_of_influence=50000, # radius_of_influence=50000,
fill_value=0, nprocs=8 # fill_value=0, nprocs=8
) # )
geo_produc = geo_produc[:,:,0] # geo_produc = geo_produc[:,:,0]
ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc) # ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc)
@property @property
def mask(self): def mask(self):

View File

@ -47,6 +47,7 @@ class CreateMetaDict:
# imageinfo_heightspace = -self.ImageHandler.get_geotransform(out_path1)[5] # 投影后的分辨率 # imageinfo_heightspace = -self.ImageHandler.get_geotransform(out_path1)[5] # 投影后的分辨率
# para_dict.update({"imageinfo_widthspace": imageinfo_widthspace}) # para_dict.update({"imageinfo_widthspace": imageinfo_widthspace})
# para_dict.update({"imageinfo_heightspace": imageinfo_heightspace}) # para_dict.update({"imageinfo_heightspace": imageinfo_heightspace})
para_dict.update({"imageinfo_ProductResolution": imageinfo_widthspace}) para_dict.update({"imageinfo_ProductResolution": imageinfo_widthspace})
para_dict.update({"imageinfo_ProductFormat": "GEOTIFF"}) para_dict.update({"imageinfo_ProductFormat": "GEOTIFF"})

View File

@ -228,6 +228,7 @@ class csvHandle:
roi_poly = [(float(lon), float(lat)) for (lon, lat) in points] roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
tr = TransImgL1A(cuted_ori_sim_path, roi_poly) tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
l1a_points = tr.get_roi_points() l1a_points = tr.get_roi_points()
# l1a_points = tr.get_lonlat_points()
if data_use_type == 'train': if data_use_type == 'train':
train_data.append([name, phenology_id, l1a_points, type_data[phenology_id]]) train_data.append([name, phenology_id, l1a_points, type_data[phenology_id]])
elif data_use_type == 'test': elif data_use_type == 'test':

View File

@ -784,7 +784,7 @@ class VegetationHeightMain:
coh_arrayt = AHVToPolSarProS2().read_none_complex_bin_to_array(t6_T11_bin) coh_arrayt = AHVToPolSarProS2().read_none_complex_bin_to_array(t6_T11_bin)
rows, cols=coh_arrayt.shape[0],coh_arrayt.shape[1] rows, cols=coh_arrayt.shape[0],coh_arrayt.shape[1]
# 4、T6->boxcar_filter->T6 # 4、T6->boxcar_filter->T6
logger.info('start computing the filter...') # 线性平滑滤波 logger.info('start computing the filter...') # boxcar滤波
boxcar_filter_tool_path = os.path.join(current_path, "boxcar_filter_T6.exe") boxcar_filter_tool_path = os.path.join(current_path, "boxcar_filter_T6.exe")
# boxcar_filter_tool_path = os.path.join(current_path, "lee_refined_filter_T6.exe") # boxcar_filter_tool_path = os.path.join(current_path, "lee_refined_filter_T6.exe")
master_slave_t6_box = self.__workspace_preprocessed2_path + "master_slave_t6_box""\\" master_slave_t6_box = self.__workspace_preprocessed2_path + "master_slave_t6_box""\\"
@ -793,7 +793,8 @@ class VegetationHeightMain:
master_slave_t6, *(3, 3, 0, 0)) master_slave_t6, *(3, 3, 0, 0))
# PlantHeightAlg().polsar_lee_filter(master_slave_t6_box, boxcar_filter_tool_path, # PlantHeightAlg().polsar_lee_filter(master_slave_t6_box, boxcar_filter_tool_path,
# master_slave_t6, *(3, 3, 0, 0)) # master_slave_t6, *(3, 3, 0, 0))
logger.info("T6 lee_refined_filter finish") # logger.info("T6 lee_refined_filter finish")
logger.info("T6 boxcar_filter finish")
logger.info('progress bar :85') logger.info('progress bar :85')
# 5、 T6->coherence_estimation->T6 相干度估计 # 5、 T6->coherence_estimation->T6 相干度估计
coherence_estimation_path = os.path.join(current_path, "complex_coherence_estimation.exe") coherence_estimation_path = os.path.join(current_path, "complex_coherence_estimation.exe")

View File

@ -12,6 +12,7 @@ import csv
import numpy as np import numpy as np
import mahotas import mahotas
import logging import logging
import random
from tool.algorithm.algtools.CoordinateTransformation import lonlat2geo, geo2imagexy from tool.algorithm.algtools.CoordinateTransformation import lonlat2geo, geo2imagexy
from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.image.ImageHandle import ImageHandler
logger = logging.getLogger("mylog") logger = logging.getLogger("mylog")
@ -203,4 +204,206 @@ class PhenologyMeasCsv:
point.append(float(cells[1])) point.append(float(cells[1]))
pList.append(point) pList.append(point)
pointList.append(pList) pointList.append(pList)
return pointList
class PhenoloyMeasCsv_geo:
def __init__(self, csv_path, preprocessed_paras, max_tran__num_per_class=100000):
self.__csv_path = csv_path
self.__preprocessed_paras = preprocessed_paras
self.__max_tran__num_per_class = max_tran__num_per_class
def api_read_measure(self):
"""
读取csv表格数据api函数
"""
csv_data = self.__readcsv(self.__csv_path)
return self.__trans_measuredata(csv_data)
def api_read_measure_by_name(self, name):
"""
读取csv表格数据api函数
"""
csv_data = self.__readcsv_by_name(self.__csv_path, name)
return self.__trans_measuredata(csv_data)
def class_list(self):
"""
输出csv表中的前三列
"""
reader = csv.reader(open(self.__csv_path, newline=''))
class_list=[]
type_id_name = {}
type_id_parent = {}
for line_data in reader:
class_list.append(line_data) # class_list含有四列
for data in class_list[1:]:
type_parent= data[0]
type_id = int(data[1])
type_name = data[2]
if type_id not in type_id_name.keys():
type_id_name.update({type_id: type_name})
type_id_parent.update({type_id: type_parent})
return type_id_name, type_id_parent
pass
@staticmethod
def __readcsv(csv_path):
"""
读取csv表格数据
:para csv_path: csv文件路径
"""
reader = csv.reader(open(csv_path, newline=''))
csv_list = []
for line_data in reader:
csv_list.append(line_data)
return csv_list[1:]
@staticmethod
def __readcsv_by_name(csv_path, name):
"""
读取csv表格数据
:para csv_path: csv文件路径
"""
reader = csv.reader(open(csv_path, newline=''))
csv_list = []
for line_data in reader:
if name in line_data[0]:
csv_list.append(line_data)
return csv_list
def __trans_measuredata(self, meas_data):
"""
获取多边形区域内所有的点分为训练集数据和测试集数据
:para meas_data: csv读取的实测数据
"""
type_data = {}
n = 1
train_data_list = []
for data in meas_data:
for d in data:
if d == '':
raise Exception('there are empty data!', data)
point_list = []
dataset, rows, cols = self.__get_para_tif_inf()
type_id = int(data[1])
type_name = data[2]
if type_id not in type_data.keys():
train_data_list.append([n, type_id, type_name, []])
type_data.update({type_id: type_name})
n += 1
pointList = self.__roiPolygonAnalysis(data[3])
for points in pointList:
poly = []
for point in points:
lon = float(point[0])
lat = float(point[1])
# projs = lonlat2geo(dataset, lon, lat)
coord = geo2imagexy(dataset, lon, lat)
row = round(coord[1])
col = round(coord[0])
if 0 <= row < rows and 0 <= col < cols:
poly.append([row, col])
else:
logger.warning("point %s is beyond tif scope, in measure data: %s !", point, data)
if poly != []:
point_list.append(self.__render(poly))
for train_data in train_data_list:
if train_data[1] == type_id:
train_data[3] = train_data[3] + self.__render(poly)
if train_data[3] == [] :
raise Exception('there are empty data!', train_data)
num_list = []
for train_data in train_data_list:
num_list.append(len(train_data[3]))
max_num = np.min(num_list)
for train_data in train_data_list:
logger.info(str(train_data[0]) + "," + str(train_data[2]) +"," + "num:" + str(len(train_data[3])))
# max_num = self.__max_tran__num_per_class
logger.info("max number =" + str(max_num) +", random select"+str(max_num)+" point as train data!")
if(len(train_data[3]) > max_num):
train_data[3] = random.sample(train_data[3], max_num)
if len(train_data_list) <= 1:
raise Exception('there is only one label type!', train_data_list)
return train_data_list
@staticmethod
def __render(poly):
# https://www.cnpython.com/qa/51516
"""Return polygon as grid of points inside polygon.
Input : poly (list of lists)
Output : output (list of lists)
"""
xs, ys = zip(*poly)
minx, maxx = min(xs), max(xs)
miny, maxy = min(ys), max(ys)
newPoly = [(int(x - minx), int(y - miny)) for (x, y) in poly]
X = maxx - minx + 1
Y = maxy - miny + 1
grid = np.zeros((X, Y), dtype=np.int8)
mahotas.polygon.fill_polygon(newPoly, grid)
return [(x + minx, y + miny) for (x, y) in zip(*np.nonzero(grid))]
def __get_para_tif_inf(self):
"""
获取影像的信息
:para tif_name: 影像名称
"""
tif_path = self.__preprocessed_paras
ih = ImageHandler()
dataset = ih.get_dataset(tif_path)
rows = ih.get_img_height(tif_path)
cols = ih.get_img_width(tif_path)
return dataset, rows, cols
@staticmethod
def __roiPolygonAnalysis(roiStr):
"""
将csv的POLY数据转为数组
:para roiStr: poly数据
:return pointList: 保存多边形的list
"""
pointList = []
strContent = roiStr.replace("POLYGON", "")
# 解析轮廓字符串为二维数组
bracketsList = []
strTemp = ''
strList = []
for c in strContent:
if c == '(':
bracketsList.append(c)
continue
elif c == ')':
if len(bracketsList) > 0:
bracketsList.pop(0)
if len(strTemp) > 0:
strList.append(strTemp)
strTemp = ''
else:
strTemp += c
for item in strList:
if len(item) == 0:
continue
pTempList = item.split(',')
pList = []
for row in pTempList:
cells = row.split(' ')
if len(cells) != 2:
continue
point = [float(cells[0]), float(cells[1])]
pList.append(point)
pointList.append(pList)
return pointList return pointList

View File

@ -7,9 +7,12 @@
@Date 2021/9/6 @Date 2021/9/6
@Version 1.0.0 @Version 1.0.0
""" """
import glob
import logging import logging
import os import os
import datetime import datetime
import shutil
import pyproj._compat import pyproj._compat
import cv2 import cv2
import numpy as np import numpy as np
@ -19,7 +22,11 @@ from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara
from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.xml.CreatMetafile import CreateMetafile from tool.algorithm.xml.CreatMetafile import CreateMetafile
from tool.algorithm.algtools.ROIAlg import ROIAlg as alg
from VegetationPhenologyXmlInfo import CreateDict, CreateStadardXmlFile from VegetationPhenologyXmlInfo import CreateDict, CreateStadardXmlFile
from VegetationPhenologyAuxData import PhenoloyMeasCsv_geo
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.file.fileHandle import fileHandle from tool.file.fileHandle import fileHandle
import sys import sys
from tool.algorithm.transforml1a.transHandle import TransImgL1A from tool.algorithm.transforml1a.transHandle import TransImgL1A
@ -38,6 +45,7 @@ EXE_NAME = cf.get('exe_name')
LogHandler.init_log_handler('run_log\\' + EXE_NAME) LogHandler.init_log_handler('run_log\\' + EXE_NAME)
logger = logging.getLogger("mylog") logger = logging.getLogger("mylog")
FILTER_SIZE = int(cf.get('filter_size')) FILTER_SIZE = int(cf.get('filter_size'))
MAX_TRAN_NUM = int(cf.get('max_tran__num_per_class'))
file =fileHandle(DEBUG) file =fileHandle(DEBUG)
env_str = os.path.split(os.path.realpath(__file__))[0] env_str = os.path.split(os.path.realpath(__file__))[0]
os.environ['PROJ_LIB'] = env_str os.environ['PROJ_LIB'] = env_str
@ -84,8 +92,11 @@ class PhenologyMain:
self.__create_work_space() self.__create_work_space()
self.__processing_paras = InitPara.init_processing_paras(self.__input_paras) self.__processing_paras = InitPara.init_processing_paras(self.__input_paras)
self.__processing_paras.update(InitPara(DEBUG).get_mult_tar_gz_infs(self.__processing_paras, self.__workspace_preprocessing_path)) self.__processing_paras.update(InitPara(DEBUG).get_mult_tar_gz_infs(self.__processing_paras, self.__workspace_preprocessing_path))
SrcImagePath = self.__input_paras["AHVS"]['ParaValue']
self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', r"VegetationPhenologyProduct.tar.gz") paths = SrcImagePath.split(';')
SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0]
result_name = SrcImageName + "-VP.tar.gz"
self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', result_name)
self.__alg_xml_handler.write_out_para("VegetationPhenologyProduct", self.__out_para) #写入输出参数 self.__alg_xml_handler.write_out_para("VegetationPhenologyProduct", self.__out_para) #写入输出参数
logger.info('check_source success!') logger.info('check_source success!')
logger.info('progress bar: 10%') logger.info('progress bar: 10%')
@ -117,7 +128,8 @@ class PhenologyMain:
file.del_folder(path) file.del_folder(path)
def preprocess_single_tar(self,name,scopes_roi): def preprocess_single_tar(self,name,scopes_roi):
key_list = [key for key in self.__processing_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key)and ('pola' not in key))] key_list = [key for key in self.__processing_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key)and ('pola' not in key)
and ('paraMeter' not in key) and ('sim_ori' not in key)and ('Origin_META' not in key)and ('META' not in key))]
ori_sim_key = [key for key in key_list if ('ori_sim' in key)][0] ori_sim_key = [key for key in key_list if ('ori_sim' in key)][0]
ori_sim_path = self.__processing_paras[ori_sim_key] ori_sim_path = self.__processing_paras[ori_sim_key]
@ -137,6 +149,13 @@ class PhenologyMain:
scopes_roi = pp().cal_scopes_roi(self.__processing_paras) scopes_roi = pp().cal_scopes_roi(self.__processing_paras)
for name in self.__processing_paras['name_list']: for name in self.__processing_paras['name_list']:
self.preprocess_single_tar(name, scopes_roi) self.preprocess_single_tar(name, scopes_roi)
para_names_geo = [name + '_sim_ori']
self.__feature_name_list = para_names_geo
p = pp()
cutted_img_paths, scopes_roi = p.cut_geoimg(self.__workspace_preprocessing_path, para_names_geo,
self.__processing_paras)
self.__preprocessed_paras.update({name + 'sim_ori': cutted_img_paths.get(name + '_sim_ori')})
logger.info('preprocess_handle success!') logger.info('preprocess_handle success!')
logger.info('progress bar: 10%') logger.info('progress bar: 10%')
@ -219,13 +238,36 @@ class PhenologyMain:
self.___FeatureFileNameMap[12] = ['Cloude', "entropy.bin"] self.___FeatureFileNameMap[12] = ['Cloude', "entropy.bin"]
self.___FeatureFileNameMap[13] = ['Cloude', "alpha.bin"] self.___FeatureFileNameMap[13] = ['Cloude', "alpha.bin"]
def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar):
    '''
    Geocode a slant-range SAR raster to WGS84 with bilinear interpolation by
    invoking the external SIMOrthoProgram.exe tool in mode 11.

    :param parameter_path: imaging/orbit parameter file consumed by the tool
    :param dem_rc: row/col lookup raster (presumably the sim_ori product — verify against caller)
    :param in_sar: input SAR image path (slant-range geometry)
    :param out_sar: output geocoded image path

    Tool usage (from the tool's own help output):
    # std::cout << "mode 11";
    # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path";
    '''
    exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe"
    # PROJ_LIB must point at the tool's bundled proj data before it runs;
    # "set ...; &" chains the env assignment and the tool call in one cmd line.
    exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path,
                                                                                      dem_rc, in_sar, out_sar)
    print(exe_cmd)
    # Echo the shell's exit status; no error handling — failures surface downstream.
    print(os.system(exe_cmd))
    print("==========================================================================")
def create_feature_single_tar(self, name): def create_feature_single_tar(self, name):
key_list = [key for key in self.__preprocessed_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key))] key_list = [key for key in self.__preprocessed_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key))]
ori_sim_key = [key for key in key_list if ('ori_sim' in key)][0] ori_sim_key = [key for key in key_list if ('ori_sim' in key)][0]
sim_ori_key = [key for key in key_list if ('sim_ori' in key)][0]
ori_sim_path = self.__preprocessed_paras[ori_sim_key] ori_sim_path = self.__preprocessed_paras[ori_sim_key]
sim_ori_path = self.__preprocessed_paras[sim_ori_key]
hh_path = self.__preprocessed_paras[name + "_HH"]
hh_geo_path = os.path.join(self.__workspace_processing_path, 'hh_geo.tif')
paramter = self.__processing_paras[name + "paraMeter"]
self.calInterpolation_bil_Wgs84_rc_sar_sigma(paramter, sim_ori_path, hh_path, hh_geo_path)
# 读取实测值,获取多边形区域内所有的点,分为训练集数据和测试集数据 # 读取实测值,获取多边形区域内所有的点,分为训练集数据和测试集数据
train_data, test_data, type_map = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(self.__processing_paras['MeasuredData']), ori_sim_path) pm = PhenoloyMeasCsv_geo(self.__processing_paras['MeasuredData'], hh_geo_path, MAX_TRAN_NUM)
train_data_list = pm.api_read_measure_by_name(name)
train_data_dic = csvh.trans_landCover_list2dic(train_data_list)
# train_data, test_data, type_map = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(self.__processing_paras['MeasuredData']), ori_sim_path)
logger.info("read phenology Measure.csv success!") logger.info("read phenology Measure.csv success!")
# 特征分解 # 特征分解
@ -245,98 +287,173 @@ class PhenologyMain:
featureInput = self.__getInputFeatures() featureInput = self.__getInputFeatures()
feature_dir = CreateFeature.decompose_single_tar(hh_hv_vh_vv_list, self.__workspace_processing_path, self.__workspace_preprocessing_path, name, self._env_str, rows, cols, FILTER_SIZE=3, debug=DEBUG, FeatureInput=featureInput) feature_dir = CreateFeature.decompose_single_tar(hh_hv_vh_vv_list, self.__workspace_processing_path, self.__workspace_preprocessing_path, name, self._env_str, rows, cols, FILTER_SIZE=3, debug=DEBUG, FeatureInput=featureInput)
feature_geo_dir = self.features_geo(feature_dir, paramter, sim_ori_path, name)
# # 获取训练集提取特征的信息
# ids = []
# class_ids = []
# ch_names = []
# positions = []
# for data in train_data:
# if data[0] == name:
# class_ids.append(data[1])
# positions.append(data[2])
# ch_names.append(data[3])
# class_id = [map for map in type_map if (data[1] == map[1])][0]
# ids.append(class_id[0])
# train_data_dic = {}
# train_data_dic.update({"ids": ids})
# train_data_dic.update({"class_ids": class_ids})
# train_data_dic.update({"ch_names": ch_names})
# train_data_dic.update({"positions": positions})
# 获取训练集提取特征的信息 # name_test_data =[]
ids = [] # for dt in test_data:
class_ids = [] # if dt[0] == name:
ch_names = [] # name_test_data.append(dt)
positions = [] logger.info("create_features success!")
for data in train_data: logger.info('progress bar: 20%')
if data[0] == name: # return feature_dir, train_data_dic, name_test_data, type_map
class_ids.append(data[1]) return feature_geo_dir, train_data_dic
positions.append(data[2])
ch_names.append(data[3])
class_id = [map for map in type_map if (data[1] == map[1])][0]
ids.append(class_id[0])
train_data_dic = {}
train_data_dic.update({"ids": ids})
train_data_dic.update({"class_ids": class_ids})
train_data_dic.update({"ch_names": ch_names})
train_data_dic.update({"positions": positions})
name_test_data =[]
for dt in test_data:
if dt[0] == name:
name_test_data.append(dt)
return feature_dir, train_data_dic, name_test_data, type_map
def features_geo(self, features_path, paraMeter, sim_ori, sar_name):
    """Geocode every feature tif under *features_path* into a per-SAR output dir.

    :param features_path: directory holding slant-range feature tifs
    :param paraMeter: parameter file passed to the geocoding tool
    :param sim_ori: row/col lookup raster passed to the geocoding tool
    :param sar_name: scene name used to build the output directory
    :return: directory containing the geocoded '*_geo.tif' files
    """
    geo_dir = os.path.join(self.__workspace_processing_path, sar_name, 'features_geo')
    # makedirs(exist_ok=True): also creates the per-scene parent dir;
    # plain os.mkdir would raise if it does not exist yet.
    os.makedirs(geo_dir, exist_ok=True)
    in_tif_paths = list(glob.glob(os.path.join(features_path, '*.tif')))
    for tif_path in in_tif_paths:
        geo_name = os.path.basename(tif_path).split('.')[0] + '_geo.tif'
        out_path = os.path.join(geo_dir, geo_name)
        self.calInterpolation_bil_Wgs84_rc_sar_sigma(paraMeter, sim_ori, tif_path, out_path)
    return geo_dir
def process_handle(self, start): def process_handle(self, start):
""" """
算法主处理函数 算法主处理函数
:return: True or False :return: True or False
""" """
# 生成每个时相的特征, 并提取训练集和测试集 # 生成每个时相的特征, 并提取训练集和测试集
# 每个时相的影像生成特征图 # 每个时相的影像生成特征图
X_train, Y_train = None, None X_train, Y_train = None, None
flag = True flag = True
total_name_list = [] total_name_list = []
test_data = []
X_test_dic = {} X_test_dic = {}
for name in self.__processing_paras['name_list']: for name in self.__processing_paras['name_list']:
feature_dir, train_data_dic, test_data_part, type_map = self.create_feature_single_tar(name)
#生成训练集 feature_dir, train_data_dic = self.create_feature_single_tar(name)
X_train_part, Y_train_part = ml.gene_train_set(train_data_dic, feature_dir) # 生成训练集
X_train_part, Y_train_part, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_dir, 0.07, 0.85)
name_list = ml.get_name_list(feature_dir) name_list = ml.get_name_list(feature_dir)
if optimal_feature == []:
logger.error('特征筛选结果为空,无可用特征作为训练集')
continue
# 生成测试集合 # 生成测试集合
rows, cols = self.get_name_rows_cols(name) X_test_path_list = ml.gene_test_set(feature_dir, optimal_feature)
name_featuresPath_dic_part = ml.vegetationPhenology_combine_feature(feature_dir, self.__workspace_processing_path, name, rows, cols, DEBUG)
X_test_dic_part = self.gene_test_set(test_data_part, name_featuresPath_dic_part, name)
X_test_dic.update(X_test_dic_part)
if flag:
X_train = X_train_part
Y_train = Y_train_part
total_name_list = name_list
flag = False
test_data = test_data_part
else:
X_train = np.vstack((X_train, X_train_part))
Y_train = np.hstack((Y_train, Y_train_part))
total_name_list = total_name_list + name_list
test_data = test_data + test_data_part
logger.info("create_features success!") X_test_dic.update({name: X_test_path_list})
logger.info('progress bar: 20%')
# if DEBUG:
# optimal_feature = [1, 2, 3]
# optimal_X_train = X_train[:, optimal_feature]
# optimal_Y_train = Y_train
# optimal_X_train = optimal_X_train[0:100, :]
# optimal_Y_train = optimal_Y_train[0:100]
# optimal_Y_train[0:100] = 1
# optimal_Y_train[50:100] = 2
# else:
optimal_X_train, optimal_Y_train, optimal_feature = ml.sel_optimal_feature(X_train, Y_train, total_name_list, correlation_threshold=0.7)
logger.info("generate train and test set success!") X_train = X_train_part
logger.info('progress bar: 30%') Y_train = Y_train_part
#RF logger.info("generate train and test set success!")
clf = ml.trainRF(optimal_X_train, optimal_Y_train) logger.info('progress bar: 30%')
logger.info('svm train success!')
logger.info('progress bar: 80%')
# 测试数据 # RF
logger.info('mode testing') clf = ml.trainRF(X_train, Y_train)
product_path = self.predict(clf, X_test_dic, optimal_feature, type_map, start) logger.info('RF train success!')
logger.info('progress bar: 80%')
# 测试数据
logger.info('mode testing')
in_tif_paths = list(glob.glob(os.path.join(feature_dir, '*.tif')))
rows = ImageHandler.get_img_height(in_tif_paths[0])
cols = ImageHandler.get_img_width(in_tif_paths[0])
proj_geo, geo_geo, cover_data_geo = self.imageHandler.read_img(in_tif_paths[0])
product_path = ml.predict_VP(clf, X_test_path_list, name, self.__workspace_processing_path, rows, cols)
proj, geo, cover_data = self.imageHandler.read_img(product_path)
# 形态学闭运算去roi区域噪点
cover_data = np.uint8(cover_data)
kernel = np.ones((5, 5), np.uint8)
cover_data = cv2.erode(cv2.dilate(cover_data, kernel), kernel)
for id, class_id in zip(train_data_dic['ids'], train_data_dic['class_ids']):
cover_data[np.where(cover_data == id)] = class_id
cover_data = np.int16(cover_data)
roi_img = self.imageHandler.get_band_array(self.create_roi(in_tif_paths[0]))
# 获取影像roi区域
cover_data_pro = cover_data * roi_img
cover_geo_path = os.path.join(self.__product_dic, os.path.basename(product_path).split('.tif')[0] + '-VP.tif')
self.imageHandler.write_img(cover_geo_path, proj_geo, geo_geo, cover_data_pro)
self.imageHandler.write_quick_view(cover_geo_path, color_img=True)
meta_xml_path = self.create_meta_file(cover_geo_path)
temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
if os.path.exists(temp_folder) is False:
os.mkdir(temp_folder)
# CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
shutil.copy(meta_xml_path, out_xml)
logger.info('mode test success!') logger.info('mode test success!')
self.create_meta_file(product_path)
# 文件夹打包 # 文件夹打包
file.make_targz(self.__out_para, self.__product_dic) file.make_targz(self.__out_para, self.__product_dic)
logger.info('progress bar: 100%') logger.info('progress bar: 100%')
# """
# 算法主处理函数
# :return: True or False
# """
# # 生成每个时相的特征, 并提取训练集和测试集
# # 每个时相的影像生成特征图
# X_train, Y_train = None, None
# flag = True
# total_name_list = []
# test_data = []
# X_test_dic = {}
# for name in self.__processing_paras['name_list']:
# feature_dir, train_data_dic, test_data_part, type_map = self.create_feature_single_tar(name)
# #生成训练集
# X_train_part, Y_train_part = ml.gene_train_set(train_data_dic, feature_dir)
# name_list = ml.get_name_list(feature_dir)
# # 生成测试集合
# rows, cols = self.get_name_rows_cols(name)
# name_featuresPath_dic_part = ml.vegetationPhenology_combine_feature(feature_dir, self.__workspace_processing_path, name, rows, cols, DEBUG)
# X_test_dic_part = self.gene_test_set(test_data_part, name_featuresPath_dic_part, name)
# X_test_dic.update(X_test_dic_part)
# if flag:
# X_train = X_train_part
# Y_train = Y_train_part
# total_name_list = name_list
# flag = False
# test_data = test_data_part
# else:
# X_train = np.vstack((X_train, X_train_part))
# Y_train = np.hstack((Y_train, Y_train_part))
# total_name_list = total_name_list + name_list
# test_data = test_data + test_data_part
#
# logger.info("create_features success!")
# logger.info('progress bar: 20%')
#
# optimal_X_train, optimal_Y_train, optimal_feature = ml.sel_optimal_feature(X_train, Y_train, total_name_list, correlation_threshold=0.7)
#
# logger.info("generate train and test set success!")
# logger.info('progress bar: 30%')
#
# #RF
# clf = ml.trainRF(optimal_X_train, optimal_Y_train)
# logger.info('svm train success!')
# logger.info('progress bar: 80%')
#
# # 测试数据
# logger.info('mode testing')
# product_path = self.predict(clf, X_test_dic, optimal_feature, type_map, start)
# logger.info('mode test success!')
# self.create_meta_file(product_path)
# # 文件夹打包
# file.make_targz(self.__out_para, self.__product_dic)
# logger.info('progress bar: 100%')
def predict(self, mode, X_test_dic, validity_list, type_map, start): def predict(self, mode, X_test_dic, validity_list, type_map, start):
# 测试数据 # 测试数据
clf = mode clf = mode
@ -394,6 +511,19 @@ class PhenologyMain:
return product_geo_path return product_geo_path
def create_roi(self, img_path):
    """Build the ROI validity mask for *img_path* and return its path.

    Pixels that are NaN in the source image are masked out; the mask tif is
    written into the processing workspace.
    """
    # Derive the mask from the image's valid (non-NaN) extent.
    tif_mask_path = os.path.join(self.__workspace_processing_path, "tif_mask.tif")  # processing_path + "tif_mask.tif"
    alg.trans_tif2mask(tif_mask_path, img_path, np.nan)
    logger.info('create ROI image success!')
    return tif_mask_path
# def test_resamp(): # def test_resamp():
# # 形态学闭运算去roi区域噪点 # # 形态学闭运算去roi区域噪点
# # cover_data = np.uint8(cover_data) # # cover_data = np.uint8(cover_data)
@ -404,26 +534,42 @@ class PhenologyMain:
def create_meta_file(self, product_path): def create_meta_file(self, product_path):
# 生成元文件案例 # 生成元文件案例
xml_path = "./model_meta.xml" # xml_path = "./model_meta.xml"
tem_folder = self.__workspace_path + EXE_NAME + r"\Temporary""\\" tem_folder = self.__workspace_path + EXE_NAME + r"\Temporary""\\"
image_path = product_path image_path = product_path
out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start)
model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 # model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径
CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml() # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml()
#
# SrcImagePath = self.__input_paras["AHVS"]['ParaValue']
# paths = SrcImagePath.split(';')
# SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0]
# if len(paths) >= 2:
# for i in range(1, len(paths)):
# SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0]
# meta_xml_path = os.path.join(self.__product_dic, EXE_NAME + "Product.meta.xml")
# self.type_id_name = csvh.vegePhenology_class_list(self.__processing_paras['MeasuredData'])
#
# CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process2(
# self.type_id_name, None, SrcImageName)
SrcImagePath = self.__input_paras["AHVS"]['ParaValue'] SrcImageName = os.path.basename(product_path).split('.tif')[0]
paths = SrcImagePath.split(';') model_path = "./product.xml"
SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0] meta_xml_path = os.path.join(self.__product_dic, SrcImageName + ".meta.xml")
if len(paths) >= 2: key = os.path.basename(product_path).split('-VP.tif')[0] + '_Origin_META'
for i in range(1, len(paths)): para_dict = CreateMetaDict(image_path, self.__processing_paras[key], self.__workspace_processing_path,
SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0] out_path1, out_path2).calu_nature()
meta_xml_path = os.path.join(self.__product_dic, EXE_NAME + "Product.meta.xml") para_dict.update({"imageinfo_ProductName": "植被物候"})
self.type_id_name = csvh.vegePhenology_class_list(self.__processing_paras['MeasuredData']) para_dict.update({"imageinfo_ProductIdentifier": "VegetationPhenology"})
para_dict.update({"imageinfo_ProductLevel": "4"})
para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "Label"})
CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
return meta_xml_path
CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process2(
self.type_id_name, None, SrcImageName)
if __name__ == '__main__': if __name__ == '__main__':