diff --git a/Ortho/OrthoAlg.py b/Ortho/OrthoAlg.py index 61329df4..08b7c3dd 100644 --- a/Ortho/OrthoAlg.py +++ b/Ortho/OrthoAlg.py @@ -100,10 +100,27 @@ class ScatteringAlg: coef_arr[np.isinf(coef_arr)] = -9999 coef_arr[where_9999_0] = -9999 coef_arr[where_9999_1] = -9999 - # 输出的SAR后向散射系数产品 - ImageHandler.write_img(out_sar_tif, proj, geotrans, coef_arr, -9999) + ## 输出的SAR后向散射系数产品 + # ImageHandler.write_img(out_sar_tif, proj, geotrans, coef_arr, 0) + + tif_array = np.power(10.0, coef_arr / 10.0) # dB --> 线性值 后向散射系数 + + tif_array[np.isnan(tif_array)] = 0 + tif_array[np.isinf(tif_array)] = 0 + tif_array[where_9999_0] = 0 + tif_array[where_9999_1] = 0 + + ImageHandler.write_img(out_sar_tif, proj, geotrans, tif_array, 0) return True + @staticmethod + def lin_to_db(lin_path, db_path): + proj, geotrans, in_data = ImageHandler.read_img(lin_path) + db_arr = 10 * np.log10(in_data) + # db_arr[np.isnan(db_arr)] = -9999 + # db_arr[np.isinf(db_arr)] = -9999 + ImageHandler.write_img(db_path, proj, geotrans, db_arr, -9999) + @@ -1156,6 +1173,8 @@ class Orthorectification(object): # 12、PRF HeaderInformation_json['PRF'] = float( FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['PRF']['NodePath'])) + HeaderInformation_json['Fs'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['Fs']['NodePath'])) # 13、中心时间 HeaderInformation_json['ImageInformation']['CenterTime'] = datetime.datetime.strptime( FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['CenterImageTime']['NodePath']), @@ -1178,6 +1197,7 @@ class Orthorectification(object): self.heightspace=HeaderInformation_json['ImageInformation']['ImageHeightSpace'] self.refrange=HeaderInformation_json['ImageInformation']['refRange'] self.nearrange=HeaderInformation_json['ImageInformation']['NearRange'] + self.Fs = HeaderInformation_json['Fs']*1e6 # Mhz return HeaderInformation_json pass @@ -1440,7 +1460,9 @@ class IndirectOrthorectification(Orthorectification): fp.write("{}\n".format(self.header_info['ImageInformation']['StartTime'])) fp.write("{}\n".format(self.header_info['PRF'])) fp.write("{}\n".format(self.refrange)) - fp.write("{}\n".format(self.widthspace)) + fp.write("{}\n".format(self.Fs)) + fp.write("{}\n".format(self.header_info['ImageInformation']['DopplerParametersReferenceTime'])) + #fp.write("{}\n".format(self.widthspace)) # 多普勒系数 fp.write("{}\n".format(len(self.header_info['ImageInformation']['DopplerCentroidCoefficients']))) @@ -1474,6 +1496,8 @@ class IndirectOrthorectification(Orthorectification): fp.write("{}".format(startTime.tm_mday)) self.paramterFile_path=outparameter_path + + def IndirectOrthorectification(self, FilePath_str,workspace_dir): """ 正射校正组件 @@ -1601,6 +1625,32 @@ class IndirectOrthorectification(Orthorectification): print(exe_cmd) print(os.system(exe_cmd)) print("==========================================================================") + + def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): + ''' + # std::cout << "mode 11"; + # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path, + dem_rc, in_sar, out_sar) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + + def lee_process_sar(self,in_sar, out_sar, win_size, 
noise_var): + ''' + # std::cout << "mode 12" + # std::cout << "SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var" + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 12, in_sar, + out_sar, win_size, noise_var) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + def getPowerTif(self,in_ori_path,out_power_path): ''' diff --git a/Ortho/OrthoMain.py b/Ortho/OrthoMain.py index 807b2764..02bd1aa1 100644 --- a/Ortho/OrthoMain.py +++ b/Ortho/OrthoMain.py @@ -11,6 +11,7 @@ import logging from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml +from tool.algorithm.algtools.PreProcess import PreProcess as pp import tarfile from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource # 导入xml文件读取与检查文件 from OrthoAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2 @@ -126,7 +127,8 @@ class OrthoMain: def check_source(self): """ - 检查算法相关的配置文件,图像,辅助文件是否齐全 + 检查算法相关的配置文件,图 + 像,辅助文件是否齐全 """ if self.__check_handler.check_alg_xml() is False: return False @@ -332,7 +334,7 @@ class OrthoMain: para_dic.update({name1: file_dir}) # {SLC: file_path} # 获取文件夹内的文件 - hh_flag, hv_flag, vh_flag, vv_flag ,dh_flag= 0, 0, 0, 0 ,0 # + hh_flag, hv_flag, vh_flag, vv_flag, dh_flag = 0, 0, 0, 0, 0 # if os.path.exists(file_dir + name + '\\'): in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif'))) if in_tif_paths == []: @@ -492,6 +494,22 @@ class OrthoMain: left_up_lon = 0 left_up_lat = 0 + + def process_sim_ori(self, ori_sim, sim_ori): + + scopes = () + scopes += (ImageHandler.get_scope_ori_sim(ori_sim),) + + intersect_polygon = pp().intersect_polygon(scopes) + if intersect_polygon is None: + raise Exception('create intersect shp fail!') + shp_path = os.path.join(self.__workspace_Temporary_path, 'IntersectPolygon.shp') + if pp().write_polygon_shp(shp_path, intersect_polygon, 4326) is False: + raise Exception('create intersect shp fail!') + sim_ori_process = os.path.join(self.__workspace_Temporary_path, 'sim_ori_process.tif') + pp().cut_img(sim_ori_process, sim_ori, shp_path) + return sim_ori_process + def RD_process_handle(self): # RPC @@ -519,7 +537,7 @@ class OrthoMain: # 3 处理RD in_slc_path=None for slc_path in os.listdir(slc_paths): - if slc_path.find(".tiff")>0 and (slc_path.find("_HH_")>0 or slc_path.find("_VV_")>0 ): + if slc_path.find(".tiff")>0 and (slc_path.find("_HH_")>0 or slc_path.find("_VV_")>0 or slc_path.find("_DH_")>0): in_slc_path=os.path.join(slc_paths,slc_path) break # 获取校正模型后 @@ -548,6 +566,15 @@ class OrthoMain: if(os.path.exists(this_out_dem_rc_path)): os.remove(this_out_dem_rc_path) + this_out_sar_sim_path = out_dir_path + "\\" + "sar_sim.tiff" + if (os.path.exists(this_out_sar_sim_path)): + os.remove(this_out_sar_sim_path) + + this_out_sar_sim_wgs_path = out_dir_path + "\\" + "sar_sim_wgs.tiff" # // 经纬度与行列号映射 + if (os.path.exists(this_out_sar_sim_wgs_path)): + os.remove(this_out_sar_sim_wgs_path) + + this_out_incidence_path = out_dir_path + "\\" + "incidentAngle.tiff"#// 入射角 this_out_localIncidenct_path = out_dir_path + "\\" + "localincidentAngle.tiff"#// 局地入射角 this_out_inc_angle_rpc_path = out_dir_path + "\\" + "RD_incidentAngle.tiff"#// 局地入射角 @@ -568,28 +595,47 @@ class OrthoMain: this_out_ori_sim_tiff = out_dir_path + "\\" + 
"RD_ori_sim.tif"#// 局地入射角 if (os.path.exists(this_out_ori_sim_tiff)): shutil.move(this_out_ori_sim_tiff, out_dir_path + "\\" + "ori_sim-ortho.tif") - this_in_rpc_lon_lat_path = this_out_ori_sim_tiff + + this_out_sim_ori_tiff = out_dir_path + "\\" + "RD_sim_ori.tif" # // 局地入射角 + if (os.path.exists(this_out_sim_ori_tiff)): + shutil.move(this_out_sim_ori_tiff, out_dir_path + "\\" + "sim_ori-ortho.tif") + # GTC 入射角 GTC_rc_path=os.path.join(self.__workspace_package_path,"ori_sim-ortho.tif") GTC_out_path=self.__workspace_package_path parameter_path = os.path.join(self.__workspace_package_path, "orth_para.txt") - dem_rc = os.path.join(self.__workspace_Temporary_path, "dem_rc.tiff") - + this_in_rpc_lon_lat_path = os.path.join(self.__workspace_package_path, "ori_sim-ortho.tif") + dem_rc = os.path.join(self.__workspace_package_path, "sim_ori-ortho.tif") + dem_rc_pro = self.process_sim_ori(this_in_rpc_lon_lat_path, dem_rc) + shutil.move(dem_rc_pro, dem_rc) in_tif_paths = list(glob.glob(os.path.join(slc_paths, '*.tiff'))) for in_tif_path in in_tif_paths: out_sar_path = os.path.join(GTC_out_path, os.path.split(in_tif_path)[1]) slc_path_temp=os.path.join(slc_paths,in_tif_path) - out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","_db.tif").replace("L1A","L1B")).replace("HH","h_h").replace("HV","h_v").replace("VH","v_h").replace("VV","v_v") + out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","-lin.tif").replace("L1A","L1B")).replace("HH","h_h").replace("HV","h_v").replace("VH","v_h").replace("VV","v_v").replace("DH","d_h") # out_power_path=os.path.join(self.__workspace_Temporary_path,slc_path_temp.replace(".tiff","_db.tif")) alg.sar_backscattering_coef(slc_path_temp,self.__in_processing_paras['META'],out_power_path) - temp_slc_path=os.path.join(self.__workspace_package_path, os.path.basename(out_power_path)) - temp_slc_path=temp_slc_path.replace("_db.tif","-ortho.tif") + + lin_tif_path = os.path.join(self.__workspace_Temporary_path, + os.path.basename(out_power_path).split('-')[0] + "-lin_geo.tif") + # Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, + # out_power_path, + # lin_tif_path) + + Orthorectification.calInterpolation_bil_Wgs84_rc_sar_sigma(parameter_path, dem_rc, + out_power_path, + lin_tif_path) + tempout_tif_path = os.path.join(self.__workspace_package_path, os.path.basename(lin_tif_path).split('-')[0] + "-ortho.tif") + alg.lin_to_db(lin_tif_path, tempout_tif_path) # 线性值转回DB值 + + # temp_slc_path=os.path.join(self.__workspace_package_path, os.path.basename(out_power_path)) + # temp_slc_path=temp_slc_path.replace("_db.tif","-ortho.tif") #inter_Range2Geo(self,lon_lat_path , data_tiff , grid_path , space) # Orthorectification.inter_Range2Geo(GTC_rc_path,out_power_path,temp_slc_path,Orthorectification.heightspace) - Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_power_path, temp_slc_path) - break + # Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_power_path, temp_slc_path) # + # break #Orth_Slc.append(temp_slc_path) # power_list.append(out_power_path) @@ -619,7 +665,7 @@ class OrthoMain: # 生成元文件案例 # xml_path = "./model_meta.xml" tem_folder=self.__workspace_path + EXE_NAME + r"\Temporary""\\" - image_path=temp_slc_path# os.path.join(self.__workspace_package_path, "OrthoMapTable.tif") + image_path=tempout_tif_path# os.path.join(self.__workspace_package_path, "OrthoMapTable.tif") out_path1 = 
os.path.join(tem_folder, "trans_geo_projcs.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") # par_dict = CreateDict().calu_nature(image_path, self.processinfo, out_path1, out_path2) @@ -639,10 +685,20 @@ class OrthoMain: meta_xml_path = os.path.join(self.__workspace_package_path, os.path.basename(self.__out_para).replace(".tar.gz",".meta.xml")) para_dict = CreateMetaDict(image_path, self.__in_processing_paras['META'], self.__workspace_package_path, out_path1, out_path2).calu_nature() + para_dict.update({"imageinfo_ProductName": '正射校正'}) + para_dict.update({"imageinfo_ProductIdentifier": 'Ortho'}) + para_dict.update({"imageinfo_ProductLevel": '3A'}) para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "DEM"}) CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) + # 生成压缩包 logger.info('progress bar :94%') logger.info('start make targz..') @@ -678,7 +734,7 @@ if __name__ == '__main__': except Exception: logger.exception("run-time error!") finally: - # OrthoMain.del_temp_workspace() + OrthoMain.del_temp_workspace() pass end = datetime.datetime.now() logger.info('running use time: %s ' % (end - start)) diff --git a/backScattering/BackScatteringAlg.py b/backScattering/BackScatteringAlg.py index d4b6b06c..529420af 100644 --- a/backScattering/BackScatteringAlg.py +++ b/backScattering/BackScatteringAlg.py @@ -1,22 +1,54 @@ # -*- coding: UTF-8 -*- """ @Project :microproduct -@File :BackscatteringAlg.py +@File :OneOrthoAlg.py +@Function :正射校正算法 @Author :KHZ -@Date :2021/9/2 11:14 +@Contact: +@Date :2021/8/14 @Version :1.0.0 -修改历史: -[修改序列] [修改日期] [修改者] [修改内容] - 1 2022-6-29 石海军 1.int16矩阵转float32;2.对数形式(单位dB)转指数形式(无单位) """ +# from re import I, T, X, match +# from types import DynamicClassAttribute +# from numpy.core.einsumfunc import _parse_possible_contraction +# from numpy.core.fromnumeric import shape +# from numpy.core.shape_base import block +# from numpy.lib import row_stack +# from numpy.lib.function_base import delete +import psutil import os +import gc +# import sys +# import scipy +# from scipy.sparse import data +# from scipy.sparse.construct import random +import xmltodict import numpy as np -from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler -from tool.algorithm.image.ImageHandle import ImageHandler +import json +import datetime, time +import yaml +import math +import time +#import cv2 +#import cv2 as cv from osgeo import gdal, gdalconst -from concurrent.futures import ThreadPoolExecutor, as_completed -# env_str = os.getcwd() -# os.environ['PROJ_LIB'] = env_str +# from yaml.events import AliasEvent, NodeEvent +from OrthoAuxData import OrthoAuxData +# from OrthoImage import ImageHandler +from tool.algorithm.image.ImageHandle import ImageHandler +from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler +# from logHandler import LogHandler +from osgeo import osr +from scipy import interpolate +import scipy.io as scipyio +import copy +import scipy.sparse as ss +import shutil +import pandas +import uuid +from concurrent.futures._base import as_completed, wait +from concurrent.futures.thread import ThreadPoolExecutor +from 
multiprocessing import Pool class ScatteringAlg: def __init__(self): @@ -59,22 +91,123 @@ class ScatteringAlg: QualifyValue = MetaDataHandler.get_QualifyValue(meta_file_path, polarization) Kdb = MetaDataHandler.get_Kdb(meta_file_path, polarization) - # 10 * (alog10((b1 * b1 + b2 * b2) * ((3690.385986 / 32767) * (3690.385986 / 32767)))) - 32.565000 # 计算后向散射系数 #对数形式 coef_arr = 10 * (np.log10(intensity_arr * ((QualifyValue/32767)**2))) - Kdb + coef_arr[np.isnan(coef_arr)] = -9999 coef_arr[np.isinf(coef_arr)] = -9999 coef_arr[where_9999_0] = -9999 coef_arr[where_9999_1] = -9999 - # 输出的SAR后向散射系数产品 - ImageHandler.write_img(out_sar_tif, proj, geotrans, coef_arr, -9999) + ## 输出的SAR后向散射系数产品 + # ImageHandler.write_img(out_sar_tif, proj, geotrans, coef_arr, 0) + + tif_array = np.power(10.0, coef_arr / 10.0) # dB --> 线性值 后向散射系数 + + tif_array[np.isnan(tif_array)] = 0 + tif_array[np.isinf(tif_array)] = 0 + tif_array[where_9999_0] = 0 + tif_array[where_9999_1] = 0 + + + ImageHandler.write_img(out_sar_tif, proj, geotrans, tif_array, 0) return True + @staticmethod + def lin_to_db(lin_path, db_path): + proj, geotrans, in_data = ImageHandler.read_img(lin_path) + db_arr = 10 * np.log10(in_data) + # db_arr[np.isnan(db_arr)] = -9999 + # db_arr[np.isinf(db_arr)] = -9999 + ImageHandler.write_img(db_path, proj, geotrans, db_arr, -9999) -############### -# RPC 模块 -############### + +def FindInfomationFromJson(HeaderFile_dom_json, node_path_list): + """ + 在Json文件中,按照指定路径解析出制定节点 + """ + result_node = HeaderFile_dom_json + for nodename in node_path_list: + result_node = result_node[nodename] + return result_node + + +def GetVectorNorm(Vecter): + """ + 得到向量的模 + """ + Vecter = Vecter.reshape(-1,1) + Vecter_Norm_pow = np.matmul(Vecter.T,Vecter) + return np.sqrt(Vecter_Norm_pow) + + +def XYZOuterM2(A, B): + """ + 外积(叉乘),日后版本换成可以任意维度的外积运算方程 + args: + A:nx3 + B:nx3 + """ + cnt = A.shape[0] + C = np.zeros((cnt, 3)) + C[:, 0] = A[:, 1] * B[:, 2] - A[:, 2] * B[:, 1] + C[:, 1] = A[:, 2] * B[:, 0] - A[:, 0] * B[:, 2] + C[:, 2] = A[:, 0] * B[:, 1] - A[:, 1] * B[:, 0] + return C + +def LinearSampling_raster(source_raster,tar_raster,source_start_row,source_start_col,tar_start_row,tar_start_col): + ''' 双线性重采样 + agrs: + source_raster:原影像 + tar_raster:目标影像 + source_start_row:原影像的起始行号 + source_start_col:原影像的起始列号 + tar_start_row:目标影像的起始行号 + tar_start_col:目标影像的起始列号 + return: + tar_raster:目标影像 + ''' + # 根据原影像的起始行列号与目标影像行列号,计算目标影像行列号的校正值 + d_row=tar_start_row-source_start_row # + d_col=tar_start_col-source_start_col # + + + source_height=source_raster.shape[0] + source_width=source_raster.shape[1] + tar_height=tar_raster.shape[0] + tar_width=tar_raster.shape[1] + + source_row=(np.ones((source_width,source_height))*range(source_height)).transpose(1,0) + source_col=np.ones((source_height,source_width))*range(source_width) + + tar_row=(np.ones((tar_width,tar_height))*range(tar_height)).transpose(1,0) + tar_col=np.ones((tar_height,tar_width))*range(tar_width) + + tar_row=tar_row+d_row # 坐标系变换 + tar_col=tar_col+d_col # 坐标系变换 + # 确定行列号 + last_source_row=np.floor(tar_row) + last_source_col=np.floor(tar_col) + next_source_row=np.ceil(tar_row) + next_source_col=np.ceil(tar_col) + + # 双线性重采样模型示意图, + # y1 r1 y2 + # + # r + # + # y3 r2 y4 + # 计算重采样 + r1=source_raster[last_source_row,last_source_col]*(tar_col-last_source_col)+source_raster[last_source_row,next_source_col]*(next_source_col-tar_col) + r2=source_raster[next_source_row,last_source_col]*(tar_col-last_source_col)+source_raster[next_source_row,next_source_col]*(next_source_col-tar_col) + 
tar_raster=r1*(tar_row-last_source_row)+r2*(next_source_row-tar_row) + tar_raster=tar_raster.reshape(tar_height,tar_width) # 目标影像 + return tar_raster.copy() + +'''-----双线性重采样-----''' + + +""" RPC 校正部分 """ def parse_rpc_file(rpc_file): # rpc_file:.rpc文件的绝对路径 @@ -404,7 +537,9 @@ class RPCModel: y1 = apply_rfm(self.row_num, self.row_den, lat, lon + EPS, alt) x2 = apply_rfm(self.col_num, self.col_den, lat + EPS, lon, alt) y2 = apply_rfm(self.row_num, self.row_den, lat + EPS, lon, alt) - + + + n = 0 while not np.all((x0 - col) ** 2 + (y0 - row) ** 2 < 1e-18): @@ -461,9 +596,48 @@ class RPCModel: def rpc_row_col(params): rpc_mdl,r_block,c_block,ati_block,i,block_shape=params - r_block ,c_block = rpc_mdl.localization(r_block.reshape(-1).astype(np.int32),c_block.reshape(-1).astype(np.int32) ,ati_block ) + r_block ,c_block = rpc_mdl.localization(r_block,c_block,0) + print("\t{}\t".format(i)) return [i,r_block ,c_block,block_shape] + +def get_RPC_lon_lat(in_rpc_tiff,out_rpc_rc_path): + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} ".format(exe_path,8,in_rpc_tiff,out_rpc_rc_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") +def getRCImageRC2(inputPath,out_rpc_rc_path): + #shutil.copy2(inputPath, out_rpc_rc_path) + # 创建重采样输出文件(设置投影及六参数) + input_rpc_sar=gdal.Open(inputPath) + + driver = gdal.GetDriverByName('GTiff') + output = driver.Create(out_rpc_rc_path, input_rpc_sar.RasterXSize,input_rpc_sar.RasterYSize, 2,gdal.GDT_Float32) + output.SetGeoTransform(list(input_rpc_sar.GetGeoTransform())) + output.SetProjection(input_rpc_sar.GetProjection()) + # 参数说明 输入数据集、输出文件、输入投影、参考投影、重采样方法(最邻近内插\双线性内插\三次卷积等)、回调函数 + + rpc_rc_img=output + # 计算行列号 + rc_height=rpc_rc_img.RasterYSize + rc_width=rpc_rc_img.RasterXSize + + + for i in range(0,rc_height,100): + c_block=np.ones((100,rc_width)).astype(np.float32)*(np.array(list(range(rc_width))).reshape(1,rc_width)) + r_block=np.ones((100,rc_width)).astype(np.float32)*(np.array(list(range(100))).reshape(100,1)) + r_block=r_block+i + if rc_height-i>100: + rpc_rc_img.GetRasterBand(1).WriteArray(r_block.astype(np.float32),0,i) + rpc_rc_img.GetRasterBand(2).WriteArray(c_block.astype(np.float32),0,i) + else: + num=rc_height-i + rpc_rc_img.GetRasterBand(1).WriteArray(r_block[:num,:].astype(np.float32),0,i) + rpc_rc_img.GetRasterBand(2).WriteArray(c_block[:num,:].astype(np.float32),0,i) + + del rpc_rc_img,output,input_rpc_sar + def getRCImageRC(inputPath,out_rpc_rc_path,rpc_file_path): rpc_tool = read_rpc_file(rpc_file_path) #shutil.copy2(inputPath, out_rpc_rc_path) @@ -481,22 +655,2268 @@ def getRCImageRC(inputPath,out_rpc_rc_path,rpc_file_path): rc_height=rpc_rc_img.RasterYSize rc_width=rpc_rc_img.RasterXSize - with ThreadPoolExecutor(max_workers=8) as t: + + with Pool(8) as t: plist=[] - for i in range(0,rc_height,100): - c_block=np.ones((100,rc_width)).astype(np.float32)*(np.array(list(range(rc_width))).reshape(1,rc_width)) - r_block=np.ones((100,rc_width)).astype(np.float32)*(np.array(list(range(100))).reshape(100,1)) + for i in range(0,rc_height,1000): + c_block=np.ones((1000,rc_width)).astype(np.float32)*(np.array(list(range(rc_width))).reshape(1,rc_width)) + r_block=np.ones((1000,rc_width)).astype(np.float32)*(np.array(list(range(1000))).reshape(1000,1)) r_block=r_block+i - if not rc_height-i>100: + if not rc_height-i>1000: num=rc_height-i r_block=r_block[:num,:].astype(np.float32) 
c_block=c_block[:num,:].astype(np.float32) block_shape=r_block.shape - plist.append(t.submit(rpc_row_col,[rpc_tool,r_block.reshape(-1).astype(np.int32),c_block.reshape(-1).astype(np.int32) ,c_block.reshape(-1)*0+0,i,block_shape])) - for future in as_completed(plist): - data = future.result() + plist.append(t.apply_async(rpc_row_col,args=([rpc_tool,r_block.reshape(-1).astype(np.int32),c_block.reshape(-1).astype(np.int32) ,c_block.reshape(-1)*0+0,i,block_shape],))) + t.close() + t.join() + for future in plist: + data = future.get() i,r_block ,c_block,block_shape=data rpc_rc_img.GetRasterBand(1).WriteArray(r_block.reshape(block_shape).astype(np.float32),0,i) rpc_rc_img.GetRasterBand(2).WriteArray(c_block.reshape(block_shape).astype(np.float32),0,i) - del rpc_rc_img,output,input_rpc_sar \ No newline at end of file + del rpc_rc_img,output,input_rpc_sar +""" RPC 结束 """ + + +class SatelliteOrbit(object): + def __init__(self) -> None: + super().__init__() + self.starttime = 1262275200.0 + self.modelName="" + + def get_starttime(self): + ''' + 返回卫星轨道时间起算点 + ''' + return self.starttime + + def ReconstructionSatelliteOrbit(self, GPSPoints_list): + ''' + 重建卫星轨道,使用多项式拟合法 + args: + GPSPoints_list:GPS 卫星轨道点 + return: + SatelliteOrbitModel 卫星轨道模型 + ''' + self.SatelliteOrbitModel = None + + def SatelliteSpaceState(self, time_float): + ''' + 根据时间戳,返回对应时间的卫星的轨迹状态 + args: + time_float:时间戳 + return: + State_list:[time,Xp,Yp,Zp,Vx,Vy,Vz] + ''' + return None + + +class SatelliteOrbitFitPoly(SatelliteOrbit): + ''' + 继承于SatelliteOribit类,为拟合多项式实现方法 + ''' + + def __init__(self) -> None: + super().__init__() + self.modelName="多项式" + self.polynum=4 + + def ReconstructionSatelliteOrbit(self, GPSPoints_list, starttime): + if len(GPSPoints_list)==2: + self.polynum=1 + self.starttime = starttime + + record_count = len(GPSPoints_list) + time_arr = np.zeros((record_count, 1), dtype=np.float64) # 使用np.float64只是为了精度高些;如果32位也能满足需求,请用32位 + state_arr = np.zeros((record_count, 6), dtype=np.float64) + A_arr = np.zeros((self.polynum+1, 6), dtype=np.float64) # 四次项 + X=np.ones((record_count,self.polynum+1),dtype=np.float64) # 记录时间坐标 + # 将点记录转换为自变量矩阵、因变量矩阵 + + for i in range(record_count): + GPSPoint = GPSPoints_list[i] + time_ = GPSPoint[0] - self.starttime # 为了保证精度,对时间进行缩放 + X[i,:]=np.array([1,time_]) + state_arr[i, :] = np.array(GPSPoint[1:],dtype=np.float64).reshape(1,6) # 空间坐标 + self.model_f=[] + for i in range(6): + Y = state_arr[:, i].reshape(-1,1) + A_arr[:,i]=np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T,X)),X.T),Y)[:,0] + + self.A_arr=copy.deepcopy(A_arr.copy()) + return self.A_arr + elif len(GPSPoints_list) > 6: + self.polynum=4 + # 多项式的节点数,理论上是超过5个可以起算,这里为了精度选择10个点起算。 + # 多项式 XA=Y ==> A=(X'X)^X'Y,其中 A 为待求系数,X为变量,Y为因变量 + # 这里使用三次项多项式,共有6组参数。 + # 声明自变量,因变量,系数矩阵 + self.starttime = starttime + + record_count = len(GPSPoints_list) + time_arr = np.zeros((record_count, 1), dtype=np.float64) # 使用np.float64只是为了精度高些;如果32位也能满足需求,请用32位 + state_arr = np.zeros((record_count, 6), dtype=np.float64) + A_arr = np.zeros((self.polynum+1, 6), dtype=np.float64) # 四次项 + X=np.ones((record_count,self.polynum+1),dtype=np.float64) # 记录时间坐标 + # 将点记录转换为自变量矩阵、因变量矩阵 + + for i in range(record_count): + GPSPoint = GPSPoints_list[i] + time_ = GPSPoint[0] - self.starttime # 为了保证精度,对时间进行缩放 + X[i,:]=np.array([1,time_,time_**2,time_**3,time_**4]) + state_arr[i, :] = np.array(GPSPoint[1:],dtype=np.float64).reshape(1,6) # 空间坐标 + self.model_f=[] + for i in range(6): + Y = state_arr[:, i].reshape(-1,1) + 
A_arr[:,i]=np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T,X)),X.T),Y)[:,0] + + self.A_arr=copy.deepcopy(A_arr.copy()) + ''' 测试误差 + from matplotlib import pyplot + label_list=['x','y','z','vx','vy','vz'] + color_list=['r','g','b','gold','gray','pink'] + pyplot.figure() + for i in range(6): + Y = state_arr[:, i] + Y_predict=self.model_f[i](X) + pyplot.subplot(int("23{}".format(i+1))) + d=Y-Y_predict + pyplot.plot(X,d,label=label_list[i],color=color_list[i]) + pyplot.title("max:{}".format(np.max(d))) + #self.model_f.append(interpolate.interp1d(X,Y,kind='cubic',fill_value='extrapolate')) + pyplot.legend() + pyplot.show() + ''' + return self.A_arr + else: + self.A_arr = None + return None + + def SatelliteSpaceState(self, time_float): + ''' + 逐像素求解 + 根据时间戳,返回对应时间的卫星的轨迹状态,会自动计算与起算时间之差 + args: + time_float:时间戳 + return: + State_list:[time,Xp,Yp,Zp,Vx,Vy,Vz] + ''' + if self.model_f is None: + return None + + result_arr=np.zeros((1,7)) + + time_float = time_float - self.starttime + result_arr[0,0]=time_float + #time_arr[0, 4] = time_arr[0, 3] * time_float ** 4 + time_float=np.array([1,time_float,time_float**2,time_float**3,time_float**4]).reshape(1,5) + result_arr=np.matmul(time_float,self.A_arr) + return [time_float,result_arr] + + def getSatelliteSpaceState(self, time_array): + ''' + 矩阵求解 + 根据时间戳矩阵,返回对应时刻的卫星空间状态(位置,速度),且会自动计算与起算时间之差 + args: + time_array:nparray nx1 时间戳 + return: + SatellitSpaceStateArray:nparray nx6 状态信息 + ''' + if self.model_f is None: + return None # 返回None,表示没有结果 + if self.polynum==4: + n=time_array.shape[0] + result_arr_=np.zeros((n,6),dtype=np.float64) + time_float = time_array - self.starttime + time_float=time_float.reshape(-1) # nx1 + time_arr=np.ones((time_float.shape[0],5)) # nx5 + time_arr[:,1]=time_float + time_arr[:,2]=time_float**2 + time_arr[:,3]=time_float**3 + time_arr[:,4]=time_float**4 + result_arr_=np.matmul(time_arr,self.A_arr) # nx5 5x6 + #time_arr[0, 4] = time_arr[0, 3] * time_float ** 4 + #result_arr=result_arr_ + return result_arr_ # 位置矩阵 + else: + n=time_array.shape[0] + result_arr_=np.zeros((n,6),dtype=np.float64) + time_float = time_array - self.starttime + time_float=time_float.reshape(-1) # nx1 + time_arr=np.ones((time_float.shape[0],self.polynum+1)) # nx5 + time_arr[:,1]=time_float + result_arr_=np.matmul(time_arr,self.A_arr) # nx5 5x6 + #time_arr[0, 4] = time_arr[0, 3] * time_float ** 4 + #result_arr=result_arr_ + return result_arr_ # 位置矩阵 + + +def ReconstructionSatelliteOrbit(GPSPoints_list, starttime): + ''' + 构建卫星轨道 + args: + GPSPoints_list:卫星轨道点 + starttime:起算时间 + ''' + if len(GPSPoints_list) > 10: + SatelliteOrbitModel = SatelliteOrbitFitPoly() + if SatelliteOrbitModel.ReconstructionSatelliteOrbit(GPSPoints_list, starttime=starttime) is None: + return None + return SatelliteOrbitModel + elif len(GPSPoints_list)==2: + SatelliteOrbitModel = SatelliteOrbitFitPoly() + if SatelliteOrbitModel.ReconstructionSatelliteOrbit(GPSPoints_list, starttime=starttime) is None: + return None + return SatelliteOrbitModel + + + +class DEMProcess(object): + def __init__(self): + pass + + @staticmethod + def get_extent(fn): + ''' + 原文链接:https://blog.csdn.net/XBR_2014/article/details/85255412 + ''' + ds = gdal.Open(fn) + rows = ds.RasterYSize + cols = ds.RasterXSize + # 获取图像角点坐标 + gt = ds.GetGeoTransform() + minx = gt[0] + maxy = gt[3] + maxx = gt[0] + gt[1] * rows + miny = gt[3] + gt[5] * cols + return (minx, maxy, maxx, miny) + + @staticmethod + def img_mosaic(in_files, out_dem_path): + # 通过两两比较大小,将最终符合条件的四个角点坐标保存 + # 即为拼接图像的四个角点坐标 + minX, maxY, maxX, minY = 
DEMProcess.get_extent(in_files[0]) + for fn in in_files[1:]: + minx, maxy, maxx, miny = DEMProcess.get_extent(fn) + minX = min(minX, minx) + maxY = max(maxY, maxy) + maxX = max(maxX, maxx) + minY = min(minY, miny) + + # 获取输出图像的行列数 + in_ds = gdal.Open(in_files[0]) + bands_num = in_ds.RasterCount + gt = in_ds.GetGeoTransform() + rows = int((maxX - minX) / abs(gt[5])) + cols = int((maxY - minY) / gt[1]) + + # 判断栅格数据的数据类型 + datatype = gdal.GDT_UInt16 + + # 创建输出图像 + driver = gdal.GetDriverByName('GTiff') + out_dem = os.path.join(out_dem_path, 'mosaic0.tif') + out_ds = driver.Create(out_dem, cols, rows, bands_num, datatype) + out_ds.SetProjection(in_ds.GetProjection()) + + gt = list(in_ds.GetGeoTransform()) + gt[0], gt[3] = minX, maxY + out_ds.SetGeoTransform(gt) + + for fn in in_files: + in_ds = gdal.Open(fn) + x_size = in_ds.RasterXSize + y_size = in_ds.RasterYSize + trans = gdal.Transformer(in_ds, out_ds, []) + success, xyz = trans.TransformPoint(False, 0, 0) + x, y, z = map(int, xyz) + for i in range(1, bands_num + 1): + data = in_ds.GetRasterBand(i).ReadAsArray() + out_band = out_ds.GetRasterBand(i) + out_data = out_band.ReadAsArray(x, y, x_size, y_size) + data = np.maximum(data, out_data) + out_band.WriteArray(data, x, y) + + del in_ds, out_band, out_ds + + @staticmethod + def dem_clip(OutFilePath, DEMFilePath, SelectArea): + ''' + 根据选择范围裁剪DEM,并输出 + agrs: + outFilePath:裁剪DEM输出地址 + DEMFilePath:被裁减DEM地址 + SelectArea:list [(xmin,ymax),(xmax,ymin)] 框选范围 左上角,右下角 + ''' + DEM_ptr = gdal.Open(DEMFilePath) + DEM_GeoTransform = DEM_ptr.GetGeoTransform() # 读取影像的投影变换 + DEM_InvGeoTransform = gdal.InvGeoTransform(DEM_GeoTransform) + SelectAreaArrayPoints = [gdal.ApplyGeoTransform(DEM_InvGeoTransform, p[0], p[1]) for p in SelectArea] + SelectAreaArrayPoints = list(map(lambda p: (int(p[0]), int(p[1])), SelectAreaArrayPoints)) # 确定坐标 + + [(ulx, uly), (brx, bry)] = SelectAreaArrayPoints + rowCount, colCount = bry - uly, brx - ulx + + # 输出DEM的桌面坐标转换 + Out_Transfrom = list(DEM_GeoTransform) + Out_Transfrom[0] = SelectArea[0][0] + Out_Transfrom[3] = SelectArea[0][1] + + # 构建输出DEM + Bands_num = DEM_ptr.RasterCount + gtiff_driver = gdal.GetDriverByName('GTiff') + datatype = gdal.GDT_UInt16 + out_dem = gtiff_driver.Create(OutFilePath, colCount, rowCount, Bands_num, datatype) + out_dem.SetProjection(DEM_ptr.GetProjection()) + out_dem.SetGeoTransform(Out_Transfrom) + + for i in range(1, Bands_num + 1): + data_band = DEM_ptr.GetRasterBand(i) + out_band = out_dem.GetRasterBand(i) + data = data_band.ReadAsArray(ulx, uly, colCount, rowCount) + out_band.WriteArray(data) + del out_dem + + @staticmethod + def dem_merged(in_dem_path, meta_file_path, out_dem_path): + ''' + DEM重采样函数,默认坐标系为WGS84 + agrs: + in_dem_path: 输入的DEM文件夹路径 + meta_file_path: 输入的xml元文件路径 + out_dem_path: 输出的DEM文件夹路径 + ''' + # 读取文件夹中所有的DEM + dem_file_paths=[os.path.join(in_dem_path,dem_name) for dem_name in os.listdir(in_dem_path) if dem_name.find(".tif")>=0 and dem_name.find(".tif.")==-1] + spatialreference=osr.SpatialReference() + spatialreference.SetWellKnownGeogCS("WGS84") # 设置地理坐标,单位为度 degree # 设置投影坐标,单位为度 degree + spatialproj=spatialreference.ExportToWkt() # 导出投影结果 + # 将DEM拼接成一张大图 + mergeFile =gdal.BuildVRT(os.path.join(out_dem_path,"mergedDEM_VRT.tif"),dem_file_paths) + out_DEM=os.path.join(out_dem_path,"mergedDEM.tif") + gdal.Warp(out_DEM, + mergeFile, + format="GTiff", + dstSRS=spatialproj, + dstNodata=-9999, + outputType=gdal.GDT_Float32) + time.sleep(3) + #gdal.CloseDir(out_DEM) + return out_DEM + @staticmethod + def 
dem_resampled(in_dem_path,out_dem_path,samling_f): + in_dem=gdal.Open(in_dem_path,gdalconst.GA_ReadOnly) + width=in_dem.RasterXSize + height=in_dem.RasterYSize + gt=list(in_dem.GetGeoTransform()) + width=width*samling_f + height=height*samling_f + gt=[gt[0],gt[1]/samling_f,gt[2]/samling_f,gt[3],gt[4]/samling_f,gt[5]/samling_f] + driver = gdal.GetDriverByName('GTiff') + output = driver.Create(out_dem_path, width,height, 1,in_dem.GetRasterBand(1).DataType) + output.SetGeoTransform(gt) + output.SetProjection(in_dem.GetProjection()) + # 参数说明 输入数据集、输出文件、输入投影、参考投影、重采样方法(最邻近内插\双线性内插\三次卷积等)、回调函数 + gdal.ReprojectImage(in_dem, output, in_dem.GetProjection(), output.GetProjection(), gdalconst.GRA_CubicSpline,0.0,0.0,) + return out_dem_path + @staticmethod + def dem_resampled_RPC(in_dem_path,refrence_img_path,out_dem_path): + in_dem=gdal.Open(in_dem_path,gdalconst.GA_ReadOnly) + refrence_img=gdal.Open(refrence_img_path,gdalconst.GA_ReadOnly) + width=refrence_img.RasterXSize + height=refrence_img.RasterYSize + gt=list(refrence_img.GetGeoTransform()) + driver = gdal.GetDriverByName('GTiff') + output = driver.Create(out_dem_path, width,height, 1,in_dem.GetRasterBand(1).DataType) + output.SetGeoTransform(gt) + output.SetProjection(refrence_img.GetProjection()) + # 参数说明 输入数据集、输出文件、输入投影、参考投影、重采样方法(最邻近内插\双线性内插\三次卷积等)、回调函数 + gdal.ReprojectImage(in_dem, output, in_dem.GetProjection(), output.GetProjection(), gdalconst.GRA_CubicSpline,0.0,0.0,) + return out_dem_path + # referencefile = gdal.Open(referencefilefilePath, gdal.GA_ReadOnly) + # referencefileProj = referencefile.GetProjection() + # referencefileTrans = referencefile.GetGeoTransform() + # bandreferencefile = referencefile.GetRasterBand(1) + # Width= referencefile.RasterXSize + # Height = referencefile.RasterYSize + # nbands = referencefile.RasterCount + # # 创建重采样输出文件(设置投影及六参数) + # driver = gdal.GetDriverByName('GTiff') + # output = driver.Create(outputfilePath, Width,Height, nbands, bandreferencefile.DataType) + # output.SetGeoTransform(referencefileTrans) + # output.SetProjection(referencefileProj) + # # 参数说明 输入数据集、输出文件、输入投影、参考投影、重采样方法(最邻近内插\双线性内插\三次卷积等)、回调函数 + # gdal.ReprojectImage(inputrasfile, output, inputProj, referencefileProj, gdalconst.GRA_Bilinear,0.0,0.0,) + + +class Orthorectification(object): + """ + 正射校正模块类 + """ + def __init__(self, configPath="./config.ymal") -> None: + super().__init__() + # 常量声明 + # 加载配置文件 + d=os.listdir(".") + with open(configPath, 'r', encoding='utf-8') as f: + const = f.read() + self.config = yaml.load(const, Loader=yaml.FullLoader) + self.config['SatelliteOribit']['StartTime'] = datetime.datetime.strptime( + self.config['SatelliteOribit']['StartTime']['Value'], + self.config['SatelliteOribit']['StartTime']['format']).timestamp() # 转化成UTC时间格式 + self.R_e = self.config['SatelliteOribit']['ReferenceSpheroid']['Re'] # m + self.R_p = self.config['SatelliteOribit']['ReferenceSpheroid']['Rp'] # m + self.w_e = self.config['SatelliteOribit']['ReferenceSpheroid']['we'] # rad/s + + def ParseHearderFile(self, HeaderFilePath_str): + """ + 解析头文件,返回计算所需要的必要信息。 + args: + HeaderFilePath_str: 头文件地址 + return: + HeaderFileInfomation_json: 头文件信息包 + """ + + with open(HeaderFilePath_str, 'r', encoding='utf-8') as fp: + HeaderFile_dom_str = fp.read() + HeaderFile_dom = xmltodict.parse(HeaderFile_dom_str) # 将XML转成json文本 + HeaderFile_dom_json = json.loads(json.dumps(HeaderFile_dom)) # 将json字符串,转成json对象(对应于python中的dict) + # 获取头文件 + HeaderInformation_json = {} + # 解析轨道节点,假定是GPS节点 + HeaderInformation_json['GPS'] = [] + # GPS 解析信息 + 
GPSNode_Path = self.config['GPS']['GPSNode_Path'] + GPSNodeNames_list = self.config['GPS']['NodeInfomation_Name'] + GPSTime_Format = self.config['GPS']['Time_format'] + GPSPoints = FindInfomationFromJson(HeaderFile_dom_json, GPSNode_Path) + + for GPSPoint in GPSPoints: + GPSPoint = [ + datetime.datetime.strptime(GPSPoint[GPSNodeNames_list[0]], GPSTime_Format).timestamp(), # TimeStamp + float(GPSPoint[GPSNodeNames_list[1]]), # Xp + float(GPSPoint[GPSNodeNames_list[2]]), # Yp + float(GPSPoint[GPSNodeNames_list[3]]), # Zp + float(GPSPoint[GPSNodeNames_list[4]]), # Vx + float(GPSPoint[GPSNodeNames_list[5]]), # Vy + float(GPSPoint[GPSNodeNames_list[6]])] # VZ + HeaderInformation_json['GPS'].append(GPSPoint) + + # 提取成像相关信息 + HeaderInformation_json['ImageInformation'] = {} + # 1、开始成像时间 + HeaderInformation_json['ImageInformation']['StartTime'] = datetime.datetime.strptime( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['StartImageTime']['NodePath']), + self.config['imageinfo']['StartImageTime']['Format'] + ).timestamp() + # 2、结束成像时间 + HeaderInformation_json['ImageInformation']['EndTime'] = datetime.datetime.strptime( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['EndImageTime']['NodePath']), + self.config['imageinfo']['StartImageTime']['Format'] + ).timestamp() + # 3、影像的宽高 + HeaderInformation_json['ImageInformation']['height'] = int( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['ImageHeight']['NodePath'])) + HeaderInformation_json['ImageInformation']['width'] = int( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['ImageWidth']['NodePath'])) + # 4、影像的近斜距 + HeaderInformation_json['ImageInformation']['NearRange'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['NearRange']['NodePath'])) + # 5、入射角 + HeaderInformation_json['ImageInformation']['incidenceAngle'] = { + 'NearRange': float(FindInfomationFromJson(HeaderFile_dom_json, + self.config['imageinfo']['incidenceAngle']['NearRange'][ + 'NodePath'])), + 'FarRange': float(FindInfomationFromJson(HeaderFile_dom_json, + self.config['imageinfo']['incidenceAngle']['FarRange'][ + 'NodePath'])) + } + # 6、成像时刻影像带宽 + HeaderInformation_json['ImageInformation']['bandWidth'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['bandWidth']['NodePath'])) + # 7、多普勒质心常数 + DopplerCentroidCoefficients_list = FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo'][ + 'DopplerCentroidCoefficients']['NodePath']) + HeaderInformation_json['ImageInformation']['DopplerCentroidCoefficients'] = [ + float(DopplerCentroidCoefficients_list[ + self.config['imageinfo']['DopplerCentroidCoefficients']['DopplerCentroidCoefficients_Name'][0]]), + float(DopplerCentroidCoefficients_list[ + self.config['imageinfo']['DopplerCentroidCoefficients']['DopplerCentroidCoefficients_Name'][1]]), + float(DopplerCentroidCoefficients_list[ + self.config['imageinfo']['DopplerCentroidCoefficients']['DopplerCentroidCoefficients_Name'][2]]), + float(DopplerCentroidCoefficients_list[ + self.config['imageinfo']['DopplerCentroidCoefficients']['DopplerCentroidCoefficients_Name'][3]]), + float(DopplerCentroidCoefficients_list[ + self.config['imageinfo']['DopplerCentroidCoefficients']['DopplerCentroidCoefficients_Name'][4]]), + ] + # 8、波长 + HeaderInformation_json['ImageInformation']['lambda'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['lambda']['NodePath'])) + # 9、中心像元对应的中心经纬度 + CenterPoint = 
FindInfomationFromJson(HeaderFile_dom_json, + self.config['imageinfo']['CenterImagePositon']['NodePath']) + HeaderInformation_json['ImageInformation']['centerPosition'] = [ + float(CenterPoint[self.config['imageinfo']['CenterImagePositon']['Value'][0]]), # 纬度 + float(CenterPoint[self.config['imageinfo']['CenterImagePositon']['Value'][1]]), # 经度 + ] + # 10、多普勒参数参考时间 + HeaderInformation_json['ImageInformation']['DopplerParametersReferenceTime'] = float( + FindInfomationFromJson(HeaderFile_dom_json, + self.config['imageinfo']['DopplerParametersReferenceTime']['NodePath'])) + # 11、参考斜距 + HeaderInformation_json['ImageInformation']['refRange'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['ReferenceRange']['NodePath'])) + # 12、PRF + HeaderInformation_json['PRF'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['PRF']['NodePath'])) + HeaderInformation_json['Fs'] = float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['sensor']['Fs']['NodePath'])) + # 13、中心时间 + HeaderInformation_json['ImageInformation']['CenterTime'] = datetime.datetime.strptime( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['CenterImageTime']['NodePath']), + self.config['imageinfo']['CenterImageTime']['Format'] + ).timestamp() + #14. 影像的空间间隔分辨率 + HeaderInformation_json['ImageInformation']['ImageWidthSpace']=float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['ImageWidthSpace']['NodePath'])) + + HeaderInformation_json['ImageInformation']['ImageHeightSpace']=float( + FindInfomationFromJson(HeaderFile_dom_json, self.config['imageinfo']['ImageHeightSpace']['NodePath'])) + + # 部分需要解析 + self.lamda=HeaderInformation_json['ImageInformation']['lambda'] + self.PRF=HeaderInformation_json['PRF'] + self.imgwidth=HeaderInformation_json['ImageInformation']['width'] + self.imgheight=HeaderInformation_json['ImageInformation']['height'] + self.imgstarttime=HeaderInformation_json['ImageInformation']['StartTime'] + self.widthspace=HeaderInformation_json['ImageInformation']['ImageWidthSpace'] + self.heightspace=HeaderInformation_json['ImageInformation']['ImageHeightSpace'] + self.refrange=HeaderInformation_json['ImageInformation']['refRange'] + self.nearrange=HeaderInformation_json['ImageInformation']['NearRange'] + self.Fs=HeaderInformation_json['Fs']*1e6 # Mhz + return HeaderInformation_json + pass + + def LLA_to_XYZ(self, latitude, longitude, altitude): + """ + 经纬度转大地坐标系 + args: + latitude:纬度 + longitude:经纬 + altitude:海拔高程 + refrence: https://blog.csdn.net/dulingwen/article/details/96868530 + """ + # 经纬度的余弦值 + cosLat = math.cos(latitude * math.pi / 180) + sinLat = math.sin(latitude * math.pi / 180) + cosLon = math.cos(longitude * math.pi / 180) + sinLon = math.sin(longitude * math.pi / 180) + + # WGS84坐标系的参数 + rad = 6378137.0 # 地球赤道平均半径(椭球长半轴:a) + f = 1.0 / 298.257224 # WGS84椭球扁率 :f = (a-b)/a + C = 1.0 / math.sqrt(cosLat * cosLat + (1 - f) * (1 - f) * sinLat * sinLat) + S = (1 - f) * (1 - f) * C + h = altitude + + # 计算XYZ坐标 + X = (rad * C + h) * cosLat * cosLon + Y = (rad * C + h) * cosLat * sinLon + Z = (rad * S + h) * sinLat + + return np.array([X, Y, Z]).reshape(1, 3) + + def XYZ_to_LLA(self, X, Y, Z): + """ + 大地坐标系转经纬度 + 适用于WGS84坐标系 + args: + x,y,z + return: + lat,lon,altitude + """ + # WGS84坐标系的参数 + a = 6378137.0 # 椭球长半轴 + b = 6356752.314245 # 椭球短半轴 + ea = np.sqrt((a ** 2 - b ** 2) / a ** 2) + eb = np.sqrt((a ** 2 - b ** 2) / b ** 2) + p = np.sqrt(X ** 2 + Y ** 2) + theta = np.arctan2(Z * a, p * b) + + # 计算经纬度及海拔 + 
longitude = np.arctan2(Y, X) + latitude = np.arctan2(Z + eb ** 2 * b * np.sin(theta) ** 3, p - ea ** 2 * a * np.cos(theta) ** 3) + N = a / np.sqrt(1 - ea ** 2 * np.sin(latitude) ** 2) + altitude = p / np.cos(latitude) - N + + return np.array([np.degrees(latitude), np.degrees(longitude), altitude]) + + def getRByColumnCode(self, c): + """ + 根据列号计算斜距 + args: + c: 列号 + """ + return self.R0 + c * self.delta_R + pass + + def getTimeByLineCode(self, r): + """ + 根据行号计算成像时间 + args: + r: 行号 + """ + return np.matmul( + np.concatenate([np.array([r ** i]).reshape(1, 1) for i in range(self.r2t_A_arr.shape[0])], axis=1), + self.r2t_A_arr) + + def ReconstuctionTimesOflyDirectionPositionModel(self, timePoints_list): + """ + 构建飞行向坐标与时间的转换模型,注意这里没有调整时间的起算点。 + args: + timePoints_list:时间点坐标 [r,t] + """ + # 根据点数确定模型,最高为3次项模型 + Y = np.zeros((len(timePoints_list), 1)) + X = np.zeros((len(timePoints_list), 1)) + + for i in range(len(timePoints_list)): + Y[i, 0] = timePoints_list[i][1] + X[i, 0] = timePoints_list[i][0] + + if len(timePoints_list) == 2: # 一次项 + X = np.concatenate([np.ones((len(timePoints_list), 1)), X], axis=1) + A = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), Y) + + elif len(timePoints_list) >= 3: # 二次项 + X = np.concatenate([np.ones(len(timePoints_list), 1), X, np.power(X, 2)], axis=1) + A = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), Y) + + elif len(timePoints_list) >= 8: # 三次项 + X = np.concatenate([np.ones(len(timePoints_list), 1), X, np.power(X, 2), np.power(X, 3)], axis=1) + A = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), Y) + + self.r2t_A_arr = A + + def PrepareConvertSystem(self): + """ + 计算常量 + 在数据计算之前,提前处理部分常量信息 + 构建r,c到坐标斜距的计算公式 + """ + self.R0 = self.header_info['ImageInformation']['NearRange'] # 起始斜距/近斜距 + self.ReconstuctionTimesOflyDirectionPositionModel([ + [0, self.header_info['ImageInformation']['StartTime']], + [self.header_info['ImageInformation']['height'] - 1, self.header_info['ImageInformation']['EndTime']] + ]) # 构建坐标到时间的变换,这里仅用两个数据点构建 + self.delta_R = self.widthspace + + + + + def ConvertCoordinary(self): + """ + 批量求解点坐标 + """ + pass + + def Orthorectification(self, FilePath_str): + """ + 正射校正组件 + 正射校正整体可以分为两个步骤: + Step 1:计算出栅格坐标对应的真实的大地坐标系坐标 + Step 2:根据大地坐标系将影像重采样到大地坐标系空间中 + args: + FilePath_str:影像所在文件夹 + """ + # 分离需要校正对象 + file_name_list = os.listdir(FilePath_str) + header_name = [file_name for file_name in file_name_list if + file_name.rfind(".meta.xml") == len(file_name) - len('.meta.xml')][0] # xml头文件,存储在list中 + tiff_name = [file_name for file_name in file_name_list if + file_name.rfind(".tiff") == len(file_name) - len('.tiff')] # tif影像文件,存储在list中 + # 解析头文件 + self.header_info = self.ParseHearderFile(os.path.join(FilePath_str, header_name)) + # 构建轨道模型 + self.SatelliteOrbitModel = ReconstructionSatelliteOrbit(self.header_info['GPS'], + starttime=self.config['SatelliteOribit']['StartTime']) + # 求解模型前计算常数变换模型 + self.PrepareConvertSystem() + # 批量计算空间坐标 + self.ConvertCoordinary() + +''' +卫星轨道定位法,修改为静态类 +间接定法法,模拟SAR方法的静态方法,与类独立,方便使用多进程 +''' +def getSatelliteSpaceState(time_array,SatelliteOrbitModelstarttime,SatelliteOrbitModelParameters=None,SatelliteOrbitModelName="多项式"): + if SatelliteOrbitModelName=="多项式": + if SatelliteOrbitModelParameters is None: + return None # 返回None,表示没有结果 + n=time_array.shape[0] + result_arr_=np.zeros((n,6),dtype=np.float64) + time_float = time_array - SatelliteOrbitModelstarttime + time_float=time_float.reshape(-1) # nx1 + time_arr=np.ones((time_float.shape[0],5)) # nx5 + time_arr[:,1]=time_float + 
time_arr[:,2]=time_float**2 + time_arr[:,3]=time_float**3 + time_arr[:,4]=time_float**4 + result_arr_=np.matmul(time_arr,SatelliteOrbitModelParameters) # nx5 5x6 + #time_arr[0, 4] = time_arr[0, 3] * time_float ** 4 + #result_arr=result_arr_ + return result_arr_ # 位置矩阵 + +''' +双线性重采样 +''' +def LinearSampling(ori_dem,sampling_f,offset_row=1,offset_col=1): + ''' + 简化双线性重采样模型 + 根据重采样率对原始对象进行重采样。 + 双线性重采样,这里去栅格所涉及的四个角点作为起算点。 + 模型示意图: + + y1 r1 y2 + + r + + y3 r2 y4 + args: + ori_dem:原始DEM + sampling_f:采样率 int + return: + new_dem:采样结果 + ''' + # 原始dem的行列号 + ori_height=ori_dem.shape[0]-offset_row + ori_width=ori_dem.shape[1]-offset_col + + # 采样之后的影像的范围大小 + new_height=int(ori_height*sampling_f) + new_width=int(ori_width*sampling_f) + # 获取采样之后的栅格单元对应的原DEM的行列号 + row_index_list=list(range(new_height)) + col_index_list=list(range(new_width)) + + new_dem=np.ones((new_height,new_width)) + row_index_arr=(new_dem.transpose(1,0)*row_index_list).transpose(1,0) + col_index_arr=new_dem*col_index_list + row_index_arr=row_index_arr*1.0/sampling_f + col_index_arr=col_index_arr*1.0/sampling_f + # 初始化 + new_dem=0 + # 计算权重 + row_index_arr=row_index_arr.reshape(-1) + col_index_arr=col_index_arr.reshape(-1) + last_row_index_arr=np.floor(row_index_arr).astype(np.int32) + next_row_index_arr=np.ceil(row_index_arr).astype(np.int32) + last_col_index_arr=np.floor(col_index_arr).astype(np.int32) + next_col_index_arr=np.ceil(col_index_arr).astype(np.int32) + # 双线性重采样模型示意图, + # y1 r1 y2 + # + # r + # + # y3 r2 y4 + # 计算重采样 + + R1=ori_dem[last_row_index_arr,last_col_index_arr]*(col_index_arr-last_col_index_arr)+ori_dem[last_row_index_arr,next_col_index_arr]*(next_col_index_arr-col_index_arr) + R2=ori_dem[next_row_index_arr,last_col_index_arr]*(col_index_arr-last_col_index_arr)+ori_dem[next_row_index_arr,next_col_index_arr]*(next_col_index_arr-col_index_arr) + new_dem=R1*(row_index_arr-last_row_index_arr)+R2*(next_row_index_arr-row_index_arr) # 双线性重采样 + new_dem=new_dem.reshape(new_height,new_width) + del R1,R2,next_row_index_arr,next_col_index_arr,last_row_index_arr,last_col_index_arr,row_index_arr,col_index_arr + result_dem=np.ones((new_height,new_width,3)) + result_dem[:,:,2]=new_dem + result_dem[:,:,0]=(np.ones((new_height,new_width)).transpose(1,0)*list(range(new_height))).transpose(1,0) + result_dem[:,:,1]=np.ones((new_height,new_width))*col_index_list + + return result_dem + + pass + + +class IndirectOrthorectification(Orthorectification): + """ + 间接定位法 + """ + def outParameterText(self,outparameter_path): + ''' + 生成配置文件 + ''' + with open(outparameter_path,"w",encoding='utf-8') as fp: + # 输出文件 + fp.write("{}\n".format(self.header_info['ImageInformation']['height'])) + fp.write("{}\n".format(self.header_info['ImageInformation']['width'])) + fp.write("{}\n".format(self.header_info['ImageInformation']['NearRange'])) # 近斜距 + fp.write("{}\n".format(self.header_info['ImageInformation']['incidenceAngle']['NearRange'])) # 近斜距入射角 + fp.write("{}\n".format(self.header_info['ImageInformation']['incidenceAngle']['FarRange'])) # 远距入射角 + fp.write("{}\n".format(self.config['LightSpeed'])) # 光速 + fp.write("{}\n".format(self.header_info['ImageInformation']['lambda'])) + fp.write("{}\n".format(self.header_info['ImageInformation']['StartTime'])) + fp.write("{}\n".format(self.header_info['PRF'])) + fp.write("{}\n".format(self.refrange)) + fp.write("{}\n".format(self.Fs)) + fp.write("{}\n".format(self.header_info['ImageInformation']['DopplerParametersReferenceTime'])) + # fp.write("{}\n".format(self.widthspace)) + + # 多普勒系数 + 
fp.write("{}\n".format(len(self.header_info['ImageInformation']['DopplerCentroidCoefficients']))) + for i in range(len(self.header_info['ImageInformation']['DopplerCentroidCoefficients'])): + fp.write("{}\n".format(self.header_info['ImageInformation']['DopplerCentroidCoefficients'][i])) + fp.write("{}\n".format(1)) + fp.write("{}\n".format(self.SatelliteOrbitModel.polynum)) + fp.write("{}\n".format(self.SatelliteOrbitModel.get_starttime())) + # X + for i in range(self.SatelliteOrbitModel.polynum+1): + fp.write("{}\n".format(self.SatelliteOrbitModel.A_arr[i,0])) + # Y + for i in range(self.SatelliteOrbitModel.polynum+1): + fp.write("{}\n".format(self.SatelliteOrbitModel.A_arr[i,1])) + # Z + for i in range(self.SatelliteOrbitModel.polynum+1): + fp.write("{}\n".format(self.SatelliteOrbitModel.A_arr[i,2])) + # Vx + for i in range(self.SatelliteOrbitModel.polynum+1): + fp.write("{}\n".format(self.SatelliteOrbitModel.A_arr[i,3])) + # Vy + for i in range(self.SatelliteOrbitModel.polynum+1): + fp.write("{}\n".format(self.SatelliteOrbitModel.A_arr[i,4])) + # vz + for i in range(self.SatelliteOrbitModel.polynum+1): + fp.write("{}\n".format(self.SatelliteOrbitModel.A_arr[i,5])) + # UTC参数 + startTime=time.localtime(self.header_info['ImageInformation']["StartTime"]) + fp.write("{}\n".format(startTime.tm_year)) + fp.write("{}\n".format(startTime.tm_mon)) + fp.write("{}".format(startTime.tm_mday)) + self.paramterFile_path=outparameter_path + + def IndirectOrthorectification(self, FilePath_str,workspace_dir): + """ + 正射校正组件 + 正射校正整体可以分为两个步骤: + Step 1:计算出栅格坐标对应的真实的大地坐标系坐标 + Step 2:根据大地坐标系将影像重采样到大地坐标系空间中 + args: + FilePath_str:影像所在文件夹 + dem_resampled_path:重采样后的DEM路径 + lut_tif_path:输出的r,c,ti数据影像 + work_temp_path: 输出的临时文件地址路径,方便存放计算临时文件 + """ + # 分离需要校正对象 + file_name_list = os.listdir(FilePath_str) + header_name = [file_name for file_name in file_name_list if + file_name.rfind(".meta.xml") == len(file_name) - len('.meta.xml')][0] # 头文件 + tiff_name = [file_name for file_name in file_name_list if + file_name.rfind(".tiff") == len(file_name) - len('.tiff')] # 影像文件 + # 解析头文件 + self.header_info = self.ParseHearderFile(os.path.join(FilePath_str, header_name)) + # 构建轨道模型 + self.SatelliteOrbitModel = ReconstructionSatelliteOrbit(self.header_info['GPS'], + starttime=self.header_info['ImageInformation']['CenterTime']) # 构建卫星轨道模型,取第0个节点的时间 + # 获取所有轨道节点时间 + gpslist=np.array(self.header_info['GPS']).reshape(-1,7) + # 验证轨道模型 + statepoints=self.SatelliteOrbitModel.getSatelliteSpaceState(gpslist[:,0].reshape(-1)) + # 计算轨道差距 + statepoints_dist=statepoints-gpslist[:,1:] + statepoints_dist_line=statepoints_dist[:,:3] + statepoints_dist_line=np.sum(statepoints_dist_line**2,axis=1)**0.5 + + statepoints_dist_ver=np.sum(statepoints_dist[:,3:]**2,axis=1)**0.5 + print("sata Point distance:\t{}\t -\t{}\t ".format(np.min(statepoints_dist_line),np.max(statepoints_dist_line))) + + # 求解模型前计算常数变换模型 + self.PrepareConvertSystem() + self.outParameterText(os.path.join(workspace_dir,"orth_para.txt")) + + def preCaldem_sar_rc(self,dem_path,ori_sar_path,work_path,taget_path): + #.\SIMOrthoProgram\x64\Release\SIMOrthoProgram.exe 1 parameter_path dem_path ori_sar_path work_path taget_path + # SIMOrthoProgram.exe 1 in_parameter_path in_dem_path in_ori_sar_path in_work_path in_taget_path + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5} {6} {7}".format(exe_path,1,self.paramterFile_path,dem_path,ori_sar_path,work_path ,taget_path,"\\") + print(exe_cmd) + 
print(os.system(exe_cmd)) + print("==========================================================================") + + def getRPC_incidenceAngle(self,in_dem_path,in_rpc_rc_path,out_rpc_dem_path,out_incident_angle_path,out_local_incident_angle_path): + ''' + std::cout << "mode 4: get RPC incident and local incident angle sar model:"; + std::cout << "SIMOrthoProgram.exe 4 in_parameter_path in_dem_path in_rpc_rc_path out_rpc_dem_path out_incident_angle_path out_local_incident_angle_path"; + + ''' + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5} {6} {7}".format(exe_path,4,self.paramterFile_path,in_dem_path,in_rpc_rc_path,out_rpc_dem_path,out_incident_angle_path,out_local_incident_angle_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def getRPC_incidenceAngle_lon_lat(self,in_dem_path,in_gec_lon_lat_path,work_path,taget_path,out_incident_angle_path,out_local_incident_angle_path,out_incangle_geo_path,out_localincangle_geo_path): + ''' + std::cout << "mode 7: get RPC incident and local incident angle sar model:"; + std::cout << "SIMOrthoProgram.exe 7 in_parameter_path in_dem_path in_gec_lon_lat_path work_path taget_path out_incident_angle_path out_local_incident_angle_path"; + + ''' + cwd_path = os.getcwd() + print("cwd_path:" + cwd_path) + exe_path=r"{}\baseTool\x64\Release\SIMOrthoProgram.exe".format(cwd_path) + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}".format(exe_path,7,self.paramterFile_path,in_dem_path,in_gec_lon_lat_path,work_path,taget_path,out_incident_angle_path,out_local_incident_angle_path,out_incangle_geo_path,out_localincangle_geo_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def get_incidenceAngle(self,in_dem_path,in_rc_wgs84_path,out_incident_angle_path,out_local_incident_angle_path): + ''' + std::cout << "mode 2: get incident angle and local incident angle by rc_wgs84 and dem and statellite model:\n"; + std::cout << "SIMOrthoProgram.exe 2 in_parameter_path in_dem_path in_rc_wgs84_path out_incident_angle_path out_local_incident_angle_path"; + ''' + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5} {6}".format(exe_path,2,self.paramterFile_path,in_dem_path,in_rc_wgs84_path,out_incident_angle_path,out_local_incident_angle_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + def get_GEC_incidenceAngle(self,in_dem_path,in_gec_lon_lat_path,out_incident_angle_path,out_local_incident_angle_path): + ''' + std::cout << "mode 6: get gec incident and local incident angle sar model:"; + std::cout << "SIMOrthoProgram.exe 6 in_parameter_path in_dem_path in_gec_lon_lat_path out_incident_angle_path out_local_incident_angle_path"; + ''' + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5} {6}".format(exe_path,6,self.paramterFile_path,in_dem_path,in_gec_lon_lat_path,out_incident_angle_path,out_local_incident_angle_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + def get_GTC_SAR(self,in_rc_wgs84_path,in_ori_sar_path,out_orth_sar_path,modecode=3): + ''' + 
std::cout << "mode 3: interpolation(cubic convolution) orth sar value by rc_wgs84 and ori_sar image and model:\n "; + std::cout << "SIMOrthoProgram.exe 3 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path,modecode,self.paramterFile_path,in_rc_wgs84_path,in_ori_sar_path,out_orth_sar_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def inter_Range2Geo(self,lon_lat_path , data_tiff , grid_path , space): + ''' + # std::cout << "mode 10"; + # std::cout << "SIMOrthoProgram.exe 10 lon_lat_path data_tiff grid_path space"; + ''' + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path,10,lon_lat_path , data_tiff , grid_path , space) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def calInterpolation_cubic_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): + ''' + # std::cout << "mode 9"; + # std::cout << "SIMOrthoProgram.exe 9 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 9, parameter_path, + dem_rc, in_sar, out_sar) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): + ''' + # std::cout << "mode 11"; + # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path, + dem_rc, in_sar, out_sar) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def getPowerTif(self,in_ori_path,out_power_path): + ''' + std::cout << "mode 5: convert ori tiff to power tiff:"; + std::cout << "SIMOrthoProgram.exe 5 in_ori_path out_power_path"; + ''' + exe_path=r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd=r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} ".format(exe_path,5,in_ori_path,out_power_path) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def test_PSTN(self): + landpoint=np.array([-1945072.5,5344083.0,2878316.0]).reshape(1,3) + # 间接定位法所需的参数 + Doppler_d = np.array(self.header_info['ImageInformation']['DopplerCentroidCoefficients']).reshape(-1, 1) + LightSpeed = self.config['LightSpeed'] + T0 = self.header_info['ImageInformation']['refRange'] * 2 / LightSpeed + lamda = self.header_info['ImageInformation']['lambda'] + StartTime = self.header_info['ImageInformation']['StartTime'] + PRF = self.header_info['PRF'] + R0 = self.header_info['ImageInformation']['NearRange'] + r,c,ti=self.PSTN_block(landpoint, Doppler_d, StartTime, lamda, T0, LightSpeed, PRF, R0) + + pass + def findInitPoint(self,sim_mask,dem_gt,sim_r_tif,sim_c_tif,sampling_f): + + # 根据采样点,大致计算出影像的仿射矩阵,并以此进行外扩 + sampling_f=sampling_f//10 + 
[r_ids,c_ids]=np.where(sim_mask==1) + sim_r_ids=sim_r_tif[r_ids,c_ids] + sim_c_ids=sim_c_tif[r_ids,c_ids] + X=dem_gt[0]+dem_gt[1]*c_ids+dem_gt[2]*r_ids + Y=dem_gt[3]+dem_gt[4]*c_ids+dem_gt[5]*r_ids + # 首先计算X 变换式 + sim_r_ids=sim_r_ids.reshape(-1,1) + sim_c_ids=sim_c_ids.reshape(-1,1) + X=X.reshape(-1,1) + Y=Y.reshape(-1,1) + point_count=sim_c_ids.shape[0] + X_input=np.ones((point_count,3)) + X_input[:,1]=sim_c_ids[:,0] + X_input[:,2]=sim_r_ids[:,0] + A_X=np.matmul(np.matmul(np.linalg.inv(np.matmul(X_input.T,X_input)), X_input.T), X).reshape(-1,1) + A_Y=np.matmul(np.matmul(np.linalg.inv(np.matmul(X_input.T,X_input)), X_input.T), Y).reshape(-1,1) + + sar_gt=[A_X[0,0],A_X[1,0],A_X[2,0],A_Y[0,0],A_Y[1,0],A_Y[2,0]] + width=self.header_info['ImageInformation']['width'] + height=self.header_info['ImageInformation']['height'] + width_f=self.header_info['ImageInformation']['width']/width + height_f=self.header_info['ImageInformation']['height']/height + sar_gt[1]=sar_gt[1]*width_f + sar_gt[2]=sar_gt[2]*height_f + sar_gt[4]=sar_gt[4]*width_f + sar_gt[5]=sar_gt[5]*height_f + # 外扩 + sar_gt[0]=sar_gt[0]+sar_gt[1]*-60+sar_gt[2]*-60 + sar_gt[3]=sar_gt[3]+sar_gt[4]*-60+sar_gt[5]*-60 + + inv_sar_gt=gdal.InvGeoTransform(sar_gt) + for dem_height in [0,sim_mask.shape[0]-1]: + for dem_width in [0,sim_mask.shape[1]-1]: + dem_edge_x=dem_gt[0]+dem_gt[1]*dem_width+dem_gt[2]*dem_height + dem_edge_y=dem_gt[3]+dem_gt[4]*dem_width+dem_gt[5]*dem_height + c_width=inv_sar_gt[0]+inv_sar_gt[1]*dem_edge_x+inv_sar_gt[2]*dem_edge_y + r_height=inv_sar_gt[3]+inv_sar_gt[4]*dem_edge_x+inv_sar_gt[5]*dem_edge_y + if c_width<=0 or r_height<=0: + continue + width=width+120 if width+120=-3000)&(sim_r_tif=0)&(sim_c_tif=0 else 0 + c_min=c_min-len_col if c_min-len_col>=0 else 0 + r_max=r_max+len_row if r_max+len_row=0)&(r_int=0)&(c_int0: # 可以进行行前推 + start_row=1 # 因为外推了一行 + else: # 不可以进行行前推 + start_row=0 # 此时没有外推一行 + if i+block_height_width>=row_count: # 不存在行后推 + if i>0: # 存在行前推, + end_row=start_row+block_row_count-1 + else: # 不存在行前推 + end_row=start_row+block_row_count + else: # 存在行后推 + if i>0: # 存在行前推 + end_row=start_row+block_row_count-2 + else: # 不存在行前推 + end_row=start_row+block_row_count-1 + pass + # 确定列的范围 + if j>0: # 存在列前推 + start_col=1 + else: # 不存在列前推 + start_col=0 + if j+block_height_width>=col_count: # 不存在列后推 + if j>0: # 存在列前推 + end_col=start_col+block_col_count-1 + else: # 不存在列前推 + end_col=start_col+block_col_count + else: # 存在列后推 + if j>0: # 存在列前推 + end_col=start_col+block_col_count-2 + else: # 不存在列前推 + end_col=start_col+block_col_count-1 + pass + return [start_row,start_col,end_row,end_col] + + def CalIntensityResampled(self,TargetPosition,ti,min_theta=0,max_theta=1): + '''计算入射角 + agrs: + TargetPosition: 地面坐标 nxmx3 + ti:对应的卫星空间位置 nxm + return: + Intensity:入射角阵,-9999 不参与运算 + ''' + # 1 + # 2 0 4 + # 3 + # n=(n12+n23+n34+n41)/4 + # n=(n24xn31)/4 + # 计算 n24=n4-n2 + n24=TargetPosition[:,2:,:]-TargetPosition[:,:-2,:] # 4-2 2: 5070x5068 + n24=n24[1:-1,:,:] + # 计算 n31=n1-n3 + n31=TargetPosition[:-2,:,:]-TargetPosition[2:,:,:] # 1-3 5068x5070 + n31=n31[:,1:-1,:] + # 计算坡度平面向量 + N=np.zeros((n31.shape[0],n31.shape[1],n31.shape[2])) + # n24=[a,b,c] + # n31=[d,f,g] + N[:,:,0]=n24[:,:,1]*n31[:,:,2]-n24[:,:,2]*n31[:,:,1] # bg-cf + N[:,:,1]=n24[:,:,2]*n31[:,:,0]-n24[:,:,0]*n31[:,:,2] # cd-ag + N[:,:,2]=n24[:,:,0]*n31[:,:,1]-n24[:,:,1]*n31[:,:,0] # af-bd + N=N*0.25 # 平面坡度向量 + N=N.reshape(-1,3) + del n24,n31 + n=ti.shape[0] + m=ti.shape[1] + # 分块计算 + theta=np.ones((n-2,m-2)).astype(bool) + ti=ti[1:-1,1:-1] + 
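The block above approximates each DEM cell's surface normal from the cross product of the diagonal neighbour differences (n24 × n31) and then compares it with the ground-to-satellite vector. A self-contained sketch of the underlying cosine relation cosθ = (R_sp·N)/(|R_sp||N|) for stacked n×3 arrays; it only illustrates the geometry and is not lifted from the module:

```python
import numpy as np

def local_incidence_cosine(sat_pos, ground_pos, normal):
    """Cosine of the local incidence angle for stacked (n, 3) arrays.

    sat_pos    : satellite positions
    ground_pos : ground-point positions
    normal     : surface normal vectors
    """
    r_sp = sat_pos - ground_pos                                   # ground -> satellite vectors
    dot = np.sum(r_sp * normal, axis=1)
    norms = np.linalg.norm(r_sp, axis=1) * np.linalg.norm(normal, axis=1)
    return dot / norms                                            # cos(theta), in [-1, 1]

# usage sketch: nadir-looking geometry over a flat surface
cos_theta = local_incidence_cosine(
    np.array([[0.0, 0.0, 700e3]]),     # satellite 700 km overhead
    np.array([[0.0, 0.0, 0.0]]),       # ground point
    np.array([[0.0, 0.0, 1.0]]))       # upward-facing normal
print(np.degrees(np.arccos(np.clip(cos_theta, -1.0, 1.0))))   # ~0 degrees
```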
TargetPosition=TargetPosition[1:-1,1:-1] + TargetPosition=TargetPosition.reshape(-1,3) + ti=ti.reshape(-1) + theta=theta.reshape(-1) + # 分块计算 + le_iter=4000 + for i in range(0,theta.shape[0],le_iter): + len_ti=le_iter + if len_ti+i>theta.shape[0]: + len_ti=theta.shape[0]-i + end_i=i+len_ti + # 计算卫星空间坐标 + R_sp = self.SatelliteOrbitModel.getSatelliteSpaceState(ti[i:end_i])[:, :3]-TargetPosition[i:end_i] + # 夹角=arccos((R_sp*N)/(|R_sp|*|N|)) + theta_=np.sum(R_sp*N[i:end_i],axis=1)/(np.sum(R_sp**2,axis=1)*np.sum(N[i:end_i],axis=1)) + theta[i:end_i]=(theta_>min_theta)&(theta_ self.header_info['ImageInformation']['height']+sampling_f*2: + return False,ti[-1,0] + if np.max(c)<-1*sampling_f*2 or np.min(c) > self.header_info['ImageInformation']['width']+sampling_f*2: + return False,ti[-1,0] + return True + + def ConvertCoordinary_test(self, dem_merged_path, sim_out_path,work_temp_path, flag=1): + + print("当前python程序,内存占用 {}G".format(psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024)) + """ + 批量求解点坐标的性能改进: + 主要针对数据频繁开关文件读写 + flag:是否想要经纬度转大地坐标系 + """ + start = time.time() + # 间接定位法所需的参数 + Doppler_d = np.array(self.header_info['ImageInformation']['DopplerCentroidCoefficients']).reshape(-1, 1) + LightSpeed = self.config['LightSpeed'] + T0 = self.header_info['ImageInformation']['refRange'] * 2 / LightSpeed + lamda = self.header_info['ImageInformation']['lambda'] + StartTime = self.header_info['ImageInformation']['StartTime'] + PRF = self.header_info['PRF'] + R0 = self.header_info['ImageInformation']['NearRange'] + # 计算采样率 + + # 读取影像 + dem_resampled_tiff = gdal.Open(dem_merged_path) + row_count = dem_resampled_tiff.RasterYSize # 读取图像的行数 + col_count = dem_resampled_tiff.RasterXSize # 读取图像的列数 + im_proj = dem_resampled_tiff.GetProjection() + im_geotrans = list(dem_resampled_tiff.GetGeoTransform()) + gt = im_geotrans # 坐标变换参数 + # 计算采样率 + sampling_f=self.CalSamplingRateOfDEM(dem_resampled_tiff) + + # 创建目标虚拟影像 + SAR_width=self.header_info['ImageInformation']['width'] + SAR_height=self.header_info['ImageInformation']['height'] + + SAR_row_col_array=np.zeros((row_count,col_count,2),np.float32) + + for ii in range(0,row_count,3000): + if ii==row_count: + continue + altii=dem_resampled_tiff.GetRasterBand(1).ReadAsArray(0, ii, col_count, 3000) + if altii is None: + # 长度过长,设置行数 + altii=dem_resampled_tiff.GetRasterBand(1).ReadAsArray(0, ii, col_count, int(row_count-ii)) + pass + h=int(altii.shape[0]) + w=int(altii.shape[1]) + y_pixel=(np.ones((w,h))*range(h)+ii).transpose(1,0) + x_pixel=np.ones((h,w))*range(w) + lat=gt[0]+gt[1]*x_pixel+gt[2]*y_pixel # 经度 + lon=gt[3]+gt[4]*x_pixel+gt[5]*y_pixel # 纬度 + #del x_pixel,y_pixel + #gc.collect() + [X, Y, Z] = OrthoAuxData.LLA2XYZM(lat.reshape(-1,1), lon.reshape(-1,1), altii.reshape(-1,1)) # 计算错误 + TargetPosition = np.ones((int(h*w), 3), dtype=np.float32) + TargetPosition[:, 0] = X.reshape(-1) + TargetPosition[:, 1] = Y.reshape(-1) + TargetPosition[:, 2] = Z.reshape(-1) + r, c, ti = self.PSTN_M(TargetPosition, Doppler_d, StartTime, lamda, T0, LightSpeed, PRF, R0) # 间接定位法,核心求解代码 + + SAR_row_col_array[ii:ii+h,:,0]=r.reshape(-1,col_count) + SAR_row_col_array[ii:ii+h,:,1]=c.reshape(-1,col_count) + gtiff_driver = gdal.GetDriverByName('GTiff') + SAR_row_col=gtiff_driver.Create(sim_out_path.replace(".tif","rc.tif"),col_count,row_count,2,gdal.GDT_Float32) + SAR_row_col.SetGeoTransform(im_geotrans) # 写入仿射变换参数 + SAR_row_col.SetProjection(im_proj) # 写入投影 + # 写入tiff + SAR_row_col.GetRasterBand(1).WriteArray(SAR_row_col_array[:,:,0]) + 
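The result being written here is a two-band GeoTIFF in DEM geometry: band 1 holds the predicted SAR row and band 2 the SAR column for every DEM pixel. A hedged sketch of reading that lookup table back for a single cell; the path and helper name are illustrative, only the band layout is assumed to match the write above:

```python
from osgeo import gdal

def sar_pixel_for_dem_cell(rc_tif_path, dem_row, dem_col):
    """Look up the SAR (row, col) predicted for one DEM cell.

    Assumes band 1 stores the SAR row and band 2 the SAR column.
    """
    ds = gdal.Open(rc_tif_path)
    # ReadAsArray takes (xoff, yoff, xsize, ysize), i.e. column offset first
    sar_row = ds.GetRasterBand(1).ReadAsArray(dem_col, dem_row, 1, 1)[0, 0]
    sar_col = ds.GetRasterBand(2).ReadAsArray(dem_col, dem_row, 1, 1)[0, 0]
    del ds
    return float(sar_row), float(sar_col)

# usage sketch with an illustrative path
# print(sar_pixel_for_dem_cell(r"D:\temp\sim_outrc.tif", 100, 200))
```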
SAR_row_col.GetRasterBand(2).WriteArray(SAR_row_col_array[:,:,1]) + print(sim_out_path.replace(".tif","rc.tif")) + return sim_out_path.replace(".tif","rc.tif") + pass + + def CalSamplingRateOfDEM(self,DEM_tiff): + '''计算DEM的重采样率f,必须在解析头文件之后,参考陈尔学 博士学位论文 《 星载合成孔径雷达影像正射校正方法研究 》 + args: + DEM_tiff:tif DEM的影像 + return: + f:采样率,向上取整 + ''' + # 获取DEM_tiff的高宽、投影、仿射矩阵 + Trans_array=list(DEM_tiff.GetGeoTransform()) # 仿射变换矩阵 + width=DEM_tiff.RasterXSize + height=DEM_tiff.RasterYSize + # x=trans[0]+trans[1]*c+trans[2]*r + # y=trans[3]+trans[4]*c+trans[5]*r + # 获得其高宽分辨率 + delta_x=Trans_array[1]+Trans_array[2] # x轴上的分辨率 经度 + delta_y=Trans_array[4]+Trans_array[5] # y轴上的分辨率 纬度 + # 因为单位为度,所以需要转换为 米 + y_resultion=abs(111194.926644558737*delta_y) + x_resultion=abs(111194.926644*math.cos(Trans_array[3])*delta_x) + resultion=x_resultion if x_resultion>y_resultion else y_resultion + delta_R=self.delta_R # 斜距分辨率 + incidenceAngle=self.header_info['ImageInformation']['incidenceAngle']["NearRange"] # 入射角 + f=math.sin(incidenceAngle*math.pi/180)*1.4142135623730951*resultion/delta_R + f=math.ceil(f) + return f + + def PSTN_M2(self, TargetPostion, Doppler_d, StartTime, lamda, T0, LightSpeed, PRF, R0): + """ + 间接求解方法,使用矩阵方法,分段计算,方便加速 + args: + TargetPosition:nparray,nx3 地面坐标 + Doppler_d:多普勒系数 -1,1 + lamda:波长 + T0:多普勒参考时间 + LightSpeed:光速 + return: + rc:nparray shape nx2 行列号 + """ + # + + n = int(TargetPostion.shape[0]) # 坐标点数据 nx3 + r=np.zeros((n,1)) + c=np.zeros((n,1)) + ti=np.zeros((n,1),dtype=np.float64) + len_iter=2000 + pool= ThreadPoolExecutor() + pool_ls=[] + SatelliteOrbitModelstarttime=self.SatelliteOrbitModel.get_starttime() + SatelliteOrbitModelParameters=self.SatelliteOrbitModel.A_arr + SatelliteOrbitModelName=self.SatelliteOrbitModel.modelName + delta_R=self.delta_R + for i in range(0, n, len_iter): + start_i=i + len_block=len_iter + if start_i+len_block>n: + len_block=n-start_i + end_i=start_i+len_block + temp_TargetPosition=TargetPostion[start_i:end_i,:] + pool_ls.append( pool.submit(PSTN_block_MuilThread,copy.deepcopy(start_i),copy.deepcopy(end_i), + temp_TargetPosition.copy(), + copy.deepcopy(Doppler_d), + copy.deepcopy(StartTime), + copy.deepcopy(lamda), + copy.deepcopy(T0), copy.deepcopy(LightSpeed), + copy.deepcopy(PRF),copy.deepcopy(R0), + copy.deepcopy(delta_R), + copy.deepcopy(SatelliteOrbitModelstarttime), + SatelliteOrbitModelParameters.copy(), + copy.deepcopy(SatelliteOrbitModelName),)) + pool.close() + pool.join() + for p in pool_ls: + r_temp, c_temp, ti_temp,start_i,end_i=p.get() + r[start_i:end_i,:]=r_temp + c[start_i:end_i,:]=c_temp + ti[start_i:end_i,:]=ti_temp + + return r,c,ti + + def PSTN_M(self, TargetPostion, Doppler_d, StartTime, lamda, T0, LightSpeed, PRF, R0): + """ + 间接求解方法,使用矩阵方法,分段计算,方便加速 + args: + TargetPosition:nparray,nx3 地面坐标 + Doppler_d:多普勒系数 -1,1 + lamda:波长 + T0:多普勒参考时间 + LightSpeed:光速 + return: + rc:nparray shape nx2 行列号 + """ + # + n = int(TargetPostion.shape[0]) # 坐标点数据 nx3 + r=np.zeros((n,1)) + c=np.zeros((n,1)) + ti=np.zeros((n,1),dtype=np.float64) + init_size=3000 + for i in range(0,n,init_size): + start_i=i + len_block=init_size + if start_i+len_block>n: + len_block=n-start_i + end_i=start_i+len_block + temp_TargetPosition=TargetPostion[start_i:end_i,:] + r_temp, c_temp, ti_temp=self.PSTN_block(temp_TargetPosition, Doppler_d, StartTime, lamda, T0, LightSpeed, PRF, R0) + + r[start_i:end_i,:]=r_temp.copy() + c[start_i:end_i,:]=c_temp.copy() + ti[start_i:end_i,:]=ti_temp.copy() + return r,c,ti + + def PSTN_block(self, TargetPostion, Doppler_d, StartTime, lamda, T0, 
LightSpeed, PRF, R0): + ''' + 间接求解方法,使用矩阵方法 + args: + TargetPosition:nparray,nx3 地面坐标 + Doppler_d:多普勒系数 -1,1 + lamda:波长 + T0:多普勒参考时间 + LightSpeed:光速 + return: + rc:nparray shape nx2 行列号 + ''' + n = TargetPostion.shape[0] # 坐标点数据 nx3 + ti = np.ones((n, 1),dtype=np.float64) * StartTime # 初始值 + delta_R = self.delta_R + delta_t = 1 / PRF + R=np.zeros((n,1)) + Rs = np.zeros((n, 3)) + R_sc = np.zeros((n, 3)) # 地面-卫星空间位置矢量 + V_sc = np.zeros((n, 3)) # 地面-卫星空间速率矢量 + FdNumerical = np.ones((n, 1)) # 多普勒公式的数值解法 + FdTheory = np.ones((n, 1)) # 多普勒公式的理论解法 + FdTheory_grad = np.ones((n, 1)) # 多普勒公式的导数 + # 多普勒参数系数 + Doppler_d = Doppler_d.reshape(-1, 1) # 5x1 + TSR=np.ones((n, 1)) # nx1 + TSRx = np.ones((n, Doppler_d.shape[0])) # nx5 + inc_t = np.ones((n, 1)) # 计算增加的导数 + dt = 0.0001 # 数值法求解多普勒距离公式的导数 + dt_rec=1/dt + # 默认最高迭代100次,如果还未求解出结果,就将所有的未收敛的时间值记录为0 ,注意时间不能为0 + for i in range(100): + tempT = ti + dt # 计算增加时间 + # ------- 求解导数 ---------------- + #del Rs,R_sc,V_sc,TSR,TSRx,satelliteSpaceState,R + satelliteSpaceState = self.SatelliteOrbitModel.getSatelliteSpaceState(tempT) # 卫星坐标与速度 nx6 + Rs = satelliteSpaceState[:, :3] + R_sc = satelliteSpaceState[:, :3] - TargetPostion # 地面-卫星空间位置矢量 + V_sc = satelliteSpaceState[:, 3:] # 地面-卫星空间位置矢量 + R =np.sqrt(np.sum(R_sc ** 2, axis=1)).reshape(-1, 1) # nx1 + #FdTheory_grad = (-2 / lamda) * (1 / R) * np.sum(R_sc * V_sc, axis=1).reshape(-1, 1) # nx1 + FdTheory_grad = -2*np.reciprocal(R*lamda) * np.sum(R_sc * V_sc, axis=1).reshape(-1, 1) # nx1 + + # 获取卫星轨道状态信息 + satelliteSpaceState = self.SatelliteOrbitModel.getSatelliteSpaceState(ti) # 卫星坐标与速度 nx6 + # 卫星轨道坐标 + Rs =satelliteSpaceState[:, :3] + # 卫星相对地面的位置向量、卫星速度向量 + R_sc=Rs - TargetPostion # 地面-卫星空间位置矢量 + V_sc =satelliteSpaceState[:, 3:] # 地面-卫星空间位置矢量 + # 斜距 + R = np.sqrt(np.sum(R_sc ** 2, axis=1)).reshape(-1, 1) # nx1 + # 根据多普勒数值求解公式求解多普勒频移值 + TSR= R * (2 / LightSpeed) - T0 # nx1 + TSRx[:,0]=TSR[:,0]**0 # nx5 + TSRx[:,1]=TSR[:,0]**1 + TSRx[:,2]=TSR[:,0]**2 + TSRx[:,3]=TSR[:,0]**3 + TSRx[:,4]=TSR[:,0]**4 + # 根据多普勒物理公式求解求解多普勒频移值 + FdTheory=-2*np.reciprocal(lamda*R)*np.sum(R_sc * V_sc, axis=1).reshape(-1, 1) # nx1 + FdNumerical= np.matmul(TSRx, Doppler_d) # nx1 + + FdTheory_grad = np.reciprocal((FdTheory_grad - FdTheory) * dt_rec) # nx1 + inc_t = (FdTheory - FdNumerical) * FdTheory_grad #(FdTheory - FdNumerical) * (1 / FdTheory_grad) # nx1 + # inc_t = inc_t - inc_t * ti_mask # 允许继续迭代 nx1 + if np.max(np.abs(inc_t)) < delta_t * 0.001: + break + ti = ti - inc_t # 更新坐标 + #del Rs,R_sc,V_sc,R,satelliteSpaceState,FdTheory_grad,FdNumerical,inc_t + #print("当前python程序,{} step, inc_t {} 求解内存占用 {}G".format(i,np.max(inc_t),psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024)) + # 计算行号 + ti_=copy.deepcopy(ti.copy()) + r_ = (ti_ - StartTime)*PRF + delta_t_light=delta_t*LightSpeed + r=copy.deepcopy(r_.copy()) + del r_ + c_ = (R - R0) * (1 / delta_R) + c=copy.deepcopy(c_.copy()) + del c_ + ti = copy.deepcopy(ti_.copy()) + del ti_ + return r, c, ti + pass + + def sim_intensity(self, in_tif_path, dem_resampled_path, lut_tif_path, incidence_tif_path, sim_out_path): + """ + 局部入射角计算 + 原方法存在的问题:多次文件读写 + """ + dem_resampled_tiff = gdal.Open(dem_resampled_path) # 获取重采样的DEM 数据,为了减少文件读写操作,这里做了持久化 + im_proj = dem_resampled_tiff.GetProjection() # 构建投影 + im_geotrans = list(dem_resampled_tiff.GetGeoTransform()) # 构建投影变换对象 + [a1, b1, c1, a2, b2, c2] = im_geotrans # 坐标变换参数:c1=0,b2=0 + start = time.time() # 度量算法 + dem_resampled = dem_resampled_tiff.GetRasterBand(1).ReadAsArray(0, 0, dem_resampled_tiff.RasterXSize, + 
dem_resampled_tiff.RasterYSize) + del dem_resampled_tiff # 释放内存 + incidence = dem_resampled * 0.0 + 1.8 + # 读取时间 + in_ds = gdal.Open(lut_tif_path) # 读取间接法 + row_cnt = in_ds.RasterYSize + col_cnt = in_ds.RasterXSize + t_arr = in_ds.GetRasterBand(3).ReadAsArray(0, 0, col_cnt, row_cnt).astype(np.float64) # 获取时间阵列 + t_arr = t_arr[1:-1, 1:-1] + self.header_info['ImageInformation']['StartTime'] # 去除最外围一圈时间数据 + row_data = in_ds.GetRasterBand(1).ReadAsArray(0, 0, col_cnt, row_cnt) # 读取行号 + col_data = in_ds.GetRasterBand(2).ReadAsArray(0, 0, col_cnt, row_cnt) # 读取列号 + row_data = np.round(row_data) + col_data = np.round(col_data) + row_data = row_data.astype(np.int16) + col_data = col_data.astype(np.int16) + del in_ds # 释放内存 + c = range(0, col_cnt) + c = np.array(c).reshape(-1, 1) + + for r0 in range(1, row_cnt - 1): + T0 = np.zeros((col_cnt, 3)) # colcount-2 ,3 + T0[:, 0] = a1 + b1 * c[:, 0] + c1 * r0 + T0[:, 1] = a2 + b2 * c[:, 0] + c2 * r0 + T0[:, 2] = dem_resampled[r0, :] + + T0 = np.concatenate(OrthoAuxData.LLA2XYZM(T0[:, 0].reshape(-1, 1), T0[:, 1].reshape(-1, 1), T0[:, 2].reshape(-1, 1)), axis=1) + n1 = np.concatenate(OrthoAuxData.LLA2XYZM(a1 + b1 * c + c1 * (r0 - 1), a2 + b2 * c + c2 * (r0 - 1), + dem_resampled[r0 - 1, :]), axis=1)[1:-1, :] - T0[1:-1, :] # colcount-2 ,3 + n3 = np.concatenate(OrthoAuxData.LLA2XYZM(a1 + b1 * c + c1 * (r0 + 1), a2 + b2 * c + c2 * (r0 + 1), + dem_resampled[r0 + 1, :]), axis=1)[1:-1, :] - T0[1:-1, :] # colcount-2 ,3 + n2 = T0[:-2, :] - T0[1:-1, :] + n4 = T0[2:, :] - T0[1:-1, :] + T0 = T0[1:-1, :] + n31 = n1 - n3 + n42 = n2 - n4 + + N = XYZOuterM2(n31, n42) / 4 + ti = t_arr[r0 - 1, :].reshape(-1, 1) # rowcount-2,colcount-2 + satelliteSpaceState = self.SatelliteOrbitModel.getSatelliteSpaceState(ti) + Rps = satelliteSpaceState[:, :3] - T0 # nx3 + # 计算向量模之积 + N_Norm = np.sqrt(np.sum(N * N, axis=1)).reshape(-1, 1) + Rps_Norm = np.sqrt(np.sum(Rps * Rps, axis=1)).reshape(-1, 1) + N_Norm = np.concatenate((N_Norm, N_Norm, N_Norm), axis=1) + Rps_Norm = np.concatenate((Rps_Norm, Rps_Norm, Rps_Norm), axis=1) + N = N / N_Norm + Rps = Rps / Rps_Norm + # 计算向量相乘 + N_Rps = np.sum(N * Rps, axis=1) # col_count-2 + theta = np.arccos(N_Rps) # 入射角 + incidence[r0, 1:-1] = theta + + incidence = np.degrees(incidence) + print('整张图入射角计算完成,耗时{}秒'.format(time.time() - start)) + ImageHandler.write_img(incidence_tif_path, im_proj, im_geotrans, incidence) + + # 读取原始影像相关信息 + in_tif = gdal.Open(in_tif_path) + col_num = in_tif.RasterXSize + row_num = in_tif.RasterYSize + im_proj1 = in_tif.GetProjection() # 构建投影 + im_geotrans1 = in_tif.GetGeoTransform() + out_arr = np.zeros((row_num, col_num), dtype=np.float32) + for h in range(row_cnt): + r_min = min(row_data[h]) + r_max = max(row_data[h]) + c_min = min(col_data[h]) + c_max = max(col_data[h]) + if 0 <= r_min < row_num or 0 <= r_max < row_num or (r_min < 0 and r_max >= row_num): + if 0 <= c_min < col_num or 0 <= c_max < col_num or (c_min < 0 and c_max >= col_num): + for w in range(col_cnt): + r = row_data[h, w] + c = col_data[h, w] + if 0 <= r < row_num and 0 <= c < col_num: + if 0 <= incidence[h, w] < 90: + out_arr[r, c] += 1 + K = 10 + out_arr = K * out_arr + max_delta = np.max(out_arr) + min_delta = np.min(out_arr) + out_arr = (out_arr - min_delta) / (max_delta - min_delta) + ImageHandler.write_img(sim_out_path, im_proj1, im_geotrans1, out_arr) + + @staticmethod + def sar_intensity_synthesis(sar_in_tif, sar_out_tif): + # 获取SLC格式SAR影像的相关信息 + in_ds = gdal.Open(sar_in_tif) + bands_num = in_ds.RasterCount + rows = in_ds.RasterYSize + columns = 
in_ds.RasterXSize + proj = in_ds.GetProjection() + geotrans = in_ds.GetGeoTransform() + + datatype = gdal.GDT_Float32 + + # 创建输出的SAR强度图 + gtiff_driver = gdal.GetDriverByName('GTiff') + out_ds = gtiff_driver.Create(sar_out_tif, columns, rows, 1, datatype) + + # 输出SAR强度图 + out_data=np.zeros((rows,columns)) + for i in range(0,rows,10000): + for j in range(0,columns,10000): + len_row=10000 + len_col=10000 + if i+len_row>rows: + len_row=rows-i + if j+len_col>columns: + len_col=columns-j + end_i=i+len_row + end_j=j+len_col + in_data1 = in_ds.GetRasterBand(1).ReadAsArray(j, i, len_col, len_row).astype(np.float32) + in_data2 = in_ds.GetRasterBand(2).ReadAsArray(j, i, len_col, len_row).astype(np.float32) + out_data[i:end_i,j:end_j] = np.log10((in_data1**2 + in_data2**2)+1)*10 + out_ds.GetRasterBand(1).WriteArray(out_data) + del in_ds, out_ds + + @staticmethod + def img_match(sim_sar_path, ori_sar_path,work_sapce_path): + ''' 影像匹配并根据配准点 + args: + sim_sar_path:模拟SAR强度图 + orig_sar_path: 原始SAR强度图 + out_path:配准结果输出点 + work_space_path: 工作目录 + return: + point_correct: 点集映射结果 + raise: + None: 出现错误 + ''' + match_points_path=ImageMatchClass.ImageMatch(ori_sar_path,sim_sar_path,work_sapce_path) + match_points_path=os.path.join(work_sapce_path,"GridMatch.npy") + match_result_path=os.path.join(work_sapce_path,"match_result.npy") + match_result_image_path=os.path.join(work_sapce_path,"match_result_show.tif") + ImageMatchClass.DeNoiseMatch(match_points_path,match_result_path) + ImageMatchClass.DrawMatchResult(ori_sar_path,sim_sar_path,match_result_path,match_result_image_path) + return match_result_path + + def IndireOrtho_smi(self,sim_SAR_rc_path,ori_SAR_path,tar_sar_path): + ''' + 测试间接定位法计算的影像坐标是否有问题。 + 方法核心,插值出间接定位法坐标处的 实部和虚部,不考虑其他区域 + 使用插值方法:等权反距离权重法 + 考虑实际的数据量较大,为节省内存,使用分块处理方法。 + 块大小为1000。 + ''' + sim_rc_tif=gdal.Open(sim_SAR_rc_path) + sim_width=sim_rc_tif.RasterXSize + sim_height=sim_rc_tif.RasterYSize + sim_r_tif=sim_rc_tif.GetRasterBand(1).ReadAsArray(0,0,sim_width,sim_height) + sim_c_tif=sim_rc_tif.GetRasterBand(2).ReadAsArray(0,0,sim_width,sim_height) + # 构建掩膜 + sim_mask=(sim_r_tif>0)&(sim_r_tif0)&(sim_c_tif模拟 + args: + point_arr:[模拟row,模拟col,原始row,原始col] + ''' + # 由模拟->原始 + point_cnt = point_arr.shape[0] # + rs_arr = point_arr[:,2].reshape(point_cnt, 1) # 原始 行 + cs_arr = point_arr[:,3].reshape(point_cnt, 1) + + rm_arr = point_arr[:,0].reshape(point_cnt, 1)-3000 # 模拟 行 还原偏移 + cm_arr = point_arr[:,1].reshape(point_cnt, 1) + X = np.concatenate( + [np.ones((point_cnt, 1), dtype=np.float64), rm_arr, cm_arr, rm_arr ** 2, cm_arr ** 2, rm_arr * cm_arr], + axis=1) + sim2orirc_arr=np.ones((2,6)) + sim2orirc_arr[0,:] = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), rs_arr).reshape(1, 6) + sim2orirc_arr[1,:] = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), cs_arr).reshape(1, 6) + + # 翻转 sim->ori + X = np.concatenate( + [np.ones((point_cnt, 1), dtype=np.float64), rs_arr, cs_arr, rs_arr ** 2, cs_arr ** 2, rs_arr * cs_arr], + axis=1) + ori2simrc_arr=np.ones((2,6)) + ori2simrc_arr[0,:] = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), rm_arr).reshape(1, 6) + ori2simrc_arr[1,:] = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), cm_arr).reshape(1, 6) + return ori2simrc_arr.copy(),sim2orirc_arr.copy() + + @staticmethod + def ReSamplingSAR(ori_sar_path,out_sar_path,lon_lat_point_match_path,gt): + ''' 根据影像点匹配点,插值得到正射图像 + args: + ori_sar_path: 输入sar地址 + out_sar_path: 正射结果地址 + lon_lat_point_match_path: CoordinateCorrection计算结果的路径 + gt: 仿射矩阵 + return: + out_sar_path + ''' + + 
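The body of ReSamplingSAR converts matched longitude/latitude values into image rows and columns through the affine geotransform gt. A minimal sketch of that conversion using gdal.InvGeoTransform, assuming the binding returns the six inverse coefficients directly (the same usage as elsewhere in this module); the numbers are illustrative:

```python
from osgeo import gdal

def lonlat_to_rowcol(gt, lon, lat):
    """Map geographic coordinates to fractional (row, col).

    gt follows GDAL's convention: x = gt[0] + gt[1]*col + gt[2]*row,
    y = gt[3] + gt[4]*col + gt[5]*row.
    """
    inv_gt = gdal.InvGeoTransform(gt)        # six-element inverse transform
    col = inv_gt[0] + inv_gt[1] * lon + inv_gt[2] * lat
    row = inv_gt[3] + inv_gt[4] * lon + inv_gt[5] * lat
    return row, col

# usage sketch
gt = [116.0, 1e-4, 0.0, 40.0, 0.0, -1e-4]     # illustrative geotransform
print(lonlat_to_rowcol(gt, 116.01, 39.99))    # ~ (100.0, 100.0)
```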
ori_sar=gdal.Open(ori_sar_path) + ori_height=ori_sar.RasterYSize # 获得高度 + ori_width=ori_sar.RasterXSize # 获得宽度 + # 开始插值 + spatialreference=osr.SpatialReference() + spatialreference.SetWellKnownGeogCS("WGS84") # 设置地理坐标,单位为度 degree # 设置投影坐标,单位为度 degree + spatialproj=spatialreference.ExportToWkt() # 导出投影结果 + gtiff_driver = gdal.GetDriverByName('GTiff') + out_sar=gtiff_driver.Create(out_sar_path, ori_width, ori_height, 2, gdal.GDT_Float32) + out_sar.SetGeoTransform(gt) # 写入仿射变换参数 + out_sar.SetProjection(spatialproj) # 写入投影 + + ab=np.array([gt[1],gt[2],gt[4],gt[5]]).reshape(2,2) + ab=np.np.matmul(np.linalg.inv(np.np.matmul(ab.T,ab)),ab.T) + # 解算仿射矩阵,为后面的插值做准备 + lon_lat_file_list=os.listdir(lon_lat_point_match_path) + block_info_json=[] + for lon_lat_file_name in lon_lat_file_list: + lon_lat=np.load(os.path.join(lon_lat_point_match_path,lon_lat_file_name)) + ori_r=lon_lat[:,0] # 原始影像的行坐标 + ori_c=lon_lat[:,1] # 原始影像的列坐标 + lon=lon_lat[:,2].copy() + lat=lon_lat[:,3].copy() + # 根据仿射矩阵阶段投影变换结果 + lon=lon-gt[0] + lat=lat-gt[3] + lon_lat[:,2]=ab[0,0]*lon+ab[0,1]*lat # 行 + lon_lat[:,3]=ab[1,0]*lon+ab[1,1]*lat # 列 + block_info_json.append({ + 'ori':[np.min(lon_lat[:,0]),np.max(lon_lat[:,0]),np.min(lon_lat[:,1]),np.max(lon_lat[:,1])],# r0 r1 c0 c1 + 'ortho':[np.min(lon_lat[:,2]),np.max(lon_lat[:,2]),np.min(lon_lat[:,3]),np.max(lon_lat[:,3])], + 'path':os.path.join(lon_lat_point_match_path,lon_lat_file_name) + }) + + for i in range(0,ori_height,6000): + for j in range(0,ori_width,6000): + # 判断需要检索的行列 + len_row=6000 + len_col=6000 + if i+len_row>ori_height: + len_row=ori_height-i + if j+len_col>ori_width: + len_col=ori_width-j + end_i=i+len_row + end_j=j+len_col + select_region=[i-1000,end_i+1000,j-1000,end_i+1000] + # 根据求交原则,解出需要可以参与插值计算的 方块 + inter_block=[] + for block in block_info_json: + block_ortho =block['ortho'] + if block_ortho[0]>select_region[0] and block_ortho[1]select_region[2] and block_ortho[3]模拟SAR + # X=a0+a1*r+a2*c+a5*r*c+a3*r^2+a4*c^2 + # Y=b0+b1*r+b2*c+b5*r*c+b3*r^2+b4*r^2 + point_arr=np.load(point_match_path) + rcori2simrc_arr,sim2orirc_arr=IndirectOrthorectification.CoordinateTransformation(point_arr) + # 根据行列号裁剪DEM + sim_rc_tif=gdal.Open(sim_out_path) + sim_width=sim_rc_tif.RasterXSize + sim_height=sim_rc_tif.RasterYSize + sim_r_tif=sim_rc_tif.GetRasterBand(1).ReadAsArray(0,0,sim_width,sim_height) + sim_c_tif=sim_rc_tif.GetRasterBand(2).ReadAsArray(0,0,sim_width,sim_height) + + # 计算实际行列号 + r=sim2orirc_arr[0,0]+sim2orirc_arr[0,1]*sim_r_tif+sim2orirc_arr[0,2]*sim_c_tif+sim2orirc_arr[0,3]*sim_r_tif*sim_r_tif+sim2orirc_arr[0,4]*sim_c_tif*sim_c_tif+sim2orirc_arr[0,5]*sim_r_tif*sim_c_tif + c=sim2orirc_arr[1,0]+sim2orirc_arr[1,1]*sim_r_tif+sim2orirc_arr[1,2]*sim_c_tif+sim2orirc_arr[1,3]*sim_r_tif*sim_r_tif+sim2orirc_arr[1,4]*sim_c_tif*sim_c_tif+sim2orirc_arr[1,5]*sim_r_tif*sim_c_tif + sim_r_tif=r + sim_c_tif=c + del r,c + + # + sim_mask=(sim_r_tif>=-1)&(sim_r_tif=-1)&(sim_c_tif原始 + ori_rc=np.ones((r_ids.size,2)) + ori_rc[:,0]=r_arr_trans[0,0]+r_ids*r_arr_trans[0,1]+c_ids*r_arr_trans[0,2]+r_arr_trans[0,3]*r_ids ** 2+r_arr_trans[0,4]*c_ids ** 2+r_arr_trans[0,5]*r_ids * c_ids + ori_rc[:,1]=c_arr_trans[0,0]+r_ids*c_arr_trans[0,1]+c_ids*c_arr_trans[0,2]+c_arr_trans[0,3]*r_ids ** 2+c_arr_trans[0,4]*c_ids ** 2+c_arr_trans[0,5]*r_ids * c_ids + # 插值x坐标 + sim_x_data=gdal.Open(sim_sar_x_path) + sim_x_arr=sim_x_data.GetRasterBand(1).ReadAsArray(0,0 , sim_x_data.RasterXSize, sim_x_data.RasterYSize) # 块 高程文件 + sim_x=sim_x_arr[r_ids,c_ids] + del sim_x_arr,sim_x_data + tree=KDTree(ori_rc) # 原始sar点对应的坐标 + + 
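The blocks that follow interpolate the simulated X/Y/Z grids onto the original SAR raster with k-nearest-neighbour inverse-distance weighting built on the KDTree created above (k = 10, weights proportional to reciprocal distance and normalised per pixel). A compact, self-contained sketch of that scheme with scipy's cKDTree; the epsilon guard against zero distances is an addition for robustness, not part of the source:

```python
import numpy as np
from scipy.spatial import cKDTree

def idw_resample(known_xy, known_val, query_xy, k=10, eps=1e-12):
    """Inverse-distance-weighted interpolation at query points.

    known_xy  : (n, 2) coordinates of scattered samples
    known_val : (n,)   sample values
    query_xy  : (m, 2) points to interpolate at
    """
    tree = cKDTree(known_xy)
    dist, idx = tree.query(query_xy, k=k)          # (m, k) distances and indices
    w = 1.0 / (dist + eps)                         # guard against zero distance
    w /= np.sum(w, axis=1, keepdims=True)          # normalise weights per query point
    return np.sum(w * known_val[idx], axis=1)

# usage sketch on a synthetic scattered field
pts = np.random.rand(500, 2) * 100.0
vals = np.sin(pts[:, 0] / 10.0) + np.cos(pts[:, 1] / 10.0)
grid = np.mgrid[0:100:20j, 0:100:20j].reshape(2, -1).T
print(idw_resample(pts, vals, grid, k=10).shape)   # (400,)
```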
gtiff_driver = gdal.GetDriverByName('GTiff') + SAR_row_col=gtiff_driver.Create(ori_xyz_path,width,height,3,gdal.GDT_Float32) + # 准备插值--分块插值 + blocksize=5000 + SAR_ori=np.zeros((height,width)) + for i in range(0,height,blocksize): + for j in range(0,width,blocksize): + start_row=i + start_col=j + len_row=blocksize + len_col=blocksize + if(start_row+len_row>height): + len_row=height-start_row + if(start_col+len_col>width): + len_col=width-start_col + end_row=start_row+len_row + end_col=start_col+len_col + grid_xy= np.mgrid[start_row:end_row:1, start_col:end_col:1] + grid_xy=grid_xy.reshape(2,-1).transpose(1,0) + distance_weight,ori_select=tree.query(grid_xy,k=10) + distance_weight=np.reciprocal(distance_weight) + distance_weight=np.multiply(distance_weight.transpose(1,0),np.reciprocal(np.sum(distance_weight,axis=1))) + distance_weight=distance_weight.transpose(1,0) + SAR_ori[start_row:end_row,start_col:end_col]=np.sum( np.multiply(distance_weight,sim_x[ori_select]),axis=1).reshape(len_row,len_col) + + SAR_row_col.GetRasterBand(1).WriteArray(SAR_ori) + + # 插值y坐标 + sim_y_data=gdal.Open(sim_sar_y_path) + sim_y_arr=sim_y_data.GetRasterBand(1).ReadAsArray(0,0 , sim_y_data.RasterXSize, sim_y_data.RasterYSize) # 块 高程文件 + sim_y=sim_y_arr[r_ids,c_ids] + del sim_y_arr,sim_y_data,sim_x + # 准备插值--分块插值 + blocksize=5000 + SAR_ori=np.zeros((height,width)) + for i in range(0,height,blocksize): + for j in range(0,width,blocksize): + start_row=i + start_col=j + len_row=blocksize + len_col=blocksize + if(start_row+len_row>height): + len_row=height-start_row + if(start_col+len_col>width): + len_col=width-start_col + end_row=start_row+len_row + end_col=start_col+len_col + grid_xy= np.mgrid[start_row:end_row:1, start_col:end_col:1] + grid_xy=grid_xy.reshape(2,-1).transpose(1,0) + distance_weight,ori_select=tree.query(grid_xy,k=10) + distance_weight=np.reciprocal(distance_weight) + distance_weight=np.multiply(distance_weight.transpose(1,0),np.reciprocal(np.sum(distance_weight,axis=1))) + distance_weight=distance_weight.transpose(1,0) + SAR_ori[start_row:end_row,start_col:end_col]=np.sum(np.multiply(distance_weight,sim_y[ori_select]),axis=1).reshape(len_row,len_col) + + SAR_row_col.GetRasterBand(2).WriteArray(SAR_ori) + # 插值z坐标 + sim_z_data=gdal.Open(sim_sar_z_path) + sim_z_arr=sim_z_data.GetRasterBand(1).ReadAsArray(0,0 , sim_z_data.RasterXSize, sim_z_data.RasterYSize) # 块 高程文件 + sim_z=sim_z_arr[r_ids,c_ids] + del sim_z_arr,sim_z_data,sim_y + # 准备插值--分块插值 + blocksize=5000 + SAR_ori=np.zeros((height,width)) + for i in range(0,height,blocksize): + for j in range(0,width,blocksize): + start_row=i + start_col=j + len_row=blocksize + len_col=blocksize + if(start_row+len_row>height): + len_row=height-start_row + if(start_col+len_col>width): + len_col=width-start_col + end_row=start_row+len_row + end_col=start_col+len_col + grid_xy= np.mgrid[start_row:end_row:1, start_col:end_col:1] + grid_xy=grid_xy.reshape(2,-1).transpose(1,0) + distance_weight,ori_select=tree.query(grid_xy,k=10) + distance_weight=np.reciprocal(distance_weight) + distance_weight=np.multiply(distance_weight.transpose(1,0),np.reciprocal(np.sum(distance_weight,axis=1))) + distance_weight=distance_weight.transpose(1,0) + SAR_ori[start_row:end_row,start_col:end_col]=np.sum(np.multiply(distance_weight,sim_z[ori_select]),axis=1).reshape(len_row,len_col) + SAR_row_col.GetRasterBand(3).WriteArray(SAR_ori) + return ori_xyz_path + + def 
outResamplingParas(self,work_space_file,ortho_sar_xyz_pow_path,sar_xyz_path,sar_incidence_path,ortho_sar_xyz_path,ortho_sar_incidence_path,out_sar_file_list,dem_clip_path,ori2sim,sim2ori): + ''' + sar_xyz_path:xyz插值结果 + sar_incidence_path: 入射角 + ortho_sar_xyz_path: 重采样结果文件夹 + out_sar_file_list:需要重采样的文件 + rc_trans_arr:变换结果 + + ''' + with open(os.path.join(work_space_file,"resample_para.txt"),'w',encoding='utf-8') as fp: + fp.write("{}\n".format(dem_clip_path)) + fp.write("{}\n".format(os.path.join(work_space_file,"ori_sim.tif"))) + fp.write("{}\n".format(sar_xyz_path)) + fp.write("{}\n".format(sar_incidence_path)) + fp.write("{}\n".format(ortho_sar_incidence_path)) + fp.write("{}\n".format(ortho_sar_incidence_path.replace("orth_sar_incidence.tif","orth_sar_local_incidence.tif"))) + fp.write("{}\n".format(ortho_sar_xyz_path)) + fp.write("{}\n".format(len(out_sar_file_list))) + for i in range(len(out_sar_file_list)): + file_name=os.path.split(out_sar_file_list[i])[1] + fp.write("{}\n".format(out_sar_file_list[i])) + fp.write("{}\n".format(os.path.join(ortho_sar_xyz_path,file_name))) + fp.write("{}\n".format(os.path.join(ortho_sar_xyz_pow_path,file_name))) + #os.path.split(in_tif_path)[1] + + fp.write("{}\n".format(12)) + for i in range(6): + fp.write("{}\n".format(ori2sim[0,i])) + for i in range(6): + fp.write("{}\n".format(ori2sim[1,i])) + fp.write("{}\n".format(12)) + for i in range(6): + fp.write("{}\n".format(sim2ori[0,i])) + for i in range(6): + fp.write("{}\n".format(sim2ori[1,i])) + return os.path.join(work_space_file,"resample_para.txt") + + def Resampling(self,out_sar_xyz_path,inv_gt,gt,ori_resampling_file_path): + out_xyz=gdal.Open(out_sar_xyz_path) + ori_height=out_xyz.RasterYSize # 获得高度 + ori_width=out_xyz.RasterXSize # 获得宽度 + out_xyz_x=out_xyz.GetRasterBand(1).ReadAsArray(0,0 , ori_width, ori_height) + out_xyz_y=out_xyz.GetRasterBand(2).ReadAsArray(0,0 , ori_width, ori_height) + + # 计算对应的行列号 + out_xyz_rc=np.ones((ori_height,ori_width,2)) + out_xyz_rc[:,:,1]=inv_gt[0]+inv_gt[1]*out_xyz_x+inv_gt[2]*out_xyz_y + out_xyz_rc[:,:,0]=inv_gt[3]+inv_gt[4]*out_xyz_x+inv_gt[5]*out_xyz_y + del out_xyz_x,out_xyz_y,out_xyz + out_xyz_rc=out_xyz_rc.reshape(-1,2) + + blocksize=2000 + ori_data=gdal.Open(ori_resampling_file_path[0][0][0]) + width=ori_data.RasterXSize + height=ori_data.RasterYSize + SAR_ori=np.ones((width,height)) + sim_x=ori_data.GetRasterBand(1).ReadAsArray(0,0 , width, height).reshape(-1) + tree=KDTree(out_xyz_rc) + for i in range(0,height,blocksize): + for j in range(0,width,blocksize): + start_row=i + start_col=j + len_row=blocksize + len_col=blocksize + if(start_row+len_row>height): + len_row=height-start_row + if(start_col+len_col>width): + len_col=width-start_col + end_row=start_row+len_row + end_col=start_col+len_col + grid_xy= np.mgrid[start_row:end_row:1, start_col:end_col:1] + grid_xy=grid_xy.reshape(2,-1).transpose(1,0) + distance_weight,ori_select=tree.query(grid_xy,k=4) + distance_weight=np.reciprocal(distance_weight) + distance_weight=np.multiply(distance_weight.transpose(1,0),np.reciprocal(np.sum(distance_weight,axis=1))) + distance_weight=distance_weight.transpose(1,0) + SAR_ori[start_row:end_row,start_col:end_col]=np.sum( np.multiply(distance_weight,sim_x[ori_select]),axis=1).reshape(len_row,len_col) + pass + ''' + # 获得所有点集 + del point_arr + # 构建查询表 + ori_sar=gdal.Open(ori_sar_path) + ori_height=ori_sar.RasterYSize # 获得高度 + ori_width=ori_sar.RasterXSize # 获得宽度 + point_arr_path=[os.path.join(work_space_file,"point_refrence.npy")] + for point_match_arr_filename in 
os.listdir(os.path.join(work_space_file,'temp_point_refrence')): + point_arr_path.append(os.path.join(work_space_file,'temp_point_refrence',point_match_arr_filename)) + point_arr=np.ones((1,5)) + for point_path in point_arr_path: + point_temp_arr=np.load(point_path) + point_arr=np.row_stack((point_arr,point_temp_arr)) + point_arr=point_arr[1:,:] + ori_points=np.zeros((point_arr.shape[0],2)) + # 求解对应的行列号 + for i in range(r_arr_trans.shape[1]): + ori_points[:,0]=ori_points[:,0]+r_arr_trans[0,i]*point_arr[:,1]**i + ori_points[:,1]=ori_points[:,1]+c_arr_trans[0,i]*point_arr[:,1]**i + point_arr[:,0]=ori_points[:,0].copy() + point_arr[:,1]=ori_points[:,1].copy() + del ori_points + gc.collect() + # 根据此进行散点插值,确定 + lon_lat_point_match_path=os.path.join(work_space_file,"lon_lat_point_match") + if os.path.exists(lon_lat_point_match_path): + shutil.rmtree(lon_lat_point_match_path) + os.makedirs(lon_lat_point_match_path) + lon_lat=np.zeros((4,2)) + block_info_json={"ori_rc":[]} + # + block_size=5000 + for i in range(0,ori_height,block_size): + for j in range(0,ori_width,block_size): + len_row=block_size + len_col=block_size + if i+len_row>ori_height: + len_row=ori_height-i + if j+len_col>ori_width: + len_col=ori_width-j + end_i=i+len_row + end_j=j+len_col + + r=(np.ones((len_col,len_row))*range(len_row)).transpose(1,0)+i + c=np.ones((len_row,len_col))*range(len_col)+j + # 格网插值 + # scipy 规则格网插值 + lon_lat_r_c=np.ones((len_col*len_row,5)) + lon_lat_r_c[:,0]=r.reshape(-1) + lon_lat_r_c[:,1]=c.reshape(-1) + + SAR_ori_X=interpolate.griddata(point_arr[:,:2], point_arr[:,2], (lon_lat_r_c[:,0], lon_lat_r_c[:,1]), method='linear').reshape(-1) + SAR_ori_Y=interpolate.griddata(point_arr[:,:2], point_arr[:,3], (lon_lat_r_c[:,0], lon_lat_r_c[:,1]), method='linear').reshape(-1) + SAR_ori_Z=interpolate.griddata(point_arr[:,:2], point_arr[:,4], (lon_lat_r_c[:,0], lon_lat_r_c[:,1]), method='linear').reshape(-1) + [lon_lat_r_c[:,2],lon_lat_r_c[:,3],lon_lat_r_c[:,4]]=OrthoAuxData.XYZ2LLAM(SAR_ori_X,SAR_ori_Y,SAR_ori_Z) + temp_path=os.path.join(lon_lat_point_match_path,"r_c_{}_{}.npy".format(i,j)) + np.save(temp_path,lon_lat_r_c) + block_info_json['ori_rc'].append({'ori':[i,j,end_i,end_j],"path":temp_path}) + if i==0 and j==0: + lon_lat[0,0]=lon_lat_r_c[:,2].reshape(len_row,len_col)[0,0] # 经度 + lon_lat[0,1]=lon_lat_r_c[:,3].reshape(len_row,len_col)[0,0] # 纬度 + + if i+len_row==ori_height and j==0: + lon_lat[1,0]=lon_lat_r_c[:,2].reshape(len_row,len_col)[-1,0] + lon_lat[1,1]=lon_lat_r_c[:,3].reshape(len_row,len_col)[-1,0] + + if j+len_col==ori_width and i==0: + lon_lat[2,0]=lon_lat_r_c[:,2].reshape(len_row,len_col)[0,-1] + lon_lat[2,1]=lon_lat_r_c[:,3].reshape(len_row,len_col)[0,-1] + if j+len_col==ori_width and i+len_row==ori_height: + lon_lat[3,0]=lon_lat_r_c[:,2].reshape(len_row,len_col)[-1,-1] + lon_lat[3,1]=lon_lat_r_c[:,3].reshape(len_row,len_col)[-1,-1] + # 1 3 + # 2 4 + gt=[0,1,2,3,4,5] + # 计算 经度 + gt[0]= lon_lat[0,0] + gt[1]=(lon_lat[2,0]-gt[0])/(ori_width-1) + gt[2]=(lon_lat[3,0]-gt[0]-gt[1]*(ori_width-1))/(ori_height-1) + # 计算 纬度 + gt[3]= lon_lat[0,1] + gt[5]=(lon_lat[1,1]-gt[3])/(ori_height-1) + gt[4]=(lon_lat[3,1]-gt[3]-gt[5]*(ori_height-1))/(ori_width-1) + + + return lon_lat_point_match_path,gt + #return lon_lat_point_match_path + + return None + pass + ''' diff --git a/backScattering/BackScatteringMain.py b/backScattering/BackScatteringMain.py index 43244f64..a7f0ed99 100644 --- a/backScattering/BackScatteringMain.py +++ b/backScattering/BackScatteringMain.py @@ -9,16 +9,13 @@ @Version :1.0.0 """ import 
logging -# from BackScatteringAlg import ScatteringAlg as alg -# from BackScatteringAlg import rpc_correction,getRCImageRC from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource -from tool.algorithm.xml.CreatMetafile import CreateMetafile from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml -from BackScatteringXmlInfo import CreateDict, CreateStadardXmlFile from tool.algorithm.image.ImageHandle import ImageHandler -from OrthoAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2 -from OrthoAlg import ScatteringAlg as alg +from tool.algorithm.algtools.PreProcess import PreProcess as pp +from BackScatteringAlg import IndirectOrthorectification, DEMProcess,rpc_correction,getRCImageRC,get_RPC_lon_lat,getRCImageRC2 +from BackScatteringAlg import ScatteringAlg as alg from tool.config.ConfigeHandle import Config as cf import os import glob @@ -243,6 +240,21 @@ class ScatteringMain: if os.path.exists(path): self.del_floder(path) + def process_sim_ori(self, ori_sim, sim_ori): + + scopes = () + scopes += (ImageHandler.get_scope_ori_sim(ori_sim),) + + intersect_polygon = pp().intersect_polygon(scopes) + if intersect_polygon is None: + raise Exception('create intersect shp fail!') + shp_path = os.path.join(self.__workspace_preprocessing_path, 'IntersectPolygon.shp') + if pp().write_polygon_shp(shp_path, intersect_polygon, 4326) is False: + raise Exception('create intersect shp fail!') + sim_ori_process = os.path.join(self.__workspace_preprocessing_path, 'sim_ori_process.tif') + pp().cut_img(sim_ori_process, sim_ori, shp_path) + return sim_ori_process + def process_handle(self,start): in_tif_paths = list(glob.glob(os.path.join(self.__in_processing_paras['SLC'], '*.tif'))) if in_tif_paths == []: @@ -311,7 +323,16 @@ class ScatteringMain: this_out_dem_rc_path = os.path.join(out_dir_path, "WGS_SAR_map.tiff") # out_dir_path + "\\" + "WGS_SAR_map.tiff"#// 经纬度与行列号映射 if(os.path.exists(this_out_dem_rc_path)): - os.remove(this_out_dem_rc_path) + os.remove(this_out_dem_rc_path) + + this_out_sar_sim_path = out_dir_path + "\\" + "sar_sim.tiff" + if (os.path.exists(this_out_sar_sim_path)): + os.remove(this_out_sar_sim_path) + + this_out_sar_sim_wgs_path = out_dir_path + "\\" + "sar_sim_wgs.tiff" # // 经纬度与行列号映射 + if (os.path.exists(this_out_sar_sim_wgs_path)): + os.remove(this_out_sar_sim_wgs_path) + this_out_incidence_path = os.path.join(out_dir_path, "incidentAngle.tiff") # out_dir_path + "\\" + "incidentAngle.tiff"#// 入射角 this_out_localIncidenct_path = os.path.join(out_dir_path, "localIncidentAngle.tiff") # out_dir_path + "\\" + "localIncidentAngle.tiff"#// 局地入射角 if(os.path.exists(this_out_incidence_path)): @@ -329,11 +350,17 @@ class ScatteringMain: this_out_ori_sim_tiff = os.path.join(out_dir_path, "RD_ori_sim.tif") # out_dir_path + "\\" + "RD_ori_sim.tif"#// 局地入射角 this_in_rpc_lon_lat_path = this_out_ori_sim_tiff + this_out_sim_ori_tiff = os.path.join(out_dir_path, "RD_sim_ori.tif") + this_in_rpc_x_y_path = this_out_sim_ori_tiff + + this_in_rpc_x_y_path_pro = self.process_sim_ori(this_in_rpc_lon_lat_path, this_in_rpc_x_y_path) + parameter_path = os.path.join(self.__workspace_processing_path, "orth_para.txt") - dem_rc = os.path.join(self.__workspace_preprocessing_path, "dem_rc.tiff") + for in_tif_path in in_tif_paths: - out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_DB.tif" + # out_tif_path 
= os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_lin.tif" + out_tif_path = os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"_lin.tif" if ('HH' in os.path.basename(in_tif_path)) or ('HV' in os.path.basename(in_tif_path)) or ('VH' in os.path.basename(in_tif_path)) or ('VV' in os.path.basename(in_tif_path)): alg.sar_backscattering_coef(in_tif_path, meta_file_path, out_tif_path) # 构建RPC @@ -341,26 +368,46 @@ class ScatteringMain: rpc_path=in_tif_path.replace(".tiff",".rpc") if os.path.exists(in_tif_path.replace(".tiff",".rpc")) else in_tif_path.replace(".tiff",".rpb") if not os.path.exists(rpc_path): logger.error('rpc not found!') - # tempout_tif_path=os.path.join(self.__workspace_processing_path,os.path.splitext(os.path.basename(in_tif_path))[0]).replace("_L1A_","_L4_")+ r".tif" - tempout_tif_path=os.path.join(self.__workspace_processing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif" + + # db->地理编码 + # lin_tif_path = os.path.join(self.__workspace_processing_path, + # os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif" + # Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path, + # out_tif_path, + # lin_tif_path) + # 线性->地理编码->db + lin_tif_path=os.path.join(self.__workspace_preprocessing_path,os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-lin_geo.tif" + # Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path_pro, + # out_tif_path, + # lin_tif_path) + + Orthorectification.calInterpolation_bil_Wgs84_rc_sar_sigma(parameter_path, this_in_rpc_x_y_path_pro, + out_tif_path, + lin_tif_path) + tempout_tif_path = os.path.join(self.__workspace_processing_path, + os.path.splitext(os.path.basename(in_tif_path))[0]) + r"-cal.tif" + alg.lin_to_db(lin_tif_path, tempout_tif_path) #线性值转回DB值 + # 移动RPC #rpc_correction(in_tif_path,rpc_path,out_tif_path,dem_tif_file = None) # Orthorectification.inter_Range2Geo(this_in_rpc_lon_lat_path,out_tif_path,tempout_tif_path,Orthorectification.heightspace) - Orthorectification.calInterpolation_cubic_Wgs84_rc_sar_sigma(parameter_path, dem_rc, out_tif_path, - tempout_tif_path) - #shutil.move(rpc_path,out_tif_path.replace(".tiff",".rpc")) + self.imageHandler.write_quick_view(tempout_tif_path, color_img=False) + # self.imageHandler.write_quick_view(lin_tif_path, color_img=False) else: shutil.copy(in_tif_path,self.__workspace_processing_path) ref_tif_path = tempout_tif_path + # ref_tif_path = lin_tif_path # 构建行列号映射表 #out_rpc_rc_path = os.path.join(self.__workspace_processing_path,"RPC_ori_sim.tif") #getRCImageRC(in_tif_path,out_rpc_rc_path,rpc_path) logger.info('progress bar: 90%') if(os.path.exists(this_in_rpc_lon_lat_path)): - os.remove(this_in_rpc_lon_lat_path) - # out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.split(meta_file_path)[1].rstrip('.meta.xml') + '_DB.meta.xml') + os.remove(this_in_rpc_lon_lat_path) + if (os.path.exists(this_in_rpc_x_y_path)): + os.remove(this_in_rpc_x_y_path) + # out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.split(meta_file_path)[1].rstrip('.meta.xml') + '_DB.meta.xml') out_mate_file_path = os.path.join(self.__workspace_processing_path,os.path.basename(meta_file_path)) shutil.copy(meta_file_path, out_mate_file_path) diff --git a/landcover_c_sar/LandCoverAuxData.py b/landcover_c_sar/LandCoverAuxData.py index 13cf9bd0..f98f7f22 100644 --- 
a/landcover_c_sar/LandCoverAuxData.py +++ b/landcover_c_sar/LandCoverAuxData.py @@ -21,9 +21,10 @@ logger = logging.getLogger("mylog") class LandCoverMeasCsv: """读取地表覆盖标记数据""" - def __init__(self, csv_path, preprocessed_paras): + def __init__(self, csv_path, preprocessed_paras, max_tran__num_per_class): self.__csv_path = csv_path self.__preprocessed_paras = preprocessed_paras + self.__max_tran__num_per_class = max_tran__num_per_class def api_read_measure(self): """ @@ -123,9 +124,10 @@ class LandCoverMeasCsv: for train_data in train_data_list: logger.info(str(train_data[0]) + "," + str(train_data[2]) +"," + "num:" + str(len(train_data[3]))) - logger.info("max number = 100000, random select 100000 point as train data!") - if(len(train_data[3]) > 100000): - train_data[3] = random.sample(train_data[3], 100000) + max_num = self.__max_tran__num_per_class + logger.info("max number =" + str(max_num) + ", random select" + str(max_num) + " point as train data!") + if (len(train_data[3]) > max_num): + train_data[3] = random.sample(train_data[3], max_num) return train_data_list diff --git a/landcover_c_sar/LandCoverMain.py b/landcover_c_sar/LandCoverMain.py index 5d6b9326..6b56d066 100644 --- a/landcover_c_sar/LandCoverMain.py +++ b/landcover_c_sar/LandCoverMain.py @@ -21,6 +21,7 @@ import multiprocessing import pyproj._compat # 导入PreProcess模块要在其他包含gdal库的模块前面,不然剪切影像会报错,详见https://blog.csdn.net/u014656611/article/details/106450006 from tool.algorithm.algtools.PreProcess import PreProcess as pp +from LandCoverAuxData import LandCoverMeasCsv from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.algtools.ROIAlg import ROIAlg as alg @@ -129,6 +130,8 @@ class LandCoverMain: para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) # tif路径字典 para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))) + parameter_path = os.path.join(file_dir, "orth_para.txt") + para_dic.update({"paraMeter": parameter_path}) return para_dic def __create_work_space(self): @@ -162,10 +165,10 @@ class LandCoverMain: """ 预处理 """ - para_names_geo = [] - for key in self.__processing_paras.keys(): - if "FeatureMap" in key: - para_names_geo.append(key) + para_names_geo = ['sim_ori'] + # for key in self.__processing_paras.keys(): + # if "FeatureMap" in key: + # para_names_geo.append(key) self.__feature_name_list = para_names_geo p = pp() @@ -181,10 +184,12 @@ class LandCoverMain: self.__preprocessed_paras.update({name: out_path}) logger.info('preprocess_handle success!') - for name in para_names_geo: - l1a_path = os.path.join(self.__workspace_preprocessed_path, name+".tif") - self._tr.tran_geo_to_l1a(self.__preprocessed_paras[name], l1a_path, self.__preprocessed_paras['ori_sim'], is_class=False) - self.__preprocessed_paras[name] = l1a_path + # for name in para_names_geo: + # l1a_path = os.path.join(self.__workspace_preprocessed_path, name+".tif") + # self._tr.tran_geo_to_l1a(self.__preprocessed_paras[name], l1a_path, self.__preprocessed_paras['ori_sim'], is_class=False) + # self.__preprocessed_paras[name] = l1a_path + self.__cols_geo = self.imageHandler.get_img_width(self.__preprocessed_paras['sim_ori']) + self.__rows_geo = self.imageHandler.get_img_height(self.__preprocessed_paras['sim_ori']) self.__cols = self.imageHandler.get_img_width(self.__preprocessed_paras['HH']) self.__rows = self.imageHandler.get_img_height(self.__preprocessed_paras['HH']) @@ -330,7 +335,7 @@ class 
LandCoverMain: block_size = bp.get_block_size(self.__rows, self.__cols) self.__block_size = block_size - bp.cut(feature_tif_dic, self.__workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size) + bp.cut_new(feature_tif_dic, self.__workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size) img_dir, img_name = bp.get_file_names(self.__workspace_block_tif_path, ['tif']) dir_dict_all = bp.get_same_img(img_dir, img_name) @@ -423,7 +428,7 @@ class LandCoverMain: # 合并影像 data_dir = bp_cover_dir out_path = self.__workspace_processing_path[0:-1] - bp.combine( + bp.combine_new( data_dir, self.__cols, self.__rows, @@ -456,16 +461,45 @@ class LandCoverMain: logger.info('progress bar: 30%') return lee_filter_path + def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): + ''' + # std::cout << "mode 11"; + # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path, + dem_rc, in_sar, out_sar) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + def features_geo(self, features_path): + dir = os.path.join(self.__workspace_processing_path, 'features_geo\\') + if not os.path.exists(dir): + os.mkdir(dir) + for key, file in zip(features_path, features_path.values()): + name = key + '_geo.tif' + out_path = os.path.join(dir, name) + self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'], + self.__preprocessed_paras['sim_ori'], file, out_path) + return dir + def process_handle(self, start): """ 算法主处理函数 """ + hh_geo_path = self.__workspace_processing_path + "hh_geo.tif" + self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'], + self.__preprocessed_paras['sim_ori'], + self.__preprocessed_paras['HH'], hh_geo_path) - # 读取实测值,获取多边形区域内所有的点,分为训练集数据和测试集数据 - # train_data_list = csvh.trans_landCover_measuredata(csvh.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim']) - csvh_roi = csvHandle(self.__rows, self.__cols) - train_data_dic = csvh_roi.trans_landCover_measuredata_dic(csvh_roi.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim'], MAX_TRAN_NUM) + pm = LandCoverMeasCsv(self.__processing_paras['LabelData'], hh_geo_path, MAX_TRAN_NUM) + train_data_list = pm.api_read_measure() + train_data_dic = csvh.trans_landCover_list2dic(train_data_list) + + csvh_roi = csvHandle(self.__rows_geo, self.__cols_geo) + # train_data_dic = csvh_roi.trans_landCover_measuredata_dic(csvh_roi.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim'], MAX_TRAN_NUM) label_img = csvh_roi.get_roi_img() if(len(label_img) != 0): self.imageHandler.write_img(os.path.join(self.__workspace_processing_path, "label_img.tif"),"",[0,0,0,0,0,0],label_img) @@ -487,17 +521,19 @@ class LandCoverMain: feature_tif_paths.update(write_bin_to_tif(self.__feature_tif_dir, feature_bin_dic)) logging.info("feature_tif_paths:%s",feature_tif_paths) + # 对所有特征进行地理编码 + feature_geo = self.features_geo(feature_tif_paths) # 新添加的特征做归一化 - for name in self.__feature_name_list: - proj, geo, arr = self.imageHandler.read_img(self.__preprocessed_paras[name]) - arr = ml.standardization(arr) - self.imageHandler.write_img(os.path.join(self.__feature_tif_dir, name+".tif"), proj, geo, arr) + # for name 
in self.__feature_name_list: + # proj, geo, arr = self.imageHandler.read_img(self.__preprocessed_paras[name]) + # arr = ml.standardization(arr) + # self.imageHandler.write_img(os.path.join(self.__feature_tif_dir, name+".tif"), proj, geo, arr) logger.info("decompose feature success!") logger.info('progress bar: 50%') # 生成最优特征子集训练集 - X_train, Y_train, optimal_feature = ml.gene_optimal_train_set(train_data_dic, self.__feature_tif_dir, 0.07, 0.85) + X_train, Y_train, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_geo, 0.07, 0.85) # 训练模型 cost = self.__processing_paras["Cost"] @@ -517,10 +553,11 @@ class LandCoverMain: # logger.info('progress bar: 60%') # 生成测试集 - X_test_path_list = ml.gene_test_set(self.__feature_tif_dir, optimal_feature) + # X_test_path_list = ml.gene_test_set(self.__feature_tif_dir, optimal_feature) + X_test_path_list = ml.gene_test_set(feature_geo, optimal_feature) # 预测 logger.info('testing') - cover_path = ml.predict(clf, X_test_path_list, EXE_NAME, self.__workspace_processing_path, self.__rows, self.__cols) + cover_path = ml.predict(clf, X_test_path_list, EXE_NAME, self.__workspace_processing_path, self.__rows_geo, self.__cols_geo) logger.info('test success!') logger.info('progress bar: 95%') @@ -537,14 +574,18 @@ class LandCoverMain: cover_data = np.int16(cover_data) # cover_path = cover_path.replace('.tif', '_geo.tif') cover_geo_path = cover_path.replace('.tif', '_geo.tif') - self.imageHandler.write_img(cover_path, proj, geo, cover_data) + self.imageHandler.write_img(cover_geo_path, proj, geo, cover_data) + # l1a图像坐标转换地理坐标 - self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], cover_path, cover_geo_path, 'nearest') + # self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'], + # self.__preprocessed_paras['sim_ori'], cover_path, cover_geo_path) + # # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], cover_path, cover_geo_path, 'nearest') proj, geo, cover_data_geo = self.imageHandler.read_img(cover_geo_path) - hh_geo_path = self.__workspace_processing_path + "hh_geo.tif" - self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], self.__preprocessed_paras['HH'], hh_geo_path) + + proj, geo, cover_data_hh = self.imageHandler.read_img(hh_geo_path) + # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], self.__preprocessed_paras['HH'], hh_geo_path) roi_img = self.imageHandler.get_band_array(self.create_roi(hh_geo_path)) # 获取影像roi区域 @@ -588,11 +629,18 @@ class LandCoverMain: out_path1, out_path2).calu_nature() para_dict.update({"imageinfo_ProductName": "地表覆盖类型"}) para_dict.update({"imageinfo_ProductIdentifier": "LandCover"}) - para_dict.update({"imageinfo_ProductLevel": "LEVEL5"}) + para_dict.update({"imageinfo_ProductLevel": "4"}) para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "LabelData"}) CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) + if __name__ == '__main__': multiprocessing.freeze_support() #解决打包与运行错误 start = datetime.datetime.now() diff --git a/leafAreaIndex/LeafIndexMain.py b/leafAreaIndex/LeafIndexMain.py index a048c0ed..44f7f5e4 100644 --- 
a/leafAreaIndex/LeafIndexMain.py +++ b/leafAreaIndex/LeafIndexMain.py @@ -10,6 +10,8 @@ [修改序列] [修改日期] [修改者] [修改内容] 1 2022-6-27 李明明 1.增加配置文件config.ini; 2.修复快速图全黑的问题; 3.内部处理使用地理坐标系(4326); """ +from osgeo import gdalconst + from tool.algorithm.algtools.PreProcess import PreProcess as pp # 此行放在下面会报错,最好放在上面 from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara # 导入xml文件读取与检查文件 from tool.algorithm.algtools.logHandler import LogHandler @@ -31,10 +33,13 @@ import numpy as np import scipy.spatial.transform # 用于解决打包错误 import scipy.spatial.transform._rotation_groups # 用于解决打包错误 import scipy.special.cython_special # 用于解决打包错误 +import pyproj._compat from scipy.interpolate import griddata import sys import multiprocessing from tool.file.fileHandle import fileHandle +from sample_process import read_sample_csv,combine_sample_attr,ReprojectImages2,read_tiff,check_sample,split_sample_list +from tool.LAI.LAIProcess import train_WMCmodel,test_WMCModel,process_tiff cover_id_list = [] threshold_of_ndvi_min = 0 @@ -122,7 +127,7 @@ class LeafIndexMain: file_dir = os.path.join(self.__workspace_preprocessing_path, name + '\\') file.de_targz(tar_gz_path, file_dir) # 元文件字典 - para_dic.update(InitPara.get_meta_dic(InitPara.get_meta_paths(file_dir, name), name)) + para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) # tif路径字典 pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)) flag_list = [0, 0] @@ -197,7 +202,7 @@ class LeafIndexMain: """ 预处理 """ - para_names = [self.__sar_tif_name, 'LocalIncidenceAngle', "NDVI", "surface_coverage"] + para_names = [self.__sar_tif_name, 'LocalIncidenceAngle', "NDVI", "surface_coverage", 'soilMeasured'] ref_img_name = self.__sar_tif_name p = pp() self.__preprocessed_paras = p.preprocessing(para_names, ref_img_name, @@ -310,14 +315,125 @@ class LeafIndexMain: logger.info('create soil_moisture image success!') logger.info('progress bar: 40%') + def cal_empirical_parameters(self): + work_path = self.__workspace_path + EXE_NAME + "\\Temporary\\empirical""\\" + # b. 结果工作 + result_dir_path = self.__workspace_path + EXE_NAME + "\\Temporary\\empirical_result""\\" + path_list = [work_path, result_dir_path] + file.creat_dirs(path_list) + + # 1. 后向散射系数 dB + sigma_path = self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif' + # 2. 局地入射角 + incident_angle_path = self.__workspace_maskcai_localang_path + "LocalIncidenceAngle_preproed.tif" + # 3. 样本csv地址 + lai_csv_path = self.__processing_paras['laiMeasuredData'] + # 4. NDVI影像地址 -- 修正模型 + NDVI_tiff_path = self.__preprocessed_paras["NDVI"] + # 5. 土壤含水量影像地址 + soil_water_tiff_path = self.__preprocessed_paras['soilMeasured'] + # 6. 土壤含水量样本地址 + soil_water_csv_path = r"" + + # 7. 选择土壤含水量影像 + soil_water = 'tiff' + # 8. 
输出图片 + train_err_image_path = os.path.join(result_dir_path, "train_image.png") + NDVI_min = -1 # 完全裸土对应的 NDVI 值 + NDVI_max = 1 # 完全植被覆盖对应的 NDVI 值 + # 临时变量 + soil_tiff_resample_path = os.path.join(work_path, "soil_water.tiff") # 与 后向散射系数同样分辨率的 土壤水分影像 + NDVI_tiff_resample_path = os.path.join(work_path, 'NDVI.tiff') # 与 后向散射系数产品同样分辨率的 NDVI影像 + incident_angle_resample_path = os.path.join(work_path, "localincangle.tiff") + + # 读取数据 + lai_sample = read_sample_csv(lai_csv_path) # 读取样本数据 + sigma_tiff = read_tiff(sigma_path) # 读取后向散射系数 + incident_angle = read_tiff(incident_angle_path) # 读取局地入射角 + + # 对于土壤水分、NDVI做重采样 + ReprojectImages2(soil_water_tiff_path, sigma_path, soil_tiff_resample_path, resampleAlg=gdalconst.GRA_Bilinear) + ReprojectImages2(NDVI_tiff_path, sigma_path, NDVI_tiff_resample_path, resampleAlg=gdalconst.GRA_Bilinear) + ReprojectImages2(incident_angle_path, sigma_path, incident_angle_resample_path, + resampleAlg=gdalconst.GRA_NearestNeighbour) + + soil_water_tiff = read_tiff(soil_tiff_resample_path) # 读取土壤含水量影像 + NDVI_tiff = read_tiff(NDVI_tiff_resample_path) # 引入NDVI + incident_angle = read_tiff(incident_angle_resample_path) # 读取局地入射角 + + # 处理归一化植被指数 + F_VEG = (NDVI_tiff['data'] - NDVI_min) / (NDVI_max - NDVI_min) # 处理得到植被覆盖度 + + soil_water_tiff['data'] = soil_water_tiff['data'] / 100.0 # 转换为百分比 + incident_angle['data'] = incident_angle['data'] * np.pi / 180.0 # 转换为弧度值 + sigma_tiff['data'] = np.power(10, (sigma_tiff['data'] / 10)) # 转换为线性值 + + # float32 转 float64 + soil_water_tiff['data'] = soil_water_tiff['data'].astype(np.float64) + incident_angle['data'] = incident_angle['data'].astype(np.float64) + sigma_tiff['data'] = sigma_tiff['data'].astype(np.float64) + # 将土壤水分与lai样本之间进行关联 + lai_water_sample = [] # ['日期', '样方编号', '经度', '纬度', 'LAI','土壤含水量'] + if soil_water == 'tiff': + lai_water_sample = combine_sample_attr(lai_sample, soil_water_tiff) + pass + else: # 这个暂时没有考虑 + pass + + # 将入射角、后向散射系数与lai样本之间进行关联 + lai_water_inc_list = combine_sample_attr(lai_water_sample, + incident_angle) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角'] + lai_waiter_inc_sigma_list = combine_sample_attr(lai_water_inc_list, + sigma_tiff) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数'] + # lai_waiter_inc_sigma_NDVI_list=combine_sample_attr(lai_waiter_inc_sigma_list,NDVI_tiff) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数','NDVI'] + lai_waiter_inc_sigma_list = check_sample( + lai_waiter_inc_sigma_list) # 清理样本 ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数'] + # lai_waiter_inc_sigma_NDVI_list=check_sample(lai_waiter_inc_sigma_NDVI_list) # ['日期','样方编号','经度','纬度','叶面积指数',"后向散射系数",'土壤含水量','入射角','后向散射系数','NDVI'] + # 数据集筛选 + lai_waiter_inc_sigma_list_result = [] + # 筛选保留的数据集 + logger.info("保留得数据集如下") + for i in range(len(lai_waiter_inc_sigma_list)): + if i in []: + continue + logger.info(str(lai_waiter_inc_sigma_list[i])) + lai_waiter_inc_sigma_list_result.append(lai_waiter_inc_sigma_list[i]) + lai_waiter_inc_sigma_list = lai_waiter_inc_sigma_list_result + + # [sample_train,sample_test]=split_sample_list(lai_waiter_inc_sigma_list,0.6) # step 1 切分数据集 + [sample_train, sample_test] = [lai_waiter_inc_sigma_list[:], lai_waiter_inc_sigma_list[:]] # step 1 切分数据集 + + logger.info("训练模型") + a = self.__processing_paras["A"] + b = self.__processing_paras["B"] + c = self.__processing_paras["C"] + d = self.__processing_paras["D"] + params_X0 = [a, b, c, d, 0.771, -0.028] + params_arr = train_WMCmodel(sample_train, params_X0, train_err_image_path, False) + 
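train_WMCmodel is fed the dB→linear backscatter, soil-moisture fraction and incidence angle in radians prepared above. For reference only, a sketch of the textbook water-cloud forward model together with the dB↔linear conversions used in this step; the actual parameterisation inside train_WMCmodel (which is given six initial coefficients here) may differ, so every symbol below is an assumption:

```python
import numpy as np

def water_cloud_forward(lai, mv, inc_rad, A, B, C, D):
    """Classic water-cloud model, sigma0 in linear power.

    lai     : vegetation descriptor (here LAI)
    mv      : volumetric soil moisture (fraction)
    inc_rad : local incidence angle in radians
    A, B    : vegetation coefficients; C, D : soil coefficients
    """
    cos_t = np.cos(inc_rad)
    tau2 = np.exp(-2.0 * B * lai / cos_t)            # two-way canopy attenuation
    sigma_veg = A * lai * cos_t * (1.0 - tau2)       # vegetation contribution
    sigma_soil = tau2 * (C + D * mv)                 # attenuated soil contribution
    return sigma_veg + sigma_soil

# usage sketch: dB <-> linear conversions as used above
sigma_db = -12.0
sigma_lin = 10.0 ** (sigma_db / 10.0)                # dB -> linear power
print(sigma_lin, 10.0 * np.log10(sigma_lin))         # back to dB
print(water_cloud_forward(2.0, 0.25, np.radians(35.0), 0.1, 0.3, 0.05, 0.4))
```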
logging.info("模型初值:\t{}".format(str(params_X0))) + logging.info("训练得到的模型系数:\t{}".format(str(params_arr))) + self.__processing_paras.update({"A": params_arr[0]}) + self.__processing_paras.update({"B": params_arr[1]}) + self.__processing_paras.update({"C": params_arr[2]}) + self.__processing_paras.update({"D": params_arr[3]}) + def block_process(self,start): """ 生成叶面积指数产品 """ # 生成土壤水分影像 - self.create_soil_moisture_tif() - shutil.copyfile(self.__preprocessed_paras[self.__sar_tif_name], self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif') + # self.create_soil_moisture_tif() + if os.path.exists(self.__preprocessed_paras['soilMeasured']): + soil_new = os.path.join(self.__workspace_maskcai_SoilMoi_path, 'soil_moisture.tif') + shutil.copy(self.__preprocessed_paras['soilMeasured'], soil_new) + + lee_path = os.path.join(self.__workspace_preprocessed_path, self.__sar_tif_name + '.tif') + Filter().lee_process_sar(self.__preprocessed_paras[self.__sar_tif_name], lee_path, 3, 0.25) + shutil.copyfile(lee_path, self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif') + # shutil.copyfile(self.__preprocessed_paras[self.__sar_tif_name], self.__workspace_maskcai_image_path + self.__sar_tif_name+'.tif') logger.info('progress bar: 50%') + # 模型训练得到经验系数 + self.cal_empirical_parameters() block_size = self.BlockProcess.get_block_size(self.__rows, self.__cols) @@ -334,7 +450,7 @@ class LeafIndexMain: processes_num = min([len(in_tif_paths), multiprocessing_num]) - Filter().lee_filter_multiprocess(in_tif_paths, in_tif_paths, FILTER_SISE, processes_num) + # Filter().lee_filter_multiprocess(in_tif_paths, in_tif_paths, FILTER_SISE, processes_num) pool = multiprocessing.Pool(processes=processes_num) pl = [] @@ -408,16 +524,23 @@ class LeafIndexMain: model_path = "./product.xml" meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-lef.meta.xml") - para_dict = CreateMetaDict(image_path, self.__processing_paras['META'], self.__workspace_processing_path, + para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__workspace_processing_path, out_path1, out_path2).calu_nature() para_dict.update({"imageinfo_ProductName": "叶面积指数"}) para_dict.update({"imageinfo_ProductIdentifier": "LeafAreaIndex"}) - para_dict.update({"imageinfo_ProductLevel": "LEVEL3"}) + para_dict.update({"imageinfo_ProductLevel": "5"}) para_dict.update({"ProductProductionInfo_BandSelection": "1"}) para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "MeasuredData,NDVI,LandCover"}) para_dict.update({"MetaInfo_Unit": "None"}) # 设置单位 CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) + # 文件夹打包 file.make_targz(self.__out_para, self.__product_dic) logger.info('process_handle success!') diff --git a/soilMoistureTop/SoilMoistureMain.py b/soilMoistureTop/SoilMoistureMain.py index fc4b25f4..0d053846 100644 --- a/soilMoistureTop/SoilMoistureMain.py +++ b/soilMoistureTop/SoilMoistureMain.py @@ -17,6 +17,7 @@ from tool.algorithm.algtools.logHandler import LogHandler from SoilMoistureALg import MoistureAlg as alg from tool.algorithm.block.blockprocess import BlockProcess from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler +from 
tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml from tool.config.ConfigeHandle import Config as cf from tool.algorithm.xml.CreatMetafile import CreateMetafile from tool.algorithm.algtools.ROIAlg import ROIAlg as roi @@ -225,6 +226,16 @@ class MoistureMain: # 计算ROI区域 bare_land_mask_path = self.create_roi() logger.info('progress bar: 50%') + + para_names = ['HH', 'VV', 'VH', 'HV'] + for i in para_names: + if os.path.exists(self.__preprocessed_paras[i]): + lee_path = os.path.join(self.__workspace_preprocessed_path, os.path.basename(self.__preprocessed_paras[i]).split(".")[0] + '_lee.tif') + Filter().lee_process_sar(self.__preprocessed_paras[i], lee_path, 3, 0.25) + logger.info('lee process finish: ' + self.__preprocessed_paras[i]) + os.remove(self.__preprocessed_paras[i]) + self.__preprocessed_paras.update({i: lee_path}) + # 分块 bp = BlockProcess() # block_size = bp.get_block_size(self.__rows, self.__cols,block_size_config) @@ -261,11 +272,11 @@ class MoistureMain: return False processes_num = min([len(angle_list), multiprocessing_num, multiprocessing.cpu_count() - 1]) - f = Filter() - f.lee_filter_multiprocess(hh_list, hh_list, FILTER_SIZE, processes_num) - f.lee_filter_multiprocess(vv_list, vv_list, FILTER_SIZE, processes_num) - f.lee_filter_multiprocess(vh_list, vh_list, FILTER_SIZE, processes_num) - f.lee_filter_multiprocess(hv_list, hv_list, FILTER_SIZE, processes_num) + # f = Filter() + # f.lee_filter_multiprocess(hh_list, hh_list, FILTER_SIZE, processes_num) + # f.lee_filter_multiprocess(vv_list, vv_list, FILTER_SIZE, processes_num) + # f.lee_filter_multiprocess(vh_list, vh_list, FILTER_SIZE, processes_num) + # f.lee_filter_multiprocess(hv_list, hv_list, FILTER_SIZE, processes_num) # 开启多进程处理 pool = multiprocessing.Pool(processes=processes_num) pl = [] @@ -326,7 +337,9 @@ class MoistureMain: bp.assign_spatial_reference_byfile(self.__preprocessed_paras['HH'], soil_moisture_path) # 生成roi区域 - product_path = self.__product_dic + 'SoilMoistureProduct.tif' + SrcImageName = os.path.split(self.__input_paras["DualPolarSAR"]['ParaValue'])[1].split('.tar.gz')[ + 0] + '-SMC.tif' + product_path = os.path.join(self.__product_dic, SrcImageName) roi.cal_roi(product_path, soil_moisture_path, bare_land_mask_path, background_value=np.nan) logger.info('cal soil_moisture success!') @@ -345,25 +358,43 @@ class MoistureMain: image_path = product_path out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") - par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) + model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 - id_min = 0 - id_max = 1000 - threshold_of_ndvi_min = 0 - threshold_of_ndvi_max = 1 - set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max] - CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml() - + # id_min = 0 + # id_max = 1000 + # threshold_of_ndvi_min = 0 + # threshold_of_ndvi_max = 1 + # set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max] + # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) + # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml() SrcImagePath = self.__input_paras["DualPolarSAR"]['ParaValue'] paths = SrcImagePath.split(';') SrcImageName=os.path.split(paths[0])[1].split('.tar.gz')[0] - if len(paths) >= 2: - for i 
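
# --- Sketch of a defensive variant of the per-polarization filtering loop above. ---
# Assumption: lee_process_sar only prints the exit code of the external exe, so a
# normal return does not guarantee the filtered tif was written. Verifying the
# output before os.remove() avoids losing the original band on failure.
import os

def filter_and_replace(filter_obj, src_tif, dst_tif, win_size=3, noise_var=0.25):
    filter_obj.lee_process_sar(src_tif, dst_tif, win_size, noise_var)
    if not os.path.isfile(dst_tif) or os.path.getsize(dst_tif) == 0:
        raise RuntimeError('lee filter failed, keeping original: %s' % src_tif)
    os.remove(src_tif)
    return dst_tif
# --- end of sketch ---
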
in range(1, len(paths)): - SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0] - meta_xml_path = self.__product_dic+EXE_NAME+"Product.meta.xml" - CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName) + # if len(paths) >= 2: + # for i in range(1, len(paths)): + # SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0] + # meta_xml_path = self.__product_dic+EXE_NAME+"Product.meta.xml" + # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName) + + model_path = "./product.xml" + meta_xml_path = os.path.join(self.__product_dic, SrcImageName + "-SMC.meta.xml") + + para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__product_dic, + out_path1, out_path2).calu_nature() + para_dict.update({"imageinfo_ProductName": "土壤水分产品"}) + para_dict.update({"imageinfo_ProductIdentifier": "SoilMositure"}) + para_dict.update({"imageinfo_ProductLevel": "5A"}) + para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) + CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) # 文件夹打包 file.make_targz(self.__out_para, self.__product_dic) @@ -398,8 +429,8 @@ if __name__ == '__main__': except Exception: logger.exception('run-time error!') finally: - main_handler.del_temp_workspace() - + # main_handler.del_temp_workspace() + pass end = datetime.datetime.now() msg = 'running use time: %s ' % (end - start) logger.info(msg) \ No newline at end of file diff --git a/soilMoisture_c_sar_oh2004/SoilMoistureMain.py b/soilMoisture_c_sar_oh2004/SoilMoistureMain.py index 7cc27076..fcf5ceae 100644 --- a/soilMoisture_c_sar_oh2004/SoilMoistureMain.py +++ b/soilMoisture_c_sar_oh2004/SoilMoistureMain.py @@ -113,6 +113,8 @@ class MoistureMain: # 元文件字典 # para_dic.update(InitPara.get_meta_dic(InitPara.get_meta_paths(file_dir, name), name)) para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) + parameter_path = os.path.join(file_dir, "orth_para.txt") + para_dic.update({"paraMeter": parameter_path}) # tif路径字典 pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)) @@ -136,6 +138,8 @@ class MoistureMain: para_dic.update({'inc_angle': in_tif_path}) elif 'ori_sim' == key: para_dic.update({'ori_sim': in_tif_path}) + elif 'sim_ori' == key: + para_dic.update({'sim_ori': in_tif_path}) elif 'LocalIncidenceAngle' == key: para_dic.update({'LocalIncidenceAngle': in_tif_path}) elif 'inci_Angle-ortho' == key: @@ -173,13 +177,13 @@ class MoistureMain: # self.__preprocessed_paras, scopes_roi = p.preprocessing_oh2004(para_names, self.__processing_paras, # self.__workspace_preprocessing_path, self.__workspace_preprocessed_path) - para_names_geo = ['Covering', 'NDVI'] + para_names_geo = ['Covering', 'NDVI', 'inc_angle', 'sim_ori'] p = pp() cutted_img_paths, scopes_roi = p.cut_geoimg(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras) self.__preprocessed_paras.update(cutted_img_paths) - para_names_l1a = ["HH", "VV", "HV", "VH", 'inci_Angle-ortho', 'ori_sim'] + para_names_l1a = ["HH", "VV", "HV", "VH", 'ori_sim'] 
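
# --- Sketch: cut_geoimg is not shown in this patch; one minimal way to clip the
# geocoded auxiliary rasters ('Covering', 'NDVI', 'inc_angle', 'sim_ori') to a
# shared ROI polygon with GDAL. The shapefile path is an assumption for illustration.
from osgeo import gdal

def cut_to_roi(src_tif, roi_shp, dst_tif, nodata=0):
    gdal.Warp(dst_tif, src_tif,
              cutlineDSName=roi_shp,   # polygon shapefile defining the ROI
              cropToCutline=True,      # shrink output extent to the ROI
              dstNodata=nodata)
# --- end of sketch ---
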
#'inci_Angle-ortho', self._tr = TransImgL1A(self.__processing_paras['ori_sim'], scopes_roi) for name in para_names_l1a: @@ -267,6 +271,18 @@ class MoistureMain: print(os.system(exe_cmd)) print("==========================================================================") + def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): + ''' + # std::cout << "mode 11"; + # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path, + dem_rc, in_sar, out_sar) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + def process_handle(self,start): """ 算法主处理函数 @@ -275,7 +291,7 @@ class MoistureMain: tem_folder = self.__workspace_path + EXE_NAME + r"\Temporary""\\" soilOh2004 = SoilMoistureTool(self.__workspace_preprocessed_path, self.__workspace_processing_path, self.__cols, - self.__rows, self.__preprocessed_paras['inci_Angle-ortho'], self.__processing_paras['Origin_META']) + self.__rows, self.__preprocessed_paras['inc_angle'], self.__processing_paras['Origin_META']) result = soilOh2004.soil_oh2004() logger.info('progress bar: 80%') @@ -288,7 +304,11 @@ class MoistureMain: product_geo_path = os.path.join(tem_folder, 'SoilMoistureProduct_geo.tif') space = self.imageHandler.get_geotransform(self.__preprocessed_paras['HH']) - self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'],product_temp_path, product_geo_path, pixelspace) + + self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'], + self.__preprocessed_paras['sim_ori'], product_temp_path, + product_geo_path) + # self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'],product_temp_path, product_geo_path, pixelspace) # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], product_temp_path, product_geo_path, 'linear') # @@ -300,7 +320,10 @@ class MoistureMain: bare_land_mask_path = roi().roi_process(para_names, self.__workspace_processing_path + "/roi/", self.__processing_paras, self.__preprocessed_paras) - product_path = os.path.join(self.__product_dic, 'SoilMoistureProduct.tif') + SrcImageName = os.path.split(self.__input_paras["DualPolarSAR"]['ParaValue'])[1].split('.tar.gz')[ + 0] + '-Soil.tif' + product_path = os.path.join(self.__product_dic, SrcImageName) + # product_path = os.path.join(self.__product_dic, 'SoilMoistureProduct.tif') # 获取影像roi区域 roi.cal_roi(product_path, product_geo_path, bare_land_mask_path, background_value=np.nan) @@ -366,8 +389,8 @@ if __name__ == '__main__': except Exception: logger.exception('run-time error!') finally: - main_handler.del_temp_workspace() - # pass + # main_handler.del_temp_workspace() + pass end = datetime.datetime.now() msg = 'running use time: %s ' % (end - start) logger.info(msg) \ No newline at end of file diff --git a/soilMoisture_c_sar_oh2004/SoilMoistureTool.py b/soilMoisture_c_sar_oh2004/SoilMoistureTool.py index 0263576e..995d89fc 100644 --- a/soilMoisture_c_sar_oh2004/SoilMoistureTool.py +++ b/soilMoisture_c_sar_oh2004/SoilMoistureTool.py @@ -46,11 +46,11 @@ class SoilMoistureTool: atp.ahv_to_polsarpro_t3_soil(t3_path, tif_path) # Lee滤波 - # leeFilter = LeeRefinedFilterT3() - # lee_filter_path = os.path.join(self.__workspace_processing_path, - # 'lee_filter\\') - # - # leeFilter.api_lee_refined_filter_T3('', t3_path, 
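
# --- Note on the exe calls above: the "set PROJ_LIB=...; & ..." string is cmd.exe
# specific and breaks if any path contains spaces. A sketch of an equivalent call
# through subprocess, passing PROJ_LIB via the environment and raising on a
# non-zero exit code (argument order as in the mode-11 command string above):
import os
import subprocess

def run_sim_ortho(mode, *args, exe=r".\baseTool\x64\Release\SIMOrthoProgram.exe"):
    env = dict(os.environ, PROJ_LIB=os.path.dirname(exe))
    cmd = [exe, str(mode)] + [str(a) for a in args]
    subprocess.run(cmd, env=env, check=True)

# run_sim_ortho(11, parameter_path, dem_rc, in_sar, out_sar)   # hypothetical usage
# --- end of sketch ---
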
lee_filter_path, 0, 0, atp.rows(), atp.cols()) + leeFilter = LeeRefinedFilterT3() + lee_filter_path = os.path.join(self.__workspace_processing_path, + 'lee_filter\\') + + leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols()) # logger.info("refine_lee filter success!") # logging.info("refine_lee filter success!") return t3_path diff --git a/soilSalinity/SoilSalinityMain.py b/soilSalinity/SoilSalinityMain.py index 1cc064ce..3877e54d 100644 --- a/soilSalinity/SoilSalinityMain.py +++ b/soilSalinity/SoilSalinityMain.py @@ -11,14 +11,17 @@ 1 2022-6-27 石海军 1.增加配置文件config.ini; 2.内部处理使用地理坐标系(4326); """ import logging +import shutil from tool.algorithm.algtools.MetaDataHandler import Calibration from tool.algorithm.algtools.PreProcess import PreProcess as pp from tool.algorithm.image.ImageHandle import ImageHandler +from tool.algorithm.polsarpro.pspLeeRefinedFilterT3 import LeeRefinedFilterT3 from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.algtools.ROIAlg import ROIAlg as roi from tool.algorithm.block.blockprocess import BlockProcess +from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml from tool.file.fileHandle import fileHandle # from AHVToPolsarpro import AHVToPolsarpro from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro @@ -115,6 +118,8 @@ class SalinityMain: para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name)) # tif路径字典 para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))) + parameter_path = os.path.join(file_dir, "orth_para.txt") + para_dic.update({"paraMeter": parameter_path}) return para_dic def __create_work_space(self): @@ -148,7 +153,7 @@ class SalinityMain: """ 预处理 """ - para_names_geo = ["Covering", "NDVI"] + para_names_geo = ["Covering", "NDVI", 'sim_ori'] p = pp() p.check_img_projection(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras) #计算roi @@ -209,13 +214,22 @@ class SalinityMain: logger.info('ahv transform to polsarpro T3 matrix success!') logger.info('progress bar: 20%') + # Lee滤波 + leeFilter = LeeRefinedFilterT3() + lee_filter_path = os.path.join(self.__workspace_processing_path, + 'lee_filter\\') + + leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols()) + + logger.info('Refined_lee process success!') + haa = PspHAAlphaDecomposition(normalization=True) haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=out_dir, h_a_alpha_decomposition_T3_path='h_a_alpha_decomposition_T3.exe' , h_a_alpha_eigenvalue_set_T3_path='h_a_alpha_eigenvalue_set_T3.exe' , h_a_alpha_eigenvector_set_T3_path='h_a_alpha_eigenvector_set_T3.exe', - polsarpro_in_dir=t3_path) + polsarpro_in_dir=lee_filter_path) def create_meta_file(self, product_path): xml_path = "./model_meta.xml" @@ -223,30 +237,46 @@ class SalinityMain: image_path = product_path out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") - par_dict = CreateDict(image_path, [1, 1, 1, 1], out_path1, out_path2).calu_nature(start) - model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 - CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml() + # par_dict = CreateDict(image_path, [1, 1, 1, 1], out_path1, out_path2).calu_nature(start) + # model_xml_path = os.path.join(tem_folder, 
"creat_standard.meta.xml") # 输出xml路径 + # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml() # 文件夹打包 SrcImagePath = self.__input_paras["AHV"]['ParaValue'] paths = SrcImagePath.split(';') SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0] - if len(paths) >= 2: - for i in range(1, len(paths)): - SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0] - meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml" - CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process( - SrcImageName) + # if len(paths) >= 2: + # for i in range(1, len(paths)): + # SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0] + # meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml" + # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process( + # SrcImageName) + model_path = "./product.xml" + meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-Salinity.meta.xml") - def inter_Range2Geo(self, lon_lat_path, data_tiff, grid_path, space): + para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__workspace_processing_path, + out_path1, out_path2).calu_nature() + para_dict.update({"imageinfo_ProductName": "土壤盐碱度"}) + para_dict.update({"imageinfo_ProductIdentifier": "SoilSalinity"}) + para_dict.update({"imageinfo_ProductLevel": "5A"}) + para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) + CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) + + def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): ''' - # std::cout << "mode 10"; - # std::cout << "SIMOrthoProgram.exe 10 lon_lat_path data_tiff grid_path space"; + # std::cout << "mode 11"; + # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; ''' exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" - exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 10, - lon_lat_path, data_tiff, - grid_path, space) + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path, + dem_rc, in_sar, out_sar) print(exe_cmd) print(os.system(exe_cmd)) print("==========================================================================") @@ -293,6 +323,22 @@ class SalinityMain: self.imageHandler.write_img(features_path, "", [0, 0, 1, 0, 0, 1], features_array) logger.info('create features matrix success!') + # for n in range(block_num): + # name = os.path.basename(dir_dict[key_name][n]) + # suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \ + # name.split('_')[-1] + # features_path = self.__workspace_block_tif_processed_path + "features\\features" + suffix + # row = self.imageHandler.get_img_height(dir_dict[key_name][n]) + # col = self.imageHandler.get_img_width(dir_dict[key_name][n]) + # features_array = np.zeros((len(dir_dict), row, col), dtype='float32') + # for m, value in zip(range(len(dir_dict)), dir_dict.values()): + # 
features_array[m, :, :] = self.imageHandler.get_band_array(value[n], 1) + # # 异常值转为0 + # features_array[np.isnan(features_array)] = 0.0 + # features_array[np.isinf(features_array)] = 0.0 + # self.imageHandler.write_img(features_path, "", [0, 0, 1, 0, 0, 1], features_array) + # logger.info('create features matrix success!') + # 生成训练集 block_features_dir, block_features_name = bp.get_file_names(self.__workspace_block_tif_processed_path + 'features\\', ['tif']) @@ -344,15 +390,17 @@ class SalinityMain: # l1a图像坐标转换地理坐标 salinity_path = self.__workspace_processing_path + "salinity.tif" - salinity_geo_path = self.__workspace_processing_path + "salinity_geo.tif" + SrcImageName = os.path.split(self.__input_paras["AHV"]['ParaValue'])[1].split('.tar.gz')[0] + '-Salinity.tif' + salinity_geo_path = os.path.join(self.__workspace_processing_path, SrcImageName) - self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path, pixelspace) + self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'], self.__preprocessed_paras['sim_ori'], salinity_path, salinity_geo_path) + # self.inter_Range2Geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path, pixelspace) # self._tr.l1a_2_geo(self.__preprocessed_paras['ori_sim'], salinity_path, salinity_geo_path) self.resampleImgs(salinity_geo_path) # 生成roi区域 - product_path = self.__product_dic + 'SoilSalinityProduct.tif' + product_path = os.path.join(self.__product_dic, SrcImageName) roi.cal_roi(product_path, salinity_geo_path, self.create_roi(), background_value=np.nan) # 生成快视图 diff --git a/surfaceRoughness_oh2004/SurfaceRoughnessMain.py b/surfaceRoughness_oh2004/SurfaceRoughnessMain.py index 723cdc2f..0c618b55 100644 --- a/surfaceRoughness_oh2004/SurfaceRoughnessMain.py +++ b/surfaceRoughness_oh2004/SurfaceRoughnessMain.py @@ -22,6 +22,7 @@ from tool.algorithm.algtools.logHandler import LogHandler from SurfaceRoughnessAlg import MoistureAlg as alg from tool.algorithm.block.blockprocess import BlockProcess from tool.algorithm.algtools.MetaDataHandler import MetaDataHandler +from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml from tool.config.ConfigeHandle import Config as cf from tool.algorithm.xml.CreatMetafile import CreateMetafile from tool.algorithm.algtools.ROIAlg import ROIAlg as roi @@ -319,26 +320,43 @@ class MoistureMain: image_path = product_path out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") - par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) - model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 - - id_min = 0 - id_max = 1000 - threshold_of_ndvi_min = 0 - threshold_of_ndvi_max = 1 - set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max] - CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml() - + # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) + # model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 + # + # id_min = 0 + # id_max = 1000 + # threshold_of_ndvi_min = 0 + # threshold_of_ndvi_max = 1 + # set_threshold = [id_max, id_min, threshold_of_ndvi_min, threshold_of_ndvi_max] + # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, set_threshold, model_xml_path).create_standard_xml() + # SrcImagePath = self.__input_paras["DualPolarSAR"]['ParaValue'] paths = 
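
# --- Sketch: the commented-out block above assembles an (n_features, rows, cols)
# cube from one block of every feature image; with numpy this is a single stack
# (assumption: all feature blocks of one tile share the same size). read_band is a
# hypothetical callable, e.g. lambda p: ImageHandler.get_band_array(p, 1).
import numpy as np

def stack_feature_block(block_paths, read_band):
    cube = np.stack([read_band(p) for p in block_paths], axis=0)
    cube[np.isnan(cube)] = 0.0   # replace NaN/Inf as in the original code
    cube[np.isinf(cube)] = 0.0
    return cube.astype('float32')
# --- end of sketch ---
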
SrcImagePath.split(';') SrcImageName=os.path.split(paths[0])[1].split('.tar.gz')[0] - if len(paths) >= 2: - for i in range(1, len(paths)): - SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0] - meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml" - CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName) + # if len(paths) >= 2: + # for i in range(1, len(paths)): + # SrcImageName=SrcImageName+";"+os.path.split(paths[i])[1].split('.tar.gz')[0] + # meta_xml_path = self.__product_dic + EXE_NAME + "Product.meta.xml" + # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process(SrcImageName) # 文件夹打包 + model_path = "./product.xml" + meta_xml_path = os.path.join(self.__workspace_processing_path, SrcImageName + "-Roughness.meta.xml") + + para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], self.__workspace_processing_path, + out_path1, out_path2).calu_nature() + para_dict.update({"imageinfo_ProductName": "地表粗糙度"}) + para_dict.update({"imageinfo_ProductIdentifier": "SurfaceRoughness"}) + para_dict.update({"imageinfo_ProductLevel": "5A"}) + para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) + CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) file.make_targz(self.__out_para, self.__product_dic) logger.info('process_handle success!') logger.info('progress bar: 100%') @@ -371,7 +389,7 @@ if __name__ == '__main__': except Exception: logger.exception('run-time error!') finally: - # main_handler.del_temp_workspace() + main_handler.del_temp_workspace() pass end = datetime.datetime.now() msg = 'running use time: %s ' % (end - start) diff --git a/tool/algorithm/algtools/ROIAlg.py b/tool/algorithm/algtools/ROIAlg.py index d9a5e5af..2170d52e 100644 --- a/tool/algorithm/algtools/ROIAlg.py +++ b/tool/algorithm/algtools/ROIAlg.py @@ -166,7 +166,7 @@ class ROIAlg: for i in range(0, im_bands): tif_array[i, :, :][np.isnan(mask_array)] = background_value tif_array[i, :, :][mask_array == 0] = background_value - image_handler.write_img(out_tif_path, proj, geotrans, tif_array) + image_handler.write_img(out_tif_path, proj, geotrans, tif_array, '0') logger.info("cal_roi success, path: %s", out_tif_path) return True diff --git a/tool/algorithm/algtools/filter/lee_Filter.py b/tool/algorithm/algtools/filter/lee_Filter.py index 18bab74b..1e36e6bc 100644 --- a/tool/algorithm/algtools/filter/lee_Filter.py +++ b/tool/algorithm/algtools/filter/lee_Filter.py @@ -277,6 +277,19 @@ class Filter: file.del_folder(block_filtered) pass + def lee_process_sar(self, in_sar, out_sar, win_size, noise_var): + ''' + # std::cout << "mode 12" + # std::cout << "SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var" + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 12, in_sar, + out_sar, win_size, noise_var) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + + if __name__ == '__main__': # 示例1: # path = 
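
# --- The filtering itself happens inside SIMOrthoProgram (mode 12) and its exact
# algorithm is not shown here. For intuition, the textbook Lee filter with a square
# window and a fixed noise variance looks like this (a sketch, not the exe's
# implementation; img is a float array):
import numpy as np
from scipy.ndimage import uniform_filter

def lee_filter(img, win_size=3, noise_var=0.25):
    mean = uniform_filter(img, size=win_size)            # local mean
    sq_mean = uniform_filter(img * img, size=win_size)   # local mean of squares
    var = sq_mean - mean * mean                          # local variance
    weight = var / (var + noise_var)                     # Wiener-style gain
    return mean + weight * (img - mean)
# --- end of sketch ---
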
r"I:\MicroWorkspace\product\C-SAR\LeafAreaIndex\Temporary\cai_sartif\HV_0_512_0_512.tif" diff --git a/tool/algorithm/image/ImageHandle.py b/tool/algorithm/image/ImageHandle.py index 55724008..d898b952 100644 --- a/tool/algorithm/image/ImageHandle.py +++ b/tool/algorithm/image/ImageHandle.py @@ -376,7 +376,7 @@ class ImageHandler: # 写GeoTiff文件 @staticmethod - def write_img(filename, im_proj, im_geotrans, im_data, no_data='null'): + def write_img(filename, im_proj, im_geotrans, im_data, no_data='0'): """ 影像保存 :param filename: 保存的路径 @@ -400,10 +400,11 @@ class ImageHandler: datatype = gdal_dtypes[im_data.dtype.name] else: datatype = gdal.GDT_Float32 - + flag = False # 判读数组维数 if len(im_data.shape) == 3: im_bands, im_height, im_width = im_data.shape + flag = True else: im_bands, (im_height, im_width) = 1, im_data.shape @@ -420,11 +421,18 @@ class ImageHandler: if im_bands == 1: # outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据 - outband = dataset.GetRasterBand(1) - outband.WriteArray(im_data) - if no_data != 'null': - outband.SetNoDataValue(np.double(no_data)) - outband.FlushCache() + if flag: + outband = dataset.GetRasterBand(1) + outband.WriteArray(im_data[0]) + if no_data != 'null': + outband.SetNoDataValue(np.double(no_data)) + outband.FlushCache() + else: + outband = dataset.GetRasterBand(1) + outband.WriteArray(im_data) + if no_data != 'null': + outband.SetNoDataValue(np.double(no_data)) + outband.FlushCache() else: for i in range(im_bands): outband = dataset.GetRasterBand(1 + i) diff --git a/tool/algorithm/ml/machineLearning.py b/tool/algorithm/ml/machineLearning.py index 2ea8ae23..d369ba71 100644 --- a/tool/algorithm/ml/machineLearning.py +++ b/tool/algorithm/ml/machineLearning.py @@ -128,6 +128,8 @@ class MachineLeaning: # dst_ds.SetProjection(sr.ExportToWkt()) # dst_ds.SetGeoTransform(geo_transform) # del dst_ds + if not os.path.exists(img_path): + logger.error('total:%s,block:%s test data failed !path:%s', block_sum, n, img_path) logger.info('total:%s,block:%s test data finished !path:%s', block_sum, n, img_path) return True @@ -152,7 +154,14 @@ class MachineLeaning: for path, n in zip(block_features_dir, range(len(block_features_dir))): name = os.path.split(path)[1] - features_array = ImageHandler.get_data(path) + # features_array = ImageHandler.get_data(path) + band = ImageHandler.get_bands(path) + if band == 1: + features_array = np.zeros((1, 1024, 1024), dtype=float) + feature_array = ImageHandler.get_data(path) + features_array[0, :, :] = feature_array + else: + features_array = ImageHandler.get_data(path) X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T @@ -175,6 +184,62 @@ class MachineLeaning: # bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path) return cover_path + @staticmethod + def predict_VP(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols): + """ + 预测数据 + :param clf : svm模型 + :return X_test_list: 分块测试集影像路径 + """ + ml = MachineLeaning() + # 开启多进程处理 + bp = BlockProcess() + block_size = bp.get_block_size(rows, cols) + + block_features_dir = X_test_list + bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name, + 'pre_result\\') # workspace_processing_path + out_tif_name + '\\' + file.creat_dirs([bp_cover_dir]) + + processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 7]) + pool = multiprocessing.Pool(processes=processes_num) + + for path, n in zip(block_features_dir, range(len(block_features_dir))): + name = os.path.split(path)[1] + + band = 
ImageHandler.get_bands(path) + if band == 1: + features_array = np.zeros((1, 1024, 1024), dtype=float) + feature_array = ImageHandler.get_data(path) + features_array[0, :, :] = feature_array + else: + features_array = ImageHandler.get_data(path) + + X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T + + suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \ + name.split('_')[-1] + img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix + row_begin = int(name.split('_')[-4]) + col_begin = int(name.split('_')[-2]) + pool.apply_async(ml.predict_blok, (clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n)) + # ml.predict_blok(clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n) + + pool.close() + pool.join() + del pool + + # 合并影像 + data_dir = bp_cover_dir + out_path = workspace_processing_path[0:-1] + bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32') + + # 添加地理信息 + cover_path = os.path.join(workspace_processing_path, + out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif" + # bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path) + return cover_path + @staticmethod def get_name_list(feature_tif_dir): in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif'))) diff --git a/tool/algorithm/transforml1a/transHandle.py b/tool/algorithm/transforml1a/transHandle.py index abc0966f..7f9e170b 100644 --- a/tool/algorithm/transforml1a/transHandle.py +++ b/tool/algorithm/transforml1a/transHandle.py @@ -84,6 +84,7 @@ class polyfit2d_U: class TransImgL1A: def __init__(self, ori_sim_path, roi): self._begin_r, self._begin_c, self._end_r, self._end_c = 0, 0, 0, 0 + self.ori2geo_img = None self._mask = None self._min_lon, self._max_lon, self._min_lat, self._max_lat = 0, 0, 0, 0 self.init_trans_para(ori_sim_path, roi) @@ -93,6 +94,13 @@ class TransImgL1A: data = [(self._begin_r + row, self._begin_c + col) for (row, col) in zip(rowcol[0], rowcol[1])] return data + + def get_lonlat_points(self): + lon = self.ori2geo_img[0, :, :][np.where(self._mask == 1)] + lat = self.ori2geo_img[1, :, :][np.where(self._mask == 1)] + data = [(row, col) for (row, col) in zip(lon, lat)] + return data + ###################### # 插值方法 ###################### @@ -125,10 +133,10 @@ class TransImgL1A: r_max = np.nanmax(r_c_list[0]) c_min = np.nanmin(r_c_list[1]) c_max = np.nanmax(r_c_list[1]) - ori2geo_img = ori2geo_img[:, r_min:r_max + 1, c_min:c_max + 1] + self.ori2geo_img = ori2geo_img[:, r_min:r_max + 1, c_min:c_max + 1] # 开始调用组件 计算 - mask = SAR_GEO.cut_L1A_img(ori2geo_img.astype(np.float64), point_list) + mask = SAR_GEO.cut_L1A_img(self.ori2geo_img.astype(np.float64), point_list) self._begin_r = r_min self._end_r = r_max self._begin_c = c_min @@ -405,105 +413,105 @@ class TransImgL1A: det_grid=pixel_delta, method=method) return result - def l1a_2_geo(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='linear'): - ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path) - # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path) - l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1) - pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001 - pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c) - - lon_data = ori_geo_tif[0, :, :].reshape(-1) - 
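
# --- Sketch: the (1, 1024, 1024) buffer above assumes every feature block is
# exactly 1024x1024. A shape-driven variant (hypothetical helper with the same
# intent) handles edge blocks of any size and both single- and multi-band blocks:
import numpy as np

def to_feature_matrix(features_array):
    arr = np.asarray(features_array)
    if arr.ndim == 2:                 # single band -> add a leading band axis
        arr = arr[np.newaxis, :, :]
    bands = arr.shape[0]
    return arr.reshape(bands, -1).T   # (n_pixels, n_features) for clf.predict

# X_test = to_feature_matrix(ImageHandler.get_data(path))   # hypothetical usage
# --- end of sketch ---
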
lat_data = ori_geo_tif[1, :, :].reshape(-1) - l1a_produc = l1a_produc.reshape(-1) - idx = np.logical_not(np.isnan(lon_data)) - lat_data = lat_data[idx] - lon_data = lon_data[idx] - l1a_produc = l1a_produc[idx] - idx = np.logical_not(np.isnan(lat_data)) - lat_data = lat_data[idx] - lon_data = lon_data[idx] - l1a_produc = l1a_produc[idx] - - gt = [self._min_lon, pixel_delta_x, 0.0, - self._max_lat, 0.0, -pixel_delta_y] - [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon] - lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y - lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x - - # 获取地理坐标系统信息,用于选取需要的地理坐标系统 - srs = osr.SpatialReference() - srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84" - proj = srs.ExportToWkt() - - projection = srs.ExportToPROJJSON() - # (lower_left_x、lower_left_y、upper_right_x、upper_right_y) - target_def = AreaDefinition("id1", "WGS84", "proj_id", projection, - lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max]) - lon_data = lon_data.reshape(-1, 1) - lat_data = lat_data.reshape(-1, 1) - l1a_produc = l1a_produc.reshape(-1, 1) - source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data) - lalo_step = [pixel_delta_x, -pixel_delta_y] - radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo') - geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def, - radius=radius, neighbours=32, - nprocs=8, fill_value=np.nan, - epsilon=0) - - ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc, np.nan) - - def l1a_2_geo_int(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='nearest'): - ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path) - # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path) - l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1) - pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001 - pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c) - - lon_data = ori_geo_tif[0, :, :].reshape(-1) - lat_data = ori_geo_tif[1, :, :].reshape(-1) - l1a_produc = l1a_produc.reshape(-1) - idx = np.logical_not(np.isnan(lon_data)) - lat_data = lat_data[idx] - lon_data = lon_data[idx] - l1a_produc = l1a_produc[idx] - idx = np.logical_not(np.isnan(lat_data)) - lat_data = lat_data[idx] - lon_data = lon_data[idx] - l1a_produc = l1a_produc[idx] - - gt = [self._min_lon, pixel_delta_x, 0.0, - self._max_lat, 0.0, -pixel_delta_y] - [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon] - lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y - lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x - - # 获取地理坐标系统信息,用于选取需要的地理坐标系统 - srs = osr.SpatialReference() - srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84" - proj = srs.ExportToWkt() - - projection = srs.ExportToPROJJSON() - # (lower_left_x、lower_left_y、upper_right_x、upper_right_y) - target_def = AreaDefinition("id1", "WGS84", "proj_id", projection, - lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max]) - lon_data = lon_data.reshape(-1, 1) - lat_data = lat_data.reshape(-1, 1) - l1a_produc = l1a_produc.reshape(-1, 1) - source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data) - lalo_step = [pixel_delta_x, -pixel_delta_y] - radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo') - if method == 'linear': - geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def, - radius=radius, 
neighbours=32, - nprocs=8, fill_value=0, - epsilon=0) - elif method == 'nearest': - geo_produc = pr.kd_tree.resample_nearest(source_def, l1a_produc, target_def, epsilon=0, - radius_of_influence=50000, - fill_value=0, nprocs=8 - ) - geo_produc = geo_produc[:,:,0] - ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc) + # def l1a_2_geo(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='linear'): + # ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path) + # # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path) + # l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1) + # pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001 + # pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c) + # + # lon_data = ori_geo_tif[0, :, :].reshape(-1) + # lat_data = ori_geo_tif[1, :, :].reshape(-1) + # l1a_produc = l1a_produc.reshape(-1) + # idx = np.logical_not(np.isnan(lon_data)) + # lat_data = lat_data[idx] + # lon_data = lon_data[idx] + # l1a_produc = l1a_produc[idx] + # idx = np.logical_not(np.isnan(lat_data)) + # lat_data = lat_data[idx] + # lon_data = lon_data[idx] + # l1a_produc = l1a_produc[idx] + # + # gt = [self._min_lon, pixel_delta_x, 0.0, + # self._max_lat, 0.0, -pixel_delta_y] + # [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon] + # lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y + # lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x + # + # # 获取地理坐标系统信息,用于选取需要的地理坐标系统 + # srs = osr.SpatialReference() + # srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84" + # proj = srs.ExportToWkt() + # + # projection = srs.ExportToPROJJSON() + # # (lower_left_x、lower_left_y、upper_right_x、upper_right_y) + # target_def = AreaDefinition("id1", "WGS84", "proj_id", projection, + # lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max]) + # lon_data = lon_data.reshape(-1, 1) + # lat_data = lat_data.reshape(-1, 1) + # l1a_produc = l1a_produc.reshape(-1, 1) + # source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data) + # lalo_step = [pixel_delta_x, -pixel_delta_y] + # radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo') + # geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def, + # radius=radius, neighbours=32, + # nprocs=8, fill_value=np.nan, + # epsilon=0) + # + # ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc, np.nan) + # + # def l1a_2_geo_int(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='nearest'): + # ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path) + # # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path) + # l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1) + # pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001 + # pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c) + # + # lon_data = ori_geo_tif[0, :, :].reshape(-1) + # lat_data = ori_geo_tif[1, :, :].reshape(-1) + # l1a_produc = l1a_produc.reshape(-1) + # idx = np.logical_not(np.isnan(lon_data)) + # lat_data = lat_data[idx] + # lon_data = lon_data[idx] + # l1a_produc = l1a_produc[idx] + # idx = np.logical_not(np.isnan(lat_data)) + # lat_data = lat_data[idx] + # lon_data = lon_data[idx] + # l1a_produc = l1a_produc[idx] + # + # gt = [self._min_lon, pixel_delta_x, 0.0, + # self._max_lat, 0.0, -pixel_delta_y] + # [lat_min, lat_max, lon_min, lon_max] = 
[self._min_lat, self._max_lat, self._min_lon, self._max_lon] + # lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y + # lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x + # + # # 获取地理坐标系统信息,用于选取需要的地理坐标系统 + # srs = osr.SpatialReference() + # srs.ImportFromEPSG(4326) # 定义输出的坐标系为"WGS 84" + # proj = srs.ExportToWkt() + # + # projection = srs.ExportToPROJJSON() + # # (lower_left_x、lower_left_y、upper_right_x、upper_right_y) + # target_def = AreaDefinition("id1", "WGS84", "proj_id", projection, + # lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max]) + # lon_data = lon_data.reshape(-1, 1) + # lat_data = lat_data.reshape(-1, 1) + # l1a_produc = l1a_produc.reshape(-1, 1) + # source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data) + # lalo_step = [pixel_delta_x, -pixel_delta_y] + # radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo') + # if method == 'linear': + # geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def, + # radius=radius, neighbours=32, + # nprocs=8, fill_value=0, + # epsilon=0) + # elif method == 'nearest': + # geo_produc = pr.kd_tree.resample_nearest(source_def, l1a_produc, target_def, epsilon=0, + # radius_of_influence=50000, + # fill_value=0, nprocs=8 + # ) + # geo_produc = geo_produc[:,:,0] + # ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc) @property def mask(self): diff --git a/tool/algorithm/xml/CreateMetaDict.py b/tool/algorithm/xml/CreateMetaDict.py index d937e47d..4158afeb 100644 --- a/tool/algorithm/xml/CreateMetaDict.py +++ b/tool/algorithm/xml/CreateMetaDict.py @@ -47,6 +47,7 @@ class CreateMetaDict: # imageinfo_heightspace = -self.ImageHandler.get_geotransform(out_path1)[5] # 投影后的分辨率 # para_dict.update({"imageinfo_widthspace": imageinfo_widthspace}) # para_dict.update({"imageinfo_heightspace": imageinfo_heightspace}) + para_dict.update({"imageinfo_ProductResolution": imageinfo_widthspace}) para_dict.update({"imageinfo_ProductFormat": "GEOTIFF"}) diff --git a/tool/csv/csvHandle.py b/tool/csv/csvHandle.py index e14181bb..5ad77939 100644 --- a/tool/csv/csvHandle.py +++ b/tool/csv/csvHandle.py @@ -228,6 +228,7 @@ class csvHandle: roi_poly = [(float(lon), float(lat)) for (lon, lat) in points] tr = TransImgL1A(cuted_ori_sim_path, roi_poly) l1a_points = tr.get_roi_points() + # l1a_points = tr.get_lonlat_points() if data_use_type == 'train': train_data.append([name, phenology_id, l1a_points, type_data[phenology_id]]) elif data_use_type == 'test': diff --git a/vegetationHeight-L-SAR/VegetationHeightMain.py b/vegetationHeight-L-SAR/VegetationHeightMain.py index 52898563..48e43cb5 100644 --- a/vegetationHeight-L-SAR/VegetationHeightMain.py +++ b/vegetationHeight-L-SAR/VegetationHeightMain.py @@ -784,7 +784,7 @@ class VegetationHeightMain: coh_arrayt = AHVToPolSarProS2().read_none_complex_bin_to_array(t6_T11_bin) rows, cols=coh_arrayt.shape[0],coh_arrayt.shape[1] # 4、T6->boxcar_filter->T6 - logger.info('start computing the filter...') # 线性平滑滤波 + logger.info('start computing the filter...') # boxcar滤波 boxcar_filter_tool_path = os.path.join(current_path, "boxcar_filter_T6.exe") # boxcar_filter_tool_path = os.path.join(current_path, "lee_refined_filter_T6.exe") master_slave_t6_box = self.__workspace_preprocessed2_path + "master_slave_t6_box""\\" @@ -793,7 +793,8 @@ class VegetationHeightMain: master_slave_t6, *(3, 3, 0, 0)) # PlantHeightAlg().polsar_lee_filter(master_slave_t6_box, boxcar_filter_tool_path, # master_slave_t6, *(3, 3, 0, 0)) - logger.info("T6 
lee_refined_filter finish") + # logger.info("T6 lee_refined_filter finish") + logger.info("T6 boxcar_filter finish") logger.info('progress bar :85%') # 5、 T6->coherence_estimation->T6 相干度估计 coherence_estimation_path = os.path.join(current_path, "complex_coherence_estimation.exe") diff --git a/vegetationPhenology/VegetationPhenologyAuxData.py b/vegetationPhenology/VegetationPhenologyAuxData.py index 9b3993bb..bb4987b6 100644 --- a/vegetationPhenology/VegetationPhenologyAuxData.py +++ b/vegetationPhenology/VegetationPhenologyAuxData.py @@ -12,6 +12,7 @@ import csv import numpy as np import mahotas import logging +import random from tool.algorithm.algtools.CoordinateTransformation import lonlat2geo, geo2imagexy from tool.algorithm.image.ImageHandle import ImageHandler logger = logging.getLogger("mylog") @@ -203,4 +204,206 @@ class PhenologyMeasCsv: point.append(float(cells[1])) pList.append(point) pointList.append(pList) + return pointList + + + +class PhenoloyMeasCsv_geo: + def __init__(self, csv_path, preprocessed_paras, max_tran__num_per_class=100000): + self.__csv_path = csv_path + self.__preprocessed_paras = preprocessed_paras + self.__max_tran__num_per_class = max_tran__num_per_class + + def api_read_measure(self): + """ + 读取csv表格数据api函数 + """ + csv_data = self.__readcsv(self.__csv_path) + return self.__trans_measuredata(csv_data) + + def api_read_measure_by_name(self, name): + """ + 读取csv表格数据api函数 + """ + csv_data = self.__readcsv_by_name(self.__csv_path, name) + return self.__trans_measuredata(csv_data) + + def class_list(self): + """ + 输出csv表中的前三列 + """ + reader = csv.reader(open(self.__csv_path, newline='')) + class_list=[] + type_id_name = {} + type_id_parent = {} + for line_data in reader: + class_list.append(line_data) # class_list含有四列 + for data in class_list[1:]: + type_parent= data[0] + type_id = int(data[1]) + type_name = data[2] + + if type_id not in type_id_name.keys(): + type_id_name.update({type_id: type_name}) + type_id_parent.update({type_id: type_parent}) + return type_id_name, type_id_parent + pass + + @staticmethod + def __readcsv(csv_path): + """ + 读取csv表格数据 + :para csv_path: csv文件路径 + """ + reader = csv.reader(open(csv_path, newline='')) + csv_list = [] + for line_data in reader: + csv_list.append(line_data) + return csv_list[1:] + + @staticmethod + def __readcsv_by_name(csv_path, name): + """ + 读取csv表格数据 + :para csv_path: csv文件路径 + """ + reader = csv.reader(open(csv_path, newline='')) + csv_list = [] + for line_data in reader: + if name in line_data[0]: + csv_list.append(line_data) + return csv_list + + def __trans_measuredata(self, meas_data): + """ + 获取多边形区域内所有的点,分为训练集数据和测试集数据 + :para meas_data: csv读取的实测数据 + """ + type_data = {} + n = 1 + train_data_list = [] + for data in meas_data: + + for d in data: + if d == '': + raise Exception('there are empty data!', data) + + point_list = [] + dataset, rows, cols = self.__get_para_tif_inf() + + type_id = int(data[1]) + type_name = data[2] + if type_id not in type_data.keys(): + train_data_list.append([n, type_id, type_name, []]) + type_data.update({type_id: type_name}) + n += 1 + + pointList = self.__roiPolygonAnalysis(data[3]) + for points in pointList: + poly = [] + for point in points: + lon = float(point[0]) + lat = float(point[1]) + # projs = lonlat2geo(dataset, lon, lat) + coord = geo2imagexy(dataset, lon, lat) + row = round(coord[1]) + col = round(coord[0]) + if 0 <= row < rows and 0 <= col < cols: + poly.append([row, col]) + else: + logger.warning("point %s is beyond tif scope, in measure data: %s !", point, 
data) + if poly != []: + point_list.append(self.__render(poly)) + for train_data in train_data_list: + if train_data[1] == type_id: + train_data[3] = train_data[3] + self.__render(poly) + if train_data[3] == [] : + raise Exception('there are empty data!', train_data) + + num_list = [] + for train_data in train_data_list: + num_list.append(len(train_data[3])) + max_num = np.min(num_list) + for train_data in train_data_list: + logger.info(str(train_data[0]) + "," + str(train_data[2]) +"," + "num:" + str(len(train_data[3]))) + # max_num = self.__max_tran__num_per_class + logger.info("max number =" + str(max_num) +", random select"+str(max_num)+" point as train data!") + if(len(train_data[3]) > max_num): + train_data[3] = random.sample(train_data[3], max_num) + + if len(train_data_list) <= 1: + raise Exception('there is only one label type!', train_data_list) + return train_data_list + + @staticmethod + def __render(poly): + # https://www.cnpython.com/qa/51516 + """Return polygon as grid of points inside polygon. + Input : poly (list of lists) + Output : output (list of lists) + """ + xs, ys = zip(*poly) + minx, maxx = min(xs), max(xs) + miny, maxy = min(ys), max(ys) + + newPoly = [(int(x - minx), int(y - miny)) for (x, y) in poly] + + X = maxx - minx + 1 + Y = maxy - miny + 1 + + grid = np.zeros((X, Y), dtype=np.int8) + mahotas.polygon.fill_polygon(newPoly, grid) + + return [(x + minx, y + miny) for (x, y) in zip(*np.nonzero(grid))] + + + def __get_para_tif_inf(self): + """ + 获取影像的信息 + :para tif_name: 影像名称 + """ + tif_path = self.__preprocessed_paras + ih = ImageHandler() + dataset = ih.get_dataset(tif_path) + rows = ih.get_img_height(tif_path) + cols = ih.get_img_width(tif_path) + return dataset, rows, cols + + @staticmethod + def __roiPolygonAnalysis(roiStr): + """ + 将csv的POLY数据转为数组 + :para roiStr: poly数据 + :return pointList: 保存多边形的list + """ + pointList = [] + strContent = roiStr.replace("POLYGON", "") + # 解析轮廓字符串为二维数组 + bracketsList = [] + strTemp = '' + strList = [] + for c in strContent: + if c == '(': + bracketsList.append(c) + continue + elif c == ')': + if len(bracketsList) > 0: + bracketsList.pop(0) + if len(strTemp) > 0: + strList.append(strTemp) + strTemp = '' + else: + strTemp += c + for item in strList: + if len(item) == 0: + continue + pTempList = item.split(',') + pList = [] + for row in pTempList: + cells = row.split(' ') + if len(cells) != 2: + continue + point = [float(cells[0]), float(cells[1])] + pList.append(point) + pointList.append(pList) return pointList \ No newline at end of file diff --git a/vegetationPhenology/VegetationPhenologyMain.py b/vegetationPhenology/VegetationPhenologyMain.py index e853ca41..39983da1 100644 --- a/vegetationPhenology/VegetationPhenologyMain.py +++ b/vegetationPhenology/VegetationPhenologyMain.py @@ -7,9 +7,12 @@ @Date :2021/9/6 @Version :1.0.0 """ +import glob import logging import os import datetime +import shutil + import pyproj._compat import cv2 import numpy as np @@ -19,7 +22,11 @@ from tool.algorithm.image.ImageHandle import ImageHandler from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara from tool.algorithm.algtools.logHandler import LogHandler from tool.algorithm.xml.CreatMetafile import CreateMetafile +from tool.algorithm.algtools.ROIAlg import ROIAlg as alg from VegetationPhenologyXmlInfo import CreateDict, CreateStadardXmlFile +from VegetationPhenologyAuxData import PhenoloyMeasCsv_geo + +from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml from tool.file.fileHandle import 
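
# --- Sketch: geo2imagexy above maps a lon/lat (in the raster's CRS, here EPSG:4326)
# to pixel indices through the GeoTransform. For a north-up raster with no rotation
# terms the mapping reduces to the following (an assumed, minimal equivalent):
from osgeo import gdal

def lonlat_to_rowcol(dataset, lon, lat):
    x0, dx, _, y0, _, dy = dataset.GetGeoTransform()   # dy is negative for north-up images
    col = int(round((lon - x0) / dx))
    row = int(round((lat - y0) / dy))
    return row, col

# ds = gdal.Open(hh_geo_path)                 # hypothetical usage
# row, col = lonlat_to_rowcol(ds, 116.35, 39.95)   # arbitrary example coordinates
# --- end of sketch ---
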
fileHandle import sys from tool.algorithm.transforml1a.transHandle import TransImgL1A @@ -38,6 +45,7 @@ EXE_NAME = cf.get('exe_name') LogHandler.init_log_handler('run_log\\' + EXE_NAME) logger = logging.getLogger("mylog") FILTER_SIZE = int(cf.get('filter_size')) +MAX_TRAN_NUM = int(cf.get('max_tran__num_per_class')) file =fileHandle(DEBUG) env_str = os.path.split(os.path.realpath(__file__))[0] os.environ['PROJ_LIB'] = env_str @@ -84,8 +92,11 @@ class PhenologyMain: self.__create_work_space() self.__processing_paras = InitPara.init_processing_paras(self.__input_paras) self.__processing_paras.update(InitPara(DEBUG).get_mult_tar_gz_infs(self.__processing_paras, self.__workspace_preprocessing_path)) - - self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', r"VegetationPhenologyProduct.tar.gz") + SrcImagePath = self.__input_paras["AHVS"]['ParaValue'] + paths = SrcImagePath.split(';') + SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0] + result_name = SrcImageName + "-VP.tar.gz" + self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', result_name) self.__alg_xml_handler.write_out_para("VegetationPhenologyProduct", self.__out_para) #写入输出参数 logger.info('check_source success!') logger.info('progress bar: 10%') @@ -117,7 +128,8 @@ class PhenologyMain: file.del_folder(path) def preprocess_single_tar(self,name,scopes_roi): - key_list = [key for key in self.__processing_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key)and ('pola' not in key))] + key_list = [key for key in self.__processing_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key)and ('pola' not in key) + and ('paraMeter' not in key) and ('sim_ori' not in key)and ('Origin_META' not in key)and ('META' not in key))] ori_sim_key = [key for key in key_list if ('ori_sim' in key)][0] ori_sim_path = self.__processing_paras[ori_sim_key] @@ -137,6 +149,13 @@ class PhenologyMain: scopes_roi = pp().cal_scopes_roi(self.__processing_paras) for name in self.__processing_paras['name_list']: self.preprocess_single_tar(name, scopes_roi) + + para_names_geo = [name + '_sim_ori'] + self.__feature_name_list = para_names_geo + p = pp() + cutted_img_paths, scopes_roi = p.cut_geoimg(self.__workspace_preprocessing_path, para_names_geo, + self.__processing_paras) + self.__preprocessed_paras.update({name + 'sim_ori': cutted_img_paths.get(name + '_sim_ori')}) logger.info('preprocess_handle success!') logger.info('progress bar: 10%') @@ -219,13 +238,36 @@ class PhenologyMain: self.___FeatureFileNameMap[12] = ['Cloude', "entropy.bin"] self.___FeatureFileNameMap[13] = ['Cloude', "alpha.bin"] + def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar): + ''' + # std::cout << "mode 11"; + # std::cout << "SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path"; + ''' + exe_path = r".\baseTool\x64\Release\SIMOrthoProgram.exe" + exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path, + dem_rc, in_sar, out_sar) + print(exe_cmd) + print(os.system(exe_cmd)) + print("==========================================================================") + def create_feature_single_tar(self, name): key_list = [key for key in self.__preprocessed_paras.keys() if((name in key) and ('inc_angle' not in key) and ('LocalIncidenceAngle' not in key))] ori_sim_key = [key for key in key_list if ('ori_sim' in key)][0] + sim_ori_key = [key 
for key in key_list if ('sim_ori' in key)][0] ori_sim_path = self.__preprocessed_paras[ori_sim_key] + sim_ori_path = self.__preprocessed_paras[sim_ori_key] + + hh_path = self.__preprocessed_paras[name + "_HH"] + hh_geo_path = os.path.join(self.__workspace_processing_path, 'hh_geo.tif') + paramter = self.__processing_paras[name + "paraMeter"] + self.calInterpolation_bil_Wgs84_rc_sar_sigma(paramter, sim_ori_path, hh_path, hh_geo_path) # 读取实测值,获取多边形区域内所有的点,分为训练集数据和测试集数据 - train_data, test_data, type_map = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(self.__processing_paras['MeasuredData']), ori_sim_path) + pm = PhenoloyMeasCsv_geo(self.__processing_paras['MeasuredData'], hh_geo_path, MAX_TRAN_NUM) + train_data_list = pm.api_read_measure_by_name(name) + train_data_dic = csvh.trans_landCover_list2dic(train_data_list) + + # train_data, test_data, type_map = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(self.__processing_paras['MeasuredData']), ori_sim_path) logger.info("read phenology Measure.csv success!") # 特征分解 @@ -245,98 +287,173 @@ class PhenologyMain: featureInput = self.__getInputFeatures() feature_dir = CreateFeature.decompose_single_tar(hh_hv_vh_vv_list, self.__workspace_processing_path, self.__workspace_preprocessing_path, name, self._env_str, rows, cols, FILTER_SIZE=3, debug=DEBUG, FeatureInput=featureInput) + feature_geo_dir = self.features_geo(feature_dir, paramter, sim_ori_path, name) + # # 获取训练集提取特征的信息 + # ids = [] + # class_ids = [] + # ch_names = [] + # positions = [] + # for data in train_data: + # if data[0] == name: + # class_ids.append(data[1]) + # positions.append(data[2]) + # ch_names.append(data[3]) + # class_id = [map for map in type_map if (data[1] == map[1])][0] + # ids.append(class_id[0]) + # train_data_dic = {} + # train_data_dic.update({"ids": ids}) + # train_data_dic.update({"class_ids": class_ids}) + # train_data_dic.update({"ch_names": ch_names}) + # train_data_dic.update({"positions": positions}) - # 获取训练集提取特征的信息 - ids = [] - class_ids = [] - ch_names = [] - positions = [] - for data in train_data: - if data[0] == name: - class_ids.append(data[1]) - positions.append(data[2]) - ch_names.append(data[3]) - class_id = [map for map in type_map if (data[1] == map[1])][0] - ids.append(class_id[0]) - train_data_dic = {} - train_data_dic.update({"ids": ids}) - train_data_dic.update({"class_ids": class_ids}) - train_data_dic.update({"ch_names": ch_names}) - train_data_dic.update({"positions": positions}) - - name_test_data =[] - for dt in test_data: - if dt[0] == name: - name_test_data.append(dt) - - return feature_dir, train_data_dic, name_test_data, type_map - + # name_test_data =[] + # for dt in test_data: + # if dt[0] == name: + # name_test_data.append(dt) + logger.info("create_features success!") + logger.info('progress bar: 20%') + # return feature_dir, train_data_dic, name_test_data, type_map + return feature_geo_dir, train_data_dic + def features_geo(self, features_path, paraMeter, sim_ori, sar_name): + dir = os.path.join(self.__workspace_processing_path, sar_name, 'features_geo') + if not os.path.exists(dir): + os.mkdir(dir) + in_tif_paths = list(glob.glob(os.path.join(features_path, '*.tif'))) + for file in in_tif_paths: + name = os.path.basename(file).split('.')[0] + '_geo.tif' + out_path = os.path.join(dir, name) + self.calInterpolation_bil_Wgs84_rc_sar_sigma(paraMeter, sim_ori, file, out_path) + return dir def process_handle(self, start): """ - 算法主处理函数 - :return: True or False - """ + 算法主处理函数 + :return: True or False + """ # 生成每个时相的特征, 并提取训练集和测试集 # 
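
# --- Sketch: gene_optimal_train_set is not shown in this patch; its two thresholds
# (0.07, 0.85) suggest a feature-importance cut-off plus a pairwise-correlation
# cut-off. One common screening pattern of that kind (an assumption, not the actual
# implementation) is:
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def select_features(X, y, importance_min=0.07, corr_max=0.85):
    rf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
    candidates = [i for i, imp in enumerate(rf.feature_importances_) if imp >= importance_min]
    corr = np.abs(np.corrcoef(X, rowvar=False))   # feature-feature correlation matrix
    kept = []
    for i in candidates:                          # greedy: drop features too correlated
        if all(corr[i, j] < corr_max for j in kept):
            kept.append(i)
    return kept                                   # column indices into X

# keep = select_features(X_train_part, Y_train_part)   # hypothetical usage
# --- end of sketch ---
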
每个时相的影像生成特征图 X_train, Y_train = None, None flag = True total_name_list = [] - test_data = [] X_test_dic = {} for name in self.__processing_paras['name_list']: - feature_dir, train_data_dic, test_data_part, type_map = self.create_feature_single_tar(name) - #生成训练集 - X_train_part, Y_train_part = ml.gene_train_set(train_data_dic, feature_dir) + + feature_dir, train_data_dic = self.create_feature_single_tar(name) + # 生成训练集 + X_train_part, Y_train_part, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_dir, 0.07, 0.85) name_list = ml.get_name_list(feature_dir) + if optimal_feature == []: + logger.error('特征筛选结果为空,无可用特征作为训练集') + continue # 生成测试集合 - rows, cols = self.get_name_rows_cols(name) - name_featuresPath_dic_part = ml.vegetationPhenology_combine_feature(feature_dir, self.__workspace_processing_path, name, rows, cols, DEBUG) - X_test_dic_part = self.gene_test_set(test_data_part, name_featuresPath_dic_part, name) - X_test_dic.update(X_test_dic_part) - if flag: - X_train = X_train_part - Y_train = Y_train_part - total_name_list = name_list - flag = False - test_data = test_data_part - else: - X_train = np.vstack((X_train, X_train_part)) - Y_train = np.hstack((Y_train, Y_train_part)) - total_name_list = total_name_list + name_list - test_data = test_data + test_data_part + X_test_path_list = ml.gene_test_set(feature_dir, optimal_feature) - logger.info("create_features success!") - logger.info('progress bar: 20%') - # if DEBUG: - # optimal_feature = [1, 2, 3] - # optimal_X_train = X_train[:, optimal_feature] - # optimal_Y_train = Y_train - # optimal_X_train = optimal_X_train[0:100, :] - # optimal_Y_train = optimal_Y_train[0:100] - # optimal_Y_train[0:100] = 1 - # optimal_Y_train[50:100] = 2 - # else: - optimal_X_train, optimal_Y_train, optimal_feature = ml.sel_optimal_feature(X_train, Y_train, total_name_list, correlation_threshold=0.7) + X_test_dic.update({name: X_test_path_list}) - logger.info("generate train and test set success!") - logger.info('progress bar: 30%') + X_train = X_train_part + Y_train = Y_train_part - #RF - clf = ml.trainRF(optimal_X_train, optimal_Y_train) - logger.info('svm train success!') - logger.info('progress bar: 80%') + logger.info("generate train and test set success!") + logger.info('progress bar: 30%') - # 测试数据 - logger.info('mode testing') - product_path = self.predict(clf, X_test_dic, optimal_feature, type_map, start) + # RF + clf = ml.trainRF(X_train, Y_train) + logger.info('RF train success!') + logger.info('progress bar: 80%') + + # 测试数据 + logger.info('mode testing') + + in_tif_paths = list(glob.glob(os.path.join(feature_dir, '*.tif'))) + rows = ImageHandler.get_img_height(in_tif_paths[0]) + cols = ImageHandler.get_img_width(in_tif_paths[0]) + proj_geo, geo_geo, cover_data_geo = self.imageHandler.read_img(in_tif_paths[0]) + product_path = ml.predict_VP(clf, X_test_path_list, name, self.__workspace_processing_path, rows, cols) + + proj, geo, cover_data = self.imageHandler.read_img(product_path) + # 形态学(闭运算)去roi区域噪点 + cover_data = np.uint8(cover_data) + kernel = np.ones((5, 5), np.uint8) + cover_data = cv2.erode(cv2.dilate(cover_data, kernel), kernel) + for id, class_id in zip(train_data_dic['ids'], train_data_dic['class_ids']): + cover_data[np.where(cover_data == id)] = class_id + cover_data = np.int16(cover_data) + roi_img = self.imageHandler.get_band_array(self.create_roi(in_tif_paths[0])) + # 获取影像roi区域 + cover_data_pro = cover_data * roi_img + cover_geo_path = os.path.join(self.__product_dic, os.path.basename(product_path).split('.tif')[0] + 
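# --- Editor's note -------------------------------------------------------------
# Hedged illustration only: the dilate-then-erode pair applied to the classified
# cover above is a morphological closing, which OpenCV also exposes directly via
# cv2.morphologyEx. train_ids/class_ids mirror the 'ids'/'class_ids' entries of
# train_data_dic used above; this helper is an assumption, not part of the patch
# or of the ml module.
import cv2
import numpy as np

def clean_classification(cover_data, train_ids, class_ids, roi_mask, kernel_size=5):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    closed = cv2.morphologyEx(np.uint8(cover_data), cv2.MORPH_CLOSE, kernel)  # closing = dilate then erode
    out = np.int16(closed)
    for train_id, class_id in zip(train_ids, class_ids):
        out[out == train_id] = class_id        # map training ids back to class codes
    return out * roi_mask                      # keep only the valid image footprint
# -------------------------------------------------------------------------------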
'-VP.tif') + self.imageHandler.write_img(cover_geo_path, proj_geo, geo_geo, cover_data_pro) + self.imageHandler.write_quick_view(cover_geo_path, color_img=True) + + meta_xml_path = self.create_meta_file(cover_geo_path) + + temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output') + out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path)) + if os.path.exists(temp_folder) is False: + os.mkdir(temp_folder) + # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml() + shutil.copy(meta_xml_path, out_xml) logger.info('mode test success!') - self.create_meta_file(product_path) # 文件夹打包 file.make_targz(self.__out_para, self.__product_dic) logger.info('progress bar: 100%') + # """ + # 算法主处理函数 + # :return: True or False + # """ + # # 生成每个时相的特征, 并提取训练集和测试集 + # # 每个时相的影像生成特征图 + # X_train, Y_train = None, None + # flag = True + # total_name_list = [] + # test_data = [] + # X_test_dic = {} + # for name in self.__processing_paras['name_list']: + # feature_dir, train_data_dic, test_data_part, type_map = self.create_feature_single_tar(name) + # #生成训练集 + # X_train_part, Y_train_part = ml.gene_train_set(train_data_dic, feature_dir) + # name_list = ml.get_name_list(feature_dir) + # # 生成测试集合 + # rows, cols = self.get_name_rows_cols(name) + # name_featuresPath_dic_part = ml.vegetationPhenology_combine_feature(feature_dir, self.__workspace_processing_path, name, rows, cols, DEBUG) + # X_test_dic_part = self.gene_test_set(test_data_part, name_featuresPath_dic_part, name) + # X_test_dic.update(X_test_dic_part) + # if flag: + # X_train = X_train_part + # Y_train = Y_train_part + # total_name_list = name_list + # flag = False + # test_data = test_data_part + # else: + # X_train = np.vstack((X_train, X_train_part)) + # Y_train = np.hstack((Y_train, Y_train_part)) + # total_name_list = total_name_list + name_list + # test_data = test_data + test_data_part + # + # logger.info("create_features success!") + # logger.info('progress bar: 20%') + # + # optimal_X_train, optimal_Y_train, optimal_feature = ml.sel_optimal_feature(X_train, Y_train, total_name_list, correlation_threshold=0.7) + # + # logger.info("generate train and test set success!") + # logger.info('progress bar: 30%') + # + # #RF + # clf = ml.trainRF(optimal_X_train, optimal_Y_train) + # logger.info('svm train success!') + # logger.info('progress bar: 80%') + # + # # 测试数据 + # logger.info('mode testing') + # product_path = self.predict(clf, X_test_dic, optimal_feature, type_map, start) + # logger.info('mode test success!') + # self.create_meta_file(product_path) + # # 文件夹打包 + # file.make_targz(self.__out_para, self.__product_dic) + # logger.info('progress bar: 100%') + def predict(self, mode, X_test_dic, validity_list, type_map, start): # 测试数据 clf = mode @@ -394,6 +511,19 @@ class PhenologyMain: return product_geo_path + def create_roi(self, img_path): + """ + 计算ROI掩膜 + :return:掩膜路径 + """ + processing_path = self.__workspace_processing_path + + # 利用影像的有效范围生成MASK + tif_mask_path = os.path.join(processing_path, "tif_mask.tif") # processing_path + "tif_mask.tif" + alg.trans_tif2mask(tif_mask_path, img_path, np.nan) + logger.info('create ROI image success!') + return tif_mask_path + # def test_resamp(): # # 形态学(闭运算)去roi区域噪点 # # cover_data = np.uint8(cover_data) @@ -404,26 +534,42 @@ class PhenologyMain: def create_meta_file(self, product_path): # 生成元文件案例 - xml_path = "./model_meta.xml" + # xml_path = "./model_meta.xml" tem_folder = self.__workspace_path + EXE_NAME + r"\Temporary""\\" image_path = product_path out_path1 = 
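# --- Editor's note -------------------------------------------------------------
# Hedged sketch of what the ROI mask produced by alg.trans_tif2mask in create_roi()
# is assumed to encode: 1 where the geocoded feature image holds valid data, 0 where
# pixels are NaN background. The real implementation lives in the tool package and
# is not reproduced here; this only shows the array-level idea.
import numpy as np

def valid_data_mask(img_arr, invalid_value=np.nan):
    mask = np.ones(img_arr.shape, dtype=np.int16)
    if np.isnan(invalid_value):
        mask[np.isnan(img_arr)] = 0            # NaN marks the empty border after geocoding
    else:
        mask[img_arr == invalid_value] = 0
    return mask
# -------------------------------------------------------------------------------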
os.path.join(tem_folder, "trans_geo_projcs.tif") out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif") - par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) - model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 - CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml() + # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start) + # model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml") # 输出xml路径 + # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml() + # + # SrcImagePath = self.__input_paras["AHVS"]['ParaValue'] + # paths = SrcImagePath.split(';') + # SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0] + # if len(paths) >= 2: + # for i in range(1, len(paths)): + # SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0] + # meta_xml_path = os.path.join(self.__product_dic, EXE_NAME + "Product.meta.xml") + # self.type_id_name = csvh.vegePhenology_class_list(self.__processing_paras['MeasuredData']) + # + # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process2( + # self.type_id_name, None, SrcImageName) - SrcImagePath = self.__input_paras["AHVS"]['ParaValue'] - paths = SrcImagePath.split(';') - SrcImageName = os.path.split(paths[0])[1].split('.tar.gz')[0] - if len(paths) >= 2: - for i in range(1, len(paths)): - SrcImageName = SrcImageName + ";" + os.path.split(paths[i])[1].split('.tar.gz')[0] - meta_xml_path = os.path.join(self.__product_dic, EXE_NAME + "Product.meta.xml") - self.type_id_name = csvh.vegePhenology_class_list(self.__processing_paras['MeasuredData']) + SrcImageName = os.path.basename(product_path).split('.tif')[0] + model_path = "./product.xml" + meta_xml_path = os.path.join(self.__product_dic, SrcImageName + ".meta.xml") + key = os.path.basename(product_path).split('-VP.tif')[0] + '_Origin_META' + para_dict = CreateMetaDict(image_path, self.__processing_paras[key], self.__workspace_processing_path, + out_path1, out_path2).calu_nature() + para_dict.update({"imageinfo_ProductName": "植被物候"}) + para_dict.update({"imageinfo_ProductIdentifier": "VegetationPhenology"}) + para_dict.update({"imageinfo_ProductLevel": "4"}) + para_dict.update({"ProductProductionInfo_BandSelection": "1,2"}) + para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "Label"}) + CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml() + + return meta_xml_path - CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process2( - self.type_id_name, None, SrcImageName) if __name__ == '__main__':
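# --- Editor's note -------------------------------------------------------------
# Hedged naming sketch only: given the '-VP.tif' product written in process_handle
# above, this is how the meta-xml path and the '<name>_Origin_META' lookup key are
# assumed to be derived before CreateMetaDict/CreateProductXml are invoked. The
# helper and the example paths are illustrative (Windows-style paths assumed,
# matching the rest of the module), not part of the patch.
import os

def product_meta_names(product_path, product_dic):
    src_image_name = os.path.basename(product_path).split('.tif')[0]
    meta_xml_path = os.path.join(product_dic, src_image_name + ".meta.xml")
    origin_meta_key = os.path.basename(product_path).split('-VP.tif')[0] + '_Origin_META'
    return src_image_name, meta_xml_path, origin_meta_key

# product_meta_names(r"D:\out\GF3_XXX-VP.tif", r"D:\out")
#   -> ('GF3_XXX-VP', 'D:\\out\\GF3_XXX-VP.meta.xml', 'GF3_XXX_Origin_META')
# -------------------------------------------------------------------------------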