diff --git a/Ortho/Ortho.xml b/Ortho/Ortho.xml
index a58b6e4f..980098d6 100644
--- a/Ortho/Ortho.xml
+++ b/Ortho/Ortho.xml
@@ -1,7 +1,7 @@
     CSAR_202107275419_0001-0
-    E:\Result_GF3\
+    D:\micro\WorkSpace\
     File
     ElementAlg
@@ -45,7 +45,7 @@
     File
     tar.gz
     Cal
-    E:\GF3Data\vegetationPhenology\GF3C_MYC_QPSI_006270_E100.4_N27.0_20230615_L1A_AHV_L10000158764.tar.gz
+    D:\BaiduNetdiskDownload\西北新增CSAR盐碱度\GF3C_KSC_QPSI_008440_E86.2_N44.1_20231113_L1A_AHV_L10000215823.tar.gz
     True
     False
     File
@@ -58,9 +58,9 @@
     DEM digital elevation image
     30 m resolution DEM digital elevation image, tif
     File
-    zip
+    File
     Cal
-    E:\GF3Data\vegetationPhenology\CSAR_vegetation_ASTGTM2_N26E100_dem.zip;E:\GF3Data\vegetationPhenology\CSAR_vegetation_ASTGTM2_N27E100_dem.zip
+    D:\BaiduNetdiskDownload\dem_xb
     True
     True
     File
@@ -92,7 +92,7 @@
     File
     tar.gz
     Cal
-    E:\Result_GF3\Ortho\Output\GF3C_MYC_QPSI_006270_E100.4_N27.0_20230615_L1A_AHV_L10000158764-ortho.tar.gz
+    D:\micro\WorkSpace\Ortho\Output\GF3C_KSC_QPSI_008440_E86.2_N44.1_20231113_L1A_AHV_L10000215823-ortho.tar.gz
     DEFAULT
     DEFAULT
     DEFAULT
diff --git a/Ortho/OrthoMain.py b/Ortho/OrthoMain.py
index d161db7c..41e2e71c 100644
--- a/Ortho/OrthoMain.py
+++ b/Ortho/OrthoMain.py
@@ -13,6 +13,7 @@ import logging
 from tool.algorithm.block.blockprocess import BlockProcess
 from tool.algorithm.image.ImageHandle import ImageHandler
 from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml, OrthoAzimuth
+from tool.algorithm.xml.AnalysisXml import DictXml
 from tool.algorithm.algtools.PreProcess import PreProcess as pp
 import tarfile
 from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource  # import XML reading and validation utilities
@@ -498,14 +499,19 @@ class OrthoMain:
         logger.info('progress bar :100%')
         return True
-        pass
-
     def cut_dem(self, dem_merged_path, meta_file_path):
-        left_up_lon = 0
-        left_up_lat = 0
-
+        _, scopes = DictXml(meta_file_path).get_extend()
+        intersect_polygon = pp().intersect_polygon(scopes)
+        if intersect_polygon is None:
+            raise Exception('cal intersect box fail!')
+        shp_path = os.path.join(self.__workspace_Temporary_path, 'IntersectPolygon.shp')
+        if pp().write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
+            raise Exception('create intersect shp fail!')
+        dem_process = os.path.join(self.__workspace_Temporary_path, 'dem_cut.tif')
+        pp().cut_img(dem_process, dem_merged_path, shp_path)
+        return dem_process

     def process_sim_ori(self, ori_sim, sim_ori):
         p = pp()
@@ -534,7 +540,7 @@ class OrthoMain:
         out_dem_path = self.__workspace_ResampledDEM_path
         dem_merged_path=DEMProcess.dem_merged(in_dem_path, meta_file_path, out_dem_path)  # generates TestDEM\mergedDEM_VRT.tif
-        # self.cut_dem(dem_merged_path, meta_file_path)
+        dem_path = self.cut_dem(dem_merged_path, meta_file_path)
         # 2. solve row/column coordinates via indirect geolocation
         slc_paths = self.__in_processing_paras["SLC"]
         # 2.1 build the lookup table
@@ -553,7 +559,7 @@ class OrthoMain:
                 in_slc_path=os.path.join(slc_paths,slc_path)
                 break
         # after obtaining the correction model
-        Orthorectification.preCaldem_sar_rc(dem_merged_path,in_slc_path,self.__workspace_Temporary_path,self.__workspace_package_path.replace("\\","\\\\"))  # roughly pre-filter the coordinate range
+        Orthorectification.preCaldem_sar_rc(dem_path,in_slc_path,self.__workspace_Temporary_path,self.__workspace_package_path.replace("\\","\\\\"))  # roughly pre-filter the coordinate range
         logger.info('progress bar: 40%')
         # clip_dem_reample_path=os.path.join(self.__workspace_Temporary_path, "SAR_dem.tiff")
@@ -686,12 +692,12 @@ class OrthoMain:
         # build the output archive
         logger.info('progress bar :94%')
         logger.info('start make targz..')
-        self.del_floder(self.__workspace_unpack_path)
-        self.del_floder(self.__workspace_ResampledDEM_path)
-        self.del_floder(self.__workspace_LutImg_path)
-        self.del_floder(self.__workspace_IncidenceImg_path)
-        self.del_floder(self.__workspace_SimImg_path)
-        self.del_floder(self.__workspace_SARIntensity_path)
+        # self.del_floder(self.__workspace_unpack_path)
+        # self.del_floder(self.__workspace_ResampledDEM_path)
+        # self.del_floder(self.__workspace_LutImg_path)
+        # self.del_floder(self.__workspace_IncidenceImg_path)
+        # self.del_floder(self.__workspace_SimImg_path)
+        # self.del_floder(self.__workspace_SARIntensity_path)
         self.make_targz(self.__out_para, self.__workspace_package_path+"\\")
         logger.info('make targz finish')
         logger.info('progress bar :100%')
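Editor's note: the new `cut_dem` added above (and mirrored in `BackScatteringMain.py` below) clips the merged DEM to the scene footprint before `preCaldem_sar_rc`, so orthorectification no longer walks the full DEM mosaic. Below is a minimal, hypothetical sketch of the same idea that crops with a buffered bounding box instead of the patch's shapefile cutline (`PreProcess.cut_img`); the paths, corner list, and nodata value are illustrative assumptions, not values from this patch.

```python
# Hypothetical stand-alone sketch: crop a merged DEM to a buffered scene
# footprint so downstream geocoding only reads relevant pixels.
from osgeo import gdal

def crop_dem_to_scene(dem_path, out_path, corners, buf_deg=0.4):
    """corners: [[lon, lat], ...] scene corner points in EPSG:4326."""
    lons = [p[0] for p in corners]
    lats = [p[1] for p in corners]
    # gdal.Warp expects bounds as (minX, minY, maxX, maxY) in the output SRS
    bounds = (min(lons) - buf_deg, min(lats) - buf_deg,
              max(lons) + buf_deg, max(lats) + buf_deg)
    gdal.Warp(out_path, dem_path, outputBounds=bounds, dstNodata=-9999)

# e.g. crop_dem_to_scene('mergedDEM_VRT.tif', 'dem_cut.tif',
#                        [[86.0, 44.5], [86.4, 44.5], [86.0, 43.7], [86.4, 43.7]])
```

The committed code goes through an intersection polygon written to a shapefile instead, which also covers the multi-footprint case handled by `pp().intersect_polygon`.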
diff --git a/backScattering/BackScatteringMain.py b/backScattering/BackScatteringMain.py
index ff329fd3..40dcb0bd 100644
--- a/backScattering/BackScatteringMain.py
+++ b/backScattering/BackScatteringMain.py
@@ -12,6 +12,7 @@ import logging
 from tool.algorithm.algtools.logHandler import LogHandler
 from tool.algorithm.block.blockprocess import BlockProcess
 from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource
+from tool.algorithm.xml.AnalysisXml import DictXml
 from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml, OrthoAzimuth
 from tool.algorithm.image.ImageHandle import ImageHandler
 from tool.algorithm.algtools.PreProcess import PreProcess as pp
@@ -277,6 +278,18 @@ class ScatteringMain:
         pp().cut_img(sim_ori_process, sim_ori, shp_path)
         return sim_ori_process

+    def cut_dem(self, dem_merged_path, meta_file_path):
+        _, scopes = DictXml(meta_file_path).get_extend()
+        intersect_polygon = pp().intersect_polygon(scopes)
+        if intersect_polygon is None:
+            raise Exception('cal intersect box fail!')
+        shp_path = os.path.join(self.__workspace_preprocessing_path, 'IntersectPolygon.shp')
+        if pp().write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
+            raise Exception('create intersect shp fail!')
+        dem_process = os.path.join(self.__workspace_preprocessing_path, 'dem_cut.tif')
+        pp().cut_img(dem_process, dem_merged_path, shp_path)
+        return dem_process
+
     def process_handle(self, start):
         in_tif_paths = list(glob.glob(os.path.join(self.__in_processing_paras['SLC'], '*.tif')))
         if in_tif_paths == []:
@@ -319,6 +332,7 @@ class ScatteringMain:
         dem_merged_path = DEMProcess.dem_merged(in_dem_path, meta_file_path, out_dem_path)  # generates TestDEM\mergedDEM_VRT.tif
+        dem_path = self.cut_dem(dem_merged_path, meta_file_path)

         in_slc_path = None
         for slc_path in in_tif_paths:
@@ -328,7 +342,7 @@ class ScatteringMain:
                 break
         # after obtaining the correction model
-        Orthorectification.preCaldem_sar_rc(dem_merged_path, in_slc_path, self.__workspace_preprocessing_path,
+        Orthorectification.preCaldem_sar_rc(dem_path, in_slc_path, self.__workspace_preprocessing_path,
                                             self.__workspace_processing_path.replace("\\", "\\\\"))  # roughly pre-filter the coordinate range
         logger.info('progress bar: 40%')
         # clip_dem_reample_path=os.path.join(self.__workspace_preprocessing_path, "SAR_dem.tiff")
diff --git a/soilSalinity-Train_predict/SoilSalinityPredict.xml b/soilSalinity-Train_predict/SoilSalinityPredict.xml
index ba31a1d8..c541eeab 100644
--- a/soilSalinity-Train_predict/SoilSalinityPredict.xml
+++ b/soilSalinity-Train_predict/SoilSalinityPredict.xml
@@ -39,7 +39,7 @@
     tar.gz
     Man
-    F:\2023xibei\GF3C_KSC_QPSI_008440_E86.0_N44.7_20231113_L1A_AHV_L10000215825-ortho.tar.gz
+    F:\2024xibei\GF3B_KSC_QPSI_010328_E86.1_N44.3_20231109_L1A_AHV_L10000262133-ortho.tar.gz
     DEFAULT
     DEFAULT
@@ -110,7 +110,7 @@
     File
     tif
     Man
-    F:\2023xibei\S2_NDVImed2.tif
+    F:\2024xibei\S2_NDVImed.tif
     DEFAULT
     DEFAULT
     DEFAULT
@@ -140,7 +140,7 @@
     File
     tar.gz
     Man
-    D:\micro\WorkSpace\SoilSalinityPredict\Output\GF3C_KSC_QPSI_008440_E86.0_N44.7_20231113_L1A_AHV_L10000215825-ortho-SSAA.tar.gz
+    D:\micro\WorkSpace\SoilSalinityPredict\Output\GF3B_KSC_QPSI_010328_E86.1_N44.3_20231109_L1A_AHV_L10000262133-ortho-SSAA.tar.gz
     DEFAULT
     DEFAULT
     DEFAULT
diff --git a/soilSalinity/SoilSalinity.xml b/soilSalinity/SoilSalinity.xml
index 5a68d015..a698cfd2 100644
--- a/soilSalinity/SoilSalinity.xml
+++ b/soilSalinity/SoilSalinity.xml
@@ -1,7 +1,7 @@
     CSAR_202107275419_0001-0
-    D:\soilSanlinity\
+    D:\micro\WorkSpace\
     File
     ElementAlg
@@ -80,7 +80,7 @@
     File
     tif
     Man
-    E:\C-SARNDVINDWIfugaileixingDEM\fugaileixing.tif
+    F:\2023xibei\fugaileixing.tif
     DEFAULT
     DEFAULT
     DEFAULT
@@ -108,7 +108,7 @@
     File
     tif
     Man
-    E:\C-SARNDVINDWIfugaileixingDEM\S2_NDVImed2.tif
+    F:\2023xibei\S2_NDVImed2.tif
     DEFAULT
     DEFAULT
     DEFAULT
@@ -138,7 +138,7 @@
     File
     tar.gz
     Man
-    D:\soilSanlinity\SoilSalinity\Output\GF3C_KSC_QPSI_008440_E86.0_N44.7_20231113_L1A_AHV_L10000215825-ortho-SSAA.tar.gz
+    D:\micro\WorkSpace\SoilSalinity\Output\GF3C_KSC_QPSI_008440_E86.0_N44.7_20231113_L1A_AHV_L10000215825-ortho-SSAA.tar.gz
     DEFAULT
     DEFAULT
     DEFAULT
diff --git a/soilSalinity/SoilSalinityMain.py b/soilSalinity/SoilSalinityMain.py
index ae654186..26c3cc76 100644
--- a/soilSalinity/SoilSalinityMain.py
+++ b/soilSalinity/SoilSalinityMain.py
@@ -315,6 +315,7 @@ class SalinityMain:
         rows = self.imageHandler.get_img_height(self.__preprocessed_paras['HH'])
         cols = self.imageHandler.get_img_width(self.__preprocessed_paras['HH'])
         block_size = bp.get_block_size(rows, cols)
+        logger.info('block size is :%s', block_size)
         bp.cut(out_dir, self.__workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
         img_dir, img_name = bp.get_file_names(self.__workspace_block_tif_path, ['tif'])
         dir_dict = bp.get_same_img(img_dir, img_name)
@@ -337,6 +338,7 @@ class SalinityMain:
             features_array[np.isnan(features_array)] = 0.0
             features_array[np.isinf(features_array)] = 0.0
             self.imageHandler.write_img(features_path, "", [0, 0, 1, 0, 0, 1], features_array)
+            logger.info('create features matrix success!')

         # for n in range(block_num):
diff --git a/soilSalinity/config.ini b/soilSalinity/config.ini
index 27ec3600..176d49b4 100644
--- a/soilSalinity/config.ini
+++ b/soilSalinity/config.ini
@@ -8,7 +8,6 @@
 productLevel = 5
 exe_name = SoilSalinity
 # In debug mode the temporary workspace is kept; True: debugging on, False: debugging off
 debug = False
-
 ######2-Algorithm processing parameters######
 pixelspace = 5
 ######3-Algorithm result parameters######
diff --git a/soilSalinity/pspHAAlphaDecomposition.py b/soilSalinity/pspHAAlphaDecomposition.py
index 135695df..84c6369e 100644
--- a/soilSalinity/pspHAAlphaDecomposition.py
+++ b/soilSalinity/pspHAAlphaDecomposition.py
@@ -98,6 +98,7 @@ class PspHAAlphaDecomposition:
             self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
             if is_read_to_dic:
                 h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
+        logger.info('run bin to tif success!')
         return h_a_alpha_features

     def api_h_a_alpha_decomposition_T3(self, h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
diff --git a/surfaceRoughness_oh2004/SurfaceRoughnessMain.py b/surfaceRoughness_oh2004/SurfaceRoughnessMain.py
index 40caa34e..b41ff5f5 100644
--- a/surfaceRoughness_oh2004/SurfaceRoughnessMain.py
+++ b/surfaceRoughness_oh2004/SurfaceRoughnessMain.py
@@ -50,7 +50,6 @@
 else:
     DEBUG = False
 file =fileHandle(DEBUG)
 EXE_NAME = cf.get('exe_name')
-FILTER_SIZE = int(cf.get('filter_size'))
 soil_moisture_value_min = float(cf.get('product_value_min'))
 soil_moisture_value_max = float(cf.get('product_value_max'))
 pixelspace=float(cf.get('pixelspace'))
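Editor's note on the `FILTER_SIZE` removal above: with a configparser-backed reader, fetching a key that `config.ini` no longer defines raises at import time, which is presumably why the dead read was dropped. A hedged sketch using the stdlib `configparser` (the section name and default are assumptions; this repo wraps the lookup in its own `cf` helper):

```python
# Sketch: read required and optional keys from an INI file. Reading a missing
# option without a fallback raises configparser.NoSectionError/NoOptionError.
import configparser

cf = configparser.ConfigParser()
cf.read('config.ini', encoding='utf-8')

exe_name = cf.get('internal', 'exe_name')                        # required key
filter_size = cf.getint('internal', 'filter_size', fallback=3)   # optional key, safe default
```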
diff --git a/tool/algorithm/algtools/PreProcess.py b/tool/algorithm/algtools/PreProcess.py
index 2f93f888..026cd5f0 100644
--- a/tool/algorithm/algtools/PreProcess.py
+++ b/tool/algorithm/algtools/PreProcess.py
@@ -376,6 +376,31 @@ class PreProcess:
             return {}
         return cutted_img_paths

+    def cut_imgs_VP(self, out_dir, para_names, processing_paras, shp_path, img_name):
+        """
+        Crop imagery with vector data.
+        :param para_names: names of the parameters to process
+        :param shp_path: shapefile used as the cutline
+        """
+        if len(para_names) == 0:
+            return {}
+        cutted_img_paths = {}
+        try:
+            for name in para_names:
+                if name == 'Covering':
+                    img_name = img_name.split('_')[6] + '_'
+                    output_path = os.path.join(out_dir, img_name + name + '_cut.tif')
+                else:
+                    output_path = os.path.join(out_dir, name + '_cut.tif')
+                input_path = processing_paras[name]
+                self.cut_img(output_path, input_path, shp_path)
+                cutted_img_paths.update({name: output_path})
+                logger.info('cut %s success!', name)
+        except BaseException:
+            logger.error('cut_img failed!')
+            return {}
+        return cutted_img_paths
+
     @staticmethod
     def cut_img(output_path, input_path, shp_path):
         """
diff --git a/tool/algorithm/block/blockprocess.py b/tool/algorithm/block/blockprocess.py
index 9ea4de25..32efa1c3 100644
--- a/tool/algorithm/block/blockprocess.py
+++ b/tool/algorithm/block/blockprocess.py
@@ -25,13 +25,14 @@ class BlockProcess:

     @staticmethod
     def get_block_size(rows, cols):
-        block_size = 512
-        if (rows // 1024) <= 5 and (cols // 1024) <= 5:
+        block_size = 1024
+        if rows <= 2048 or cols <= 2048:
             block_size = 512
-        elif 5 < (rows // 1024) < 10 and 5 < (cols // 1024) < 10:
+        if (rows // 1024) <= 5 or (cols // 1024) <= 5:
             block_size = 1024
-        else:
+        elif 5 < (rows // 1024) and 5 < (cols // 1024):
             block_size = 2048
+
         return block_size

     # def get_block_size(rows, cols, block_size_config):
diff --git a/tool/algorithm/ml/machineLearning.py b/tool/algorithm/ml/machineLearning.py
index eac2e261..e5408faf 100644
--- a/tool/algorithm/ml/machineLearning.py
+++ b/tool/algorithm/ml/machineLearning.py
@@ -70,7 +70,7 @@ class MachineLeaning:
         # split features into blocks
         bp = BlockProcess()
         block_size = bp.get_block_size(rows, cols)
-
+        logger.info('block size is :%s', block_size)
         bp.cut(feature_tif_dir, workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
         img_dir, img_name = bp.get_file_names(workspace_block_tif_path, ['tif'])
         dir_dict_all = bp.get_same_img(img_dir, img_name)
@@ -209,7 +209,7 @@ class MachineLeaning:
             band = ImageHandler.get_bands(path)
             if band == 1:
-                features_array = np.zeros((1, 1024, 1024), dtype=float)
+                features_array = np.zeros((1, block_size, block_size), dtype=float)
                 feature_array = ImageHandler.get_data(path)
                 features_array[0, :, :] = feature_array
             else:
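Editor's note on `get_block_size`: the rewrite switches the tiers from `and` to `or` conditions, and the matching `machineLearning.py` change stops hard-coding the 1024-pixel buffer so single-band blocks follow whatever tier was chosen. One subtlety: as committed, both `if` statements run, so a small scene (say 1000×1000) is first assigned 512 and then reset to 1024 because `1000 // 1024 == 0 <= 5`. Below is a sketch of a non-overlapping version of the same tiering, for comparison only (not what the patch does):

```python
def get_block_size(rows: int, cols: int) -> int:
    """Pick a processing block size from the image dimensions."""
    if rows <= 2048 or cols <= 2048:
        return 512    # small scenes: finer blocks
    if (rows // 1024) <= 5 or (cols // 1024) <= 5:
        return 1024   # mid-size scenes
    return 2048       # large scenes

assert get_block_size(1000, 1000) == 512
assert get_block_size(4096, 4096) == 1024
assert get_block_size(8192, 8192) == 2048
```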
diff --git a/tool/algorithm/xml/AnalysisXml.py b/tool/algorithm/xml/AnalysisXml.py
index 8bbc228a..b7d74c56 100644
--- a/tool/algorithm/xml/AnalysisXml.py
+++ b/tool/algorithm/xml/AnalysisXml.py
@@ -45,14 +45,14 @@ class DictXml:
         point_downright = [float(bottomRight.find("longitude").text), float(bottomRight.find("latitude").text)]
         scopes = [point_upleft, point_upright, point_downleft, point_downright]

-        point_upleft_buf = [float(topLeft.find("longitude").text) - 0.5, float(topLeft.find("latitude").text) + 0.5]
-        point_upright_buf = [float(topRight.find("longitude").text) + 0.5, float(topRight.find("latitude").text) + 0.5]
-        point_downleft_buf = [float(bottomLeft.find("longitude").text) - 0.5,
-                              float(bottomLeft.find("latitude").text) - 0.5]
-        point_downright_buf = [float(bottomRight.find("longitude").text) + 0.5,
-                               float(bottomRight.find("latitude").text) - 0.5]
-        scopes_buf = [point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf]
-        return scopes
+        point_upleft_buf = [float(topLeft.find("longitude").text) - 0.4, float(topLeft.find("latitude").text) + 0.4]
+        point_upright_buf = [float(topRight.find("longitude").text) + 0.4, float(topRight.find("latitude").text) + 0.4]
+        point_downleft_buf = [float(bottomLeft.find("longitude").text) - 0.4,
+                              float(bottomLeft.find("latitude").text) - 0.4]
+        point_downright_buf = [float(bottomRight.find("longitude").text) + 0.4,
+                               float(bottomRight.find("latitude").text) - 0.4]
+        scopes_buf = ([point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf], )
+        return scopes, scopes_buf


 class xml_extend:
diff --git a/vegetationPhenology/VegetationPhenology.xml b/vegetationPhenology/VegetationPhenology.xml
index 8769e9b2..8288fe2c 100644
--- a/vegetationPhenology/VegetationPhenology.xml
+++ b/vegetationPhenology/VegetationPhenology.xml
@@ -37,7 +37,9 @@
     File
     tar.gz
     Man
-    F:\VegetationPhenology-likun\lijiang\GF3B_KSC_QPSI_007906_E100.2_N27.0_20230525_L1A_AHV_L10000190531-ortho.tar.gz
+
+    F:\VegetationPhenology-likun\lijiang\GF3B_KSC_QPSI_007906_E100.2_N27.0_20230525_L1A_AHV_L10000190531-ortho.tar.gz
+
     DEFAULT
     DEFAULT
     DEFAULT
@@ -107,13 +109,12 @@
     Polarimetric feature combination
     Optional polarimetric feature set 1, 14 features in total (numbered 0-13)
     Freeman: surface scattering p_s(0), double-bounce scattering p_d(1), volume scattering p_v(2);
-    Touzi: scattering angle α_s(3), scattering phase ϕ_α(4), target scattering symmetry τ(5), relative energy λ_i(6);
     Yamaguchi: surface scattering f_s(7), double-bounce scattering f_d(8), volume scattering f_v(9), helix scattering f_h(10);
     Cloude-Pottier: decomposition scattering entropy H(11), anisotropy A(12), mean scattering angle α(13)
     Value
     string
     Man
-    0,1,2,7,8,9,10
+    0,1,2
     True
     True
     UploadInput
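Editor's note on `get_extend`: it now returns both the raw corner list and the buffered one, the buffer shrinks from 0.5° to 0.4°, and `scopes_buf` is wrapped in a one-element tuple so callers that expect an iterable of footprints (e.g. `pp().intersect_polygon(scopes)` in the new `cut_dem`) can consume it directly. A small sketch of the buffering rule with made-up coordinates:

```python
def buffer_scope(scope, buf=0.4):
    """Expand a 4-corner footprint [UL, UR, LL, LR] outward by `buf` degrees."""
    ul, ur, ll, lr = scope
    return [[ul[0] - buf, ul[1] + buf], [ur[0] + buf, ur[1] + buf],
            [ll[0] - buf, ll[1] - buf], [lr[0] + buf, lr[1] - buf]]

scope = [[86.0, 44.5], [86.4, 44.5], [86.0, 43.9], [86.4, 43.9]]  # hypothetical corners
scopes, scopes_buf = scope, (buffer_scope(scope),)  # mirrors the new return shape
# callers unpack both values: `_, scopes = DictXml(meta_file_path).get_extend()`
```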
diff --git a/vegetationPhenology/VegetationPhenologyMain.py b/vegetationPhenology/VegetationPhenologyMain.py
index 8dea872e..e163d4ac 100644
--- a/vegetationPhenology/VegetationPhenologyMain.py
+++ b/vegetationPhenology/VegetationPhenologyMain.py
@@ -7,6 +7,7 @@
 @Date :2021/9/6
 @Version :1.0.0
 """
+import csv
 import glob
 import logging
 import os
@@ -169,8 +170,8 @@ class PhenologyMain:
         # compute the image outlines and their intersection area
         intersect_shp_path = self.__workspace_preprocessing_path + 'IntersectPolygon.shp'
         scopes_roi = p.cal_intersect_shp(intersect_shp_path, para_names_geo, self.__processing_paras, scopes)
-        cutted_img_paths = p.cut_imgs(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras,
-                                      intersect_shp_path)
+        cutted_img_paths = p.cut_imgs_VP(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras,
+                                         intersect_shp_path, name)
         self.__preprocessed_paras.update({name + '_sim_ori': cutted_img_paths.get(name + '_sim_ori')})
         self.__preprocessed_paras.update({name + '_Covering': cutted_img_paths.get('Covering')})
@@ -373,7 +374,9 @@ class PhenologyMain:
         """
         # build features for every acquisition date and extract train/test sets
        # each date's imagery produces a feature map
-        X_train, Y_train = None, None
+
+        X_train = []
+        Y_train = []
         flag = True
         total_name_list = []
         X_test_dic = {}
@@ -381,35 +384,45 @@ class PhenologyMain:
             feature_dir, train_data_dic = self.create_feature_single_tar(name)
             # build the training set
-            X_train_part, Y_train_part, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_dir, 0.08, 0.85)
+            # X_train_part, Y_train_part, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_dir, 0.08, 0.7)
+            X_train_part, Y_train_part = ml.gene_train_set(train_data_dic, feature_dir)
             name_list = ml.get_name_list(feature_dir)
-            if len(optimal_feature) <= 0:
-                logger.error('feature selection result is empty; no usable features for the training set')
-                continue
+            # build the test set
+            # X_test_path_list = ml.gene_test_set(feature_dir, optimal_feature)
+
+            # X_test_dic.update({name: X_test_path_list})
+            X_test_dic.update({name + '_features': feature_dir})
+
+            if X_train == []:
+                X_train = X_train_part
+                Y_train = Y_train_part
+                total_name_list = name_list
+            else:
+                X_train = np.vstack((X_train, X_train_part))
+                Y_train = np.hstack((Y_train, Y_train_part))
+                total_name_list = total_name_list + name_list
+
+        logger.info("generate train and test set success!")
+        logger.info('progress bar: 30%')
+
+        optimal_X_train, optimal_Y_train, optimal_feature = ml.sel_optimal_feature(X_train, Y_train, total_name_list, correlation_threshold=0.7)
+
+        # RF
+        clf = ml.trainRF(optimal_X_train, optimal_Y_train)
+        logger.info('RF train success!')
+
+        # test data
+        logger.info('mode testing')
+        for name in self.__processing_paras['name_list']:
+            feature_dir = X_test_dic.get(name + '_features')
             X_test_path_list = ml.gene_test_set(feature_dir, optimal_feature)
-
-            X_test_dic.update({name: X_test_path_list})
-
-            X_train = X_train_part
-            Y_train = Y_train_part
-
-        logger.info("generate train and test set success!")
-        logger.info('progress bar: 30%')
-
-        # RF
-        clf = ml.trainRF(X_train, Y_train)
-        logger.info('RF train success!')
-        logger.info('progress bar: 80%')
-
-        # test data
-        logger.info('mode testing')
-
             in_tif_paths = list(glob.glob(os.path.join(feature_dir, '*.tif')))
             rows = ImageHandler.get_img_height(in_tif_paths[0])
             cols = ImageHandler.get_img_width(in_tif_paths[0])
             proj_geo, geo_geo, cover_data_geo = self.imageHandler.read_img(in_tif_paths[0])
             product_path = ml.predict_VP(clf, X_test_path_list, name, self.__workspace_processing_path, rows, cols)
+            # product_path = ml.predict_VP(clf, X_test_dic.get(name), name, self.__workspace_processing_path, rows, cols)
             proj, geo, cover_data = self.imageHandler.read_img(product_path)

             # morphological closing to remove noise in the roi area
@@ -417,7 +430,8 @@ class PhenologyMain:
             kernel = np.ones((5, 5), np.uint8)
             cover_data = cv2.erode(cv2.dilate(cover_data, kernel), kernel)
             cover_data = np.int16(cover_data)
-            for id, class_id in zip(train_data_dic['ids'], train_data_dic['class_ids']):
+            train_dic = self.get_train_dic(self.__processing_paras['MeasuredData'])
+            for id, class_id in zip(train_dic['ids'], train_dic['class_ids']):
                 cover_data[np.where(cover_data == id)] = class_id
             roi_img = self.imageHandler.get_band_array(self.create_roi(in_tif_paths[0]))  # get the image roi area
@@ -443,64 +457,11 @@ class PhenologyMain:
             # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
             shutil.copy(meta_xml_path, out_xml)
         logger.info('mode test success!')
+        logger.info('progress bar: 80%')

         # package the output folder
         file.make_targz(self.__out_para, self.__product_dic)
         logger.info('progress bar: 100%')
-        # """
-        # Main processing function of the algorithm
-        # :return: True or False
-        # """
-        # # build features for every acquisition date and extract train/test sets
-        # # each date's imagery produces a feature map
-        # X_train, Y_train = None, None
-        # flag = True
-        # total_name_list = []
-        # test_data = []
-        # X_test_dic = {}
-        # for name in self.__processing_paras['name_list']:
-        #     feature_dir, train_data_dic, test_data_part, type_map = self.create_feature_single_tar(name)
-        #     # build the training set
-        #     X_train_part, Y_train_part = ml.gene_train_set(train_data_dic, feature_dir)
-        #     name_list = ml.get_name_list(feature_dir)
-        #     # build the test set
-        #     rows, cols = self.get_name_rows_cols(name)
-        #     name_featuresPath_dic_part = ml.vegetationPhenology_combine_feature(feature_dir, self.__workspace_processing_path, name, rows, cols, DEBUG)
-        #     X_test_dic_part = self.gene_test_set(test_data_part, name_featuresPath_dic_part, name)
-        #     X_test_dic.update(X_test_dic_part)
-        #     if flag:
-        #         X_train = X_train_part
-        #         Y_train = Y_train_part
-        #         total_name_list = name_list
-        #         flag = False
-        #         test_data = test_data_part
-        #     else:
-        #         X_train = np.vstack((X_train, X_train_part))
-        #         Y_train = np.hstack((Y_train, Y_train_part))
-        #         total_name_list = total_name_list + name_list
-        #         test_data = test_data + test_data_part
-        #
-        # logger.info("create_features success!")
-        # logger.info('progress bar: 20%')
-        #
-        # optimal_X_train, optimal_Y_train, optimal_feature = ml.sel_optimal_feature(X_train, Y_train, total_name_list, correlation_threshold=0.7)
-        #
-        # logger.info("generate train and test set success!")
-        # logger.info('progress bar: 30%')
-        #
-        # # RF
-        # clf = ml.trainRF(optimal_X_train, optimal_Y_train)
-        # logger.info('svm train success!')
-        # logger.info('progress bar: 80%')
-        #
-        # # test data
-        # logger.info('mode testing')
-        # product_path = self.predict(clf, X_test_dic, optimal_feature, type_map, start)
-        # logger.info('mode test success!')
-        # self.create_meta_file(product_path)
-        # # package the output folder
-        # file.make_targz(self.__out_para, self.__product_dic)
-        # logger.info('progress bar: 100%')

     def predict(self, mode, X_test_dic, validity_list, type_map, start):
         # test data
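Editor's note on the retrained flow above: training samples from every acquisition date are now stacked into one matrix before a single `sel_optimal_feature` pass, instead of training on the last date only. A minimal sketch of the accumulation pattern with dummy arrays (shapes are illustrative; `isinstance` is used here instead of the patch's `X_train == []`, which relies on NumPy's fallback for a failed element-wise comparison once `X_train` becomes an ndarray):

```python
import numpy as np

X_train, Y_train = [], []
parts = [(np.random.rand(10, 5), np.zeros(10)),   # stand-ins for per-date scenes
         (np.random.rand(8, 5), np.ones(8))]
for X_part, Y_part in parts:
    if isinstance(X_train, list):                 # first date initialises the arrays
        X_train, Y_train = X_part, Y_part
    else:
        X_train = np.vstack((X_train, X_part))    # stack samples row-wise
        Y_train = np.hstack((Y_train, Y_part))    # keep labels 1-D
print(X_train.shape, Y_train.shape)               # (18, 5) (18,)
```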
@@ -558,6 +519,23 @@ class PhenologyMain:
         return product_geo_path

+    def get_train_dic(self, csv_path):
+        reader = csv.reader(open(csv_path, newline=''))
+        ids = []
+        class_ids = []
+        csv_list = []
+        for line_data in reader:
+            csv_list.append(line_data)
+        for data in csv_list[1:]:
+            ids.append(data[0])
+            class_ids.append(data[1])
+
+        train_data_dic = {}
+        train_data_dic.update({"ids": ids})
+        train_data_dic.update({"class_ids": class_ids})
+
+        return train_data_dic
+
     def resampleImgs(self, name, refer_img_path):
         cover_rampling_path = os.path.join(self.__workspace_processing_path, name + "_cover.tif")
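Editor's note on `get_train_dic`: it maps measured-data CSV rows to the `ids`/`class_ids` used to relabel the classified raster. Two hedged refinements worth considering, sketched below: close the file via a context manager, and cast the values to `int`, since `cover_data` is `np.int16` and comparing it against raw CSV strings in `cover_data == id` may never match. The `id,class_id` column order is an assumption inferred from how the dict is consumed:

```python
import csv

def get_train_dic(csv_path):
    """Read 'id,class_id' rows (header skipped) from the measured-data CSV."""
    with open(csv_path, newline='') as f:   # context manager closes the handle
        rows = list(csv.reader(f))[1:]      # [1:] drops the header row
    return {
        "ids": [int(r[0]) for r in rows],         # cast: the cover raster is int16
        "class_ids": [int(r[1]) for r in rows],
    }
```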