microproduct-l-sar/landcover-L-SAR/LandCoverMain.py

# -*- coding: UTF-8 -*-
"""
@Project microproduct
@File LandCoverMain.py
@Function : train a land-cover classifier and classify surface cover from the input data
@Author SHJ
@Date 2022/11/23
@Version 2.0.0
"""
import datetime
import glob
import logging
import os
import shutil
import sys
from PIL import Image
import numpy as np
import cv2
from osgeo import gdal, osr
import multiprocessing
import pyproj._compat
# PreProcess must be imported before other modules that load the gdal library, otherwise cropping images fails; see https://blog.csdn.net/u014656611/article/details/106450006
from tool.algorithm.algtools.PreProcess import PreProcess as pp
from LandCoverAuxData import LandCoverMeasCsv
from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro
from tool.algorithm.image.ImageHandle import ImageHandler
from tool.algorithm.algtools.ROIAlg import ROIAlg as alg
from tool.algorithm.xml.AlgXmlHandle import ManageAlgXML, CheckSource, InitPara
from tool.algorithm.block.blockprocess import BlockProcess
from tool.algorithm.algtools.logHandler import LogHandler
from tool.algorithm.polsarpro.pspCloudePottierDecomposition import PspCloudePottierDecomposition
from tool.algorithm.polsarpro.pspFreemanDecomposition import PspFreemanDecomposition
from tool.algorithm.polsarpro.pspLeeRefinedFilterT3 import LeeRefinedFilterT3
from tool.algorithm.polsarpro.pspYamaguchiDecomposition import PspYamaguchiDecomposition
from tool.algorithm.polsarpro.pspTouziDecomposition import PspTouziDecomposition
from tool.algorithm.polsarpro.bin2tif import write_bin_to_tif
from tool.algorithm.xml.CreatMetafile import CreateMetafile
from tool.algorithm.algtools.MetaDataHandler import Calibration
from tool.algorithm.xml.CreateMetaDict import CreateMetaDict, CreateProductXml
from tool.algorithm.xml.AnalysisXml import DictXml, xml_extend
from LandCoverXmlInfo import CreateDict, CreateStadardXmlFile
from tool.config.ConfigeHandle import Config as cf
from tool.file.fileHandle import fileHandle
from tool.algorithm.transforml1a.transHandle import TransImgL1A
from tool.algorithm.ml.machineLearning import MachineLeaning as ml
from tool.csv.csvHandle import csvHandle
csvh = csvHandle()

# Module-level configuration read from the tool.config handle
DEBUG = cf.get('debug') == 'True'
EXE_NAME = cf.get('exe_name')
tar = r'-' + cf.get('tar')
productLevel = cf.get('productLevel')
LogHandler.init_log_handler('run_log\\' + EXE_NAME)
logger = logging.getLogger("mylog")
FILTER_SIZE = int(cf.get('filter_size'))
multiprocessing_num = int(cf.get('multiprocessing_num'))
file = fileHandle(DEBUG)
MAX_TRAN_NUM = int(cf.get('max_tran__num_per_class'))
# env_str = os.path.split(os.path.realpath(__file__))[0]
env_str = os.path.dirname(os.path.abspath(sys.argv[0]))
os.environ['PROJ_LIB'] = os.getcwd()
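
# When DEBUG is True the Temporary workspace is kept after a run (see
# del_temp_workspace below); fileHandle(DEBUG) presumably applies the same
# debug behaviour to intermediate files (an assumption: its implementation
# lives in tool.file.fileHandle and is not shown here).
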
class LandCoverMain:
    """
    Main class of the land-cover algorithm
    """
    def __init__(self, alg_xml_path):
        self.alg_xml_path = alg_xml_path
        self.imageHandler = ImageHandler()
        self.__alg_xml_handler = ManageAlgXML(alg_xml_path)
        self.__check_handler = CheckSource(self.__alg_xml_handler)
        self.__workspace_path, self.__temp_path, self.__task_id, self.__out_para = None, None, None, None
        self.__input_paras, self.__output_paras, self.__processing_paras, self.__preprocessed_paras = {}, {}, {}, {}
        # names of the feature rasters
        self.__feature_name_list = []
        # dictionary of xml parameters
        self.__parameters_dic = {}
        # feature table
        self.__FeatureMap = {}
        self.__TotalFeatureSize = 14  # 3+4+4+3
        # file name for each feature index
        self.___FeatureFileNameMap = {}
        # initialize the feature parameters
        self.__FeatureParaInit()
        # input feature parameters
        self.__FeatureInput = {}
        # path of the reference image
        self.__ref_img_path = ''
        # width / number of columns
        self.__cols = 0
        # height / number of rows
        self.__rows = 0
        # coordinate system
        self.__proj = ''
        # geotransform of the image
        self.__geo = [0, 0, 0, 0, 0, 0]
        # block size
        self.__block_size = 0
        self.processinfo = [1, 1, 1, 1]

    def check_source(self):
        """
        Check that the algorithm's configuration files, imagery and auxiliary data are all present
        """
        env_str = os.getcwd()
        logger.info("sysdir: %s", env_str)
        self.__check_handler.check_alg_xml()
        self.__check_handler.check_run_env()
        # check whether the scene is fully polarimetric
        self.__input_paras = self.__alg_xml_handler.get_input_paras()
        checkFlag, self.__parameters_dic = self.__check_handler.check_input_paras(self.__input_paras)
        self.__workspace_path = self.__alg_xml_handler.get_workspace_path()
        self.__create_work_space()
        self.__processing_paras = InitPara.init_processing_paras(self.__input_paras, self.__workspace_preprocessed_path)
        self.__processing_paras.update(self.get_tar_gz_inf(self.__processing_paras["sar_path0"]))
        SrcImageName = os.path.split(self.__input_paras["AHV"]['ParaValue'])[1].split('.tar.gz')[0]
        result_name = SrcImageName + tar + ".tar.gz"
        self.__out_para = os.path.join(self.__workspace_path, EXE_NAME, 'Output', result_name)
        self.__alg_xml_handler.write_out_para("LandCoverProduct", self.__out_para)  # write the output parameter
        logger.info('check_source success!')
        return True

    def get_tar_gz_inf(self, tar_gz_path):
        para_dic = {}
        # rstrip('.tar.gz') would strip any trailing '.','t','a','r','g','z' characters, so split on the suffix instead
        name = os.path.split(tar_gz_path)[1].split('.tar.gz')[0]
        file_dir = os.path.join(self.__workspace_preprocessing_path, name + '\\')
        file.de_targz(tar_gz_path, file_dir)
        # metadata dictionary
        para_dic.update(InitPara.get_meta_dic_new(InitPara.get_meta_paths(file_dir, name), name))
        # dictionary of tif paths keyed by polarization
        para_dic.update(InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name)))
        parameter_path = os.path.join(file_dir, "orth_para.txt")
        para_dic.update({"paraMeter": parameter_path})
        return para_dic
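
    # The dictionary returned above combines: the metadata entries from
    # InitPara.get_meta_dic_new (including the 'META'/'Origin_META' paths used
    # later), the per-polarization tif paths ('HH', 'HV', 'VH', 'VV') from
    # get_polarization_mode, and the ortho parameter file under 'paraMeter'.
    # The key names other than 'paraMeter' are inferred from how
    # __processing_paras is consumed elsewhere in this file.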

    def __create_work_space(self):
        """
        Delete the old workspace folders and create new ones
        """
        self.__workspace_preprocessing_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "preprocessing\\")
        self.__workspace_preprocessed_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "preprocessed\\")
        self.__workspace_processing_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "processing\\")
        self.__workspace_block_tif_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "blockTif\\")
        self.__workspace_block_tif_processed_path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary", "blockTifProcessed\\")
        self.__feature_tif_dir = os.path.join(self.__workspace_processing_path, 'feature_tif')
        path_list = [self.__workspace_preprocessing_path, self.__workspace_preprocessed_path,
                     self.__workspace_processing_path, self.__workspace_block_tif_path,
                     self.__workspace_block_tif_processed_path, self.__feature_tif_dir]
        file.creat_dirs(path_list)
        logger.info('create new workspace success!')

    def del_temp_workspace(self):
        """
        Delete the temporary workspace (kept when DEBUG is on)
        """
        if DEBUG is True:
            return
        path = os.path.join(self.__workspace_path, EXE_NAME, "Temporary")
        if os.path.exists(path):
            file.del_folder(path)

    def preprocess_handle(self):
        """
        Preprocessing: check projections, compute the ROI, and crop all inputs
        """
        para_names_geo = ['sim_ori']
        self.__feature_name_list = para_names_geo
        p = pp()
        p.check_img_projection(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras)
        # compute the ROI
        scopes = ()
        scopes += (xml_extend(self.__processing_paras['META']).get_extend(),)
        scopes += p.box2scope(self.__processing_paras['box'])
        # compute the image footprints and intersect them
        intersect_shp_path = self.__workspace_preprocessing_path + 'IntersectPolygon.shp'
        scopes_roi = p.cal_intersect_shp(intersect_shp_path, para_names_geo, self.__processing_paras, scopes)
        # crop the SAR images and the other images to the intersection
        cutted_img_paths = p.cut_imgs(self.__workspace_preprocessing_path, para_names_geo, self.__processing_paras,
                                      intersect_shp_path)
        self.__preprocessed_paras.update(cutted_img_paths)
        para_names_l1a = ["HH", "VV", "HV", "VH"]
        l1a_width = ImageHandler.get_img_width(self.__processing_paras['HH'])
        l1a_height = ImageHandler.get_img_height(self.__processing_paras['HH'])
        self._tr = TransImgL1A(self.__processing_paras['sim_ori'], scopes_roi, l1a_height, l1a_width)  # crop in L1A geometry
        for name in para_names_l1a:
            out_path = os.path.join(self.__workspace_preprocessed_path, name + "_preprocessed.tif")
            self._tr.cut_L1A(self.__processing_paras[name], out_path)
            self.__preprocessed_paras.update({name: out_path})
        logger.info('preprocess_handle success!')
        # for name in para_names_geo:
        #     l1a_path = os.path.join(self.__workspace_preprocessed_path, name + ".tif")
        #     self._tr.tran_geo_to_l1a(self.__preprocessed_paras[name], l1a_path, self.__preprocessed_paras['ori_sim'], is_class=False)
        #     self.__preprocessed_paras[name] = l1a_path
        self.__cols_geo = self.imageHandler.get_img_width(self.__preprocessed_paras['sim_ori'])
        self.__rows_geo = self.imageHandler.get_img_height(self.__preprocessed_paras['sim_ori'])
        self.__cols = self.imageHandler.get_img_width(self.__preprocessed_paras['HH'])
        self.__rows = self.imageHandler.get_img_height(self.__preprocessed_paras['HH'])

    def __decompose(self, t3_path, rows, cols):
        """
        Polarimetric decompositions: Freeman, Touzi, Yamaguchi, Cloude
        :param t3_path: path of the T3 matrix data
        :param rows: number of image rows
        :param cols: number of image columns
        :return: (flag, dict mapping decomposition name to its output folder)
        """
        # work out which feature combinations were requested
        self.__getInputFeatures()
        exeDir = ''
        outFolderDic = {}
        if 'Freeman' in self.__FeatureInput:
            # Freeman decomposition
            freemanOutDir = os.path.join(self.__workspace_processing_path, 'freeman\\')
            freemDecom = PspFreemanDecomposition(exeDir, t3_path, freemanOutDir)
            flag = freemDecom.api_freeman_decomposition_T3(0, 0, rows, cols)
            if not flag:
                logger.error('FreemanDecomposition err')
                return False, None
            outFolderDic['Freeman'] = freemanOutDir
            logger.info('Freeman successful')
        if 'Touzi' in self.__FeatureInput:
            # Touzi decomposition
            touziOutDir = os.path.join(self.__workspace_processing_path, 'touzi\\')
            if not os.path.exists(touziOutDir):
                os.makedirs(touziOutDir)
            # the Touzi decomposition is slow and its features discriminate poorly
            p = PspTouziDecomposition(self.__preprocessed_paras, touziOutDir)
            p.Touzi_decomposition_multiprocessing()
            outFolderDic['Touzi'] = touziOutDir
            logger.info('Touzi successful')
        if 'Yamaguchi' in self.__FeatureInput:
            # Yamaguchi decomposition
            yamaguchiOutDir = os.path.join(self.__workspace_processing_path, 'yamaguchi\\')
            yamaguchiDecom = PspYamaguchiDecomposition(exeDir, t3_path, yamaguchiOutDir)
            flag = yamaguchiDecom.api_yamaguchi_4components_decomposition_T3(0, 0, rows, cols)
            if not flag:
                logger.error('YamaguchiDecomposition err')
                return False, None
            outFolderDic['Yamaguchi'] = yamaguchiOutDir
            logger.info('Yamaguchi successful')
        if 'Cloude' in self.__FeatureInput:
            # Cloude-Pottier decomposition
            cloudeOutDir = os.path.join(self.__workspace_processing_path, 'cloude\\')
            cloudeDecom = PspCloudePottierDecomposition(exeDir, t3_path, cloudeOutDir)
            flag = cloudeDecom.api_h_a_alpha_decomposition_T3(0, 0, rows, cols)
            if not flag:
                logger.error('CloudePottierDecomposition err')
                return False, None
            outFolderDic['Cloude'] = cloudeOutDir
            logger.info('Cloude successful')
        return True, outFolderDic
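
    # As written, __getInputFeatures below only ever fills the 'Freeman',
    # 'Yamaguchi' and 'Cloude' keys (indices 0-9), so the 'Touzi' branch above
    # is only reached if __FeatureInput is populated some other way.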

    def __getInputFeatures(self):
        """
        Parse the requested feature combination (indices as mapped in __FeatureParaInit):
        1. Freeman: surface scattering p_s, double-bounce scattering p_d, volume scattering p_v -> indices [0,1,2]
        2. Yamaguchi: surface scattering f_s, double-bounce scattering f_d, volume scattering f_v, helix scattering f_h -> indices [3,4,5,6]
        3. Cloude-Pottier: anisotropy A, entropy H, mean scattering angle alpha -> indices [7,8,9]
        """
        decomposeList = self.__parameters_dic["FeatureCombination"].split(',')
        stepSet = set()
        for item in decomposeList:
            stepSet.add(int(item))
        for i in stepSet:
            if 0 <= i < 3:
                if 'Freeman' in self.__FeatureInput:
                    self.__FeatureInput['Freeman'].append(i)
                else:
                    self.__FeatureInput['Freeman'] = [i]
            elif 3 <= i < 7:
                if 'Yamaguchi' in self.__FeatureInput:
                    self.__FeatureInput['Yamaguchi'].append(i)
                else:
                    self.__FeatureInput['Yamaguchi'] = [i]
            elif 7 <= i < 10:
                if 'Cloude' in self.__FeatureInput:
                    self.__FeatureInput['Cloude'].append(i)
                else:
                    self.__FeatureInput['Cloude'] = [i]
            else:
                logger.warning("__decomposeSteps Invalid data:%s!", i)

    def __FeatureParaInit(self):
        self.__FeatureMap["Freeman"] = [0, 1, 2]
        self.__FeatureMap["Yamaguchi"] = [3, 4, 5, 6]
        self.__FeatureMap["Cloude"] = [7, 8, 9]
        self.___FeatureFileNameMap[0] = ['Freeman', "Freeman_Odd.bin"]
        self.___FeatureFileNameMap[1] = ['Freeman', "Freeman_Dbl.bin"]
        self.___FeatureFileNameMap[2] = ['Freeman', "Freeman_Vol.bin"]
        self.___FeatureFileNameMap[3] = ['Yamaguchi', "Yamaguchi4_Odd.bin"]
        self.___FeatureFileNameMap[4] = ['Yamaguchi', "Yamaguchi4_Dbl.bin"]
        self.___FeatureFileNameMap[5] = ['Yamaguchi', "Yamaguchi4_Vol.bin"]
        self.___FeatureFileNameMap[6] = ['Yamaguchi', "Yamaguchi4_Hlx.bin"]
        self.___FeatureFileNameMap[7] = ['Cloude', "anisotropy.bin"]
        self.___FeatureFileNameMap[8] = ['Cloude', "entropy.bin"]
        self.___FeatureFileNameMap[9] = ['Cloude', "alpha.bin"]
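
    # Note: only indices 0-9 are mapped above, while __TotalFeatureSize is 14
    # (3+4+4+3); the four unmapped indices appear to belong to the Touzi
    # features of the original design, which PspTouziDecomposition writes as
    # tifs rather than the .bin files listed here.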

    def create_roi(self, img_path):
        """
        Compute the ROI mask
        :return: path of the mask
        """
        processing_path = self.__workspace_processing_path
        # build the mask from the valid extent of the image
        tif_mask_path = os.path.join(processing_path, "tif_mask.tif")
        alg.trans_tif2mask(tif_mask_path, img_path, np.nan)
        logger.info('create ROI image success!')
        return tif_mask_path

    def gene_test_set(self, feature_tif_dic, optimal_feature):
        """
        Build the test set
        :param feature_tif_dic: dictionary of feature image paths
        :param optimal_feature: optimal feature subset
        :return X_test_list: paths of the block test-set images
        """
        # split the features into blocks
        bp = BlockProcess()
        block_size = bp.get_block_size(self.__rows, self.__cols)
        self.__block_size = block_size
        bp.cut_new(feature_tif_dic, self.__workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
        img_dir, img_name = bp.get_file_names(self.__workspace_block_tif_path, ['tif'])
        dir_dict_all = bp.get_same_img(img_dir, img_name)
        # keep only the feature images in the optimal feature subset
        dir_dict = {}
        for n, key in zip(range(len(dir_dict_all)), dir_dict_all):
            if n in optimal_feature:
                dir_dict.update({key: dir_dict_all[key]})
        logger.info('test_feature:%s', dir_dict.keys())
        logger.info('blocking tifs success!')
        X_test_list = []
        # stack the feature bands block by block
        key_name = next(iter(dir_dict))
        block_num = len(dir_dict[key_name])
        for n in range(block_num):
            name = os.path.basename(dir_dict[key_name][n])
            suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
            features_path = os.path.join(self.__workspace_block_tif_processed_path, "features", "features" + suffix)
            X_test_list.append(features_path)
            features_array = np.zeros((len(dir_dict), block_size, block_size), dtype='float32')
            for m, value in zip(range(len(dir_dict)), dir_dict.values()):
                features_array[m, :, :] = ImageHandler.get_band_array(value[n])
            features_array[np.isnan(features_array)] = 0.0  # replace NaNs with 0
            ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
        logger.info('create features matrix success!')
        return X_test_list

    def predict_blok(self, clf, X_test, rows, cols, img_path, row_begin, col_begin, block_sum, n):
        Y_test = clf.predict(X_test)
        img = Y_test.reshape(rows, cols)
        out_image = Image.fromarray(img)
        out_image.save(img_path)
        # bp = BlockProcess()
        # bp.assign_spatial_reference_bypoint(row_begin, col_begin, self.__proj, self.__geo, img_path)
        sr = osr.SpatialReference()
        sr.ImportFromWkt(self.__proj)
        # shift the geotransform origin to this block's upper-left corner
        geo_transform = (self.__geo[0] + col_begin * self.__geo[1] + row_begin * self.__geo[2],
                         self.__geo[1],
                         self.__geo[2],
                         self.__geo[3] + col_begin * self.__geo[4] + row_begin * self.__geo[5],
                         self.__geo[4],
                         self.__geo[5])
        dst_ds = gdal.Open(img_path, gdal.GA_Update)
        if dst_ds is None:
            return False
        dst_ds.SetProjection(sr.ExportToWkt())
        dst_ds.SetGeoTransform(geo_transform)
        del dst_ds
        logger.info('total:%s,block:%s test data finished !', block_sum, n)
        return True
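
    # The origin shift above follows the standard GDAL affine geotransform,
    # which maps pixel (row, col) to georeferenced coordinates as
    #   X = GT[0] + col * GT[1] + row * GT[2]
    #   Y = GT[3] + col * GT[4] + row * GT[5]
    # so evaluating it at (row_begin, col_begin) gives the block's upper-left
    # corner.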

    def predict(self, clf, X_test_list):
        """
        Predict on the test data
        :param clf: classifier (RF/SVM model)
        :param X_test_list: paths of the block test-set images
        :return: path of the merged classification image
        """
        # process the blocks with a multiprocessing pool
        bp = BlockProcess()
        out_tif_name = 'landcover'
        block_features_dir = X_test_list
        bp_cover_dir = os.path.join(self.__workspace_block_tif_processed_path, out_tif_name + '\\')
        if not os.path.exists(bp_cover_dir):
            os.makedirs(bp_cover_dir)
        processes_num = min([len(block_features_dir), 10])
        pool = multiprocessing.Pool(processes=processes_num)
        pl = []
        for path, n in zip(block_features_dir, range(len(block_features_dir))):
            name = os.path.split(path)[1]
            features_array = self.imageHandler.get_data(path)
            X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
            suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
            img_path = os.path.join(bp_cover_dir, out_tif_name + suffix)
            row_begin = int(name.split('_')[-4])
            col_begin = int(name.split('_')[-2])
            pl.append(pool.apply_async(self.predict_blok, (clf, X_test, self.__block_size, self.__block_size, img_path, row_begin, col_begin, len(block_features_dir), n)))
            logger.info('total:%s,block:%s testing data !', len(block_features_dir), n)
        pool.close()
        pool.join()
        # merge the block images
        data_dir = bp_cover_dir
        out_path = self.__workspace_processing_path[0:-1]
        bp.combine_new(data_dir, self.__cols, self.__rows, out_path, file_type=['tif'], datetype='float32')
        # attach the georeferencing
        cover_path = os.path.join(self.__workspace_processing_path, out_tif_name + ".tif")
        bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
        return cover_path
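
    # The suffix parsing above assumes blocks are named
    # *_<row_begin>_<row_end>_<col_begin>_<col_end>.tif (the <..._end> fields
    # are an assumption; only split('_')[-4] and [-2] are actually consumed,
    # as the block's starting row and column).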

    def ahv_to_t3(self):
        # convert the quad-pol tifs into PolSARpro T3 binary format
        atp = AHVToPolsarpro()
        t3_path = os.path.join(self.__workspace_processing_path, 'psp_t3\\')
        # atp.ahv_to_polsarpro_t3(t3_path, self.__workspace_preprocessed_path)
        polarization = ['HH', 'HV', 'VH', 'VV']
        calibration = Calibration.get_Calibration_coefficient(self.__processing_paras['Origin_META'], polarization)
        tif_path = atp.calibration(calibration, in_ahv_dir=self.__workspace_preprocessed_path)
        atp.ahv_to_polsarpro_t3_soil(t3_path, tif_path)
        logger.info('S2 to T3 success!')
        # refined Lee filtering
        leeFilter = LeeRefinedFilterT3()
        lee_filter_path = os.path.join(self.__workspace_processing_path, 'lee_filter\\')
        leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols(), FILTER_SIZE)
        logger.info("refine_lee filter success!")
        logger.info('progress bar: 30%')
        return lee_filter_path

    def calInterpolation_bil_Wgs84_rc_sar_sigma(self, parameter_path, dem_rc, in_sar, out_sar):
        """
        Geocode a SAR image with the external SIMOrthoProgram tool (mode 11):
        SIMOrthoProgram.exe 11 in_parameter_path in_rc_wgs84_path in_ori_sar_path out_orth_sar_path
        """
        exe_path = r".\baseTool\x64\Release\SIMOrthoProgram-L-SAR.exe"
        exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 11, parameter_path,
                                                                                           dem_rc, in_sar, out_sar)
        # print(exe_cmd)
        print(os.system(exe_cmd))
        print("==========================================================================")

    def features_geo(self, features_path):
        geo_dir = os.path.join(self.__workspace_processing_path, 'features_geo\\')
        if not os.path.exists(geo_dir):
            os.mkdir(geo_dir)
        para_names_l1a = ["HH", "VV", "HV", "VH"]
        for key in para_names_l1a:
            calOut_path = os.path.join(features_path, key + '_cal.tif')
            AHVToPolsarpro.sar_backscattering_sigma(self.__preprocessed_paras[key],
                                                    self.__processing_paras['Origin_META'], calOut_path)
        parameter = self.__processing_paras['paraMeter']
        sim_ori = self.__preprocessed_paras['sim_ori']
        in_tif_paths = list(glob.glob(os.path.join(features_path, '*.tif')))
        processes_num = min([len(in_tif_paths), multiprocessing_num, multiprocessing.cpu_count() - 1])
        pool = multiprocessing.Pool(processes=processes_num)
        pl = []
        for file_p in in_tif_paths:
            name = os.path.basename(file_p).split('.')[0] + '_geo.tif'
            out_path = os.path.join(geo_dir, name)
            pl.append(pool.apply_async(self.calInterpolation_bil_Wgs84_rc_sar_sigma, (parameter, sim_ori, file_p, out_path)))
        pool.close()
        pool.join()
        # self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
        #                                              self.__preprocessed_paras['sim_ori'], file, out_path)
        return geo_dir

    def process_handle(self, start):
        """
        Main processing routine of the algorithm
        """
        hh_geo_path = self.__workspace_processing_path + "hh_geo.tif"
        self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
                                                     self.__preprocessed_paras['sim_ori'],
                                                     self.__preprocessed_paras['HH'], hh_geo_path)
        # read the ground-truth measurements, collect every point inside the
        # polygons, and split them into training and test data
        pm = LandCoverMeasCsv(self.__processing_paras['LabelData'], hh_geo_path, MAX_TRAN_NUM)
        train_data_list = pm.api_read_measure()
        train_data_dic = csvh.trans_landCover_list2dic(train_data_list)
        csvh_roi = csvHandle(self.__rows_geo, self.__cols_geo)
        # train_data_dic = csvh_roi.trans_landCover_measuredata_dic(csvh_roi.readcsv(self.__processing_paras['LabelData']), self.__preprocessed_paras['ori_sim'], MAX_TRAN_NUM)
        label_img = csvh_roi.get_roi_img()
        if len(label_img) != 0:
            self.imageHandler.write_img(os.path.join(self.__workspace_processing_path, "label_img.tif"), "", [0, 0, 0, 0, 0, 0], label_img)
        logger.info("read csv data success!")
        logger.info('progress bar: 20%')
        # polarimetric decomposition
        flag, outFolder = self.__decompose(self.ahv_to_t3(), self.__rows, self.__cols)
        feature_tif_paths = {}
        for key in outFolder:
            feature_bin_dic = outFolder[key]
            if key == 'Touzi':
                for path in list(glob.glob(os.path.join(feature_bin_dic, '*.tif'))):
                    name = os.path.split(path)[1].split('.')[0]
                    shutil.copyfile(path, os.path.join(self.__feature_tif_dir, name + '.tif'))
                    feature_tif_paths.update({name: os.path.join(self.__feature_tif_dir, name + '.tif')})
            else:
                feature_tif_paths.update(write_bin_to_tif(self.__feature_tif_dir, feature_bin_dic))
        logging.info("feature_tif_paths:%s", feature_tif_paths)
        # geocode every feature image
        feature_geo = self.features_geo(self.__feature_tif_dir)
        # normalize newly added features
        # for name in self.__feature_name_list:
        #     proj, geo, arr = self.imageHandler.read_img(self.__preprocessed_paras[name])
        #     arr = ml.standardization(arr)
        #     self.imageHandler.write_img(os.path.join(self.__feature_tif_dir, name + ".tif"), proj, geo, arr)
        logger.info("decompose feature success!")
        logger.info('progress bar: 50%')
        # build the training set on the optimal feature subset
        X_train, Y_train, optimal_feature = ml.gene_optimal_train_set(train_data_dic, feature_geo, 0.07, 0.85)
        # train the model
        # cost = self.__processing_paras["Cost"]
        # kernel = self.__processing_paras["Kernel"]
        # if cost < 0:
        #     cost = 1
        # if kernel != 'rbf' and kernel != 'poly' and kernel != 'linear' and kernel != 'sigmoid':
        #     kernel = 'rbf'
        # RF
        clf = ml.trainRF(X_train, Y_train)
        logger.info("RF train successful")
        # # SVM
        # clf = ml.trainSVM(X_train, Y_train, cost, kernel)
        # logger.info("SVM train successful")
        logger.info('progress bar: 60%')
        # build the test set
        # X_test_path_list = ml.gene_test_set(self.__feature_tif_dir, optimal_feature)
        X_test_path_list = ml.gene_test_set(feature_geo, optimal_feature)
        # predict
        logger.info('testing')
        cover_path = ml.predict(clf, X_test_path_list, EXE_NAME, self.__workspace_processing_path, self.__rows_geo, self.__cols_geo)
        logger.info('test success!')
        logger.info('progress bar: 95%')
        # convert the result
        proj, geo, cover_data = self.imageHandler.read_img(cover_path)
        # morphological closing (dilate then erode) to remove speckle noise in the ROI
        cover_data = np.uint8(cover_data)
        kernel = np.ones((5, 5), np.uint8)
        cover_data = cv2.erode(cv2.dilate(cover_data, kernel), kernel)
        cover_data = np.int16(cover_data)
        # map internal ids back to the class ids from the label data
        for id, class_id in zip(train_data_dic['ids'], train_data_dic['class_ids']):
            cover_data[np.where(cover_data == id)] = class_id
        # cover_path = cover_path.replace('.tif', '_geo.tif')
        cover_geo_path = cover_path.replace('.tif', '_geo.tif')
        self.imageHandler.write_img(cover_geo_path, proj, geo, cover_data)
        # L1A image coordinates to geographic coordinates
        # self.calInterpolation_bil_Wgs84_rc_sar_sigma(self.__processing_paras['paraMeter'],
        #                                              self.__preprocessed_paras['sim_ori'], cover_path, cover_geo_path)
        # # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], cover_path, cover_geo_path, 'nearest')
        proj, geo, cover_data_geo = self.imageHandler.read_img(cover_geo_path)
        proj, geo, cover_data_hh = self.imageHandler.read_img(hh_geo_path)
        # self._tr.l1a_2_geo_int(self.__preprocessed_paras['ori_sim'], self.__preprocessed_paras['HH'], hh_geo_path)
        roi_img = self.imageHandler.get_band_array(self.create_roi(hh_geo_path))
        # mask the classification to the valid ROI of the image
        cover_data_geo = cover_data_geo * roi_img
        product_dir = os.path.join(self.__workspace_processing_path, 'product\\')
        SrcImageName = os.path.split(self.__input_paras["AHV"]['ParaValue'])[1].split('.tar.gz')[0] + tar + '.tif'
        product_path = os.path.join(product_dir, SrcImageName)
        self.imageHandler.write_img(product_path, proj, geo, np.int16(cover_data_geo), no_data=0)
        # write the quick-look image
        self.imageHandler.write_quick_view(product_path, color_img=True)
        self.create_meta_file(product_path)
        # pack the product folder
        file.make_targz(self.__out_para, product_dir)
        logger.info('process_handle success!')
        logger.info('successful production of ' + EXE_NAME + ' products!')
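
    # Order of operations in process_handle, as reflected by its progress
    # logs: geocode HH -> read label csv (20%) -> T3 conversion + Lee filter
    # (30%) -> polarimetric decompositions + feature geocoding (50%) -> RF
    # training (60%) -> block-wise prediction (95%) -> morphological closing,
    # ROI masking, product tif, quick-look, metadata and tar.gz packaging.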

    def create_meta_file(self, product_path):
        # generate the product metadata file
        product_dir = os.path.join(self.__workspace_processing_path, 'product\\')
        # xml_path = "./model_meta.xml"
        tem_folder = os.path.join(self.__workspace_path, EXE_NAME, "Temporary\\")
        image_path = product_path
        out_path1 = os.path.join(tem_folder, "trans_geo_projcs.tif")
        out_path2 = os.path.join(tem_folder, "trans_projcs_geo.tif")
        SrcImageName = os.path.split(self.__input_paras["AHV"]['ParaValue'])[1].split('.tar.gz')[0]
        # par_dict = CreateDict(image_path, self.processinfo, out_path1, out_path2).calu_nature(start)
        # model_xml_path = os.path.join(tem_folder, "creat_standard.meta.xml")  # output xml path
        # CreateStadardXmlFile(xml_path, self.alg_xml_path, par_dict, model_xml_path).create_standard_xml()
        #
        # meta_xml_path = os.path.join(product_dir, EXE_NAME + "Product.meta.xml")
        # type_id_name, type_id_parent = csvh.class_landcover_list(self.__processing_paras['LabelData'])
        # CreateMetafile(self.__processing_paras['META'], self.alg_xml_path, model_xml_path, meta_xml_path).process2(type_id_name, type_id_parent, SrcImageName)
        model_path = "./product.xml"
        meta_xml_path = os.path.join(product_dir, SrcImageName + tar + ".meta.xml")
        para_dict = CreateMetaDict(image_path, self.__processing_paras['Origin_META'], product_dir,
                                   out_path1, out_path2).calu_nature()
        para_dict.update({"imageinfo_ProductName": "地表覆盖类型"})  # product name ("land cover type"); the value is written into the product xml, so it stays in Chinese
        para_dict.update({"imageinfo_ProductIdentifier": "LandCover"})
        para_dict.update({"imageinfo_ProductLevel": productLevel})
        para_dict.update({"ProductProductionInfo_BandSelection": "1,2"})
        para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": "LabelData"})
        CreateProductXml(para_dict, model_path, meta_xml_path).create_standard_xml()
        temp_folder = os.path.join(self.__workspace_path, EXE_NAME, 'Output')
        out_xml = os.path.join(temp_folder, os.path.basename(meta_xml_path))
        if os.path.exists(temp_folder) is False:
            os.mkdir(temp_folder)
        # CreateProductXml(para_dict, model_path, out_xml).create_standard_xml()
        shutil.copy(meta_xml_path, out_xml)

if __name__ == '__main__':
    multiprocessing.freeze_support()  # needed when packaged into a frozen executable
    start = datetime.datetime.now()
    main_handler = None
    try:
        if len(sys.argv) < 2:
            xml_path = 'LandCover-L-SAR.xml'
        else:
            xml_path = sys.argv[1]
        main_handler = LandCoverMain(xml_path)
        if main_handler.check_source() is False:
            raise Exception('check_source() failed!')
        if main_handler.preprocess_handle() is False:
            raise Exception('preprocess_handle() failed!')
        if main_handler.process_handle(start) is False:
            raise Exception('process_handle() failed!')
    except Exception:
        logger.exception("run-time error!")
    finally:
        # guard against the constructor itself having raised
        if main_handler is not None:
            main_handler.del_temp_workspace()
    end = datetime.datetime.now()
    msg = 'running use time: %s ' % (end - start)
    logger.info(msg)
# Model import/export with scikit-learn: https://blog.csdn.net/daxiaofan/article/details/71189015?ops_request_misc=&request_id=&biz_id=102&utm_term=scikit-learn%20%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA%E5%AF%BC%E5%85%A5&utm_medium=distribute.pc_search_result.none-task-blog-2~all~sobaiduweb~default-5-71189015.142^v59^js_top,201^v3^control_1&spm=1018.2226.3001.4187
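
# Typical invocation (LandCover-L-SAR.xml is the built-in default above; any
# algorithm xml path may be passed as the first argument instead):
#   python LandCoverMain.py LandCover-L-SAR.xml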