diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/ImageMatch.obj b/Ortho-NoS1GBM/baseTool/x64/Release/ImageMatch.obj
new file mode 100644
index 0000000..2dffc26
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/ImageMatch.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/LIBPQ.dll b/Ortho-NoS1GBM/baseTool/x64/Release/LIBPQ.dll
new file mode 100644
index 0000000..21f1003
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/LIBPQ.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/Lerc.dll b/Ortho-NoS1GBM/baseTool/x64/Release/Lerc.dll
new file mode 100644
index 0000000..29513ed
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/Lerc.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/OctreeNode.obj b/Ortho-NoS1GBM/baseTool/x64/Release/OctreeNode.obj
new file mode 100644
index 0000000..3d22e50
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/OctreeNode.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/RPC_Correct.obj b/Ortho-NoS1GBM/baseTool/x64/Release/RPC_Correct.obj
new file mode 100644
index 0000000..4b6479f
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/RPC_Correct.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.11108.write.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.11108.write.1.tlog
new file mode 100644
index 0000000..fec0179
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.11108.write.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.11472.write.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.11472.write.1.tlog
new file mode 100644
index 0000000..79c356d
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.11472.write.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.command.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.command.1.tlog
new file mode 100644
index 0000000..4727173
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.command.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.read.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.read.1.tlog
new file mode 100644
index 0000000..c45d321
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CL.read.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CopyLocal.read.1u.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CopyLocal.read.1u.tlog
new file mode 100644
index 0000000..b56a673
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CopyLocal.read.1u.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CopyLocal.write.1u.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CopyLocal.write.1u.tlog
new file mode 100644
index 0000000..605eacb
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/CopyLocal.write.1u.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/SIMOrthoProgram-S-SAR.lastbuildstate b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/SIMOrthoProgram-S-SAR.lastbuildstate
new file mode 100644
index 0000000..a4d4182
--- /dev/null
+++ b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/SIMOrthoProgram-S-SAR.lastbuildstate
@@ -0,0 +1,2 @@
+PlatformToolSet=v142:VCToolArchitecture=Native32Bit:VCToolsVersion=14.29.30133:TargetPlatformVersion=10.0.19041.0:VcpkgTriplet=x64-windows:
+Release|x64|D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\|
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/SIMOrthoProgram-S-SAR.write.1u.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/SIMOrthoProgram-S-SAR.write.1u.tlog
new file mode 100644
index 0000000..024973f
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/SIMOrthoProgram-S-SAR.write.1u.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.command.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.command.1.tlog
new file mode 100644
index 0000000..71d26aa
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.command.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.read.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.read.1.tlog
new file mode 100644
index 0000000..6b36871
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.read.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.write.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.write.1.tlog
new file mode 100644
index 0000000..e69aabe
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/link.write.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.command.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.command.1.tlog
new file mode 100644
index 0000000..4d44e42
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.command.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.read.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.read.1.tlog
new file mode 100644
index 0000000..7404a57
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.read.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.write.1.tlog b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.write.1.tlog
new file mode 100644
index 0000000..af942f2
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrtho.7722b0a9.tlog/rc.write.1.tlog differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.exe b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.exe
new file mode 100644
index 0000000..7045b35
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.exe differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.exe.recipe b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.exe.recipe
new file mode 100644
index 0000000..80ff377
--- /dev/null
+++ b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.exe.recipe
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project>
+  <ProjectOutputs>
+    <ProjectOutput>
+      <FullPath>D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\SIMOrthoProgram-S-SAR.exe</FullPath>
+    </ProjectOutput>
+  </ProjectOutputs>
+  <ContentFiles />
+  <SatelliteDlls />
+  <NonRecipeFileRefs />
+</Project>
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.pdb b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.pdb
new file mode 100644
index 0000000..934bf10
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram-S-SAR.pdb differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.obj b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.obj
new file mode 100644
index 0000000..17450ec
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.res b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.res
new file mode 100644
index 0000000..36f26e2
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.res differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.vcxproj.CopyComplete b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.vcxproj.CopyComplete
new file mode 100644
index 0000000..e69de29
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.vcxproj.FileListAbsolute.txt b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.vcxproj.FileListAbsolute.txt
new file mode 100644
index 0000000..a08de7c
--- /dev/null
+++ b/Ortho-NoS1GBM/baseTool/x64/Release/SIMOrthoProgram.vcxproj.FileListAbsolute.txt
@@ -0,0 +1,47 @@
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\SIMOrthoProgram.vcxproj.CopyComplete
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\SIMOrthoProgram-S-SAR.exe
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\concrt140.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\msvcp140.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\msvcp140_1.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\msvcp140_2.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\msvcp140_atomic_wait.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\msvcp140_codecvt_ids.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\vccorlib140.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\vcruntime140.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\vcruntime140_1.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\vcamp140.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\vcomp140.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\boost_filesystem-vc142-mt-x64-1_82.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\gdal.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\zlib1.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libcrypto-3-x64.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libssl-3-x64.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\liblzma.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\qhull_r.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\jpeg62.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\tiff.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\geotiff.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\proj.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\sqlite3.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libcurl.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libpng16.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\Lerc.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\zstd.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\gif.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\netcdf.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\hdf5_hl.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\hdf5.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libwebp.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libsharpyuv.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\LIBPQ.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\pcre2-8.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libexpat.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\libxml2.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\iconv-2.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\geos_c.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\geos.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\json-c.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\openjp2.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\spatialite.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\freexl-1.dll
+D:\estar-proj\SIMOrthoProgram-Orth_GF3-Strip-master\simorthoprogram-orth_s_sar-strip\x64\Release\minizip.dll
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/SateOrbit.obj b/Ortho-NoS1GBM/baseTool/x64/Release/SateOrbit.obj
new file mode 100644
index 0000000..b70006e
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/SateOrbit.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/baseTool.obj b/Ortho-NoS1GBM/baseTool/x64/Release/baseTool.obj
new file mode 100644
index 0000000..c3806e0
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/baseTool.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/boost_filesystem-vc142-mt-x64-1_82.dll b/Ortho-NoS1GBM/baseTool/x64/Release/boost_filesystem-vc142-mt-x64-1_82.dll
new file mode 100644
index 0000000..f5c136f
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/boost_filesystem-vc142-mt-x64-1_82.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/concrt140.dll b/Ortho-NoS1GBM/baseTool/x64/Release/concrt140.dll
new file mode 100644
index 0000000..9752449
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/concrt140.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/freexl-1.dll b/Ortho-NoS1GBM/baseTool/x64/Release/freexl-1.dll
new file mode 100644
index 0000000..9145a7d
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/freexl-1.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/gdal.dll b/Ortho-NoS1GBM/baseTool/x64/Release/gdal.dll
new file mode 100644
index 0000000..bda7c20
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/gdal.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/geos.dll b/Ortho-NoS1GBM/baseTool/x64/Release/geos.dll
new file mode 100644
index 0000000..2edcb83
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/geos.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/geos_c.dll b/Ortho-NoS1GBM/baseTool/x64/Release/geos_c.dll
new file mode 100644
index 0000000..d358b2a
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/geos_c.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/geotiff.dll b/Ortho-NoS1GBM/baseTool/x64/Release/geotiff.dll
new file mode 100644
index 0000000..f513a51
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/geotiff.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/gif.dll b/Ortho-NoS1GBM/baseTool/x64/Release/gif.dll
new file mode 100644
index 0000000..27fdc78
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/gif.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/hdf5.dll b/Ortho-NoS1GBM/baseTool/x64/Release/hdf5.dll
new file mode 100644
index 0000000..97fd469
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/hdf5.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/hdf5_hl.dll b/Ortho-NoS1GBM/baseTool/x64/Release/hdf5_hl.dll
new file mode 100644
index 0000000..7da3e86
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/hdf5_hl.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/iconv-2.dll b/Ortho-NoS1GBM/baseTool/x64/Release/iconv-2.dll
new file mode 100644
index 0000000..29c175c
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/iconv-2.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/interpolation.obj b/Ortho-NoS1GBM/baseTool/x64/Release/interpolation.obj
new file mode 100644
index 0000000..29cb9cc
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/interpolation.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/jpeg62.dll b/Ortho-NoS1GBM/baseTool/x64/Release/jpeg62.dll
new file mode 100644
index 0000000..9c1a2c0
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/jpeg62.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/json-c.dll b/Ortho-NoS1GBM/baseTool/x64/Release/json-c.dll
new file mode 100644
index 0000000..abbb168
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/json-c.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libcrypto-3-x64.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libcrypto-3-x64.dll
new file mode 100644
index 0000000..a0ee3e3
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libcrypto-3-x64.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libcurl.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libcurl.dll
new file mode 100644
index 0000000..647bdc6
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libcurl.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libexpat.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libexpat.dll
new file mode 100644
index 0000000..6cea742
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libexpat.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/liblzma.dll b/Ortho-NoS1GBM/baseTool/x64/Release/liblzma.dll
new file mode 100644
index 0000000..44372c5
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/liblzma.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libpng16.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libpng16.dll
new file mode 100644
index 0000000..c06222a
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libpng16.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libsharpyuv.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libsharpyuv.dll
new file mode 100644
index 0000000..e6c1b52
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libsharpyuv.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libssl-3-x64.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libssl-3-x64.dll
new file mode 100644
index 0000000..4856e67
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libssl-3-x64.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libwebp.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libwebp.dll
new file mode 100644
index 0000000..e758fc8
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libwebp.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/libxml2.dll b/Ortho-NoS1GBM/baseTool/x64/Release/libxml2.dll
new file mode 100644
index 0000000..1e2599f
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/libxml2.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/minizip.dll b/Ortho-NoS1GBM/baseTool/x64/Release/minizip.dll
new file mode 100644
index 0000000..44f5e99
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/minizip.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140.dll b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140.dll
new file mode 100644
index 0000000..130f84a
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_1.dll b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_1.dll
new file mode 100644
index 0000000..5c2f46d
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_1.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_2.dll b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_2.dll
new file mode 100644
index 0000000..737b70a
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_2.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_atomic_wait.dll b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_atomic_wait.dll
new file mode 100644
index 0000000..c92fcc3
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_atomic_wait.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_codecvt_ids.dll b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_codecvt_ids.dll
new file mode 100644
index 0000000..9879454
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/msvcp140_codecvt_ids.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/netcdf.dll b/Ortho-NoS1GBM/baseTool/x64/Release/netcdf.dll
new file mode 100644
index 0000000..e1cbbf1
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/netcdf.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/openjp2.dll b/Ortho-NoS1GBM/baseTool/x64/Release/openjp2.dll
new file mode 100644
index 0000000..d6c8011
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/openjp2.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/pcre2-8.dll b/Ortho-NoS1GBM/baseTool/x64/Release/pcre2-8.dll
new file mode 100644
index 0000000..d047b3c
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/pcre2-8.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/proj.db b/Ortho-NoS1GBM/baseTool/x64/Release/proj.db
new file mode 100644
index 0000000..7780dbe
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/proj.db differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/proj.dll b/Ortho-NoS1GBM/baseTool/x64/Release/proj.dll
new file mode 100644
index 0000000..f4f6147
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/proj.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/qhull_r.dll b/Ortho-NoS1GBM/baseTool/x64/Release/qhull_r.dll
new file mode 100644
index 0000000..0267c70
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/qhull_r.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/simptsn.obj b/Ortho-NoS1GBM/baseTool/x64/Release/simptsn.obj
new file mode 100644
index 0000000..7795393
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/simptsn.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/spatialite.dll b/Ortho-NoS1GBM/baseTool/x64/Release/spatialite.dll
new file mode 100644
index 0000000..165b6e1
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/spatialite.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/sqlite3.dll b/Ortho-NoS1GBM/baseTool/x64/Release/sqlite3.dll
new file mode 100644
index 0000000..65359e2
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/sqlite3.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/test_moudel.obj b/Ortho-NoS1GBM/baseTool/x64/Release/test_moudel.obj
new file mode 100644
index 0000000..d254e06
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/test_moudel.obj differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/tiff.dll b/Ortho-NoS1GBM/baseTool/x64/Release/tiff.dll
new file mode 100644
index 0000000..125e94b
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/tiff.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/vc142.pdb b/Ortho-NoS1GBM/baseTool/x64/Release/vc142.pdb
new file mode 100644
index 0000000..6868526
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/vc142.pdb differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/vcamp140.dll b/Ortho-NoS1GBM/baseTool/x64/Release/vcamp140.dll
new file mode 100644
index 0000000..9fd7179
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/vcamp140.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/vccorlib140.dll b/Ortho-NoS1GBM/baseTool/x64/Release/vccorlib140.dll
new file mode 100644
index 0000000..7194329
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/vccorlib140.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/vcomp140.dll b/Ortho-NoS1GBM/baseTool/x64/Release/vcomp140.dll
new file mode 100644
index 0000000..dbad71a
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/vcomp140.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/vcruntime140.dll b/Ortho-NoS1GBM/baseTool/x64/Release/vcruntime140.dll
new file mode 100644
index 0000000..1d6afaa
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/vcruntime140.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/vcruntime140_1.dll b/Ortho-NoS1GBM/baseTool/x64/Release/vcruntime140_1.dll
new file mode 100644
index 0000000..7bf05d3
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/vcruntime140_1.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/zlib1.dll b/Ortho-NoS1GBM/baseTool/x64/Release/zlib1.dll
new file mode 100644
index 0000000..e9300f6
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/zlib1.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/Release/zstd.dll b/Ortho-NoS1GBM/baseTool/x64/Release/zstd.dll
new file mode 100644
index 0000000..09c9050
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/Release/zstd.dll differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/calOffset/calOffset.exe b/Ortho-NoS1GBM/baseTool/x64/calOffset/calOffset.exe
new file mode 100644
index 0000000..d67b305
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/calOffset/calOffset.exe differ
diff --git a/Ortho-NoS1GBM/baseTool/x64/calOffset/models/d2_tf.pth b/Ortho-NoS1GBM/baseTool/x64/calOffset/models/d2_tf.pth
new file mode 100644
index 0000000..dbec398
Binary files /dev/null and b/Ortho-NoS1GBM/baseTool/x64/calOffset/models/d2_tf.pth differ
diff --git a/Ortho-NoS1GBM/models/d2_tf.pth b/Ortho-NoS1GBM/models/d2_tf.pth
new file mode 100644
index 0000000..dbec398
Binary files /dev/null and b/Ortho-NoS1GBM/models/d2_tf.pth differ
diff --git a/Ortho-NoS1GBM/tool/LAI/LAIProcess.cp38-win_amd64.pyd b/Ortho-NoS1GBM/tool/LAI/LAIProcess.cp38-win_amd64.pyd
new file mode 100644
index 0000000..bc5375e
Binary files /dev/null and b/Ortho-NoS1GBM/tool/LAI/LAIProcess.cp38-win_amd64.pyd differ
diff --git a/Ortho-NoS1GBM/tool/LAI/LAIProcess.pyx b/Ortho-NoS1GBM/tool/LAI/LAIProcess.pyx
new file mode 100644
index 0000000..a445ffd
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/LAI/LAIProcess.pyx
@@ -0,0 +1,166 @@
+#
+# Library of model computation routines
+#
+import cython
+cimport cython # required import
+import numpy as np
+cimport numpy as np
+from libc.math cimport pi
+from scipy.optimize import leastsq
+import random
+import logging
+logger = logging.getLogger("mylog")
+
+
+def WMCModel(param_arr,sample_lai,sample_soil,sample_inc,sample_sigma):
+ """ WMC模型 增加 归一化植被指数
+
+ Args:
+ param_arr (np.ndarray): 参数数组
+ sample_lai (double): 叶面积指数
+ sample_soil (double): 土壤含水量
+ sample_inc (double): 入射角(弧度值)
+ sample_sigma (double): 后向散射系数(线性值)
+
+ Returns:
+ double: 方程值
+ """
+ # 映射参数,方便修改模型
+ A,B,C,D,M,N=param_arr # 在这里修改模型
+ V_lai=sample_lai
+ #V_lai=E*sample_lai+F
+ exp_gamma=np.exp(-2*B*((V_lai*D+C))*(1/np.cos(sample_inc)))
+ sigma_soil=M*sample_soil+N
+ sigma_veg=A*((V_lai))*np.cos(sample_inc)
+ f_veg=1
+ result=sigma_veg*(1-exp_gamma)+sigma_soil*exp_gamma-sample_sigma
+ return result
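+# A hedged reading of the model above (inferred from this code, not from a
+# reference): the residual being driven to zero is
+#   A*V_lai*cos(inc)*(1 - gamma) + (M*soil + N)*gamma - sigma,
+# where gamma = exp(-2*B*(V_lai*D + C)/cos(inc)).
+# Example with made-up values:
+#   WMCModel(np.array([0.1,0.2,0.1,0.5,1.0,0.0]), 2.5, 0.2, 0.6, 0.05)
+# returns the residual for a single sample.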
+
+
+
+
+def train_WMCmodel(lai_water_inc_sigma_list,params_X0,train_err_image_path,draw_flag=True):
+ """ 训练模型参数
+
+ Args:
+ lai_waiter_inc_sigma_list (list): 训练模型使用的样本呢
+ """
+ def f(X):
+ eqs=[]
+ for lai_water_inc_sigma_item in lai_water_inc_sigma_list:
+ sample_lai=lai_water_inc_sigma_item[4]
+ sample_sigma=lai_water_inc_sigma_item[5] # 5: csv_sigma, 8:tiff_sigma
+ sample_soil=lai_water_inc_sigma_item[6]
+ sample_inc=lai_water_inc_sigma_item[7]
+ FVC=lai_water_inc_sigma_item[8]
+ eqs.append(WMCModel(X,sample_lai,sample_soil,sample_inc,sample_sigma))
+ return eqs
+
+    X0 = params_X0 # initial guess
+ # logger.info(str(X0))
+ h = leastsq(f, X0)
+ # logger.info(h[0],h[1])
+ err_f=f(h[0])
+    x_arr=[lai_water_inc_sigma_item[4] for lai_water_inc_sigma_item in lai_water_inc_sigma_list]
+    # Sort the samples by error magnitude
+    # logger.info("Training set:\npoint order by error\ncount:{}\nindex\terror\t sample info".format(str(np.array(err_f).shape)))
+    # for i in np.argsort(np.array(err_f)):
+    #     logger.info('{}\t{}\t{}'.format(i,err_f[i],str(lai_water_inc_sigma_list[i])))
+    # logger.info("\nend of error-ordered output\n")
+
+ if draw_flag:
+ # logger.info(err_f)
+ # logger.info(np.where(np.abs(err_f)<10))
+ from matplotlib import pyplot as plt
+ plt.scatter(x_arr,err_f)
+ plt.title("equation-err")
+ plt.savefig(train_err_image_path,dpi=600)
+ plt.show()
+
+ return h[0]
+
+def test_WMCModel(lai_waiter_inc_sigma_list,param_arr,lai_X0,test_err_image_path,draw_flag=True):
+ """ 测试模型训练结果
+
+ Args:
+ lai_waiter_inc_sigma_list (list): 测试使用的样本集
+ A (_type_): 参数A
+ B (_type_): 参数B
+ C (_type_): 参数C
+ D (_type_): 参数D
+ M (_type_): 参数M
+ N (_type_): 参数N
+ lai_X0 (_type_): 初始值
+
+ Returns:
+ list: 误差列表 [sample_lai,err,predict]
+ """
+ err=[]
+ err_f=[]
+ x_arr=[]
+ err_lai=[]
+ for lai_waiter_inc_sigma_item in lai_waiter_inc_sigma_list:
+ sample_time,sample_code,sample_lon,sample_lat,sample_lai,csv_sigma,sample_soil,sample_inc,sample_sigma=lai_waiter_inc_sigma_item
+ def f(X):
+ lai=X[0]
+ eqs=[WMCModel(param_arr,lai,sample_soil,sample_inc,csv_sigma)]
+ return eqs
+ X0=lai_X0
+ h = leastsq(f, X0)
+ temp_err=h[0]-sample_lai
+        err_lai.append(temp_err[0]) # difference between predicted and observed LAI
+        err.append([sample_lai,temp_err[0],h[0][0],sample_code])
+        err_f.append(f(h[0])[0]) # equation residual
+ x_arr.append(sample_lai)
+
+    # Sort the samples by error magnitude
+    # logger.info("Test set:\npoint order by error\ncount:{}\nindex\terror\t equation residual\tsample info".format(str(np.array(err_lai).shape)))
+    # for i in np.argsort(np.array(err_lai)):
+    #     logger.info('{}\t{}\t{}\t{}'.format(i,err_lai[i],err_f[i],str(lai_waiter_inc_sigma_list[i])))
+    # logger.info("\nend of error-ordered output\n")
+
+ if draw_flag:
+ from matplotlib import pyplot as plt
+ plt.scatter(x_arr,err_lai)
+ plt.title("equation-err")
+ plt.savefig(test_err_image_path,dpi=600)
+ plt.show()
+ return err
+
+def processs_WMCModel(param_arr,lai_X0,sigma,inc_angle,soil_water):
+
+ if(sigma<0 ):
+ return np.nan
+ def f(X):
+ lai=X[0]
+ eqs=[WMCModel(param_arr,lai,soil_water,inc_angle,sigma )]
+ return eqs
+ h = leastsq(f, [lai_X0])
+
+ return h[0][0]
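+# Minimal usage sketch (hypothetical parameter values, for illustration only;
+# param_arr would normally come from train_WMCmodel):
+#   lai = processs_WMCModel(np.array([0.1,0.2,0.1,0.5,1.0,0.0]), 2.0, 0.05, 0.6, 0.2)
+# inverts one backscatter observation (sigma=0.05, inc=0.6 rad, soil water=0.2)
+# into an LAI estimate, or returns np.nan when sigma < 0.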
+
+# Cython extension entry point
+cpdef np.ndarray[double,ndim=2] process_tiff(np.ndarray[double,ndim=2] sigma_tiff,
+ np.ndarray[double,ndim=2] inc_tiff,
+ np.ndarray[double,ndim=2] soil_water_tiff,
+ np.ndarray[double,ndim=1] param_arr,
+ double lai_X0):
+
+ cdef np.ndarray[double,ndim=2] result=sigma_tiff
+ cdef int param_arr_length=param_arr.shape[0]
+ cdef int height=sigma_tiff.shape[0]
+ cdef int width=sigma_tiff.shape[1]
+ cdef int i=0
+ cdef int j=0
+ cdef double temp=0
+
+    while i<height:
+        j=0
+        while j<width:
+            temp=processs_WMCModel(param_arr,lai_X0,sigma_tiff[i,j],inc_tiff[i,j],soil_water_tiff[i,j]) if sigma_tiff[i,j]>=0 else np.nan
+            result[i,j]=temp
+            j=j+1
+        i=i+1
+ return result
+
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/LAI/setup.py b/Ortho-NoS1GBM/tool/LAI/setup.py
new file mode 100644
index 0000000..67c737a
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/LAI/setup.py
@@ -0,0 +1,45 @@
+from setuptools import setup
+from setuptools.extension import Extension
+from Cython.Distutils import build_ext
+from Cython.Build import cythonize
+import numpy
+from pathlib import Path
+import shutil
+
+
+class MyBuildExt(build_ext):
+ def run(self):
+ build_ext.run(self)
+
+ build_dir = Path(self.build_lib)
+ root_dir = Path(__file__).parent
+ target_dir = build_dir if not self.inplace else root_dir
+
+ self.copy_file(Path('./LAIProcess') / '__init__.py', root_dir, target_dir)
+ #self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
+ self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
+ def copy_file(self, path, source_dir, destination_dir):
+ if not (source_dir / path).exists():
+ return
+ shutil.copyfile(str(source_dir / path), str(destination_dir / path))
+
+setup(
+ name="MyModule",
+ ext_modules=cythonize(
+ [
+ #Extension("pkg1.*", ["root/pkg1/*.py"]),
+ Extension("pkg2.*", ["./LAIProcess.pyx"]),
+ #Extension("1.*", ["root/*.py"])
+ ],
+ build_dir="build",
+ compiler_directives=dict(
+ always_allow_keywords=True
+ )),
+ cmdclass=dict(
+ build_ext=MyBuildExt
+ ),
+ packages=[],
+ include_dirs=[numpy.get_include()],
+)
+
+# Build command: python setup.py build_ext --inplace
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/CoordinateTransformation.py b/Ortho-NoS1GBM/tool/algorithm/algtools/CoordinateTransformation.py
new file mode 100644
index 0000000..8afe99b
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/CoordinateTransformation.py
@@ -0,0 +1,117 @@
+# -*- encoding: utf-8 -*-
+# code from https://blog.csdn.net/theonegis/article/details/54427906
+from osgeo import gdal
+from osgeo import osr
+import numpy as np
+
+def getSRSPair(dataset):
+ """
+ 获得给定数据的投影参考系和地理参考系
+ :param dataset: GDAL地理数据
+ :return: 投影参考系和地理参考系
+ """
+ prosrs = osr.SpatialReference()
+ prosrs.ImportFromWkt(dataset.GetProjection())
+ geosrs = prosrs.CloneGeogCS()
+ return prosrs, geosrs
+
+
+def geo2lonlat(dataset, x, y):
+ """
+ 将投影坐标转为经纬度坐标(具体的投影坐标系由给定数据确定)
+ :param dataset: GDAL地理数据
+ :param x: 投影坐标x
+ :param y: 投影坐标y
+ :return: 投影坐标(x, y)对应的经纬度坐标(lon, lat)
+ """
+ prosrs, geosrs = getSRSPair(dataset)
+ ct = osr.CoordinateTransformation(prosrs, geosrs)
+ coords = ct.TransformPoint(x, y)
+ return coords[:2]
+
+
+def lonlat2geo(dataset, lon, lat):
+ """
+ 将经纬度坐标转为投影坐标(具体的投影坐标系由给定数据确定)
+ :param dataset: GDAL地理数据
+ :param lon: 地理坐标lon经度
+ :param lat: 地理坐标lat纬度
+ :return: 经纬度坐标(lon, lat)对应的投影坐标
+ """
+ prosrs, geosrs = getSRSPair(dataset)
+ ct = osr.CoordinateTransformation(geosrs, prosrs)
+ coords = ct.TransformPoint(lat, lon)
+ return coords[:2]
+
+
+def imagexy2geo(dataset, row, col):
+ """
+ 根据GDAL的六参数模型将影像图上坐标(行列号)转为投影坐标或地理坐标(根据具体数据的坐标系统转换)
+ :param dataset: GDAL地理数据
+ :param row: 像素的行号
+ :param col: 像素的列号
+ :return: 行列号(row, col)对应的投影坐标或地理坐标(x, y)
+ """
+ trans = dataset.GetGeoTransform()
+ px = trans[0] + col * trans[1] + row * trans[2]
+ py = trans[3] + col * trans[4] + row * trans[5]
+ return px, py
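+# Worked example with an assumed geotransform (100.0, 0.001, 0, 30.0, 0, -0.001):
+# row=2000, col=1000 gives px = 100.0 + 1000*0.001 = 101.0
+# and py = 30.0 + 2000*(-0.001) = 28.0.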
+
+
+def geo2imagexy(dataset, x, y):
+ """
+ 根据GDAL的六 参数模型将给定的投影或地理坐标转为影像图上坐标(行列号)
+ :param dataset: GDAL地理数据
+ :param x: 投影或地理坐标x
+ :param y: 投影或地理坐标y
+ :return: 影坐标或地理坐标(x, y)对应的影像图上行列号(col, row)
+ """
+ trans = dataset.GetGeoTransform()
+ a = np.array([[trans[1], trans[2]], [trans[4], trans[5]]])
+ b = np.array([x - trans[0], y - trans[3]])
+    return np.linalg.solve(a, b) # solve the 2x2 linear system with numpy's linalg.solve
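+# Worked example (same assumed geotransform as above): x=101.0, y=28.0 gives
+# b = [1.0, -2.0], and solving the 2x2 system returns (col, row) = (1000.0, 2000.0),
+# the inverse of the imagexy2geo example.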
+
+
+def test1():
+ gdal.AllRegister()
+ tif = 'D:/DATA/testdata/GLCFCS30_E110N25.tif'
+ # dataset = gdal.Open(r"D:\\DATA\\雷达测试\\GaoFen3_20200528_HH_DB.tif")
+ dataset = gdal.Open(tif)
+
+    print('Projection:')
+    print(dataset.GetProjection())
+    print('Raster size (rows, cols):')
+ print('(%s %s)' % (dataset.RasterYSize, dataset.RasterXSize))
+
+ x = 793214.118
+ y = 2485865.527
+ lon = 113.84897082317516
+ lat = 22.453998686022448
+ row = 24576
+ col = 22540
+
+    print('Image coordinates -> projected coordinates:')
+    coords = imagexy2geo(dataset, row, col)
+    print('(%s, %s)->(%s, %s)' % (row, col, coords[0], coords[1]))
+    print('Projected coordinates -> image coordinates:')
+    coords = geo2imagexy(dataset, x, y)
+    col = coords[0]
+    row = coords[1]
+    print('(%s, %s)->(%s, %s)' % (x, y, coords[0], coords[1]))
+
+    print('Projected coordinates -> longitude/latitude:')
+    coords = geo2lonlat(dataset, x, y)
+    print('(%s, %s)->(%s, %s)' % (x, y, coords[0], coords[1]))
+    print('Longitude/latitude -> projected coordinates:')
+    coords = lonlat2geo(dataset, lon, lat)
+    print('(%s, %s)->(%s, %s)' % (lon, lat, coords[0], coords[1]))
+
+ coords1 = geo2lonlat(dataset, 657974.118, 2633321.527)
+ print(coords1)
+ coords2 = geo2lonlat(dataset, 793214.118, 2485865.527)
+ print(coords2)
+ pass
+
+# if __name__ == '__main__':
+#
+# print('done')
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/DEMJoint.py b/Ortho-NoS1GBM/tool/algorithm/algtools/DEMJoint.py
new file mode 100644
index 0000000..d3a71cd
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/DEMJoint.py
@@ -0,0 +1,156 @@
+"""
+@Project :microproduct
+@File :DEMJoint
+@Function :main function
+@Author :LMM
+@Date :2021/10/19 14:39
+@Version :1.0.0
+"""
+from osgeo import gdal, osr
+import os
+import numpy as np
+
+
+class DEMProcess:
+ """
+    DEM mosaicking and resampling
+ """
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def get_extent(fn):
+ '''
+ 原文链接:https://blog.csdn.net/XBR_2014/article/details/85255412
+ '''
+ ds = gdal.Open(fn)
+ rows = ds.RasterYSize
+ cols = ds.RasterXSize
+ # 获取图像角点坐标
+ gt = ds.GetGeoTransform()
+ minx = gt[0]
+ maxy = gt[3]
+ maxx = gt[0] + gt[1] * rows
+ miny = gt[3] + gt[5] * cols
+ return (minx, maxy, maxx, miny)
+
+ @staticmethod
+ def img_mosaic(in_files, out_dem_path):
+        # Compare the extents pairwise and keep the enclosing values,
+        # which are the four corner coordinates of the mosaicked image
+ minX, maxY, maxX, minY = DEMProcess.get_extent(in_files[0])
+ for fn in in_files[1:]:
+ minx, maxy, maxx, miny = DEMProcess.get_extent(fn)
+ minX = min(minX, minx)
+ maxY = max(maxY, maxy)
+ maxX = max(maxX, maxx)
+ minY = min(minY, miny)
+
+        # Compute the row/column counts of the output image
+        in_ds = gdal.Open(in_files[0])
+        bands_num = in_ds.RasterCount
+        gt = in_ds.GetGeoTransform()
+        rows = int((maxY - minY) / abs(gt[5]))
+        cols = int((maxX - minX) / gt[1])
+
+        # Data type of the output raster
+        datatype = gdal.GDT_UInt16
+
+        # Create the output image
+ driver = gdal.GetDriverByName('GTiff')
+ out_dem = os.path.join(out_dem_path, 'mosaic0.tif')
+ out_ds = driver.Create(out_dem, cols, rows, bands_num, datatype)
+ out_ds.SetProjection(in_ds.GetProjection())
+
+ gt = list(in_ds.GetGeoTransform())
+ gt[0], gt[3] = minX, maxY
+ out_ds.SetGeoTransform(gt)
+
+ for fn in in_files:
+ in_ds = gdal.Open(fn)
+ x_size = in_ds.RasterXSize
+ y_size = in_ds.RasterYSize
+ trans = gdal.Transformer(in_ds, out_ds, [])
+ success, xyz = trans.TransformPoint(False, 0, 0)
+ x, y, z = map(int, xyz)
+ for i in range(1, bands_num + 1):
+ data = in_ds.GetRasterBand(i).ReadAsArray()
+ out_band = out_ds.GetRasterBand(i)
+ out_data = out_band.ReadAsArray(x, y, x_size, y_size)
+ data = np.maximum(data, out_data)
+ out_band.WriteArray(data, x, y)
+
+ del in_ds, out_band, out_ds
+
+ @staticmethod
+ def dem_clip(OutFilePath, DEMFilePath, SelectArea):
+        '''
+        Clip the DEM to the selected area and write the result
+        args:
+            OutFilePath: output path of the clipped DEM
+            DEMFilePath: path of the DEM to be clipped
+            SelectArea: list [(xmin,ymax),(xmax,ymin)], the upper-left and lower-right corners of the selection
+        '''
+ DEM_ptr = gdal.Open(DEMFilePath)
+        DEM_GeoTransform = DEM_ptr.GetGeoTransform() # read the image geotransform
+        DEM_InvGeoTransform = gdal.InvGeoTransform(DEM_GeoTransform)
+        SelectAreaArrayPoints = [gdal.ApplyGeoTransform(DEM_InvGeoTransform, p[0], p[1]) for p in SelectArea]
+        SelectAreaArrayPoints = list(map(lambda p: (int(p[0]), int(p[1])), SelectAreaArrayPoints)) # to pixel coordinates
+
+ [(ulx, uly), (brx, bry)] = SelectAreaArrayPoints
+ rowCount, colCount = bry - uly, brx - ulx
+
+        # Geotransform of the output DEM
+ Out_Transfrom = list(DEM_GeoTransform)
+ Out_Transfrom[0] = SelectArea[0][0]
+ Out_Transfrom[3] = SelectArea[0][1]
+
+        # Create the output DEM
+ Bands_num = DEM_ptr.RasterCount
+ gtiff_driver = gdal.GetDriverByName('GTiff')
+ datatype = gdal.GDT_UInt16
+ out_dem = gtiff_driver.Create(OutFilePath, colCount, rowCount, Bands_num, datatype)
+ out_dem.SetProjection(DEM_ptr.GetProjection())
+ out_dem.SetGeoTransform(Out_Transfrom)
+
+ for i in range(1, Bands_num + 1):
+ data_band = DEM_ptr.GetRasterBand(i)
+ out_band = out_dem.GetRasterBand(i)
+ data = data_band.ReadAsArray(ulx, uly, colCount, rowCount)
+ out_band.WriteArray(data)
+ del out_dem
+
+ @staticmethod
+ def dem_resample(in_dem_path, out_dem_path):
+        '''
+        Resample the DEM; the default CRS is WGS84
+        args:
+            in_dem_path: path of the input DEM folder
+            out_dem_path: path of the output DEM folder
+        '''
+        # Collect all DEM files in the folder
+        dem_file_paths=[os.path.join(in_dem_path,dem_name) for dem_name in os.listdir(in_dem_path) if dem_name.find(".tif")>=0 and dem_name.find(".tif.")==-1]
+        spatialreference=osr.SpatialReference()
+        spatialreference.SetWellKnownGeogCS("WGS84") # geographic CRS, units of degrees
+        spatialproj=spatialreference.ExportToWkt() # export the CRS as WKT
+        # Mosaic the DEMs into one large virtual image
+        mergeFile =gdal.BuildVRT(os.path.join(out_dem_path,"mergeDEM.tif"), dem_file_paths)
+ out_DEM=os.path.join(out_dem_path,"mosaic.tif")
+ gdal.Warp(out_DEM,
+ mergeFile,
+ format="GTiff",
+ dstSRS=spatialproj,
+ dstNodata=-9999,
+ outputType=gdal.GDT_Float32)
+ return out_DEM
+
+
+# if __name__ == "__main__":
+# DEMProcess = DEMProcess()
+# in_dem_path = r'F:\大气延迟\out_dem'
+# out_dem_path = r'F:\大气延迟\out_dem'
+# DEMProcess.dem_resample(in_dem_path, out_dem_path)
+
+
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/MetaDataHandler.py b/Ortho-NoS1GBM/tool/algorithm/algtools/MetaDataHandler.py
new file mode 100644
index 0000000..d12f60e
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/MetaDataHandler.py
@@ -0,0 +1,179 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :ScatteringAuxData.py
+@Function :backscattering
+@Author :SHJ
+@Contact:
+@Date :2022/6/29
+@Version :1.0.0
+Revision history:
+[No.] [Date] [Author] [Change]
+ 1 2022-6-29 石海军 1. Support extracting information from both GF3 meta files and orthorectification meta files
+"""
+import json
+import logging
+from xml.etree.ElementTree import ElementTree
+import math
+
+import xmltodict
+
+logger = logging.getLogger("mylog")
+
+class GF3L1AMetaData:
+ def __init__(self):
+ pass
+ @staticmethod
+ def get_QualifyValue(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ QualifyValue = float(root.find('imageinfo').find('QualifyValue').find(polarization).text)
+ return QualifyValue
+
+ @staticmethod
+ def get_SubQualifyValue(meta_file_path, polarization, pol_id):
+ try:
+ with open(meta_file_path, 'r', encoding='utf-8') as fp:
+ HeaderFile_dom_str = fp.read()
+                HeaderFile_dom = xmltodict.parse(HeaderFile_dom_str) # convert the XML into a JSON-style dict
+ HeaderFile_dom_json = json.loads(json.dumps(HeaderFile_dom))
+ QualifyValue = float(HeaderFile_dom_json['product']['imageinfo']['QualifyValue'][pol_id][polarization])
+ return QualifyValue
+        except Exception as e:
+            raise Exception('get QualifyValue failed') from e
+
+
+ @staticmethod
+ def get_Kdb(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ Kdb = float(root.find('processinfo').find('CalibrationConst').find(polarization).text) if root.find('processinfo').find('CalibrationConst').find(polarization).text!="NULL" else 0
+ return Kdb
+
+class OrthoMetaData:
+ def __init__(self):
+ pass
+ @staticmethod
+ def get_QualifyValue(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ QualifyValue = float(root.find('l1aInfo').find('imageinfo').find('QualifyValue').find(polarization).text)
+ return QualifyValue
+
+ @staticmethod
+ def get_Kdb(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ Kdb = float(root.find('l1aInfo').find('processinfo').find('CalibrationConst').find(polarization).text)
+ return Kdb
+
+
+ @staticmethod
+ def get_RadarCenterFrequency(meta_file_path):
+        # Get the radar center frequency
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ RadarCenterFrequency = float(root.find('sensor').find('RadarCenterFrequency').text)
+ return RadarCenterFrequency
+
+
+ @staticmethod
+ def get_lamda(meta_file_path):
+        # Get the radar wavelength, in meters
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ lamda = float(root.find('sensor').find('lamda').text)
+ return lamda
+
+class MetaDataHandler:
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def get_QualifyValue(meta_file_path, polarization):
+ try:
+ QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
+ except Exception:
+ logger.warning('OrthoMetaData.get_QualifyValue() error!')
+ QualifyValue = GF3L1AMetaData.get_QualifyValue(meta_file_path, polarization)
+ logger.info('GF3L1AMetaData.get_QualifyValue() success!')
+ return QualifyValue
+
+    @staticmethod
+    def get_SubQualifyValue(meta_file_path, polarization, pol_id):
+ try:
+ QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
+ except Exception:
+ logger.warning('OrthoMetaData.get_QualifyValue() error!')
+ QualifyValue = GF3L1AMetaData.get_SubQualifyValue(meta_file_path, polarization, pol_id)
+ logger.info('GF3L1AMetaData.get_QualifyValue() success!')
+ return QualifyValue
+
+ @staticmethod
+ def get_Kdb(meta_file_path, polarization):
+ try:
+ Kdb = OrthoMetaData.get_Kdb(meta_file_path, polarization)
+ except Exception:
+ logger.warning('OrthoMetaData.get_Kdb() error!')
+ Kdb = GF3L1AMetaData.get_Kdb(meta_file_path, polarization)
+ logger.info('GF3L1AMetaData.get_Kdb() success!')
+ return Kdb
+
+ @staticmethod
+ def get_RadarCenterFrequency(meta_file_path):
+        # Get the radar center frequency, in GHz
+ RadarCenterFrequency = OrthoMetaData.get_RadarCenterFrequency(meta_file_path)
+ return RadarCenterFrequency
+
+ @staticmethod
+ def get_lamda(meta_file_path):
+        # Get the radar wavelength, in meters
+ lamda = OrthoMetaData.get_lamda(meta_file_path)
+ return lamda
+
+class Calibration:
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def get_Calibration_coefficient(meta_file_path, polarization):
+ calibration = [0, 0, 0, 0]
+ for i in polarization:
+ if i == 'HH':
+ quality = MetaDataHandler.get_QualifyValue(meta_file_path, i)
+ kdb = MetaDataHandler.get_Kdb(meta_file_path, i)
+ data_value = ((quality/32767)**2) * (10**((kdb/10)*-1))
+ calibration[0] = math.sqrt(data_value)
+ if i == 'HV':
+ quality = MetaDataHandler.get_QualifyValue(meta_file_path, i)
+ kdb = MetaDataHandler.get_Kdb(meta_file_path, i)
+ data_value = ((quality/32767)**2) * (10**((kdb/10)*-1))
+ calibration[1] = math.sqrt(data_value)
+ if i == 'VH':
+ quality = MetaDataHandler.get_QualifyValue(meta_file_path, i)
+ kdb = MetaDataHandler.get_Kdb(meta_file_path, i)
+ data_value = ((quality/32767)**2) * (10**((kdb/10)*-1))
+ calibration[2] = math.sqrt(data_value)
+ if i == 'VV':
+ quality = MetaDataHandler.get_QualifyValue(meta_file_path, i)
+ kdb = MetaDataHandler.get_Kdb(meta_file_path, i)
+ data_value = ((quality/32767)**2) * (10**((kdb/10)*-1))
+ calibration[3] = math.sqrt(data_value)
+ return calibration
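+# Worked example of the formula above (assumed values, not from a real product):
+# with QualifyValue=19000 and Kdb=25,
+#   data_value = (19000/32767)**2 * 10**(-25/10) ~= 0.3362 * 0.003162 ~= 1.063e-3,
+# so the resulting calibration coefficient is sqrt(data_value) ~= 0.0326.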
+
+
+# if __name__ == '__main__':
+# A = ScatteringAuxData()
+# dir = 'G:\MicroWorkspace\C-SAR\AuxSAR\GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485_geo/'
+# path = dir + 'GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml'
+# path1 = dir + 'OrthoProduct.meta.xml'
+# t1 = A.get_QualifyValue(path, 'HH')
+# t2 = A.get_Kdb(path, 'HH')
+# t3 = A.get_RadarCenterFrequency(path)
+# t4 = A.get_lamda(path)
+# pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/PreProcess.py b/Ortho-NoS1GBM/tool/algorithm/algtools/PreProcess.py
new file mode 100644
index 0000000..d8d64ea
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/PreProcess.py
@@ -0,0 +1,561 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :PreProcess.py
+@Function :coordinate conversion, CRS transformation, image clipping, reprojection, resampling
+@Author :LMM
+@Date :2021/8/25 14:17
+@Version :1.0.0
+"""
+from shapely.geometry import Polygon # the gdal imports must come after this line, otherwise errors occur
+
+from osgeo import gdal
+from osgeo import gdalconst
+from osgeo import osr
+from osgeo import ogr
+import os
+import cv2
+import numpy as np
+import shutil
+import scipy.spatial.transform
+import scipy.spatial.transform._rotation_groups # needed to avoid packaging errors
+import scipy.special.cython_special # needed to avoid packaging errors
+import shapefile
+from shapely.errors import TopologicalError
+from tool.algorithm.image.ImageHandle import ImageHandler
+import logging
+logger = logging.getLogger("mylog")
+
+os.environ['PROJ_LIB'] = os.getcwd()
+
+
+
+class PreProcess:
+ """
+    Preprocessing: co-register all of the images
+ """
+ def __init__(self):
+ self._ImageHandler = ImageHandler()
+ pass
+
+ def cal_scopes(self, processing_paras):
+        # Compute the ROI
+ scopes = ()
+ for key, value in processing_paras.items():
+ if 'ori_sim' in key:
+ scopes += (ImageHandler.get_scope_ori_sim(value),)
+ if(processing_paras['box'] != "" or processing_paras['box'] != "empty"):
+ scopes += self.box2scope(processing_paras['box'])
+ return scopes
+
+ def cal_scopes_roi(self, processing_paras):
+ return self.intersect_polygon(self.cal_scopes(processing_paras))
+
+ def cut_geoimg(self,workspace_preprocessing_path, para_names_geo, processing_paras):
+ self.check_img_projection(workspace_preprocessing_path, para_names_geo, processing_paras)
+        # Compute the ROI
+
+        scopes = self.cal_scopes(processing_paras)
+        # Compute the image footprints and their intersection
+        intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
+        scopes_roi = self.cal_intersect_shp(intersect_shp_path, para_names_geo, processing_paras, scopes)
+        # Clipping
+        # Clip the images: the SAR image and all other images
+ cutted_img_paths = self.cut_imgs(workspace_preprocessing_path, para_names_geo, processing_paras, intersect_shp_path)
+ return cutted_img_paths, scopes_roi
+
+
+
+ def preprocessing(self, para_names, ref_img_name, processing_paras, workspace_preprocessing_path, workspace_preprocessed_path):
+        # Read every image and check its coordinate system
+
+ self.check_img_projection(workspace_preprocessing_path, para_names, processing_paras)
+
+        # Compute the image footprints and their intersection
+ intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
+ self.cal_intersect_shp(intersect_shp_path, para_names, processing_paras,
+ self.box2scope(processing_paras['box']))
+ logger.info('create intersect shp success!')
+
+        # Clip the images: the SAR image and all other images
+ cutted_img_paths = self.cut_imgs(workspace_preprocessing_path, para_names, processing_paras,
+ intersect_shp_path)
+ logger.info('cut images success!')
+
+        # Resampling: resample to the resolution of the SAR image, then save to the temporary directory
+
+        preprocessed_paras = self.resampling_img(workspace_preprocessed_path, para_names, cutted_img_paths,cutted_img_paths[ref_img_name])
+        # Clean up the preprocessing cache files
+ logger.info('preprocess_handle success!')
+ return preprocessed_paras # cutted_img_paths
+
+ def get_ref_inf(self, ref_img_path):
+ """获取参考影像的图像信息"""
+ ref_img_path = ref_img_path
+ cols = ImageHandler.get_img_width(ref_img_path)
+ rows = ImageHandler.get_img_height(ref_img_path)
+ proj = ImageHandler.get_projection(ref_img_path)
+ geo = ImageHandler.get_geotransform(ref_img_path)
+ return ref_img_path, cols, rows, proj, geo
+
+ def check_img_projection(self, out_dir, para_names, processing_paras):
+ """
+ 读取每一张图像,检查图像坐标系;
+ 将投影坐标系影像转换为地理坐标系影像(EPSG:4326)
+ :param para_names:需要检查的参数名称
+ """
+ if len(para_names) == 0:
+ return False
+ for name in para_names:
+ proj = ImageHandler.get_projection(processing_paras[name])
+ keyword = proj.split("[", 2)[0]
+
+ if keyword == "PROJCS":
+                # projected CRS -> geographic CRS
+ para_dir = os.path.split(processing_paras[name])
+ out_para = os.path.join(out_dir, para_dir[1].split(".", 1)[0] + "_EPSG4326.tif")
+ self.trans_epsg4326(out_para, processing_paras[name])
+ processing_paras[name] = out_para
+ elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
+ raise Exception('coordinate is missing!')
+
+
+ def preprocessing_oh2004(self, para_names, processing_paras, workspace_preprocessing_path, workspace_preprocessed_path):
+        # Read every image and check its coordinate system
+
+ self.check_img_projection(workspace_preprocessing_path, para_names, processing_paras)
+
+        # Compute the image footprints and their intersection
+ intersect_shp_path = os.path.join(workspace_preprocessing_path, 'IntersectPolygon.shp')
+ scopes = self.cal_intersect_shp(intersect_shp_path, para_names, processing_paras,
+ self.box2scope(processing_paras['box']))
+ logger.info('create intersect shp success!')
+
+        # Clip the images: the SAR image and all other images
+ cutted_img_paths = self.cut_imgs(workspace_preprocessed_path, para_names, processing_paras,
+ intersect_shp_path)
+ logger.info('cut images success!')
+
+        # Resampling: resample to the resolution of the SAR image, then save to the temporary directory
+
+ return cutted_img_paths, scopes
+
+ @staticmethod
+ def lonlat2geo(lat, lon):
+ """
+ WGS84转平面坐标
+ Param: lat 为WGS_1984的纬度
+ Param: lon 为WGS_1984的经度
+ 输出转换后的坐标x,y
+ """
+
+ dstsrs1 = osr.SpatialReference()
+ dstsrs1.ImportFromEPSG(32649)
+
+ dstsrs2 = osr.SpatialReference()
+ dstsrs2.ImportFromEPSG(4326)
+
+ ct = osr.CoordinateTransformation(dstsrs2, dstsrs1)
+ coords = ct.TransformPoint(lat, lon)
+ # print("输出转换后的坐标x,y:",coords[:2])
+ return coords[:2]
+
+ @staticmethod
+ def trans_geogcs2projcs(out_path, in_path):
+ """
+ :param out_path:wgs84投影坐标影像保存路径
+ :param in_path:地理坐标影像输入路径
+ """
+ # 创建文件
+ if os.path.exists(os.path.split(out_path)[0]) is False:
+ os.makedirs(os.path.split(out_path)[0])
+ options = gdal.WarpOptions(format='GTiff', srcSRS='EPSG:4326', dstSRS='EPSG:32649')
+ gdal.Warp(out_path, in_path, options=options)
+
+    @staticmethod
+    def trans_projcs2geogcs(out_path, in_path ,EPSG_src=32649,EPSG_dst=4326):
+        """
+        :param out_path: output path of the geographic-coordinate image
+        :param in_path: input path of the projected-coordinate image
+        :param EPSG_src: source projected CRS
+        :param EPSG_dst: target CRS
+        """
+        str_EPSG_src = 'EPSG:'+ str(EPSG_src)
+        str_EPSG_dst = 'EPSG:'+ str(EPSG_dst)
+
+        # Create the output directory if needed
+        if os.path.exists(os.path.split(out_path)[0]) is False:
+            os.makedirs(os.path.split(out_path)[0])
+        options = gdal.WarpOptions(format='GTiff', srcSRS=str_EPSG_src, dstSRS=str_EPSG_dst)
+        gdal.Warp(out_path, in_path, options=options)
+
+ @staticmethod
+ def trans_epsg4326(out_path, in_path):
+ OutTile = gdal.Warp(out_path, in_path,
+ dstSRS='EPSG:4326',
+ resampleAlg=gdalconst.GRA_Bilinear
+ )
+ OutTile = None
+ return True
+
+ @staticmethod
+ def box2scope(str_box):
+ roi_box = ()
+ if str_box == '' or str_box == 'empty':
+ return roi_box
+ box_list = [float(num) for num in list(str_box.split(';'))]
+ if len(box_list) == 4:
+ roi_box = ([[box_list[2], box_list[1]], [box_list[3], box_list[1]], [box_list[2], box_list[0]],
+ [box_list[3], box_list[0]]],)
+ return roi_box
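+    # Judging from the indexing above, 'box' appears to be a semicolon-separated
+    # string "lat_min;lat_max;lon_min;lon_max", e.g. "22.4;23.1;113.2;113.9";
+    # this is an inference from the code here, not from external documentation.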
+
+ def cal_intersect_shp(self, shp_path, para_names,processing_paras, add_scope =()):
+ """
+ :param shp_path:相交区域矢量文件保存区域
+ :param para_names:判断相交影像的名称
+ :return: True or False
+ """
+ scopes = ()
+
+ if len(add_scope) != 0:
+ scopes += add_scope
+ for name in para_names:
+ scope_tuple = (self._ImageHandler.get_scope(processing_paras[name]),)
+ scopes += scope_tuple
+ for n, scope in zip( range(len(scopes)), scopes):
+ logging.info("scope" + str(n) + ":%s", scope)
+
+ intersect_polygon = self.intersect_polygon(scopes)
+ if intersect_polygon is None:
+ logger.error('image range does not overlap!')
+ raise Exception('create intersect shp fail!')
+ logging.info("scope roi :%s", intersect_polygon)
+ if self.write_polygon_shp(shp_path, intersect_polygon, 4326) is False:
+ raise Exception('create intersect shp fail!')
+ return intersect_polygon
+
+ @staticmethod
+ def intersect_polygon(scopes_tuple):
+ """
+ 功能说明:计算多边形相交的区域坐标;注意:多边形区域会转变成凸区域再求交
+ :param scopes_tuple: 输入多个区域坐标的tuple
+ :return: 多边形相交的区域坐标((x0,y0),(x1,y1),..., (xn,yn))
+ """
+ if len(scopes_tuple) < 2:
+ logger.error('len(scopes_tuple) < 2')
+            # return # todo: fix the case where a single scene makes the intersection test impossible
+
+ try:
+            # Shapely polygon; the convex hull recomputes the corner points, ordered: upper-left, lower-left, lower-right, upper-right, upper-left
+ tmp = tuple(scopes_tuple[0])
+ poly_intersect = Polygon(tmp).convex_hull
+ for i in range(len(scopes_tuple)-1):
+ polygon_next = Polygon(tuple(scopes_tuple[i+1])).convex_hull
+ if poly_intersect.intersects(polygon_next):
+ poly_intersect = poly_intersect.intersection(polygon_next)
+ else:
+                    msg = 'Image:' + str(i) + ' range does not overlap!'
+ logger.error(msg)
+ return
+ return list(poly_intersect.boundary.coords)[:-1]
+ # except shapely.geos.TopologicalError:
+ except TopologicalError:
+ logger.error('shapely.geos.TopologicalError occurred!')
+ return
+
+ @staticmethod
+ def resample_by_gdal(in_path, out_path):
+ src_ds = gdal.Open(in_path, gdal.GA_ReadOnly)
+
+        # Projection and extent of the target image
+ target_projection = src_ds.GetProjection()
+ target_geotransform = src_ds.GetGeoTransform()
+
+ x_scale = target_geotransform[1]
+ y_scale = target_geotransform[5]
+ scale = [x_scale, np.abs(y_scale)]
+ new_scale = np.max(scale)
+
+ dst_geotransform = [target_geotransform[0], new_scale, target_geotransform[2], target_geotransform[3],
+ target_geotransform[4], -new_scale]
+        target_x_size = int(src_ds.RasterXSize * x_scale / new_scale) # rescale both axes to the coarser of the two pixel sizes
+        target_y_size = int(src_ds.RasterYSize * np.abs(y_scale) / new_scale)
+
+        # Output driver
+ driver = gdal.GetDriverByName('GTiff')
+
+        # Create the output file
+ dst_ds = driver.Create(out_path, target_x_size, target_y_size, src_ds.RasterCount,
+ src_ds.GetRasterBand(1).DataType)
+ dst_ds.SetGeoTransform(dst_geotransform)
+ dst_ds.SetProjection(target_projection)
+
+        # Perform the resampling
+        gdal.ReprojectImage(src_ds, dst_ds, None, None, gdal.GRA_Bilinear) # bilinear interpolation
+
+        # Close the datasets
+ dst_ds = None
+ src_ds = None
+
+
+ @staticmethod
+ def write_polygon_shp(out_shp_path, point_list, EPSG =32649):
+ """
+ 功能说明:创建闭环的矢量文件。
+ :param out_shp_path :矢量文件保存路径
+ :param point_list :装有闭环点的列表[[x0,y0],[x1,y1]...[xn,yn]]
+ :return: True or False
+ """
+ # 为了支持中文路径,请添加下面这句代码
+ gdal.SetConfigOption("GDAL_FILENAME_IS_UTF8", "NO")
+ # 为了使属性表字段支持中文,请添加下面这句
+ gdal.SetConfigOption("SHAPE_ENCODING", "")
+ # 注册所有的驱动
+ ogr.RegisterAll()
+
+ # 创建数据,这里以创建ESRI的shp文件为例
+ str_driver_name = "ESRI Shapefile"
+ o_driver = ogr.GetDriverByName(str_driver_name)
+ if o_driver is None:
+ msg = 'driver('+str_driver_name+')is invalid value'
+ logger.error(msg)
+ return False
+
+ # 创建数据源
+ if os.path.exists(out_shp_path) and os.path.isfile(out_shp_path): # 如果已存在同名文件
+ os.remove(out_shp_path) # 则删除之
+ o_ds = o_driver.CreateDataSource(out_shp_path)
+ if o_ds is None:
+ msg = 'create file failed!' + out_shp_path
+ logger.error(msg)
+ return False
+
+ # create a polygon layer
+ srs = osr.SpatialReference()
+ srs.ImportFromEPSG(EPSG) # spatial reference from the EPSG code (4326 = WGS84 geographic)
+ o_layer = o_ds.CreateLayer("TestPolygon", srs, ogr.wkbPolygon)
+ if o_layer is None:
+ msg = 'create layer failed!'
+ logger.error(msg)
+ return False
+
+ # build the attribute table:
+ # an integer field named FieldID
+ o_field_id = ogr.FieldDefn("FieldID", ogr.OFTInteger)
+ o_layer.CreateField(o_field_id, 1)
+
+ # and a string field named FieldName with width 100
+ o_field_name = ogr.FieldDefn("FieldName", ogr.OFTString)
+ o_field_name.SetWidth(100)
+ o_layer.CreateField(o_field_name, 1)
+
+ o_defn = o_layer.GetLayerDefn()
+
+ # create the intersection-region feature
+ o_feature_rectangle = ogr.Feature(o_defn)
+ o_feature_rectangle.SetField(0, 1)
+ o_feature_rectangle.SetField(1, "IntersectRegion")
+
+ # create the linear ring
+ ring = ogr.Geometry(ogr.wkbLinearRing)
+
+ for i in range(len(point_list)):
+ ring.AddPoint(point_list[i][0], point_list[i][1])
+ ring.CloseRings()
+
+ # wrap the ring in a polygon geometry
+ geom_rect_polygon = ogr.Geometry(ogr.wkbPolygon)
+ geom_rect_polygon.AddGeometry(ring)
+
+ o_feature_rectangle.SetGeometry(geom_rect_polygon)
+ o_layer.CreateFeature(o_feature_rectangle)
+
+ o_ds.Destroy()
+ return True
+
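Assuming the enclosing class is PreProcess (which is how ROIAlg.py below imports these helpers), writing the intersection polygon to disk is a one-liner; paths are illustrative:

    points = [[113.2, 30.9], [114.0, 30.9], [114.0, 30.1], [113.2, 30.1]]
    ok = PreProcess.write_polygon_shp(r"D:\tmp\intersect.shp", points, 4326)
    print('written:', ok)  # True on success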
+ def cut_imgs(self, out_dir, para_names, processing_paras, shp_path):
+ """
+ Clip images with a vector file.
+ :param para_names: names of the images to clip
+ :param processing_paras: dict mapping each name to its image path
+ :param shp_path: shapefile used as the cutline
+ """
+ if len(para_names) == 0:
+ return {}
+ cutted_img_paths = {}
+ try:
+ for name in para_names:
+ input_path = processing_paras[name]
+ output_path = os.path.join(out_dir, name + '_cut.tif')
+ self.cut_img(output_path, input_path, shp_path)
+ cutted_img_paths.update({name: output_path})
+ logger.info('cut %s success!', name)
+ except BaseException:
+ logger.error('cut_img failed!')
+ return {}
+ return cutted_img_paths
+
+ @staticmethod
+ def cut_img(output_path, input_path, shp_path):
+ """
+ :param output_path:剪切后的影像
+ :param input_path:待剪切的影像
+ :param shp_path:矢量数据
+ :return: True or False
+ """
+ r = shapefile.Reader(shp_path)
+ box = r.bbox
+
+ input_dataset = gdal.Open(input_path)
+
+ gdal.Warp(output_path, input_dataset, format='GTiff', outputBounds=box, cutlineDSName=shp_path, dstNodata=-9999)
+ # optionally filter the cutline by attribute, e.g. cutlineWhere="FIELD = 'whatever'",
+ # and pick whatever nodata value suits the product
+ del input_dataset
+ return True
+
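gdal.Warp can also snap the output to the cutline itself rather than its bounding box; a variant sketch (paths illustrative):

    from osgeo import gdal

    # cropToCutline trims to the polygon extent and masks outside it in one call
    gdal.Warp(r"D:\tmp\clip.tif", r"D:\tmp\src.tif", format='GTiff',
              cutlineDSName=r"D:\tmp\roi.shp", cropToCutline=True, dstNodata=-9999)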
+ def resampling_img(self, out_dir, para_names, img_paths, refer_img_path):
+ """
+ 以主影像为参考,对影像重采样
+ :param para_names:需要检查的参数名称
+ :param img_paths:待重采样影像路径
+ :param refer_img_path:参考影像路径
+ """
+ if len(para_names) == 0 or len(img_paths) == 0:
+ return
+ prepro_imgs_path = {}
+ for name in para_names:
+ img_path = img_paths[name]
+ output_para = os.path.join(out_dir, name + '_preprocessed.tif')
+ self.resampling_by_scale(img_path, output_para, refer_img_path)
+ prepro_imgs_path.update({name: output_para})
+ logger.info('resampling %s success!', name)
+ return prepro_imgs_path
+
+ @staticmethod
+ def resampling_by_scale(input_path, target_file, refer_img_path):
+ """
+ 按照缩放比例对影像重采样
+ :param input_path: GDAL地理数据路径
+ :param target_file: 输出影像
+ :param refer_img_path:参考影像
+ :return: True or False
+ """
+ ref_dataset = gdal.Open(refer_img_path)
+ ref_cols = ref_dataset.RasterXSize # 列数
+ ref_rows = ref_dataset.RasterYSize # 行数
+
+ target_dataset = gdal.Open(input_path)
+ target_cols = target_dataset.RasterXSize # 列数
+ target_rows = target_dataset.RasterYSize # 行数
+
+ if(ref_cols == target_cols) and (ref_rows == target_rows):
+ shutil.copyfile(input_path, target_file)
+ return True
+
+ dataset = gdal.Open(input_path)
+ if dataset is None:
+ logger.error('resampling_by_scale:dataset is None!')
+ return False
+
+ band_count = dataset.RasterCount # number of bands
+ if (band_count == 0) or (target_file == ""):
+ logger.error("resampling_by_scale: abnormal parameters!")
+ return False
+
+ cols = dataset.RasterXSize # number of columns
+ rows = dataset.RasterYSize # number of rows
+ scale_x = ref_cols/cols
+ scale_y = ref_rows/rows
+
+ # the output takes the reference image's dimensions
+ cols = ref_cols
+ rows = ref_rows
+
+ geotrans = list(dataset.GetGeoTransform())
+ geotrans[1] = geotrans[1] / scale_x # pixel width scaled by 1/scale_x
+ geotrans[5] = geotrans[5] / scale_y # pixel height scaled by 1/scale_y
+
+ if os.path.exists(target_file) and os.path.isfile(target_file): # remove an existing image of the same name
+ os.remove(target_file)
+ if not os.path.exists(os.path.split(target_file)[0]):
+ os.makedirs(os.path.split(target_file)[0])
+
+ band1 = dataset.GetRasterBand(1)
+ data_type = band1.DataType
+ target = dataset.GetDriver().Create(target_file, xsize=cols, ysize=rows, bands=band_count,
+ eType=data_type)
+ target.SetProjection(dataset.GetProjection()) # set the projection
+ target.SetGeoTransform(geotrans) # set the geotransform
+ total = band_count + 1
+ for index in range(1, total):
+ # read the band data, resampled to the new size
+ data = dataset.GetRasterBand(index).ReadAsArray(buf_xsize=cols, buf_ysize=rows)
+ out_band = target.GetRasterBand(index)
+
+ no_data_value = dataset.GetRasterBand(index).GetNoDataValue() # carry over the nodata value
+ if no_data_value is not None:
+ out_band.SetNoDataValue(no_data_value)
+
+ out_band.WriteArray(data) # write the data into the new image
+ out_band.FlushCache()
+ out_band.ComputeBandStats(False) # compute band statistics
+ del dataset
+ del target
+ return True
+
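The resampling above rides on GDAL's read-buffer arguments: requesting a buffer of the target size makes ReadAsArray resample during the read. A toy illustration (path illustrative):

    from osgeo import gdal

    ds = gdal.Open(r"D:\tmp\src.tif")
    band = ds.GetRasterBand(1)
    # read the full band into a 100 x 100 buffer -> implicit resampling
    small = band.ReadAsArray(buf_xsize=100, buf_ysize=100)
    print(small.shape)  # (100, 100)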
+ @staticmethod
+ def cv_mean_filter(out_path, in_path, filter_size):
+ """
+ :param out_path:滤波后的影像
+ :param in_path:滤波前的影像
+ :param filter_size:滤波尺寸
+ :return: True or False
+ """
+ proj = ImageHandler.get_projection(in_path)
+ geotrans = ImageHandler.get_geotransform(in_path)
+ array = ImageHandler.get_band_array(in_path, 1)
+ array = cv2.blur(array, (filter_size, filter_size)) # mean filter
+ ImageHandler.write_img(out_path, proj, geotrans, array)
+ return True
+
+ @staticmethod
+ def check_LocalIncidenceAngle(out_tif_path, in_tif_path):
+ """
+ 将角度的无效值设置为nan,把角度值转为弧度值
+ :param out_tif_path:处理后影像路径
+ :param in_tif_path:处理前影像路径
+ """
+ proj, geo, angle = ImageHandler.read_img(in_tif_path)
+ angle = angle.astype(np.float32, order='C')
+ angle[angle == -9999] = np.nan
+
+ mean = np.nanmean(angle)
+ if mean > np.pi:
+ angle = np.deg2rad(angle)# 角度转弧度
+
+ angle[np.where(angle >= 0.5 * np.pi)] = np.nan
+ angle[np.where(angle < 0)] = np.nan
+
+ ImageHandler.write_img(out_tif_path, proj, geo, angle)
+
+
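The mean > pi test above is a heuristic: a genuine radian-valued incidence angle averages below pi, so a larger mean implies the values are still degrees. A worked check:

    import numpy as np

    angle = np.array([35.0, 42.0, -9999.0], dtype=np.float32)
    angle[angle == -9999] = np.nan
    if np.nanmean(angle) > np.pi:        # 38.5 > 3.14 -> treat as degrees
        angle = np.deg2rad(angle)
    angle[angle >= 0.5 * np.pi] = np.nan # discard angles of 90 deg and beyond
    angle[angle < 0] = np.nan
    print(angle)  # [0.6108652 0.7330383 nan]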
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/ROIAlg.py b/Ortho-NoS1GBM/tool/algorithm/algtools/ROIAlg.py
new file mode 100644
index 0000000..9a47761
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/ROIAlg.py
@@ -0,0 +1,184 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:microproduct
+@File:ROIAlg.py
+@Function:
+@Contact:
+@Author:SHJ
+@Date:2021/11/17
+@Version:1.0.0
+"""
+import logging
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.algorithm.algtools.PreProcess import PreProcess as pp
+import numpy as np
+
+
+logger = logging.getLogger("mylog")
+
+
+class ROIAlg:
+ def __init__(self,):
+ pass
+
+ @staticmethod
+ def roi_process(names, processing_path, processing_paras, preprocessed_paras):
+ roi_paths = []
+ roi = ROIAlg()
+ for name in names:
+ if 'LocalIncidenceAngle' in name:
+ # build a mask from pixels where the angle is NaN
+ pp.check_LocalIncidenceAngle(preprocessed_paras[name], preprocessed_paras[name])
+ angle_nan_mask_path = processing_path + 'angle_nan_mask.tif'
+ roi.trans_tif2mask(angle_nan_mask_path, preprocessed_paras[name], np.nan)
+ roi_paths.append(angle_nan_mask_path)
+ elif ("HH" in name) or ("HV" in name) or ("VH" in name) or ("VV" in name):
+ # build a mask from the valid extent of the image
+ tif_mask_path = processing_path + name + "_tif_mask.tif"
+ roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
+ roi_paths.append(tif_mask_path)
+ elif name == 'Covering':
+ # derive the vegetation-covered extent from the land-cover product
+ if processing_paras['CoveringIDs'] == 'empty':
+ cover_data = ImageHandler.get_data(preprocessed_paras["Covering"])
+ cover_id_list = list(np.unique(cover_data))
+ else:
+ cover_id_list = list(processing_paras['CoveringIDs'].split(';'))
+ cover_id_list = [int(num) for num in cover_id_list]
+ cover_mask_path = processing_path + "cover_mask.tif"
+ roi.trans_cover2mask(cover_mask_path, preprocessed_paras[name], cover_id_list)
+ roi_paths.append(cover_mask_path)
+ elif name == "NDVI":
+ # 利用NDVI计算裸土范围该指数的输出值在 -1.0 和 1.0 之间,大部分表示植被量,
+ # 负值主要根据云、水和雪而生成
+ # 接近零的值则主要根据岩石和裸土而生成。
+ # 较低的(小于等于 0.1)NDVI 值表示岩石、沙石或雪覆盖的贫瘠区域。
+ # 中等值(0.2 至 0.3)表示灌木丛和草地
+ # 较高的值(0.6 至 0.8)表示温带雨林和热带雨林。
+ ndvi_mask_path = processing_path + "ndvi_mask.tif"
+ ndvi_scope = list(processing_paras['NDVIScope'].split(';'))
+ threshold_of_ndvi_min = float(ndvi_scope[0])
+ threshold_of_ndvi_max = float(ndvi_scope[1])
+ roi.trans_tif2mask(ndvi_mask_path, preprocessed_paras[name], threshold_of_ndvi_min, threshold_of_ndvi_max)
+ roi_paths.append(ndvi_mask_path)
+ # else:
+ # # other feature images
+ # tif_mask_path = processing_path + name + "_mask.tif"
+ # roi.trans_tif2mask(tif_mask_path, preprocessed_paras[name], np.nan)
+ # roi_paths.append(tif_mask_path)
+
+
+ bare_land_mask_path = processing_path + "bare_land_mask.tif"
+ for roi_path in roi_paths:
+ roi.combine_mask(bare_land_mask_path, roi_path, bare_land_mask_path)
+ return bare_land_mask_path
+
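A sketch of driving roi_process (keys and paths illustrative; the parameter names follow the branches above):

    names = ['LocalIncidenceAngle', 'HH', 'NDVI', 'Covering']
    processing_paras = {'CoveringIDs': '10;20', 'NDVIScope': '0.0;0.1'}
    preprocessed_paras = {n: r'D:\tmp\{}.tif'.format(n) for n in names}
    mask_path = ROIAlg.roi_process(names, r'D:\tmp\proc/', processing_paras, preprocessed_paras)
    print(mask_path)  # D:\tmp\proc/bare_land_mask.tif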
+ @staticmethod
+ def trans_tif2mask(out_mask_path, in_tif_path, threshold_min, threshold_max=None):
+ """
+ :param out_mask_path: output mask path
+ :param in_tif_path: input path
+ :param threshold_min: lower threshold
+ :param threshold_max: upper threshold
+ :return: True or False
+ """
+ image_handler = ImageHandler()
+ proj = image_handler.get_projection(in_tif_path)
+ geotrans = image_handler.get_geotransform(in_tif_path)
+ array = image_handler.get_band_array(in_tif_path, 1)
+ if threshold_max is None and np.isnan(threshold_min):
+ # single NaN threshold: mask out NaN and -9999 pixels
+ nan = np.isnan(array)
+ mask = (nan.astype(int) == 0).astype(int)
+ mask1 = ((array == -9999).astype(int) == 0).astype(int)
+ mask *= mask1
+ image_handler.write_img(out_mask_path, proj, geotrans, mask)
+ else:
+ if threshold_min < threshold_max:
+ mask = ((array > threshold_min) & (array < threshold_max)).astype(int)
+ image_handler.write_img(out_mask_path, proj, geotrans, mask)
+ elif threshold_min > threshold_max:
+ mask = ((array < threshold_min) & (array > threshold_max)).astype(int)
+ image_handler.write_img(out_mask_path, proj, geotrans, mask)
+ elif threshold_max == threshold_min:
+ mask = ((array == threshold_min).astype(int) == 0).astype(int)
+ image_handler.write_img(out_mask_path, proj, geotrans, mask)
+
+ logger.info("trans_tif2mask success, path: %s", out_mask_path)
+ return True
+
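On a toy array the threshold branches of trans_tif2mask reduce to plain NumPy (no I/O):

    import numpy as np

    array = np.array([np.nan, -9999.0, 0.15, 0.45, 0.8])

    # NaN mode: valid = not NaN and not -9999
    valid = (~np.isnan(array)).astype(int) * (array != -9999).astype(int)
    print(valid)     # [0 0 1 1 1]

    # range mode, e.g. an NDVI window of (0.1, 0.7)
    in_range = ((array > 0.1) & (array < 0.7)).astype(int)
    print(in_range)  # [0 0 1 1 0]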
+ @staticmethod
+ def trans_cover2mask(out_mask_path, in_tif_path, cover_id_list):
+ """
+ :param out_mask_path:mask输出路径
+ :param in_tif_path:输入路径
+ :param cover_id_list 地表覆盖类型数据的id
+ :return: True or False
+ """
+ image_handler = ImageHandler()
+ proj = image_handler.get_projection(in_tif_path)
+ geotrans = image_handler.get_geotransform(in_tif_path)
+ array = image_handler.get_band_array(in_tif_path, 1)
+
+ mask = np.zeros(array.shape, dtype=bool)
+ for cover_id in cover_id_list:
+ mask_tmp = (array == cover_id)
+ mask = mask | mask_tmp
+
+ mask = mask.astype(int)
+ image_handler.write_img(out_mask_path, proj, geotrans, mask)
+
+ @staticmethod
+ def combine_mask(out_mask_path, in_main_mask_path, in_sub_mask_path):
+ """
+ :param out_mask_path:输出路径
+ :param in_main_mask_path:主mask路径,输出影像采用主mask的地理信息
+ :param in_sub_mask_path:副mask路径
+ """
+ image_handler = ImageHandler()
+ proj = image_handler.get_projection(in_main_mask_path)
+ geotrans = image_handler.get_geotransform(in_main_mask_path)
+ main_array = image_handler.get_band_array(in_main_mask_path, 1)
+ if image_handler.get_dataset(in_sub_mask_path) != None:
+ sub_array = image_handler.get_band_array(in_sub_mask_path, 1)
+ main_array = main_array * sub_array
+ image_handler.write_img(out_mask_path, proj, geotrans, main_array)
+ logger.info("combine_mask success, path: %s", out_mask_path)
+ return True
+
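Combining masks is just an elementwise product, so a pixel survives only if every mask keeps it:

    import numpy as np

    main_mask = np.array([1, 1, 0, 1])
    sub_mask = np.array([1, 0, 1, 1])
    print(main_mask * sub_mask)  # [1 0 0 1]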
+ @staticmethod
+ def cal_roi(out_tif_path, in_tif_path, mask_path, background_value=1):
+ """
+ :param out_tif_path:ROI的影像
+ :param in_tif_path:计算ROI的影像
+ :param mask_path:掩模
+ :param background_value:无效区域设置的背景值
+ :return: True or False
+ """
+ image_handler = ImageHandler()
+ proj = image_handler.get_projection(in_tif_path)
+ geotrans = image_handler.get_geotransform(in_tif_path)
+ tif_array = image_handler.get_data(in_tif_path) # 读取所有波段的像元值存为数组
+ mask_array = image_handler.get_band_array(mask_path, 1)
+ if len(tif_array.shape) == 3:
+ im_bands, im_height, im_width = tif_array.shape
+ else:
+ im_bands, (im_height, im_width) = 1, tif_array.shape
+ if im_bands == 1:
+ tif_array[np.isnan(mask_array)] = background_value
+ tif_array[mask_array == 0] = background_value
+ elif im_bands > 1:
+ for i in range(0, im_bands):
+ tif_array[i, :, :][np.isnan(mask_array)] = background_value
+ tif_array[i, :, :][mask_array == 0] = background_value
+ image_handler.write_img(out_tif_path, proj, geotrans, tif_array, '0')
+ logger.info("cal_roi success, path: %s", out_tif_path)
+ return True
+
+# if __name__ == '__main__':
+# dir = r'G:\MicroWorkspace\C-SAR\SoilMoisture\Temporary\processing/'
+# out_tif_path = dir + 'soil_moisture_roi.tif'
+# in_tif_path = dir + 'soil_moisture.tif'
+# mask_path = dir + 'bare_land_mask.tif'
+# background_value = np.nan
+# ROIAlg.cal_roi(out_tif_path, in_tif_path, mask_path, background_value)
+# pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/RieveFilter.py b/Ortho-NoS1GBM/tool/algorithm/algtools/RieveFilter.py
new file mode 100644
index 0000000..4756a76
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/RieveFilter.py
@@ -0,0 +1,57 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:sieve_filter.py
+@Function: GDAL sieve (speckle) filtering
+@Contact: 'https://www.osgeo.cn/gdal/api/gdal_alg.html?highlight=gdalsievefilter#'
+ '_CPPv415GDALSieveFilter15GDALRasterBandH15GDALRasterBandH15GDALRasterBandHiiPPc16GDALProgressFuncPv'
+@Author:SHJ
+@Date:2021/8/30 8:42
+@Version:1.0.0
+"""
+import logging
+from osgeo import gdal
+import numpy as np
+# from onestar.soilMoisture.OneMoistureImage import ImageHandler
+from tool.algorithm.image.ImageHandle import ImageHandler
+logger = logging.getLogger("mylog")
+
+
+def gdal_sieve_filter(dst_filename, src_filename, threshold=100, connectedness=4):
+ """
+ 基于python GDAL栅格滤波
+ :param dst_filename: 输出滤波后的影像
+ :param src_filename: 输入需要处理的文件
+ :param threshold: 滤波的值大小
+ :param connectedness: 连通域, 范围:4或者8
+ :return:
+ """
+ # 4表示对角像素不被视为直接相邻用于多边形成员资格,8表示对角像素不相邻
+ # connectedness = 4
+ gdal.AllRegister()
+ # print('需要处理滤波的栅格文件:{},阈值(分辨率):{}'.format(src_filename, threshold))
+ dataset = gdal.Open(src_filename, gdal.GA_Update)
+ if dataset is None:
+ logger.error('{} open tif fail!'.format(src_filename))
+ return False
+ # get the source raster band to process
+ src_band = dataset.GetRasterBand(1)
+ mask_band = src_band.GetMaskBand()
+ dst_band = src_band
+ prog_func = gdal.TermProgress_nocb
+ # run the GDAL sieve filter
+ result = gdal.SieveFilter(src_band, mask_band, dst_band, threshold, connectedness, callback=prog_func)
+ if result != 0:
+ return False
+ proj = dataset.GetProjection()
+ geotransform = dataset.GetGeoTransform()
+ dst_array = dst_band.ReadAsArray(0, 0, dst_band.XSize, dst_band.YSize)
+ ImageHandler.write_img(dst_filename, proj, geotransform, dst_array)
+ del dataset
+ return True
+
+#
+# if __name__ == '__main__':
+# inputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25.tif'
+# outputfile = r'D:\DATA\testdata\srcimg\GLCFCS30_E110N25_sieve_filter.tif'
+# flag = gdal_sieve_filter(outputfile, inputfile, threshold=100, connectedness=4)
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/ScatteringAuxData.py b/Ortho-NoS1GBM/tool/algorithm/algtools/ScatteringAuxData.py
new file mode 100644
index 0000000..6a00c8f
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/ScatteringAuxData.py
@@ -0,0 +1,122 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :ScatteringAuxData.py
+@Function :backscatter auxiliary data extraction
+@Author :SHJ
+@Contact:
+@Date :2022/6/29
+@Version :1.0.0
+Revision history:
+[No.] [Date] [Author] [Change]
+ 1 2022-6-29 Shi Haijun 1. support extracting info from both GF3 L1A metadata and ortho-corrected metadata files
+"""
+import logging
+from xml.etree.ElementTree import ElementTree
+logger = logging.getLogger("mylog")
+
+class GF3L1AMetaData:
+ def __init__(self):
+ pass
+ @staticmethod
+ def get_QualifyValue(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ QualifyValue = float(root.find('imageinfo').find('QualifyValue').find(polarization).text)
+ return QualifyValue
+
+
+ @staticmethod
+ def get_Kdb(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ Kdb = float(root.find('processinfo').find('CalibrationConst').find(polarization).text)
+ return Kdb
+
+class OrthoMetaData:
+ def __init__(self):
+ pass
+ @staticmethod
+ def get_QualifyValue(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ QualifyValue = float(root.find('l1aInfo').find('imageinfo').find('QualifyValue').find(polarization).text)
+ return QualifyValue
+
+ @staticmethod
+ def get_Kdb(meta_file_path, polarization):
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ Kdb = float(root.find('l1aInfo').find('processinfo').find('CalibrationConst').find(polarization).text)
+ return Kdb
+
+
+ @staticmethod
+ def get_RadarCenterFrequency(meta_file_path):
+ # radar centre frequency
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ RadarCenterFrequency = float(root.find('sensor').find('RadarCenterFrequency').text)
+ return RadarCenterFrequency
+
+
+ @staticmethod
+ def get_lamda(meta_file_path):
+ # radar wavelength, in metres
+ tree = ElementTree()
+ tree.parse(meta_file_path)
+ root = tree.getroot()
+ lamda = float(root.find('sensor').find('lamda').text)
+ return lamda
+
+class ScatteringAuxData:
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def get_QualifyValue(meta_file_path, polarization):
+ try:
+ QualifyValue = OrthoMetaData.get_QualifyValue(meta_file_path, polarization)
+ except Exception:
+ logger.warning('OrthoMetaData.get_QualifyValue() error!')
+ QualifyValue = GF3L1AMetaData.get_QualifyValue(meta_file_path, polarization)
+ logger.info('GF3L1AMetaData.get_QualifyValue() success!')
+ return QualifyValue
+
+ @staticmethod
+ def get_Kdb(meta_file_path, polarization):
+ try:
+ Kdb = OrthoMetaData.get_Kdb(meta_file_path, polarization)
+ except Exception:
+ logger.warning('OrthoMetaData.get_Kdb() error!')
+ Kdb = GF3L1AMetaData.get_Kdb(meta_file_path, polarization)
+ logger.info('GF3L1AMetaData.get_Kdb() success!')
+ return Kdb
+
+ @staticmethod
+ def get_RadarCenterFrequency(meta_file_path):
+ # radar centre frequency, in GHz
+ RadarCenterFrequency = OrthoMetaData.get_RadarCenterFrequency(meta_file_path)
+ return RadarCenterFrequency
+
+ @staticmethod
+ def get_lamda(meta_file_path):
+ # radar wavelength, in metres
+ lamda = OrthoMetaData.get_lamda(meta_file_path)
+ return lamda
+
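Callers go through the ScatteringAuxData facade, which tries the ortho-product layout first and falls back to the GF3 L1A layout; a sketch with an illustrative path:

    meta = r"D:\tmp\OrthoProduct.meta.xml"
    qv = ScatteringAuxData.get_QualifyValue(meta, 'HH')
    kdb = ScatteringAuxData.get_Kdb(meta, 'HH')
    print(qv, kdb)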
+# if __name__ == '__main__':
+# A = ScatteringAuxData()
+# dir = 'G:\MicroWorkspace\C-SAR\AuxSAR\GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485_geo/'
+# path = dir + 'GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml'
+# path1 = dir + 'OrthoProduct.meta.xml'
+# t1 = A.get_QualifyValue(path, 'HH')
+# t2 = A.get_Kdb(path, 'HH')
+# t3 = A.get_RadarCenterFrequency(path)
+# t4 = A.get_lamda(path)
+# pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/calculateLocalIncident/calculateLocalIncident.py b/Ortho-NoS1GBM/tool/algorithm/algtools/calculateLocalIncident/calculateLocalIncident.py
new file mode 100644
index 0000000..c2b634d
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/calculateLocalIncident/calculateLocalIncident.py
@@ -0,0 +1,414 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :CalculateIncident.py
+@Function :local incidence angle computation
+@Author :LMM
+@Date :2021/8/25 14:17
+@Version :1.0.0
+"""
+import os
+import numpy as np
+from osgeo import gdal
+from osgeo import gdalconst
+import gc
+import math
+from xml.dom import minidom # part of the standard library, no install needed
+
+
+class CalculateIncident:
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def add_round(npgrid):
+ """
+ 边缘填充一圈,然后输出填充得到的矩阵
+ param:npgrid dem数组
+ """
+ ny, nx = npgrid.shape # ny:行数,nx:列数
+ zbc = np.zeros((ny + 2, nx + 2))
+ zbc[1:-1, 1:-1] = npgrid
+ # 四边
+ zbc[0, 1:-1] = npgrid[0, :]
+ zbc[-1, 1:-1] = npgrid[-1, :]
+ zbc[1:-1, 0] = npgrid[:, 0]
+ zbc[1:-1, -1] = npgrid[:, -1]
+ # 角点
+ zbc[0, 0] = npgrid[0, 0]
+ zbc[0, -1] = npgrid[0, -1]
+ zbc[-1, 0] = npgrid[-1, 0]
+ zbc[-1, -1] = npgrid[-1, -1]
+ print("输出填充后的数组的形状", zbc.shape)
+ return zbc
+
+ @staticmethod
+ def cal_dxdy(zbc, dx):
+ """
+ 计算dx,dy
+ param:zbc填充后的数组
+ param:dx dem数据像元大小
+
+ """
+ we_x = ((zbc[1:-1, :-2]) - (zbc[1:-1, 2:])) / dx / 2 # WE方向
+ ns_y = ((zbc[2:, 1:-1]) - (zbc[:-2, 1:-1])) / dx / 2 # NS方向
+ print("输出Sx的数组的形状", we_x.shape, "输出Sy的数组的形状", ns_y.shape)
+ sx = we_x[1:-1, 1:-1]
+ sy = ns_y[1:-1, 1:-1]
+ # np.savetxt("dxdy.csv",dx,delimiter=",")
+ print("输出Sx2的数组的形状", sx.shape, "输出Sy2的数组的形状", sy.shape)
+ return sx, sy
+
+ @staticmethod
+ def cal_slopasp(dx, dy):
+ # 计算坡度\坡向
+ # 坡度计算 slope
+ slope = (np.arctan(np.sqrt(dx * dx + dy * dy))) * 57.29578 # 转换成°,57.29578=180/math.pi
+ slope = slope[1:-1, 1:-1]
+ # 坡向计算 aspect
+ aspect = np.ones([dx.shape[0], dx.shape[1]]).astype(np.float32) # 生成一个全是0的数组
+
+ # dx = dx.astype(np.float32)
+ # dy = dy.astype(np.float32)
+ # a1=(np.where(dx==0) and np.where(dy ==0))
+ # print(a1)
+ # aspect[a1]=-1
+ # a2 = (np.where(dx == 0) and np.where(dy > 0))
+ # aspect[a2] =0.0
+ # a3 = (np.where(dx == 0) and np.where(dy <0))
+ # aspect[a3] =180.0
+ # a4 = (np.where(dx > 0) and np.where(dy ==0))
+ # aspect[a4] =90.0
+ # a5 = (np.where(dx < 0) and np.where(dy ==0))
+ # aspect[a5] =270.0
+ # a6 = (np.where(dx != 0) or np.where(dy !=0))
+ # b=dy[a6]
+ # print(":", 1)
+ # aspect[a6] =float(math.atan2(dy[i, j], dx[i, j])) * 57.29578
+ # a7=np.where(aspect[a6]< 0.0)
+ # aspect[a7] = 90.0 - aspect[a7]
+ # a8=np.where(aspect[a6]> 90.0)
+ # aspect[a8] = 450.0- aspect[a8]
+ # a9 =np.where(aspect[a6] >= 0 or aspect[a6] <= 90)
+ # aspect[a9] =90.0 - aspect[a9]
+
+ for i in range(dx.shape[0]):
+ for j in range(dx.shape[1]):
+ x = float(dx[i, j])
+ y = float(dy[i, j])
+ if (x == 0.0) & (y == 0.0):
+ aspect[i, j] = -1
+ elif x == 0.0:
+ if y > 0.0:
+ aspect[i, j] = 0.0
+ else:
+ aspect[i, j] = 180.0
+ elif y == 0.0:
+ if x > 0.0:
+ aspect[i, j] = 90.0
+ else:
+ aspect[i, j] = 270.0
+ else:
+ aspect[i, j] = float(math.atan2(y, x)) * 57.29578 # atan2 returns values in (-pi, pi]
+ if aspect[i, j] < 0.0:
+ aspect[i, j] = 90.0 - aspect[i, j]
+ elif aspect[i, j] > 90.0:
+ aspect[i, j] = 450.0 - aspect[i, j]
+ else:
+ aspect[i, j] = 90.0 - aspect[i, j]
+ print("输出aspect形状:", aspect.shape) # 3599, 3599
+ print("输出aspect:", aspect)
+ return slope, aspect
+
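A spot check of the slope formula: a rise of 1 m across one 30 m pixel in x gives arctan(1/30), about 1.91 degrees:

    import numpy as np

    sx, sy = 1.0 / 30.0, 0.0
    slope_deg = np.arctan(np.sqrt(sx * sx + sy * sy)) * 57.29578
    print(round(slope_deg, 2))  # 1.91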
+ def creat_twofile(self, dem_file_path, slope_out_path, aspect_out_path):
+ """
+ 生成坡度图、坡向图
+ param: path_file1 为输入文件tif数据的文件路径
+
+ """
+ if os.path.isfile(dem_file_path):
+ print("高程数据文件存在")
+ else:
+ print("高程数据文件不存在")
+
+ dataset_caijian = gdal.Open(dem_file_path)
+ x_size = dataset_caijian.RasterXSize
+ y_size = dataset_caijian.RasterYSize
+ geo = dataset_caijian.GetGeoTransform()
+ pro = dataset_caijian.GetProjection()
+ array0 = dataset_caijian.ReadAsArray(0, 0, x_size, y_size)
+ print("输出dem数据的数组", array0)
+ zbc = self.add_round(array0)
+ sx, sy = self.cal_dxdy(zbc, 30)
+ slope, aspect = self.cal_slopasp(sx, sy)
+
+ driver = gdal.GetDriverByName("GTiff") # 创建一个数据格式
+ driver.Register()
+ newfile = driver.Create(slope_out_path, x_size, y_size, 1, gdal.GDT_Float32) # 存放路径文件名,长,宽,波段,数据类型
+ newfile.SetProjection(pro)
+ geo = [geo[0], geo[1], 0, geo[3], 0, -geo[1]]
+ newfile.SetGeoTransform(geo)
+ newfile.GetRasterBand(1).WriteArray(slope)
+
+ driver2 = gdal.GetDriverByName("GTiff") # 创建一个数据格式
+ driver2.Register()
+ newfile2 = driver2.Create(aspect_out_path, x_size, y_size, 1, gdal.GDT_Float32) # 存放路径文件名,长,宽,波段,数据类型
+ geo = [geo[0], geo[1], 0, geo[3], 0, -geo[1]]
+ newfile2.SetGeoTransform(geo)
+ newfile2.GetRasterBand(1).WriteArray(aspect)
+
+ @staticmethod
+ def resampling(input_file1, input_file2, ref_file, output_file, output_file2):
+ """
+ 采用gdal.Warp()方法进行重采样,差值法为双线性插值
+ :param input_file1 slope path
+ :param input_file2 aspect path
+ :param ref_file: 参考图像路径
+ :param output_file: slope path
+ :param output_file2 aspect path
+ :return:
+ """
+ gdal.AllRegister()
+ in_ds1 = gdal.Open(input_file1)
+ in_ds2 = gdal.Open(input_file2)
+ ref_ds = gdal.Open(ref_file, gdal.GA_ReadOnly)
+
+ # input image info
+ input_file_proj = in_ds1.GetProjection()
+ # inputefileTrans = in_ds1.GetGeoTransform()
+ reference_file_proj = ref_ds.GetProjection()
+ reference_file_trans = ref_ds.GetGeoTransform()
+
+ nbands = in_ds1.RasterCount
+ bandinputfile1 = in_ds1.GetRasterBand(1)
+ bandinputfile2 = in_ds2.GetRasterBand(1)
+ x = ref_ds.RasterXSize
+ y = ref_ds.RasterYSize
+
+ # create the resampled outputs (set projection and geotransform)
+ driver1 = gdal.GetDriverByName('GTiff')
+ output1 = driver1.Create(output_file, x, y, nbands, bandinputfile1.DataType)
+ output1.SetGeoTransform(reference_file_trans)
+ output1.SetProjection(reference_file_proj)
+ # options = gdal.WarpOptions(srcSRS=inputProj, dstSRS=referencefileProj, resampleAlg=gdalconst.GRA_Bilinear)
+ # resampleAlg = gdalconst.GRA_NearestNeighbour
+ gdal.ReprojectImage(in_ds1, output1, input_file_proj, reference_file_proj, gdalconst.GRA_Bilinear)
+
+ driver2 = gdal.GetDriverByName('GTiff')
+ output2 = driver2.Create(output_file2, x, y, nbands, bandinputfile2.DataType)
+ output2.SetGeoTransform(reference_file_trans)
+ output2.SetProjection(reference_file_proj)
+ # options = gdal.WarpOptions(srcSRS=inputProj, dstSRS=referencefileProj, resampleAlg=gdalconst.GRA_Bilinear)
+ # resampleAlg = gdalconst.GRA_NearestNeighbour
+ gdal.ReprojectImage(in_ds2, output2, input_file_proj, reference_file_proj, gdalconst.GRA_Bilinear)
+
+ @staticmethod
+ def getorbitparameter(xml_path):
+ """
+ 从轨道参数文件xml中获取升降轨信息、影像四个角的经纬度坐标
+
+ """
+ # 打开xml文档,根据路径初始化DOM
+ doc = minidom.parse(xml_path)
+ # 得到xml文档元素对象,初始化root对象
+ root = doc.documentElement
+
+ # 输出升降轨信息,DEC降轨,ASC升轨
+ direction = root.getElementsByTagName("Direction")[0]
+ # print("输出Direction的子节点列表",Direction.firstChild.data)
+ pd = direction.firstChild.data
+
+ imageinfo = root.getElementsByTagName("imageinfo")[0]
+ # topLeft latitude and longitude
+ top_left = imageinfo.getElementsByTagName("topLeft")[0]
+ latitude = top_left.getElementsByTagName("latitude")[0]
+ longitude = top_left.getElementsByTagName("longitude")[0]
+ tl_lat, tl_lon = latitude.firstChild.data, longitude.firstChild.data
+
+ # topRight latitude and longitude
+ top_right = imageinfo.getElementsByTagName("topRight")[0]
+ latitude = top_right.getElementsByTagName("latitude")[0]
+ longitude = top_right.getElementsByTagName("longitude")[0]
+ tr_lat, tr_lon = latitude.firstChild.data, longitude.firstChild.data
+
+ # bottomLeft latitude and longitude
+ bottom_left = imageinfo.getElementsByTagName("bottomLeft")[0]
+ latitude = bottom_left.getElementsByTagName("latitude")[0]
+ longitude = bottom_left.getElementsByTagName("longitude")[0]
+ bl_lat, bl_lon = latitude.firstChild.data, longitude.firstChild.data
+
+ # bottomRight latitude and longitude
+ bottom_right = imageinfo.getElementsByTagName("bottomRight")[0]
+ latitude = bottom_right.getElementsByTagName("latitude")[0]
+ longitude = bottom_right.getElementsByTagName("longitude")[0]
+ br_lat, br_lon = latitude.firstChild.data, longitude.firstChild.data
+ print("pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon:", pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat,
+ bl_lon, br_lat, br_lon)
+ return pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon
+
+ def get_rparademeter(self, xml_path):
+ """
+ 计算雷达视线向方向角R
+ """
+ pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon = self.getorbitparameter(xml_path)
+
+ tl_lat = float(tl_lat) # 原来的数是带有小数点的字符串,int会报错,使用float
+ tl_lon = float(tl_lon)
+ # tr_lat = float(tr_lat)
+ # tr_lon = float(tr_lon)
+ bl_lat = float(bl_lat)
+ bl_lon = float(bl_lon)
+ # br_lat = float(br_lat)
+ # br_lon = float(br_lon)
+
+ if pd == "DEC":
+ # descending orbit
+ b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
+ r = 270 + b
+ return r
+ # tl_lat, tl_lon = lonlat2geo(tl_lat, tl_lon)
+ # tr_lat, tr_lon = lonlat2geo(tr_lat, tr_lon)
+ # bl_lat, bl_lon = lonlat2geo(bl_lat, bl_lon)
+ # br_lat, br_lon = lonlat2geo(br_lat, br_lon)
+ # B2 = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
+ # R2 = 270 + B2
+ # print(("输出R2:", R2))
+ if pd == "ASC":
+ # ascending orbit
+ b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
+ return b
+
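For a descending pass the look azimuth comes out as 270 plus the track angle b; with made-up corner coordinates:

    import numpy as np

    tl_lat, tl_lon = 23.5, 113.2   # illustrative top-left corner
    bl_lat, bl_lon = 22.9, 113.05  # illustrative bottom-left corner
    b = np.arctan((tl_lat - bl_lat) / (tl_lon - bl_lon)) * 57.29578
    print(270 + b)  # ~345.96 for a "DEC" scene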
+ def clau(self, pathfile1, pathfile2, pathfile3, xml_path, save_localangle_path):
+ """
+ 计算局部入射角
+ param: pathfile1是slope的坡度图路径
+ param: pathfile2是aspect的坡向图路径
+ param: pathfile3是入射角文件的路径
+ param: xml_path是轨道参数文件
+ r是雷达视线向方位角
+ """
+ r = self.get_rparademeter(xml_path)
+ pd, tl_lat, tl_lon, tr_lat, tr_lon, bl_lat, bl_lon, br_lat, br_lon = self.getorbitparameter(xml_path)
+ print("输出升降轨:", pd)
+ dataset = gdal.Open(pathfile1)
+ x = dataset.RasterXSize
+ y = dataset.RasterYSize
+ print("输出slope的行、列:", x, y)
+ slope_array = dataset.ReadAsArray(0, 0, x, y)
+
+ dataset2 = gdal.Open(pathfile2)
+ x2 = dataset2.RasterXSize
+ y2 = dataset2.RasterYSize
+ print("输出aspect的行、列:", x2, y2)
+ aspect_array = dataset2.ReadAsArray(0, 0, x2, y2)
+
+ dataset3 = gdal.Open(pathfile3)
+ x3 = dataset3.RasterXSize
+ y3 = dataset3.RasterYSize
+ geo3 = dataset3.GetGeoTransform()
+ pro3 = dataset3.GetProjection()
+ print("输出入射角文件的行、列:", x3, y3)
+
+ rushe_array = dataset3.ReadAsArray(0, 0, x3, y3)
+ # b0 = np.where(rushe_array > 0.00001, 0, 1)
+ radina_value = 0
+ if pd == "DEC":
+ # 降轨数据
+ # 雷达视线角-坡度角在90度到270度之间
+ where_0 = np.where(rushe_array == 0)
+
+ bb1 = (r-aspect_array).all() and (r-aspect_array).all()
+ bb2 = np.where(90 < bb1 < 270, 1, 0)
+ b1 = (bb1 and bb2)
+ # b1 = np.where(90 < ((r-aspect_array).all()) and ((r-aspect_array).all()) < 270, 1, 0)
+ c1 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) - np.sin(slope_array*(math.pi/180)) * np.sin(
+ rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
+ d1 = b1 * c1
+ # case 2: look azimuth minus aspect exactly 90 or 270 degrees
+ b2 = np.where((r-aspect_array == 90) | (r-aspect_array == 270), 1, 0)
+ d2 = b2*c1
+ # case 3: the remaining pixels (difference within 0-90 or 270-360 degrees)
+ b3 = 1-b1-b2
+ c3 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) + np.sin(
+ slope_array*(math.pi/180)) * np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
+ d3 = b3 * c3
+ del b1, b2, b3, c3, c1
+ gc.collect()
+ radina_value = d1 + d2 + d3
+ radina_value[where_0] = 0
+ del d1, d2, d3
+ gc.collect()
+ if pd == "ASC":
+ # 升轨数据
+ # 坡度-雷达视线角在90度到270度之间
+ where_0 = np.where(rushe_array == 0)
+
+ bb1 = (r-aspect_array).all() and (r-aspect_array).all()
+ bb2 = np.where(90 < bb1 < 270, 1, 0)
+ b1 = (bb1 and bb2)
+ # b1 = np.where(90 < ((r-aspect_array).all()) and ((r-aspect_array).all()) < 270, 1, 0)
+ c1 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) + np.sin(
+ slope_array*(math.pi/180)) * np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
+ d1 = b1 * c1
+ # case 2: aspect minus look azimuth exactly 90 or 270 degrees
+ b2 = np.where((aspect_array-r == 90) | (aspect_array-r == 270), 1, 0)
+ d2 = b2 * c1
+ # case 3: aspect minus look azimuth within 0-90 or 270-360 degrees
+ b3 = 1 - b1-b2
+ c3 = np.cos(rushe_array*(math.pi/180)) * np.cos(slope_array*(math.pi/180)) - np.sin(slope_array*(math.pi/180)) *\
+ np.sin(rushe_array*(math.pi/180)) * np.cos((r - aspect_array)*(math.pi/180))
+ d3 = b3 * c3
+ radina_value = d1 + d2 + d3
+ radina_value[where_0] = 0
+ del b1, b2, b3, c3, c1, d1, d2, d3
+ gc.collect()
+ jubu_o = 57.29578 * np.arccos(radina_value)
+ print("输出局部入射角", jubu_o)
+ driver = gdal.GetDriverByName("GTiff") # 创建一个数据格式
+ driver.Register()
+ newfile = driver.Create(save_localangle_path, x3, y3, 1, gdal.GDT_Float32) # 存放路径文件名,长,宽,波段,数据类型
+ newfile.SetProjection(pro3)
+ newfile.SetGeoTransform(geo3)
+ newfile.GetRasterBand(1).WriteArray(jubu_o)
+
+ def localangle(self, dem_path, incidence_angle_path, orbital_parameters_path):
+ """
+ 获取输入文件的路径
+ 计算坡度图、坡向图
+ 计算局部入射角
+ """
+ para_names = ["Dem", "IncidenceAngle", "OrbitalParameters", "经验A"]
+ if len(para_names) == 0:
+ return False
+ # 获取三个文件的路径
+
+ # print("输出三个文件路径",Dem_path,IncidenceAngle_path,OrbitalParameters_path)
+ # 确定坡度、坡向的输出路径,输出坡度、坡向图
+ slope_out_path = r"D:\MicroWorkspace\LeafAreaIndex\Temporary\UnClipslope.tif"
+ aspect_out_path = r"D:\MicroWorkspace\LeafAreaIndex\Temporary\UnClipaspect.tif"
+ print("slope_out_path的路径是", slope_out_path)
+ print("aspect_out_path的路径是", aspect_out_path)
+ self.creat_twofile(dem_path, slope_out_path, aspect_out_path)
+ # clip and resample the slope/aspect maps to match the incidence-angle file
+ slope_out_path2 = r"D:\MicroWorkspace\LocaLangle\Temporary\Clipslope.tif"
+ aspect_out_path2 = r"D:\MicroWorkspace\LocaLangle\Temporary\Clipaspect.tif"
+ self.resampling(slope_out_path, aspect_out_path, incidence_angle_path, slope_out_path2, aspect_out_path2)
+
+ # write the local incidence angle file
+ save_localangle_path = r"D:\MicroWorkspace\LocaLangle\Temporary\localangle.tif"
+ self.clau(slope_out_path2, aspect_out_path2, incidence_angle_path,
+ orbital_parameters_path, save_localangle_path)
+
+
+# if __name__ == '__main__':
+# calu_incident = CalculateIncident()
+# Dem_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\dem.tif"
+# IncidenceAngle_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\RSJ.tif"
+# OrbitalParameters_path = "D:\\MicroWorkspace\\LocaLangle\\Input\\" \
+# "GF3_KAS_FSII_020008_E113.2_N23.1_20200528_L1A_HHHV_L10004829485.meta.xml"
+# calu_incident.localangle(Dem_path, IncidenceAngle_path, OrbitalParameters_path)
+# print('done')
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter.py b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter.py
new file mode 100644
index 0000000..b6c4973
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter.py
@@ -0,0 +1,302 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:lee_filter.py
+@Function:lee_filter
+@Contact: https://github.com/PyRadar/pyradar
+@Author:SHJ
+@Date:2021/8/30 8:42
+@Version:1.0.0
+"""
+
+import os
+import math
+import shutil
+import logging
+import multiprocessing
+import numpy as np
+from PIL import Image
+from tool.algorithm.block.blockprocess import BlockProcess
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.file.fileHandle import fileHandle
+from tool.algorithm.algtools.filter import lee_Filter_c as lee_Filter_c
+
+logger = logging.getLogger("mylog")
+file = fileHandle(False)
+COEF_VAR_DEFAULT = 0.01
+CU_DEFAULT = 0.25
+
+
+class Filter:
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def assert_window_size(win_size):
+ """
+ Asserts invalid window size.
+ Window size must be odd and bigger than 3.
+ """
+ assert win_size >= 3, 'ERROR: win size must be at least 3'
+
+ if win_size % 2 == 0:
+ print('It is highly recommended to use odd window sizes. '
+ 'You provided %s, an even number.' % (win_size, ))
+
+ @staticmethod
+ def assert_indices_in_range(width, height, xleft, xright, yup, ydown):
+ """
+ Asserts index out of image range.
+ """
+
+ # assert xleft >= 0 and xleft <= width, \
+ assert 0 <= xleft <= width, \
+ "index xleft:%s out of range (%s<= xleft < %s)" % (xleft, 0, width)
+
+ # assert xright >= 0 and xright <= width, \
+ assert 0 <= xright <= width, "index xright:%s out of range (%s<= xright < %s)" % (xright, 0, width)
+
+ # assert yup >= 0 and yup <= height, \
+ assert 0 <= yup <= height, "index yup:%s out of range. (%s<= yup < %s)" % (yup, 0, height)
+
+ # assert ydown >= 0 and ydown <= height, \
+ assert 0 <= ydown <= height, "index ydown:%s out of range. (%s<= ydown < %s)" % (ydown, 0, height)
+
+ @staticmethod
+ def weighting(window, cu=CU_DEFAULT):
+ """
+ Computes the weighthing function for Lee filter using cu as the noise
+ coefficient.
+ """
+ # cu is the noise variation coefficient
+ two_cu = cu * cu
+
+ # ci is the variation coefficient in the window
+ window_mean = window.mean()
+ window_std = window.std()
+ ci = window_std / window_mean
+
+ two_ci = ci * ci
+
+ if not two_ci: # dirty patch to avoid zero division
+ two_ci = COEF_VAR_DEFAULT
+
+ if cu > ci:
+ w_t = 0.0
+ else:
+ w_t = 1.0 - (two_cu / two_ci)
+
+ return w_t
+
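A worked instance of the weighting function: with cu = 0.25 and a window whose coefficient of variation ci is 1/3, the filter keeps 43.75% of the centre pixel and fills the rest with the window mean:

    import numpy as np

    cu = 0.25
    window = np.array([[1.0, 2.0], [1.0, 2.0]])  # mean 1.5, std 0.5
    ci = window.std() / window.mean()            # 1/3
    w_t = 0.0 if cu > ci else 1.0 - (cu * cu) / (ci * ci)
    print(w_t)  # 0.4375 -> new_pix = 0.4375*pix + 0.5625*window_mean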
+ def lee_filter(self, in_path, out_path, win_size):
+ """
+ Apply lee to a numpy matrix containing the image, with a window of
+ win_size x win_size.
+ """
+ cu = CU_DEFAULT
+ self.assert_window_size(win_size)
+ # img = self.ImageHandler.get_band_array(img, 1)
+ array1 = Image.open(in_path)
+ img = np.array(array1)
+ # we process the entire img as float64 to avoid type overflow error
+ img = np.float64(img)
+ img_filtered = np.zeros_like(img)
+ # n, m = img.shape
+ # win_offset = win_size / 2
+ #
+ # for i in range(0, n):
+ # xleft = i - win_offset
+ # xright = i + win_offset
+ #
+ # if xleft < 0:
+ # xleft = 0
+ # if xright >= n:
+ # xright = n
+ #
+ # for j in range(0, m):
+ # yup = j - win_offset
+ # ydown = j + win_offset
+ #
+ # if yup < 0:
+ # yup = 0
+ # if ydown >= m:
+ # ydown = m
+ #
+ # self.assert_indices_in_range(n, m, xleft, xright, yup, ydown)
+ #
+ # pix_value = img[i, j]
+ #
+ # window = img[math.ceil(xleft):int(xright)+1, math.ceil(yup):int(ydown)+1]
+ # w_t = self.weighting(window, cu)
+ # window_mean = window.mean()
+ # new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
+ #
+ # if not new_pix_value > 0:
+ # new_pix_value = 0
+ # img_filtered[i, j] = round(new_pix_value)
+ # # return img_filtered
+ self.lee_filter_array(img, img_filtered, win_size)
+ out_image = Image.fromarray(img_filtered)
+ out_image.save(out_path)
+ print("lee_filter finish! path:" + out_path)
+ return True
+
+ @staticmethod
+ def lee_filter_array(in_arry, out_arry, win_size):
+ """
+ Apply lee to a numpy matrix containing the image, with a window of
+ win_size x win_size.
+ """
+ f = Filter()
+ #cu = CU_DEFAULT
+ f.assert_window_size(win_size)
+ img = in_arry
+ # we process the entire img as float64 to avoid type overflow error
+ img = np.float64(img)
+ img = img + 100
+
+ # lee_filter_array(np.ndarray[double,ndim=2] img,np.ndarray[double,ndim=2] out_arryint win_offset,int win_size):
+ newOUt=lee_Filter_c.lee_filter_array(img,out_arry,win_size)
+ newOUt=newOUt-100
+ out_arry[:,:]=newOUt[:,:]
+
+ # def lee_filter_array(self, in_arry, out_arry, win_size):
+ # """
+ # Apply lee to a numpy matrix containing the image, with a window of
+ # win_size x win_size.
+ # """
+ # cu = CU_DEFAULT
+ # self.assert_window_size(win_size)
+ # img = in_arry
+ # # we process the entire img as float64 to avoid type overflow error
+ # img = np.float64(img)
+ # img = img + 100
+ # img_filtered = np.zeros_like(img)
+ # n, m = img.shape
+ # win_offset = win_size / 2
+ #
+ # for i in range(0, n):
+ # xleft = i - win_offset
+ # xright = i + win_offset
+ #
+ # if xleft < 0:
+ # xleft = 0
+ # if xright >= n:
+ # xright = n
+ #
+ # for j in range(0, m):
+ # yup = j - win_offset
+ # ydown = j + win_offset
+ #
+ # if yup < 0:
+ # yup = 0
+ # if ydown >= m:
+ # ydown = m
+ #
+ # self.assert_indices_in_range(n, m, xleft, xright, yup, ydown)
+ #
+ # pix_value = img[i, j]
+ #
+ # window = img[math.ceil(xleft):int(xright)+1, math.ceil(yup):int(ydown)+1]
+ # w_t = self.weighting(window, cu)
+ # window_mean = window.mean()
+ # new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
+ #
+ # if not new_pix_value > 0:
+ # new_pix_value = 0
+ # out_arry[i, j] = round(new_pix_value)
+ # out_arry = out_arry - 100
+ #
+
+ def lee_filter_multiprocess(self, in_paths, out_paths, win_size=3, processes_num=10):
+ if len(in_paths) != len(out_paths):
+ return False
+ # fan the files out over a process pool
+ pool = multiprocessing.Pool(processes=processes_num)
+ pl = []
+ for i in range(len(in_paths)):
+ # self.lee_filter(in_paths[i], out_paths[i], win_size) # serial fallback
+ pl.append(pool.apply_async(self.lee_filter, (in_paths[i], out_paths[i], win_size)))
+ print("lee_filter running! path:" + in_paths[i])
+ pool.close()
+ pool.join()
+ return True
+
+
+ def lee_filter_block_multiprocess(self, in_path, out_path, win_size=3):
+ in_name = os.path.basename(in_path)
+ out_name = os.path.basename(out_path)
+ outDir = os.path.split(out_path)[0]
+ # create the working folders
+ src_path = os.path.join(outDir, "src_img")
+ block_path = os.path.join(outDir, "block")
+ block_filtered = os.path.join(outDir, "block_filtered")
+ file.creat_dirs([src_path, block_path, block_filtered])
+
+ shutil.copyfile(in_path, os.path.join(src_path, in_name))
+ cols = ImageHandler.get_img_width(in_path)
+ rows = ImageHandler.get_img_height(in_path)
+ # split into blocks
+ bp = BlockProcess()
+ block_size = bp.get_block_size(rows, cols)
+ bp.cut(src_path, block_path, ['tif', 'tiff'], 'tif', block_size)
+ logger.info('blocking tifs success!')
+
+ img_dir, img_name = bp.get_file_names(block_path, ['tif'])
+ dir_dict = bp.get_same_img(img_dir, img_name)
+
+ img_path_list = [value for value in dir_dict.values()][0]
+
+ processes_num = min([len(img_path_list), multiprocessing.cpu_count() - 1])
+
+ out_img_path_list = []
+ for in_path in img_path_list:
+ suffix = bp.get_suffix(os.path.basename(in_path))
+ out_path = os.path.join(block_filtered, out_name.replace('.tif', suffix))
+ out_img_path_list.append(out_path)
+
+ self.lee_filter_multiprocess(img_path_list, out_img_path_list, win_size=win_size, processes_num=processes_num)
+ # 开启多进程处理
+ # pool = multiprocessing.Pool(processes=processes_num)
+ #
+ # for i in range(len(hh_list)):
+ # block_img_path = hh_list[i]
+ # suffix = bp.get_suffix(os.path.basename(hh_list[i]))
+ # filed_block_img_path = os.path.join(block_filtered,out_name.replace('.tif',suffix))
+ # pool.apply_async(self.lee_filter, (block_img_path, filed_block_img_path, win_size))
+ # print("lee_filter runing! path:" + block_img_path)
+ # logger.info('total:%s, block:%s lee_filter!', len(hh_list), i)
+ #
+ # pool.close()
+ # pool.join()
+ # merge the filtered blocks back into one image
+ bp.combine(block_filtered, cols, rows, outDir, file_type=['tif'], datetype='float32')
+
+ file.del_folder(src_path)
+ file.del_folder(block_path)
+ file.del_folder(block_filtered)
+ pass
+
+ def lee_process_sar(self, in_sar, out_sar, win_size, noise_var):
+ '''
+ Delegate Lee filtering to the external SIMOrthoProgram executable, mode 12:
+ SIMOrthoProgram.exe 12 in_sar_path out_sar_path win_size noise_var
+ '''
+ exe_path = r".\baseTool\x64\Release\SIMOrthoProgram-S-SAR.exe"
+ exe_cmd = r"set PROJ_LIB=.\baseTool\x64\Release; & {0} {1} {2} {3} {4} {5}".format(exe_path, 12, in_sar,
+ out_sar, win_size, noise_var)
+ print(exe_cmd)
+ print(os.system(exe_cmd))
+ print("==========================================================================")
+
+
+if __name__ == '__main__':
+ # example 1:
+ # path = r"I:\MicroWorkspace\product\C-SAR\LeafAreaIndex\Temporary\cai_sartif\HV_0_512_0_512.tif"
+ # f = Filter()
+ # f.lee_filter(path, path, 3)
+ # example 2:
+ f = Filter()
+ f.lee_filter_block_multiprocess(r'I:\preprocessed\HH.tif', r'I:\preprocessed\HHf.tif')
+ pass
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter/lee_Filter_c.pyx b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter/lee_Filter_c.pyx
new file mode 100644
index 0000000..413b8a6
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter/lee_Filter_c.pyx
@@ -0,0 +1,124 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:lee_filter.py
+@Function:lee_filter
+@Contact: https://github.com/PyRadar/pyradar
+@Author:SHJ
+@Date:2021/8/30 8:42
+@Version:1.0.0
+"""
+
+import os
+cimport cython # required import
+import numpy as np # both the Python-level and C-level numpy must be declared
+cimport numpy as np
+from libc.math cimport pi
+from libc.math cimport atan as math_atan
+from libc.math cimport log10 as math_log10
+from libc.math cimport log as math_log
+from libc.math cimport floor as math_floor
+from libc.math cimport sqrt as math_sqrt
+from libc.math cimport exp as math_exp
+from libc.math cimport sin as math_sin
+from libc.math cimport cos as math_cos
+from libc.math cimport tan as math_tan
+from libc.math cimport asin as math_asin
+from libc.math cimport acos as math_acos
+from libc.math cimport sinh as math_sinh
+from libc.math cimport cosh as math_cosh
+from libc.math cimport tanh as math_tanh
+from libc.math cimport ceil as math_ceil
+from libc.math cimport lround as math_round
+
+cdef double COEF_VAR_DEFAULT = 0.01
+cdef double CU_DEFAULT = 0.25
+
+
+cdef int ceil_usr(double v):
+ return int(math_ceil(v))
+
+
+
+cdef double weighting(np.ndarray[double,ndim=2] window,double cu):
+ """
+ Computes the weighting function for the Lee filter using cu as the noise
+ coefficient.
+ """
+ # cu is the noise variation coefficient
+ cdef double two_cu = cu * cu
+
+ # ci is the variation coefficient in the window
+ cdef double window_mean = window.mean()
+ cdef double window_std = window.std()
+ cdef double ci = window_std / window_mean
+
+ cdef double two_ci = ci * ci
+ cdef double w_t=0;
+ if two_ci==0: # dirty patch to avoid zero division (the original test was inverted)
+ two_ci = COEF_VAR_DEFAULT
+
+ if cu > ci:
+ w_t = 0.0
+ else:
+ w_t = 1.0 - (two_cu / two_ci)
+
+ return w_t
+
+cpdef np.ndarray[double,ndim=2] lee_filter_array(np.ndarray[double,ndim=2] img,np.ndarray[double,ndim=2] out_arry,int win_size):
+ """
+ Apply lee to a numpy matrix containing the image, with a window of
+ win_size x win_size.
+ """
+ # we process the entire img as float64 to avoid type overflow error
+ #n, m = img.shape
+ cdef double cu = CU_DEFAULT
+ cdef int i=0
+ cdef int j=0
+ cdef int xleft=0
+ cdef int xright=0
+ cdef int yup=0
+ cdef int ydown=0
+ cdef np.ndarray[double,ndim=2] window;
+ cdef double w_t=0;
+ cdef double window_mean=0;
+ cdef double new_pix_valu=0;
+ cdef int n = img.shape[0]
+ cdef int m=img.shape[1]
+ cdef int win_offset=int(win_size/2)
+
+ while i<n:
+ xleft = i - win_offset
+ xright = i + win_offset
+ if xleft < 0:
+ xleft = 0
+ if xright >= n:
+ xright = n
+ j=0
+ while j<m:
+ yup = j - win_offset
+ ydown = j + win_offset
+ if yup < 0:
+ yup = 0
+ if ydown >= m:
+ ydown = m
+
+ pix_value = img[i, j]
+
+ window = img[xleft:xright+1, yup:ydown+1]
+
+ w_t = weighting(window, cu)
+
+ window_mean = np.mean(window)
+ new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))
+
+ if not new_pix_value > 0:
+ new_pix_value = 0
+ out_arry[i, j] = round(new_pix_value*100000.0)/100000.0
+ j=j+1
+ i=i+1
+ return out_arry
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter_c.cp38-win_amd64.pyd b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter_c.cp38-win_amd64.pyd
new file mode 100644
index 0000000..0ed7504
Binary files /dev/null and b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/lee_Filter_c.cp38-win_amd64.pyd differ
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/filter/setup.py b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/setup.py
new file mode 100644
index 0000000..184d396
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/filter/setup.py
@@ -0,0 +1,45 @@
+from setuptools import setup
+from setuptools.extension import Extension
+from Cython.Distutils import build_ext
+from Cython.Build import cythonize
+import numpy
+from pathlib import Path
+import shutil
+
+
+class MyBuildExt(build_ext):
+ def run(self):
+ build_ext.run(self)
+
+ build_dir = Path(self.build_lib)
+ root_dir = Path(__file__).parent
+ target_dir = build_dir if not self.inplace else root_dir
+
+ self.copy_file(Path('./lee_Filter') / '__init__.py', root_dir, target_dir)
+ #self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
+ self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
+ def copy_file(self, path, source_dir, destination_dir):
+ if not (source_dir / path).exists():
+ return
+ shutil.copyfile(str(source_dir / path), str(destination_dir / path))
+
+setup(
+ name="MyModule",
+ ext_modules=cythonize(
+ [
+ #Extension("pkg1.*", ["root/pkg1/*.py"]),
+ Extension("pkg2.*", ["./lee_Filter/lee_Filter_c.pyx"]),
+ #Extension("1.*", ["root/*.py"])
+ ],
+ build_dir="build",
+ compiler_directives=dict(
+ always_allow_keywords=True
+ )),
+ cmdclass=dict(
+ build_ext=MyBuildExt
+ ),
+ packages=[],
+ include_dirs=[numpy.get_include()],
+)
+
+# build command: python setup.py build_ext --inplace
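After building in place, the compiled extension imports like any module; a quick smoke test (array size illustrative, the +100 offset mirrors the wrapper in lee_Filter.py):

    import numpy as np
    from tool.algorithm.algtools.filter import lee_Filter_c

    img = np.random.rand(64, 64) + 100.0
    out = np.zeros_like(img)
    lee_Filter_c.lee_filter_array(img, out, 3)
    print(out.mean())  # smoothed copy of img (the offset removal is done by the wrapper)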
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/logHandler.py b/Ortho-NoS1GBM/tool/algorithm/algtools/logHandler.py
new file mode 100644
index 0000000..54cdd23
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/logHandler.py
@@ -0,0 +1,90 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :logHandler.py
+@Function :log checking and creation
+@Author :SHJ
+@Date :2021/12/1
+@Version :1.0.0
+"""
+import logging
+import os
+import time
+import datetime
+
+
+class LogHandler:
+ """
+ Create and configure the shared logger.
+ """
+ __logger = logging.getLogger("mylog")
+ __format_str = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] - %(module)s.%(funcName)s "
+ "(%(filename)s:%(lineno)d) - %(message)s")
+ __log_path = None
+
+ @staticmethod
+ def init_log_handler(log_name):
+ """
+ 初始化日志
+ :param log_name: 日志保存的路径和名称
+ :return:
+ """
+ path = os.getcwd()
+ current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
+ LogHandler.__log_path = os.path.join(path, log_name + current_time + ".log")
+ para_dir = os.path.split(LogHandler.__log_path)
+ if not os.path.exists(para_dir[0]):
+ os.makedirs(para_dir[0])
+ # delete log files older than seven days
+ LogHandler.delete_outdate_files(para_dir[0])
+
+ # option 1: plain file + console logging
+ log_format = "[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s" \
+ " (%(filename)s:Line%(lineno)d) "
+ date_format = "%m/%d/%Y %H:%M:%S"
+ fp = logging.FileHandler(LogHandler.__log_path, encoding='utf-8')
+ fs = logging.StreamHandler()
+ logging.basicConfig(level=logging.INFO, format=log_format, datefmt=date_format, handlers=[fp, fs])
+
+ # option 2: rotating log (kept for reference)
+ # LogHandler.__logger.setLevel(logging.DEBUG)
+ # th = handlers.TimedRotatingFileHandler(filename=LogHandler.__log_path, when='S', interval=1,
+ # backupCount=2, encoding='utf-8')
+ # th.suffix = "%Y-%m-%d-%H-%M-%S.log"
+ # th.setFormatter(LogHandler.__format_str)
+ # th.setLevel(level=logging.DEBUG)
+
+ # console = logging.StreamHandler()
+ # console.setLevel(logging.INFO)
+ # LogHandler.__logger.addHandler(console)
+ # LogHandler.__logger.addHandler(th)
+
+ @staticmethod
+ def delete_outdate_files(path, date_interval=7):
+ """
+ 删除目录下七天前创建的文件
+ """
+ current_time = time.strftime("%Y-%m-%d", time.localtime(time.time()))
+ current_time_list = current_time.split("-")
+ current_time_day = datetime.datetime(int(current_time_list[0]), int(current_time_list[1]),
+ int(current_time_list[2]))
+ for root, dirs, files in os.walk(path):
+ for item in files:
+ item_format = item.split(".", 2)
+ if item_format[1] == "log":
+ file_path = os.path.join(root, item)
+ create_time = time.strftime("%Y-%m-%d", time.localtime((os.stat(file_path)).st_mtime))
+ create_time_list = create_time.split("-")
+ create_time_day = datetime.datetime(int(create_time_list[0]), int(create_time_list[1]),
+ int(create_time_list[2]))
+ time_difference = (current_time_day - create_time_day).days
+ if time_difference > date_interval:
+ os.remove(file_path)
+
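The age test reduces to date arithmetic on st_mtime; an equivalent sketch (file name illustrative):

    import datetime
    import os

    age_days = (datetime.date.today()
                - datetime.date.fromtimestamp(os.stat('run.log').st_mtime)).days
    if age_days > 7:
        os.remove('run.log')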
+#
+# if __name__ == "__main__":
+# # eg2:
+# log_handler = LogHandler()
+# log_handler.init_log_handler(r"run_log\myrun1")
+# logging.warning("1")
+# print("done")
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004.cp38-win_amd64.pyd b/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004.cp38-win_amd64.pyd
new file mode 100644
index 0000000..648d72f
Binary files /dev/null and b/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004.cp38-win_amd64.pyd differ
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004/Oh2004_inversion.py b/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004/Oh2004_inversion.py
new file mode 100644
index 0000000..9a6648a
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004/Oh2004_inversion.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Mar 14 18:53:14 2021
+
+@author: Dipankar
+References
+----------
+Oh (2004): Quantitative retrieval of soil moisture content and surface roughness from multipolarized radar observations of bare soil surface. IEEE TGRS 42(3). 596-601.
+"""
+
+ # ---------------------------------------------------------------------------------------
+ # Copyright (C) 2021 by Microwave Remote Sensing Lab, IITBombay http://www.mrslab.in
+
+ # This program is free software; you can redistribute it and/or modify it
+ # under the terms of the GNU General Public License as published by the Free
+ # Software Foundation; either version 3 of the License, or (at your option)
+ # any later version.
+ # This program is distributed in the hope that it will be useful, but WITHOUT
+ # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ # more details.
+
+ # You should have received a copy of the GNU General Public License along
+ # with this program; if not, see http://www.gnu.org/licenses/
+ # ---------------------------------------------------------------------------------------
+
+
+import numpy as np
+#import matplotlib.pyplot as plt
+
+
+## Description: Given sigma_0_vv, sigma_0_hh, and sigma_0_hv, the inverse
+## model computes s, and mv
+
+sigma0vvdB = -14.1
+sigma0hhdB = -16.0
+sigma0hvdB = -26.5
+theta = 35. ##Incidence angle
+f = 5.0 ##GHz
+
+k = 2*np.pi*f/0.3 #calculate the wave number
+
+
+
+
+theta_rad = theta*np.pi/180 #represent angle in radians
+
+sigma_0_vv = np.power(10,(sigma0vvdB/10)) # represent data in linear scale
+sigma_0_hh = np.power(10,(sigma0hhdB/10))
+sigma_0_hv = np.power(10,(sigma0hvdB/10))
+
+
+p = sigma_0_hh / sigma_0_vv #calculate the p-ratio
+q = sigma_0_hv / sigma_0_vv #calculate the q-ratio
+
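Since both channels come from dB, the p-ratio can be checked straight from the dB difference:

    p = 10 ** ((-16.0 - (-14.1)) / 10)  # equals sigma_0_hh / sigma_0_vv
    print(round(p, 4))  # 0.6457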
+mv0 = np.arange(0.05,0.5,0.01) # candidate volumetric soil moisture values (fine increments)
+
+
+
+## First estimates s1 and mv1
+ks = ((-1)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv0**0.7 * (np.cos(theta_rad))**2.2)))**0.556
+err = (1 - (2.*theta_rad/np.pi)**(0.35*mv0**(-0.65)) * np.exp(-0.4 * ks**1.4))-p
+abs_err = np.abs(err)
+min_err = np.min(abs_err) #find the value of minimum error
+mv1 = mv0[np.where(abs_err == min_err)]
+ks1 = ((-1)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv1**0.7 * (np.cos(theta_rad))**2.2)))**0.556
+s1 = ks1/k
+
+
+## Second estimate s2 and mv2
+ks2 = (np.log(1-(q/(0.095 * (0.13 + np.sin(1.5*theta_rad))**1.4))) /(-1.3))**(10./9.)
+s2 = ks2/k
+
+xx = (1-p)/np.exp(-0.4 * ks2**1.4)
+if xx<=0:
+ mv2 =0
+else:
+ yy = np.log(xx)/(0.35*np.log(2*theta_rad/np.pi))
+ mv2 = yy**(-100/65)
+ print(mv2,yy,np.power(yy,-100/65))
+## Third estimate mv3
+mv3 = ((sigma_0_hv/(1 - np.exp(-0.32 * ks2**1.8)))/(0.11 * np.cos(theta_rad)**2.2))**(1/0.7)
+
+## weighted average s and mv-------------------------------------
+sf = (s1 + 0.25*s2)/(1+0.25)
+mvf = (mv1+mv2+mv3)/3
+print(mv1,mv2,mv3,s1,s2)
+
+print('Estimated rms height s (cm): ', sf*100)
+print('Estimated volumetric soil moisture: ', mvf)
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004/oh2004.pyx b/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004/oh2004.pyx
new file mode 100644
index 0000000..ca2b08d
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/algtools/oh2004/oh2004/oh2004.pyx
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jun 4 14:59:54 2013
+
+@author: Sat Kumar Tomer
+@email: satkumartomer@gmail.com
+@website: www.ambhas.com
+
+"""
+cimport cython # 必须导入
+import numpy as np##必须为c类型和python类型的数据都申明一个np
+cimport numpy as np # 必须为c类型和python类型的数据都申明一个np
+from libc.math cimport pi
+from scipy.optimize import fmin
+
+cpdef np.ndarray[double,ndim=1] inverse_oh2004(double sigma0vvdB,double sigma0hhdB,double sigma0hvdB,double theta,double f):
+ """
+ sigma0vvdB = -14.1 dB
+ sigma0hhdB = -16.0
+ sigma0hvdB = -26.5
+ theta = 35. 角度值 ##Incidence angle
+ f = 5.0 ##GHz
+ """
+ #print("--------------------------------------------------------\n")
+ cdef np.ndarray[double,ndim=1] result=np.ones((2))
+ result[0]=np.nan
+ result[1]=np.nan
+ #print("*************设置为nan****************")
+ #print(sigma0vvdB,sigma0hhdB,sigma0hvdB,theta,f)
+    cdef double k = 2*pi*f/0.299792458  # calculate the wave number (f in GHz, c = 0.299792458 m/ns)
+    cdef double theta_rad = theta*pi/180  # represent angle in radians
+
+    cdef double sigma_0_vv = np.power(10.,(sigma0vvdB/10.))  # represent data in linear scale
+ cdef double sigma_0_hh = np.power(10.,(sigma0hhdB/10.))
+ cdef double sigma_0_hv = np.power(10.,(sigma0hvdB/10.))
+
+    if sigma_0_vv == 0:
+        #print("sigma_0_vv == 0")
+        return result
+    cdef double p = sigma_0_hh / sigma_0_vv  # calculate the p-ratio
+    cdef double q = sigma_0_hv / sigma_0_vv  # calculate the q-ratio
+
+
+    cdef np.ndarray[double,ndim=1] mv0 = np.arange(0.05,0.9,0.01)  # mv0 search range (fine increments)
+
+ ## First estimates s1 and mv1
+ cdef np.ndarray[double,ndim=1] ks = ((-1.)*3.125*np.log(1 - sigma_0_hv/(0.11 * mv0**0.7 * (np.cos(theta_rad))**2.2)))**0.556
+ cdef np.ndarray[double,ndim=1] err = (1. - (2.*theta_rad/np.pi)**(0.35*mv0**(-0.65)) * np.exp(-0.4 * ks**1.4))-p
+    cdef np.ndarray[double,ndim=1] abs_err = np.abs(err)
+    cdef double min_err = np.nanmin(abs_err)  # find the value of minimum error
+    #print(np.where(abs_err == min_err)[0].shape)
+    if np.isnan(min_err) or np.max(np.where(abs_err == min_err)[0].shape) == 0:
+        # no valid minimum found
+        return result
+ cdef double mv1 = mv0[np.where(abs_err == min_err)[0][0]]
+ cdef double temp_ks1=1. - sigma_0_hv/(0.11 * mv1**0.7 * (np.cos(theta_rad))**2.2)
+ if temp_ks1<0:
+ #print("*********************temp_ks1<0")
+ return result
+ cdef double ks1 = ((-1)*3.125*np.log(temp_ks1))**0.556
+ cdef double s1 = ks1/k
+
+ ## Second estimate s2 and mv2
+ cdef double ks2 = (np.log(1-(q/(0.095 * (0.13 + np.sin(1.5*theta_rad))**1.4))) /(-1.3))**(10./9.)
+ cdef double s2 = ks2/k
+ cdef double mv2 =0.
+ cdef double yy =0.
+ cdef double xx = (1-p)/np.exp(-0.4 * ks2**1.4)
+ if xx<=0:
+ mv2 =0.
+ else:
+ yy = np.log(xx)/(0.35*np.log(2*theta_rad/np.pi))
+ mv2=np.power(yy,-100.0/65)
+
+ ## Third estimate mv3
+ cdef double mv3 = ((sigma_0_hv/(1 - np.exp(-0.32 * ks2**1.8)))/(0.11 * np.cos(theta_rad)**2.2))**(1/0.7)
+ ## weighted average s and mv-------------------------------------
+ #print("q:\t",q)
+ #print("k:\t",k)
+ #print("ks1:\t",ks1)
+ #print("ks2:\t",ks2)
+ #print("theta_rad:\t",theta_rad)
+
+ cdef double sf = (s1 + 0.25*s2)/(1+0.25)
+ cdef double mvf = (mv1+mv2+mv3)/3
+
+ result[0]=mvf*1.0
+ result[1]=sf*1.0
+ #print("mv1:\t",mv1)
+ #print("mv2:\t",mv2)
+ #print("mv3:\t",mv3)
+ #print("s1:\t",s1)
+ #print("s2:\t",s2)
+ #print("Estimated volumetric soil moisture: ",result[0])
+ #print("Estimated rms height s (m): ",result[1])
+ #print("\nend\n")
+ return result
+
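+# Example call (values from the docstring above; the result is [mv, s] and may
+# be [nan, nan] when the model has no valid solution):
+#   inverse_oh2004(-14.1, -16.0, -26.5, 35.0, 5.0)
+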
+cpdef double lamda2freq(double lamda):
+ return 299792458.0/lamda
+
+cpdef double freq2lamda(double freq):
+ return 299792458.0/freq
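+
+# Both helpers assume SI units (metres <-> hertz); e.g. a hypothetical C-band
+# wavelength of 0.056 m gives lamda2freq(0.056) ~ 5.35e9 Hz, and
+# freq2lamda(lamda2freq(0.056)) returns 0.056 again.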
+
+# double sigma0vvdB,double sigma0hhdB,double sigma0hvdB,double theta,double f
+cpdef int retrieve_oh2004_main(int n,np.ndarray[double,ndim=1] mv,np.ndarray[double,ndim=1] h,np.ndarray[int,ndim=1] mask,np.ndarray[double,ndim=1] sigma0vvdB,np.ndarray[double,ndim=1] sigma0hhdB,np.ndarray[double,ndim=1] sigma0hvdB, np.ndarray[double,ndim=1] vh, np.ndarray[double,ndim=1] theta,double f):
+    cdef int i = 0
+    cdef np.ndarray[double,ndim=1] result
+    while i < n:
+        if mask[i] != 0:
+            result = inverse_oh2004(sigma0vvdB[i], sigma0hhdB[i], sigma0hvdB[i], theta[i], f)
+            mv[i] = result[0]
+            h[i] = result[1]
+        i = i + 1
+    return 1
diff --git a/Ortho-NoS1GBM/tool/algorithm/block/blockprocess.py b/Ortho-NoS1GBM/tool/algorithm/block/blockprocess.py
new file mode 100644
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/block/blockprocess.py
+# -*- coding: UTF-8 -*-
+import os
+import glob
+import zipfile
+import numpy as np
+from osgeo import gdal, osr
+from tool.algorithm.image.ImageHandle import ImageHandler
+
+
+class BlockProcess:
+    """
+    Block-wise raster processing: cut images into tiles, merge tiles back,
+    and copy spatial references between images.
+    """
+
+    @staticmethod
+    def get_block_size(rows, cols):
+        block_size = 512
+        if rows > 2048 and cols > 2048:
+            block_size = 1024
+        return block_size
+
+ # def get_block_size(rows, cols, block_size_config):
+ # block_size = 512 if block_size_config < 512 else block_size_config
+ # if rows > 2048 and cols > 2048:
+ # block_size = block_size_config
+ # return block_size
+
+ @staticmethod
+ def get_suffix(path_name):
+ name = path_name
+ suffix = '_' + name.split('_')[-4] + '_' + name.split('_')[-3] + '_' + name.split('_')[-2] + '_' + \
+ name.split('_')[-1]
+ return suffix
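+    # e.g. get_suffix('image_0_1024_0_1024.tif') -> '_0_1024_0_1024.tif'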
+
+ @staticmethod
+    def get_file_names(data_dir, file_type=['tif', 'tiff']):
+        """
+        Collect the paths of all files under data_dir whose extension is in file_type.
+        """
+ result_dir = []
+ result_name = []
+ for maindir, subdir, file_name_list in os.walk(data_dir):
+ for filename in file_name_list:
+ apath = os.path.join(maindir, filename)
+ ext = apath.split('.')[-1]
+ if ext in file_type:
+ result_dir.append(apath)
+ result_name.append(filename)
+ else:
+ pass
+ return result_dir, result_name
+
+ @staticmethod
+    def unzip_file(zip_file_path, out_path):
+        # extract into a folder named after the archive, created under out_path
+        # extract_folder = os.path.dirname(zip_file_path)
+
+ basename = os.path.splitext(os.path.basename(zip_file_path))[0]
+ extract_folder = os.path.join(out_path, basename)
+
+        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
+            # extract into a folder with the same name as the archive
+            zip_ref.extractall(extract_folder)
+
+        unzipped_folder_path = extract_folder  # fallback if no matching entry exists
+        files = list(glob.glob(os.path.join(extract_folder, '*')))
+        for file in files:
+            if basename in os.path.basename(file) and not file.endswith(".xml"):
+                unzipped_folder_path = file
+
+        return unzipped_folder_path
+
+ @staticmethod
+ def unzip_dem(zip_file_path, out_path):
+ para_value_list = zip_file_path.split(";")
+ for n in para_value_list:
+            with zipfile.ZipFile(n, 'r') as zip_ref:
+                # extract every archive into out_path
+                zip_ref.extractall(out_path)
+
+ return out_path
+
+ @staticmethod
+    def get_same_img(img_dir, img_name):
+        """
+        Group the sub-image paths in img_dir by the base name derived from
+        img_name, and return the groups as a dict.
+        """
+ result = {}
+ for idx, name in enumerate(img_name):
+ temp_name = ''
+ for idx2, item in enumerate(name.split('_')[:-4]):
+ if idx2 == 0:
+ temp_name = temp_name + item
+ else:
+ temp_name = temp_name + '_' + item
+
+ if temp_name in result:
+ result[temp_name].append(img_dir[idx])
+ else:
+ result[temp_name] = []
+ result[temp_name].append(img_dir[idx])
+ return result
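+    # Example: tiles named 'scene_a_0_1024_0_1024.tif' and
+    # 'scene_a_0_1024_1024_2048.tif' both end up under the key 'scene_a'.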
+
+ @staticmethod
+    def assign_spatial_reference_byfile(src_path, dst_path):
+        """
+        Copy the spatial reference (projection and geotransform) of src_path
+        into the image at dst_path.
+        """
+ src_ds = gdal.Open(src_path, gdal.GA_ReadOnly)
+ if src_ds is None:
+ return False
+ sr = osr.SpatialReference()
+ sr.ImportFromWkt(src_ds.GetProjectionRef())
+ geo_transform = src_ds.GetGeoTransform()
+
+ dst_ds = gdal.Open(dst_path, gdal.GA_Update)
+ if dst_ds is None:
+ return False
+ dst_ds.SetProjection(sr.ExportToWkt())
+ dst_ds.SetGeoTransform(geo_transform)
+ del dst_ds
+ del src_ds
+ return True
+
+ @staticmethod
+    def assign_spatial_reference_bypoint(row_begin, col_begin, src_proj, src_geo, img_path):
+        """
+        Write src_proj and a geotransform shifted to the block origin
+        (row_begin, col_begin) into the image at img_path.
+        """
+
+        sr = osr.SpatialReference()
+        sr.ImportFromWkt(src_proj)
+        geo_transform = list(src_geo)  # copy so the caller's geotransform is not mutated
+        geo_transform[0] = src_geo[0] + col_begin * src_geo[1] + row_begin * src_geo[2]
+        geo_transform[3] = src_geo[3] + col_begin * src_geo[4] + row_begin * src_geo[5]
+ dst_ds = gdal.Open(img_path, gdal.GA_Update)
+ if dst_ds is None:
+ return False
+ dst_ds.SetProjection(sr.ExportToWkt())
+ dst_ds.SetGeoTransform(geo_transform)
+ del dst_ds
+ return True
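+    # The offset math above is the standard GDAL affine model: for a tile whose
+    # upper-left pixel sits at (row_begin, col_begin) in the source image,
+    # x0' = x0 + col*gt[1] + row*gt[2] and y0' = y0 + col*gt[4] + row*gt[5].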
+
+ @staticmethod
+    def __get_band_array(filename, num):
+        """
+        :param filename: path to the tif
+        :param num: band index (1-based)
+        :return: the band's data as an array
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ bands = dataset.GetRasterBand(num)
+ array = bands.ReadAsArray(0, 0, bands.XSize, bands.YSize)
+ del dataset
+ return array
+
+ @staticmethod
+    def get_data(filename):
+        """
+        :param filename: path to the tif
+        :return: the data of all bands
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ im_width = dataset.RasterXSize
+ im_height = dataset.RasterYSize
+ im_data = dataset.ReadAsArray(0, 0, im_width, im_height)
+ del dataset
+ return im_data
+
+    def get_tif_dtype(self, filename):
+        """
+        :param filename: path to the tif
+        :return: the tif's data type name
+        """
+ image = self.__get_band_array(filename, 1)
+ return image.dtype.name
+
+    def cut(self, in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=2048):
+        """
+        :param in_dir: folder holding the images to cut (a folder, not a tif path)
+        :param out_dir: folder for the cut results
+        :param file_type: input image types: tif, tiff, bmp, jpg, png, etc.
+        :param out_type: output image type
+        :param out_size: cut size; tiles are n*n squares
+        :return: True or False
+        """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ data_dir_list, _ = self.get_file_names(in_dir, file_type)
+ count = 0
+
+ for each_dir in data_dir_list:
+
+ name_suffix = os.path.basename(each_dir)
+ img_name = os.path.splitext(name_suffix)[0]
+
+            # read via gdal
+ image = self.__get_band_array(each_dir, 1)
+
+ cut_factor_row = int(np.ceil(image.shape[0] / out_size))
+ cut_factor_clo = int(np.ceil(image.shape[1] / out_size))
+ for i in range(cut_factor_row):
+ for j in range(cut_factor_clo):
+
+ if i == cut_factor_row - 1:
+ i = image.shape[0] / out_size - 1
+ else:
+ pass
+
+ if j == cut_factor_clo - 1:
+ j = image.shape[1] / out_size - 1
+ else:
+ pass
+
+ start_x = int(np.rint(i * out_size))
+ start_y = int(np.rint(j * out_size))
+ end_x = int(np.rint((i + 1) * out_size))
+ end_y = int(np.rint((j + 1) * out_size))
+ out_dir_images = os.path.join(out_dir, img_name + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
+ end_y) + '.' + out_type)
+ # + '/' + img_name \
+ # + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
+ # end_y) + '.' + out_type
+
+ # temp_image = image[start_x:end_x, start_y:end_y]
+ # out_image = Image.fromarray(temp_data)
+ # out_image = Image.fromarray(temp_image)
+ # out_image.save(out_dir_images)
+
+ data = ImageHandler.get_data(each_dir)
+ if ImageHandler.get_bands(each_dir) > 1:
+ temp_data = data[:,start_x:end_x, start_y:end_y]
+ else:
+ temp_data = data[start_x:end_x, start_y:end_y]
+ ImageHandler.write_img(out_dir_images, '', [0, 0, 0, 0, 0, 0], temp_data)
+ count += 1
+ return True
+
+    def cut_new(self, in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=2048):
+        """
+        :param in_dir: folder holding the images to cut (a folder, not a tif path)
+        :param out_dir: folder for the cut results
+        :param file_type: input image types: tif, tiff, bmp, jpg, png, etc.
+        :param out_type: output image type
+        :param out_size: cut size; tiles are n*n squares
+        :return: True or False
+        Revised 2023-08-31 ----tjx
+        """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ data_dir_list, _ = self.get_file_names(in_dir, file_type)
+ count = 0
+
+ for each_dir in data_dir_list:
+
+ name_suffix = os.path.basename(each_dir)
+ img_name = os.path.splitext(name_suffix)[0]
+
+            # read via gdal
+ image = self.__get_band_array(each_dir, 1)
+
+ block_x = int(np.ceil(image.shape[1] / out_size))
+            block_y = int(np.ceil(image.shape[0] / out_size))  # todo revised blocking
+ for i in range(block_y):
+ for j in range(block_x):
+ start_x = image.shape[1] - out_size if (j == block_x - 1 and block_x != 1) else j * out_size
+ start_y = image.shape[0] - out_size if (i == block_y - 1 and block_y != 1) else i * out_size
+ end_x = image.shape[1] if (j + 1) * out_size > image.shape[1] else (j + 1) * out_size
+ end_y = image.shape[0] if (i + 1) * out_size > image.shape[0] else (i + 1) * out_size
+
+ out_dir_images = os.path.join(out_dir, img_name + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(
+ end_y) + '.' + out_type)
+ # print(out_dir_images)
+
+ data = ImageHandler.get_data(each_dir)
+ if ImageHandler.get_bands(each_dir) > 1:
+ # temp_data = data[:,start_x:end_x, start_y:end_y]
+ temp_data = data[:,start_y:end_y, start_x:end_x]
+ else:
+ # temp_data = data[start_x:end_x, start_y:end_y]
+ temp_data = data[start_y:end_y, start_x:end_x]
+ ImageHandler.write_img(out_dir_images, '', [0, 0, 0, 0, 0, 0], temp_data)
+ count += 1
+ return True
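+    # Hypothetical usage: BlockProcess().cut_new(r'D:\in', r'D:\out', ['tif'], 'tif', 1024)
+    # writes tiles named <img>_<start_x>_<end_x>_<start_y>_<end_y>.tif into D:\out.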
+
+    def combine(self, data_dir, w, h, out_dir, out_type='tif', file_type=['tif', 'tiff'], datetype='float16'):
+        """
+        :param data_dir: folder holding the tiles to merge (a folder, not a tif path)
+        :param w: width of the merged image
+        :param h: height of the merged image
+        :param out_dir: folder for the merged result
+        :param out_type: output image type
+        :param file_type: tile image types
+        :param datetype: data type: int8, int16, float16, float32, etc.
+        :return: True or False
+        """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ img_dir, img_name = self.get_file_names(data_dir, file_type)
+
+ dir_dict = self.get_same_img(img_dir, img_name)
+ count = 0
+ for key in dir_dict.keys():
+ temp_label = np.zeros(shape=(h, w), dtype=datetype)
+ dir_list = dir_dict[key]
+ for item in dir_list:
+ name_split = item.split('_')
+ x_start = int(name_split[-4])
+ x_end = int(name_split[-3])
+ y_start = int(name_split[-2])
+ y_end = int(name_split[-1].split('.')[0])
+ # img = Image.open(item)
+ img = ImageHandler.get_band_array(item, 1)
+ img = np.array(img)
+
+ temp_label[x_start:x_end, y_start:y_end] = img
+
+ img_name = key + '.' + out_type
+ new_out_dir = os.path.join(out_dir, img_name)
+ ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
+ # label = Image.fromarray(temp_label)
+ # label.save(new_out_dir)
+
+ count += 1
+ return True
+
+    # todo 20230901 keep the merge code in sync with the revised blocking
+    def combine_new(self, data_dir, w, h, out_dir, out_type='tif', file_type=['tif', 'tiff'], datetype='float16'):
+        """
+        :param data_dir: folder holding the tiles to merge (a folder, not a tif path)
+        :param w: width of the merged image
+        :param h: height of the merged image
+        :param out_dir: folder for the merged result
+        :param out_type: output image type
+        :param file_type: tile image types
+        :param datetype: data type: int8, int16, float16, float32, etc.
+        :return: True or False
+        """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ img_dir, img_name = self.get_file_names(data_dir, file_type)
+
+ dir_dict = self.get_same_img(img_dir, img_name)
+ count = 0
+ for key in dir_dict.keys():
+ dir_list = dir_dict[key]
+ bands = ImageHandler.get_bands(dir_list[0])
+ if bands > 1:
+ temp_label = np.zeros(shape=(bands, h, w), dtype=datetype)
+ for item in dir_list:
+ name_split = item.split('_')
+ x_start = int(name_split[-4])
+ x_end = int(name_split[-3])
+ y_start = int(name_split[-2])
+ y_end = int(name_split[-1].split('.')[0])
+ # img = Image.open(item)
+ img = ImageHandler.get_band_array(item, 1)
+ img = np.array(img)
+
+ temp_label[:, y_start:y_end, x_start:x_end] = img
+
+ img_name = key + '.' + out_type
+ new_out_dir = os.path.join(out_dir, img_name)
+ ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
+ # label = Image.fromarray(temp_label)
+ # label.save(new_out_dir)
+ count += 1
+ else:
+ temp_label = np.zeros(shape=(h, w), dtype=datetype)
+ for item in dir_list:
+ name_split = item.split('_')
+ x_start = int(name_split[-4])
+ x_end = int(name_split[-3])
+ y_start = int(name_split[-2])
+ y_end = int(name_split[-1].split('.')[0])
+ # img = Image.open(item)
+ img = ImageHandler.get_band_array(item, 1)
+ img = np.array(img)
+
+ temp_label[y_start:y_end, x_start:x_end] = img
+
+ img_name = key + '.' + out_type
+ new_out_dir = os.path.join(out_dir, img_name)
+ ImageHandler.write_img(new_out_dir, '', [0, 0, 0, 0, 0, 0], temp_label)
+ # label = Image.fromarray(temp_label)
+ # label.save(new_out_dir)
+
+ count += 1
+ return True
+
+    def combine_Tif(self, data_dir, w, h, out_dir, proj, geo, out_type='tif', file_type=['tif', 'tiff'],
+                    datetype='float16'):
+        """
+        Merge the tifs in a folder into one large tif.
+        :param data_dir: folder holding the tiles to merge (a folder, not a tif path)
+        :param w: width of the merged image
+        :param h: height of the merged image
+        :param out_dir: folder for the merged result
+        :param proj: projection to assign
+        :param geo: geotransform to assign
+        :param out_type: output image type
+        :param file_type: tile image types
+        :param datetype: data type: int8, int16, float16, float32, etc.
+        :return: True or False
+        """
+ image_handler = ImageHandler()
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ img_dir, img_name = self.get_file_names(data_dir, file_type)
+
+ dir_dict = self.get_same_img(img_dir, img_name)
+ count = 0
+ for key in dir_dict.keys():
+ temp_label = np.zeros(shape=(h, w), dtype=datetype)
+ dir_list = dir_dict[key]
+ for item in dir_list:
+ name_split = item.split('_')
+ x_start = int(name_split[-4])
+ x_end = int(name_split[-3])
+ y_start = int(name_split[-2])
+ y_end = int(name_split[-1].split('.')[0])
+ img = image_handler.get_data(item)
+ temp_label[x_start:x_end, y_start:y_end] = img
+
+ img_name = key + '.' + out_type
+ new_out_dir = os.path.join(out_dir,img_name)
+ image_handler.write_img(new_out_dir, proj, geo, temp_label)
+ count += 1
+ return True
+
+# if __name__ == '__main__':
+# bp = BlockProcess()
+# # # cut
+# data_dir = r"D:\micro\WorkSpace\LandCover\Temporary\processing\feature_tif\cut"
+# out_dir = r"D:\micro\WorkSpace\LandCover\Temporary\processing\feature_tif\combine"
+# file_type = ['tif']
+# out_type = 'tif'
+# cut_size = 1024
+# #
+# bp.cut_new(data_dir, out_dir, file_type, out_type, cut_size)
+# # # combine
+# # data_dir=r"D:\Workspace\SoilMoisture\Temporary\test"
+# w= 5043
+# h= 1239
+# out_dirs=r"D:\BaiduNetdiskDownload\HF\cut_outs"
+# # out_type='tif'
+# # file_type=['tif']
+# datetype = 'float'
+# # src_path = r"D:\Workspace\SoilMoisture\Temporary\preprocessed\HH_preprocessed.tif"
+# # datetype = bp.get_tif_dtype(src_path)
+# bp.combine_new(out_dir, w, h, out_dirs, out_type, file_type, datetype)
+
+ #
+ # # 添加地理信息
+ # new_out_dir =r"D:\DATA\testdata1\combine\TEST_20200429_NDVI.tif"
+ # bp.assign_spatial_reference_byfile(src_path, new_out_dir)
+
+ # fn = r'D:\Workspace\SoilMoisture\Temporary\combine\soil_moisture.tif'
+ # product_path = r'D:\Workspace\SoilMoisture\Temporary\combine\soil_moisture_1.tif'
+ #
+ # proj, geos, img = ImageHandler.read_img(fn)
+ # img[img>1] = 1
+ # img[img<0] = 0
+ # ImageHandler.write_img(product_path, proj, geos, img)
+
+if __name__ == '__main__':
+ fn = r'G:\测试数据\cut'
+ bp = BlockProcess()
+ img_dir, img_name = bp.get_file_names(fn, ['jpg'])
+ dir_dict = bp.get_same_img(img_dir, img_name)
+ print(1)
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/image/ImageHandle.py b/Ortho-NoS1GBM/tool/algorithm/image/ImageHandle.py
new file mode 100644
index 0000000..26674fe
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/image/ImageHandle.py
@@ -0,0 +1,785 @@
+"""
+@Project :microproduct
+@File :ImageHandle.py
+@Function :read SAR data to be processed, normalise its format, and save results after processing
+@Author :LMM
+@Date :2021/10/19 14:39
+@Version :1.0.0
+"""
+import os
+from xml.etree.ElementTree import ElementTree
+
+from PIL import Image
+from osgeo import gdal
+from osgeo import osr
+import numpy as np
+import cv2
+import logging
+
+import math
+logger = logging.getLogger("mylog")
+
+
+class ImageHandler:
+    """
+    Read, edit and save imagery.
+    """
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def get_dataset(filename):
+        """
+        :param filename: path to the tif
+        :return: dataset handle
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ return dataset
+
+    def get_scope(self, filename):
+        """
+        :param filename: path to the tif
+        :return: image extent
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ im_scope = self.cal_img_scope(dataset)
+ del dataset
+ return im_scope
+
+ @staticmethod
+    def get_projection(filename):
+        """
+        :param filename: path to the tif
+        :return: map projection info
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ im_proj = dataset.GetProjection()
+ del dataset
+ return im_proj
+
+ @staticmethod
+    def get_geotransform(filename):
+        """
+        :param filename: path to the tif
+        :return: affine transform from image coordinates (row, col / pixel, line)
+                 to georeferenced coordinates (projected or geographic)
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ geotransform = dataset.GetGeoTransform()
+ del dataset
+ return geotransform
+
+    @staticmethod
+    def get_invgeotransform(filename):
+        """
+        :param filename: path to the tif
+        :return: affine transform from georeferenced coordinates (projected or
+                 geographic) to image coordinates (row, col)
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ geotransform = dataset.GetGeoTransform()
+ geotransform=gdal.InvGeoTransform(geotransform)
+ del dataset
+ return geotransform
+
+ @staticmethod
+    def get_bands(filename):
+        """
+        :param filename: path to the tif
+        :return: number of bands
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ bands = dataset.RasterCount
+ del dataset
+ return bands
+
+ @staticmethod
+    def geo2lonlat(dataset, x, y):
+        """
+        Convert projected coordinates to geographic coordinates (the projected
+        CRS is taken from the given dataset).
+        :param dataset: GDAL dataset
+        :param x: projected x coordinate
+        :param y: projected y coordinate
+        :return: the (lon, lat) corresponding to the projected (x, y)
+        """
+ prosrs = osr.SpatialReference()
+ prosrs.ImportFromWkt(dataset.GetProjection())
+ geosrs = prosrs.CloneGeogCS()
+ ct = osr.CoordinateTransformation(prosrs, geosrs)
+ coords = ct.TransformPoint(x, y)
+ return coords[:2]
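+    # Note: with GDAL 3+ the returned order follows the CRS axis definition
+    # (often lat, lon for EPSG:4326); call SetAxisMappingStrategy with
+    # osr.OAMS_TRADITIONAL_GIS_ORDER on the SRS if a strict (lon, lat) order is required.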
+
+ @staticmethod
+    def get_band_array(filename, num=1):
+        """
+        :param filename: path to the tif
+        :param num: band index (1-based)
+        :return: the band's data as an array
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ bands = dataset.GetRasterBand(num)
+ array = bands.ReadAsArray(0, 0, bands.XSize, bands.YSize)
+
+ # if 'int' in str(array.dtype):
+ # array[np.where(array == -9999)] = np.inf
+ # else:
+ # array[np.where(array < -9000.0)] = np.nan
+
+ del dataset
+ return array
+
+ @staticmethod
+    def get_data(filename):
+        """
+        :param filename: path to the tif
+        :return: the data of all bands
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ im_width = dataset.RasterXSize
+ im_height = dataset.RasterYSize
+ im_data = dataset.ReadAsArray(0, 0, im_width, im_height)
+ del dataset
+ return im_data
+
+ @staticmethod
+    def get_all_band_array(filename):
+        """
+        (atmospheric delay algorithm)
+        Stack all bands of an ERA-5 image into one array with the band axis last:
+        get_data() -> (37, 8, 8), get_all_band_array() -> (8, 8, 37)
+        :param filename: image path
+        :return: image array
+        """
+ dataset = gdal.Open(filename)
+ x_size = dataset.RasterXSize
+ y_size = dataset.RasterYSize
+ nums = dataset.RasterCount
+ array = np.zeros((y_size, x_size, nums), dtype=float)
+ if nums == 1:
+ bands_0 = dataset.GetRasterBand(1)
+ array = bands_0.ReadAsArray(0, 0, x_size, y_size)
+ else:
+ for i in range(0, nums):
+ bands = dataset.GetRasterBand(i+1)
+ arr = bands.ReadAsArray(0, 0, x_size, y_size)
+ array[:, :, i] = arr
+ return array
+
+ @staticmethod
+    def get_img_width(filename):
+        """
+        :param filename: path to the tif
+        :return: image width
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ width = dataset.RasterXSize
+
+ del dataset
+ return width
+
+ @staticmethod
+    def get_img_height(filename):
+        """
+        :param filename: path to the tif
+        :return: image height
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+ height = dataset.RasterYSize
+ del dataset
+ return height
+
+ @staticmethod
+    def read_img(filename):
+        """
+        Read an image.
+        :param filename:
+        :return: (projection, geotransform, array)
+        """
+ gdal.AllRegister()
+        img_dataset = gdal.Open(filename)  # open the file
+
+ if img_dataset is None:
+ msg = 'Could not open ' + filename
+ logger.error(msg)
+ return None, None, None
+
+        im_proj = img_dataset.GetProjection()  # map projection info
+        if im_proj is None:
+            return None, None, None
+        im_geotrans = img_dataset.GetGeoTransform()  # affine matrix
+
+        im_width = img_dataset.RasterXSize  # number of columns
+        im_height = img_dataset.RasterYSize  # number of rows
+        im_arr = img_dataset.ReadAsArray(0, 0, im_width, im_height)
+        del img_dataset
+        return im_proj, im_geotrans, im_arr
+
+    def cal_img_scope(self, dataset):
+        """
+        Compute the geographic extent of an image: use GDAL's six-parameter
+        affine model to convert image (row, col) coordinates into projected or
+        geographic coordinates (depending on the dataset's CRS).
+        :param dataset: GDAL dataset
+        :return: list[point_upleft, point_upright, point_downleft, point_downright]
+        """
+ if dataset is None:
+ return None
+
+ img_geotrans = dataset.GetGeoTransform()
+ if img_geotrans is None:
+ return None
+
+        width = dataset.RasterXSize  # number of columns
+        height = dataset.RasterYSize  # number of rows
+
+ point_upleft = self.trans_rowcol2geo(img_geotrans, 0, 0)
+ point_upright = self.trans_rowcol2geo(img_geotrans, width, 0)
+ point_downleft = self.trans_rowcol2geo(img_geotrans, 0, height)
+ point_downright = self.trans_rowcol2geo(img_geotrans, width, height)
+
+ return [point_upleft, point_upright, point_downleft, point_downright]
+
+ @staticmethod
+    def get_scope_ori_sim(filename):
+        """
+        Compute the geographic extent of an image whose first two bands hold
+        coordinate (e.g. longitude/latitude) grids.
+        :param filename: image path
+        :return: list[point_upleft, point_upright, point_downleft, point_downright]
+        """
+ gdal.AllRegister()
+ dataset = gdal.Open(filename)
+ if dataset is None:
+ return None
+
+ width = dataset.RasterXSize # 栅格矩阵的列数
+ height = dataset.RasterYSize # 栅格矩阵的行数
+
+ band1 = dataset.GetRasterBand(1)
+ array1 = band1.ReadAsArray(0, 0, band1.XSize, band1.YSize)
+
+ band2 = dataset.GetRasterBand(2)
+ array2 = band2.ReadAsArray(0, 0, band2.XSize, band2.YSize)
+
+ if array1[0, 0] < array1[0, width-1]:
+ point_upleft = [array1[0, 0], array2[0, 0]]
+ point_upright = [array1[0, width-1], array2[0, width-1]]
+ else:
+ point_upright = [array1[0, 0], array2[0, 0]]
+ point_upleft = [array1[0, width-1], array2[0, width-1]]
+
+
+ if array1[height-1, 0] < array1[height-1, width-1]:
+ point_downleft = [array1[height - 1, 0], array2[height - 1, 0]]
+ point_downright = [array1[height - 1, width - 1], array2[height - 1, width - 1]]
+ else:
+ point_downright = [array1[height - 1, 0], array2[height - 1, 0]]
+ point_downleft = [array1[height - 1, width - 1], array2[height - 1, width - 1]]
+
+
+        if array2[0, 0] < array2[height - 1, 0]:
+            # swap top and bottom
+ tmp1 = point_upleft
+ point_upleft = point_downleft
+ point_downleft = tmp1
+
+ tmp2 = point_upright
+ point_upright = point_downright
+ point_downright = tmp2
+
+ return [point_upleft, point_upright, point_downleft, point_downright]
+
+
+ @staticmethod
+    def trans_rowcol2geo(img_geotrans, img_col, img_row):
+        """
+        Use GDAL's six-parameter affine model to convert image (row, col)
+        coordinates into projected or geographic coordinates (depending on the
+        dataset's CRS).
+        :param img_geotrans: affine geotransform
+        :param img_col: image column (x) coordinate
+        :param img_row: image row (y) coordinate
+        :return: [geo_x, geo_y]
+        """
+ geo_x = img_geotrans[0] + img_geotrans[1] * img_col + img_geotrans[2] * img_row
+ geo_y = img_geotrans[3] + img_geotrans[4] * img_col + img_geotrans[5] * img_row
+ return [geo_x, geo_y]
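+    # e.g. with img_geotrans = [100.0, 0.1, 0, 30.0, 0, -0.1], pixel (col=10, row=20)
+    # maps to geo_x = 100.0 + 0.1*10 = 101.0 and geo_y = 30.0 - 0.1*20 = 28.0.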
+
+ @staticmethod
+    def write_era_into_img(filename, im_proj, im_geotrans, im_data):
+        """
+        Save an image whose band axis is last.
+        :param filename:
+        :param im_proj:
+        :param im_geotrans:
+        :param im_data:
+        :return:
+        """
+ gdal_dtypes = {
+ 'int8': gdal.GDT_Byte,
+            'uint16': gdal.GDT_UInt16,
+            'int16': gdal.GDT_Int16,
+            'uint32': gdal.GDT_UInt32,
+ 'int32': gdal.GDT_Int32,
+ 'float32': gdal.GDT_Float32,
+ 'float64': gdal.GDT_Float64,
+ }
+ if not gdal_dtypes.get(im_data.dtype.name, None) is None:
+ datatype = gdal_dtypes[im_data.dtype.name]
+ else:
+ datatype = gdal.GDT_Float32
+
+        # determine the array dimensionality
+        if len(im_data.shape) == 3:
+            im_height, im_width, im_bands = im_data.shape  # shape[0] is the row count
+        else:
+            im_bands, (im_height, im_width) = 1, im_data.shape
+
+        # create the file
+        if os.path.exists(os.path.split(filename)[0]) is False:
+            os.makedirs(os.path.split(filename)[0])
+
+        driver = gdal.GetDriverByName("GTiff")  # the data type is needed to size the allocation
+        dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
+        dataset.SetGeoTransform(im_geotrans)  # write the affine transform
+        dataset.SetProjection(im_proj)  # write the projection
+
+        if im_bands == 1:
+            dataset.GetRasterBand(1).WriteArray(im_data)  # write the array data
+        else:
+            for i in range(im_bands):
+                dataset.GetRasterBand(i + 1).WriteArray(im_data[:, :, i])
+                # dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
+ del dataset
+
+    # write a GeoTIFF file
+
+ @staticmethod
+ def lat_lon_to_pixel(raster_dataset_path, location):
+ """From zacharybears.com/using-python-to-translate-latlon-locations-to-pixels-on-a-geotiff/."""
+ gdal.AllRegister()
+ raster_dataset = gdal.Open(raster_dataset_path)
+ if raster_dataset is None:
+ return None
+ ds = raster_dataset
+ gt = ds.GetGeoTransform()
+ srs = osr.SpatialReference()
+ srs.ImportFromWkt(ds.GetProjection())
+ srs_lat_lon = srs.CloneGeogCS()
+ ct = osr.CoordinateTransformation(srs_lat_lon, srs)
+ new_location = [None, None]
+ # Change the point locations into the GeoTransform space
+ (new_location[1], new_location[0], holder) = ct.TransformPoint(location[1], location[0])
+ # Translate the x and y coordinates into pixel values
+ Xp = new_location[0]
+ Yp = new_location[1]
+ dGeoTrans = gt
+ dTemp = dGeoTrans[1] * dGeoTrans[5] - dGeoTrans[2] * dGeoTrans[4]
+ Xpixel = (dGeoTrans[5] * (Xp - dGeoTrans[0]) - dGeoTrans[2] * (Yp - dGeoTrans[3])) / dTemp
+ Yline = (dGeoTrans[1] * (Yp - dGeoTrans[3]) - dGeoTrans[4] * (Xp - dGeoTrans[0])) / dTemp
+ del raster_dataset
+ return (Xpixel, Yline)
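+    # dTemp is the determinant of the 2x2 affine part of the geotransform; the
+    # two formulas above are its closed-form inverse (equivalent to applying
+    # gdal.InvGeoTransform to gt and then mapping the point).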
+
+ @staticmethod
+    def write_img(filename, im_proj, im_geotrans, im_data, no_data='0'):
+        """
+        Save an image.
+        :param filename: output path
+        :param im_proj:
+        :param im_geotrans:
+        :param im_data:
+        :param no_data: value to register as the bands' nodata value
+        :return:
+        """
+
+ gdal_dtypes = {
+ 'int8': gdal.GDT_Byte,
+            'uint16': gdal.GDT_UInt16,
+            'int16': gdal.GDT_Int16,
+            'uint32': gdal.GDT_UInt32,
+ 'int32': gdal.GDT_Int32,
+ 'float32': gdal.GDT_Float32,
+ 'float64': gdal.GDT_Float64,
+ }
+ if not gdal_dtypes.get(im_data.dtype.name, None) is None:
+ datatype = gdal_dtypes[im_data.dtype.name]
+ else:
+ datatype = gdal.GDT_Float32
+ flag = False
+ # 判读数组维数
+ if len(im_data.shape) == 3:
+ im_bands, im_height, im_width = im_data.shape
+ flag = True
+ else:
+ im_bands, (im_height, im_width) = 1, im_data.shape
+
+ # 创建文件
+ if os.path.exists(os.path.split(filename)[0]) is False:
+ os.makedirs(os.path.split(filename)[0])
+
+ driver = gdal.GetDriverByName("GTiff") # 数据类型必须有,因为要计算需要多大内存空间
+ dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
+
+ dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
+
+ dataset.SetProjection(im_proj) # 写入投影
+
+ if im_bands == 1:
+ # outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据
+ if flag:
+ outband = dataset.GetRasterBand(1)
+ outband.WriteArray(im_data[0])
+ if no_data != 'null':
+ outband.SetNoDataValue(np.double(no_data))
+ outband.FlushCache()
+ else:
+ outband = dataset.GetRasterBand(1)
+ outband.WriteArray(im_data)
+ if no_data != 'null':
+ outband.SetNoDataValue(np.double(no_data))
+ outband.FlushCache()
+ else:
+ for i in range(im_bands):
+ outband = dataset.GetRasterBand(1 + i)
+ outband.WriteArray(im_data[i])
+ if no_data != 'null':
+ outband.SetNoDataValue(np.double(no_data))
+ outband.FlushCache()
+ # outRaster.GetRasterBand(i + 1).WriteArray(array[i])
+ del dataset
+
+    # write a GeoTIFF file
+
+ @staticmethod
+ def write_img_envi(filename, im_proj, im_geotrans, im_data, no_data='null'):
+ """
+ 影像保存
+ :param filename: 保存的路径
+ :param im_proj:
+ :param im_geotrans:
+ :param im_data:
+ :param no_data: 把无效值设置为 nodata
+ :return:
+ """
+
+ gdal_dtypes = {
+ 'int8': gdal.GDT_Byte,
+ 'unit16': gdal.GDT_UInt16,
+ 'int16': gdal.GDT_Int16,
+ 'unit32': gdal.GDT_UInt32,
+ 'int32': gdal.GDT_Int32,
+ 'float32': gdal.GDT_Float32,
+ 'float64': gdal.GDT_Float64,
+ }
+ if not gdal_dtypes.get(im_data.dtype.name, None) is None:
+ datatype = gdal_dtypes[im_data.dtype.name]
+ else:
+ datatype = gdal.GDT_Float32
+
+ # 判读数组维数
+ if len(im_data.shape) == 3:
+ im_bands, im_height, im_width = im_data.shape
+ else:
+ im_bands, (im_height, im_width) = 1, im_data.shape
+
+ # 创建文件
+ if os.path.exists(os.path.split(filename)[0]) is False:
+ os.makedirs(os.path.split(filename)[0])
+
+ driver = gdal.GetDriverByName("ENVI") # 数据类型必须有,因为要计算需要多大内存空间
+ dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
+
+ dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
+
+ dataset.SetProjection(im_proj) # 写入投影
+
+ if im_bands == 1:
+ # outRaster.GetRasterBand(1).WriteArray(array) # 写入数组数据
+ outband = dataset.GetRasterBand(1)
+ outband.WriteArray(im_data)
+            if no_data != 'null':
+                outband.SetNoDataValue(np.double(no_data))
+            outband.FlushCache()
+ else:
+ for i in range(im_bands):
+ outband = dataset.GetRasterBand(1 + i)
+ outband.WriteArray(im_data[i])
+ outband.FlushCache()
+ # outRaster.GetRasterBand(i + 1).WriteArray(array[i])
+ del dataset
+
+ @staticmethod
+    def write_img_rpc(filename, im_proj, im_geotrans, im_data, rpc_dict):
+        """
+        Write an image together with its RPC metadata.
+        """
+        # determine the raster data type
+ if 'int8' in im_data.dtype.name:
+ datatype = gdal.GDT_Byte
+ elif 'int16' in im_data.dtype.name:
+ datatype = gdal.GDT_Int16
+ else:
+ datatype = gdal.GDT_Float32
+
+ # 判读数组维数
+ if len(im_data.shape) == 3:
+ im_bands, im_height, im_width = im_data.shape
+ else:
+ im_bands, (im_height, im_width) = 1, im_data.shape
+
+ # 创建文件
+ driver = gdal.GetDriverByName("GTiff")
+ dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
+
+ dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数
+ dataset.SetProjection(im_proj) # 写入投影
+
+        # write the RPC parameters into the 'RPC' metadata domain
+        for k in rpc_dict.keys():
+            dataset.SetMetadataItem(k, rpc_dict[k], 'RPC')
+
+ if im_bands == 1:
+ dataset.GetRasterBand(1).WriteArray(im_data) # 写入数组数据
+ else:
+ for i in range(im_bands):
+ dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
+
+ del dataset
+
+
+    def transtif2mask(self, out_tif_path, in_tif_path, threshold):
+        """
+        :param out_tif_path: output path
+        :param in_tif_path: input path
+        :param threshold: threshold
+        """
+        im_proj, im_geotrans, im_arr = self.read_img(in_tif_path)
+        im_arr_mask = (im_arr < threshold).astype(int)
+        self.write_img(out_tif_path, im_proj, im_geotrans, im_arr_mask)
+
+    def write_quick_view(self, tif_path, color_img=False, quick_view_path=None):
+        """
+        Generate a quick-view image; by default it sits next to the tif with the same name.
+        :param tif_path: image path
+        :param color_img: whether to generate a random pseudo-colour image
+        :param quick_view_path: quick-view output path
+        """
+ if quick_view_path is None:
+ quick_view_path = os.path.splitext(tif_path)[0]+'.jpg'
+
+ n = self.get_bands(tif_path)
+ if n == 1: # 单波段
+ t_data = self.get_data(tif_path)
+ else: # 多波段,转为强度数据
+ t_data = self.get_data(tif_path)
+ t_data = t_data.astype(float)
+ t_data = np.sqrt(t_data[0] ** 2 + t_data[1] ** 2)
+ t_data[np.isnan(t_data)] = 0
+ t_data[np.where(t_data == -9999)] = 0
+ t_r = self.get_img_height(tif_path)
+ t_c = self.get_img_width(tif_path)
+ if t_r > 10000 or t_c > 10000:
+ q_r = int(t_r / 10)
+ q_c = int(t_c / 10)
+ elif 1024 < t_r < 10000 or 1024 < t_c < 10000:
+ if t_r > t_c:
+ q_r = 1024
+ q_c = int(t_c/t_r * 1024)
+ else:
+ q_c = 1024
+ q_r = int(t_r/t_c * 1024)
+ else:
+ q_r = t_r
+ q_c = t_c
+
+ if color_img is True:
+ # 生成伪彩色图
+ img = np.zeros((t_r, t_c, 3), dtype=np.uint8) # (高,宽,维度)
+ u = np.unique(t_data)
+ for i in u:
+ if i != 0:
+ w = np.where(t_data == i)
+ img[w[0], w[1], 0] = np.random.randint(0, 255) # 随机生成一个0到255之间的整数 可以通过挑参数设定不同的颜色范围
+ img[w[0], w[1], 1] = np.random.randint(0, 255)
+ img[w[0], w[1], 2] = np.random.randint(0, 255)
+
+ img = cv2.resize(img, (q_c, q_r)) # (宽,高)
+ cv2.imwrite(quick_view_path, img)
+ # cv2.imshow("result4", img)
+ # cv2.waitKey(0)
+ else:
+ # 灰度图
+ min = np.percentile(t_data, 2) # np.nanmin(t_data)
+ max = np.percentile(t_data, 98) # np.nanmax(t_data)
+ t_data[np.isnan(t_data)] = max
+ # if (max - min) < 256:
+ t_data = (t_data - min) / (max - min) * 255
+ out_img = Image.fromarray(t_data)
+ out_img = out_img.resize((q_c, q_r)) # 重采样
+ out_img = out_img.convert("L") # 转换成灰度图
+ out_img.save(quick_view_path)
+
+ def limit_field(self, out_path, in_path, min_value, max_value):
+ """
+ :param out_path:输出路径
+ :param in_path:主mask路径,输出影像采用主mask的地理信息
+ :param min_value
+ :param max_value
+ """
+ proj = self.get_projection(in_path)
+ geotrans = self.get_geotransform(in_path)
+ array = self.get_band_array(in_path, 1)
+ array[array < min_value] = min_value
+ array[array > max_value] = max_value
+ self.write_img(out_path, proj, geotrans, array)
+ return True
+
+ def band_merge(self, lon, lat, ori_sim):
+ lon_arr = self.get_data(lon)
+ lat_arr = self.get_data(lat)
+ temp = np.zeros((2, lon_arr.shape[0], lon_arr.shape[1]), dtype=float)
+ temp[0, :, :] = lon_arr[:, :]
+ temp[1, :, :] = lat_arr[:, :]
+ self.write_img(ori_sim, '', [0.0, 1.0, 0.0, 0.0, 0.0, 1.0], temp, '0')
+
+
+ def get_scopes(self, ori_sim):
+ ori_sim_data = self.get_data(ori_sim)
+ lon = ori_sim_data[0, :, :]
+ lat = ori_sim_data[1, :, :]
+
+ min_lon = np.nanmin(np.where((lon != 0) & ~np.isnan(lon), lon, np.inf))
+ max_lon = np.nanmax(np.where((lon != 0) & ~np.isnan(lon), lon, -np.inf))
+ min_lat = np.nanmin(np.where((lat != 0) & ~np.isnan(lat), lat, np.inf))
+ max_lat = np.nanmax(np.where((lat != 0) & ~np.isnan(lat), lat, -np.inf))
+
+ scopes = [[min_lon, max_lat], [max_lon, max_lat], [min_lon, min_lat], [max_lon, min_lat]]
+ return scopes
+
+ def get_center_scopes(self, dataset):
+ if dataset is None:
+ return None
+
+ img_geotrans = dataset.GetGeoTransform()
+ if img_geotrans is None:
+ return None
+
+ width = dataset.RasterXSize # 栅格矩阵的列数
+ height = dataset.RasterYSize # 栅格矩阵的行数
+
+ x_split = int(width/5)
+ y_split = int(height/5)
+ img_col_start = x_split * 1
+ img_col_end = x_split * 3
+ img_row_start = y_split * 1
+ img_row_end = y_split * 3
+ cols = img_col_end - img_col_start
+ rows = img_row_end - img_row_start
+ if cols > 10000 or rows > 10000:
+ img_col_end = img_col_start + 10000
+ img_row_end = img_row_start + 10000
+
+ point_upleft = self.trans_rowcol2geo(img_geotrans, img_col_start, img_row_start)
+ point_upright = self.trans_rowcol2geo(img_geotrans, img_col_end, img_row_start)
+ point_downleft = self.trans_rowcol2geo(img_geotrans, img_col_start, img_row_end)
+ point_downright = self.trans_rowcol2geo(img_geotrans, img_col_end, img_row_end)
+
+        return [point_upleft, point_upright, point_downleft, point_downright]
+
+    def write_view(self, tif_path, color_img=False, quick_view_path=None):
+        """
+        Generate a quick-view image; by default it sits next to the tif with the same name.
+        :param tif_path: image path
+        :param color_img: whether to generate a random pseudo-colour image
+        :param quick_view_path: quick-view output path
+        """
+ if quick_view_path is None:
+ quick_view_path = os.path.splitext(tif_path)[0]+'.jpg'
+
+ n = self.get_bands(tif_path)
+ if n == 1: # 单波段
+ t_data = self.get_data(tif_path)
+ else: # 多波段,转为强度数据
+ t_data = self.get_data(tif_path)
+ t_data = t_data.astype(float)
+ t_data = np.sqrt(t_data[0] ** 2 + t_data[1] ** 2)
+ t_data[np.isnan(t_data)] = 0
+ t_data[np.where(t_data == -9999)] = 0
+ t_r = self.get_img_height(tif_path)
+ t_c = self.get_img_width(tif_path)
+ q_r = t_r
+ q_c = t_c
+
+ if color_img is True:
+ # 生成伪彩色图
+ img = np.zeros((t_r, t_c, 3), dtype=np.uint8) # (高,宽,维度)
+ u = np.unique(t_data)
+ for i in u:
+ if i != 0:
+ w = np.where(t_data == i)
+ img[w[0], w[1], 0] = np.random.randint(0, 255) # 随机生成一个0到255之间的整数 可以通过挑参数设定不同的颜色范围
+ img[w[0], w[1], 1] = np.random.randint(0, 255)
+ img[w[0], w[1], 2] = np.random.randint(0, 255)
+
+ img = cv2.resize(img, (q_c, q_r)) # (宽,高)
+ cv2.imwrite(quick_view_path, img)
+ # cv2.imshow("result4", img)
+ # cv2.waitKey(0)
+ else:
+ # 灰度图
+ min = np.percentile(t_data, 2) # np.nanmin(t_data)
+ max = np.percentile(t_data, 98) # np.nanmax(t_data)
+ # if (max - min) < 256:
+ t_data = (t_data - min) / (max - min) * 255
+ out_img = Image.fromarray(t_data)
+ out_img = out_img.resize((q_c, q_r)) # 重采样
+ out_img = out_img.convert("L") # 转换成灰度图
+ out_img.save(quick_view_path)
+
+ return quick_view_path
+
+ @staticmethod
+ def get_inc_angle(inc_xml, rows, cols, out_path):
+ tree = ElementTree()
+ tree.parse(inc_xml) # 影像头文件
+ root = tree.getroot()
+ values = root.findall('incidenceValue')
+ angle_value = [value.text for value in values]
+ angle_value = np.array(angle_value)
+ inc_angle = np.tile(angle_value, (rows, 1))
+ ImageHandler.write_img(out_path, '', [0.0, 1.0, 0.0, 0.0, 0.0, 1.0], inc_angle)
+
+ pass
+
+
+if __name__ == '__main__':
+ fn = r"D:\micro\SWork\LandCover\Temporary\processing\product\HJ2E_KSC_QPS_006084_E86.2_N44.4_20231116_SLC_AHV_L10000132474-Ortho-LANDCLASS.tif"
+ ImageHandler().write_quick_view(fn)
+# path = r'D:\BaiduNetdiskDownload\GZ\lon.rdr'
+# path2 = r'D:\BaiduNetdiskDownload\GZ\lat.rdr'
+# path3 = r'D:\BaiduNetdiskDownload\GZ\lon_lat.tif'
+# s = ImageHandler().band_merge(path, path2, path3)
+# print(s)
+# pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/ml/MonteCarloSampling.py b/Ortho-NoS1GBM/tool/algorithm/ml/MonteCarloSampling.py
new file mode 100644
index 0000000..00af9cc
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/ml/MonteCarloSampling.py
@@ -0,0 +1,185 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:SalinityMain.py
+@File:MonteCarloSampling.py
+@Function:optimal feature selection based on Monte Carlo random sampling
+@Contact:
+@Author:SHJ
+@Date:2021/10/19 11:30
+@Version:1.0.0
+"""
+import numpy as np
+from numpy import random
+import matplotlib.pyplot as plt
+import seaborn as sns
+import logging
+logger = logging.getLogger("mylog")
+
+
+def api_sel_feature(x_list, iter=100, alpha=0.5, ts=-0.5, iter_ratio=0.2):
+    """
+    :para x_list: training samples of a single feature for k classes
+          [X1,X2,X3,...,Xi,...,Xk], Xi = np.array([x1,x2,x3...xn]),
+          where class i has n training samples
+    :para iter: number of iterations
+    :para alpha: tuning factor
+    :para ts: threshold on com_sep_coef
+    :para iter_ratio: threshold on the iteration ratio
+    :return: True - the feature is strongly related to the classes,
+             False - the feature is weakly related to the classes
+    """
+ com_sep_coef_old = cal_com_sep_coef(x_list, alpha)
+ # print('com_sep_coef_old:', com_sep_coef_old)
+ if com_sep_coef_old < ts:
+ return False, com_sep_coef_old
+
+    X = np.zeros(1)  # x_list concatenated into a single row vector X
+    x_len_list = []  # record the split positions between classes (for np.split)
+    num_sampler = 0  # total number of samples
+    t = 0
+    flag = 0
+    for x in x_list:
+        len_x = len(x)
+        if t == 0:
+            X = x
+            x_len_list.append(len_x)
+        else:
+            X = np.hstack([X, x])
+            x_len_list.append(x_len_list[t - 1] + len_x)
+        num_sampler += len_x
+        t += 1
+    x_len_list.pop()
+    num = int(np.ceil(num_sampler / 3))
+
+    for i in range(iter):
+        # generate a random index array
+        randmtx = np.random.rand(1, num)
+        randmtx_ceil = np.ceil(randmtx * num_sampler).astype(int)
+        randmtx_ceil = np.sort(randmtx_ceil[0, :]) - 1
+
+        # randomly pick values, permute them, and write them back to form a new array
+        X_new_sel = X.copy()
+        X_new_sel[randmtx_ceil] = np.random.permutation(X[randmtx_ceil])
+
+        X_new_list = np.split(X_new_sel, x_len_list)
+        com_sep_coef_new = cal_com_sep_coef(X_new_list, alpha)
+        if com_sep_coef_new <= com_sep_coef_old:
+            flag += 1
+    # print('com_sep_coef_new:', com_sep_coef_new)
+    logger.info('flag:' + str(flag) + ', iter:' + str(iter) + ', flag/iter:' + str(int(flag)/int(iter)))
+ if flag > (iter * iter_ratio):
+ return False, com_sep_coef_old
+ return True, com_sep_coef_old
+
+def cal_com_coef(x_list):
+    """
+    :para x_list: training samples of a single feature for k classes [X1,...,Xk],
+          Xi = np.array([x1,x2,x3...xn]), where class i has n training samples
+    :return com_coef: compactness coefficient (within-class aggregation)
+    """
+ class_num = len(x_list)
+ coef_array = np.full((1, class_num), 0.0)
+ for m in range(class_num):
+ sample_num = len(x_list[m])
+ c = np.full((1, sample_num), 0.0)
+ for u in range(sample_num):
+ l = np.full((1, sample_num), x_list[m][u])
+ c[0, u] = np.sum(np.abs(l - x_list[m]))
+ coef_array[0, m] = np.sum(c) / (sample_num * (sample_num - 1))
+ com_coef = np.sum(coef_array) / class_num
+ return com_coef
+
+def cal_sep_coef(x_list):
+    """
+    :para x_list: training samples of a single feature for k classes [X1,...,Xk],
+          Xi = np.array([x1,x2,x3...xn]), where class i has n training samples
+    :return sep_coef: separation coefficient (between-class scatter)
+    """
+ class_num = len(x_list)
+ coef_list = []
+ coef_sum = 0
+    for m in range(class_num):
+        xm = np.expand_dims(x_list[m], 1)  # expand once, outside the inner loop
+        l_xm = len(x_list[m])
+        for n in range(class_num):
+            if not n == m:
+                xn = x_list[n]
+                l_xn = len(xn)
+                coef_list.append(np.sum(np.abs(xm - xn)) / (l_xm * l_xn))
+ for coef in coef_list:
+ coef_sum = coef_sum + coef
+
+ if class_num == 1 or class_num == 0:
+ sep_coef = coef_sum
+ else:
+ sep_coef = coef_sum / (class_num * (class_num - 1))
+ return sep_coef
+
+def cal_com_sep_coef(x_list, alpha = 0.5):
+    """
+    :para x_list: training samples of a single feature for k classes [X1,...,Xk],
+          Xi = np.array([x1,x2,x3...xn]), where class i has n training samples
+    :para alpha: tuning factor
+    :return com_sep_coef: combined compactness-separation coefficient
+    """
+    if not (0 <= alpha <= 1):
+        raise ValueError('input_para_alpha beyond [0,1]!')
+ com_coef = cal_com_coef(x_list)
+ sep_coef = cal_sep_coef(x_list)
+ com_sep_coef = alpha * com_coef - (1-alpha) * sep_coef
+ return com_sep_coef
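+# Worked example: for x_list = [np.array([1, 1.1]), np.array([2, 2.1, 2.2]),
+# np.array([3, 3.4, 3.1])] (the toy data in the commented-out block at the
+# bottom of this file), com_coef ~ 0.167 and sep_coef ~ 1.411, so
+# cal_com_sep_coef(x_list, 0.5) ~ -0.622: compact, well-separated classes
+# score strongly negative.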
+
+def get_logistic_rand_number(num, u=0.4):  # deprecated
+    randmtx = np.full((1, num), 0.0)
+    # randmtx[0,0] = np.random.rand(1, 1)  # random initial value
+    randmtx[0, 0] = 0.5  # initial value
+
+ for i in range(1, num):
+ randmtx[0, i] = u * randmtx[0, i-1]*(1-randmtx[0, i-1])
+ randmtx = randmtx * 3 * num
+ randmtx_ceil = np.ceil(randmtx)
+
+ # 绘制随机数分布图
+ # randmty = np.arange(0,num,1)
+ # randmty = np.expand_dims( randmty, 1)
+ # fig, axes = plt.subplots(1, 1, figsize=(5, 5))
+ # axes.scatter(randmty, randmtx_ceil, alpha=.3, label='ground truth')
+ # axes.legend()
+ # plt.tight_layout()
+ # plt.show()
+ return randmtx_ceil
+
+def test():
+    '''test random-number generation'''
+    # insertion
+    # a = np.array([3.4, 2.5, 1.8, 4.7, 5.6, 2.1])
+    # b = np.array([2.5, 4.7, 5.6])
+    # c = a[[0,1]]
+    # a[[0,1]] = np.array([1, 1])
+
+    # random permutation (shuffle needs an array argument)
+    arr = np.arange(10)
+    random.shuffle(arr)
+
+    # logistic random numbers
+    sns.distplot(random.normal(scale=2, size=1000), hist=False, label='normal')
+    sns.distplot(random.logistic(loc=2, scale=0.5, size=1000), hist=False, label='logistic')
+    plt.show()
+
+    # plot the random numbers
+ randmtx = random.logistic(loc=0.5, scale=0.5, size=100)
+ randmtx.sort(axis=0)
+ randmty = np.arange(0,100,1)
+ randmty = np.expand_dims(randmty, 1)
+ fig, axes = plt.subplots(1, 1, figsize=(5, 5))
+ axes.scatter(randmty, randmtx, alpha=.3, label='ground truth')
+ axes.legend()
+ plt.tight_layout()
+ plt.show()
+
+# if __name__ == '__main__':
+ # 例子
+ # x1 = np.array([1, 1.1])
+ # x2 = np.array([2, 2.1, 2.2])
+ # x3 = np.array([3, 3.4, 3.1])
+ # x_list = [x1, x2, x3]
+ # com_sep_coef = cal_com_sep_coef(x_list, 0.5)
+ # flag = api_sel_feature(x_list)
+ # print('done')
+
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/ml/machineLearning.py b/Ortho-NoS1GBM/tool/algorithm/ml/machineLearning.py
new file mode 100644
index 0000000..c70a675
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/ml/machineLearning.py
@@ -0,0 +1,421 @@
+import sklearn  # imported so packaging (e.g. PyInstaller) picks up the module
+import sklearn.utils  # needed for packaging
+import sklearn.utils._cython_blas  # needed for packaging
+import sklearn.utils._weight_vector  # needed for packaging
+import sklearn.neighbors  # needed for packaging
+import sklearn.neighbors._typedefs  # needed for packaging
+import sklearn.neighbors._partition_nodes  # needed for packaging
+import sklearn.neighbors._quad_tree  # needed for packaging
+import sklearn.tree._utils  # needed for packaging
+from sklearn.cross_decomposition import PLSRegression
+from sklearn.ensemble import ExtraTreesClassifier
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.svm import SVC
+import numpy as np
+from scipy.stats import pearsonr
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.algorithm.block.blockprocess import BlockProcess
+import logging
+import os
+import glob
+from PIL import Image
+from tool.file.fileHandle import fileHandle
+import multiprocessing
+logger = logging.getLogger("mylog")
+file = fileHandle()
+
+class MachineLeaning:
+    """
+    Machine learning utilities.
+    """
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def gene_optimal_train_set(train_data_dic, feature_tif_dir, important_threshold=0.3, correlation_threshold=0.7): # todo 修改特征重要性
+ ml = MachineLeaning()
+ name_list = ml.get_name_list(feature_tif_dir)
+ X_train, Y_train = ml.gene_train_set(train_data_dic, feature_tif_dir)
+ optimal_feature = ml.sel_optimal_feature_set(X_train, Y_train, threshold=important_threshold)
+ optimal_feature = ml.remove_correlation_feature(X_train, optimal_feature, threshold=correlation_threshold)
+ X_train = X_train[:, optimal_feature]
+ logger.info('train_feature:%s', np.array(name_list)[optimal_feature])
+ return X_train, Y_train, optimal_feature
+
+    @staticmethod
+ def sel_optimal_feature(X_train, Y_train, name_list,important_threshold=0.3, correlation_threshold=0.7):
+ ml = MachineLeaning()
+ optimal_feature = ml.sel_optimal_feature_set(X_train, Y_train, threshold=important_threshold)
+ optimal_feature = ml.remove_correlation_feature(X_train, optimal_feature, threshold=correlation_threshold)
+ X_train = X_train[:, optimal_feature]
+ logger.info('train_feature:%s', np.array(name_list)[optimal_feature])
+ return X_train, Y_train, optimal_feature
+
+ @staticmethod
+    def gene_test_set(feature_tif_dir, optimal_feature):
+        """
+        Build the test set.
+        :param feature_tif_dir: folder of feature images
+        :param optimal_feature: optimal feature subset
+        :return X_test_list: paths of the blocked test-set images
+        """
+ in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
+ cols = ImageHandler.get_img_width(in_tif_paths[0])
+ rows = ImageHandler.get_img_height(in_tif_paths[0])
+ workspace_block_tif_path = os.path.join(feature_tif_dir, 'block')
+ workspace_block_feature_path = os.path.join(feature_tif_dir, 'feature')
+ file.creat_dirs([workspace_block_tif_path, workspace_block_feature_path])
+
+        # cut the feature images into tiles
+ bp = BlockProcess()
+ block_size = bp.get_block_size(rows, cols)
+
+ bp.cut(feature_tif_dir, workspace_block_tif_path, ['tif', 'tiff'], 'tif', block_size)
+ img_dir, img_name = bp.get_file_names(workspace_block_tif_path, ['tif'])
+ dir_dict_all = bp.get_same_img(img_dir, img_name)
+
+        # keep only the feature images that belong to the optimal subset
+        dir_dict = {}
+ for n, key in zip(range(len(dir_dict_all)), dir_dict_all):
+ if n in optimal_feature:
+ dir_dict.update({key: dir_dict_all[key]})
+ logger.info('test_feature:%s', dir_dict.keys())
+ logger.info('blocking tifs success!')
+ X_test_list = []
+        # merge the feature dimension: stack the selected tiles into one array
+ for key in dir_dict:
+ key_name = key
+ block_num = len(dir_dict[key])
+ break
+ for n in range(block_num):
+ name = os.path.basename(dir_dict[key_name][n])
+ suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
+ features_path = os.path.join(workspace_block_feature_path, "features" + suffix) # + "\\features" + suffix
+ X_test_list.append(features_path)
+ features_array = np.zeros((len(dir_dict), block_size, block_size), dtype='float32')
+ for m, value in zip(range(len(dir_dict)), dir_dict.values()):
+ features_array[m, :, :] = ImageHandler.get_band_array(value[n])
+            features_array[np.isnan(features_array)] = 0.0  # replace invalid values with 0
+ ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
+ logger.info('create features matrix success!')
+ # file.del_folder(workspace_block_tif_path)
+ # file.del_folder(workspace_block_feature_path)
+ return X_test_list
+
+ @staticmethod
+ def predict_blok(clf, X_test, rows, cols, img_path, row_begin, col_begin, block_sum, n):
+ logger.info('total:%s,block:%s testing data !path:%s', block_sum, n, img_path)
+
+ Y_test = clf.predict(X_test)
+ img = Y_test.reshape(rows, cols)
+ out_image = Image.fromarray(img)
+ out_image.save(img_path)
+ # bp = BlockProcess()
+ # bp.assign_spatial_reference_bypoint(row_begin, col_begin, self.__proj, self.__geo, img_path)
+ # sr = osr.SpatialReference()
+ # sr.ImportFromWkt(self.__proj)
+ # geo_transform = (self.__geo[0] + col_begin * self.__geo[1] + row_begin * self.__geo[2],
+ # self.__geo[1],
+ # self.__geo[2],
+ # self.__geo[3] + col_begin * self.__geo[4] + row_begin * self.__geo[5],
+ # self.__geo[4],
+ # self.__geo[5]
+ # )
+ # dst_ds = gdal.Open(img_path, gdal.GA_Update)
+ # if dst_ds is None:
+ # return False
+ # dst_ds.SetProjection(sr.ExportToWkt())
+ # dst_ds.SetGeoTransform(geo_transform)
+ # del dst_ds
+ logger.info('total:%s,block:%s test data finished !path:%s', block_sum, n, img_path)
+ return True
+
+ @staticmethod
+    def predict(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols):
+        """
+        Run prediction.
+        :param clf: trained classifier (e.g. an SVM)
+        :param X_test_list: paths of the blocked test-set images
+        :return: path of the merged prediction image
+        """
+ ml = MachineLeaning()
+ # 开启多进程处理
+ bp = BlockProcess()
+ block_size = bp.get_block_size(rows, cols)
+
+ block_features_dir = X_test_list
+ bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name + '\\') # workspace_processing_path + out_tif_name + '\\'
+ file.creat_dirs([bp_cover_dir])
+
+ processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 1])
+ pool = multiprocessing.Pool(processes=processes_num)
+
+ for path, n in zip(block_features_dir, range(len(block_features_dir))):
+ name = os.path.split(path)[1]
+ band = ImageHandler.get_bands(path)
+ if band == 1:
+ features_array = np.zeros((1, 1024, 1024), dtype=float)
+ feature_array = ImageHandler.get_data(path)
+ features_array[0, :, :] = feature_array
+ else:
+ features_array = ImageHandler.get_data(path)
+ X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
+
+ suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + name.split('_')[-1]
+ img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
+ row_begin = int(name.split('_')[-4])
+ col_begin = int(name.split('_')[-2])
+ pool.apply_async(ml.predict_blok, (clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
+
+ pool.close()
+ pool.join()
+
+ # 合并影像
+ data_dir = bp_cover_dir
+ out_path = workspace_processing_path[0:-1]
+ bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
+
+ # 添加地理信息
+ cover_path = os.path.join(workspace_processing_path, out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
+ # bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
+ return cover_path
+
+ @staticmethod
+    def predict_VP(clf, X_test_list, out_tif_name, workspace_processing_path, rows, cols):
+        """
+        Run prediction.
+        :param clf: trained classifier (e.g. an SVM)
+        :param X_test_list: paths of the blocked test-set images
+        :return: path of the merged prediction image
+        """
+ ml = MachineLeaning()
+ # 开启多进程处理
+ bp = BlockProcess()
+ block_size = bp.get_block_size(rows, cols)
+
+ block_features_dir = X_test_list
+ bp_cover_dir = os.path.join(workspace_processing_path, out_tif_name,
+ 'pre_result\\') # workspace_processing_path + out_tif_name + '\\'
+ file.creat_dirs([bp_cover_dir])
+
+ processes_num = min([len(block_features_dir), multiprocessing.cpu_count() - 1])
+ pool = multiprocessing.Pool(processes=processes_num)
+
+ for path, n in zip(block_features_dir, range(len(block_features_dir))):
+ name = os.path.split(path)[1]
+ features_array = ImageHandler.get_data(path)
+
+ X_test = np.reshape(features_array, (features_array.shape[0], features_array[0].size)).T
+
+ suffix = '_' + name.split('_')[-4] + "_" + name.split('_')[-3] + "_" + name.split('_')[-2] + "_" + \
+ name.split('_')[-1]
+ img_path = os.path.join(bp_cover_dir, out_tif_name + suffix) # bp_cover_dir + out_tif_name + suffix
+ row_begin = int(name.split('_')[-4])
+ col_begin = int(name.split('_')[-2])
+ pool.apply_async(ml.predict_blok, (
+ clf, X_test, block_size, block_size, img_path, row_begin, col_begin, len(block_features_dir), n))
+
+ pool.close()
+ pool.join()
+
+ # 合并影像
+ data_dir = bp_cover_dir
+ out_path = workspace_processing_path[0:-1]
+ bp.combine(data_dir, cols, rows, out_path, file_type=['tif'], datetype='float32')
+
+ # 添加地理信息
+ cover_path = os.path.join(workspace_processing_path,
+ out_tif_name + ".tif") # workspace_processing_path + out_tif_name + ".tif"
+ # bp.assign_spatial_reference_byfile(self.__ref_img_path, cover_path)
+ return cover_path
+
+ @staticmethod
+ def get_name_list(feature_tif_dir):
+ in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
+ name_list = []
+ dim = len(in_tif_paths)
+ for n, path in zip(range(dim), in_tif_paths):
+ name_list.append(str(n)+': '+os.path.split(path)[1])
+ logger.info('feature_list:%s', name_list)
+ return name_list
+
+
+ @staticmethod
+    def gene_train_set(train_data_dic, feature_tif_dir):
+        """
+        Build the training set.
+        :param train_data_dic: training data read from csv
+        :param feature_tif_dir: folder of feature images
+        :return X_train, Y_train: training data
+        """
+ in_tif_paths = list(glob.glob(os.path.join(feature_tif_dir, '*.tif')))
+ dim = len(in_tif_paths)
+ X_train = np.empty(shape=(0, dim))
+ Y_train = np.empty(shape=(0, 1))
+
+ ids = train_data_dic['ids']
+ positions = train_data_dic['positions']
+ for id, points in zip(ids, positions):
+ # for data in train_data_list:
+ if points == []:
+ raise Exception('data is empty!')
+ row, col = zip(*points)
+ l = len(points)
+ X = np.empty(shape=(l, dim))
+
+ for n, tif_path in zip(range(dim), in_tif_paths):
+ feature_array = ImageHandler.get_data(tif_path)
+ feature_array[np.isnan(feature_array)] = 0 # 异常值填充为0
+ x = feature_array[row, col].T
+ X[:, n] = x
+
+ Y = np.full((l, 1), id)
+ X_train = np.vstack((X_train, X))
+ Y_train = np.vstack((Y_train, Y))
+ Y_train = Y_train.T[0, :]
+
+ logger.info("gene_train_set success!")
+ return X_train, Y_train
+
+ @staticmethod
+    def standardization(data, num=1):
+        # normalise the matrix to [0, num]
+        min = np.nanmin(data)
+        max = np.nanmax(data)
+        data[np.isnan(data)] = min  # fill NaNs with the minimum
+        _range = max - min
+        return (data - min) / _range * num
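+    # e.g. standardization(np.array([2., 4., 6.])) -> array([0. , 0.5, 1. ])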
+
+ @staticmethod
+    def sel_optimal_feature_set(X_train, Y_train, threshold=0.01):
+        """
+        Select the optimal feature subset (extremely randomized trees).
+        """
+ model = ExtraTreesClassifier()
+ max = np.max(Y_train)
+ if max < 0.1:
+ Y_train = (Y_train*10000).astype('int')
+ model.fit(X_train, Y_train.astype('int'))
+ # select the relative importance of each attribute
+ importances = model.feature_importances_
+ logger.info('importances:%s,threshold=%s', importances, threshold)
+
+        importances_resort = -np.sort(-importances)  # sorted in descending order
+        imp_argsort = np.argsort(-importances)  # indices in descending order
+
+        optimal_feature = list(imp_argsort[np.where(importances_resort > threshold)])  # drop low-importance features
+ logger.info('optimal_feature:%s', optimal_feature)
+
+ if len(optimal_feature)==0:
+ logger.error('optimal_feature is empty')
+ optimal_feature = list(imp_argsort)
+ return optimal_feature
+
+ @staticmethod
+ def correlation_map(x, y):
+ # https://blog.csdn.net/weixin_39836726/article/details/110783640
+ # cc matrix based on scipy pearsonr
+ n_row_x = x.shape[0]
+ n_row_y = x.shape[0]
+ ccmtx_xy = np.empty((n_row_x, n_row_y))
+ for n in range(n_row_x):
+ for m in range(n_row_y):
+ ccmtx_xy[n, m] = pearsonr(x[n, :], y[m, :])[0]
+ return ccmtx_xy
+
+ @staticmethod
+    def remove_correlation_feature(X_train, validity_list, threshold=0.85):
+        """
+        Correlation suppression: drop highly correlated features.
+        :param X_train: training set
+        :param validity_list: optimal feature subset
+        :param threshold: correlation threshold
+        :return validity_list: optimal feature subset
+        """
+ ccmtx = MachineLeaning().correlation_map(X_train[:, validity_list].T, X_train[:, validity_list].T)
+ ccmtx = np.abs(ccmtx)
+ for r in range(len(validity_list)):
+ for c in range(len(validity_list)):
+ if c <= r:
+ ccmtx[r, c] = 0
+ logger.info('correlation_map:\n %s', ccmtx)
+ # for pairs whose correlation exceeds the threshold, drop the lower-ranked feature
+ high_corr = np.unique(np.where(ccmtx > threshold)[1])  # indices of the features to drop
+ validity_list = np.delete(validity_list, high_corr)
+ logger.info('validity_list_corr:%s', validity_list)
+ logger.info(validity_list)
+ return validity_list
+
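+ # Usage sketch (added for illustration; the names below are this class's own methods, the inputs
+ # are assumed to come from gene_train_set above):
+ #   X_train, Y_train = MachineLeaning.gene_train_set(train_data_dic, feature_tif_dir)
+ #   optimal = MachineLeaning.sel_optimal_feature_set(X_train, Y_train, threshold=0.01)
+ #   optimal = MachineLeaning.remove_correlation_feature(X_train, optimal, threshold=0.85)
+ #   clf = MachineLeaning.trainRF(X_train[:, optimal], Y_train)
+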
+ @staticmethod
+ def gene_train_data(block_features_dir,rows,cols,block_size,measured_data_img):
+ # build the training set from the measured sample points
+ X_train = []
+ Y_train = []
+
+ block_rows = int(np.ceil(rows/block_size))
+ block_cols = int(np.ceil(cols/block_size))
+
+ for data, n in zip(measured_data_img, range(len(measured_data_img))):
+ row = data[0]
+ col = data[1]
+ block_row = row//block_size
+ block_col = col//block_size
+
+ if block_row == block_rows-1:
+ part_img_row = row - (rows - block_size)
+ else:
+ part_img_row = row % block_size
+
+ if block_col == block_cols-1:
+ part_img_col = col - (cols-block_size)
+ else:
+ part_img_col = col % block_size
+
+ features_path = block_features_dir[block_row*block_cols + block_col]  # assumes the block list is ordered row by row
+ features_array = ImageHandler().get_data(features_path)
+
+ feature = features_array[:, part_img_row, part_img_col]
+ if not (np.isnan(feature).any() or np.isinf(feature).any()):
+ X_train.append(list(feature))
+ Y_train.append([data[2]])
+ logger.info('total:%s,num:%s create train set success!', len(measured_data_img), n)
+ return np.array(X_train), np.array(Y_train)
+
+ @staticmethod
+ def trainRF(X_train, Y_train):
+ # random forest
+ logger.info('RF training')
+ clf = RandomForestClassifier()
+ clf.fit(X_train, Y_train)
+ return clf
+
+ @staticmethod
+ def trainSVM(X_train, Y_train, cost=1, kernel='rbf'):
+ logger.info('svm training')
+ # build the classifier with the requested cost/kernel (the original code constructed a second,
+ # parametrized SVC and discarded it, so cost and kernel were silently ignored)
+ clf = SVC(C=cost, cache_size=1000, class_weight='balanced', coef0=0.0, decision_function_shape='ovo',
+ degree=3, gamma='auto', kernel=kernel, max_iter=-1, probability=False, random_state=None,
+ shrinking=True, tol=0.001, verbose=True)
+ clf.fit(X_train, Y_train)
+ return clf
+
+ @staticmethod
+ def vegetationPhenology_combine_feature(feature_dir, workspace_processing_path, name, rows, cols, debug=False):
+ ml = MachineLeaning()
+ path_list = list(glob.glob(os.path.join(feature_dir, '*.tif')))
+ # merge the per-feature bands into one multi-band image
+ name_featuresPath_dic = {}
+ dim = len(path_list)
+ features_path = workspace_processing_path + name + "/" + name + '_features.tif'
+ if debug == False:
+ features_array = np.zeros((dim, rows, cols), dtype='float16')
+ for m, path in zip(range(dim), path_list):
+ data = ImageHandler.get_data(path)
+ data = ml.standardization(data)
+ features_array[m, :, :] = data
+ # set abnormal values to 0
+ features_array[np.isnan(features_array)] = 0.0
+ features_array[np.isinf(features_array)] = 0.0
+ ImageHandler.write_img(features_path, '', [0, 0, 0, 0, 0, 0], features_array)
+ name_featuresPath_dic.update({name: features_path})
+ return name_featuresPath_dic
+
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/AHVToPolsarpro.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/AHVToPolsarpro.py
new file mode 100644
index 0000000..ef60b54
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/AHVToPolsarpro.py
@@ -0,0 +1,491 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:AHVToPolsarpro.py
+@Function:Convert full-polarization imagery to PolSARpro-format T3 data
+@Contact:
+@Author:SHJ
+@Date:2021/9/18 16:44
+@Version:1.0.0
+"""
+import os
+import numpy as np
+import glob
+import struct
+from tool.algorithm.image.ImageHandle import ImageHandler
+
+
+class AHVToPolsarpro:
+ """
+ Convert full-polarization imagery to a bin-format T3 matrix for PolSARpro processing
+ """
+
+ def __init__(self, hh_hv_vh_vv_path_list=[]):
+ self._hh_hv_vh_vv_path_list = hh_hv_vh_vv_path_list
+
+ @staticmethod
+ def __ahv_to_s2_veg(ahv_dir):
+ """
+ Convert full-polarization imagery to the S2 scattering matrix
+ :param ahv_dir: directory of the full-polarization imagery
+ :return: polarimetric scattering matrix S2
+ """
+ in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
+ in_tif_paths += in_tif_paths1
+ s11, s12, s21, s22 = None, None, None, None
+ flag_list = [0, 0, 0, 0]
+ for in_tif_path in in_tif_paths:
+
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+
+ # identify the polarization type
+ if '_HH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s11 = data_real + 1j * data_imag
+ flag_list[0] = 1
+ elif '_HV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s12 = data_real + 1j * data_imag
+ flag_list[1] = 1
+ elif '_VH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s21 = data_real + 1j * data_imag
+ flag_list[2] = 1
+ elif '_VV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s22 = data_real + 1j * data_imag
+ flag_list[3] = 1
+ else:
+ continue
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('HH or HV or VH or VV is not in path: %s' % ahv_dir)
+ return s11, s12, s21, s22
+
+ @staticmethod
+ def __ahv_to_s2_soil(ahv_dir):
+ """
+ Convert full-polarization imagery to the S2 scattering matrix
+ :param ahv_dir: directory of the full-polarization imagery
+ :return: polarimetric scattering matrix S2
+ """
+ in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
+ in_tif_paths += in_tif_paths1
+ s11, s12, s21, s22 = None, None, None, None
+ flag_list = [0, 0, 0, 0]
+ for in_tif_path in in_tif_paths:
+
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+
+ # identify the polarization type
+ if 'HH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s11 = data_real + 1j * data_imag
+ flag_list[0] = 1
+ elif 'HV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s12 = data_real + 1j * data_imag
+ flag_list[1] = 1
+ elif 'VH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s21 = data_real + 1j * data_imag
+ flag_list[2] = 1
+ elif 'VV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s22 = data_real + 1j * data_imag
+ flag_list[3] = 1
+ else:
+ continue
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('HH or HV or VH or VV is not in path: %s' % ahv_dir)
+ return s11, s12, s21, s22
+
+ @staticmethod
+ def __ahv_to_s2_list(ahv_path_list):
+ """
+ Convert full-polarization imagery to the S2 scattering matrix
+ :param ahv_path_list: list of full-polarization image paths
+ :return: polarimetric scattering matrix S2
+ """
+ in_tif_paths = ahv_path_list
+ s11, s12, s21, s22 = None, None, None, None
+ flag_list = [0, 0, 0, 0]
+ for in_tif_path in in_tif_paths:
+
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+
+ # identify the polarization type
+ if 'HH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s11 = data_real + 1j * data_imag
+ flag_list[0] = 1
+ elif 'HV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s12 = data_real + 1j * data_imag
+ flag_list[1] = 1
+ elif 'VH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s21 = data_real + 1j * data_imag
+ flag_list[2] = 1
+ elif 'VV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s22 = data_real + 1j * data_imag
+ flag_list[3] = 1
+ else:
+ continue
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('HH or HV or VH or VV is not in path')
+ return s11, s12, s21, s22
+
+
+ @staticmethod
+ def __ahv_to_s2_list_2(hh_hv_vh_vv_path_list):
+ """
+ Convert full-polarization imagery to the S2 scattering matrix
+ :param hh_hv_vh_vv_path_list: image paths ordered HH, HV, VH, VV
+ :return: polarimetric scattering matrix S2
+ """
+ in_tif_paths = hh_hv_vh_vv_path_list
+ s11, s12, s21, s22 = None, None, None, None
+ flag_list = [0, 0, 0, 0]
+ for in_tif_path, n in zip(in_tif_paths, range(len(in_tif_paths))):
+
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+
+ # the polarization type is fixed by the list order: HH, HV, VH, VV
+ if n == 0:
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s11 = data_real + 1j * data_imag
+ flag_list[0] = 1
+ elif n == 1:
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s12 = data_real + 1j * data_imag
+ flag_list[1] = 1
+ elif n == 2:
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s21 = data_real + 1j * data_imag
+ flag_list[2] = 1
+ elif n == 3:
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s22 = data_real + 1j * data_imag
+ flag_list[3] = 1
+ else:
+ continue
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('HH or HV or VH or VV is not in path')
+ return s11, s12, s21, s22
+
+ @staticmethod
+ def __s2_to_t3(s11, s12, s21, s22):
+ """
+ Convert the S2 matrix to the T3 matrix
+ :param s11: HH polarization data
+ :param s12: HV polarization data
+ :param s21: VH polarization data
+ :param s22: VV polarization data
+ :return: polarimetric coherency matrix T3
+ """
+ HH = s11
+ HV = s12
+ VH = s21
+ VV = s22
+
+ t11 = (np.abs(HH + VV)) ** 2 / 2
+ t12 = (HH + VV) * np.conj(HH - VV) / 2
+ t13 = (HH + VV) * np.conj(HV + VH)
+
+ t21 = (HH - VV) * np.conj(HH + VV) / 2
+ t22 = np.abs(HH - VV) ** 2 / 2
+ t23 = (HH - VV) * np.conj(HV + VH)
+
+ t31 = (HV + VH) * np.conj(HH + VV)
+ t32 = (HV + VH) * np.conj(HH - VV)
+ t33 = 2 * np.abs(HV + VH) ** 2
+ return t11, t12, t13, t21, t22, t23, t31, t32, t33
+
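+ # Note (added for context): the terms above correspond to T3 = <k * conj(k).T> for the Pauli
+ # scattering vector k = 1/sqrt(2) * [HH + VV, HH - VV, HV + VH]; the absolute scaling applied to
+ # the cross-polar terms (t13, t23, t31, t32, t33) follows this module's own convention.
+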
+ def __t3_to_polsarpro_t3(self, out_dir, t11, t12, t13, t22, t23, t33):
+ """
+ Convert the T3 matrix to bin format for PolSARpro processing
+ :param out_dir: output directory
+ :param t11:
+ :param t12:
+ :param t13:
+ :param t22:
+ :param t23:
+ :param t33:
+ :return: bin-format T3 matrix files and headers
+ """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+
+ rows = t11.shape[0]
+ cols = t11.shape[1]
+ bins_dict = {
+ 'T11.bin': t11,
+ 'T12_real.bin': t12.real,
+ 'T12_imag.bin': t12.imag,
+ 'T13_real.bin': t13.real,
+ 'T13_imag.bin': t13.imag,
+ 'T22.bin': t22,
+ 'T23_real.bin': t23.real,
+ 'T23_imag.bin': t23.imag,
+ 'T33.bin': t33}
+
+ for name, data in bins_dict.items():
+ bin_path = os.path.join(out_dir, name)
+ self.__write_img_bin(data, bin_path)  # TODO: revisit how the T3 matrix is saved
+ # data.tofile(bin_path)
+ out_hdr_path = bin_path + '.hdr'
+ self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
+
+ self.__write_config_file(out_dir, rows, cols)
+
+ def rows(self):
+ """获取影像行数"""
+ return self._rows
+
+ def cols(self):
+ """获取影像列数"""
+ return self._cols
+
+ def __write_img_bin(self, im, file_path):
+ """
+ Write an image to a bin file as float32
+ :param im : image matrix (only single-band data supported for now)
+ :param file_path: full path of the bin file
+ """
+ with open(file_path, 'wb') as f:
+ self._rows = im.shape[0]
+ self._cols = im.shape[1]
+ for row in range(self._rows):
+ im_bin = struct.pack("f" * self._cols, *np.reshape(im[row, :], (self._cols, 1), order='F'))
+ f.write(im_bin)
+
+ @staticmethod
+ def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
+ """
+ Write the ENVI header file for an image
+ :param out_hdr_path : path of the header file
+ :param bin_path: path of the bin file
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ h1 = 'ENVI'
+ h2 = 'description = {'
+ h3 = 'File Imported into ENVI. }'
+ h4 = 'samples = ' + str(cols)  # columns
+ h5 = 'lines = ' + str(rows)  # rows
+ h6 = 'bands = 1 '  # number of bands
+ h7 = 'header offset = 0'
+ h8 = 'file type = ENVI Standard'
+ h9 = 'data type = 4'  # data type (4 = float32)
+ h10 = 'interleave = bsq'  # interleave format
+ h11 = 'sensor type = Unknown'
+ h12 = 'byte order = 0'
+ h13 = 'band names = {'
+ h14 = bin_path + '}'
+ # h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
+ # doc = open(out_hdr_path, 'w')
+ # for i in range(0, 14):
+ # print(h[i], end='', file=doc)
+ # print('\n', end='', file=doc)
+ h = [h1, h4, h5, h6, h7, h8, h9, h10, h12]
+ doc = open(out_hdr_path, 'w')
+ for i in range(0, 9):
+ print(h[i], end='', file=doc)
+ print('\n', end='', file=doc)
+ doc.close()
+
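+ # For reference, a header produced by this writer looks like (sizes here are made-up examples):
+ #   ENVI
+ #   samples = 1000
+ #   lines = 800
+ #   bands = 1
+ #   header offset = 0
+ #   file type = ENVI Standard
+ #   data type = 4
+ #   interleave = bsq
+ #   byte order = 0
+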
+ @staticmethod
+ def __write_config_file(out_config_dir, rows, cols):
+ """
+ Write the PolSARpro config file
+ :param out_config_dir : directory to write the config file into
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ h1 = 'Nrow'
+ h2 = str(rows)
+ h3 = '---------'
+ h4 = 'Ncol'
+ h5 = str(cols)
+ h6 = '---------'
+ h7 = 'PolarCase'
+ h8 = 'monostatic'
+ h9 = '---------'
+ h10 = 'PolarType'
+ h11 = 'full'
+ h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]
+
+ out_config_path = os.path.join(out_config_dir, 'config.txt')
+ doc = open(out_config_path, 'w')
+ for i in range(0, 11):
+ print(h[i], end='', file=doc)
+ print('\n', end='', file=doc)
+ doc.close()
+
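+ # For reference, the config.txt produced by this writer looks like (sizes are made-up examples):
+ #   Nrow
+ #   800
+ #   ---------
+ #   Ncol
+ #   1000
+ #   ---------
+ #   PolarCase
+ #   monostatic
+ #   ---------
+ #   PolarType
+ #   full
+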
+ def incidence_tif2bin(self, incidence_file, out_path):
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
+ incidence_bin = os.path.join(out_path, 'incidence.bin')
+ data = ImageHandler().get_data(incidence_file)
+ rows = data.shape[0]
+ cols = data.shape[1]
+ self.__write_img_bin(data, incidence_bin)
+ if not os.path.exists(incidence_bin):
+ raise Exception('incidence to bin failed')
+ out_hdr_path = incidence_bin + '.hdr'
+ self.__write_bin_hdr(out_hdr_path, incidence_bin, rows, cols)
+ return incidence_bin
+
+ def ahv_to_polsarpro_t3_veg(self, out_file_dir, in_ahv_dir=''):
+
+ if self._hh_hv_vh_vv_path_list == [] :
+ s11, s12, s21, s22 = self.__ahv_to_s2_veg(in_ahv_dir)
+ else:
+ s11, s12, s21, s22 = self.__ahv_to_s2_list_2(self._hh_hv_vh_vv_path_list)
+
+ t11, t12, t13, t21, t22, t23, t31, t32, t33 = self.__s2_to_t3(
+ s11, s12, s21, s22)
+
+ self.__t3_to_polsarpro_t3(out_file_dir, t11, t12, t13, t22, t23, t33)
+
+
+ def ahv_to_polsarpro_t3_soil(self, out_file_dir, in_ahv_dir=''):
+
+ if self._hh_hv_vh_vv_path_list == [] :
+ s11, s12, s21, s22 = self.__ahv_to_s2_soil(in_ahv_dir)
+ else:
+ s11, s12, s21, s22 = self.__ahv_to_s2_list_2(self._hh_hv_vh_vv_path_list)
+
+ t11, t12, t13, t21, t22, t23, t31, t32, t33 = self.__s2_to_t3(
+ s11, s12, s21, s22)
+
+ self.__t3_to_polsarpro_t3(out_file_dir, t11, t12, t13, t22, t23, t33)
+
+ def calibration(self, calibration_value, in_ahv_dir='', name=''):
+ if name == '':
+ out_dir = os.path.join(in_ahv_dir, 'calibration')
+ else:
+ out_dir = os.path.join(in_ahv_dir, name, 'calibration')
+ flag_list = [0, 0, 0, 0]
+ if self._hh_hv_vh_vv_path_list == []:  # land cover / soil salinity products
+ in_tif_paths = list(glob.glob(os.path.join(in_ahv_dir, '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(in_ahv_dir, '*.tiff')))
+ in_tif_paths += in_tif_paths1
+ for in_tif_path in in_tif_paths:
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+ name = os.path.basename(in_tif_path)
+ data_new = np.zeros(data.shape)
+ # identify the polarization type
+ if 'HH' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[0]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[0]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[0] = 1
+ elif 'HV' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[1]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[1]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[1] = 1
+ elif 'VH' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[2]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[2]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[2] = 1
+ elif 'VV' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[3]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[3]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[3] = 1
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('calibration error! ')
+ else:
+ for in_tif_path in self._hh_hv_vh_vv_path_list:  # vegetation phenology products
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+ name = os.path.basename(in_tif_path)
+ data_new = np.zeros(data.shape)
+
+ # identify the polarization type
+ if '_HH' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[0]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[0]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[0] = 1
+ elif '_HV' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[1]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[1]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[1] = 1
+ elif '_VH' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[2]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[2]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[2] = 1
+ elif '_VV' in os.path.basename(in_tif_path):
+ data_new[0, :, :] = data[0, :, :] * calibration_value[3]
+ data_new[1, :, :] = data[1, :, :] * calibration_value[3]
+ ImageHandler.write_img(os.path.join(out_dir, name), proj, geotrans, data_new)
+ flag_list[3] = 1
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('calibration error! ')
+ self._hh_hv_vh_vv_path_list = []
+ return out_dir
+
+
+
+if __name__ == '__main__':
+ # Example 1:
+ # atp = AHVToPolsarpro()
+ # ahv_path = 'D:\\DATA\\GAOFEN3\\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\\'
+ # # ahv_path = 'D:\\DATA\\GAOFEN3\\2598957_Paris\\'
+ # out_file_path = 'D:\\bintest0923\\'
+ # atp.ahv_to_polsarpro_t3(out_file_path, ahv_path)
+
+ # # polarimetric decomposition to obtain the T3 matrix
+ # atp = AHVToPolsarpro()
+ # ahv_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024_RPC"
+ # t3_path = ahv_path + 'psp_t3\\'
+ # atp.ahv_to_polsarpro_t3(t3_path, ahv_path)
+
+ # Example 2:
+ # dir = r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\preprocessed/'
+ # path_list = [dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HH_preprocessed.tif',
+ # dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HV_preprocessed.tif',
+ # dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VH_preprocessed.tif',
+ # dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VV_preprocessed.tif']
+ #
+ #
+ # atp = AHVToPolsarpro(path_list)
+ # atp.ahv_to_polsarpro_t3(r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\processing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC/t3')
+
+ print("done")
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/AHVToPolsarproS2.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/AHVToPolsarproS2.py
new file mode 100644
index 0000000..1b6fa83
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/AHVToPolsarproS2.py
@@ -0,0 +1,228 @@
+"""
+@Project :microproduct
+@File :AHVToPolsarproS2.py
+@Function :Convert the four polarization channels into S2 matrix files
+@Author :LMM
+@Date :2021/10/19 14:39
+@Version :1.0.0
+"""
+import os
+import numpy as np
+import glob
+import struct
+from tool.algorithm.image.ImageHandle import ImageHandler
+
+
+class AHVToPolsarproS2:
+ """
+ Convert full-polarization imagery to a bin-format S2 matrix for PolSARpro processing
+ """
+ def __init__(self):
+
+ pass
+
+ @staticmethod
+ def __ahv_to_s2(ahv_dir):
+ """
+ Convert full-polarization imagery to the S2 scattering matrix
+ :param ahv_dir: directory of the full-polarization imagery
+ :return: polarimetric scattering matrix S2
+ """
+ in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tif')))
+
+ if in_tif_paths == []:
+ in_tif_paths = list(glob.glob(os.path.join(ahv_dir, '*.tiff')))
+ s11, s12, s21, s22 = None,None,None,None
+ flag_list = [0, 0, 0, 0]
+ for in_tif_path in in_tif_paths:
+
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+
+ # identify the polarization type
+ if 'HH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]  # first band (real part)
+ data_imag = data[1, :, :]  # second band (imaginary part)
+ s11 = data_real + 1j * data_imag
+ flag_list[0] = 1
+ elif 'HV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s12 = data_real + 1j * data_imag
+ flag_list[1] = 1
+ elif 'VH' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s21 = data_real + 1j * data_imag
+ flag_list[2] = 1
+ elif 'VV' in os.path.basename(in_tif_path):
+ data_real = data[0, :, :]
+ data_imag = data[1, :, :]
+ s22 = data_real + 1j * data_imag
+ flag_list[3] = 1
+ else:
+ continue
+ if not flag_list == [1, 1, 1, 1]:
+ raise Exception('tif of HH or HV or VH or VV is not in path: %s' % ahv_dir)
+ return s11, s12, s21, s22
+
+ def __s2_to_bin(self, out_dir, s11, s12, s21, s22):
+ """
+ Convert the S2 matrix to bin format for PolSARpro processing
+ :param out_dir: output directory
+ :param s11:
+ :param s12:
+ :param s21:
+ :param s22:
+ :return: bin-format S2 matrices and header files
+ """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+
+ rows = s11.shape[0]
+ cols = s11.shape[1]
+ bins_dict = {'s11.bin': s11,
+ 's12.bin': s12,
+ 's21.bin': s21,
+ 's22.bin': s22}
+
+
+ for name, data in bins_dict.items():
+
+ bin_path = os.path.join(out_dir, name)
+ self.__write_slc_img_bin(data, bin_path,name)
+ out_hdr_path = bin_path+'.hdr'
+ self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
+
+ self.__write_config_file(out_dir, rows, cols)
+
+ @staticmethod
+ def __write_slc_img_bin(im, file_path,name):
+ """
+ Write a complex image to a bin file as interleaved float32 (real, imag)
+ :param im : complex image matrix (only single-band data supported for now)
+ :param file_path: full path of the bin file
+ """
+ with open(file_path, 'wb') as f:
+ rows = im.shape[0]
+ cols = im.shape[1]
+ cre_im = np.zeros((rows, 2*cols), dtype=float)
+ cre_im[:, ::2] = im.real  # store the real part
+ cre_im[:, 1::2] = im.imag  # store the imaginary part
+ for row in range(rows):
+ cre_im_bin = struct.pack("f" * 2*cols, *np.reshape(cre_im[row, :], (2*cols, 1), order='F'))
+ f.write(cre_im_bin)
+
+ @staticmethod
+ def read_slc_bin_to_img(bin_path):
+ """
+ Read bin-format binary data and return it as an array
+ :param bin_path : path of the bin file (a config.txt is expected alongside)
+ :return : image array (band 0 = real part, band 1 = imaginary part)
+ """
+ (bin_dir, bin_name) = os.path.split(bin_path)
+ config_path = os.path.join(bin_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+ rows = int(config[1])
+ cols = int(config[4])
+
+ bin_file = open(bin_path, 'rb')  # open the binary file
+ size = os.path.getsize(bin_path)  # get the file size
+ if size < rows * cols * 4 * 2:
+ raise Exception(
+ 'bin size less than rows*cols*4*2! size:',
+ size,
+ 'byte, rows:',
+ rows,
+ 'cols:',
+ cols)
+
+ bin_data = np.zeros([rows, cols*2], dtype=np.float32)
+ img_array = np.zeros([2,rows, cols], dtype=np.float32)
+ for row in range(rows):
+ data = bin_file.read(4 * cols * 2)  # read one row of binary data at a time
+ row_data = struct.unpack('f' * cols*2, data)  # unpack into one row of floats
+ bin_data[row, :] = row_data
+ bin_file.close()
+ img_array[0] = bin_data[:, ::2] # real
+ img_array[1] = bin_data[:, 1::2] # imag
+ return img_array
+
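+ # Layout note (added for clarity): each row on disk stores interleaved float32 pairs
+ # [re0, im0, re1, im1, ...]; an equivalent vectorized read, assuming the same config.txt,
+ # would be something like:
+ #   raw = np.fromfile(bin_path, dtype=np.float32, count=rows * cols * 2).reshape(rows, cols * 2)
+ #   img = np.stack([raw[:, ::2], raw[:, 1::2]])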
+
+ @staticmethod
+ def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
+ """
+ Write the ENVI header file for an image
+ :param out_hdr_path : path of the header file
+ :param bin_path: path of the bin file
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ h1 = 'ENVI'
+ h2 = 'description = {'
+ h3 = 'ENVI File, Created [] }'
+ h4 = 'samples = ' + str(cols)  # columns
+ h5 = 'lines = ' + str(rows)  # rows
+ h6 = 'bands = 1 '  # number of bands
+ h7 = 'header offset = 0'
+ h8 = 'file type = ENVI Standard'
+ h9 = 'data type = 6'  # data type (6 = complex float32)
+ h10 = 'interleave = bsq'  # interleave format
+ h11 = 'sensor type = Unknown'
+ h12 = 'byte order = 0'
+ h13 = 'wavelength units = Unknown'
+ h14 = 'complex function = Power'
+ h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
+ doc = open(out_hdr_path, 'w')
+ for i in range(0, 14):
+ print(h[i], end='', file=doc)
+ print('\n', end='', file=doc)
+ doc.close()
+
+ @staticmethod
+ def __write_config_file(out_config_dir, rows, cols):
+ """
+ Write the PolSARpro config file
+ :param out_config_dir : directory to write the config file into
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ h1 = 'Nrow'
+ h2 = str(rows)
+ h3 = '---------'
+ h4 = 'Ncol'
+ h5 = str(cols)
+ h6 = '---------'
+ h7 = 'PolarCase'
+ # h8 = 'monostatic'
+ h8 = 'bistatic'
+ h9 = '---------'
+ h10 = 'PolarType'
+ h11 = 'full'
+ h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]
+
+ out_config_path = os.path.join(out_config_dir, 'config.txt')
+ doc = open(out_config_path, 'w')
+ for i in range(0, 11):
+ print(h[i], end='', file=doc)
+ print('\n', end='', file=doc)
+ doc.close()
+
+ def api_ahv_to_polsarpro_s2(self, out_file_dir, in_ahv_dir):
+
+ s11, s12, s21, s22 = self.__ahv_to_s2(in_ahv_dir)
+
+ self.__s2_to_bin(out_file_dir, s11, s12, s21, s22)
+
+
+# if __name__ == '__main__':
+# # test()
+# atp = AHVToPolsarproS2()
+# ahv_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087'
+# # ahv_path = 'D:\\DATA\\GAOFEN3\\2598957_Paris\\'
+# out_file_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\SLC_SHJ_2'
+# atp.api_ahv_to_polsarpro_s2(out_file_path, ahv_path)
+# bin_path = r'D:\DATA\GAOFEN3\2-GF3_MYN_WAV_020086_E107.2_N27.6_20200603_L1A_AHV_L10004843087\SLC_SHJ\s11.bin'
+# # data = atp.read_slc_bin_to_img(bin_path)
+# print("done")
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/DualPolarToPolsarproC2.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/DualPolarToPolsarproC2.py
new file mode 100644
index 0000000..ca0950b
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/DualPolarToPolsarproC2.py
@@ -0,0 +1,196 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:DualPolarToPolsarproC2.py
+@Function:Convert dual-polarization imagery to PolSARpro-format C2 data
+@Contact:
+@Author:SHJ
+@Date:2021/11/5
+@Version:1.0.0
+"""
+import os
+import numpy as np
+import glob
+import struct
+import gc
+from tool.algorithm.image.ImageHandle import ImageHandler
+
+
+class DualPolarToPolsarproC2:
+ """
+ Convert dual-polarization imagery to a bin-format C2 matrix for PolSARpro processing
+ """
+
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def __dual_polar_to_c2(dual_polar_dir):
+ """
+ Convert dual-polarization imagery to the C2 matrix
+ :param dual_polar_dir: directory of the dual-polarization imagery
+ :return: C2 matrix components
+ """
+ in_tif_paths = list(glob.glob(os.path.join(dual_polar_dir, '*.tif')))
+
+ if in_tif_paths == []:
+ in_tif_paths = list(glob.glob(os.path.join(dual_polar_dir, '*.tiff')))
+ s11, s22 = None, None
+ flag_list = [0, 0, 0, 0]
+ for in_tif_path in in_tif_paths:
+ # read the original SAR image
+ proj, geotrans, data = ImageHandler.read_img(in_tif_path)
+ # identify the polarization type
+ if 'HH' in os.path.basename(in_tif_path):
+ s11 = data[0, :, :] + 1j * data[1, :, :]
+ flag_list[0] = 1
+ elif 'HV' in os.path.basename(in_tif_path):
+ s22 = data[0, :, :] + 1j * data[1, :, :]
+ flag_list[1] = 1
+ elif 'VH' in os.path.basename(in_tif_path):
+ s22 = data[0, :, :] + 1j * data[1, :, :]
+ flag_list[2] = 1
+ elif 'VV' in os.path.basename(in_tif_path):
+ s11 = data[0, :, :] + 1j * data[1, :, :]
+ flag_list[3] = 1
+ else:
+ continue
+ del data
+ gc.collect()
+
+ if flag_list != [1, 1, 0, 0] and flag_list != [0, 0, 1, 1]:
+ raise Exception('Dual-Polarization SAR is not in path: %s' % dual_polar_dir)
+
+ c11 = np.abs(s11) ** 2
+ c12 = s11 * np.conj(s22)
+ del s11
+ gc.collect()
+ c22 = np.abs(s22) ** 2
+ return c11, c12, c22
+
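+ # Note (added for clarity): for the dual-pol pair (co-pol s11, cross-pol s22) the C2 matrix is
+ #     C2 = [ <|s11|^2>          <s11 * conj(s22)>
+ #            <s22 * conj(s11)>  <|s22|^2>         ]
+ # and only c11, c12, c22 need to be stored, since c21 = conj(c12).
+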
+ def __c2_to_polsarpro_c2(self, out_dir, c11, c12, c22):
+ """
+ Convert the C2 matrix to bin format for PolSARpro processing
+ :param out_dir: output directory
+ :param c11:
+ :param c12:
+ :param c22:
+ :return: bin-format C2 matrices and header files
+ """
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+
+ rows = c11.shape[0]
+ cols = c11.shape[1]
+ bins_dict = {
+ 'C11.bin': c11,
+ 'C12_real.bin': c12.real,
+ 'C12_imag.bin': c12.imag,
+ 'C22.bin': c22}
+
+ for name, data in bins_dict.items():
+ bin_path = os.path.join(out_dir, name)
+ self.__write_img_bin(data, bin_path)
+ out_hdr_path = bin_path + '.hdr'
+ self.__write_bin_hdr(out_hdr_path, bin_path, rows, cols)
+
+ self.__write_config_file(out_dir, rows, cols)
+
+ def rows(self):
+ """获取影像行数"""
+ return self._rows
+
+ def cols(self):
+ """获取影像列数"""
+ return self._cols
+
+ def __write_img_bin(self, im, file_path):
+ """
+ Write an image to a bin file as float32
+ :param im : image matrix (only single-band data supported for now)
+ :param file_path: full path of the bin file
+ """
+ with open(file_path, 'wb') as f:
+ self._rows = im.shape[0]
+ self._cols = im.shape[1]
+ for row in range(self._rows):
+ im_bin = struct.pack("f" * self._cols, *np.reshape(im[row, :], (self._cols, 1), order='F'))
+ f.write(im_bin)
+
+ @staticmethod
+ def __write_bin_hdr(out_hdr_path, bin_path, rows, cols):
+ """
+ Write the ENVI header file for an image
+ :param out_hdr_path : path of the header file
+ :param bin_path: path of the bin file
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ name = os.path.split(bin_path)[1]
+ h1 = 'ENVI'
+ h2 = 'description = {'
+ h3 = 'File Imported into ENVI. }'
+ h4 = 'samples = ' + str(cols)  # columns
+ h5 = 'lines = ' + str(rows)  # rows
+ h6 = 'bands = 1 '  # number of bands
+ h7 = 'header offset = 0'
+ h8 = 'file type = ENVI Standard'
+ h9 = 'data type = 4'  # data type (4 = float32)
+ h10 = 'interleave = bsq'  # interleave format
+ h11 = 'sensor type = Unknown'
+ h12 = 'byte order = 0'
+ h13 = 'band names = {'
+ h14 = name + '}'
+ h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14]
+ doc = open(out_hdr_path, 'w')
+ for i in range(0, 14):
+ print(h[i], end='', file=doc)
+ print('\n', end='', file=doc)
+ doc.close()
+
+ @staticmethod
+ def __write_config_file(out_config_dir, rows, cols):
+ """
+ Write the PolSARpro config file
+ :param out_config_dir : directory to write the config file into
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ h1 = 'Nrow'
+ h2 = str(rows)
+ h3 = '---------'
+ h4 = 'Ncol'
+ h5 = str(cols)
+ h6 = '---------'
+ h7 = 'PolarCase'
+ h8 = 'monostatic'
+ h9 = '---------'
+ h10 = 'PolarType'
+ h11 = 'pp1'
+ h = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11]
+
+ out_config_path = os.path.join(out_config_dir, 'config.txt')
+ doc = open(out_config_path, 'w')
+ for i in range(0, 11):
+ print(h[i], end='', file=doc)
+ print('\n', end='', file=doc)
+ doc.close()
+
+
+ def api_dual_polar__to_polsarpro_c2(self, out_file_dir, dual_polar_dir):
+ c11, c12, c22 = self.__dual_polar_to_c2(dual_polar_dir)
+ self.__c2_to_polsarpro_c2(out_file_dir,c11, c12, c22)
+
+
+# if __name__ == '__main__':
+# tp = DualPolarToPolsarproC2()
+# out_dic = 'E:\\3-GF3_KAS_FSI_020253_E110.8_N25.5_20200614_L1A_HHHV_L10004871459\\SLC_SHJ1'
+# in_dic = 'E:\\3-GF3_KAS_FSI_020253_E110.8_N25.5_20200614_L1A_HHHV_L10004871459\\'
+# # out_file_path = 'D:\\bintest0923\\'
+# tp.api_dual_polar__to_polsarpro_c2(out_dic,in_dic)
+# # atp.ahv_to_polsarpro_t3(out_file_path, ahv_path)
+#
+# print("done")
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/GLCM_当前没用到灰度共生矩阵特征.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/GLCM_当前没用到灰度共生矩阵特征.py
new file mode 100644
index 0000000..01d86dd
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/GLCM_当前没用到灰度共生矩阵特征.py
@@ -0,0 +1,97 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :onestar
+@File :GLDM.py
+@Contact:
+computing image features with scikit-image: https://blog.csdn.net/lyxleft/article/details/102904909
+how to convolve over a 2-D image in python: https://www.xz577.com/j/281686.html
+computing gray-level co-occurrence matrices with skimage: https://zhuanlan.zhihu.com/p/147066037
+@function :Compute the gray-level co-occurrence matrix (GLCM) of an image
+@Author :SHJ
+@Date :2021/11/10 14:42
+@Version :1.0.0
+"""
+import numpy as np
+import os
+from skimage.feature import greycomatrix, greycoprops
+import datetime
+from tool.algorithm.image.ImageHandle import ImageHandler
+
+class GLDM:
+ def __init__(self,win_size = 15, step=2,levels=16,angles=[0,45,90,135],
+ prop=['contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation', 'ASM']):
+ self._win_size = win_size  # window size for the GLCM computation, must be odd
+ self._step = step  # step (pixel-pair distance)
+ self._levels = levels  # number of gray levels, e.g. 16 or 256
+ self._angles = list(np.deg2rad(np.array(angles)))  # angles, in radians
+ """
+ 'contrast': reflects image clarity and how deep the texture grooves are
+ 'dissimilarity': dissimilarity
+ 'homogeneity': measures how little the texture varies locally; large values mean locally uniform texture
+ 'energy': sum of squared GLCM elements; reflects the uniformity of the gray distribution and texture coarseness
+ 'correlation': measures the similarity of GLCM elements along rows or columns
+ 'ASM': angular second moment
+ """
+ self._prop = prop  # names of the texture features
+
+ def get_glcm_value(self,input):
+ values_temp = []
+ # compute the GLCM
+ # parameters: image matrix, distances, angles, gray levels, symmetric, normed
+ # para2: [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4] computes four directions; a single direction also works
+ glcm = greycomatrix(input, [self._step], self._angles, self._levels, symmetric=False, normed=True)
+ # print(glcm.shape)
+ # loop over the texture descriptors
+ for prop in self._prop:
+ temp = greycoprops(glcm, prop)
+ # print(temp)
+ values_temp.append(np.mean(temp))
+ return values_temp
+
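+ # Toy check (taken from the scikit-image docs; added here as an illustration of
+ # greycomatrix/greycoprops, not part of the original pipeline):
+ #   img = np.array([[0, 0, 1, 1],
+ #                   [0, 0, 1, 1],
+ #                   [0, 2, 2, 2],
+ #                   [2, 2, 3, 3]], dtype=np.uint8)
+ #   glcm = greycomatrix(img, [1], [0], levels=4, symmetric=False, normed=True)
+ #   greycoprops(glcm, 'contrast')  # -> array of shape (1, 1)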
+
+ def get_glcm_array(self,inputs: np.ndarray, win_size):
+ h, w = inputs.shape
+ pad = (win_size - 1) // 2
+ inputs = np.pad(inputs, pad_width=[(pad, pad), (pad, pad)], mode="constant", constant_values=0)
+ glcm_array ={}
+ for name in self._prop:
+ glcm_array.update({name:np.zeros(shape=(h, w),dtype=np.float32)})
+
+ for i in range(h):  # row index
+ for j in range(w):  # column index
+ window = inputs[i: i + win_size, j: j + win_size]
+ value = self.get_glcm_value(window)
+ print('i:%s, j:%s' % (i, j))
+ # print(value)
+ for n,array in zip(range(len(glcm_array)),glcm_array.values()):
+ array[i,j] = value[n]
+ return glcm_array
+
+ @staticmethod
+ def standardization(data, num=1):
+ # normalize the matrix to [0, num]
+ data[np.isnan(data)] = np.min(data)  # fill NaN values with the minimum
+ _range = np.max(data) - np.min(data)
+ return (data - np.min(data)) / _range * num
+
+ def api_get_glcm_array(self,out_dir,in_tif_path,name=''):
+
+ ih = ImageHandler()
+ proj, geotrans, array = ih.read_img(in_tif_path)
+ array[np.where(array > 500000)] = 500000  # clip very large values so normalization does not squeeze most values toward 0
+ array = self.standardization(array, self._levels-1)  # normalize to 0~(self._levels-1)
+ array = np.uint8(array)
+ glcm_array = self.get_glcm_array(array, self._win_size)
+ for key,value in glcm_array.items():
+ out_path = os.path.join(out_dir,name+'_'+key+'.tif')
+ ih.write_img(out_path, proj, geotrans,value)
+
+if __name__ == '__main__':
+ start = datetime.datetime.now()
+ gldm = GLDM(win_size=9,levels=16,step=3,angles=[0,45,90,135])
+ gldm.api_get_glcm_array(r'D:\glcm', r'D:\glcm\src_img.tif')
+ end = datetime.datetime.now()
+ msg = 'running use time: %s ' % (end - start)
+ print(msg)
+
+ # a 666*720 image takes a running time of about 0:04:23.155424
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/bin2tif.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/bin2tif.py
new file mode 100644
index 0000000..a970584
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/bin2tif.py
@@ -0,0 +1,85 @@
+import os
+import glob
+import numpy as np
+import struct
+from PIL import Image
+from tool.algorithm.ml.machineLearning import MachineLeaning as ml
+
+
+def read_bin_to_img(bin_path):
+ """
+ Read bin-format binary data and return it as a matrix
+ :param bin_path : path of the bin file (a config.txt is expected alongside)
+ :return : image matrix
+ """
+ (bin_dir, bin_name) = os.path.split(bin_path)
+ config_path = os.path.join(bin_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+ rows = int(config[1])
+ cols = int(config[4])
+
+ bin_file = open(bin_path, 'rb')  # open the binary file
+ size = os.path.getsize(bin_path)  # get the file size
+ if size < rows * cols * 4:
+ raise Exception(
+ 'bin size less than rows*cols*4! size:',
+ size,
+ 'byte, rows:',
+ rows,
+ 'cols:',
+ cols)
+
+ img = np.zeros([rows, cols], dtype=np.float32)
+ for row in range(rows):
+ data = bin_file.read(4 * cols)  # read one row of binary data at a time
+ row_data = struct.unpack('f' * cols, data)  # unpack into one row of floats
+ img[row, :] = row_data
+ bin_file.close()
+ return img
+
+def write_bin_to_tif(out_tif_dir, bin_dir):
+ """
+ Read H-A-Alpha decomposition binary data and write each band out as a tif
+ :param out_tif_dir : output directory for the tifs
+ :param bin_dir : directory of the binary data (.bin plus config.txt)
+ :return out_tif_path: dict of the generated tif paths
+ """
+ bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
+ out_tif_path = {}
+ for in_path in bin_paths:
+ name = os.path.split(in_path)[1].split('.')[0]
+ out_path = os.path.join(out_tif_dir, name + '.tif')
+ out_tif_path.update({name: out_path})
+ if os.path.exists(os.path.split(out_path)[0]) is False:
+ os.makedirs(os.path.split(out_path)[0])
+ img_array = read_bin_to_img(in_path)
+ img_array[np.isnan(img_array)] = 0  # fill NaN values with 0
+ img_array = ml.standardization(img_array)  # normalize the data to [0,1]
+ out_image = Image.fromarray(img_array)
+ out_image.save(out_path)
+ return out_tif_path
+
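+# Usage sketch (hypothetical paths; bin_dir must contain the *.bin files plus config.txt):
+#   tif_paths = write_bin_to_tif(r'D:\features_tif', r'D:\t3_bins')
+# Note that the tifs written here via PIL carry no projection or geotransform.
+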
+def write_bin_to_tif_soil(out_tif_dir, bin_dir):
+ """
+ Read H-A-Alpha decomposition binary data and write each band out as a tif (no normalization)
+ :param out_tif_dir : output directory for the tifs
+ :param bin_dir : directory of the binary data (.bin plus config.txt)
+ :return out_tif_path: dict of the generated tif paths
+ """
+ bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
+ out_tif_path = {}
+ for in_path in bin_paths:
+ name = os.path.split(in_path)[1].split('.')[0]
+ out_path = os.path.join(out_tif_dir, name + '.tif')
+ out_tif_path.update({name: out_path})
+ if os.path.exists(os.path.split(out_path)[0]) is False:
+ os.makedirs(os.path.split(out_path)[0])
+ img_array = read_bin_to_img(in_path)
+ img_array[np.isnan(img_array)] = 0  # fill NaN values with 0
+ # img_array = ml.standardization(img_array)  # normalize the data to [0,1]
+ out_image = Image.fromarray(img_array)
+ out_image.save(out_path)
+ return out_tif_path
+
+
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/createfeature.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/createfeature.py
new file mode 100644
index 0000000..6640d49
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/createfeature.py
@@ -0,0 +1,190 @@
+from tool.algorithm.algtools.MetaDataHandler import Calibration
+from tool.algorithm.polsarpro.AHVToPolsarpro import AHVToPolsarpro
+from tool.algorithm.polsarpro.pspLeeRefinedFilterT3 import LeeRefinedFilterT3
+from tool.algorithm.polsarpro.pspCloudePottierDecomposition import PspCloudePottierDecomposition
+from tool.algorithm.polsarpro.pspFreemanDecomposition import PspFreemanDecomposition
+from tool.algorithm.polsarpro.pspYamaguchiDecomposition import PspYamaguchiDecomposition
+from tool.algorithm.polsarpro.pspTouziDecomposition import PspTouziDecomposition
+from tool.algorithm.polsarpro.bin2tif import write_bin_to_tif
+from tool.algorithm.polsarpro.pspHAAlphaDecomposition import PspHAAlphaDecomposition
+from tool.algorithm.xml.AlgXmlHandle import InitPara
+import logging
+import os
+import shutil
+import glob
+logger = logging.getLogger("mylog")
+
+class CreateFeature:
+ """
+ Feature generation
+ """
+ def __init__(self, debug=False, exe_dir=''):
+ self._debug = debug
+ self._exe_dir = exe_dir
+
+
+ def ahv_to_t3(self, workspace_processing_path, workspace_preprocessing_path, hh_hv_vh_vv_list, name='', FILTER_SIZE=3):
+ # convert the full-polarization tifs to bin-format T3 data
+ atp = AHVToPolsarpro(hh_hv_vh_vv_list)
+ lee_filter_path = os.path.join(workspace_processing_path, name, 'lee_filter\\') # workspace_processing_path + name + '\\lee_filter\\'
+ if self._debug == False:
+
+ t3_path = os.path.join(workspace_processing_path, name, 'psp_t3\\') # workspace_processing_path + name + '\\psp_t3\\'
+ # atp.ahv_to_polsarpro_t3(t3_path, tif_path)
+
+ polarization = ['HH', 'HV', 'VH', 'VV']
+ if os.path.exists(workspace_preprocessing_path + name + '\\'):
+ meta_xml_paths = list(glob.glob(os.path.join(workspace_preprocessing_path + name, '*.meta.xml')))
+ meta_dic = InitPara.get_meta_dic_new(meta_xml_paths, name)
+ calibration = Calibration.get_Calibration_coefficient(meta_dic['Origin_META'], polarization)
+ tif_path = atp.calibration(calibration, workspace_preprocessing_path, name)
+ atp.ahv_to_polsarpro_t3_veg(t3_path, tif_path)
+
+ # Lee filtering
+ leeFilter = LeeRefinedFilterT3()
+ leeFilter.api_lee_refined_filter_T3('', t3_path, lee_filter_path, 0, 0, atp.rows(), atp.cols(), FILTER_SIZE)
+ logger.info("refine_lee filter success!")
+ return lee_filter_path
+
+ def decompose(self,workspace_processing_path, name, t3_path, rows, cols, hh_hv_vh_vv_dic={},FeatureInput=['Freeman', 'Yamaguchi', 'Cloude']): # , 'Touzi'
+ """
+ Polarimetric decompositions: Freeman, Touzi, Yamaguchi, Cloude
+ :param t3_path: path of the T3 data
+ :param rows: number of image rows
+ :param cols: number of image columns
+ """
+ # compute the feature combination
+ exeDir = self._exe_dir
+ outFolderDic = {}
+ if 'Freeman' in FeatureInput:
+ # Freeman decomposition
+ freemanOutDir = os.path.join(workspace_processing_path, name + '\\freeman\\')
+ if self._debug == False:
+ freemDecom = PspFreemanDecomposition(exeDir, t3_path, freemanOutDir)
+ flag = freemDecom.api_freeman_decomposition_T3(0, 0, rows, cols)
+ if not flag:
+ logger.error('FreemanDecomposition err')
+ return False, None
+ outFolderDic['Freeman'] = freemanOutDir
+
+ # Touzi decomposition
+ if 'Touzi' in FeatureInput:
+
+ touziOutDir = os.path.join(workspace_processing_path, name + '\\touzi\\')
+ if not os.path.exists(touziOutDir):
+ os.makedirs(touziOutDir)
+ if self._debug == False:
+ # Touzi decomposition is time-consuming and its features are comparatively weak
+ p = PspTouziDecomposition(hh_hv_vh_vv_dic, touziOutDir)
+ p.Touzi_decomposition_multiprocessing()
+ outFolderDic['Touzi'] = touziOutDir
+
+ if 'Yamaguchi' in FeatureInput:
+ # Yamaguchi decomposition
+ yamaguchiOutDir = os.path.join(workspace_processing_path, name + '\\yamaguchi\\')
+ if self._debug == False:
+ yamaguchiDecom = PspYamaguchiDecomposition(exeDir, t3_path, yamaguchiOutDir)
+ flag = yamaguchiDecom.api_yamaguchi_4components_decomposition_T3(0, 0, rows, cols)
+ if not flag:
+ logger.error('YamaguchiDecomposition err')
+ return False, None
+ outFolderDic['Yamaguchi'] = yamaguchiOutDir
+
+ if 'Cloude' in FeatureInput:
+ # Cloude-Pottier decomposition
+ cloudeOutDir = os.path.join(workspace_processing_path, name + '\\cloude\\')
+ if self._debug == False:
+ cloudeDecom = PspCloudePottierDecomposition(
+ exeDir, t3_path, cloudeOutDir)
+ flag = cloudeDecom.api_h_a_alpha_decomposition_T3(
+ 0, 0, rows, cols)
+ if not flag:
+ logger.error('CloudePottierDecomposition err')
+ return False, None
+ outFolderDic['Cloude'] = cloudeOutDir
+ return True, outFolderDic
+
+ def creat_h_a_alpha_features(self, t3_path, out_dir):
+ logger.info('ahv transform to polsarpro T3 matrix success!')
+ logger.info('progress bar: 20%')
+ h_a_alpha_decomposition_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_decomposition_T3.exe')
+ h_a_alpha_eigenvalue_set_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_eigenvalue_set_T3.exe')
+ h_a_alpha_eigenvector_set_T3_path = os.path.join(self._exe_dir, 'h_a_alpha_eigenvector_set_T3.exe')
+
+ if self._debug == False:
+ haa = PspHAAlphaDecomposition(normalization=True)
+ haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=out_dir,
+ h_a_alpha_decomposition_T3_path=h_a_alpha_decomposition_T3_path ,
+ h_a_alpha_eigenvalue_set_T3_path=h_a_alpha_eigenvalue_set_T3_path ,
+ h_a_alpha_eigenvector_set_T3_path=h_a_alpha_eigenvector_set_T3_path,
+ polsarpro_in_dir=t3_path)
+
+
+ def cereat_features_dic(self,outFolderDic, feature_tif_dir):
+
+ if not os.path.exists(feature_tif_dir):
+ os.makedirs(feature_tif_dir)
+
+ feature_tif_paths = {}
+ for key in outFolderDic:
+ feature_bin_dic = outFolderDic[key]
+ if key == 'Touzi':
+ for path in list(glob.glob(os.path.join(feature_bin_dic, '*.tif'))):
+ name = os.path.split(path)[1].split('.')[0]
+ if self._debug == False:
+ shutil.copyfile(path, os.path.join(feature_tif_dir, name + '.tif')) # feature_tif_dir + '\\' + name + '.tif')
+ feature_tif_paths.update({name: os.path.join(feature_tif_dir, name + '.tif')}) # feature_tif_dir + '\\' + name + '.tif'
+ else:
+ feature_tif_paths.update(write_bin_to_tif(feature_tif_dir, feature_bin_dic))
+ return feature_tif_paths
+
+ @staticmethod
+ def decompose_single_tar(hh_hv_vh_vv_list, workspace_processing_path, workspace_preprocessing_path, name, exe_dir, rows, cols, FILTER_SIZE = 3, debug =False, FeatureInput=['Freeman', 'Yamaguchi', 'Cloude']):
+ hh_hv_vh_vv_dic = {}
+ hh_hv_vh_vv_dic.update({'HH': hh_hv_vh_vv_list[0]})
+ hh_hv_vh_vv_dic.update({'HV': hh_hv_vh_vv_list[1]})
+ hh_hv_vh_vv_dic.update({'VH': hh_hv_vh_vv_list[2]})
+ hh_hv_vh_vv_dic.update({'VV': hh_hv_vh_vv_list[3]})
+ t3_path = os.path.join(workspace_processing_path, name, "lee_filter") # workspace_processing_path + name + "\\lee_filter"
+ feature_tif_dir = os.path.join(workspace_processing_path, name, 'features') # workspace_processing_path + name + "\\features"
+
+ cfeature = CreateFeature(debug, exe_dir)
+
+ cfeature.creat_h_a_alpha_features(t3_path, feature_tif_dir)
+
+ t3_path = cfeature.ahv_to_t3(workspace_processing_path, workspace_preprocessing_path, hh_hv_vh_vv_list, name, FILTER_SIZE)
+ flag, outFolderDic = cfeature.decompose(workspace_processing_path, name, t3_path, rows, cols, hh_hv_vh_vv_dic, FeatureInput) # , 'Touzi'
+ cfeature.cereat_features_dic(outFolderDic, feature_tif_dir)
+ return feature_tif_dir
+
+if __name__ == '__main__':
+ # # Example 1:
+ # exe_dir = os.getcwd()
+ # dir = r'D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\preprocessed/'
+ # hh_hv_vh_vv_list = [dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HH_preprocessed.tif',
+ # dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_HV_preprocessed.tif',
+ # dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VH_preprocessed.tif',
+ # dir +'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_VV_preprocessed.tif']
+ #
+ # workspace_processing_path= r"D:\MicroWorkspace\product\C-SAR\VegetationPhenology\Temporary\processing/"
+ # name= 'GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC'
+ # hh_hv_vh_vv_dic = {}
+ # hh_hv_vh_vv_dic.update({'HH': hh_hv_vh_vv_list[0]})
+ # hh_hv_vh_vv_dic.update({'HV': hh_hv_vh_vv_list[1]})
+ # hh_hv_vh_vv_dic.update({'VH': hh_hv_vh_vv_list[2]})
+ # hh_hv_vh_vv_dic.update({'VV': hh_hv_vh_vv_list[3]})
+ # t3_path = workspace_processing_path + name + "\\lee_filter"
+ # feature_tif_dir = workspace_processing_path + name + "\\features"
+ #
+ # cfeature = CreateFeature(False, exe_dir)
+ #
+ # cfeature.creat_h_a_alpha_features(t3_path, feature_tif_dir)
+ #
+ # t3_path = cfeature.ahv_to_t3(workspace_processing_path, hh_hv_vh_vv_list, name, 3)
+ # flag, outFolderDic = cfeature.decompose(workspace_processing_path, name, t3_path, 997, 1227, hh_hv_vh_vv_dic, FeatureInput=['Freeman', 'Touzi', 'Yamaguchi', 'Cloude'])
+ #
+ # feature_tifs_dic = cfeature.cereat_features_dic(outFolderDic, feature_tif_dir)
+ pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/polarizationDecomposition.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/polarizationDecomposition.py
new file mode 100644
index 0000000..5e1798d
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/polarizationDecomposition.py
@@ -0,0 +1,2283 @@
+# -*- coding: UTF-8 -*-
+
+from typing import Union
+import os
+from osgeo import gdal
+import numpy as np
+import cv2
+import math
+import cmath
+import scipy.linalg as la
+import struct
+
+"*** ======================================================================================== ***"
+" This program is written to implement the miscellaneous target decomposition theorems in the domain " \
+
+"of polarimetric radar remote sensing using full polarimetric information"
+
+
+"*** Programmer - Raktim Ghosh (MSc, University of Twente) Date Written - May, 2020***"
+
+
+""" List of scattering power decompositions implemented """
+
+"""========================================================================"""
+
+"Touzi Decomposition - Class Name (ModTouzi) *** Including orientation angle"
+
+"Touzi Decomposition - CLass Name (Touzi) *** Excluding orientation angle"
+
+"H/A/Alpha Decompositions (Cloude-Pottier) - Class Name (HAAlpha)"
+
+"Sinclair Decomposition - Class Name (Sinclair)"
+
+"Cloude Decomposition - CLass Name (Cloude)"
+
+"Pauli Decomposition - Class Name (Pauli)"
+
+"Van Zyl Decomposition - Class Name (Vanzyl)"
+
+"FreeMan-Durden Decomposition Class Name (FreeMan)"
+
+"Yamaguchi 4-Component Decomposition Class Name (Yamaguchi2005) *** Original - Introduced Helix Scattering"
+
+"Yamaguchi 4-Component Decomposition Class Name (Yamaguchi2011) *** Modified - Rotated Coherency Matrix"
+
+"General 4-Component Decomposition (Singh) Class Name (General4SD)"
+
+"Model-based 6-Component Decomposition (Singh) Class Name (General6SD) *** Extension of General 4SD"
+
+"Seven Component Decomposition (Singh) Class Name (General7SD) *** Extension of 6SD"
+
+"""========================================================================"""
+
+
+class Polarimetry:
+ def __init__(self, b, w):
+
+ self.__band = b
+ self.__w = w
+ self.__band_list = list()
+ self.__band_list_avg = list()
+
+ """
+ A list is created to append all the full polarimetric channels with conjugates
+ """
+ for item in self.__band:
+ for i in range(1, item.RasterCount + 1):
+ self.__temp = item.GetRasterBand(i).ReadAsArray().astype(float)
+ self.__band_list.append(self.__temp)
+
+ for i in range(len(self.__band_list)):
+ self.__band_list_avg.append(cv2.blur(self.__band_list[i], (self.__w, self.__w)))
+
+ """
+ The private variables consist of the fully polarimetric channels. For a fully polarimetric
+ synthetic aperture radar system, there are four components according to the Sinclair matrix.
+
+ :param s_hh: represents the horizontal-horizontal channel
+ :param s_hh_conj: represents the conjugate of the horizontal-horizontal channel
+ :param s_hv: represents the horizontal-vertical channel
+ :param s_hv_conj: represents the conjugate of the horizontal-vertical channel
+ :param s_vh: represents the vertical-horizontal channel
+ :param s_vh_conj: represents the conjugate of the vertical-horizontal channel
+ :param s_vv: represents the vertical-vertical channel
+ :param s_vv_conj: represents the conjugate of the vertical-vertical channel
+ :param b: represents the object of bands
+ """
+
+ self.__S_hh = self.__band_list_avg[0] + 1j * self.__band_list_avg[1]
+ self.__S_hh_conj = self.__band_list_avg[0] - 1j * self.__band_list_avg[1]
+ self.__S_hv = self.__band_list_avg[2] + 1j * self.__band_list_avg[3]
+ self.__S_hv_conj = self.__band_list_avg[2] - 1j * self.__band_list_avg[3]
+ self.__S_vh = self.__band_list_avg[4] + 1j * self.__band_list_avg[5]
+ self.__S_vh_conj = self.__band_list_avg[4] - 1j * self.__band_list_avg[5]
+ self.__S_vv = self.__band_list_avg[6] + 1j * self.__band_list_avg[7]
+ self.__S_vv_conj = self.__band_list_avg[6] - 1j * self.__band_list_avg[7]
+
+ def get_cov_mat_img(self):
+
+ """
+ This function returns the 3 * 3 covariance matrix based on physically measurable parameters.
+ The covariance matrix consists of 9 components.
+
+ Format of the covariance matrix:
+
+ [ <|Shh|^2>                   2^0.5 * <Shh * conj(Shv)>   <Shh * conj(Svv)>
+ 2^0.5 * <Shv * conj(Shh)>     2 * <|Shv|^2>               2^0.5 * <Shv * conj(Svv)>
+ <Svv * conj(Shh)>             2^0.5 * <Svv * conj(Shv)>   <|Svv|^2> ]
+
+ :return: It returns the nine parameters. s_ij represents the component of the covariance matrix at the
+ ith row and jth column
+ """
+
+ s_11 = abs(self.__S_hh * self.__S_hh_conj)
+ s_12 = np.sqrt(2) * self.__S_hh * self.__S_hv_conj
+ s_13 = self.__S_hh * self.__S_vv_conj
+ s_21 = np.sqrt(2) * self.__S_hv * self.__S_hh_conj
+ s_22 = abs(2 * self.__S_hv * self.__S_hv_conj)
+ s_23 = np.sqrt(2) * self.__S_hv * self.__S_vv_conj
+ s_31 = self.__S_vv * self.__S_hh_conj
+ s_32 = np.sqrt(2) * self.__S_vv * self.__S_hv_conj
+ s_33 = abs(self.__S_vv * self.__S_vv_conj)
+
+ return [s_11, s_12, s_13, s_21, s_22, s_23, s_31, s_32, s_33]
+
+ def get_coh_mat_img(self):
+
+ """
+ This function returns the 3 * 3 coherency (Pauli-based covariance) matrix, built from physically
+ measurable parameters expressed in the orthogonal Pauli matrix components.
+ The coherency matrix consists of 9 components.
+
+ Format of the coherency matrix:
+
+ 0.5 * [<(Shh + Svv) * conj(Shh + Svv)>   <(Shh + Svv) * conj(Shh - Svv)>   2 * <(Shh + Svv) * conj(Shv)>
+ <(Shh - Svv) * conj(Shh + Svv)>   <(Shh - Svv) * conj(Shh - Svv)>   2 * <(Shh - Svv) * conj(Shv)>
+ <2 * Shv * conj(Shh + Svv)>   <2 * Shv * conj(Shh - Svv)>   4 * <|Shv|^2> ]
+
+ :return: It returns the nine parameters. t_ij represents the component of the coherency matrix at the
+ ith row and jth column
+ """
+
+ t_11 = 0.5 * abs((self.__S_hh + self.__S_vv) * (self.__S_hh_conj + self.__S_vv_conj))
+ t_12 = 0.5 * (self.__S_hh + self.__S_vv) * (self.__S_hh_conj - self.__S_vv_conj)
+ t_13 = (self.__S_hh + self.__S_vv) * self.__S_hv_conj
+ t_21 = 0.5 * (self.__S_hh - self.__S_vv) * (self.__S_hh_conj + self.__S_vv_conj)
+ t_22 = 0.5 * abs((self.__S_hh - self.__S_vv) * (self.__S_hh_conj - self.__S_vv_conj))
+ t_23 = (self.__S_hh - self.__S_vv) * self.__S_hv_conj
+ t_31 = self.__S_hv * (self.__S_hh_conj + self.__S_vv_conj)
+ t_32 = self.__S_hv * (self.__S_hh_conj - self.__S_vv_conj)
+ t_33 = 2 * abs(self.__S_hv * self.__S_hv_conj)
+
+ return [t_11, t_12, t_13, t_21, t_22, t_23, t_31, t_32, t_33]
+
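+ # Note (added for clarity; a standard relation, not used directly below): C3 in the lexicographic
+ # basis [Shh, sqrt(2)*Shv, Svv] and T3 in the Pauli basis are linked by a special unitary transform,
+ # T3 = A * C3 * conj(A).T, with
+ #     A = 1/sqrt(2) * [[1, 0,        1],
+ #                      [1, 0,       -1],
+ #                      [0, sqrt(2),  0]]
+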
+ def get_eig_val(self):
+
+ """
+ This function returns the eigen values extracted from the 3 * 3 coherency matrix.
+ """
+
+ coh_mat = self.get_coh_mat_img()
+
+ rows, cols = np.shape(coh_mat[0])[0], np.shape(coh_mat[0])[1]
+
+ t11, t12, t13 = coh_mat[0], coh_mat[1], coh_mat[2]
+ t21, t22, t23 = coh_mat[3], coh_mat[4], coh_mat[5]
+ t31, t32, t33 = coh_mat[6], coh_mat[7], coh_mat[8]
+
+ ev1 = np.zeros([rows, cols], dtype=complex)
+ ev2 = np.zeros([rows, cols], dtype=complex)
+ ev3 = np.zeros([rows, cols], dtype=complex)
+
+ for i in range(rows):
+ for j in range(cols):
+
+ x = np.array([
+ [t11[i, j], t12[i, j], t13[i, j]],
+ [t21[i, j], t22[i, j], t23[i, j]],
+ [t31[i, j], t32[i, j], t33[i, j]]
+ ])
+ eigV = la.eig(x)[0]
+ ev1[i, j] = abs(eigV[0])
+ ev2[i, j] = abs(eigV[1])
+ ev3[i, j] = abs(eigV[2])
+
+ if ev2[i, j] < ev3[i, j]:
+ ev2[i, j], ev3[i, j] = ev3[i, j], ev2[i, j]
+
+ ev1[~np.isfinite(ev1)] = 0
+ ev2[~np.isfinite(ev2)] = 0
+ ev3[~np.isfinite(ev3)] = 0
+
+ trt = t11 + t22 + t33
+
+ return [ev1, ev2, ev3, trt]
+
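+ # Sketch (standard Cloude-Pottier quantities these eigenvalues feed; added for context, assuming
+ # ev1 >= ev2 >= ev3 and pseudo-probabilities p_i = ev_i / (ev1 + ev2 + ev3)):
+ #     entropy    H = -(p1 * log3(p1) + p2 * log3(p2) + p3 * log3(p3))
+ #     anisotropy A = (ev2 - ev3) / (ev2 + ev3)
+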
+ def get_eig_vect(self):
+
+ """
+ This function returns the normalized eigen vectors extracted from the coherency matrix
+ """
+
+ coh_mat = self.get_coh_mat_img()
+
+ rows, cols = np.shape(coh_mat[0])[0], np.shape(coh_mat[0])[1]
+
+ t11, t12, t13 = coh_mat[0], coh_mat[1], coh_mat[2]
+ t21, t22, t23 = coh_mat[3], coh_mat[4], coh_mat[5]
+ t31, t32, t33 = coh_mat[6], coh_mat[7], coh_mat[8]
+
+ list2 = list()
+
+ for i in range(rows):
+ list1 = list()
+ for j in range(cols):
+
+ x = np.array([
+ [t11[i, j], t12[i, j], t13[i, j]],
+ [t21[i, j], t22[i, j], t23[i, j]],
+ [t31[i, j], t32[i, j], t33[i, j]]
+ ])
+ list1.append(la.eig(x)[1])
+
+ list2.append(list1)
+
+ # print(len(list2))
+ x = np.array(list2)
+ y = np.reshape(x, (rows, cols, 3, 3))
+ # print(np.shape(y))
+ # print(type(y[1, 1][1, 1]))
+
+ return y
+
+ def rot_coh_mat_img(self):
+
+ """
+ This function returns the rotated version of the 3 * 3 coherency (Pauli-based covariance) matrix,
+ built from physically measurable parameters expressed in the orthogonal Pauli matrix components.
+ The rotated coherency matrix consists of 9 components.
+
+ t_ij_theta: the components of rotated coherency matrix is derived by multiplying the 3 * 3 rotation
+ matrices and its transpose conjugate.
+
+ Format of the rotation matrix:
+
+ Rp(theta) = [ 1    0                 0
+               0    cos(2 * theta)    sin(2 * theta)
+               0   -sin(2 * theta)    cos(2 * theta) ]
+
+
+ T(theta) = Rp(theta) * T * transpose(conj(Rp(theta)))
+
+ T => denotes the coherency matrix
+ theta => denotes the rotation angle
+ T(theta) => denotes the rotated coherency matrix
+
+ :return: It returns the nine parameters. t_ij_theta represents the components of
+ rotated coherency matrix with ith row and jth column
+ """
+
+ t = self.get_coh_mat_img()
+ rows, cols = np.shape(t[0])[0], np.shape(t[0])[1]
+ t_theta_mat = np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+ a = t[5][i, j].real
+ b, c = t[4][i, j], t[8][i, j]
+ if b == c:
+ t_theta_mat[i, j] = math.pi / 4
+ else:
+ t_theta_mat[i, j] = 0.5 * math.atan((2 * a.real) / (b - c))
+
+ t_11_theta = t[0]
+ t_12_theta = t[1] * np.cos(t_theta_mat) + t[2] * np.sin(t_theta_mat)
+ t_13_theta = - t[1] * np.sin(t_theta_mat) + t[2] * np.cos(t_theta_mat)
+ t_21_theta = np.conj(t_12_theta)
+ t_22_theta = t[4] * (np.cos(t_theta_mat) ** 2) + t[8] * (np.sin(t_theta_mat) ** 2)
+ t_22_theta += np.real(t[5]) * np.sin(2 * t_theta_mat)
+ t_23_theta = np.imag(t[5]) * 1j
+ t_31_theta = np.conj(t_13_theta)
+ t_32_theta = - np.imag(t[5]) * 1j
+ t_33_theta = t[8] * (np.cos(t_theta_mat) ** 2) + t[4] * (np.sin(t_theta_mat) ** 2)
+ t_33_theta -= np.real(t[5]) * (np.sin(t_theta_mat) ** 2)
+
+ return [t_11_theta, t_12_theta, t_13_theta, t_21_theta, t_22_theta,
+ t_23_theta, t_31_theta, t_32_theta, t_33_theta]
+
+ def get_image(self, ps, pd, pv, pc):
+
+ """
+ After estimating the corresponding power term, we generate the decomposed power images of surface,
+ double-bounce and volume scattering respectively.
+
+ :param ps: power term associated with surface scattering
+ :param pd: power term associated with double-bounce scattering
+ :param pv: power term associated with volume scattering
+ :param pc: power term associated with helix scattering
+ :return: writes the normalized helix-scattering power image, pc / (ps + pd + pv + pc)
+ """
+
+ cols, rows = pd.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'helix_Scattering_yamaguchi'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.__band[0].GetProjection())
+ out_data.SetGeoTransform(self.__band[0].GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(pc / (ps + pd + pv + pc))
+
+ def get_band(self):
+ return self.__band
+
+
+class ModTouzi(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ self.__band = b
+ self.__w = w
+
+ def get_psi(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Target Scattering Decomposition in Terms of Roll-Invariant Target Parameters"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/4039635
+
+ DOI: 10.1109/TGRS.2006.886176
+
+ With the projection of Kennaugh-Huynen scattering matrix into the Pauli basis, this model established
+ the basis invariant representation of coherent target scattering.
+
+ alp: for symmetric scattering type magnitude (normalized)
+ phi: for symmetric scattering type phase (normalized)
+ psi: orientation angle (Kennaugh-Huynen maximum polarization parameter (normalized)
+ tau: target helicity (normalized)
+
+ :return: It returns alp, psi, tau, phi
+ """
+
+ ev = self.get_eig_val()
+ ev1, ev2, ev3, trt = ev[0], ev[1], ev[2], ev[3]
+
+ p1 = ev1 / trt
+ p2 = ev2 / trt
+ p3 = ev3 / trt
+
+ rows, cols = np.shape(ev[0])[0], np.shape(ev[0])[1]
+ eig_vect = self.get_eig_vect()
+
+ alp1, alp2, alp3 = np.zeros([rows, cols]), np.zeros([rows, cols]), np.zeros([rows, cols])
+ psi1, psi2, psi3 = np.zeros([rows, cols]), np.zeros([rows, cols]), np.zeros([rows, cols])
+ tau1, tau2, tau3 = np.zeros([rows, cols]), np.zeros([rows, cols]), np.zeros([rows, cols])
+ phis1, phis2, phis3 = np.zeros([rows, cols]), np.zeros([rows, cols]), np.zeros([rows, cols])
+
+
+ for i in range(rows):
+ for j in range(cols):
+ u = np.array([
+ [eig_vect[i, j][0, 0], eig_vect[i, j][0, 1], eig_vect[i, j][0, 2]],
+ [eig_vect[i, j][1, 0], eig_vect[i, j][1, 1], eig_vect[i, j][1, 2]],
+ [eig_vect[i, j][2, 0], eig_vect[i, j][2, 1], eig_vect[i, j][2, 2]]
+ ])
+
+ psi1[i, j] = 0.5 * np.arctan(abs(np.real(u[2, 0]) / np.real(u[1, 0]))) * 180 / math.pi
+ alp1[i, j] = np.arcsin(abs(np.real(u[1, 0]) / np.cos(2 * psi1[i, j] * math.pi / 180))) * 180 / math.pi
+ tau1[i, j] = 0.5 * np.arccos(np.real(u[0, 0]) / np.cos(alp1[i, j] * math.pi / 180)) * 180 / math.pi
+
+ psi2[i, j] = 0.5 * np.arctan(abs(np.real(u[2, 1]) / np.real(u[1, 1]))) * 180 / math.pi
+ alp2[i, j] = np.arcsin(abs(np.real(u[1, 1]) / np.cos(2 * psi2[i, j] * math.pi / 180))) * 180 / math.pi
+ tau2[i, j] = 0.5 * np.arccos(np.real(u[0, 1]) / np.cos(alp2[i, j] * math.pi / 180)) * 180 / math.pi
+
+ psi3[i, j] = 0.5 * np.arctan(abs(np.real(u[2, 2]) / np.real(u[1, 2]))) * 180 / math.pi
+ alp3[i, j] = np.arcsin(abs(np.real(u[1, 2]) / np.cos(2 * psi3[i, j] * math.pi / 180))) * 180 / math.pi
+ tau3[i, j] = 0.5 * np.arccos(np.real(u[0, 2]) / np.cos(alp3[i, j] * math.pi / 180)) * 180 / math.pi
+
+ phis1[i, j] = np.arctan(np.imag(u[1, 0]) / np.real(u[1, 0])) * 180 / math.pi
+ phis2[i, j] = np.arctan(np.imag(u[1, 1]) / np.real(u[1, 1])) * 180 / math.pi
+ phis3[i, j] = np.arctan(np.imag(u[1, 2]) / np.real(u[1, 2])) * 180 / math.pi
+
+ alp = p1 * alp1 + p2 * alp2 + p3 * alp3
+ psi = p1 * psi1 + p2 * psi2 + p3 * psi3
+ tau = p1 * tau1 + p2 * tau2 + p3 * tau3
+ phi = p1 * phis1 + p2 * phis2 + p3 * phis3
+
+ return [alp, psi, tau, phi]
+
+ def get_result(self, saveFolder):
+ x = self.get_psi()
+ scattering_list = ['alpha', 'psi', 'tau', 'phi']
+ for i in range(len(x)):
+
+ cols, rows = x[0].shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Touzi_New'
+ outfile += '.tif'
+            # create the output file in the requested folder
+            out_data = driver.Create(os.path.join(saveFolder, outfile), rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band()[0].GetProjection())
+ out_data.SetGeoTransform(self.get_band()[0].GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+ def get_result_block(self, saveFolder, suffix):
+ x = self.get_psi()
+ scattering_list = ['alpha', 'psi', 'tau', 'phi']
+ for i in range(len(x)):
+
+ cols, rows = x[0].shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Touzi_New'
+ outfile += suffix
+            # create the output file in the requested folder
+            out_data = driver.Create(os.path.join(saveFolder, outfile), rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band()[0].GetProjection())
+ out_data.SetGeoTransform(self.get_band()[0].GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
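+# --- Hedged usage sketch (hypothetical handles and paths) --------------------
+# ModTouzi writes one GeoTIFF per roll-invariant parameter. Assuming `ds` is
+# the band object expected by Polarimetry and `win` a boxcar window size:
+#
+#     decomposer = ModTouzi(ds, win)
+#     decomposer.get_result("/tmp/touzi_out")             # hypothetical folder
+#     decomposer.get_result_block("/tmp/touzi_out", "_block0.tif")
+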
+class Touzi(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ self.__band = b
+ self.__w = w
+
+ def get_eig_value(self):
+ coh_mat = self.get_coh_mat_img()
+
+ rows, cols = np.shape(coh_mat[0])[0], np.shape(coh_mat[0])[1]
+
+ t11, t12, t13 = coh_mat[0], coh_mat[1], coh_mat[2]
+ t21, t22, t23 = coh_mat[3], coh_mat[4], coh_mat[5]
+ t31, t32, t33 = coh_mat[6], coh_mat[7], coh_mat[8]
+
+ ev1 = np.zeros([rows, cols], dtype=complex)
+ ev2 = np.zeros([rows, cols], dtype=complex)
+ ev3 = np.zeros([rows, cols], dtype=complex)
+
+ for i in range(rows):
+ for j in range(cols):
+
+ x = np.array([
+ [t11[i, j], t12[i, j], t13[i, j]],
+ [t21[i, j], t22[i, j], t23[i, j]],
+ [t31[i, j], t32[i, j], t33[i, j]]
+ ])
+
+                w = la.eig(x)[0]  # compute the eigenvalues once per pixel
+                ev1[i, j] = abs(w[0])
+                ev2[i, j] = abs(w[1])
+                ev3[i, j] = abs(w[2])
+
+ if ev2[i, j] < ev3[i, j]:
+ ev2[i, j], ev3[i, j] = ev3[i, j], ev2[i, j]
+
+ ev1[~np.isfinite(ev1)] = 0
+ ev2[~np.isfinite(ev2)] = 0
+ ev3[~np.isfinite(ev3)] = 0
+
+ trt = t11 + t22 + t33
+ """
+ det = t11 * t22 * t33 - t33 * t12 * np.conj(t12) - t22 * t13 * np.conj(t13)
+ det += t12 * np.conj(t13) * t23 + np.conj(t12) * t13 * np.conj(t23) - t11 * t23 * np.conj(t23)
+ det = abs(det)
+
+ a = abs(t11 * t22 + t11 * t33 + t22 * t33 - t12 * np.conj(t12) - t13 * np.conj(t13) - t23 * np.conj(t23))
+
+ b = t11 * t11 - t11 * t22 + t22 * t22 - t11 * t33 - t22 * t33 + t33 * t33
+ b = b + 3.0 * (t12 * np.conj(t12) + t13 * np.conj(t13) + t23 * np.conj(t23))
+
+ c = 27.0 * det - 9.0 * a * trt + 2.0 * (trt ** 3)
+ c = c + 0.0j
+ c = c + np.sqrt(c ** 2.0 - 4.0 * b ** 3.0)
+ c = c ** (1.0 / 3.0)
+
+ trt = (1.0 / 3.0) * trt
+
+ eig1 = trt + ((2 ** (1.0 / 3.0)) * b) / (3.0 * c) + c / (3.0 * 2 ** (1.0 / 3.0))
+ eig2 = trt - complex(1, math.sqrt(3)) * b / (3 * 2 ** (2 / 3) * c) - complex(
+ 1, -math.sqrt(3)) * c / (6 * 2 ** (1 / 3))
+ eig3 = trt - complex(1, -math.sqrt(3)) * b / (3 * 2 ** (2 / 3) * c) - complex(
+ 1, math.sqrt(3)) * c / (6 * 2 ** (1 / 3))
+
+ eig1, eig2, eig3 = abs(eig1), abs(eig2), abs(eig3)
+
+ eig1[~np.isfinite(eig1)] = 0
+ eig2[~np.isfinite(eig2)] = 0
+ eig3[~np.isfinite(eig3)] = 0
+
+ x = eig2 >= eig3
+ y = eig2 < eig3
+
+ ref_eig2 = x * eig2 + y * eig3
+ ref_eig3 = x * eig3 + y * eig2
+
+ print(ref_eig2[200:205, 200:205], ref_eig3[200:205, 200:205])
+
+ trt = t11 + t22 + t33
+ """
+
+ return [ev1, ev2, ev3, trt]
+
+ def get_params(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Target Scattering Decomposition in Terms of Roll-Invariant Target Parameters"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/4039635
+
+ DOI: 10.1109/TGRS.2006.886176
+
+ With the projection of Kennaugh-Huynen scattering matrix into the Pauli basis, this model established
+ the basis invariant representation of coherent target scattering.
+
+ U3 = [cos(alp1)*cos(2*taum1) cos(alp2)*cos(2*taum2) cos(alp3)*cos(2*taum3)
+ sin(alp1)*exp(j*phi1) sin(alp2)*exp(j*phi2) sin(alp3)*exp(j*phi3)
+ -j*cos(alp1)*sin(2*taum1) -j*cos(alp2)*sin(2*taum2) -j*cos(alp3)*sin(2*taum3)]
+
+ T = U3 * [lambda1 0 0 * Transpose(conj(U3))
+ 0 lambda2 0
+ 0 0 lambda3]
+
+ alp: for symmetric scattering type magnitude (normalized)
+ phi: for symmetric scattering type phase (normalized)
+ psi: orientation angle (Kennaugh-Huynen maximum polarization parameter (normalized)
+ tau: target helicity (normalized)
+
+        Pseudo probability: estimated by normalizing each eigenvalue by the total sum of all the
+        eigenvalues
+
+        :return: It returns u11, u12, u13, u21, u22, u23 of the U3 matrix and the pseudo probabilities
+
+        ** Note: In this implementation, the orientation angle has been neglected. Please refer to
+        ModTouzi for the orientation angle
+ """
+
+ coh_mat = self.get_coh_mat_img()
+ x = self.get_eig_value()
+ ev1, ev2, ev3 = x[0], x[1], x[2]
+
+ t11, t12, t13 = coh_mat[0], coh_mat[1], coh_mat[2]
+ t21, t22, t23 = coh_mat[3], coh_mat[4], coh_mat[5]
+ t31, t32, t33 = coh_mat[6], coh_mat[7], coh_mat[8]
+
+ t12c, t13c, t23c = np.conj(t12), np.conj(t13), np.conj(t23)
+
+ u11 = (ev1 - t33) / t13c + ((ev1 - t33) * t12c + t13c * t23) * t23c / (
+ ((t22 - ev1) * t13c - t12c * t23c) * t13c)
+
+ u21 = ((ev1 - t33) * t12c + t13c * t23) / ((t22 - ev1) * t13c - t12c * t23c)
+
+ nrm = np.sqrt(u11 * np.conj(u11) + u21 * np.conj(u21) + 1)
+
+ u11 = u11 / nrm
+ u21 = u21 / nrm
+
+ u11[~np.isfinite(u11)] = 0
+ u21[~np.isfinite(u21)] = 0
+
+ u12 = (ev2 - t33) / t13c + ((ev2 - t33) * t12c + t13c * t23) * t23c / (
+ ((t22 - ev2) * t13c - t12c * t23c) * t13c)
+
+ u22 = ((ev2 - t33) * t12c + t13c * t23) / ((t22 - ev2) * t13c - t12c * t23c)
+
+ nrm = np.sqrt(u12 * np.conj(u12) + u22 * np.conj(u22) + 1)
+
+ u12 = u12 / nrm
+ u22 = u22 / nrm
+
+ u12[~np.isfinite(u12)] = 0
+ u22[~np.isfinite(u22)] = 0
+
+ u13 = (ev3 - t33) / t13c + ((ev3 - t33) * t12c + t13c * t23) * t23c / (
+ ((t22 - ev3) * t13c - t12c * t23c) * t13c)
+
+ u23 = ((ev3 - t33) * t12c + t13c * t23) / ((t22 - ev3) * t13c - t12c * t23c)
+
+ nrm = np.sqrt(u13 * np.conj(u13) + u23 * np.conj(u23) + 1)
+
+ u13 = u13 / nrm
+ u23 = u23 / nrm
+
+ u13[~np.isfinite(u13)] = 0
+ u23[~np.isfinite(u23)] = 0
+
+ trt = t11 + t22 + t33
+
+ p1 = ev1 / trt
+ p2 = ev2 / trt
+ p3 = ev3 / trt
+
+ p1[~np.isfinite(p1)] = 0
+ p2[~np.isfinite(p2)] = 0
+ p3[~np.isfinite(p3)] = 0
+
+ return [u11, u12, u13, u21, u22, u23, p1, p2, p3]
+
+ def get_alpha(self):
+
+ """
+ The alpha parameter is introduced as a symmetric scattering type magnitude
+
+        tan(alpha) * exp(j * phi) = (mu1 - mu2) / (mu1 + mu2)
+        mu1, mu2 are the con-eigenvalues
+
+ :return: It returns the normalized alpha using the linear combination of pseudo probabilities and alpha
+ parameters (p1 * alpha1 + p2 * alpha2 + p3 * alpha3)
+ """
+
+ x = self.get_params()
+ u11, u12, u13 = x[0], x[1], x[2]
+ u21, u22, u23 = x[3], x[4], x[5]
+ p1, p2, p3 = x[6], x[7], x[8]
+
+ alps1 = np.arcsin(abs(u21)) * 180 / math.pi
+ alps2 = np.arcsin(abs(u22)) * 180 / math.pi
+ alps3 = np.arcsin(abs(u23)) * 180 / math.pi
+
+ alps1[~np.isfinite(alps1)] = 0
+ alps2[~np.isfinite(alps2)] = 0
+ alps3[~np.isfinite(alps3)] = 0
+
+ alps = p1 * alps1 + p2 * alps2 + p3 * alps3
+
+ return [u11, u12, u13, u21, u22, u23, p1, p2, p3, alps]
+
+ def get_tau(self):
+
+ """
+ The tau parameter is defined as the target helicity. Under the assumption of target reciprocity, the Kennaugh-
+ Huynen condiagonalization is performed using:
+
+ [S] = [R(psi)].[T(taum)].[Sd].[T(taum)].[R(-psi)]
+
+        [R(psi)] = [cos(psi)  -sin(psi)     [T(taum)] = [cos(taum)        -j * sin(taum)     Sd = [mu1   0
+                    sin(psi)   cos(psi)]                 -j * sin(taum)    cos(taum)]              0     mu2]
+
+ :return: It returns the normalized tau using the linear combination of pseudo probabilities and tau
+ parameters (p1 * tau1 + p2 * tau2 + p3 * tau3)
+ """
+
+ x = self.get_alpha()
+ u11, u12, u13 = x[0], x[1], x[2]
+ u21, u22, u23 = x[3], x[4], x[5]
+ p1, p2, p3 = x[6], x[7], x[8]
+ alps = x[9]
+
+ taum1 = 0.5 * np.arccos(abs(u11 / (np.cos(u21 * math.pi / 180)))) * 180 / math.pi
+ taum2 = 0.5 * np.arccos(abs(u12 / (np.cos(u22 * math.pi / 180)))) * 180 / math.pi
+ taum3 = 0.5 * np.arccos(abs(u13 / (np.cos(u23 * math.pi / 180)))) * 180 / math.pi
+
+ taum1[~np.isfinite(taum1)] = 0
+ taum2[~np.isfinite(taum2)] = 0
+ taum3[~np.isfinite(taum3)] = 0
+
+ taum = p1 * taum1 + p2 * taum2 + p3 * taum3
+
+ return [u11, u12, u13, u21, u22, u23, p1, p2, p3, alps, taum]
+
+ def get_phi(self):
+
+ """
+ The parameter phi is the phase difference between the vector components in the trihedral-dihedral basis
+
+        :return: It returns the normalized phi using the linear combination of pseudo probabilities and phi
+                 parameters (p1 * phi1 + p2 * phi2 + p3 * phi3)
+ """
+
+ x = self.get_tau()
+ # u11, u12, u13 = x[0], x[1], x[2]
+ u21, u22, u23 = x[3], x[4], x[5]
+ p1, p2, p3 = x[6], x[7], x[8]
+ alps, taum = x[9], x[10]
+
+ phis1 = np.arctan(np.imag(u21) / np.real(u21)) * 180 / math.pi
+ phis2 = np.arctan(np.imag(u22) / np.real(u22)) * 180 / math.pi
+ phis3 = np.arctan(np.imag(u23) / np.real(u23)) * 180 / math.pi
+
+ phis = p1 * phis1 + p2 * phis2 + p3 * phis3
+
+ return [alps, taum, phis]
+
+ def get_result(self):
+ x = self.get_phi()
+
+ scattering_list = ['alpha', 'tau', 'phi']
+
+ for i in range(len(x)):
+
+ cols, rows = x[0].shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Touzi'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
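+# --- Hedged sketch: eigenvalues of a Hermitian coherency matrix --------------
+# Touzi.get_eig_value() uses la.eig() on each 3 x 3 matrix. Since a coherency
+# matrix is Hermitian, numpy.linalg.eigvalsh (real eigenvalues, ascending
+# order) is a cheaper and numerically safer alternative. Hypothetical helper:
+def hermitian_eigenvalues_sketch(t_pixel):
+    w = np.linalg.eigvalsh(t_pixel)   # ascending, real for Hermitian input
+    return w[::-1]                    # descending: lambda1 >= lambda2 >= lambda3
+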
+class HAAlpha(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ self.__band = b
+ self.__w = w
+
+ def get_eigen_value(self):
+
+ coh_mat = self.get_coh_mat_img()
+
+ rows, cols = np.shape(coh_mat[0])[0], np.shape(coh_mat[0])[1]
+
+ t11, t12, t13 = coh_mat[0], coh_mat[1], coh_mat[2]
+ t21, t22, t23 = coh_mat[3], coh_mat[4], coh_mat[5]
+ t31, t32, t33 = coh_mat[6], coh_mat[7], coh_mat[8]
+
+ ev1 = np.zeros([rows, cols], dtype=complex)
+ ev2 = np.zeros([rows, cols], dtype=complex)
+ ev3 = np.zeros([rows, cols], dtype=complex)
+
+ for i in range(rows):
+ for j in range(cols):
+
+ x = np.array([
+ [t11[i, j], t12[i, j], t13[i, j]],
+ [t21[i, j], t22[i, j], t23[i, j]],
+ [t31[i, j], t32[i, j], t33[i, j]]
+ ])
+
+                w = la.eig(x)[0]  # compute the eigenvalues once per pixel
+                ev1[i, j] = abs(w[0])
+                ev2[i, j] = abs(w[1])
+                ev3[i, j] = abs(w[2])
+
+ if ev2[i, j] < ev3[i, j]:
+ ev2[i, j], ev3[i, j] = ev3[i, j], ev2[i, j]
+
+ ev1[~np.isfinite(ev1)] = 0
+ ev2[~np.isfinite(ev2)] = 0
+ ev3[~np.isfinite(ev3)] = 0
+
+ trt = t11 + t22 + t33
+ """
+ det = t11 * t22 * t33 - t33 * t12 * np.conj(t12) - t22 * t13 * np.conj(t13)
+ det += t12 * np.conj(t13) * t23 + np.conj(t12) * t13 * np.conj(t23) - t11 * t23 * np.conj(t23)
+ det = abs(det)
+
+ a = abs(t11 * t22 + t11 * t33 + t22 * t33 - t12 * np.conj(t12) - t13 * np.conj(t13) - t23 * np.conj(t23))
+
+ b = t11 * t11 - t11 * t22 + t22 * t22 - t11 * t33 - t22 * t33 + t33 * t33
+ b = b + 3.0 * (t12 * np.conj(t12) + t13 * np.conj(t13) + t23 * np.conj(t23))
+
+ c = 27.0 * det - 9.0 * a * trt + 2.0 * (trt ** 3)
+ c = c + 0.0j
+ c = c + np.sqrt(c ** 2.0 - 4.0 * b ** 3.0)
+ c = c ** (1.0 / 3.0)
+
+ trt = (1.0 / 3.0) * trt
+
+ eig1 = trt + ((2 ** (1.0 / 3.0)) * b) / (3.0 * c) + c / (3.0 * 2 ** (1.0 / 3.0))
+ eig2 = trt - complex(1, math.sqrt(3)) * b / (3 * 2 ** (2 / 3) * c) - complex(
+ 1, -math.sqrt(3)) * c / (6 * 2 ** (1 / 3))
+ eig3 = trt - complex(1, -math.sqrt(3)) * b / (3 * 2 ** (2 / 3) * c) - complex(
+ 1, math.sqrt(3)) * c / (6 * 2 ** (1 / 3))
+
+ eig1, eig2, eig3 = abs(eig1), abs(eig2), abs(eig3)
+
+ eig1[~np.isfinite(eig1)] = 0
+ eig2[~np.isfinite(eig2)] = 0
+ eig3[~np.isfinite(eig3)] = 0
+
+ x = eig2 >= eig3
+ y = eig2 < eig3
+
+ ref_eig2 = x * eig2 + y * eig3
+ ref_eig3 = x * eig3 + y * eig2
+
+ print(ref_eig2[200:205, 200:205], ref_eig3[200:205, 200:205])
+
+ trt = t11 + t22 + t33
+ """
+
+ return [ev1, ev2, ev3, trt]
+
+ def get_entropy(self):
+
+ """
+ The scattering entropy is associated with the degree of randomness under the assumption of reflection symmetric
+ media. The entropy is calculated using the eigen values and pseudo probabilities as follows:
+
+ H = - (p1 * log3(p1) + p2 * log3(p2) + p3 * log3(p3))
+
+ The pseudo probabilities are estimated as
+
+ p1 = lambda1 / (lambda1 + lambda2 + lambda3)
+ p2 = lambda2 / (lambda1 + lambda2 + lambda3)
+ p3 = lambda3 / (lambda1 + lambda2 + lambda3)
+
+ :return: It returns primarily the entropy along with three eigen values extracted from the 3 * 3 coherency
+ matrices
+ """
+
+ x = self.get_eigen_value()
+ ev1, ev2, ev3, trt = x[0], x[1], x[2], x[3]
+
+ p1 = ev1 / trt
+ p2 = ev2 / trt
+ p3 = ev3 / trt
+
+ ent = - (p1 * (np.log(p1) / np.log(3)) + p2 * (np.log(p2) / np.log(3)) + p3 * (np.log(p3) / np.log(3)))
+ ent[~np.isfinite(ent)] = 0
+
+ return [ent, ev1, ev2, ev3]
+
+ def get_anisotropy(self):
+
+ """
+ The anisotropy is the measure of the amount of the forward direction retained after a single scattering event
+
+        :return: It returns the anisotropy, along with the eigenvalues and the entropy
+ """
+
+ x = self.get_entropy()
+ ent = x[0]
+ ev1, ev2, ev3 = x[1], x[2], x[3]
+
+ ani = abs((ev2 - ev3) / (ev2 + ev3))
+
+ ani[~np.isfinite(ani)] = 0
+
+ return [ani, ev1, ev2, ev3, ent]
+
+ def get_alpha(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "An entropy based classification scheme for land applications of polarimetric SAR"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/551935
+
+ DOI: 10.1109/36.551935
+
+ This method employs the eigen value analysis of coherency matrix by employing 3-level Bernoulli
+ statistical model to estimate the average target scattering parameters.
+
+        U3 = [cos(alp1)                           cos(alp2)                           cos(alp3)
+              sin(alp1)*cos(beta1)*exp(j*del1)    sin(alp2)*cos(beta2)*exp(j*del2)    sin(alp3)*cos(beta3)*exp(j*del3)
+              sin(alp1)*sin(beta1)*exp(j*gama1)   sin(alp2)*sin(beta2)*exp(j*gama2)   sin(alp3)*sin(beta3)*exp(j*gama3)]
+
+ T = U3 * [lambda1 0 0 * Transpose(conj(U3))
+ 0 lambda2 0
+ 0 0 lambda3]
+
+        Pseudo probability: estimated by normalizing each eigenvalue by the total sum of all the
+        eigenvalues
+
+ :return: It returns the alpha parameter along with entropy and anisotropy
+ """
+
+ coh_mat = self.get_coh_mat_img()
+ x = self.get_anisotropy()
+ ev1, ev2, ev3 = x[1], x[2], x[3]
+ ent, ani = x[4], x[0]
+
+ t11, t12, t13 = coh_mat[0], coh_mat[1], coh_mat[2]
+ t21, t22, t23 = coh_mat[3], coh_mat[4], coh_mat[5]
+ t31, t32, t33 = coh_mat[6], coh_mat[7], coh_mat[8]
+
+ t12c, t13c, t23c = np.conj(t12), np.conj(t13), np.conj(t23)
+
+ u11 = (ev1 - t33) / t13c + ((ev1 - t33) * t12c + t13c * t23) * t23c / (
+ ((t22 - ev1) * t13c - t12c * t23c) * t13c)
+
+ u21 = ((ev1 - t33) * t12c + t13c * t23) / ((t22 - ev1) * t13c - t12c * t23c)
+
+ nrm = np.sqrt(u11 * np.conj(u11) + u21 * np.conj(u21) + 1)
+
+ u11 = u11 / nrm
+ u21 = u21 / nrm
+
+ u11[~np.isfinite(u11)] = 0
+ u21[~np.isfinite(u21)] = 0
+
+ u12 = (ev2 - t33) / t13c + ((ev2 - t33) * t12c + t13c * t23) * t23c / (
+ ((t22 - ev2) * t13c - t12c * t23c) * t13c)
+
+ u22 = ((ev2 - t33) * t12c + t13c * t23) / ((t22 - ev2) * t13c - t12c * t23c)
+
+ nrm = np.sqrt(u12 * np.conj(u12) + u22 * np.conj(u22) + 1)
+
+ u12 = u12 / nrm
+ u22 = u22 / nrm
+
+ u12[~np.isfinite(u12)] = 0
+ u22[~np.isfinite(u22)] = 0
+
+ u13 = (ev3 - t33) / t13c + ((ev3 - t33) * t12c + t13c * t23) * t23c / (
+ ((t22 - ev3) * t13c - t12c * t23c) * t13c)
+
+ u23 = ((ev3 - t33) * t12c + t13c * t23) / ((t22 - ev3) * t13c - t12c * t23c)
+
+ nrm = np.sqrt(u13 * np.conj(u13) + u23 * np.conj(u23) + 1)
+
+ u13 = u13 / nrm
+ u23 = u23 / nrm
+
+ u13[~np.isfinite(u13)] = 0
+ u23[~np.isfinite(u23)] = 0
+
+ trt = t11 + t22 + t33
+
+ p1 = ev1 / trt
+ p2 = ev2 / trt
+ p3 = ev3 / trt
+
+ p1[~np.isfinite(p1)] = 0
+ p2[~np.isfinite(p2)] = 0
+ p3[~np.isfinite(p3)] = 0
+
+ alp1 = np.arccos(abs(u11)) * 180 / math.pi
+ alp2 = np.arccos(abs(u12)) * 180 / math.pi
+ alp3 = np.arccos(abs(u13)) * 180 / math.pi
+
+ alp1[~np.isfinite(alp1)] = 0
+ alp2[~np.isfinite(alp2)] = 0
+ alp3[~np.isfinite(alp3)] = 0
+
+ alp = p1 * alp1 + p2 * alp2 + p3 * alp3
+
+ return [ent, ani, alp]
+
+ def get_result(self):
+ x = self.get_alpha()
+ ent, ani, alp = x[0], x[1], x[2]
+
+ scattering_list = ['entropy', 'anisotropy', 'alpha']
+
+ for i in range(len(x)):
+
+ cols, rows = ent.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Cloude_Pottier'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
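+# --- Hedged numeric check of the entropy formula ------------------------------
+# H = -(p1*log3(p1) + p2*log3(p2) + p3*log3(p3)) reaches 1.0 for equal pseudo
+# probabilities and 0.0 when a single eigenvalue carries all the power. A small
+# hypothetical check (not used by the classes above):
+def entropy_sketch(p1, p2, p3):
+    p = np.array([p1, p2, p3], dtype=float)
+    p = p[p > 0]                              # skip log(0) terms
+    return float(-(p * (np.log(p) / np.log(3))).sum() + 0.0)
+# entropy_sketch(1/3, 1/3, 1/3) -> 1.0 ; entropy_sketch(1, 0, 0) -> 0.0
+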
+class Sinclair(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ self.__band = b
+ self.__w = w
+ self.__band_list = list()
+ self.__band_list_avg = list()
+
+        for i in range(self.__band.RasterCount):
+            # GDAL band indices are 1-based
+            self.__band_list.append(self.__band.GetRasterBand(i + 1).ReadAsArray().astype(float))
+
+ for i in range(len(self.__band_list)):
+ self.__band_list_avg.append(cv2.blur(self.__band_list[i], (5, 5)))
+
+ """
+        The private variables consist of the fully polarimetric channels. For a fully polarimetric
+        synthetic aperture radar system, there are four complex channels according to the Sinclair matrix.
+
+        :param s_hh: represents the horizontal-horizontal channel
+        :param s_hh_conj: represents the conjugate of the horizontal-horizontal channel
+        :param s_hv: represents the horizontal-vertical channel
+        :param s_hv_conj: represents the conjugate of the horizontal-vertical channel
+        :param s_vh: represents the vertical-horizontal channel
+        :param s_vh_conj: represents the conjugate of the vertical-horizontal channel
+        :param s_vv: represents the vertical-vertical channel
+        :param s_vv_conj: represents the conjugate of the vertical-vertical channel
+ :param b: represents the object of bands
+ """
+
+ self.__S_hh = self.__band_list_avg[0] + 1j * self.__band_list_avg[1]
+ self.__S_hh_conj = self.__band_list_avg[0] - 1j * self.__band_list_avg[1]
+ self.__S_hv = self.__band_list_avg[2] + 1j * self.__band_list_avg[3]
+ self.__S_hv_conj = self.__band_list_avg[2] - 1j * self.__band_list_avg[3]
+ self.__S_vh = self.__band_list_avg[4] + 1j * self.__band_list_avg[5]
+ self.__S_vh_conj = self.__band_list_avg[4] - 1j * self.__band_list_avg[5]
+ self.__S_vv = self.__band_list_avg[6] + 1j * self.__band_list_avg[7]
+ self.__S_vv_conj = self.__band_list_avg[6] - 1j * self.__band_list_avg[7]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Book: Polarimetric Radar Imaging: From basics to applications
+
+ General form of Sinclair matrix
+ [S] = [S11 S12
+ S21 S22]
+
+ :return: It returns the three intensity parameters based on Sinclair Matrix
+ """
+ intensity1 = abs(self.__S_vv * self.__S_vv_conj)
+ intensity2 = 0.25 * abs((self.__S_hv + self.__S_vh) * (self.__S_hv_conj + self.__S_vh_conj))
+ intensity3 = abs(self.__S_hh * self.__S_hh_conj)
+
+ return [10 * np.log10(abs(intensity1)), 10 * np.log10(abs(intensity2)), 10 * np.log10(abs(intensity3))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ intensity1, intensity2, intensity3 = x[0], x[1], x[2]
+
+ scattering_list = ['intensity1', 'intensity2', 'intensity3']
+
+ for i in range(len(x)):
+
+ cols, rows = intensity1.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Sinclair'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i] / (intensity1 + intensity2 + intensity3))
+
+
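+# --- Hedged refactoring sketch -------------------------------------------------
+# Sinclair, Cloude, and Pauli all rebuild the same eight boxcar-averaged I/Q
+# channels in their constructors. A shared helper (hypothetical name) could
+# centralize that logic; it assumes an 8-band dataset ordered as (real,
+# imaginary) pairs for HH, HV, VH, VV:
+def complex_channels_sketch(band, ksize=5):
+    raw = [band.GetRasterBand(i + 1).ReadAsArray().astype(float)
+           for i in range(band.RasterCount)]          # GDAL bands are 1-based
+    avg = [cv2.blur(ch, (ksize, ksize)) for ch in raw]
+    return (avg[0] + 1j * avg[1], avg[2] + 1j * avg[3],
+            avg[4] + 1j * avg[5], avg[6] + 1j * avg[7])
+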
+class Cloude(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ self.__band = b
+ self.__w = w
+ self.__band_list = list()
+ self.__band_list_avg = list()
+
+        for i in range(self.__band.RasterCount):
+            # GDAL band indices are 1-based
+            self.__band_list.append(self.__band.GetRasterBand(i + 1).ReadAsArray().astype(float))
+
+ for i in range(len(self.__band_list)):
+ self.__band_list_avg.append(cv2.blur(self.__band_list[i], (5, 5)))
+
+ """
+        The private variables consist of the fully polarimetric channels. For a fully polarimetric
+        synthetic aperture radar system, there are four complex channels according to the Sinclair matrix.
+
+        :param s_hh: represents the horizontal-horizontal channel
+        :param s_hh_conj: represents the conjugate of the horizontal-horizontal channel
+        :param s_hv: represents the horizontal-vertical channel
+        :param s_hv_conj: represents the conjugate of the horizontal-vertical channel
+        :param s_vh: represents the vertical-horizontal channel
+        :param s_vh_conj: represents the conjugate of the vertical-horizontal channel
+        :param s_vv: represents the vertical-vertical channel
+        :param s_vv_conj: represents the conjugate of the vertical-vertical channel
+ :param b: represents the object of bands
+ """
+
+ self.__S_hh = self.__band_list_avg[0] + 1j * self.__band_list_avg[1]
+ self.__S_hh_conj = self.__band_list_avg[0] - 1j * self.__band_list_avg[1]
+ self.__S_hv = self.__band_list_avg[2] + 1j * self.__band_list_avg[3]
+ self.__S_hv_conj = self.__band_list_avg[2] - 1j * self.__band_list_avg[3]
+ self.__S_vh = self.__band_list_avg[4] + 1j * self.__band_list_avg[5]
+ self.__S_vh_conj = self.__band_list_avg[4] - 1j * self.__band_list_avg[5]
+ self.__S_vv = self.__band_list_avg[6] + 1j * self.__band_list_avg[7]
+ self.__S_vv_conj = self.__band_list_avg[6] - 1j * self.__band_list_avg[7]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Book: Polarimetric Radar Imaging: From basics to applications
+
+        Cloude introduced the concept of eigenvector-based decomposition, which utilizes the largest
+        eigenvalue to identify the dominant scattering mechanism.
+
+ The corresponding target vector can thus be expressed as
+
+ k1 = sqrt(Lambda1) * u1 = exp(j*phi) / sqrt(2*A0) transpose([2*A0 C + j*D H - j*G])
+
+ k1 = exp(j*phi) * transpose([sqrt(2*A0) sqrt(B0 + B)*exp(j*arctan(D/C)) sqrt(B0 - B)*exp(j*arctan(G/H))])
+
+ The three target structure generators are in one-to-one correspondence between Kennaugh Matrix and coherency
+ matrix (T):
+
+        T = [2*A0      C - j*D    H + j*G
+             C + j*D   B0 + B     E + j*F
+             H - j*G   E - j*F    B0 - B]
+
+ A0 => target symmetry, (B0 + B) => target irregularity, (B0 - B) => target nonsymmetry
+
+ :return: It returns the three parameters representing the surface scattering, dihedral-scattering and volume
+ scattering
+ """
+
+ ps = 0.5 * abs((self.__S_hh + self.__S_vv) * (self.__S_hh_conj + self.__S_vv_conj))
+ pd = 0.5 * abs((self.__S_hh - self.__S_vv) * (self.__S_hh_conj - self.__S_vv_conj))
+ pv = 0.5 * abs((self.__S_hv + self.__S_vh) * (self.__S_hv_conj + self.__S_vh_conj))
+
+ return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)), 10 * np.log10(abs(pv))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ intensity1, intensity2, intensity3 = x[0], x[1], x[2]
+
+ scattering_list = ['surface', 'double_bounce', 'volume']
+
+ for i in range(len(x)):
+
+ cols, rows = intensity1.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Cloude'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i] / (intensity1 + intensity2 + intensity3))
+
+
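+# --- Hedged helper sketch: decibel conversion used throughout -----------------
+# Every decomposition in this module returns 10 * log10(|P|); zero power maps
+# to -inf. A guarded variant (hypothetical) clamps to a floor instead:
+def to_db_sketch(power, floor=1e-30):
+    return 10.0 * np.log10(np.maximum(np.abs(power), floor))
+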
+class Pauli(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ self.__band = b
+ self.__w = w
+ self.__band_list = list()
+ self.__band_list_avg = list()
+
+        for i in range(self.__band.RasterCount):
+            # GDAL band indices are 1-based
+            self.__band_list.append(self.__band.GetRasterBand(i + 1).ReadAsArray().astype(float))
+
+ for i in range(len(self.__band_list)):
+ self.__band_list_avg.append(cv2.blur(self.__band_list[i], (5, 5)))
+
+ """
+        The private variables consist of the fully polarimetric channels. For a fully polarimetric
+        synthetic aperture radar system, there are four complex channels according to the Sinclair matrix.
+
+        :param s_hh: represents the horizontal-horizontal channel
+        :param s_hh_conj: represents the conjugate of the horizontal-horizontal channel
+        :param s_hv: represents the horizontal-vertical channel
+        :param s_hv_conj: represents the conjugate of the horizontal-vertical channel
+        :param s_vh: represents the vertical-horizontal channel
+        :param s_vh_conj: represents the conjugate of the vertical-horizontal channel
+        :param s_vv: represents the vertical-vertical channel
+        :param s_vv_conj: represents the conjugate of the vertical-vertical channel
+ :param b: represents the object of bands
+ """
+
+ self.__S_hh = self.__band_list_avg[0] + 1j * self.__band_list_avg[1]
+ self.__S_hh_conj = self.__band_list_avg[0] - 1j * self.__band_list_avg[1]
+ self.__S_hv = self.__band_list_avg[2] + 1j * self.__band_list_avg[3]
+ self.__S_hv_conj = self.__band_list_avg[2] - 1j * self.__band_list_avg[3]
+ self.__S_vh = self.__band_list_avg[4] + 1j * self.__band_list_avg[5]
+ self.__S_vh_conj = self.__band_list_avg[4] - 1j * self.__band_list_avg[5]
+ self.__S_vv = self.__band_list_avg[6] + 1j * self.__band_list_avg[7]
+ self.__S_vv_conj = self.__band_list_avg[6] - 1j * self.__band_list_avg[7]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Book: Polarimetric Radar Imaging: From basics to applications
+
+ The coherent target decompositions express the measured scattering matrix as a combinations of basis
+ matrices corresponding to canonical scattering mechanisms
+
+ The Pauli decomposition is the basis of the coherency matrix formulation.
+
+        S = [Sxx  Sxy          The target vector is constructed as k = V(S) = 0.5 * Trace(S * Psi)
+             Syx  Syy]
+
+        The complex Pauli spin matrix basis set is represented as
+
+        Psi = { sqrt(2) * [1  0     sqrt(2) * [1   0     sqrt(2) * [0  1 }
+                           0  1]               0  -1]                1  0]
+
+        So the resulting target vector will be => (1 / sqrt(2)) * transpose[(Sxx + Syy)  (Sxx - Syy)  2*Sxy]
+
+        :return: It returns the three Pauli intensities, |Shh - Svv|^2 / 2, |Shv + Svh|^2 / 2, and
+                 |Shh + Svv|^2 / 2, in decibels
+ """
+
+ intensity1 = 0.5 * abs((self.__S_hh - self.__S_vv) * (self.__S_hh_conj - self.__S_vv_conj))
+ intensity2 = 0.5 * abs((self.__S_hv + self.__S_vh) * (self.__S_hv_conj + self.__S_vh_conj))
+ intensity3 = 0.5 * abs((self.__S_hh + self.__S_vv) * (self.__S_hh_conj + self.__S_vv_conj))
+
+ return [10 * np.log10(abs(intensity1)), 10 * np.log10(abs(intensity2)), 10 * np.log10(abs(intensity3))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ intensity1, intensity2, intensity3 = x[0], x[1], x[2]
+
+ scattering_list = ['intensity1', 'intensity2', 'intensity3']
+
+ for i in range(len(x)):
+
+ cols, rows = intensity1.shape
+ driver = gdal.GetDriverByName("GTiff")
+            outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Pauli'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i] / (intensity1 + intensity2 + intensity3))
+
+
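+# --- Hedged identity check for the Pauli intensities ---------------------------
+# |Shh + Svv|^2 / 2 + |Shh - Svv|^2 / 2 = |Shh|^2 + |Svv|^2, so the first and
+# third Pauli intensities (before the dB conversion) always sum to the HH/VV
+# span. Quick hypothetical check:
+def pauli_span_sketch(s_hh, s_vv):
+    a = 0.5 * np.abs(s_hh + s_vv) ** 2
+    b = 0.5 * np.abs(s_hh - s_vv) ** 2
+    return np.allclose(a + b, np.abs(s_hh) ** 2 + np.abs(s_vv) ** 2)
+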
+class Vanzyl(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def get_coef(self):
+ cov_mat = self.get_cov_mat_img()
+ alpha = cov_mat[0]
+ rho = cov_mat[2] / alpha
+ eta = cov_mat[4] / alpha
+ mu = cov_mat[8] / alpha
+
+ return [alpha, rho, eta, mu]
+
+ def get_eigen_values(self):
+ coef = self.get_coef()
+ alpha, rho, eta, mu = coef[0], coef[1], coef[2], coef[3]
+
+ del1 = ((1 - mu) ** 2) + 4 * abs(rho * np.conj(rho))
+ lambda1 = (alpha / 2) * (1 + mu + del1 ** 0.5)
+ lambda2 = (alpha / 2) * (1 + mu - del1 ** 0.5)
+ lambda3 = alpha * eta
+
+ return [lambda1, lambda2, lambda3, coef, del1]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Application of Cloude's target decomposition theorem to polarimetric imaging radar data"
+
+ SemanticScholar: https://www.semanticscholar.org/paper
+ /Application-of-Cloude's-target-decomposition-to-Zyl/bad4b80ea872e5c00798c089f76b8fa7390fed34
+
+ DOI: 10.1117/12.140615
+
+ In this paper the Cloude's decomposition has been incorporated using general description of the
+ covariance matrix for azimuthally symmetrical natural terrain in the monostatic case
+
+        T = C * [1     0     rho
+                 0     eta   0
+                 rho*  0     zeta]
+
+        C = <|Shh|^2>,  rho = <Shh * conj(Svv)> / C,  eta = 2 * <|Shv|^2> / C,  zeta = <|Svv|^2> / C
+
+        The corresponding eigen-values and eigen-vectors can be extracted from T
+
+ :return: It returns ps, pd, and pv which are associated with fs, fd, and fv
+ """
+
+ eigen_values = self.get_eigen_values()
+ lambda1, lambda2, lambda3, coef = eigen_values[0], eigen_values[1], eigen_values[2], eigen_values[3]
+ del1 = eigen_values[4]
+ alpha, rho, eta, mu = coef[0], coef[1], coef[2], coef[3]
+
+ alpha1 = (2 * rho) / (mu - 1 + del1 ** 0.5)
+ beta1 = (2 * rho) / (mu - 1 - del1 ** 0.5)
+
+ intensity1 = lambda1 / (1 + abs(alpha1 * np.conj(alpha1)))
+ intensity2 = lambda2 / (1 + abs(beta1 * np.conj(beta1)))
+ intensity3 = lambda3
+
+ return [10 * np.log10(abs(intensity1)), 10 * np.log10(abs(intensity2)), 10 * np.log10(abs(intensity3))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv = x[0], x[1], x[2]
+
+ scattering_list = ['intensity1', 'intensity2', 'intensity3']
+
+ for i in range(len(x)):
+
+ cols, rows = ps.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Vanzyl'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
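+# --- Hedged check of the closed-form Vanzyl eigenvalues ------------------------
+# lambda1/lambda2 above are the eigenvalues of alpha * [[1, rho], [rho*, mu]].
+# A hypothetical numeric cross-check (assumes scalar real alpha and mu):
+def vanzyl_eigen_sketch(alpha, rho, mu):
+    delta = (1 - mu) ** 2 + 4 * abs(rho) ** 2
+    closed = sorted([(alpha / 2) * (1 + mu + delta ** 0.5),
+                     (alpha / 2) * (1 + mu - delta ** 0.5)], reverse=True)
+    numeric = sorted(np.linalg.eigvalsh(
+        alpha * np.array([[1, rho], [np.conj(rho), mu]])), reverse=True)
+    return np.allclose(closed, numeric)
+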
+class FreeMan(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "A three-component scattering model for polarimetric SAR data"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/673687
+
+ DOI: 10.1109/36.673687
+
+        The coefficients (fs, fv, fd) represent the scattering contributions to the corresponding
+        channels associated with the scattering matrix.
+
+ fs: for surface scattering
+ fd: for double-bounce scattering
+ fv: for volume scattering
+
+ alpha: associated with the double-bounce scattering
+        beta: associated with the surface scattering (first-order Bragg model with second-order
+              statistics)
+
+ ps: scattered power return associated with surface scattering
+ pd: scattered power return associated with double-bounce scattering
+ pv: scattered power return associated with volume scattering
+
+ :return: It returns ps, pd, and pv which are associated with fs, fd, and fv
+ """
+
+ cov_mat = self.get_cov_mat_img()
+ fv = 3 * cov_mat[4]
+ s_hh = cov_mat[0] - fv
+ s_vv = cov_mat[8] - fv
+ s_hh_vv_conj = cov_mat[2] - fv / 3
+ rows, cols = np.shape(cov_mat[0])[0], np.shape(cov_mat[0])[1]
+ ps, pd = np.zeros([rows, cols]), np.zeros([rows, cols])
+ pv = (8 * fv) / 3
+
+ for i in range(np.shape(s_hh)[0]):
+ for j in range(np.shape(s_hh)[1]):
+ if s_hh_vv_conj[i, j].real > 0:
+ alpha = -1
+ beta = ((s_hh[i, j] - s_vv[i, j]) / (s_hh_vv_conj[i, j] + s_vv[i, j])) + 1
+ fs = (s_vv[i, j] + s_hh_vv_conj[i, j]) / (1 + beta)
+ fd = s_vv[i, j] - fs
+ else:
+ beta = 1
+ alpha = ((s_hh[i, j] - s_vv[i, j]) / (s_hh_vv_conj[i, j] - s_vv[i, j])) - 1
+ fd = (s_hh_vv_conj[i, j] - s_vv[i, j]) / (alpha - 1)
+ fs = s_vv[i, j] - fd
+
+ ps[i, j] = (abs(fs) ** 2) * (1 + abs(beta) ** 2)
+ pd[i, j] = (abs(fd) ** 2) * (1 + abs(alpha) ** 2)
+
+ return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)), 10 * np.log10(abs(pv))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv = x[0], x[1], x[2]
+
+ scattering_list = ['ps', 'pd', 'pv']
+
+ for i in range(len(x)):
+
+ cols, rows = ps.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'FreeMan_3SD'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
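+# --- Hedged vectorized sketch of the Freeman-Durden branch ---------------------
+# The sign of Re(<Shh * conj(Svv)>) decides whether alpha is fixed at -1
+# (surface dominant) or beta at 1 (double-bounce dominant). np.where can
+# express the same per-pixel branch without the explicit loop (sketch only;
+# array names mirror the locals of FreeMan.get_decomposition above):
+def freeman_branch_sketch(s_hh, s_vv, s_hh_vv_conj):
+    pos = np.real(s_hh_vv_conj) > 0
+    beta = (s_hh - s_vv) / (s_hh_vv_conj + s_vv) + 1           # alpha = -1 case
+    fs_pos = (s_vv + s_hh_vv_conj) / (1 + beta)
+    alpha = (s_hh - s_vv) / (s_hh_vv_conj - s_vv) - 1          # beta = 1 case
+    fd_neg = (s_hh_vv_conj - s_vv) / (alpha - 1)
+    ps = np.where(pos, np.abs(fs_pos) ** 2 * (1 + np.abs(beta) ** 2),
+                  np.abs(s_vv - fd_neg) ** 2 * 2)
+    pd = np.where(pos, np.abs(s_vv - fs_pos) ** 2 * 2,
+                  np.abs(fd_neg) ** 2 * (1 + np.abs(alpha) ** 2))
+    return ps, pd
+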
+class Yamaguchi2005(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Four-component scattering model for polarimetric SAR image decomposition"
+
+ link: https://ieeexplore.ieee.org/document/1487628
+
+ DOI: 10.1109/TGRS.2005.852084
+
+        The coefficients (fs, fv, fd, fc) represent the scattering contributions to the corresponding
+        channels associated with the scattering matrix.
+
+ <[C]> = fs * <[Cs]> + fd * <[Cd]> + fv * <[Cv]> + fc * <[Cc]>
+
+ fs: for surface scattering
+ fd: for double-bounce scattering
+ fv: for volume scattering
+ fc: for helix scattering
+
+ alpha: associated with the double-bounce scattering
+        beta: associated with the surface scattering (first-order Bragg model with second-order
+              statistics)
+
+ ps: scattered power return associated with surface scattering
+ pd: scattered power return associated with double-bounce scattering
+ pv: scattered power return associated with volume scattering
+ pc: scattered power return associated with helix scattering
+
+ :return: It returns ps, pd, pv, and pc which are associated with fs, fd, fv, and fc
+ """
+
+ cov_mat = self.get_cov_mat_img()
+ rows, cols = np.shape(cov_mat[0])[0], np.shape(cov_mat[0])[1]
+
+ fc = 2 * abs(np.imag((cov_mat[1] + cov_mat[5]) / np.sqrt(2)))
+ fv = np.zeros([rows, cols])
+ ps = np.zeros([rows, cols])
+ pd = np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ if cov_mat[8][i, j] <= 0 or cov_mat[0][i, j] <= 0:
+ cov_mat[8][i, j], cov_mat[0][i, j] = 1.0, 1.0
+
+ if 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) < -2:
+
+ fv[i, j] = 7.5 * (0.5 * cov_mat[4][i, j] - 0.25 * fc[i, j])
+ a = cov_mat[0][i, j] - (8 / 15) * fv[i, j] - 0.25 * fc[i, j]
+ b = cov_mat[8][i, j] - (3 / 15) * fv[i, j] - 0.25 * fc[i, j]
+ c = cov_mat[2][i, j] - (2 / 15) * fv[i, j] + 0.25 * fc[i, j]
+
+ if cov_mat[4][i, j] < cov_mat[0][i, j] or cov_mat[4][i, j] < cov_mat[8][i, j]:
+
+ if np.real(cov_mat[2][i, j]) > 0:
+ alpha = -1
+ beta = (a + c) / (b + c)
+ fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+ fd = b - fs
+
+ else:
+ beta = 1
+ alpha = (a - b) / (c - b)
+ fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+ fs = b - fd
+
+ else:
+ fv[i, j], fc[i, j] = 0, 0
+ a, b, c = cov_mat[0][i, j], cov_mat[8][i, j], cov_mat[2][i, j]
+
+ if np.real(cov_mat[2][i, j]) > 0:
+ alpha = -1
+ beta = (a + c) / (b + c)
+ fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+ fd = b - fs
+
+ else:
+ beta = 1
+ alpha = (a - b) / (c - b)
+ fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+ fs = b - fd
+
+ elif 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) > 2:
+
+ fv[i, j] = 7.5 * (0.5 * cov_mat[4][i, j] - 0.25 * fc[i, j])
+ a = cov_mat[0][i, j] - (3 / 15) * fv[i, j] - 0.25 * fc[i, j]
+ b = cov_mat[8][i, j] - (8 / 15) * fv[i, j] - 0.25 * fc[i, j]
+ c = cov_mat[2][i, j] - (2 / 15) * fv[i, j] + 0.25 * fc[i, j]
+
+ if cov_mat[4][i, j] < cov_mat[0][i, j] or cov_mat[4][i, j] < cov_mat[8][i, j]:
+
+ if np.real(cov_mat[2][i, j]) > 0:
+ alpha = -1
+ beta = (a + c) / (b + c)
+ fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+ fd = b - fs
+
+ else:
+ beta = 1
+ alpha = (a - b) / (c - b)
+ fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+ fs = b - fd
+
+ else:
+ fv[i, j], fc[i, j] = 0, 0
+ a, b, c = cov_mat[0][i, j], cov_mat[8][i, j], cov_mat[2][i, j]
+
+ if np.real(cov_mat[2][i, j]) > 0:
+ alpha = -1
+ beta = (a + c) / (b + c)
+ fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+ fd = b - fs
+
+ else:
+ beta = 1
+ alpha = (a - b) / (c - b)
+ fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+ fs = b - fd
+
+ else:
+ fv[i, j] = 8 * (0.5 * cov_mat[4][i, j] - 0.25 * fc[i, j])
+ a = cov_mat[0][i, j] - (3 / 8) * fv[i, j] - 0.25 * fc[i, j]
+ b = cov_mat[8][i, j] - (3 / 8) * fv[i, j] - 0.25 * fc[i, j]
+ c = cov_mat[2][i, j] - (1 / 8) * fv[i, j] + 0.25 * fc[i, j]
+
+ if cov_mat[4][i, j] < cov_mat[0][i, j] or cov_mat[4][i, j] < cov_mat[8][i, j]:
+
+ if np.real(cov_mat[2][i, j]) > 0:
+ alpha = -1
+ beta = (a + c) / (b + c)
+ fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+ fd = b - fs
+
+ else:
+ beta = 1
+ alpha = (a - b) / (c - b)
+ fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+ fs = b - fd
+
+ else:
+ fv[i, j], fc[i, j] = 0, 0
+ a, b, c = cov_mat[0][i, j], cov_mat[8][i, j], cov_mat[2][i, j]
+
+ if np.real(cov_mat[2][i, j]) > 0:
+ alpha = -1
+ beta = (a + c) / (b + c)
+ fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+ fd = b - fs
+
+ else:
+ beta = 1
+ alpha = (a - b) / (c - b)
+ fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+ fs = b - fd
+
+ ps[i, j] = fs * (1 + abs(beta * np.conj(beta)))
+ pd[i, j] = fd * (1 + abs(alpha * np.conj(alpha)))
+
+ return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)),
+ 10 * np.log10(abs(fv)), 10 * np.log10(abs(fc))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv, pc = x[0], x[1], x[2], x[3]
+
+ scattering_list = ['ps', 'pd', 'pv', 'pc']
+
+ for i in range(len(x)):
+
+ cols, rows = ps.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Yamaguchi2005_4SD'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
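+# --- Hedged helper sketch for the repeated surface/double-bounce split ---------
+# Yamaguchi2005.get_decomposition repeats the same alpha/beta block in all six
+# branches. A hypothetical helper makes the shared pattern explicit:
+def split_surface_double_sketch(a, b, c):
+    if np.real(c) > 0:
+        alpha = -1
+        beta = (a + c) / (b + c)
+        fs = (a - b) / (abs(beta * np.conj(beta)) - 1)
+        fd = b - fs
+    else:
+        beta = 1
+        alpha = (a - b) / (c - b)
+        fd = (a - b) / (abs(alpha * np.conj(alpha)) - 1)
+        fs = b - fd
+    return alpha, beta, fs, fd
+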
+class Yamaguchi2011(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def get_scattering_power(self):
+
+ cov_mat = self.get_cov_mat_img()
+ rot_coh_mat = self.rot_coh_mat_img()
+ rows, cols = np.shape(cov_mat[0])[0], np.shape(cov_mat[0])[1]
+
+ pc = 2 * abs(np.imag(rot_coh_mat[5]))
+ pv = np.zeros([rows, cols])
+ s = np.zeros([rows, cols])
+ d = np.zeros([rows, cols])
+ c = np.zeros([rows, cols], dtype=complex)
+
+ for i in range(rows):
+ for j in range(cols):
+
+ if cov_mat[8][i, j] <= 0 or cov_mat[0][i, j] <= 0:
+ cov_mat[8][i, j], cov_mat[0][i, j] = 1.0, 1.0
+
+ if 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) < -2:
+
+ pv[i, j] = (15 / 4) * rot_coh_mat[8][i, j] - (15 / 8) * pc[i, j]
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+                        pv[i, j] = (15 / 4) * rot_coh_mat[8][i, j] - (15 / 8) * pc[i, j]
+ s[i, j] = rot_coh_mat[0][i, j] - 0.5 * pv[i, j]
+ d[i, j] = rot_coh_mat[4][i, j] - (7 / 30) * pv[i, j] - 0.5 * pc[i, j]
+ c[i, j] = rot_coh_mat[1][i, j] - (1 / 6) * pv[i, j]
+
+ elif 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) > 2:
+
+ pv[i, j] = (15 / 4) * rot_coh_mat[8][i, j] - (15 / 8) * pc[i, j]
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+ pv[i, j] = (15 / 4) * rot_coh_mat[8][i, j] - (15 / 8) * pc[i, j]
+ s[i, j] = rot_coh_mat[0][i, j] - 0.5 * pv[i, j]
+                    d[i, j] = rot_coh_mat[4][i, j] - (7 / 30) * pv[i, j] - 0.5 * pc[i, j]
+ c[i, j] = rot_coh_mat[1][i, j] + (1 / 6) * pv[i, j]
+
+ else:
+ pv[i, j] = 4 * rot_coh_mat[8][i, j] - 2 * pc[i, j]
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+ pv[i, j] = 4 * rot_coh_mat[8][i, j] - 2 * pc[i, j]
+ s[i, j] = rot_coh_mat[0][i, j] - 0.5 * pv[i, j]
+ d[i, j] = rot_coh_mat[4][i, j] - rot_coh_mat[8][i, j]
+ c[i, j] = rot_coh_mat[1][i, j]
+
+ return [pc, pv, s, d, c, rot_coh_mat]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Four-Component Scattering Power Decomposition With Rotation of Coherency Matrix"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/5710415?arnumber=5710415
+
+ DOI: 10.1109/TGRS.2010.2099124
+
+        In this paper, the rotation of the coherency matrix is utilized to minimize the cross-polarized
+        component, since oriented urban structures and vegetation are otherwise decomposed into the
+        same scattering mechanism.
+
+ fs: for surface scattering
+ fd: for double-bounce scattering
+ fv: for volume scattering
+ fc: for helix scattering
+
+ <[T]> = fs * <[Ts]> + fd * <[Td]> + fv * <[Tv]> + fc * <[Tc]>
+
+ <[T(theta)]> = R(theta) * <[T]> * transpose(conj(R(theta))
+
+ R(theta) = [1 0 0
+ 0 cos(2*theta) -sin(2*theta)
+ 0 sin(2*theta) cos(2*theta)]
+
+ ps: scattered power return associated with surface scattering
+ pd: scattered power return associated with double-bounce scattering
+ pv: scattered power return associated with volume scattering
+ pc: scattered power return associated with helix scattering
+
+ :return: It returns ps, pd, pv, and pc
+ """
+
+ x = self.get_scattering_power()
+ rot_coh_mat = x[5]
+ t_11_theta, t_22_theta, t_33_theta = rot_coh_mat[0], rot_coh_mat[4], rot_coh_mat[8]
+ rows, cols = np.shape(rot_coh_mat[0])[0], np.shape(rot_coh_mat[0])[1]
+ pc, pv, s, d, c = x[0], x[1], x[2], x[3], x[4]
+ # print(np.shape(pc), np.shape(pv), np.shape(s), np.shape(d), np.shape(c))
+ ps, pd = np.zeros([rows, cols]), np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ tp = t_11_theta[i, j] + t_22_theta[i, j] + t_33_theta[i, j]
+ if pv[i, j] + pc[i, j] > tp:
+ ps[i, j], pd[i, j] = 0, 0
+ pv[i, j] = tp - pc[i, j]
+
+ else:
+
+ c1 = t_11_theta[i, j] - t_22_theta[i, j] - t_33_theta[i, j] + pc[i, j]
+
+ if c1 > 0:
+ ps[i, j] = s[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+ pd[i, j] = d[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+
+ else:
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ if ps[i, j] > 0 > pd[i, j]:
+ pd[i, j] = 0
+ ps[i, j] = tp - pv[i, j] - pc[i, j]
+
+ elif pd[i, j] > 0 > ps[i, j]:
+ ps[i, j] = 0
+ pd[i, j] = tp - pv[i, j] - pc[i, j]
+
+ return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)),
+ 10 * np.log10(abs(pv)), 10 * np.log10(abs(pc))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv, pc = x[0], x[1], x[2], x[3]
+
+ scattering_list = ['ps', 'pd', 'pv', 'pc']
+
+ for i in range(len(x)):
+
+ cols, rows = ps.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'Yamaguchi2011_4SD'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
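+# --- Hedged sketch of the +/-2 dB branch test -----------------------------------
+# Every 4SD/6SD/7SD variant above and below branches on 10*log10(C33/C11)
+# against +/-2 dB to select the volume-scattering model, resetting degenerate
+# pixels to a 0 dB ratio. A hypothetical guard expressing the same rule:
+def co_pol_ratio_db_sketch(c11, c33):
+    if c11 <= 0 or c33 <= 0:       # degenerate pixel: treated as a 0 dB ratio
+        return 0.0
+    return 10.0 * math.log10(c33 / c11)
+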
+class General4SD(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def get_scattering_power(self):
+
+ cov_mat = self.get_cov_mat_img()
+ rot_coh_mat = self.rot_coh_mat_img()
+ rows, cols = np.shape(cov_mat[0])[0], np.shape(cov_mat[0])[1]
+
+ pc = 2 * abs(np.imag(rot_coh_mat[5]))
+ pv = np.zeros([rows, cols])
+ s = np.zeros([rows, cols])
+ d = np.zeros([rows, cols])
+ c = np.zeros([rows, cols], dtype=complex)
+        # branch criterion; written to mirror the corresponding General6SD expression below
+        c1 = rot_coh_mat[0] - rot_coh_mat[4] + (7 / 8) * rot_coh_mat[8] + (1 / 16) * pc
+
+ for i in range(rows):
+ for j in range(cols):
+
+ tp = rot_coh_mat[0][i, j] + rot_coh_mat[4][i, j] + rot_coh_mat[8][i, j]
+
+ if c1[i, j] > 0:
+
+                    if cov_mat[8][i, j] <= 0 or cov_mat[0][i, j] <= 0:
+                        cov_mat[8][i, j], cov_mat[0][i, j] = 1.0, 1.0
+
+ if 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) < -2:
+
+ pv[i, j] = (15 / 8) * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+ pv[i, j] = (15 / 8) * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+ s[i, j] = rot_coh_mat[0][i, j] - 0.5 * pv[i, j]
+ d[i, j] = tp - pv[i, j] - pc[i, j] - s[i, j]
+ c[i, j] = rot_coh_mat[1][i, j] + rot_coh_mat[2][i, j] - (1 / 6) * pv[i, j]
+
+ elif 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) > 2:
+
+ pv[i, j] = (15 / 8) * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+ pv[i, j] = (15 / 8) * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+ s[i, j] = rot_coh_mat[0][i, j] - 0.5 * pv[i, j]
+ d[i, j] = tp - pv[i, j] - pc[i, j] - s[i, j]
+ c[i, j] = rot_coh_mat[1][i, j] + rot_coh_mat[2][i, j] + (1 / 6) * pv[i, j]
+
+ else:
+
+ pv[i, j] = 2 * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+ pv[i, j] = 2 * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+ s[i, j] = rot_coh_mat[0][i, j] - 0.5 * pv[i, j]
+ d[i, j] = tp - pv[i, j] - pc[i, j] - s[i, j]
+ c[i, j] = rot_coh_mat[1][i, j] + rot_coh_mat[2][i, j]
+
+ else:
+ pv[i, j] = (15 / 16) * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+
+ if pv[i, j] < 0:
+ pc[i, j] = 0
+
+ pv[i, j] = (15 / 16) * (2 * rot_coh_mat[8][i, j] - pc[i, j])
+ s[i, j] = rot_coh_mat[0][i, j]
+ d[i, j] = tp - pv[i, j] - pc[i, j] - s[i, j]
+ c[i, j] = rot_coh_mat[1][i, j] + rot_coh_mat[2][i, j]
+
+ return [pc, pv, s, d, c, rot_coh_mat, c1]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "General Four-Component Scattering Power Decomposition With Unitary Transformation
+ of Coherency Matrix"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/6311461
+
+ DOI: 10.1109/TGRS.2012.2212446
+
+        This paper presents a new general four-component scattering power method based on a unitary
+        transformation of the coherency matrix. The additional transformation makes it possible to
+        reduce the number of independent parameters from eight to seven.
+
+ The model can be written as:
+
+ <[T(theta)]> = fs * <[Ts]> + fd * <[Td]> + fv * <[Tv]> + fc * <[Tc]>
+
+ <[T(phi)]> = U(phi) * <[T(theta)]> * transpose(conj(U(phi))
+
+ <[T(phi)]> = U(phi) * (fs * <[Ts]> + fd * <[Td]> + fv * <[Tv]> + fc * <[Tc]>) * transpose(conj(U(phi))
+
+ <[T(phi)]> = fs * <[T(phi)s]> + fd * <[T(phi)d]> + fv * <[T(phi)v]> + fc * <[T(phi)c]>
+
+ U(phi) = [1 0 0
+ 0 cos(phi) j * sin(phi)
+ 0 j * sin(phi) cos(phi)]
+
+ fs: for surface scattering
+ fd: for double-bounce scattering
+ fv: for volume scattering
+ fc: for helix scattering
+
+ ps: scattered power return associated with surface scattering
+ pd: scattered power return associated with double-bounce scattering
+ pv: scattered power return associated with volume scattering
+ pc: scattered power return associated with helix scattering
+
+ :return: It returns ps, pd, pv, and pc which are associated with fs, fd, fv, and fc
+ """
+
+ x = self.get_scattering_power()
+ rot_coh_mat, c1 = x[5], x[6]
+ t_11_theta, t_22_theta, t_33_theta = rot_coh_mat[0], rot_coh_mat[4], rot_coh_mat[8]
+ rows, cols = np.shape(rot_coh_mat[0])[0], np.shape(rot_coh_mat[0])[1]
+ pc, pv, s, d, c = x[0], x[1], x[2], x[3], x[4]
+ ps, pd = np.zeros([rows, cols]), np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ tp = rot_coh_mat[0][i, j] + rot_coh_mat[4][i, j] + rot_coh_mat[8][i, j]
+
+ if c1[i, j] < 0:
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ else:
+ if pv[i, j] + pc[i, j] > tp:
+ ps[i, j], pd[i, j] = 0, 0
+ pv[i, j] = tp - pc[i, j]
+
+ else:
+ c0 = 2 * t_11_theta[i, j] + pc[i, j] - tp
+
+ if c0 > 0:
+ ps[i, j] = s[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+ pd[i, j] = d[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+
+ else:
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ if ps[i, j] > 0 > pd[i, j]:
+ pd[i, j] = 0
+ ps[i, j] = tp - pv[i, j] - pc[i, j]
+
+ elif pd[i, j] > 0 > ps[i, j]:
+ ps[i, j] = 0
+ pd[i, j] = tp - pv[i, j] - pc[i, j]
+
+ return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)),
+ 10 * np.log10(abs(pv)), 10 * np.log10(abs(pc))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv, pc = x[0], x[1], x[2], x[3]
+
+ scattering_list = ['ps', 'pd', 'pv', 'pc']
+
+ for i in range(len(x)):
+
+ cols, rows = ps.shape
+ driver = gdal.GetDriverByName("GTiff")
+ outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'singh_4SD'
+ outfile += '.tiff'
+ out_data = driver.Create(outfile, rows, cols, 1, gdal.GDT_Float32)
+ out_data.SetProjection(self.get_band().GetProjection())
+ out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+ out_data.GetRasterBand(1).WriteArray(x[i])
+
+
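+# --- Hedged check that U(phi) from the docstring is unitary ---------------------
+# The special unitary transform quoted in General4SD.get_decomposition should
+# satisfy U * transpose(conj(U)) = I for any phi. Hypothetical check:
+def u_phi_unitary_sketch(phi):
+    u = np.array([[1, 0, 0],
+                  [0, np.cos(phi), 1j * np.sin(phi)],
+                  [0, 1j * np.sin(phi), np.cos(phi)]])
+    return np.allclose(u @ u.conj().T, np.eye(3))
+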
+class General6SD(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def scattering_power(self):
+
+ cov_mat = self.get_cov_mat_img()
+ coh_mat = self.get_coh_mat_img()
+ rot_coh_mat = self.rot_coh_mat_img()
+ t_11_theta, t_22_theta, t_33_theta = rot_coh_mat[0], rot_coh_mat[4], rot_coh_mat[8]
+ t_12_theta = rot_coh_mat[1]
+ rows, cols = np.shape(cov_mat[0])[0], np.shape(cov_mat[0])[1]
+
+ ph = 2 * abs(np.imag(coh_mat[5]))
+ pcd = 2 * abs(np.imag(rot_coh_mat[2]))
+ pod = 2 * abs(np.real(rot_coh_mat[2]))
+ pcw = pcd + pod
+ pv = 2 * rot_coh_mat[8] - ph - pod - pcd
+ s = np.zeros([rows, cols])
+ d = np.zeros([rows, cols])
+ c = np.zeros([rows, cols], dtype=complex)
+ c1_mat = np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ c1 = t_11_theta[i, j] - t_22_theta[i, j] + (7 / 8) * t_33_theta[i, j]
+ c1 += (1 / 16) * ph[i, j] - (15 / 16) * (pod[i, j] + pcd[i, j])
+ c1_mat[i, j] = c1
+
+ if c1 > 0:
+ if pv[i, j] < 0:
+ ph[i, j] = 0
+ pod[i, j] = 0
+ pcd[i, j] = 0
+ pv[i, j] = 2 * t_33_theta[i, j]
+
+ if cov_mat[8][i, j] <= 0 or cov_mat[0][i, j] <= 0:
+ cov_mat[8][i, j], cov_mat[0][i, j] = 1.0, 1.0
+
+ if 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) < -2:
+ pv[i, j] = (15 / 8) * pv[i, j]
+ s[i, j] = t_11_theta[i, j] - 0.5 * pv[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22_theta[i, j] - (7 / 30) * pv[i, j] - 0.5 * ph[i, j]
+ c[i, j] = t_12_theta[i, j] - (1 / 6) * pv[i, j]
+
+ elif 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) > 2:
+ pv[i, j] = (15 / 8) * pv[i, j]
+ s[i, j] = t_11_theta[i, j] - 0.5 * pv[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22_theta[i, j] - (7 / 30) * pv[i, j] - 0.5 * ph[i, j]
+ c[i, j] = t_12_theta[i, j] + (1 / 6) * pv[i, j]
+
+ else:
+ pv[i, j] = 2 * pv[i, j]
+ s[i, j] = t_11_theta[i, j] - 0.5 * pv[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22_theta[i, j] - 0.25 * pv[i, j] - 0.5 * ph[i, j]
+ c[i, j] = t_12_theta[i, j]
+
+ else:
+
+ if pv[i, j] < 0:
+ pv[i, j] = 0
+
+ if ph[i, j] > pcw[i, j]:
+ pcw[i, j] = 2 * t_33_theta[i, j] - ph[i, j]
+
+ if pcw[i, j] < 0:
+ pcw[i, j] = 0
+
+ if pod[i, j] > pcd[i, j]:
+ pcd[i, j] = pcw[i, j] - pod[i, j]
+ else:
+ pod[i, j] = pcw[i, j] - pcd[i, j]
+
+ else:
+ ph[i, j] = 2 * t_33_theta[i, j] - pcw[i, j]
+
+ if ph[i, j] < 0:
+ ph[i, j] = 0
+
+ pv[i, j] = (15 / 16) * pv[i, j]
+ s[i, j] = t_11_theta[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22_theta[i, j] - (7 / 15) * pv[i, j] - 0.5 * ph[i, j]
+ c[i, j] = t_12_theta[i, j]
+
+ return [ph, pcd, pod, pv, pcw, s, d, c, cov_mat, rot_coh_mat, c1_mat]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Model-Based Six-Component Scattering Matrix Power Decomposition"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/8356711
+
+ DOI: 10.1109/TGRS.2018.2824322
+
+        The major accomplishment of this paper is the inclusion of two new physical scattering
+        submodels, extending the existing four-component scattering power decomposition (4SD; refer to
+        Yamaguchi2005, Yamaguchi2011, and General4SD) to account for the real and imaginary parts of
+        T13 of the coherency matrix.
+
+        *** One is oriented dipole scattering and the other is oriented quarter-wave reflector scattering ***
+
+ The total scattered power can be written as:
+
+        [T] = ps*[Ts] + pd*[Td] + pv*[Tv] + ph*[Th] + pod*[Tod] + pcd*[Tcd]
+
+ ps: surface scattering
+ pd: double bounce scattering
+ pv: volume scattering
+ ph: helix scattering
+ pcd: compound dipole scattering
+ pod: oriented dipole scattering
+
+ :return: It returns the decibel units of ps, pd, pv, ph, pod, pcd
+ """
+
+ x = self.scattering_power()
+ ph, pcd, pod, pv, pcw, s, d, c = x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]
+ cov_mat, rot_coh_mat = x[8], x[9]
+ c1_mat = x[10]
+ t_11_theta, t_22_theta, t_33_theta = rot_coh_mat[0], rot_coh_mat[4], rot_coh_mat[8]
+ rows, cols = np.shape(ph)[0], np.shape(ph)[1]
+ ps, pd = np.zeros([rows, cols]), np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ tp = t_11_theta[i, j] + t_22_theta[i, j] + t_33_theta[i, j]
+
+ if c1_mat[i, j] < 0:
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ else:
+
+ if pv[i, j] + ph[i, j] + pcw[i, j] > tp:
+ ps[i, j], pd[i, j] = 0, 0
+ pv[i, j] = tp - ph[i, j] - pod[i, j] - pcd[i, j]
+
+ else:
+ c0 = 2 * t_11_theta[i, j] + ph[i, j] - tp
+
+ if c0 > 0:
+ ps[i, j] = s[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+ pd[i, j] = d[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+
+ else:
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ if ps[i, j] > 0 > pd[i, j]:
+ pd[i, j] = 0
+ ps[i, j] = tp - pv[i, j] - ph[i, j] - pod[i, j] - pcd[i, j]
+
+ elif pd[i, j] > 0 > ps[i, j]:
+ ps[i, j] = 0
+ pd[i, j] = tp - pv[i, j] - ph[i, j] - pod[i, j] - pcd[i, j]
+
+ return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)), 10 * np.log10(abs(pv)),
+ 10 * np.log10(abs(ph)), 10 * np.log10(abs(pod)), 10 * np.log10(abs(pcd))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv, ph, pod, pcd = x[0], x[1], x[2], x[3], x[4], x[5]
+
+ scattering_list = ['ps', 'pd', 'pv', 'ph', 'pod', 'pcd']
+
+ for i in range(len(x)):
+
+            rows, cols = ps.shape
+            driver = gdal.GetDriverByName("GTiff")
+            outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'singh_6SD'
+            outfile += '.tiff'
+            # gdal.Driver.Create takes (name, xsize=cols, ysize=rows, ...)
+            out_data = driver.Create(outfile, cols, rows, 1, gdal.GDT_Float32)
+            out_data.SetProjection(self.get_band().GetProjection())
+            out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+            out_data.GetRasterBand(1).WriteArray(x[i])
+            out_data.FlushCache()  # make sure the band is written to disk
+
+
+class General7SD(Polarimetry):
+ def __init__(self, b, w):
+ super().__init__(b, w)
+
+ def scattering_power(self):
+
+ cov_mat = self.get_cov_mat_img()
+ coh_mat = self.get_coh_mat_img()
+ t_11, t_22, t_33 = coh_mat[0], coh_mat[4], coh_mat[8]
+ t_12 = coh_mat[1]
+ rows, cols = np.shape(cov_mat[0])[0], np.shape(cov_mat[0])[1]
+
+ ph = 2 * abs(np.imag(coh_mat[5]))
+ pcd = 2 * abs(np.imag(coh_mat[2]))
+ pod = 2 * abs(np.real(coh_mat[2]))
+ pmd = 2 * abs(np.real(coh_mat[5]))
+ pmw = pmd + ph
+ pcw = pcd + pod
+ pv = 2 * t_33 - ph - pod - pcd - pmd
+ s = np.zeros([rows, cols])
+ d = np.zeros([rows, cols])
+ c = np.zeros([rows, cols], dtype=complex)
+ c1_mat = np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ c1 = t_11[i, j] - t_22[i, j] - (7 / 8) * t_33[i, j]
+ c1 += (1 / 16) * (pmd[i, j] + ph[i, j]) - (15 / 16) * (pcd[i, j] + pod[i, j])
+
+ c1_mat[i, j] = c1
+
+ if c1 > 0:
+ if pv[i, j] < 0:
+ ph[i, j], pod[i, j] = 0, 0
+ pmd[i, j], pcd[i, j] = 0, 0
+ pv[i, j] = 2 * t_33[i, j]
+
+ if cov_mat[8][i, j] <= 0 or cov_mat[0][i, j] <= 0:
+ cov_mat[8][i, j], cov_mat[0][i, j] = 1.0, 1.0
+
+ if 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) < -2:
+ pv[i, j] = (15 / 8) * pv[i, j]
+ s[i, j] = t_11[i, j] - 0.5 * pv[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22[i, j] - (7 / 30) * pv[i, j] - 0.5 * ph[i, j] - 0.5 * pmd[i, j]
+ c[i, j] = t_12[i, j] - (1 / 6) * pv[i, j]
+
+ elif 10 * math.log10(cov_mat[8][i, j] / cov_mat[0][i, j]) > 2:
+ pv[i, j] = (15 / 8) * pv[i, j]
+ s[i, j] = t_11[i, j] - 0.5 * pv[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22[i, j] - (7 / 30) * pv[i, j] - 0.5 * ph[i, j] - 0.5 * pmd[i, j]
+ c[i, j] = t_12[i, j] + (1 / 6) * pv[i, j]
+
+ else:
+ pv[i, j] = 2 * pv[i, j]
+ s[i, j] = t_11[i, j] - 0.5 * pv[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22[i, j] - 0.25 * pv[i, j] - 0.5 * ph[i, j] - 0.5 * pmd[i, j]
+ c[i, j] = t_12[i, j]
+
+ else:
+ if pv[i, j] < 0:
+ pv[i, j] = 0
+
+ if pmw[i, j] > pcw[i, j]:
+ pcw[i, j] = 2 * t_33[i, j] - pmw[i, j]
+
+ if pcw[i, j] < 0:
+ pcw[i, j] = 0
+
+ if pod[i, j] > pcd[i, j]:
+ pcd[i, j] = pcw[i, j] - pod[i, j]
+
+ if pcd[i, j] < 0:
+ pcd[i, j] = 0
+
+ else:
+ pod[i, j] = pcw[i, j] - pcd[i, j]
+
+ if pod[i, j] < 0:
+ pod[i, j] = 0
+
+ else:
+ pmw[i, j] = 2 * t_33[i, j] - pcw[i, j]
+
+ if pmw[i, j] < 0:
+ pmw[i, j] = 0
+
+ if pmd[i, j] > ph[i, j]:
+ ph[i, j] = pmw[i, j] - pmd[i, j]
+
+ if ph[i, j] < 0:
+ ph[i, j] = 0
+
+ else:
+ pmd[i, j] = pmw[i, j] - ph[i, j]
+ if pmd[i, j] < 0:
+ pmd[i, j] = 0
+
+ pv[i, j] = (15 / 16) * pv[i, j]
+ s[i, j] = t_11[i, j] - 0.5 * pod[i, j] - 0.5 * pcd[i, j]
+ d[i, j] = t_22[i, j] - (7 / 15) * pv[i, j] - 0.5 * ph[i, j] - 0.5 * pmd[i, j]
+ c[i, j] = t_12[i, j]
+
+ return [ph, pcd, pcw, pmd, pmw, pod, pv, s, d, c, cov_mat, coh_mat, c1_mat]
+
+ def get_decomposition(self):
+
+ """
+ PROGRAMMER: Raktim Ghosh, MSc (University of Twente) - Date Written (May, 2020)
+
+ Paper Details: "Seven-Component Scattering Power Decomposition of POLSAR Coherency Matrix"
+
+ IEEEXplore: https://ieeexplore.ieee.org/document/8751154
+
+ DOI: 10.1109/TGRS.2019.2920762
+
+        The 9 independent parameters of the target coherency matrix are associated with physical
+        scattering models. This paper assigns one such physical scattering model to the real part
+        of T23 and develops a new scattering power decomposition model.
+
+        The established 7SD model is an extension of the previously developed 6SD model (refer to General 6SD).
+
+        [T] = ps*[Ts] + pd*[Td] + pv*[Tv] + ph*[Th] + pod*[Tod] + pcd*[Tcd] + pmd*[Tmd]
+
+        The scattering powers (ps, pd, pv, ph, pod, pcd, pmd) represent the scattering contributions
+        of the corresponding channels of the scattering matrix.
+
+ ps: surface scattering
+ pd: double bounce scattering
+ pv: volume scattering
+ ph: helix scattering
+ pcd: compound dipole scattering
+ pod: oriented dipole scattering
+ pmd: mixed dipole scattering
+
+        :return: It returns the decibel units of ps, pd, pv, ph, pcd, pod, pmd
+ """
+
+ x = self.scattering_power()
+ ph, pcd, pcw, pmd, pmw, pod, pv = x[0], x[1], x[2], x[3], x[4], x[5], x[6]
+ s, d, c = x[7], x[8], x[9]
+ cov_mat, coh_mat = x[10], x[11]
+ c1_mat = x[12]
+ t_11, t_22, t_33 = coh_mat[0], coh_mat[4], coh_mat[8]
+ rows, cols = np.shape(ph)[0], np.shape(ph)[1]
+ ps, pd = np.zeros([rows, cols]), np.zeros([rows, cols])
+
+ for i in range(rows):
+ for j in range(cols):
+
+ tp = t_11[i, j] + t_22[i, j] + t_33[i, j]
+
+ if c1_mat[i, j] < 0:
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ else:
+
+ if pv[i, j] + pmw[i, j] + pcw[i, j] > tp:
+ ps[i, j], pd[i, j] = 0, 0
+ pv[i, j] = tp - ph[i, j] - pmd[i, j] - pod[i, j] - pcd[i, j]
+
+ else:
+ c0 = 2 * t_11[i, j] + ph[i, j] + pmd[i, j] - tp
+
+ if c0 > 0:
+ ps[i, j] = s[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+ pd[i, j] = d[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / s[i, j]
+
+ else:
+ ps[i, j] = s[i, j] - abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+ pd[i, j] = d[i, j] + abs((c[i, j] * np.conj(c[i, j]))) / d[i, j]
+
+ if ps[i, j] > 0 > pd[i, j]:
+ pd[i, j] = 0
+ ps[i, j] = tp - pv[i, j] - ph[i, j] - pod[i, j] - pcd[i, j]
+
+ elif pd[i, j] > 0 > ps[i, j]:
+ ps[i, j] = 0
+ pd[i, j] = tp - pv[i, j] - ph[i, j] - pod[i, j] - pcd[i, j]
+
+        # abs() mirrors the 6SD return path and avoids NaNs from small negative powers
+        return [10 * np.log10(abs(ps)), 10 * np.log10(abs(pd)), 10 * np.log10(abs(pv)),
+                10 * np.log10(abs(ph)), 10 * np.log10(abs(pod)), 10 * np.log10(abs(pcd)),
+                10 * np.log10(abs(pmd))]
+
+ def get_result(self):
+ x = self.get_decomposition()
+ ps, pd, pv, ph, pod, pcd, pmd = x[0], x[1], x[2], x[3], x[4], x[5], x[6]
+
+ scattering_list = ['ps', 'pd', 'pv', 'ph', 'pod', 'pcd', 'pmd']
+
+ for i in range(len(x)):
+
+            rows, cols = ps.shape
+            driver = gdal.GetDriverByName("GTiff")
+            outfile = 'scattering' + '_' + scattering_list[i] + '_' + 'singh_7SD'
+            outfile += '.tiff'
+            # gdal.Driver.Create takes (name, xsize=cols, ysize=rows, ...)
+            out_data = driver.Create(outfile, cols, rows, 1, gdal.GDT_Float32)
+            out_data.SetProjection(self.get_band().GetProjection())
+            out_data.SetGeoTransform(self.get_band().GetGeoTransform())
+            out_data.GetRasterBand(1).WriteArray(x[i])
+            out_data.FlushCache()  # make sure the band is written to disk
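+
+# A minimal usage sketch for General7SD, mirroring the quoted ModTouzi example
+# below (the input path is illustrative; the constructor takes the opened band
+# and the window size, as in __init__(self, b, w)):
+#
+#   band = gdal.Open("HH.tif")
+#   decomposition = General7SD(band, 5)
+#   decomposition.get_result()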
+"""
+band = gdal.Open("HH.tif")
+#band = gdal.Open("C:/Users/ThisPc/RS2-SLC-FQ2-DES-15-Apr-2008_14.38-PDS_05116980_Cal.tif")
+# band = gdal.Open("C:/Users/ThisPc/RS2-SLC-FQ2-DES-15-Apr-2008_14.tif")
+
+decomposition = ModTouzi(band, 5)
+decomposition.get_result()
+"""
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspCloudePottierDecomposition.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspCloudePottierDecomposition.py
new file mode 100644
index 0000000..fb31608
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspCloudePottierDecomposition.py
@@ -0,0 +1,132 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspCloudePottierDecomposition.py  Cloude-Pottier decomposition
+@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
+ (Averaging using a sliding window)
+           V1.0.1: (1) selectable decomposition features; (2) bin-to-tif conversion
+@Contact:
+@Author:SHJ
+@Date:2021/9/24 9:06
+@Version:1.0.1
+"""
+
+import os
+import shutil
+import subprocess
+import logging
+
+logger = logging.getLogger("mylog")
+
+
+class PspCloudePottierDecomposition:
+ """
+    Invokes PolSARpro 4.2.0's Cloude-Pottier polarimetric decomposition (h_a_alpha_decomposition_T3.exe)
+ """
+
+ def __init__(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ exeDecomposeName='h_a_alpha_decomposition_T3.exe'):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+ """
+ self.__exeName = exeDecomposeName
+ self.__exeDir = exeDir
+ self.__inT3Dir = inT3Dir
+ self.__outDir = outDir
+ self.__DecompostFlag = False
+ pass
+
+ def api_h_a_alpha_decomposition_T3(
+ self,
+ rectX,
+ rectY,
+ rectWidth,
+ rectHeight,
+ Nwin=1):
+ """
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param rectWidth: valid-region width
+        :param rectHeight: valid-region height
+        :param Nwin: size of the (Nwin, Nwin) sliding window used to compute local estimates (int)
+ """
+ if self.__DecompostFlag:
+ return True
+ if len(self.__exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ logger.error(self.__exeName + ' not exists.')
+ return False
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
+ logger.error(self.__exeName + ' not exists.')
+ return False
+ exePath = self.__exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(self.__inT3Dir):
+ logger.error('T3 Matrix check failed.')
+ return False
+ if not os.path.exists(self.__outDir):
+ os.makedirs(self.__outDir)
+
+ alpbetdelgam = 1
+ Lambda = 1
+ alpha = 1
+ entropy = 1
+ anisotropy = 1
+
+ CombHA = 1
+ CombH1mA = 1
+ Comb1mHA = 1
+ Comb1mH1mA = 1
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = rectWidth
+ Sub_Ncol = rectHeight
+
+ para_list = [
+ exePath,
+ self.__inT3Dir,
+ self.__outDir,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol,
+ alpbetdelgam,
+ Lambda,
+ alpha,
+ entropy,
+ anisotropy,
+ CombHA,
+ CombH1mA,
+ Comb1mHA,
+ Comb1mH1mA]
+ cmd = " ".join(str(i) for i in para_list)
+ config_path = os.path.join(self.__inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
+ result_tuple = subprocess.getstatusoutput(cmd)
+
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+ self.__DecompostFlag = True
+ return True
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
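+
+# Hypothetical usage sketch (paths and scene size are illustrative):
+#   psp = PspCloudePottierDecomposition(r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
+#                                       r'D:\in_T3', r'D:\out_T3')
+#   psp.api_h_a_alpha_decomposition_T3(0, 0, 1000, 1000, Nwin=3)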
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspFreemanDecomposition.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspFreemanDecomposition.py
new file mode 100644
index 0000000..656f81a
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspFreemanDecomposition.py
@@ -0,0 +1,109 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspFreemanDecomposition.py
+@Function:
+@Contact:
+@Author:LVY
+@Date:2021/10/12 18:45
+@Version:1.0.0
+"""
+
+import os
+import shutil
+import subprocess
+import logging
+logger = logging.getLogger("mylog")
+
+
+class PspFreemanDecomposition:
+ """
+    Freeman decomposition (freeman_decomposition_T3.exe)
+ """
+
+ def __init__(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ exeDecomposeName='freeman_decomposition_T3.exe'):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+ """
+ self.__exeName = exeDecomposeName
+ self.__exeDir = exeDir
+ self.__inT3Dir = inT3Dir
+ self.__outDir = outDir
+ self.__DecompostFlag = False
+ pass
+
+ def api_freeman_decomposition_T3(
+ self,
+ rectX,
+ rectY,
+ rectWidth,
+ rectHeight,
+ Nwin=1):
+ """
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param rectWidth: valid-region width
+        :param rectHeight: valid-region height
+        :param Nwin: size of the (Nwin, Nwin) sliding window used to compute local estimates (int)
+ """
+ if self.__DecompostFlag:
+ return True
+ if len(self.__exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ logger.error(self.__exeName + ' not exists.')
+ return False
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
+ logger.error(self.__exeName + ' not exists.')
+ return False
+ exePath = self.__exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(self.__inT3Dir):
+ logger.error('T3 Matrix check failed.')
+ return False
+ if not os.path.exists(self.__outDir):
+ os.makedirs(self.__outDir)
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = rectWidth
+ Sub_Ncol = rectHeight
+
+ para_list = [
+ exePath,
+ self.__inT3Dir,
+ self.__outDir,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol]
+ cmd = " ".join(str(i) for i in para_list)
+ config_path = os.path.join(self.__inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
+ result_tuple = subprocess.getstatusoutput(cmd)
+
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+ self.__DecompostFlag = True
+ return True
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
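+
+# Hypothetical usage sketch (paths and scene size are illustrative):
+#   freeman = PspFreemanDecomposition(r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
+#                                     r'D:\in_T3', r'D:\out_T3')
+#   freeman.api_freeman_decomposition_T3(0, 0, 1000, 1000, Nwin=3)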
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspHAAlphaDecomposition.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspHAAlphaDecomposition.py
new file mode 100644
index 0000000..041a124
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspHAAlphaDecomposition.py
@@ -0,0 +1,435 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspHAAlphaDecomposition.py
+@Function: Cloude-Pottier eigenvector/eigenvalue based decomposition of a 3x3 coherency matrix [T3]
+ (Averaging using a sliding window)
+           V1.0.1: (1) selectable decomposition features; (2) bin-to-tif conversion
+@Contact:
+@Author:SHJ
+@Date:2021/9/24 9:06
+@Version:1.0.1
+"""
+
+import os
+import shutil
+import subprocess
+import struct
+import numpy as np
+import glob
+from PIL import Image
+import logging
+logger = logging.getLogger("mylog")
+import multiprocessing
+
+class PspHAAlphaDecomposition:
+ """
+    Invokes the Cloude-Pottier polarimetric decomposition of PolSARpro 4.2.0
+ """
+    def __init__(self, normalization=False):
+        self.__normalization = normalization  # whether to normalize the output
+ self.__res_h_a_alpha_decomposition_T3 = {}
+ self.__res_h_a_alpha_eigenvalue_set_T3 = {}
+ self.__res_h_a_alpha_eigenvector_set_T3 = {}
+ pass
+
+ def api_creat_h_a_alpha_features_single_process(self, h_a_alpha_out_dir,
+ h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path,
+ h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir,is_trans_to_tif=True, is_read_to_dic=False):
+ """
+        Performs the Cloude-Pottier decomposition (h_a_alpha_decomposition, h_a_alpha_eigenvalue_set
+        and h_a_alpha_eigenvector_set) on a PolSARpro-format T3 matrix
+        :param h_a_alpha_out_dir: output directory for the h_a_alpha binary data
+        :param h_a_alpha_decomposition_T3_path: path to h_a_alpha_decomposition_T3.exe
+        :param h_a_alpha_eigenvalue_set_T3_path: path to h_a_alpha_eigenvalue_set_T3.exe
+        :param h_a_alpha_eigenvector_set_T3_path: path to h_a_alpha_eigenvector_set_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+ """
+        h_a_alpha_features = {}
+ h_a_alpha_features.update(self.api_h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1)))
+ logger.info("run h_a_alpha_decomposition_T3 success!")
+ logger.info('progress bar: 40%')
+ h_a_alpha_features.update(self.api_h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)))
+ logger.info("run h_a_alpha_eigenvalue_set_T3 success!")
+ logger.info('progress bar: 60%')
+ h_a_alpha_features.update(self.api_h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1)))
+ logger.info("run h_a_alpha_eigenvector_set_T3 success!")
+ logger.info('progress bar: 80%')
+ if is_trans_to_tif:
+ self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
+ if is_read_to_dic:
+ h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
+ return h_a_alpha_features
+
+ def api_creat_h_a_alpha_features(self, h_a_alpha_out_dir,
+ h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path,
+ h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir,is_trans_to_tif=True, is_read_to_dic=False):
+ """
+        Performs the Cloude-Pottier decomposition (h_a_alpha_decomposition, h_a_alpha_eigenvalue_set
+        and h_a_alpha_eigenvector_set) on a PolSARpro-format T3 matrix
+        :param h_a_alpha_out_dir: output directory for the h_a_alpha binary data
+        :param h_a_alpha_decomposition_T3_path: path to h_a_alpha_decomposition_T3.exe
+        :param h_a_alpha_eigenvalue_set_T3_path: path to h_a_alpha_eigenvalue_set_T3.exe
+        :param h_a_alpha_eigenvector_set_T3_path: path to h_a_alpha_eigenvector_set_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+ """
+
+ pool = multiprocessing.Pool(processes=3)
+ pl = []
+
+ logger.info("run h_a_alpha_decomposition_T3!")
+ pl.append(pool.apply_async(self.api_h_a_alpha_decomposition_T3, (h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1))))
+ logger.info("run h_a_alpha_eigenvalue_set_T3!")
+ pl.append(pool.apply_async(self.api_h_a_alpha_eigenvalue_set_T3, (h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif, is_read_to_dic, *(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))))
+ logger.info("run h_a_alpha_eigenvector_set_T3!")
+ pl.append(pool.apply_async(self.api_h_a_alpha_eigenvector_set_T3, (h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif,is_read_to_dic, *(1, 1, 1, 1, 1))))
+
+ pool.close()
+ pool.join()
+        logger.info('progress bar: 60%')
+
+        # apply_async runs the three decompositions in worker processes, so instance
+        # attributes set there never reach this process; collect the dictionaries
+        # returned through the AsyncResult objects instead.
+        h_a_alpha_features = {}
+        for res in pl:
+            h_a_alpha_features.update(res.get())
+        logger.info("run h_a_alpha_decomposition/eigenvalue_set/eigenvector_set T3 success!")
+ if is_trans_to_tif:
+ self.api_trans_T3_to_tif(h_a_alpha_out_dir, polsarpro_in_dir)
+ if is_read_to_dic:
+ h_a_alpha_features.update(self.api_read_T3_matrix(polsarpro_in_dir))
+ return h_a_alpha_features
+
+ def api_h_a_alpha_decomposition_T3(self, h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
+ """
+        Performs the Cloude-Pottier (H-A-Alpha) decomposition on a PolSARpro-format T3 matrix
+        :param h_a_alpha_out_dir: output directory for the h_a_alpha binary data
+        :param h_a_alpha_decomposition_T3_path: path to h_a_alpha_decomposition_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :param is_trans_to_tif: whether to convert the decomposition features to tif
+        :param is_read_to_dic: whether to also return the decomposition features as a dictionary
+        :param *args: 9 optional decomposition features (alpbetdelgam, Lambda, alpha, entropy, anisotropy,
+                      CombHA, CombH1mA, Comb1mHA, Comb1mH1mA); 0: skip, 1: output
+        :return: a dictionary of decomposition features
+ """
+ if not os.path.exists(h_a_alpha_out_dir):
+ os.makedirs(h_a_alpha_out_dir)
+ self.__h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, *args)
+ name_list = ['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma', 'lambda',
+ 'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
+ if is_trans_to_tif:
+ self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
+
+ if is_read_to_dic:
+ self.__res_h_a_alpha_decomposition_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
+ return self.__res_h_a_alpha_decomposition_T3
+ else:
+ return {}
+
+ def api_h_a_alpha_eigenvalue_set_T3(self, h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
+
+ """
+        Cloude-Pottier eigenvalue based decomposition of a coherency matrix
+        :param h_a_alpha_out_dir: output directory for the eigenvalue-set binary data
+        :param h_a_alpha_eigenvalue_set_T3_path: path to h_a_alpha_eigenvalue_set_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :param is_trans_to_tif: whether to convert the decomposition features to tif
+        :param is_read_to_dic: whether to also return the decomposition features as a dictionary
+        :param *args: 11 optional output flags (eigen123, proba123, anisotropy, anisotropy12, asymetry,
+                      polarisation_fraction, erd, rvi, pedestal, shannon, lueneburg); 0: skip, 1: output
+        :return: a dictionary of decomposition features
+ """
+ if not os.path.exists(h_a_alpha_out_dir):
+ os.makedirs(h_a_alpha_out_dir)
+ self.__h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, *args)
+ name_list = ['anisotropy', 'anisotropy_lueneburg', 'anisotropy12', 'asymetry', 'derd', 'derd_norm', 'entropy_shannon',
+ 'entropy_shannon_I', 'entropy_shannon_I_norm', 'entropy_shannon_norm', 'entropy_shannon_P',
+ 'entropy_shannon_P_norm', 'l1', 'l2', 'l3', 'p1', 'p2', 'p3', 'pedestal', 'polarisation_fraction',
+ 'rvi', 'serd', 'serd_norm']
+ if is_trans_to_tif:
+ self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
+
+ if is_read_to_dic:
+ self.__res_h_a_alpha_eigenvalue_set_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
+ return self.__res_h_a_alpha_eigenvalue_set_T3
+ else:
+ return {}
+
+ def api_h_a_alpha_eigenvector_set_T3(self, h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, is_trans_to_tif=True, is_read_to_dic=False, *args):
+
+ """
+        Cloude-Pottier eigenvector based decomposition of a coherency matrix
+        :param h_a_alpha_out_dir: output directory for the eigenvector-set binary data
+        :param h_a_alpha_eigenvector_set_T3_path: path to h_a_alpha_eigenvector_set_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :param is_trans_to_tif: whether to convert the decomposition features to tif
+        :param is_read_to_dic: whether to also return the decomposition features as a dictionary
+        :param *args: 5 optional output flags (alpha123, beta123, delta123, gamma123, alpbetdelgam);
+                      0: skip, 1: output
+        :return: a dictionary of decomposition features
+ """
+ if not os.path.exists(h_a_alpha_out_dir):
+ os.makedirs(h_a_alpha_out_dir)
+ self.__h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, *args)
+ name_list = ['alpha', 'alpha1', 'alpha2', 'alpha3',
+ 'beta', 'beta1', 'beta2', 'beta3',
+ 'delta', 'delta1', 'delta2', 'delta3',
+ 'gamma', 'gamma1', 'gamma2', 'gamma3']
+ if is_trans_to_tif:
+ self.__write_haalpha_to_tif(h_a_alpha_out_dir, h_a_alpha_out_dir, name_list)
+
+ if is_read_to_dic:
+ self.__res_h_a_alpha_eigenvector_set_T3 = self.__read_haalpha(h_a_alpha_out_dir, name_list)
+ return self.__res_h_a_alpha_eigenvector_set_T3
+ else:
+ return {}
+
+ def api_read_T3_matrix(self,polsarpro_T3_dir):
+ """
+        Reads the T3 matrix and returns it as a dictionary
+        :param polsarpro_T3_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :return: a dictionary holding the T3 matrix elements
+ """
+ name_list = ['T11', 'T12_imag', 'T12_real',
+ 'T22', 'T13_imag', 'T13_real',
+ 'T33', 'T23_imag', 'T23_real']
+ return self.__read_haalpha(polsarpro_T3_dir, name_list)
+
+ def api_trans_T3_to_tif(self, out_tif_dir, polsarpro_T3_dir):
+ """
+        Converts the T3 matrix from bin to tif format
+        :param out_tif_dir: output directory for the tifs
+        :param polsarpro_T3_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+ """
+ name_list = ['T11', 'T12_imag', 'T12_real',
+ 'T22', 'T13_imag', 'T13_real',
+ 'T33', 'T23_imag', 'T23_real']
+ self.__write_haalpha_to_tif(out_tif_dir, polsarpro_T3_dir, name_list)
+
+ @staticmethod
+ def __h_a_alpha_decomposition_T3(h_a_alpha_out_dir, h_a_alpha_decomposition_T3_path, polsarpro_in_dir, *args):
+ """
+        Performs the Cloude-Pottier (H-A-Alpha) decomposition on a PolSARpro-format T3 matrix
+        :param h_a_alpha_out_dir: output directory for the h_a_alpha binary data
+        :param h_a_alpha_decomposition_T3_path: path to h_a_alpha_decomposition_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :param *args: 9 optional output flags (alpbetdelgam, Lambda, alpha, entropy, anisotropy,
+                      CombHA, CombH1mA, Comb1mHA, Comb1mH1mA); 0: skip, 1: output
+ """
+ if not os.path.exists(h_a_alpha_decomposition_T3_path):
+            raise Exception(h_a_alpha_decomposition_T3_path + ' does not exist!')
+
+ NwinFilter = 1
+ offsetRow = 0
+ offsetCol = 0
+
+ config_path = os.path.join(polsarpro_in_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+
+ numRow = int(config[1])
+ numCol = int(config[4])
+
+ alpbetdelgam = int(args[0])
+ Lambda = int(args[1])
+ alpha = int(args[2])
+ entropy = int(args[3])
+ anisotropy = int(args[4])
+
+ CombHA = int(args[5])
+ CombH1mA = int(args[6])
+ Comb1mHA = int(args[7])
+ Comb1mH1mA = int(args[8])
+
+ para_list = [h_a_alpha_decomposition_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
+ str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
+ str(alpbetdelgam), str(Lambda), str(alpha), str(entropy), str(anisotropy),
+ str(CombHA), str(CombH1mA), str(Comb1mHA), str(Comb1mH1mA)]
+ cmd = ' '.join(para_list)
+
+ result_tuple = subprocess.getstatusoutput(cmd)
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
+ raise Exception(result_tuple[1])
+ shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
+
+ @staticmethod
+ def __h_a_alpha_eigenvalue_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, *args):
+
+ """
+ Cloude-Pottier eigenvalue based decomposition of a coherency matrix
+        :param h_a_alpha_out_dir: output directory for the eigenvalue-set binary data
+        :param h_a_alpha_eigenvalue_set_T3_path: path to h_a_alpha_eigenvalue_set_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :param *args: 11 optional output flags (eigen123, proba123, anisotropy, anisotropy12, asymetry,
+                      polarisation_fraction, erd, rvi, pedestal, shannon, lueneburg); 0: skip, 1: output
+ """
+ if not os.path.exists(h_a_alpha_eigenvalue_set_T3_path):
+            raise Exception(h_a_alpha_eigenvalue_set_T3_path + ' does not exist!')
+ NwinFilter = 1
+ offsetRow = 0
+ offsetCol = 0
+
+ config_path = os.path.join(polsarpro_in_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+
+ numRow = int(config[1])
+ numCol = int(config[4])
+
+ eigen123 = int(args[0])
+ proba123 = int(args[1])
+ anisotropy = int(args[2])
+ anisotropy12 = int(args[3])
+ asymetry = int(args[4])
+ polarisation_fraction = int(args[5])
+ erd = int(args[6])
+ rvi = int(args[7])
+ pedestal = int(args[8])
+ shannon = int(args[9])
+ lueneburg = int(args[10])
+
+ para_list = [h_a_alpha_eigenvalue_set_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
+ str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
+ str(eigen123), str(proba123), str(anisotropy), str(anisotropy12), str(asymetry),
+ str(polarisation_fraction), str(erd), str(rvi), str(pedestal),
+ str(shannon), str(lueneburg)]
+ cmd = ' '.join(para_list)
+
+ result_tuple = subprocess.getstatusoutput(cmd)
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
+ raise Exception(result_tuple[1])
+ shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
+
+ @staticmethod
+ def __h_a_alpha_eigenvector_set_T3(h_a_alpha_out_dir, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, *args):
+
+ """
+ Cloude-Pottier eigenvector based decomposition of a coherency matrix
+        :param h_a_alpha_out_dir: output directory for the eigenvector-set binary data
+        :param h_a_alpha_eigenvector_set_T3_path: path to h_a_alpha_eigenvector_set_T3.exe
+        :param polsarpro_in_dir: input PolSARpro-format T3 matrix directory (contains .bin and config.txt)
+        :param *args: 5 optional output flags (alpha123, beta123, delta123, gamma123, alpbetdelgam); 0: skip, 1: output
+ """
+ if not os.path.exists(h_a_alpha_eigenvector_set_T3_path):
+            raise Exception(h_a_alpha_eigenvector_set_T3_path + ' does not exist!')
+ NwinFilter = 1
+ offsetRow = 0
+ offsetCol = 0
+
+ config_path = os.path.join(polsarpro_in_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+
+ numRow = int(config[1])
+ numCol = int(config[4])
+
+ alpha123 = int(args[0])
+ beta123 = int(args[1])
+ delta123 = int(args[2])
+ gamma123 = int(args[3])
+ alpbetdelgam = int(args[4])
+
+ para_list = [h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir, h_a_alpha_out_dir,
+ str(NwinFilter), str(offsetRow), str(offsetCol), str(numRow), str(numCol),
+ str(alpha123), str(beta123), str(delta123), str(gamma123), str(alpbetdelgam)]
+ cmd = ' '.join(para_list)
+
+ result_tuple = subprocess.getstatusoutput(cmd)
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1 or result_tuple[1].find('Could not open') != -1:
+ raise Exception(result_tuple[1])
+ shutil.copyfile(config_path, os.path.join(h_a_alpha_out_dir, 'config.txt'))
+
+ def __read_haalpha(self, h_a_alpha_dir, name_list):
+ """
+        Reads the H-A-Alpha decomposition binaries into a dictionary of arrays
+        :param h_a_alpha_dir: directory of the h_a_alpha binary data (contains .bin and config.txt)
+        :param name_list: names to collect, e.g. ['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma',
+                          'lambda', 'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
+        :return: a dictionary of H-A-Alpha arrays
+        """
+        bin_glob = os.path.join(h_a_alpha_dir, '*.bin')  # avoid shadowing the builtin dir()
+        bin_paths = list(glob.glob(bin_glob))
+        haalpha_dic = {}
+ for name in name_list:
+ path = os.path.join(h_a_alpha_dir, name + '.bin')
+ if path in bin_paths:
+ img = self.__read_bin_to_img(path)
+ haalpha_dic.update({name: img})
+ return haalpha_dic
+
+ def standardization(self, data, num=1):
+        # min-max scale the matrix to [0, num]
+        data[np.isnan(data)] = np.nanmin(data)  # replace NaNs with the minimum valid value
+        _range = np.max(data) - np.min(data)
+        return (data - np.min(data)) / _range * num
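+    # Example (assuming a float input with at least one finite value):
+    #   standardization(np.array([2., 4., 6.])) -> [0.0, 0.5, 1.0]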
+
+ def __write_haalpha_to_tif(self, out_tif_dir, h_a_alpha_dir, name_list):
+ """
+        Writes the H-A-Alpha decomposition binaries out as tif images
+        :param out_tif_dir: output directory for the tifs
+        :param h_a_alpha_dir: directory of the h_a_alpha binary data (contains .bin and config.txt)
+        :param name_list: names to convert, e.g. ['entropy', 'anisotropy', 'alpha', 'beta', 'delta', 'gamma',
+                          'lambda', 'combination_1mH1mA', 'combination_1mHA', 'combination_H1mA', 'combination_HA']
+
+ """
+        bin_glob = os.path.join(h_a_alpha_dir, '*.bin')  # avoid shadowing the builtin dir()
+        bin_paths = list(glob.glob(bin_glob))
+
+ for name in name_list:
+ in_path = os.path.join(h_a_alpha_dir, name + '.bin')
+ out_path = os.path.join(out_tif_dir, name + '.tif')
+ if in_path in bin_paths:
+ img_array = self.__read_bin_to_img(in_path)
+ if self.__normalization is True:
+ img_array = self.standardization(img_array, num=1)
+ out_image = Image.fromarray(img_array)
+ out_image.save(out_path)
+
+ @staticmethod
+ def __read_bin_to_img(bin_path):
+ """
+        Reads a PolSARpro .bin binary file into a numpy array
+        :param bin_path: path to the .bin file (the accompanying config.txt must sit next to it)
+        :return: the image as a 2-D float32 array
+ """
+ (bin_dir, bin_name) = os.path.split(bin_path)
+ config_path = os.path.join(bin_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+ rows = int(config[1])
+ cols = int(config[4])
+
+        bin_file = open(bin_path, 'rb')  # open the binary file
+        size = os.path.getsize(bin_path)  # file size in bytes
+        if size < rows * cols * 4:
+            raise Exception('bin size less than rows*cols*4! size:', size, 'byte, rows:', rows, 'cols:', cols)
+
+        img = np.zeros([rows, cols], dtype=np.float32)
+        for row in range(rows):
+            data = bin_file.read(4 * cols)  # read one row of binary data at a time
+            row_data = struct.unpack('f' * cols, data)  # unpack to a row of floats
+            img[row, :] = row_data
+ bin_file.close()
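+        # An equivalent one-shot read, assuming the same row-major float32 layout:
+        #   img = np.fromfile(bin_path, dtype=np.float32, count=rows * cols).reshape(rows, cols)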
+ return img
+
+
+# if __name__ == '__main__':
+ # h_a_alpha_decomposition_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_decomposition_T3.exe'
+ # h_a_alpha_eigenvalue_set_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_eigenvalue_set_T3.exe'
+ # h_a_alpha_eigenvector_set_T3_path = 'D:\\PolSARpro_v4.2.0\\Soft\data_process_sngl\\h_a_alpha_eigenvector_set_T3.exe'
+ # polsarpro_in_dir = 'D:\\PolSARpro_v4.2.0\\in'
+ # haalpha_out_dir = 'D:\\PolSARpro_v4.2.0\\out'
+ # h_a_alpha_eigenvalue_set_T3_out = 'D:\\PolSARpro_v4.2.0\\out\\h_a_alpha_eigenvalue_set_T3'
+ # h_a_alpha_eigenvector_set_T3_out = 'D:\\PolSARpro_v4.2.0\\out\\h_a_alpha_eigenvector_set_T3'
+ #
+ # haa = PspHAAlphaDecomposition()
+ # h_a_alpha_features = haa.api_creat_h_a_alpha_features(haalpha_out_dir, h_a_alpha_decomposition_T3_path, h_a_alpha_eigenvalue_set_T3_path, h_a_alpha_eigenvector_set_T3_path, polsarpro_in_dir)
+
+
+ # haa = PspHAAlphaDecomposition(normalization=True)
+ # psp_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024_RPCpsp_t3"
+ # t3_path = r"I:\MicroWorkspace\product\C-SAR\SoilSalinity\t3"
+ # exe_dir = r"I:\microproduct\soilSalinity/"
+ # haa.api_creat_h_a_alpha_features(h_a_alpha_out_dir=t3_path,
+ # h_a_alpha_decomposition_T3_path= exe_dir + 'h_a_alpha_decomposition_T3.exe',
+ # h_a_alpha_eigenvalue_set_T3_path= exe_dir + 'h_a_alpha_eigenvalue_set_T3.exe',
+ # h_a_alpha_eigenvector_set_T3_path=exe_dir +'h_a_alpha_eigenvector_set_T3.exe',
+ # polsarpro_in_dir=psp_path)
+
+ # print('done')
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspLeeRefinedFilterC2.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspLeeRefinedFilterC2.py
new file mode 100644
index 0000000..079e090
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspLeeRefinedFilterC2.py
@@ -0,0 +1,170 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspLeeRefinedFilterC2.py
+@Function:
+@Contact:
+@Author:SHJ
+@Date:2021/11/5
+@Version:1.0.0
+"""
+import logging
+import os
+import shutil
+import subprocess
+import glob
+import numpy as np
+import struct
+from PIL import Image
+
+logger = logging.getLogger("mylog")
+
+
+class LeeRefinedFilterC2:
+ """
+    Invokes PolSARpro 4.2.0's lee_refined_filter_C2.exe to apply refined Lee filtering
+ """
+
+ def __init__(self, exeFilterName='lee_refined_filter_C2.exe'):
+ self.__exeName = exeFilterName
+ pass
+
+ def api_lee_refined_filter_C2(
+ self,
+ exeDir,
+ inC2Dir,
+ outDir,
+ off_row,
+ off_col,
+ Nrow,
+ Ncol,
+ Nwin=7,
+ Nlook=1):
+ """
+        :param exeDir: directory containing the exe
+        :param inC2Dir: C2 matrix directory
+        :param outDir: output directory
+        :param off_row: row offset (starting row)
+        :param off_col: column offset (starting column)
+        :param Nrow: end row
+        :param Ncol: end column
+        :param Nwin: filter window size (3, 5, 7, 9 or 11)
+        :param Nlook: number of looks, usually 1
+ """
+ if len(exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ raise Exception(self.__exeName + ' not exists.')
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(exeDir + '\\' + self.__exeName):
+ raise Exception(
+ exeDir +
+ '\\' +
+ self.__exeName +
+ ' not exists.')
+ exePath = exeDir + '\\' + self.__exeName
+
+ # if not self._checkT3Matrix(inT3Dir):
+ # raise Exception('T3 Matrix check failed.')
+ if not os.path.exists(outDir):
+ os.makedirs(outDir)
+        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default when even or negative
+ Nwin = 7
+
+ Off_lig = off_row
+ Off_col = off_col
+ Sub_Nlig = Nrow
+ Sub_Ncol = Ncol
+
+ para_list = [
+ exePath,
+ inC2Dir,
+ outDir,
+ Nlook,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol]
+ cmd = ' '.join(str(i) for i in para_list)
+ result_tuple = subprocess.getstatusoutput(cmd)
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+ config_path = os.path.join(inC2Dir, 'config.txt')
+ if config_path != os.path.join(outDir, 'config.txt'):
+ shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+
+    @staticmethod
+    def _checkC2Matrix(C2Dir):
+        # check that the C2 matrix files exist
+ if not os.path.exists(C2Dir):
+ return False
+ file_name_in_out = ['C11.bin', 'C12_real.bin', 'C12_imag.bin', 'C22.bin','config.txt']
+ for item in file_name_in_out:
+ if not os.path.exists(C2Dir + "\\" + item):
+ return False
+ return True
+
+    def write_bin_to_tif(self, out_tif_dir, bin_dir):
+        """
+        Converts every .bin file in a directory to a tif image
+        :param out_tif_dir: output directory for the tifs
+        :param bin_dir: directory of the binary data (contains .bin and config.txt)
+        :return out_tif_path: dictionary of the generated tif paths
+ """
+ bin_paths = list(glob.glob(os.path.join(bin_dir, '*.bin')))
+ out_tif_path = {}
+ for in_path in bin_paths:
+ name = os.path.split(in_path)[1].split('.')[0]
+ out_path = os.path.join(out_tif_dir, name + '.tif')
+ out_tif_path.update({name: out_path})
+ if os.path.exists(os.path.split(out_path)[0]) is False:
+ os.makedirs(os.path.split(out_path)[0])
+ img_array = self.__read_bin_to_img(in_path)
+            img_array[np.isnan(img_array)] = 0  # fill invalid values with 0
+            # img_array = self.standardization(img_array)  # normalize the data to [0, 1]
+ out_image = Image.fromarray(img_array)
+ out_image.save(out_path)
+ return out_tif_path
+
+    @staticmethod
+    def __read_bin_to_img(bin_path):
+ """
+        Reads a PolSARpro .bin binary file into a numpy array
+        :param bin_path: path to the .bin file (the accompanying config.txt must sit next to it)
+        :return: the image as a 2-D float32 array
+ """
+ (bin_dir, bin_name) = os.path.split(bin_path)
+ config_path = os.path.join(bin_dir, 'config.txt')
+ config = open(config_path, 'r').read().split('\n', -1)
+ rows = int(config[1])
+ cols = int(config[4])
+
+        bin_file = open(bin_path, 'rb')  # open the binary file
+        size = os.path.getsize(bin_path)  # file size in bytes
+ if size < rows * cols * 4:
+ raise Exception(
+ 'bin size less than rows*cols*4! size:',
+ size,
+ 'byte, rows:',
+ rows,
+ 'cols:',
+ cols)
+
+ img = np.zeros([rows, cols], dtype=np.float32)
+ for row in range(rows):
+            data = bin_file.read(4 * cols)  # read one row of binary data at a time
+            row_data = struct.unpack('f' * cols, data)  # unpack to a row of floats
+ img[row, :] = row_data
+ bin_file.close()
+ return img
+
+if __name__ == '__main__':
+    tp = LeeRefinedFilterC2()
+    inC2Dir = r'E:\MicroWorkspace\LandCover\HHHV1'
+    outDir = r'E:\MicroWorkspace\LandCover\HHHV1_f'
+    off_row = 0
+    off_col = 0
+    Nrow = 666
+    Ncol = 746
+    tp.api_lee_refined_filter_C2('', inC2Dir, outDir, off_row, off_col, Nrow, Ncol)
+    tp.write_bin_to_tif(outDir, outDir)
+ print('done')
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspLeeRefinedFilterT3.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspLeeRefinedFilterT3.py
new file mode 100644
index 0000000..e8c3911
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspLeeRefinedFilterT3.py
@@ -0,0 +1,104 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspLeeRefinedFilterT3.py
+@Function: Refined Lee filtering of a 3x3 coherency matrix [T3]
+           (calls PolSARpro's lee_refined_filter_T3.exe)
+@Contact:
+@Author:LVY
+@Date:2021/10/12 9:06
+@Version:1.0.0
+"""
+import logging
+import os
+import shutil
+import subprocess
+
+logger = logging.getLogger("mylog")
+
+
+class LeeRefinedFilterT3:
+ """
+    Invokes PolSARpro 4.2.0's lee_refined_filter_T3.exe to apply refined Lee filtering
+ """
+
+ def __init__(self, exeFilterName='lee_refined_filter_T3.exe'):
+ self.__exeName = exeFilterName
+ pass
+
+ def api_lee_refined_filter_T3(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ rectX,
+ rectY,
+ rectWidth,
+ rectHeight,
+ Nwin=7,
+ Nlook=1):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param rectWidth: valid-region width
+        :param rectHeight: valid-region height
+        :param Nwin: filter window size (3, 5, 7, 9 or 11)
+        :param Nlook: number of looks, usually 1
+ """
+ if len(exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ raise Exception(self.__exeName + ' not exists.')
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(exeDir + '\\' + self.__exeName):
+ raise Exception(
+ exeDir +
+ '\\' +
+ self.__exeName +
+ ' not exists.')
+ exePath = exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(inT3Dir):
+ raise Exception('T3 Matrix check failed.')
+ if not os.path.exists(outDir):
+ os.makedirs(outDir)
+        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default when even or negative
+ Nwin = 7
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = rectWidth
+ Sub_Ncol = rectHeight
+
+ para_list = [
+ exePath,
+ inT3Dir,
+ outDir,
+ Nlook,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol]
+ cmd = ' '.join(str(i) for i in para_list)
+ config_path = os.path.join(inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+ result_tuple = subprocess.getstatusoutput(cmd)
+
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
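+
+# Hypothetical usage sketch (paths and scene size are illustrative):
+#   lee = LeeRefinedFilterT3()
+#   lee.api_lee_refined_filter_T3(r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
+#                                 r'D:\in_T3', r'D:\out_T3', 0, 0, 1000, 1000,
+#                                 Nwin=7, Nlook=1)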
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspSurfaceInversion.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspSurfaceInversion.py
new file mode 100644
index 0000000..b8463a2
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspSurfaceInversion.py
@@ -0,0 +1,393 @@
+import logging
+import os
+import shutil
+import subprocess
+
+logger = logging.getLogger("mylog")
+
+
+class SurfaceInversionDubois:
+ """
+    Invokes PolSARpro 4.2.0's surface_inversion_dubois.exe for soil-moisture inversion
+ """
+
+ def __init__(self, exeFilterName='surface_inversion_dubois.exe'):
+ self.__exeName = exeFilterName
+ pass
+
+ def api_surface_inversion_dubois(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ incidence,
+ rectX,
+ rectY,
+ row,
+ col,
+ frequency, # GHZ
+ angleFlag, # 0:deg, 1:rad
+ ):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+        :param incidence: incidence angle input passed to the exe
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param row: number of rows to process
+        :param col: number of columns to process
+        :param frequency: radar frequency in GHz
+        :param angleFlag: angle unit (0: deg, 1: rad)
+ """
+ if len(exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ raise Exception(self.__exeName + ' not exists.')
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(exeDir + '\\' + self.__exeName):
+ raise Exception(
+ exeDir +
+ '\\' +
+ self.__exeName +
+ ' not exists.')
+ exePath = exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(inT3Dir):
+ raise Exception('T3 Matrix check failed.')
+ if not os.path.exists(outDir):
+ os.makedirs(outDir)
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = row
+ Sub_Ncol = col
+ dataFormat = 'T3'
+ calibration_flag = 1
+ calibration_coefficient = 0.0
+ threshold_HHHH_VVVV = 0.0
+ threshold_HVHV_VVVV = 0.0
+
+        # exePath resolved above is used in the command so the exe does not have to be on PATH
+        cmd = "{} -id {} -od {} -iodf {} -ang {} -ofr {} -ofc {} -fnr {} -fnc {} -fr {} -un {} -caf {} -cac {} -th1 {} -th2 {}".format(
+            exePath, inT3Dir, outDir, dataFormat, incidence, Off_lig, Off_col, Sub_Nlig, Sub_Ncol, frequency, angleFlag,
+            calibration_flag, calibration_coefficient, threshold_HHHH_VVVV, threshold_HVHV_VVVV)
+
+ logger.info('surface_inversion_dubois:{}'.format(cmd))
+ result = os.system(cmd)
+ logger.info('cmd_result:{}'.format(result))
+ logger.info('surface_inversion_dubois finish!')
+
+ config_path = os.path.join(inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+
+ # cmd = ' '.join(str(i) for i in para_list)
+ # config_path = os.path.join(inT3Dir, 'config.txt')
+ # shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+ # result_tuple = subprocess.getstatusoutput(cmd)
+ #
+ # if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ # raise Exception(result_tuple[1])
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
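+
+# Hypothetical usage sketch for the Dubois inversion (paths, scene size and the
+# incidence-angle input are illustrative):
+#   dubois = SurfaceInversionDubois()
+#   dubois.api_surface_inversion_dubois(r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
+#                                       r'D:\in_T3', r'D:\out', r'D:\in_T3\incidence.bin',
+#                                       0, 0, 1000, 1000, 5.4, 0)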
+
+
+class SurfaceInversionHisto:
+ """
+    Invokes PolSARpro 4.2.0's surface_inversion_histo.exe for soil-moisture inversion
+ """
+
+ def __init__(self, exeFilterName='surface_inversion_histo.exe'):
+ self.__exeName = exeFilterName
+ pass
+
+ def api_surface_inversion_histo(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ rectX,
+ rectY,
+ rectWidth,
+ rectHeight,
+ Nwin=7,
+ Nlook=1):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param rectWidth: valid-region width
+        :param rectHeight: valid-region height
+        :param Nwin: filter window size (3, 5, 7, 9 or 11)
+        :param Nlook: number of looks, usually 1
+ """
+ if len(exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ raise Exception(self.__exeName + ' not exists.')
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(exeDir + '\\' + self.__exeName):
+ raise Exception(
+ exeDir +
+ '\\' +
+ self.__exeName +
+ ' not exists.')
+ exePath = exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(inT3Dir):
+ raise Exception('T3 Matrix check failed.')
+ if not os.path.exists(outDir):
+ os.makedirs(outDir)
+        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default when even or negative
+ Nwin = 7
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = rectWidth
+ Sub_Ncol = rectHeight
+
+ para_list = [
+ exePath,
+ inT3Dir,
+ outDir,
+ Nlook,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol]
+ cmd = ' '.join(str(i) for i in para_list)
+ config_path = os.path.join(inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+ result_tuple = subprocess.getstatusoutput(cmd)
+
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
+
+
+class SurfaceInversionOh:
+ """
+    Invokes PolSARpro 4.2.0's surface_inversion_oh.exe for soil-moisture inversion
+ """
+
+ def __init__(self, exeFilterName='surface_inversion_oh.exe'):
+ self.__exeName = exeFilterName
+ pass
+
+ def api_surface_inversion_oh(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ rectX,
+ rectY,
+ rectWidth,
+ rectHeight,
+ Nwin=7,
+ Nlook=1):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param rectWidth: valid-region width
+        :param rectHeight: valid-region height
+        :param Nwin: filter window size (3, 5, 7, 9 or 11)
+        :param Nlook: number of looks, usually 1
+ """
+ if len(exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ raise Exception(self.__exeName + ' not exists.')
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(exeDir + '\\' + self.__exeName):
+ raise Exception(
+ exeDir +
+ '\\' +
+ self.__exeName +
+ ' not exists.')
+ exePath = exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(inT3Dir):
+ raise Exception('T3 Matrix check failed.')
+ if not os.path.exists(outDir):
+ os.makedirs(outDir)
+        if (Nwin % 2) == 0 or Nwin < 0:  # fall back to the default when even or negative
+ Nwin = 7
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = rectWidth
+ Sub_Ncol = rectHeight
+
+ para_list = [
+ exePath,
+ inT3Dir,
+ outDir,
+ Nlook,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol]
+ cmd = ' '.join(str(i) for i in para_list)
+ config_path = os.path.join(inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+ result_tuple = subprocess.getstatusoutput(cmd)
+
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
+
+
+class SurfaceInversionOh2004:
+ """
+    Invokes PolSARpro 4.2.0's surface_inversion_oh2004.exe for soil-moisture inversion
+ """
+
+ def __init__(self, exeFilterName='surface_inversion_oh2004.exe'):
+ self.__exeName = exeFilterName
+ pass
+
+ def api_surface_inversion_oh2004(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ incidence,
+ rectY,
+ rectX,
+ row,
+ col,
+ frequency, # GHZ
+ angleFlag):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+        :param incidence: incidence angle input passed to the exe
+        :param rectY: valid-region y offset
+        :param rectX: valid-region x offset
+        :param row: number of rows to process
+        :param col: number of columns to process
+        :param frequency: radar frequency in GHz
+        :param angleFlag: angle unit (0: deg, 1: rad)
+ """
+ if len(exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ raise Exception(self.__exeName + ' not exists.')
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(exeDir + '\\' + self.__exeName):
+ raise Exception(
+ exeDir +
+ '\\' +
+ self.__exeName +
+ ' not exists.')
+ exePath = exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(inT3Dir):
+ raise Exception('T3 Matrix check failed.')
+ if not os.path.exists(outDir):
+ os.makedirs(outDir)
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = row
+ Sub_Ncol = col
+ dataFormat = 'T3'
+ threshold_mv = 1.0
+ threshold_s = 7.0
+
+        # exePath resolved above is used in the command so the exe does not have to be on PATH
+        cmd = "{} -id {} -od {} -iodf {} -ang {} -ofr {} -ofc {} -fnr {} -fnc {} -fr {} -un {} -th1 {} -th2 {}".format(
+            exePath, inT3Dir, outDir, dataFormat, incidence, Off_lig, Off_col, Sub_Nlig, Sub_Ncol, frequency, angleFlag, threshold_mv, threshold_s)
+
+ logger.info('surface_inversion_oh2004:{}'.format(cmd))
+ result = os.system(cmd)
+ logger.info('cmd_result:{}'.format(result))
+ logger.info('surface_inversion_oh2004 finish!')
+
+
+ config_path = os.path.join(inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(outDir, 'config.txt'))
+ # cmd = ' '.join(str(i) for i in para_list)
+ # result_tuple = subprocess.getstatusoutput(cmd)
+ # #
+ # if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ # raise Exception(result_tuple[1])
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ # file_name_in_out = ['T11.img', 'T12_real.img', 'T12_imag.img',
+ # 'T13_real.img', 'T13_imag.img', 'T22.img',
+ # 'T23_real.img', 'T23_imag.img', 'T33.img']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspTouziDecomposition.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspTouziDecomposition.py
new file mode 100644
index 0000000..35ed6ca
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspTouziDecomposition.py
@@ -0,0 +1,146 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspTouziDecomposition.py
+@Function:
+@Contact:
+@Author:LVY
+@Date:2021/10/14 10:11
+@Version:1.0.0
+"""
+import os
+import logging
+from tool.algorithm.polsarpro.polarizationDecomposition import ModTouzi as TouziDecomp
+from osgeo import gdal
+import multiprocessing
+from tool.algorithm.block.blockprocess import BlockProcess
+import shutil
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.file.fileHandle import fileHandle
+logger = logging.getLogger("mylog")
+file = fileHandle(False)
+
+class PspTouziDecomposition:
+ """
+    Touzi decomposition
+ """
+ def __init__(self, inDic, outDir):
+ """
+        :param inDic: dictionary of the polarization tif paths (keys HH, HV, VH, VV)
+        :param outDir: output directory
+        """
+        self.__inDic = inDic
+        self.__outDir = outDir
+        self.__DecompostFlag = False
+        # __init__ must not return a value, so raise instead of `return False`
+        if self._checkTifFileDic(self.__inDic) is False:
+            raise Exception('tif file dictionary check failed.')
+ if not os.path.exists(self.__outDir):
+ os.makedirs(self.__outDir)
+
+ def api_Touzi_decomposition_TIF(self, Nwin = 5):
+ """
+        :param Nwin: filter window size (3, 5, 7, 9 or 11)
+ """
+ bandHH = gdal.Open(self.__inDic["HH"])
+ bandHV = gdal.Open(self.__inDic["HV"])
+ bandVH = gdal.Open(self.__inDic["VH"])
+ bandVV = gdal.Open(self.__inDic["VV"])
+ bandAll = [bandHH, bandHV, bandVH, bandVV]
+ decomposition = TouziDecomp(bandAll, Nwin)
+ decomposition.get_result(self.__outDir)
+ return True
+
+ def Touzi_decomposition_TIF(self,hh_path,hv_path,vh_path,vv_path,out_dir,suffix,Nwin = 5):
+ """
+        :param Nwin: filter window size (3, 5, 7, 9 or 11)
+ """
+ bandHH = gdal.Open(hh_path)
+ bandHV = gdal.Open(hv_path)
+ bandVH = gdal.Open(vh_path)
+ bandVV = gdal.Open(vv_path)
+ bandAll = [bandHH, bandHV, bandVH, bandVV]
+ decomposition = TouziDecomp(bandAll, Nwin)
+ decomposition.get_result_block(out_dir, suffix)
+ return True
+
+ @staticmethod
+ def _checkTifFileDic(inDic):
+ file_name_in_out = ['HH', 'VV', 'HV', 'VH']
+ for item in file_name_in_out:
+ if item in inDic:
+ print(inDic[item])
+ if not os.path.exists(os.path.join(inDic[item])):
+ return False
+ else:
+ return False
+ return True
+
+ def Touzi_decomposition_multiprocessing(self):
+
+        # create the working directories
+ src_path = os.path.join(self.__outDir, "src_img")
+ block_path = os.path.join(self.__outDir, "block")
+ decomposition_path = os.path.join(self.__outDir, "feature")
+ file.creat_dirs([src_path,block_path,decomposition_path])
+
+ shutil.copyfile(self.__inDic["HH"], os.path.join(src_path, "HH.tif"))
+ shutil.copyfile(self.__inDic["HV"], os.path.join(src_path, "HV.tif"))
+ shutil.copyfile(self.__inDic["VH"], os.path.join(src_path, "VH.tif"))
+ shutil.copyfile(self.__inDic["VV"], os.path.join(src_path, "VV.tif"))
+ self.__cols = ImageHandler.get_img_width(self.__inDic["HH"])
+ self.__rows = ImageHandler.get_img_height(self.__inDic["HH"])
+        # split the inputs into blocks
+ bp = BlockProcess()
+ block_size = bp.get_block_size(self.__rows, self.__cols)
+ bp.cut(src_path, block_path, ['tif', 'tiff'], 'tif', block_size)
+ logger.info('blocking tifs success!')
+
+ img_dir, img_name = bp.get_file_names(block_path, ['tif'])
+ dir_dict = bp.get_same_img(img_dir, img_name)
+
+ hh_list, vv_list, hv_list, vh_list = None, None, None, None
+ for key in dir_dict.keys():
+ tmp = key.split('_', 2)[0]
+ if tmp == 'HH':
+ hh_list = dir_dict[key]
+ elif tmp == 'VV':
+ vv_list = dir_dict[key]
+ elif tmp == 'HV':
+ hv_list = dir_dict[key]
+ elif tmp == 'VH':
+ vh_list = dir_dict[key]
+
+ processes_num = min([len(hh_list), multiprocessing.cpu_count() - 1])
+
+        # launch the multiprocessing pool
+ pool = multiprocessing.Pool(processes=processes_num)
+
+ for i in range(len(hh_list)):
+ suffix = bp.get_suffix(os.path.basename(hh_list[i]))
+ # self.Touzi_decomposition_TIF(hh_list[i], hv_list[i], vh_list[i], vv_list[i], block_path, suffix,5)
+ pool.apply_async(self.Touzi_decomposition_TIF, (hh_list[i], hv_list[i], vh_list[i], vv_list[i], decomposition_path, suffix,5))
+ logger.info('total:%s, block:%s touzi!', len(hh_list), i)
+
+ pool.close()
+ pool.join()
+        # mosaic the processed blocks back to full size
+ bp.combine(decomposition_path, self.__cols, self.__rows, self.__outDir, file_type=['tif'], datetype='float16')
+
+ file.del_folder(src_path)
+ file.del_folder(block_path)
+ file.del_folder(decomposition_path)
+ pass
+
+
+# if __name__ == '__main__':
+# dir = {}
+# dir.update({"HH":"I:\preprocessed\HH_preprocessed.tif"})
+# dir.update({"HV":"I:\preprocessed\HV_preprocessed.tif"})
+# dir.update({"VH":"I:\preprocessed\VH_preprocessed.tif"})
+# dir.update({"VV":"I:\preprocessed\VV_preprocessed.tif"})
+#
+#
+# p = PspTouziDecomposition(dir, "I:/preprocessed/")
+# p.Touzi_decomposition_multiprocessing()
+# pass
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspYamaguchiDecomposition.py b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspYamaguchiDecomposition.py
new file mode 100644
index 0000000..dd4f9e8
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/polsarpro/pspYamaguchiDecomposition.py
@@ -0,0 +1,104 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project:__init__.py
+@File:pspYamaguchiDecomposition.py
+@Function:
+@Contact:
+@Author:LVY
+@Date:2021/10/12 18:45
+@Version:1.0.0
+"""
+
+import os
+import shutil
+import subprocess
+import logging
+logger = logging.getLogger("mylog")
+
+
+class PspYamaguchiDecomposition:
+ """
+    Yamaguchi decomposition (yamaguchi_3components_decomposition_T3.exe / yamaguchi_4components_decomposition_T3.exe)
+ """
+
+ def __init__(
+ self,
+ exeDir,
+ inT3Dir,
+ outDir,
+ exeDecomposeName='yamaguchi_4components_decomposition_T3.exe'):
+ """
+        :param exeDir: directory containing the exe
+        :param inT3Dir: T3 matrix directory
+        :param outDir: output directory
+ """
+ self.__exeName = exeDecomposeName
+ self.__exeDir = exeDir
+ self.__inT3Dir = inT3Dir
+ self.__outDir = outDir
+ self.__DecompostFlag = False
+ pass
+
+ def api_yamaguchi_4components_decomposition_T3(
+ self, rectX, rectY, rectWidth, rectHeight, Nwin=1):
+ """
+        :param rectX: valid-region x offset
+        :param rectY: valid-region y offset
+        :param rectWidth: valid-region width
+        :param rectHeight: valid-region height
+        :param Nwin: size of the (Nwin, Nwin) sliding window used to compute local estimates (int)
+ """
+ if self.__DecompostFlag:
+ return True
+ if len(self.__exeDir) == 0:
+ if not os.path.exists(self.__exeName):
+ logger.error(self.__exeName + ' not exists.')
+ return False
+ exePath = self.__exeName
+ else:
+ if not os.path.exists(self.__exeDir + '\\' + self.__exeName):
+ logger.error(self.__exeName + ' not exists.')
+ return False
+ exePath = self.__exeDir + '\\' + self.__exeName
+
+ if not self._checkT3Matrix(self.__inT3Dir):
+ logger.error('T3 Matrix check failed.')
+ return False
+ if not os.path.exists(self.__outDir):
+ os.makedirs(self.__outDir)
+
+ Off_lig = rectX
+ Off_col = rectY
+ Sub_Nlig = rectWidth
+ Sub_Ncol = rectHeight
+
+ para_list = [
+ exePath,
+ self.__inT3Dir,
+ self.__outDir,
+ Nwin,
+ Off_lig,
+ Off_col,
+ Sub_Nlig,
+ Sub_Ncol]
+ cmd = " ".join(str(i) for i in para_list)
+ config_path = os.path.join(self.__inT3Dir, 'config.txt')
+ shutil.copyfile(config_path, os.path.join(self.__outDir, 'config.txt'))
+ result_tuple = subprocess.getstatusoutput(cmd)
+
+ if result_tuple[0] != 1 or result_tuple[1].find('error') != -1:
+ raise Exception(result_tuple[1])
+ self.__DecompostFlag = True
+ return True
+
+    @staticmethod
+    def _checkT3Matrix(T3Dir):
+        # check that the T3 matrix files exist
+ if not os.path.exists(T3Dir):
+ return False
+ file_name_in_out = ['T11.bin', 'T12_real.bin', 'T12_imag.bin',
+ 'T13_real.bin', 'T13_imag.bin', 'T22.bin',
+ 'T23_real.bin', 'T23_imag.bin', 'T33.bin']
+ for item in file_name_in_out:
+ if not os.path.exists(T3Dir + "\\" + item):
+ return False
+ return True
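+
+# Hypothetical usage sketch (paths and scene size are illustrative):
+#   yam = PspYamaguchiDecomposition(r'D:\PolSARpro_v4.2.0\Soft\data_process_sngl',
+#                                   r'D:\in_T3', r'D:\out_T3')
+#   yam.api_yamaguchi_4components_decomposition_T3(0, 0, 1000, 1000, Nwin=3)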
diff --git a/Ortho-NoS1GBM/tool/algorithm/transforml1a/SAR_GEO.cp38-win_amd64.pyd b/Ortho-NoS1GBM/tool/algorithm/transforml1a/SAR_GEO.cp38-win_amd64.pyd
new file mode 100644
index 0000000..642f519
Binary files /dev/null and b/Ortho-NoS1GBM/tool/algorithm/transforml1a/SAR_GEO.cp38-win_amd64.pyd differ
diff --git a/Ortho-NoS1GBM/tool/algorithm/transforml1a/SAR_geo/SAR_GEO.pyx b/Ortho-NoS1GBM/tool/algorithm/transforml1a/SAR_geo/SAR_GEO.pyx
new file mode 100644
index 0000000..c443ac3
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/transforml1a/SAR_geo/SAR_GEO.pyx
@@ -0,0 +1,194 @@
+import os
+cimport cython  # required import
+import numpy as np   # numpy must be declared for both the Python level and ...
+cimport numpy as np  # ... the C level
+from libc.math cimport pi, ceil, floor
+from scipy.interpolate import griddata
+
+
+
+
+
+#####################
+# struct definitions
+####################
+cdef struct Point:  # simple 2-D point
+ double x
+ double y
+
+
+
+
+
+
+
+
+######################
+# ray casting (point-in-polygon test)
+######################
+cdef int rayCasting(Point p,np.ndarray[double,ndim=2] poly):
+    cdef double px = p.x
+    cdef double py = p.y
+    cdef int flag = 0
+
+    cdef int i = 0
+    cdef int l = poly.shape[0]
+    cdef int j = l - 1
+
+    cdef double sx
+    cdef double sy
+    cdef double tx
+    cdef double ty
+    cdef double x = 0
+    while i < l:
+        sx = poly[j, 0]
+        sy = poly[j, 1]
+        tx = poly[i, 0]
+        ty = poly[i, 1]
+        # does the edge straddle the horizontal ray cast from the point?
+        if (sy < py and ty >= py) or (sy >= py and ty < py):
+            # X coordinate of the point on the edge whose Y equals the ray's Y
+            x = sx + (py - sy) * (tx - sx) / (ty - sy)
+            # the point lies exactly on the polygon edge
+            if x == px:
+                return 1
+            # the ray crosses the polygon boundary
+            if x > px:
+                flag = 0 if flag == 1 else 1
+        # advance to the next edge
+        j = i
+        i = i + 1
+
+    # an odd number of crossings means the point is inside the polygon
+    return 1 if flag == 1 else 0
+
+cpdef np.ndarray[double,ndim=2] insert_data(np.ndarray[double,ndim=2] ori2geo_img,np.ndarray[int , ndim=1] row_ids,np.ndarray[int,ndim=1] col_ids,np.ndarray[double,ndim=1] data):
+    # scatter the 1-D values back into the 2-D image at the given row/col indices
+    cdef int i = 0
+    cdef int count = row_ids.shape[0]
+    while i < count:
+        ori2geo_img[row_ids[i], col_ids[i]] = data[i]
+        i = i + 1
+    return ori2geo_img
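+
+# NOTE: this hunk is garbled in the diff (its header declares 194 lines).
+# transHandle.py below also calls SAR_GEO.gereratorMask and SAR_GEO.cut_L1A_img;
+# the two routines that follow are hedged reconstructions from those call sites
+# and the surviving fragments above, not the original code.
+cpdef np.ndarray[double,ndim=2] gereratorMask(np.ndarray[double,ndim=1] rows,np.ndarray[double,ndim=1] cols,np.ndarray[double,ndim=2] mask):
+    # mark every L1A cell touched by a fractional (row, col) sample; the four
+    # floor/ceil combinations match the repeated bounds checks in the fragments
+    cdef int count = rows.shape[0]
+    cdef int height = mask.shape[0]
+    cdef int width = mask.shape[1]
+    cdef int i = 0
+    cdef int temp_row
+    cdef int temp_col
+    cdef int dr
+    cdef int dc
+    while i < count:
+        for dr in range(2):
+            for dc in range(2):
+                temp_row = int(floor(rows[i])) + dr
+                temp_col = int(floor(cols[i])) + dc
+                if temp_row >= 0 and temp_col >= 0 and temp_row < height and temp_col < width:
+                    mask[temp_row, temp_col] = 1
+        i = i + 1
+    return mask
+
+cpdef np.ndarray[double,ndim=2] cut_L1A_img(np.ndarray[double,ndim=3] lon_lat_img,np.ndarray[double,ndim=2] roi_list):
+    # rasterize the ROI polygon over a 2-band (lon, lat) image: 1 inside, NaN outside;
+    # the bounding-box test short-circuits the per-pixel rayCasting call
+    cdef int height = lon_lat_img.shape[1]
+    cdef int width = lon_lat_img.shape[2]
+    cdef double min_lon = np.min(roi_list[:, 0])
+    cdef double max_lon = np.max(roi_list[:, 0])
+    cdef double min_lat = np.min(roi_list[:, 1])
+    cdef double max_lat = np.max(roi_list[:, 1])
+    mask = np.full((height, width), np.nan)
+    cdef Point p1
+    cdef int i
+    cdef int j
+    for i in range(height):
+        for j in range(width):
+            p1.x = lon_lat_img[0, i, j]
+            p1.y = lon_lat_img[1, i, j]
+            if min_lon > p1.x or max_lon < p1.x or min_lat > p1.y or max_lat < p1.y:
+                continue
+            if rayCasting(p1, roi_list) == 1:
+                mask[i, j] = 1
+    return mask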
+
+
+
+
+
+
+
+
+
+
+
+# test routine
+cpdef np.ndarray[double,ndim=2] Add(np.ndarray[double,ndim=2] a,double x):
+    cdef double d = 0  # declaration; note that Cython has no Python-level bool type
+    print("call succeeded")
+    print(a)
+    print(x)
+    return a+x
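+
+# A minimal sanity check, assuming the .pyd built by setup.py is importable
+# under tool.algorithm.transforml1a (as the rest of the repo imports it):
+# >>> from tool.algorithm.transforml1a import SAR_GEO
+# >>> import numpy as np
+# >>> SAR_GEO.Add(np.zeros((2, 2)), 1.0)   # expected: a 2x2 array of ones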
+
diff --git a/Ortho-NoS1GBM/tool/algorithm/transforml1a/setup.py b/Ortho-NoS1GBM/tool/algorithm/transforml1a/setup.py
new file mode 100644
index 0000000..5183e0c
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/transforml1a/setup.py
@@ -0,0 +1,45 @@
+from setuptools import setup
+from setuptools.extension import Extension
+from Cython.Distutils import build_ext
+from Cython.Build import cythonize
+import numpy
+from pathlib import Path
+import shutil
+
+
+class MyBuildExt(build_ext):
+ def run(self):
+ build_ext.run(self)
+
+ build_dir = Path(self.build_lib)
+ root_dir = Path(__file__).parent
+ target_dir = build_dir if not self.inplace else root_dir
+
+        self.copy_file(Path('./SAR_geo') / '__init__.py', root_dir, target_dir)
+        # self.copy_file(Path('./pkg2') / '__init__.py', root_dir, target_dir)
+        self.copy_file(Path('.') / '__init__.py', root_dir, target_dir)
+
+    def copy_file(self, path, source_dir, destination_dir):
+ if not (source_dir / path).exists():
+ return
+ shutil.copyfile(str(source_dir / path), str(destination_dir / path))
+
+setup(
+ name="MyModule",
+ ext_modules=cythonize(
+ [
+ #Extension("pkg1.*", ["root/pkg1/*.py"]),
+ Extension("pkg2.*", ["./SAR_geo/SAR_GEO.pyx"]),
+ #Extension("1.*", ["root/*.py"])
+ ],
+ build_dir="build",
+ compiler_directives=dict(
+ always_allow_keywords=True
+ )),
+ cmdclass=dict(
+ build_ext=MyBuildExt
+ ),
+ packages=[],
+ include_dirs=[numpy.get_include()],
+)
+
+# Command: python setup.py build_ext --inplace
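+# Note (an observation, not original code): the Extension above registers the
+# module under the name "pkg2.*" even though the source lives in ./SAR_geo, while
+# the rest of the repo imports tool.algorithm.transforml1a.SAR_GEO and a prebuilt
+# SAR_GEO.cp38-win_amd64.pyd ships alongside it, so the built artifact is
+# presumably renamed or copied into place after running the command above.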
diff --git a/Ortho-NoS1GBM/tool/algorithm/transforml1a/transHandle.py b/Ortho-NoS1GBM/tool/algorithm/transforml1a/transHandle.py
new file mode 100644
index 0000000..64ad4be
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/transforml1a/transHandle.py
@@ -0,0 +1,669 @@
+from tool.algorithm.transforml1a import SAR_GEO as SAR_GEO
+from tool.algorithm.image import ImageHandle
+import numpy as np
+import scipy
+from scipy.interpolate import griddata, RegularGridInterpolator
+import logging
+import pyresample as pr  # needed: interp2d_station_to_grid below calls pr.bilinear
+# interpolation helpers
+from pyresample.bilinear import NumpyBilinearResampler
+from pyresample import geometry
+from pyresample.geometry import AreaDefinition
+from osgeo import osr
+import os
+import math
+
+# os.environ['PROJ_LIB'] = r"D:\Anaconda\envs\micro\Lib\site-packages\osgeo\data\proj"
+
+logger = logging.getLogger("mylog")
+
+
+##############
+# polynomial regression helpers
+##############
+#
+def griddata_geo(points, data, lon_grid, lat_grid, method, i, end_i):
+ grid_data = griddata(points, data, (lon_grid, lat_grid), method=method, )
+ grid_data = grid_data[:, :, 0]
+ return [i, end_i, grid_data]
+
+
+def griddataBlock(start_x, len_x, start_y, len_y, grid_data_input, grid_x, grid_y, method):
+ grid_x = grid_x.reshape(-1)
+ grid_y = grid_y.reshape(-1)
+ grid_data_input = grid_data_input.reshape(-1)
+ x_list = np.array(list(range(len_x))) + start_x
+ y_list = np.array(list(range(len_y))) + start_y
+
+ x_grid, y_grid = np.meshgrid(x_list, y_list)
+ idx = np.argsort(grid_x)
+ grid_x = grid_x[idx].reshape(-1)
+ grid_y = grid_y[idx].reshape(-1)
+ grid_data_input = grid_data_input[idx].reshape(-1)
+    # NOTE: RegularGridInterpolator expects the strictly ascending 1-D axis vectors
+    # of a regular grid plus a matching value array; for scattered samples the
+    # commented griddata call below is the safer route.
+    interp_func = RegularGridInterpolator((grid_x.reshape(-1), grid_y.reshape(-1)), grid_data_input.reshape(-1),
+                                          method='slinear', bounds_error=False, fill_value=np.nan)
+    grid_data = interp_func((x_grid, y_grid))
+    # grid_data = griddata(p, grid_data_input, (x_grid, y_grid), method=method)
+    # interp_func returns an array shaped like x_grid, so no extra [:, :, 0] slice is needed
+    return (x_grid, y_grid, grid_data)
+
+
+class polyfit2d_U:
+ def __init__(self, x, y, z) -> None:
+ # 定义参数
+
+ X = np.ones((x.shape[0], 10))
+ X[:, 0] = 1
+ X[:, 1] = x
+ X[:, 2] = y
+ X[:, 3] = x * y
+ X[:, 4] = x ** 2
+ X[:, 5] = y ** 2
+ X[:, 6] = x * X[:, 5]
+ X[:, 7] = y * X[:, 4]
+ X[:, 8] = x ** 3
+ X[:, 9] = y ** 3
+ Y = z.reshape(-1, 1)
+ A = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), Y)
+ self.paras_fit = A
+
+ def fit(self, x, y):
+ X = np.ones((x.shape[0], 10))
+ X[:, 0] = 1
+ X[:, 1] = x
+ X[:, 2] = y
+ X[:, 3] = x * y
+ X[:, 4] = x ** 2
+ X[:, 5] = y ** 2
+ X[:, 6] = x * X[:, 5]
+ X[:, 7] = y * X[:, 4]
+ X[:, 8] = x ** 3
+ X[:, 9] = y ** 3
+ z = np.matmul(X, self.paras_fit)
+ return np.sum(z)
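+
+
+# A hypothetical self-check of polyfit2d_U (synthetic data, not project data):
+# fitting samples drawn from a cubic surface should reproduce it almost exactly;
+# for ill-conditioned samples np.linalg.lstsq is the more stable alternative to
+# the explicit normal-equation inverse used above.
+#
+# x = np.random.rand(200); y = np.random.rand(200)
+# z = 1 + 2 * x - 3 * y + 0.5 * x * y + x ** 2 - y ** 2
+# f = polyfit2d_U(x, y, z)
+# print(f.fit(np.array([0.3]), np.array([0.7])))  # ~= z evaluated at (0.3, 0.7)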
+
+
+class TransImgL1A:
+ def __init__(self, ori_sim_path, roi, l1a_height, l1a_width):
+ self._begin_r, self._begin_c, self._end_r, self._end_c = 0, 0, 0, 0
+ self.ori2geo_img = None
+ self._mask = None
+ self.l1a_height = l1a_height
+ self.l1a_width = l1a_width
+ self._min_lon, self._max_lon, self._min_lat, self._max_lat = 0, 0, 0, 0
+ self.init_trans_para(ori_sim_path, roi)
+
+ def get_roi_points(self):
+ rowcol = np.where(self._mask == 1)
+ data = [(self._begin_r + row, self._begin_c + col) for (row, col) in zip(rowcol[0], rowcol[1])]
+ return data
+
+ def get_lonlat_points(self):
+ lon = self.ori2geo_img[0, :, :][np.where(self._mask == 1)]
+ lat = self.ori2geo_img[1, :, :][np.where(self._mask == 1)]
+ data = [(row, col) for (row, col) in zip(lon, lat)]
+ return data
+
+ ######################
+ # 插值方法
+ ######################
+ def init_trans_para(self, sim_ori_path, roi):
+ """裁剪L1a_img --裁剪L1A影像
+ --- 修改 ori_sim 变换为 sim_ori
+ Args:
+ src_img_path (_type_): 原始L1A影像
+ cuted_img_path (_type_): 待裁剪对象
+ roi (_type_): 裁剪roi
+ """
+ ori2geo_img_height = ImageHandle.ImageHandler.get_img_height(sim_ori_path)
+ ori2geo_img_width = ImageHandle.ImageHandler.get_img_width(sim_ori_path)
+ ori2geo_img = ImageHandle.ImageHandler.get_data(sim_ori_path)
+ ori2geo_gt = ImageHandle.ImageHandler.get_geotransform(sim_ori_path)
+ point_list = np.array(roi)
+ min_lon = np.nanmin(point_list[:, 0])
+ max_lon = np.nanmax(point_list[:, 0])
+ min_lat = np.nanmin(point_list[:, 1])
+ max_lat = np.nanmax(point_list[:, 1])
+ self._min_lon, self._max_lon, self._min_lat, self._max_lat = min_lon, max_lon, min_lat, max_lat
+        # derive the pixel window from [min_lon, max_lon] / [min_lat, max_lat]
+
+ (x_min, y_min) = ImageHandle.ImageHandler.lat_lon_to_pixel(sim_ori_path, (min_lon, min_lat))
+ (x_max, y_max) = ImageHandle.ImageHandler.lat_lon_to_pixel(sim_ori_path, (max_lon, max_lat))
+
+ xmin = x_min if x_min < x_max else x_max
+ xmax = x_min if x_min > x_max else x_max
+
+ ymin = y_min if y_min < y_max else y_max
+ ymax = y_min if y_min > y_max else y_max
+
+        xmin = int(math.floor(xmin))  # column index
+        xmax = int(math.ceil(xmax))  # because of Python's slice semantics
+        # xmax = int(math.ceil(xmax)) + 1  # because of Python's slice semantics
+        ymin = int(math.floor(ymin))  # row index
+        ymax = int(math.ceil(ymax))  # because of Python's slice semantics
+        # ymax = int(math.ceil(ymax)) + 1  # because of Python's slice semantics
+
+        # clamp to the image extent
+        xmin = 0 if 0 > xmin else xmin
+        ymin = 0 if 0 > ymin else ymin
+        # xmax = ori2geo_img_width if ori2geo_img_width > xmax else xmax
+        # ymax = ori2geo_img_height if ori2geo_img_height > ymax else ymax
+        xmax = xmax if ori2geo_img_width > xmax else ori2geo_img_width
+        ymax = ymax if ori2geo_img_height > ymax else ori2geo_img_height
+
+        # guard against an empty window
+        xmax = xmax + 1 if xmax == xmin else xmax
+        ymax = ymax + 1 if ymax == ymin else ymax
+
+ if ymax <= ymin or xmax <= xmin or ymax > ori2geo_img_height or xmax > ori2geo_img_width or xmin < 0 or ymin < 0 or xmin > ori2geo_img_width or ymin > ori2geo_img_height or ymax < 0 or xmax < 0:
+            msg = 'csv_roi:' + str(roi) + ' not in box, please revise csv data!'
+ print(msg)
+ else:
+ r_arr = ori2geo_img[0, ymin:ymax, xmin:xmax]
+ c_arr = ori2geo_img[1, ymin:ymax, xmin:xmax]
+
+            # build the coordinate grids
+ ori2geo_mask_r_count = ymax - ymin
+ ori2geo_mask_c_count = xmax - xmin
+
+ lon_lat_arr = np.ones((2, ori2geo_mask_r_count, ori2geo_mask_c_count))
+ col_arr = np.arange(xmin, xmax) * np.ones((ori2geo_mask_r_count, ori2geo_mask_c_count))
+ row_arr = ((np.arange(ymin, ymax)) * np.ones((ori2geo_mask_c_count, ori2geo_mask_r_count))).T
+
+ img_geotrans = ori2geo_gt
+ lon_arr = img_geotrans[0] + img_geotrans[1] * col_arr + img_geotrans[2] * row_arr
+ lat_arr = img_geotrans[3] + img_geotrans[4] * col_arr + img_geotrans[5] * row_arr
+ lon_lat_arr[0, :, :] = lon_arr
+ lon_lat_arr[1, :, :] = lat_arr
+
+ # print("csv_roi:")
+ # print(roi)
+            r_min = np.floor(np.nanmin(r_arr))  # row/col range of the ROI in the L1A grid
+ r_max = np.ceil(np.nanmax(r_arr)) + 1
+ c_min = np.floor(np.nanmin(c_arr))
+ c_max = np.ceil(np.nanmax(c_arr)) + 1
+
+            # clamp to the L1A extent
+ r_min = 0 if r_min < 0 else r_min
+ r_max = self.l1a_height if r_max > self.l1a_height else r_max
+ c_min = 0 if c_min < 0 else c_min
+ c_max = self.l1a_width if c_max > self.l1a_width else c_max
+
+            # guard against an empty window
+            r_max = r_max + 1 if r_min == r_max else r_max
+            c_max = c_max + 1 if c_min == c_max else c_max
+            if r_max <= r_min or c_max <= c_min or r_max > self.l1a_height or c_max > self.l1a_width or r_min < 0 or c_min < 0 or c_min > self.l1a_width or r_min > self.l1a_height or r_max < 0 or c_max < 0:
+                msg = 'csv_roi:' + str(roi) + ' not in box, please revise csv data!'
+                print(msg)
+            mask_geo = SAR_GEO.cut_L1A_img(lon_lat_arr, point_list)  # rasterize the ROI in geographic coordinates
+
+ mask_geo = mask_geo.reshape(-1)
+ r_arr = r_arr.reshape(-1)
+ c_arr = c_arr.reshape(-1)
+
+ mask_geo_idx = np.where(mask_geo == 1)[0]
+ if mask_geo_idx.shape[0] == 0:
+                msg = 'csv_roi:' + str(roi) + ' not in box, please revise csv data!'
+ print(msg)
+ else:
+ r_idx = r_arr[mask_geo_idx]
+ c_idx = c_arr[mask_geo_idx]
+
+                r_idx = r_idx - r_min  # row offset
+                c_idx = c_idx - c_min  # column offset
+                r_count = r_max - r_min  # number of rows
+                c_count = c_max - c_min  # number of columns
+
+                # allocate an all-NaN mask of the target size (row/col counts are
+                # floats here, so cast them for the shape)
+                mask_l1a = np.full((int(r_count), int(c_count)), np.nan)
+                mask = SAR_GEO.gereratorMask(r_idx.astype(np.float64), c_idx.astype(np.float64),
+                                             mask_l1a)  # note: this function was modified
+
+                self._begin_r = int(r_min)
+                self._end_r = int(r_max)
+                self._begin_c = int(c_min)
+                self._end_c = int(c_max)
+ self._mask = mask
+
+ def cut_L1A(self, in_path, out_path):
+ img = ImageHandle.ImageHandler.get_data(in_path)
+ if len(img.shape) == 3:
+ cut_img = img[:, self._begin_r:self._end_r, self._begin_c:self._end_c]
+ cut_img[0, :, :] = cut_img[0, :, :] * self._mask
+ cut_img[1, :, :] = cut_img[1, :, :] * self._mask
+ ImageHandle.ImageHandler.write_img(out_path, '', [0, 0, 0, 0, 0, 0], cut_img)
+ else:
+            # slice without +1 so the window matches self._mask's shape
+            cut_img = img[self._begin_r:self._end_r, self._begin_c:self._end_c]
+            cut_img[:, :] = cut_img[:, :] * self._mask
+ ImageHandle.ImageHandler.write_img(out_path, '', [0, 0, 0, 0, 0, 0], cut_img)
+
+ def grid_interp_to_station(self, all_data, station_lon, station_lat, method='linear'):
+ '''
+        func: interpolate values on a regular lon/lat grid to scattered stations using griddata
+        inputs:
+            all_data: [grid_lon, grid_lat, data], i.e. [lon grid, lat grid, value grid]
+            station_lon: station longitudes
+            station_lat: station latitudes; a single point, a list or a 1-D array
+            method: interpolation method, 'linear' by default
+ '''
+ station_lon = np.array(station_lon).reshape(-1, 1)
+ station_lat = np.array(station_lat).reshape(-1, 1)
+
+ lon = all_data[0].reshape(-1, 1)
+ lat = all_data[1].reshape(-1, 1)
+ data = all_data[2].reshape(-1, 1)
+
+ points = np.concatenate([lon, lat], axis=1)
+
+ station_value = griddata(points, data, (station_lon, station_lat), method=method)
+
+ station_value = station_value[:, :, 0]
+
+ return station_value
+
+    #####################
+    # when an ori2geo.tif lookup table exists
+    #####################
+
+ @staticmethod
+ def cut_L1a_img(src_img_path, cuted_img_path, roi):
+ """裁剪L1a_img
+ Args:
+ src_img_path (_type_): 原始L1A影像
+ cuted_img_path (_type_): 待裁剪对象
+ roi (_type_): 裁剪roi
+ """
+ ori2geo_img = ImageHandle.ImageHandler.get_data(src_img_path)
+ point_list = np.array(roi)
+        # call the Cython helper to rasterize the ROI
+ mask = SAR_GEO.cut_L1A_img(ori2geo_img.astype(np.float64), point_list)
+ #
+ ori2geo_img[0, :, :] = ori2geo_img[0, :, :] * mask
+ ori2geo_img[1, :, :] = ori2geo_img[1, :, :] * mask
+
+ ImageHandle.ImageHandler.write_img(cuted_img_path, '', [0, 0, 0, 0, 0, 0], ori2geo_img)
+        return ori2geo_img  # the masked lookup table (also written to cuted_img_path)
+
+ def tran_geo_to_l1a(self, geo_img_path, out_l1a_img_path, ori_sim_img_path, is_class=False):
+ """裁剪后的有投影信息的影像(cover、ndvi)转换到L1A裁剪影像的尺寸
+ Args:
+ geo_img_path (_type_): _description_
+ out_l1a_img_path (_type_): _description_
+ ori_sim_img_path (_type_): _description_
+
+ geo_img_path:地理影像路径
+ out_l1a_img_path:转换L1A坐标系图像路径
+ ori_sim_img_path:裁剪后模拟影像路径
+ is_clss: 是否是 定性类产品
+
+ """
+ inverse_gt = ImageHandle.ImageHandler.get_invgeotransform(geo_img_path)
+ ori2geo_tif = ImageHandle.ImageHandler.get_data(ori_sim_img_path)
+ height = ImageHandle.ImageHandler.get_img_height(geo_img_path)
+ width = ImageHandle.ImageHandler.get_img_width(geo_img_path)
+        # apply the inverse geotransform: (lon, lat) -> pixel (x, y)
+        x = ori2geo_tif[0, :, :]
+        y = ori2geo_tif[1, :, :]
+ ori2geo_tif[0, :, :] = inverse_gt[0] + inverse_gt[1] * x + inverse_gt[2] * y # x
+ ori2geo_tif[1, :, :] = inverse_gt[3] + inverse_gt[4] * x + inverse_gt[5] * y # y
+
+ del x, y
+        geo_tif = ImageHandle.ImageHandler.get_data(geo_img_path)  # read the target (georeferenced) image
+ ori2geo_tif_shape = ori2geo_tif.shape # height,width
+
+ if is_class:
+ ori2geo_tif = np.round(ori2geo_tif).astype(np.int32)
+ mask = (ori2geo_tif[0, :, :] >= 0) & (ori2geo_tif[0, :, :] < width) & (ori2geo_tif[1, :, :] >= 0) & (
+ ori2geo_tif[1, :, :] < height)
+ ori2geo_tif[0, :, :] = ori2geo_tif[0, :, :] * mask
+ ori2geo_tif[1, :, :] = ori2geo_tif[1, :, :] * mask
+ geo_tif_shape = geo_tif.shape
+ geo_tif_l1a = geo_tif[ori2geo_tif[1, :, :].reshape(-1), ori2geo_tif[0, :, :].reshape(-1)].reshape(
+ ori2geo_tif.shape[1], ori2geo_tif.shape[2]).astype(np.float32)
+ del ori2geo_tif, geo_tif
+ one_ids = np.where(mask == False)
+ geo_tif_l1a[one_ids[0], one_ids[1]] = np.nan
+
+ ImageHandle.ImageHandler.write_img(out_l1a_img_path, '', [0, 0, 0, 0, 0, 0], geo_tif_l1a)
+ # save_temp_L1A(out_l1a_img_path,geo_tif_l1a)
+ return geo_tif_l1a
+        else:  # continuous values: linear interpolation
+ mask = (ori2geo_tif[0, :, :] > 0) & (ori2geo_tif[0, :, :] < width - 1) & (ori2geo_tif[1, :, :] > 0) & (
+ ori2geo_tif[1, :, :] < height - 1)
+ one_ids = np.where(mask == 1)
+ x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
+ result_data = self.grid_interp_to_station([y.reshape(-1), x.reshape(-1), geo_tif.reshape(-1)],
+ ori2geo_tif[1, one_ids[0], one_ids[1]].reshape(-1),
+ ori2geo_tif[0, one_ids[0], one_ids[1]].reshape(-1),
+ method='linear').reshape(-1)
+ mask = mask.reshape(-1)
+ result_data_result = np.zeros((ori2geo_tif.shape[1], ori2geo_tif.shape[2]))
+ result_data_result[:, :] = np.nan
+ result_data_result = SAR_GEO.insert_data(result_data_result, one_ids[0].astype(np.int32),
+ one_ids[1].astype(np.int32), result_data)
+ ImageHandle.ImageHandler.write_img(out_l1a_img_path, '', [0, 0, 0, 0, 0, 0], result_data_result)
+ # save_temp_L1A(out_l1a_img_path,result_data_result)
+ return result_data_result
+
+ def tran_lonlats_to_rowcols(self, lonlats, ori_sim_img_path):
+ """
+        Purpose: convert lon/lat coordinates into image row/col numbers
+        Signature: tran_lonlats_to_rowcols(lonlats, ori_sim_img_path)
+        lonlats: lon/lat pairs, e.g. [[120.53, 31.5], [120.61, 31.5], [120.53, 31.45], [120.61, 31.45]]
+        returns: row/col pairs, e.g. [[0, 0], [7000, 0], [7000, 8000], [0, 8000]]
+        ori_sim_img_path: path of the clipped simulated image
+ """
+ ori2geo_tif = ImageHandle.ImageHandler.get_data(ori_sim_img_path)
+ min_lon = np.nanmin(ori2geo_tif[0, :, :])
+ max_lon = np.nanmax(ori2geo_tif[0, :, :])
+ min_lat = np.nanmin(ori2geo_tif[1, :, :])
+ max_lat = np.nanmax(ori2geo_tif[1, :, :])
+
+ result = []
+ for i in range(len(lonlats)):
+ p = lonlats[i]
+ if min_lon > p[0] or max_lon < p[0] or min_lat > p[1] or max_lat < p[1]:
+ result.append([-1, -1])
+ continue
+ temp_x = np.square(ori2geo_tif[0, :, :] - p[0]) + np.square(ori2geo_tif[1, :, :] - p[1])
+ r_c_list = []
+ r_c = np.argmin(temp_x)
+ r_c = [r_c // temp_x.shape[1], r_c % temp_x.shape[1]]
+ r_c_list.append([r_c[0], r_c[1], ori2geo_tif[0, r_c[0], r_c[1]], ori2geo_tif[1, r_c[0], r_c[1]]])
+            # refine by interpolating over a small neighbourhood
+ for i in range(r_c[0] - 3, r_c[0] + 3):
+ if i < 0 or i > temp_x.shape[0] - 1:
+ continue
+ for j in range(r_c[1] - 3, r_c[1] + 3):
+ if j < 0 or j > temp_x.shape[1] - 1:
+ continue
+ r_c_list.append([i, j, ori2geo_tif[0, i, j], ori2geo_tif[1, i, j]])
+            r_c_list = np.array(r_c_list)
+ f_r = scipy.interpolate.interp2d(r_c_list[:, 2], r_c_list[:, 3], r_c_list[:, 0], kind='linear')
+ f_c = scipy.interpolate.interp2d(r_c_list[:, 2], r_c_list[:, 3], r_c_list[:, 1], kind='linear')
+ tar_get_r = f_r(p[0], p[1])[0]
+ tar_get_c = f_c(p[0], p[1])[0]
+ if tar_get_r < ori2geo_tif.shape[1] and tar_get_c < ori2geo_tif.shape[2] and tar_get_r>=0 and tar_get_c>=0:
+                lon_temp = ori2geo_tif[0, int(round(tar_get_r)), int(round(tar_get_c))]
+                lon_lat = ori2geo_tif[1, int(round(tar_get_r)), int(round(tar_get_c))]
+                # TODO: add further filtering based on the recovered lon/lat
+ result.append([tar_get_r, tar_get_c])
+ else:
+ result.append([-1, -1])
+ return result
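+
+    # Note: scipy.interpolate.interp2d (used above) is deprecated and removed in
+    # SciPy >= 1.14; on newer environments an equivalent would be
+    # scipy.interpolate.LinearNDInterpolator(r_c_list[:, 2:4], r_c_list[:, 0])
+    # evaluated at (p[0], p[1]).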
+
+ def tran_lonlats_to_L1A_rowcols(self, meas_data, ori_sim_path, row, col):
+ lonlats = []
+ data_roi = []
+ rowcols = []
+ measdata_list = []
+ data_sim = ImageHandle.ImageHandler.get_all_band_array(ori_sim_path)
+ for data in meas_data:
+ lon = float(data[1])
+ lat = float(data[2])
+ if (lon > self._min_lon and lon < self._max_lon and lat > self._min_lat and lat < self._max_lat):
+ lonlats.append([lon, lat])
+ data_roi.append(data)
+
+ for lonlat in lonlats:
+ (x, y) = ImageHandle.ImageHandler.lat_lon_to_pixel(ori_sim_path, lonlat)
+ rowcols.append([x, y])
+
+ for data, rowcol in zip(data_roi, rowcols):
+ img_x = round(data_sim[round(rowcol[1]), round(rowcol[0]), 0])
+ img_y = round(data_sim[round(rowcol[1]), round(rowcol[0]), 1])
+ if (img_x > 0 and img_x < row and img_y > 0 and img_y < col):
+ measdata_list.append([img_x, img_y, float(data[3])])
+
+ # rowcols = self.tran_lonlats_to_rowcols(lonlats, ori_sim_path)
+ # measdata_list = []
+ # for data, rowcol in zip(data_roi, rowcols):
+ # if (rowcol[0] != -1 and rowcol[1] != -1):
+ # measdata_list.append(
+ # [round(rowcol[0]) - self._begin_r, round(rowcol[1]) - self._begin_c, float(data[3])])
+ return measdata_list
+
+ @staticmethod
+ def get_radius_of_influence(lalo_step, src_meta='radar2geo', ratio=3):
+        """Get radius of influence based on the lookup table resolution in lat/lon direction"""
+        EARTH_RADIUS = 6378122.65  # m
+ if src_meta == "geo2radar":
+ # geo2radar
+ radius = 100e3
+ else:
+ # radar2geo
+ step_deg = max(np.abs(lalo_step))
+ step_m = step_deg * np.pi / 180.0 * EARTH_RADIUS
+ radius = step_m * ratio
+ return radius
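+
+    # Worked example for the radar2geo branch: a 0.001-degree lookup-table step
+    # gives step_m = 0.001 * pi / 180 * 6378122.65 ~ 111.3 m, so with the default
+    # ratio=3 the radius of influence is ~334 m.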
+
+ def interp2d_station_to_grid(self, lon, lat, data, loc_range=[18, 54, 73, 135],
+ det_grid=1, method='linear', projCode=4326):
+        # Reference: https://blog.csdn.net/weixin_43718675/article/details/103497930
+ '''
+        func: interpolate station data onto a regular lon/lat grid
+        inputs:
+            lon: station longitudes
+            lat: station latitudes
+            data: the observed values at the stations
+            loc_range: [lat_min, lat_max, lon_min, lon_max]; the stations are interpolated onto this extent
+            det_grid: spatial resolution of the output grid
+            method: interpolation method, 'linear' by default
+ return:
+
+ [lon_grid,lat_grid,data_grid]
+ '''
+        # step 1: reshape lon, lat and data into n x 1 arrays
+ lon = np.array(lon).reshape(-1, 1)
+ lat = np.array(lat).reshape(-1, 1)
+ data = np.array(data).reshape(-1, 1)
+
+        # step 2: define the lon/lat grid of the target area
+ lat_min = loc_range[0] # y
+ lat_max = loc_range[1] # y
+ lon_min = loc_range[2] # x
+ lon_max = loc_range[3] # x
+ gt = [0, 0, 0, 0, 0, 0]
+ gt[0] = lon_min # x
+ gt[1] = det_grid
+ gt[3] = lat_max # y
+ gt[5] = -det_grid
+ lat_count = int((lat_max - lat_min) / det_grid + 1) # y
+ lon_count = int((lon_max - lon_min) / det_grid + 1) # x
+        # resampling implemented with pyresample
+ proj_osr = osr.SpatialReference()
+ proj_osr.ImportFromEPSG(projCode)
+ projection = proj_osr.ExportToPROJJSON()
+        # (lower_left_x, lower_left_y, upper_right_x, upper_right_y)
+ target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
+ lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
+ source_def = geometry.SwathDefinition(lons=lon, lats=lat)
+ lalo_step = [det_grid, -det_grid]
+ radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
+ result = pr.bilinear.resample_bilinear(data, source_def, target_def,
+ radius=radius, neighbours=32,
+ nprocs=8, fill_value=np.nan,
+ epsilon=0)
+ #
+ return result
+
+ def geocoding(self, ori_geo_tif, produc_arr, pixel_delta=1, method='linear'):
+        # Reference: https://blog.csdn.net/weixin_43718675/article/details/103497930
+        ori_geo_tif[np.isnan(ori_geo_tif)] = -1
+        lon_data = ori_geo_tif[0, :, :].reshape(-1)
+        lat_data = ori_geo_tif[1, :, :].reshape(-1)
+        produc_arr = np.asarray(produc_arr).reshape(-1)
+        # keep produc_arr aligned with the filtered lon/lat samples
+        idx = np.where(lat_data != -1)
+        lat_data = lat_data[idx]
+        lon_data = lon_data[idx]
+        produc_arr = produc_arr[idx]
+        idx = np.where(lon_data != -1)
+        lat_data = lat_data[idx]
+        lon_data = lon_data[idx]
+        produc_arr = produc_arr[idx]
+ # ###########################################
+ result = self.interp2d_station_to_grid(lon_data, lat_data, produc_arr,
+ [self._min_lat, self._max_lat, self._min_lon, self._max_lon],
+ det_grid=pixel_delta, method=method)
+ return result
+
+ # def l1a_2_geo(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='linear'):
+ # ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path)
+ # # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path)
+ # l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1)
+ # pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001
+ # pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c)
+ #
+ # lon_data = ori_geo_tif[0, :, :].reshape(-1)
+ # lat_data = ori_geo_tif[1, :, :].reshape(-1)
+ # l1a_produc = l1a_produc.reshape(-1)
+ # idx = np.logical_not(np.isnan(lon_data))
+ # lat_data = lat_data[idx]
+ # lon_data = lon_data[idx]
+ # l1a_produc = l1a_produc[idx]
+ # idx = np.logical_not(np.isnan(lat_data))
+ # lat_data = lat_data[idx]
+ # lon_data = lon_data[idx]
+ # l1a_produc = l1a_produc[idx]
+ #
+ # gt = [self._min_lon, pixel_delta_x, 0.0,
+ # self._max_lat, 0.0, -pixel_delta_y]
+ # [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon]
+ # lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y
+ # lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x
+ #
+    #     # build the spatial reference used to select the output CRS
+    #     srs = osr.SpatialReference()
+    #     srs.ImportFromEPSG(4326)  # output coordinate system: "WGS 84"
+ # proj = srs.ExportToWkt()
+ #
+ # projection = srs.ExportToPROJJSON()
+    #     # (lower_left_x, lower_left_y, upper_right_x, upper_right_y)
+ # target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
+ # lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
+ # lon_data = lon_data.reshape(-1, 1)
+ # lat_data = lat_data.reshape(-1, 1)
+ # l1a_produc = l1a_produc.reshape(-1, 1)
+ # source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data)
+ # lalo_step = [pixel_delta_x, -pixel_delta_y]
+ # radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
+ # geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def,
+ # radius=radius, neighbours=32,
+ # nprocs=8, fill_value=np.nan,
+ # epsilon=0)
+ #
+ # ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc, np.nan)
+ #
+ # def l1a_2_geo_int(self, ori_geo_path, l1a_produc_path, geo_produc_path, method='nearest'):
+ # ori_geo_tif = ImageHandle.ImageHandler.get_data(ori_geo_path)
+ # # l1a_produc = ImageHandle.ImageHandler.get_data(l1a_produc_path)
+ # l1a_produc = ImageHandle.ImageHandler.get_band_array(l1a_produc_path, 1)
+ # pixel_delta_y = (self._max_lat - self._min_lat) / (self._end_r - self._begin_r) # 0.001
+ # pixel_delta_x = (self._max_lon - self._min_lon) / (self._end_c - self._begin_c)
+ #
+ # lon_data = ori_geo_tif[0, :, :].reshape(-1)
+ # lat_data = ori_geo_tif[1, :, :].reshape(-1)
+ # l1a_produc = l1a_produc.reshape(-1)
+ # idx = np.logical_not(np.isnan(lon_data))
+ # lat_data = lat_data[idx]
+ # lon_data = lon_data[idx]
+ # l1a_produc = l1a_produc[idx]
+ # idx = np.logical_not(np.isnan(lat_data))
+ # lat_data = lat_data[idx]
+ # lon_data = lon_data[idx]
+ # l1a_produc = l1a_produc[idx]
+ #
+ # gt = [self._min_lon, pixel_delta_x, 0.0,
+ # self._max_lat, 0.0, -pixel_delta_y]
+ # [lat_min, lat_max, lon_min, lon_max] = [self._min_lat, self._max_lat, self._min_lon, self._max_lon]
+ # lat_count = int((lat_max - lat_min) / pixel_delta_y + 1) # y
+ # lon_count = int((lon_max - lon_min) / pixel_delta_x + 1) # x
+ #
+    #     # build the spatial reference used to select the output CRS
+    #     srs = osr.SpatialReference()
+    #     srs.ImportFromEPSG(4326)  # output coordinate system: "WGS 84"
+ # proj = srs.ExportToWkt()
+ #
+ # projection = srs.ExportToPROJJSON()
+    #     # (lower_left_x, lower_left_y, upper_right_x, upper_right_y)
+ # target_def = AreaDefinition("id1", "WGS84", "proj_id", projection,
+ # lon_count, lat_count, [lon_min, lat_min, lon_max, lat_max])
+ # lon_data = lon_data.reshape(-1, 1)
+ # lat_data = lat_data.reshape(-1, 1)
+ # l1a_produc = l1a_produc.reshape(-1, 1)
+ # source_def = geometry.SwathDefinition(lons=lon_data, lats=lat_data)
+ # lalo_step = [pixel_delta_x, -pixel_delta_y]
+ # radius = TransImgL1A.get_radius_of_influence(lalo_step, src_meta='radar2geo')
+ # if method == 'linear':
+ # geo_produc = pr.bilinear.resample_bilinear(l1a_produc, source_def, target_def,
+ # radius=radius, neighbours=32,
+ # nprocs=8, fill_value=0,
+ # epsilon=0)
+ # elif method == 'nearest':
+ # geo_produc = pr.kd_tree.resample_nearest(source_def, l1a_produc, target_def, epsilon=0,
+ # radius_of_influence=50000,
+ # fill_value=0, nprocs=8
+ # )
+ # geo_produc = geo_produc[:,:,0]
+ # ImageHandle.ImageHandler.write_img(geo_produc_path, proj, gt, geo_produc)
+
+ @property
+ def mask(self):
+ return self._mask
+
+
+if __name__ == '__main__':
+ # ori_sim_path = r"I:\坐标转换\坐标转换接口\L1A数据(l1a_img_path数据)\RPC_ori_sim.tif"
+ # roi_Extend = [[120.53, 31.5], [120.61, 31.5], [120.61, 31.45], [120.53, 31.45]]
+ # conver_path = r"I:\坐标转换\坐标转换接口\裁剪后辅助数据(geo_img_path数据)\Covering_cut.tif"
+ # ndvi_path = r"I:\坐标转换\坐标转换接口\裁剪后辅助数据(geo_img_path数据)\NDVI_cut.tif"
+ # out_path = r"I:\坐标转换\SAR2GEO\test"
+ #
+ # tr = TransImgL1A(ori_sim_path,roi_Extend)
+ # tr.l1a_2_geo("I:/cut.tif", "I:/salinity.tif", "I:/salinity_geo2.tif")
+ ori_sim = r'D:\micro\WorkSpace\SurfaceRoughness\Temporary\preprocessed\ori_sim_preprocessed.tif'
+ product_tif = r'D:\micro\WorkSpace\SurfaceRoughness\Temporary\SurfaceRoughnessProduct_temp.tif'
+ result = r'D:\micro\WorkSpace\SurfaceRoughness\Temporary\SurfaceRoughnessProduct.tif'
+ method = 'linear'
+ """
+ 31.14;31.50;120.34;120.75
+ """
+ # roi_Extend = [[102.12, 33.879], [102.327, 33.879], [102.327, 33.66], [102.12, 31.45]]
+ ori_sim_data = ImageHandle.ImageHandler.get_data(ori_sim)
+ lon = ori_sim_data[0, :, :]
+ lat = ori_sim_data[1, :, :]
+ min_lon = np.nanmin(lon)
+ max_lon = np.nanmax(lon)
+ min_lat = np.nanmin(lat)
+ max_lat = np.nanmax(lat)
+ print(np.nanmin(lon))
+ print(np.nanmax(lon))
+ print(np.nanmin(lat))
+ print(np.nanmax(lat))
+
+ # roi_Extend = [[min_lon, max_lat], [max_lon, max_lat], [min_lon, min_lat], [max_lon, min_lat]]
+ roi_Extend = [[116.17328, 43.727577], [116.652504, 43.727577], [116.652504, 44.119164], [116.17328, 44.119164]]
+ # roi_Extend = [[108.51960117899473, 38.192443138079895], [109.62308480328566, 38.192443138079895], [109.62308480328566, 37.69300142375064], [108.51960117899473, 37.69300142375064]]
+
+    # NOTE: this test block predates the current signatures; TransImgL1A now takes
+    # (ori_sim_path, roi, l1a_height, l1a_width) and l1a_2_geo_int is commented out above
+    tr = TransImgL1A(ori_sim, roi_Extend)
+    tr.l1a_2_geo_int(ori_sim, product_tif, result, method)
+
+ pass
+"""
+import numpy as np
+from pyresample import kd_tree, geometry
+area_def = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD',
+ {'a': '6378144.0', 'b': '6356759.0',
+ 'lat_0': '50.00', 'lat_ts': '50.00',
+ 'lon_0': '8.00', 'proj': 'stere'},
+ 800, 800,
+ [-1370912.72, -909968.64,
+ 1029087.28, 1490031.36])
+data = np.fromfunction(lambda y, x: y*x, (50, 10))
+lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
+lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
+swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
+result = kd_tree.resample_nearest(swath_def, data,area_def, radius_of_influence=50000, epsilon=0.5)
+"""
diff --git a/Ortho-NoS1GBM/tool/algorithm/xml/AlgXmlHandle.py b/Ortho-NoS1GBM/tool/algorithm/xml/AlgXmlHandle.py
new file mode 100644
index 0000000..f2891da
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/xml/AlgXmlHandle.py
@@ -0,0 +1,746 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :AlgXmlHandle.py
+@Function :read, write and validate the algorithm description XML
+@Contact :https://www.cnblogs.com/feifeifeisir/p/10893127.html
+@Author :SHJ
+@Date :2021/9/6
+@Version :1.0.0
+"""
+import logging
+from xml.etree.ElementTree import ElementTree
+
+from tool.algorithm.block.blockprocess import BlockProcess
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.file.fileHandle import fileHandle
+import os
+import re
+import platform
+import psutil
+import multiprocessing
+import ctypes
+logger = logging.getLogger("mylog")
+import glob
+
+class ManageAlgXML:
+ """
+    Validate and read the algorithm XML file
+ """
+ def __init__(self, xml_path):
+ self.in_path = xml_path
+ self.__tree = ElementTree()
+ self.__root = None
+ self.__alg_compt = None
+ self.__workspace_path = None
+ self.__taskID = None
+ self.__algorithm_name = None
+ self.__envs = {}
+ self.__input_paras = {}
+ self.__output_paras = {}
+ self.__init_flag = False
+
+
+ def init_xml(self):
+ """
+        Parse and validate the XML file
+        :return: True on success (failures raise exceptions)
+ """
+ try:
+ self.__tree.parse(self.in_path)
+ except FileNotFoundError as ex:
+ msg = ex + "xml_path = " + self.in_path
+ raise Exception(msg)
+ except BaseException:
+ raise Exception("cannot open algXMl")
+
+ self.__root = self.__tree.getroot()
+ if self.__root is None:
+ raise Exception("get root failed")
+
+ self.__alg_compt = self.__root.find("AlgCompt")
+ if self.__alg_compt is None:
+ raise Exception("get AlgCompt failed")
+
+ self.__workspace_path = self.__check_workspace_path()
+ if self.__workspace_path is None:
+ raise Exception("check workspace_path failed")
+
+ self.__taskID = self.__check_task_id()
+ if self.__taskID is None:
+ raise Exception("check taskID failed")
+
+ self.__algorithm_name = self.__check_algorithm_name()
+ if self.__algorithm_name is None:
+ raise Exception("check AlgorithmName failed")
+
+ self.__envs = self.__check_environment()
+ if self.__envs is None or self.__envs == {}:
+ raise Exception("check environment failed")
+
+ self.__input_paras = self.__check_input_para()
+ if self.__input_paras is None or self.__input_paras == {}:
+ raise Exception("check input para failed")
+
+ self.__output_paras = self.__check_output_para()
+
+ self.__init_flag = True
+ return True
+
+ def get_workspace_path(self):
+ """
+        Get the workspace path
+        :return: the workspace path; raises if the XML is not initialized
+ """
+ if not self.__init_flag:
+ raise Exception("XML is not initialized")
+ return self.__workspace_path
+
+ def get_task_id(self):
+ """
+        Get the task ID
+        :return: taskID; raises if the XML is not initialized
+ """
+ if not self.__init_flag:
+ raise Exception("XML is not initialized")
+ return self.__taskID
+
+ def get_algorithm_name(self):
+ """
+        Get the algorithm name
+ :return:
+ """
+ if not self.__init_flag:
+ raise Exception("AlgorithmName is not initialized")
+ return self.__algorithm_name
+
+ def get_envs(self):
+ """
+        Get the runtime environment requirements
+        :return: the environment requirements; raises if the XML is not initialized
+ """
+ if not self.__init_flag:
+ raise Exception("XML is not initialized")
+ return self.__envs
+
+ def get_input_paras(self):
+ """
+        Get the input parameters
+        :return: the input parameters; raises if the XML is not initialized
+ """
+ if not self.__init_flag:
+ raise Exception("XML is not initialized")
+ return self.__input_paras
+
+ def get_output_paras(self):
+ """
+        Get the output parameters
+        :return: the output parameters; raises if the XML is not initialized
+ """
+ if not self.__init_flag:
+ raise Exception("XML is not initialized")
+ return self.__output_paras
+
+ def __check_workspace_path(self):
+ """
+        Check the workspace path
+        :return: the workspace path; raises on error
+        """
+        workspace_note = self.__root.find("WorkSpace")
+        workspace_path = str(workspace_note.text).replace("\n", "").replace(' ', '')  # strip newlines and spaces
+        if workspace_path is None:
+            raise Exception("'workspace_path' is None")
+        if not os.path.isdir(workspace_path):
+            raise Exception("'workspace_path' does not exist: %s" % workspace_path)
+
+        if workspace_path[-1] != '\\':
+            workspace_path += '\\'
+
+ return workspace_path
+
+ def __check_environment(self):
+ """
+        Check the runtime environment requirements in the XML file
+        :return: dict of environment requirements; raises on error
+ """
+ env_note = self.__alg_compt.find("Environment")
+
+ is_cluster = int(env_note.find("IsCluster").text.replace("\n", "").replace(' ', ''))
+ is_legal = is_cluster in [0, 1]
+ if not is_legal:
+ raise Exception("IsCluster is not 0 or 1")
+
+ cluster_num = int(env_note.find("ClusterNum").text)
+ is_legal = cluster_num in [0, 1, 2, 3, 4, 5, 6, 7]
+ if not is_legal:
+ raise Exception("cluster_num is beyond [0,1,2,3,4,5,6,7]")
+
+        operating_system = env_note.find("OperatingSystem").text.replace("\n", "").replace(' ', '')  # strip newlines and spaces
+ # is_legal = operating_system in ["Windows10", "Windows7", "WindowsXP"]
+ # if not is_legal:
+ # raise Exception("OperatingSystem is beyond [Windows10, Windows7, WindowsXP]")
+
+ cpu = env_note.find("CPU").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ is_legal = cpu in ["单核", "双核", "3核", "4核", "6核", "8核"]
+ if not is_legal:
+ raise Exception("OperatingSystem is beyond [单核, 双核, 3核, 4核, 6核, 8核]")
+
+ memory = env_note.find("Memory").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ is_legal = memory in ["1GB", "2GB", "4GB", "6GB", "8GB", "10GB", "12GB", "16GB"]
+ # if not is_legal:
+ # raise Exception("OperatingSystem is beyond [1GB, 2GB, 4GB, 6GB, 8GB, 10GB, 12GB, 16GB]")
+
+ storage = env_note.find("Storage").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ is_legal = int(storage[:-2]) > 0
+ if not is_legal:
+ raise Exception("Storage < 0GB")
+
+ network_card = env_note.find("NetworkCard").text
+ # is_legal = network_card in ["无需求"]
+ # if not is_legal:
+ # # 输出异常
+ # return
+
+ band_width = env_note.find("Bandwidth").text
+ # is_legal = band_width in ["无需求"]
+ # if not is_legal:
+ # # 输出异常
+ # return
+
+ gpu = env_note.find("GPU").text
+ # is_legal = GPU in ["无需求"]
+ # if not is_legal:
+ # # 输出异常
+ # return
+ envs = {"is_Cluster": is_cluster, "cluster_num": cluster_num, "operating_system": operating_system,
+ "CPU": cpu, "memory": memory}
+ envs.update({"Storage": storage, "network_card": network_card, "band_width": band_width, "GPU": gpu})
+ return envs
+
+ def __check_input_para(self):
+ """
+        Check the input parameters in the XML file
+        :return: dict of input parameters; raises on error
+ """
+ input_paras_note = self.__alg_compt.find("Inputs")
+ paras_num = int(input_paras_note.attrib.get("ParameterNum"))
+ para_list = input_paras_note.findall("Parameter")
+
+ if paras_num != len(para_list):
+ msg ="'ParameterNum':"+ str(paras_num) + " != number of 'Parameter':" + str(len(para_list))
+ logger.warning(msg)
+
+ input_paras = {}
+ for para in para_list:
+ para_name = para.find("ParaName").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ para_chs_name = para.find("ParaChsName").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ para_type = para.find("ParaType").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ data_type = para.find("DataType").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ para_value = para.find("ParaValue").text.replace("\n", "").replace(' ', '') #去除空格和回车
+ input_para = {"ParaName": para_name, "ParaChsName": para_chs_name, "ParaType": para_type,
+ "DataType": data_type, "ParaValue": para_value}
+ #print(para_name)
+ if para_type == "Value":
+ # max_value = para.find("MaxValue").text
+ # min_value = para.find("MinValue").text
+                # option_value = para.find("OptionValue").text.replace("\n", "").replace(' ', '')  # strip newlines and spaces
+ # input_para.update({"MaxValue": max_value, "MinValue": min_value, "OptionValue": option_value})
+ # input_para.update({"OptionValue": option_value}) todo
+ pass
+ if para_name is None or para_type is None or para_value is None:
+                msg = 'there is None among para_name:' + str(para_name) + ', para_type:' + str(para_type) + ' or para_value:' + str(para_value) + '!'
+ raise Exception(msg)
+
+ input_paras.update({para_name: input_para})
+ return input_paras
+
+ def __check_output_para(self):
+ """
+        Check the output parameters in the XML file
+        :return: dict of output parameters; raises on error
+ """
+ output_paras_note = self.__alg_compt.find("Outputs")
+ paras_num = int(output_paras_note.attrib.get("ParameterNum"))
+ para_list = output_paras_note.findall("Parameter")
+
+ if paras_num != len(para_list):
+ raise Exception("'ParameterNum' != number of 'Parameter'")
+ output_paras = {}
+ return output_paras
+
+ def write_out_para(self, para_name, para_value):
+ """
+        Write an output parameter value back to the XML
+ """
+ output_paras_note = self.__alg_compt.find("Outputs")
+ para_list = output_paras_note.findall("Parameter")
+ flag = False
+ for para in para_list:
+ if para.find("ParaName").text == para_name:
+ para.find("ParaValue").text = para_value
+ flag = True
+        if not flag:
+ raise Exception('Cannot find Output Parameter:'+para_name+'!')
+ self.__tree.write(self.in_path, encoding="utf-8", xml_declaration=True)
+
+ def __check_task_id(self):
+ """
+        Check the task ID
+        :return: taskID; raises on error
+        """
+        task_id_note = self.__root.find("TaskID")
+        task_id = str(task_id_note.text).replace("\n", "").replace(' ', '')  # strip newlines and spaces
+ if task_id is None:
+ raise Exception("'TaskID' is None")
+ return task_id
+
+ def __check_algorithm_name(self):
+
+ algorithm_name_note = self.__alg_compt.find("AlgorithmName")
+        algorithm_name = str(algorithm_name_note.text).replace("\n", "").replace(' ', '')  # strip newlines and spaces
+ if algorithm_name is None:
+ raise Exception("'AlgorithmName' is None")
+ return algorithm_name
+
+
+class CheckSource:
+ """
+    Check the integrity and validity of the resources referenced by the config file
+ """
+ def __init__(self, alg_xml_handle):
+ self.__alg_xml_handle = alg_xml_handle
+ self.imageHandler = ImageHandler()
+ self.__ParameterDic={}
+
+
+ def check_alg_xml(self):
+ """
+        Check the algorithm config file
+ """
+ if self.__alg_xml_handle.init_xml():
+ logger.info('init algXML succeed')
+ return True
+ else:
+ raise Exception('init algXML failed')
+
+ def check_run_env(self):
+ """
+        :return: True if the local environment satisfies the requirements
+        """
+        envs = self.__alg_xml_handle.get_envs()
+        # check the operating system
+ local_plat = platform.platform()
+ local_plat_list = local_plat.split("-")
+ flag = envs['operating_system'] == local_plat_list[0]+local_plat_list[1]
+ if flag is False:
+ msg = 'operating_system:' + local_plat_list[0] + local_plat_list[1] + ' is not ' + envs['operating_system']
+ #raise Exception(msg)
+
+        # check system memory (RAM)
+        mem = psutil.virtual_memory()
+        mem_total = int(round(mem.total / 1024 / 1024 / 1024, 0))
+        mem_free = round(mem.free / 1024 / 1024 / 1024, 0)
+        env_memory = envs['memory']
+        env_memory = int(env_memory[:-2])
+        if env_memory > mem_total:
+            msg = 'memory_total ' + str(mem_total) + 'GB less than ' + str(env_memory) + 'GB'
+            # raise Exception(msg)
+
+        if env_memory >= mem_free:
+            msg = 'mem_free ' + str(mem_free) + 'GB less than ' + str(env_memory) + 'GB'
+ logger.warning(msg)
+
+        # check the number of CPU cores
+        env_cpu = envs['CPU']
+        if env_cpu == "单核":
+            env_cpu_core_num = 1
+        elif env_cpu == "双核":
+            env_cpu_core_num = 2
+        elif env_cpu == "三核":
+            env_cpu_core_num = 3
+        else:
+            env_cpu_core_num = int(env_cpu[:-1])
+
+        # halve cpu_count(), presumably to count physical cores rather than hyper-threads
+        local_cpu_core_num = int(multiprocessing.cpu_count() / 2)
+        if env_cpu_core_num > local_cpu_core_num:
+            msg = 'CPU_core_num ' + str(local_cpu_core_num) + ' core less than ' + str(env_cpu_core_num) + ' core'
+ # raise Exception(msg)
+
+        # check free disk space
+        env_storage = envs['Storage']
+        env_storage = int(env_storage[:-2])
+        workspace_path = self.__alg_xml_handle.get_workspace_path()
+        if not os.path.isdir(workspace_path):
+            raise Exception('workspace_path:%s does not exist!' % workspace_path)
+
+ local_storage = self.__get_free_space_mb(workspace_path)
+ if env_storage > local_storage:
+            msg = 'workspace storage ' + str(local_storage) + 'GB less than ' + envs['Storage']
+ # raise Exception(msg)
+
+ return True
+
+ @staticmethod
+ def __get_free_space_mb(folder):
+ """
+        :param folder: path to check, e.g. 'C:\\'
+ :return: folder/drive free space (GB)
+ """
+ if platform.system() == 'Windows':
+ free_bytes = ctypes.c_ulonglong(0)
+ ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
+ return free_bytes.value / 1024 / 1024 / 1024
+ else:
+ st = os.statvfs(folder)
+            return st.f_bavail * st.f_frsize / 1024 / 1024 / 1024  # GB, matching the Windows branch
+
+ def check_input_paras(self, input_para_names):
+ """
+        :param input_para_names: list of parameter names to check [name1, name2, ...]
+        :return: (True, parameter dict) on success; raises on error
+ """
+ workspace_path = self.__alg_xml_handle.get_workspace_path()
+ input_paras = self.__alg_xml_handle.get_input_paras()
+ for name in input_para_names:
+ para = input_paras[name]
+ if para is None:
+ msg = "check para:"+name + " is failed!"+"para is None!"
+ raise Exception(msg)
+
+ if para['ParaType'] == 'File':
+ if para['DataType'] == 'tif':
+ if para['ParaValue'] != 'empty' and para['ParaValue'] != 'Empty'and para['ParaValue'] != '':
+ para_value_list = para['ParaValue'].split(";")
+ for para_value in para_value_list:
+ para_path = para_value
+ if self.__check_tif(para_path) is False:
+ msg = "check para:"+name + " is failed!" + "Path:" + para_path
+ raise Exception(msg)
+
+ if para['DataType'] == 'xml':
+ para_path = para['ParaValue']
+ if not os.path.exists(para_path):
+ raise Exception('para_file:%s is inexistent!', para_path)
+
+ if para['DataType'] == 'File':
+ para_path = para['ParaValue']
+ if os.path.isdir(para_path) is False:
+ msg = "check para:" + name + " is failed!" + "FilePath:" + para_path
+ raise Exception(msg)
+ if para["DataType"]=="ymal":
+ para_path = para['ParaValue']
+ if os.path.isfile(para_path) is False:
+ msg = "check para: " + name + " is failed! " + " FilePath: " + para_path
+ raise Exception(msg)
+
+ elif para['ParaType'] == 'Value':
+ if para['DataType'] == 'float' or para['DataType'] == 'int' or para['DataType'] == 'double':
+ if para['ParaValue'] is None:
+ msg = "check para:"+name + " is failed!"+"'ParaValue' is None"
+ raise Exception(msg)
+ if self.__is_number(para['ParaValue']) is False:
+ raise Exception("para:"+name+" is not number!")
+ # if (para['MaxValue'] is not None) and (self.__is_number(para['MaxValue']) is True):
+ # value = para['ParaValue']
+ # max = para['MaxValue']
+ # if float(value) > float(max):
+ # msg = "para:" + name + " > max, para:" + value + "max:" + max
+ # raise Exception(msg)
+ # if (para['MinValue'] is not None) and (self.__is_number(para['MinValue']) is True):
+ # value = para['ParaValue']
+ # min = para['MinValue']
+ # if float(value) < float(min):
+ # msg = "para:" + name + " < min, para:" + value + "min:" + min
+ # raise Exception(msg)
+
+ self.__ParameterDic[name] = para['ParaValue']
+ return True, self.__ParameterDic
+
+ def check_output_paras(self, output_para_names):
+ """
+        :param output_para_names: list of parameter names to check [name1, name2, ...]
+        :return: True or False
+ """
+ workspace_path = self.__alg_xml_handle.get_workspace_path()
+ output_paras = self.__alg_xml_handle.get_output_paras()
+
+ for name in output_para_names:
+ para = output_paras[name]
+ #print(para)
+ if para is None:
+ msg = "check para:" + name + " is failed!" + "para is None!"
+ raise Exception(msg)
+
+ if para['ParaType'] == 'File':
+ if para['DataType'] == 'tif':
+ para_path = workspace_path + para['ParaValue']
+ para_dir = os.path.split(para_path)
+ flag_isdir = os.path.isdir(para_dir[0])
+ flag_istif = (para_dir[1].split(".", 1)[1] == "tif")
+                    if not (flag_isdir and flag_istif):
+ msg = "check para:" + name + " is failed!" + para_path + "is invalid!"
+ raise Exception(msg)
+
+ if para['DataType'] == 'File':
+ para_path = workspace_path + para['ParaValue']
+ if os.path.isdir(para_path) is False:
+ os.makedirs(para_path)
+ if os.path.isdir(para_path) is False:
+ msg = "check para:" + name + " is failed!" + para_path + "is invalid!"
+ raise Exception(msg)
+ return True
+
+ @staticmethod
+ def __is_number(str_num):
+ """
+        :param str_num: string to check as a float/double literal
+ :return: True or False
+ """
+ if str_num[0] == '-':
+ str_num = str_num[1:]
+ pattern = re.compile(r'(.*)\.(.*)\.(.*)')
+ if pattern.match(str_num):
+ return False
+ return str_num.replace(".", "").isdigit()
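+
+    # Examples of the check above (hypothetical inputs):
+    #   __is_number("-3.14") -> True   (leading '-' stripped, a single '.' is allowed)
+    #   __is_number("1.2.3") -> False  (two dots are rejected by the regex)
+    #   __is_number("12e3")  -> False  (scientific notation is not accepted)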
+
+ def __check_tif(self, filename):
+ """
+        :param filename: path of the file
+ :return: True or False
+ """
+ if self.imageHandler.get_dataset(filename) is None:
+ msg = "read tif error!,finame: " + filename
+ raise Exception(msg)
+ return True
+
+
+class InitPara:
+ def __init__(self,debug = False):
+ self._debug = debug
+
+ @staticmethod
+ def init_processing_paras(input_paras, out_path):
+ """
+        :param input_paras: dict of parameter dicts, one per input product
+        :param out_path: directory used when unpacking archives
+ """
+ processing_paras = {}
+ for name in input_paras:
+ para = input_paras[name]
+ if para is None:
+ logger.error(name + "is None!")
+ return False
+
+ if para['ParaType'] == 'File':
+ if para['DataType'] == 'tif' or para['DataType'] == 'csv':
+ para_value_list = para['ParaValue'].split(";")
+ if len(para_value_list) == 1:
+ para_path = para['ParaValue']
+ if para_path != 'empty' and para_path != '':
+ processing_paras.update({name: para_path})
+ else:
+ for n, para_value in zip(range(len(para_value_list)), para_value_list):
+ processing_paras.update({name+str(n): para_value})
+ elif para['DataType'] == 'zip':
+ # temp_para = para['ParaValue'].split(".")[0]
+ para_value_list = para['ParaValue'].split(";")
+ if len(para_value_list) == 1:
+ para_path = para['ParaValue']
+ if para_path != 'empty' and para_path != '':
+ file_path = BlockProcess.unzip_file(para_path, out_path)
+ processing_paras.update({name: file_path})
+ else:
+ for n, para_value_zip in zip(range(len(para_value_list)), para_value_list):
+ file_path = BlockProcess.unzip_file(para_value_zip, out_path)
+ processing_paras.update({name+str(n): file_path})
+ elif para['DataType'] == 'tar.gz':
+ paths = para['ParaValue'].split(';')
+ for n, path in zip(range(len(paths)), paths):
+ processing_paras.update({'sar_path' + str(n): path})
+ else:
+ para_path = para['ParaValue']
+ processing_paras.update({name: para_path})
+
+ elif para['ParaType'] == 'Value':
+ if para['DataType'] == 'float':
+ value = float(para['ParaValue'])
+ elif para['DataType'] == 'int':
+ value = int(para['ParaValue'])
+                else:  # default: string
+ value = para['ParaValue']
+ processing_paras.update({name: value})
+ elif para['ParaType'] == 'String':
+ value = para['ParaValue']
+ if value == 'empty':
+ continue
+ else:
+ processing_paras.update({name: value})
+ return processing_paras
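+
+    # A hypothetical example of the mapping above: an input parameter
+    #   {'DEM': {'ParaType': 'File', 'DataType': 'tif', 'ParaValue': 'D:/in/a.tif;D:/in/b.tif'}}
+    # yields {'DEM0': 'D:/in/a.tif', 'DEM1': 'D:/in/b.tif'}, while a 'Value'/'float'
+    # parameter is stored as {name: float(ParaValue)}.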
+
+    # helpers that collect files inside a directory
+
+ @staticmethod
+ def get_tif_paths(file_dir,name):
+ in_tif_paths = []
+ if os.path.exists(file_dir + name + '\\'):
+ in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tiff')))
+ if in_tif_paths1 != []:
+ in_tif_paths = in_tif_paths + in_tif_paths1
+ else:
+ in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(file_dir, '*.tiff')))
+            if in_tif_paths1 != []:
+ in_tif_paths = in_tif_paths + in_tif_paths1
+ return in_tif_paths
+
+ @staticmethod
+ def get_tif_paths_new(file_dir, name):
+ in_tif_paths = []
+ if os.path.exists(file_dir + name + '\\'):
+ in_tif_paths = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(file_dir + name + '\\', '*.tiff')))
+ if in_tif_paths1 != []:
+ in_tif_paths = in_tif_paths + in_tif_paths1
+ else:
+ in_tif_paths = list(glob.glob(os.path.join(file_dir, '*.tif')))
+ in_tif_paths1 = list(glob.glob(os.path.join(file_dir, '*.tiff')))
+ if len(in_tif_paths) == 0:
+ in_tif_paths = in_tif_paths + in_tif_paths1
+ return in_tif_paths
+
+ @staticmethod
+ def get_polarization_mode(in_tif_paths):
+ pol_dic = {}
+ pola_list = [0,0,0,0]
+ for in_tif_path in in_tif_paths:
+            # determine the polarization from the file name
+ if '_HH_' in os.path.basename(in_tif_path):
+ pol_dic.update({'HH': in_tif_path})
+ pola_list[0] = 1
+ elif '_HV_' in os.path.basename(in_tif_path):
+ pol_dic.update({'HV': in_tif_path})
+ pola_list[1] = 1
+ elif '_VH_' in os.path.basename(in_tif_path):
+ pol_dic.update({'VH': in_tif_path})
+ pola_list[2] = 1
+ elif '_VV_' in os.path.basename(in_tif_path):
+ pol_dic.update({'VV': in_tif_path})
+ pola_list[3] = 1
+ elif 'LocalIncidenceAngle' in os.path.basename(in_tif_path) or 'ncidenceAngle' in os.path.basename(in_tif_path):
+ pol_dic.update({'LocalIncidenceAngle': in_tif_path})
+ elif 'inc_angle' in os.path.basename(in_tif_path):
+ pol_dic.update({'inc_angle': in_tif_path})
+ elif 'inci_Angle-ortho' in os.path.basename(in_tif_path):
+ pol_dic.update({'inci_Angle-ortho': in_tif_path})
+ elif 'LocalincidentAngle-ortho' in os.path.basename(in_tif_path):
+ pol_dic.update({'LocalIncidentAngle-ortho': in_tif_path})
+ elif 'ori_sim' in os.path.basename(in_tif_path):
+ pol_dic.update({'ori_sim': in_tif_path})
+ elif 'sim_ori' in os.path.basename(in_tif_path):
+ pol_dic.update({'sim_ori': in_tif_path})
+ pol_dic.update({'pola':pola_list})
+ return pol_dic
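+
+    # Example of the filename-based routing above (hypothetical names):
+    #   ..._HH_....tif  -> {'HH': path} and pola_list[0] = 1
+    #   ...LocalIncidenceAngle....tif -> {'LocalIncidenceAngle': path}
+    # 'pola' is a 4-flag list [HH, HV, VH, VV] recording which channels were found.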
+
+ @staticmethod
+ def get_meta_paths(file_dir, name):
+ meta_xml_paths = []
+ if os.path.exists(file_dir + name + '\\'):
+ meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.meta.xml')))
+ meta_xml_paths += list(glob.glob(os.path.join(file_dir + name, '*.xml')))
+ else:
+ meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.meta.xml')))
+ meta_xml_paths += list(glob.glob(os.path.join(file_dir, '*.xml')))
+ if meta_xml_paths is None or meta_xml_paths == []:
+            raise Exception('no .meta.xml found in path: ' + file_dir + '\\')
+ return meta_xml_paths
+
+ @staticmethod
+ def get_incidence_xml_paths(file_dir, name):
+ meta_xml_paths = []
+ if os.path.exists(file_dir + name + '\\'):
+ meta_xml_paths = list(glob.glob(os.path.join(file_dir + name, '*.incidence.xml')))
+ else:
+ meta_xml_paths = list(glob.glob(os.path.join(file_dir, '*.incidence.xml')))
+ if meta_xml_paths is None or meta_xml_paths == []:
+            raise Exception('no .incidence.xml found in path: ' + file_dir + '\\')
+ return meta_xml_paths
+
+
+ @staticmethod
+ def get_meta_dic(meta_xml_paths, name):
+ para_dic = {}
+ for mete_path in meta_xml_paths:
+ if name in mete_path:
+ para_dic.update({'META': mete_path})
+        if para_dic == {}:
+ raise Exception('the name of .meta.xml is error!')
+ return para_dic
+
+ @staticmethod
+ def get_incidence_dic(meta_xml_paths, name):
+ para_dic = {}
+ for mete_path in meta_xml_paths:
+ if name in mete_path:
+ para_dic.update({'Incidence': mete_path})
+        if para_dic == {}:
+ raise Exception('the name of .incidence.xml is error!')
+ return para_dic
+
+
+ @staticmethod
+ def get_meta_dic_new(meta_xml_paths, name):
+ para_dic = {}
+ for mete_path in meta_xml_paths:
+ if name in os.path.basename(mete_path):
+ para_dic.update({'META': mete_path})
+ else:
+ para_dic.update({'Origin_META': mete_path})
+        if para_dic == {}:
+ raise Exception('the name of .meta.xml is error!')
+ return para_dic
+
+ @staticmethod
+ def get_meta_dic_VP(meta_xml_paths, name):
+ para_dic = {}
+ for mete_path in meta_xml_paths:
+ if name in os.path.basename(mete_path):
+ para_dic.update({name + '_META': mete_path})
+ else:
+ para_dic.update({name + '_Origin_META': mete_path})
+        if para_dic == {}:
+ raise Exception('the name of .meta.xml is error!')
+ return para_dic
+
+ def get_mult_tar_gz_inf(self,tar_gz_path, workspace_preprocessing_path):
+ para_dic = {}
+        # str.rstrip strips a character set, not a suffix, so remove '.tar.gz' explicitly
+        name = os.path.split(tar_gz_path)[1]
+        if name.endswith('.tar.gz'):
+            name = name[:-len('.tar.gz')]
+ para_dic.update({'name': name})
+
+ file_dir = os.path.join(workspace_preprocessing_path, name + '\\')
+        if not self._debug:
+            fileHandle().de_targz(tar_gz_path, file_dir)
+        # metadata file dict
+        para_dic.update(InitPara.get_meta_dic_VP(InitPara.get_meta_paths(file_dir, name), name))
+        # tif path dict
+ pol_dic = InitPara.get_polarization_mode(InitPara.get_tif_paths(file_dir, name))
+ parameter_path = os.path.join(file_dir, "orth_para.txt")
+ para_dic.update({name + "paraMeter": parameter_path})
+ for key, in_tif_path in pol_dic.items():
+ para_dic.update({name + '_' + key: in_tif_path})
+ return para_dic
+
+
+ def get_mult_tar_gz_infs(self,processing_paras, workspace_preprocessing_path):
+ tif_names_list = []
+ tar_inf_dic = {}
+ for key, value in processing_paras.items():
+ if 'sar_path' in key:
+ para_dic = self.get_mult_tar_gz_inf(value, workspace_preprocessing_path)
+ tif_names_list.append(para_dic['name'])
+ para_dic.pop('name')
+ tar_inf_dic.update(para_dic)
+ tar_inf_dic.update({'name_list': tif_names_list})
+
+ return tar_inf_dic
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/algorithm/xml/AnalysisXml.py b/Ortho-NoS1GBM/tool/algorithm/xml/AnalysisXml.py
new file mode 100644
index 0000000..abfb611
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/xml/AnalysisXml.py
@@ -0,0 +1,142 @@
+from xml.etree.ElementTree import ElementTree
+import os
+
+import numpy as np
+
+
+class DictXml:
+ def __init__(self, xml_path):
+ self.xml_path = xml_path
+ self.__tree = ElementTree()
+ self.__root = None
+ self.init_xml()
+
+ def init_xml(self):
+ self.__root = self.__tree.parse(self.xml_path)
+ if self.__root is None:
+ raise Exception("get root failed")
+
+ def get_extend(self):
+ productInfo = self.__root.find("imageinfo")
+ if productInfo is None:
+ raise Exception("get imageInfo failed")
+
+ corner = productInfo.find("corner")
+ if corner is None:
+ raise Exception("get corner failed")
+
+ topLeft = corner.find("topLeft")
+ if topLeft is None:
+ raise Exception("get topLeft failed")
+
+ topRight = corner.find("topRight")
+ if topRight is None:
+ raise Exception("get topRight failed")
+
+ bottomLeft = corner.find("bottomLeft")
+ if bottomLeft is None:
+ raise Exception("get bottomLeft failed")
+
+ bottomRight = corner.find("bottomRight")
+ if bottomRight is None:
+ raise Exception("get bottomRight failed")
+
+ lons = [float(topLeft.find("longitude").text), float(topRight.find("longitude").text), float(bottomLeft.find("longitude").text), float(bottomRight.find("longitude").text)]
+ lats = [float(topLeft.find("latitude").text), float(topRight.find("latitude").text), float(bottomLeft.find("latitude").text), float(bottomRight.find("latitude").text)]
+ lon_min = np.min(lons)
+ lon_max = np.max(lons)
+ lat_min = np.min(lats)
+ lat_max = np.max(lats)
+
+ point_upleft = [float(topLeft.find("longitude").text), float(topLeft.find("latitude").text)]
+ point_upright = [float(topRight.find("longitude").text), float(topRight.find("latitude").text)]
+ point_downleft = [float(bottomLeft.find("longitude").text), float(bottomLeft.find("latitude").text)]
+ point_downright = [float(bottomRight.find("longitude").text), float(bottomRight.find("latitude").text)]
+ scopes = [point_upleft, point_upright, point_downleft, point_downright]
+
+ point_upleft_buf = [lon_min - 0.6, lat_max + 0.6]
+ point_upright_buf = [lon_max + 0.6, lat_max + 0.6]
+ point_downleft_buf = [lon_min - 0.6, lat_min - 0.6]
+ point_downright_buf = [lon_max + 0.6, lat_min - 0.6]
+        scopes_buf = [point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf]
+ return scopes, scopes_buf
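+
+# The XML layout get_extend expects, reconstructed from the find() calls above
+# (real GF3 .meta.xml files may carry additional fields):
+# <root>
+#   <imageinfo>
+#     <corner>
+#       <topLeft><latitude>31.4</latitude><longitude>118.9</longitude></topLeft>
+#       <topRight>...</topRight>
+#       <bottomLeft>...</bottomLeft>
+#       <bottomRight>...</bottomRight>
+#     </corner>
+#   </imageinfo>
+# </root>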
+
+
+class xml_extend:
+ def __init__(self, xml_path):
+ self.xml_path = xml_path
+ self.__tree = ElementTree()
+ self.__root = None
+ self.init_xml()
+
+ def init_xml(self):
+ self.__root = self.__tree.parse(self.xml_path)
+ if self.__root is None:
+ raise Exception("get root failed")
+
+ def get_extend(self):
+ ProductBasicInfo = self.__root.find("ProductBasicInfo")
+ if ProductBasicInfo is None:
+ raise Exception("get ProductBasicInfo failed")
+
+ SpatialCoverageInformation = ProductBasicInfo.find("SpatialCoverageInformation")
+ if SpatialCoverageInformation is None:
+ raise Exception("get SpatialCoverageInformation failed")
+
+ TopLeftLongitude = SpatialCoverageInformation.find("TopLeftLongitude")
+ if TopLeftLongitude is None:
+ raise Exception("get TopLeftLongitude failed")
+
+ TopLeftLatitude = SpatialCoverageInformation.find("TopLeftLatitude")
+ if TopLeftLatitude is None:
+ raise Exception("get TopLeftLatitude failed")
+
+ TopRightLongitude = SpatialCoverageInformation.find("TopRightLongitude")
+ if TopRightLongitude is None:
+ raise Exception("get TopRightLongitude failed")
+
+ TopRightLatitude = SpatialCoverageInformation.find("TopRightLatitude")
+ if TopRightLatitude is None:
+ raise Exception("get TopRightLatitude failed")
+
+ BottomRightLongitude = SpatialCoverageInformation.find("BottomRightLongitude")
+ if BottomRightLongitude is None:
+ raise Exception("get BottomRightLongitude failed")
+
+ BottomRightLatitude = SpatialCoverageInformation.find("BottomRightLatitude")
+ if BottomRightLatitude is None:
+ raise Exception("get BottomRightLatitude failed")
+
+ BottomLeftLongitude = SpatialCoverageInformation.find("BottomLeftLongitude")
+ if BottomLeftLongitude is None:
+ raise Exception("get BottomLeftLongitude failed")
+
+ BottomLeftLatitude = SpatialCoverageInformation.find("BottomLeftLatitude")
+ if BottomLeftLatitude is None:
+ raise Exception("get BottomLeftLatitude failed")
+
+ point_upleft = [float(TopLeftLongitude.text), float(TopLeftLatitude.text)]
+ point_upright = [float(TopRightLongitude.text), float(TopRightLatitude.text)]
+ point_downleft = [float(BottomLeftLongitude.text), float(BottomLeftLatitude.text)]
+ point_downright = [float(BottomRightLongitude.text), float(BottomRightLatitude.text)]
+ scopes = [point_upleft, point_upright, point_downleft, point_downright]
+
+ point_upleft_buf = [float(TopLeftLongitude.text) - 0.6, float(TopLeftLatitude.text) + 0.6]
+ point_upright_buf = [float(TopRightLongitude.text) + 0.6, float(TopRightLatitude.text) + 0.6]
+ point_downleft_buf = [float(BottomLeftLongitude.text) - 0.6, float(BottomLeftLatitude.text) - 0.6]
+ point_downright_buf = [float(BottomRightLongitude.text) + 0.6, float(BottomRightLatitude.text) - 0.6]
+ scopes_buf = [point_upleft_buf, point_upright_buf, point_downleft_buf, point_downright_buf]
+        return scopes, scopes_buf
+
+
+if __name__ == '__main__':
+ xml_path = r'E:\MicroWorkspace\GF3A_nanjing\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422.meta.xml'
+ scopes, scopes_buf = DictXml(xml_path).get_extend()
+ print(scopes)
+ print(scopes_buf)
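+    # A hedged sketch of reading a product-level XML with xml_extend instead;
+    # the path below is an assumption, so the calls stay commented out:
+    # product_xml = r'E:\MicroWorkspace\product.meta.xml'
+    # scopes2, scopes2_buf = xml_extend(product_xml).get_extend()
+    # print(scopes2)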
diff --git a/Ortho-NoS1GBM/tool/algorithm/xml/CreatMetafile.py b/Ortho-NoS1GBM/tool/algorithm/xml/CreatMetafile.py
new file mode 100644
index 0000000..7fd728e
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/xml/CreatMetafile.py
@@ -0,0 +1,319 @@
+"""
+@Project :microproduct
+@File :CreatMetafile.py
+@Function :Generate the product metafile
+
+@Author :LMM
+@Date :2021/10/19 14:39
+@Version :1.0.0
+"""
+from xml.etree.ElementTree import ElementTree, Element
+import xml.etree.ElementTree as ET
+
+
+class CreateMetafile:
+ """
+ 生成元文件案例
+ """
+ def __init__(self, input_image_path, input_para_file, an_li_path, path):
+ """
+ input_image_path: 影像头文件
+ input_para_file: 配置文件
+ an_li_path:案例路径
+ path:保存路径
+ """
+        self.input_image_path = input_image_path
+        self.input_para_file = input_para_file
+        self.an_li_path = an_li_path
+        self.path = path
+
+    def create_xml(self):
+        """
+        Read the image header file, keep only the nodes from the top of the
+        file down to the "sensor" node, and return the index of "sensor".
+        """
+        tree = ElementTree()
+        tree.parse(self.input_image_path)  # image header file
+        root = tree.getroot()
+        # 1. Keep only the nodes up to and including "sensor"
+        element_trees = list(root)
+        count = 0
+        count_01 = 1
+        for element in element_trees:
+            count = count + 1
+            if element.tag == "sensor":
+                element.tail = "\n\n\t"
+                count_01 = count - 1
+        for i in range(0, len(element_trees)):
+            if i > count_01:
+                root.remove(element_trees[i])
+        # 2. Keep only the "satellite", "orbitType", "attiType", "Direction",
+        #    "ReceiveTime" and "sensor" nodes
+        element_trees2 = list(root)
+        for i in element_trees2:
+            if i.tag not in ["satellite", "orbitType", "attiType", "Direction", "ReceiveTime", "sensor"]:
+                root.remove(i)
+        # 3. Locate the "sensor" node again and return its index
+        count2 = 0
+        count2_01 = 1
+        element_trees3 = list(root)
+        for element in element_trees3:
+            count2 = count2 + 1
+            if element.tag == "sensor":
+                element.tail = "\n\n\t"
+                count2_01 = count2 - 1
+        tree.write(self.path, encoding="utf-8", xml_declaration=True)
+        return count2_01
+
+    @staticmethod
+    def create_node(tag, property_map, content):
+        """
+        Build a new element.
+        :param tag: element tag
+        :param property_map: attribute name/value map
+        :param content: text content between the element's tags
+        :return: the new element
+        """
+        element = Element(tag, property_map)
+        element.text = content
+        element.tail = "\n\t"
+        return element
+
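+    # For reference, a sketch of what create_node produces: create_node("id", {}, "101")
+    # serializes roughly as <id>101</id>, followed by the "\n\t" tail that keeps the
+    # written XML indented.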
+    def add_standard_xml(self, num):
+        """
+        Copy the template nodes directly into the metafile.
+        """
+        tree = ElementTree()
+        tree.parse(self.path)  # metafile under construction
+        root = tree.getroot()
+
+        tree2 = ElementTree()
+        tree2.parse(self.an_li_path)  # template file
+        root2 = tree2.getroot()
+
+        productinfo = root2.find("productinfo")
+        root.insert(num + 1, productinfo)
+        processinfo = root2.find("processinfo")
+        root.insert(num + 2, processinfo)
+        tree.write(self.path, encoding="utf-8", xml_declaration=True)
+
+    def add_img_xml(self, num, SrcImageName):
+        """Add the image information nodes."""
+        tree = ElementTree()
+        tree.parse(self.path)
+        root = tree.getroot()
+
+        a = self.create_node("SrcImageName", {"desc": "原始影像名称"}, SrcImageName)
+        root.insert(num + 1, a)
+        b = self.create_node("AlgCompt", {"desc": "算法信息"}, "\n\t\t")
+        b.tail = "\n\n\t"
+        root.insert(num + 2, b)
+        tree.write(self.path, encoding="utf-8", xml_declaration=True)
+
+    def add_info_xml(self):
+        """
+        Copy selected nodes from the configuration file into the metafile
+        ("AlgorithmName", "ChsName", "AlgorithmDesc", "Version",
+        "AlgorithmClass", "AlgorithmLevel", "AlgoirthmID", "Author").
+        """
+        tree = ElementTree()
+        tree.parse(self.input_para_file)  # configuration file
+        root = tree.getroot()
+
+        tree2 = ElementTree()
+        tree2.parse(self.path)
+        root2 = tree2.getroot()
+        AlgCompt = root2.find("AlgCompt")
+
+        a = root.find("AlgCompt")
+
+        element_trees = list(a)
+        for element in element_trees:
+            if element.tag in ["AlgorithmName", "ChsName", "AlgorithmDesc", "Version",
+                               "AlgorithmClass", "AlgorithmLevel", "AlgoirthmID", "Author"]:
+                element.tail = "\n\t\t"
+                AlgCompt.append(element)
+                if element.tag == "Author":
+                    element.tail = "\n\t"
+
+        tree2.write(self.path, encoding="utf-8", xml_declaration=True)
+
+    def add_class_info(self, type_id_name, type_id_parent=None):
+        """
+        In the metafile:
+        1. (optionally) drop the productinfo/productType node;
+        2. insert the land-cover class information.
+        Expected output, reconstructed from the nodes created below:
+        <LandCoverClass>
+            <Class>
+                <parent_id>1</parent_id>
+                <id>101</id>
+                <covernm>耕地</covernm>
+            </Class>
+            <Class>
+                <parent_id>5</parent_id>
+                <id>502</id>
+                <covernm>草地</covernm>
+            </Class>
+        </LandCoverClass>
+        """
+        tree = ElementTree()
+        tree.parse(self.path)  # metafile under construction
+        root = tree.getroot()
+        productinfo = root.find("productinfo")
+        # element_trees = list(productinfo)
+        # for element in element_trees:
+        #     if element.tag == "productType":
+        #         productinfo.remove(element)  # drop "productType"
+        productinfo.find("productConsumeTime").tail = "\n\t\t"  # anchor after productConsumeTime
+        b = self.create_node("LandCoverClass", {}, "\n\t\t\t")
+        b.tail = "\n\t\t"
+        productinfo_count = 0
+        for i in list(productinfo):
+            productinfo_count = productinfo_count + 1
+            if i.tag == "productConsumeTime":
+                break
+        productinfo.insert(productinfo_count, b)  # insert LandCoverClass
+
+        class_num = 1
+        for key, value in type_id_name.items():
+            LandCoverClass = productinfo.find("LandCoverClass")
+            name = "Class" + str(class_num)
+            c = self.create_node(name, {}, "\n\t\t\t\t")
+            if class_num != len(type_id_name):
+                c.tail = "\n\t\t\t"
+            else:
+                c.tail = "\n\t\t"
+            LandCoverClass.append(c)  # append to LandCoverClass
+
+            if type_id_parent is not None:
+                parent_id = self.create_node("parent_id", {}, type_id_parent[key])
+                parent_id.tail = "\n\t\t\t\t"
+                LandCoverClass.find(name).append(parent_id)
+            id = self.create_node("id", {}, str(key))
+            id.tail = "\n\t\t\t\t"
+            LandCoverClass.find(name).append(id)
+            covernm = self.create_node("covernm", {}, value)
+            covernm.tail = "\n\t\t\t"
+            LandCoverClass.find(name).append(covernm)
+            class_num = class_num + 1
+        tree.write(self.path, encoding="utf-8", xml_declaration=True)
+
+    def rewrite_name(self):
+        """
+        Rename the numbered class tags:
+        Class1 -> Class
+        Class2 -> Class
+        """
+        tree = ElementTree()
+        tree.parse(self.path)  # metafile under construction
+        root = tree.getroot()
+        productinfo = root.find("productinfo")
+        LandCoverClass = productinfo.find("LandCoverClass")
+        element_trees = list(LandCoverClass)
+        for element in element_trees:
+            element.tag = "Class"
+        tree.write(self.path, encoding="utf-8", xml_declaration=True)
+
+    def OrthoInsertNode(self):
+        """Ortho-specific: insert the l1aInfo node."""
+        tree = ElementTree()
+        tree.parse(self.path)  # metafile under construction
+        root = tree.getroot()
+
+        # insert the node after "sensor"
+        count2 = 0
+        count2_01 = 1
+        element_trees3 = list(root)
+        for element in element_trees3:
+            count2 = count2 + 1
+            if element.tag == "sensor":
+                element.tail = "\n\n\t"
+                count2_01 = count2 - 1
+        b = self.create_node("l1aInfo", {}, "\n\t\t")
+        b.tail = "\n\n\t"
+        root.insert(count2_01 + 1, b)
+
+        # locate the inserted node
+        node_l1aInfo = root.find("l1aInfo")
+
+        img_tree = ElementTree()
+        img_tree.parse(self.input_image_path)  # image header file
+        img_root = img_tree.getroot()
+
+        node_imageinfo = img_root.find("imageinfo")
+        node_processinfo = img_root.find("processinfo")
+
+        ele_node_imageinfo = list(node_imageinfo)
+        ele_node_processinfo = list(node_processinfo)
+
+        for i in ele_node_imageinfo:
+            if i.tag == "QualifyValue":
+                i.tail = "\n\t\t"
+                node_l1aInfo.append(i)
+
+        for j in ele_node_processinfo:
+            if j.tag == "CalibrationConst":
+                j.tail = "\n\t"  # tail position of the last child
+                node_l1aInfo.append(j)
+        tree.write(self.path, encoding="utf-8", xml_declaration=True)
+
+    def process(self, SrcImageName):
+        """
+        Call this when land cover is not involved.
+        """
+        if self.input_image_path is None:
+            product = ET.Element("product")  # root tag = "product"
+            product.text = "\n\t"
+            tree = ET.ElementTree(product)
+            tree.write(self.path)
+            count = 0
+            count_2 = -1
+        else:
+            count = self.create_xml()
+            count_2 = count
+        self.add_standard_xml(count)
+        self.add_img_xml(count_2, SrcImageName)
+        self.add_info_xml()
+
+    def process2(self, type_id_name, type_id_parent, SrcImageName):
+        """
+        Call this when land cover is involved, e.g.
+        type_id_name = {"101": "耕地", "502": "草地"}
+        type_id_parent = {"101": "1", "502": "5"}
+        """
+        count = self.create_xml()
+        self.add_standard_xml(count)
+        self.add_img_xml(count, SrcImageName)
+        self.add_info_xml()
+        self.add_class_info(type_id_name, type_id_parent)
+        self.rewrite_name()
+
+    def process3(self, SrcImageName):
+        """
+        Called by the Ortho workflow.
+        """
+        if self.input_image_path is None:
+            product = ET.Element("product")  # root tag = "product"
+            product.text = "\n\t"
+            tree = ET.ElementTree(product)
+            tree.write(self.path)
+            count = 0
+        else:
+            count = self.create_xml()
+        self.add_standard_xml(count)
+        self.add_img_xml(count, SrcImageName)
+        self.add_info_xml()
+        self.OrthoInsertNode()
+
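+# A minimal usage sketch; every path and the image name below are assumptions,
+# so the calls stay commented out:
+# meta = CreateMetafile(r'D:\img.meta.xml', r'D:\AlgXML.xml', r'D:\template.xml', r'D:\out.meta.xml')
+# meta.process3('GF3_SAMPLE_IMAGE.tiff')  # the Ortho flavour also inserts the l1aInfo node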
diff --git a/Ortho-NoS1GBM/tool/algorithm/xml/CreateMetaDict.py b/Ortho-NoS1GBM/tool/algorithm/xml/CreateMetaDict.py
new file mode 100644
index 0000000..f9791c5
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/algorithm/xml/CreateMetaDict.py
@@ -0,0 +1,246 @@
+import json
+from xml.etree.ElementTree import ElementTree, Element
+import shutil
+
+import xmltodict
+
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.algorithm.algtools.PreProcess import PreProcess as pp
+from osgeo import gdal
+import numpy as np
+import datetime
+import os
+import glob
+os.environ['PROJ_LIB'] = r"E:\soft\Anaconda\envs\micro\Lib\site-packages\osgeo\data\proj"
+
+class CreateMetaDict:
+ def __init__(self, image_path, origin_xml, pack_path, out_path1, out_path2):
+ self.ImageHandler = ImageHandler()
+ self.image_path = image_path
+ self.origin_xml = origin_xml
+ self.pack_path = pack_path
+ self.file_size = self.get_file_size()
+ self.out_path1 = out_path1
+ self.out_path2 = out_path2
+ self.timeDict = self.get_productTime()
+ pass
+
+    def calu_nature(self):
+        """
+        Collect the values for the productinfo node into a dictionary.
+        image_path: image path
+        out_path1: output path of the geographic-to-planar transform
+        out_path2: output path of the planar-to-geographic transform
+        """
+
+        para_dict = {}
+
+        proj = self.ImageHandler.get_projection(self.image_path)  # if the image is in a projected CRS, convert it to a geographic CRS first
+        keyword = proj.split("[", 2)[0]
+        if keyword == "GEOGCS":
+            pass  # already a geographic CRS
+        elif keyword == "PROJCS":
+            pp.trans_projcs2geogcs(self.out_path2, self.image_path)
+            image_path = self.out_path2
+        elif len(keyword) == 0 or keyword.strip() == "" or keyword.isspace() is True:
+            raise Exception('image projection is missing!')
+
+        pp.trans_geogcs2projcs(self.out_path1, self.image_path)  # reproject: geographic to planar CRS
+        imageinfo_widthspace = self.ImageHandler.get_geotransform(self.out_path1)[1]  # resolution after projection
+
+        para_dict.update({"imageinfo_ProductResolution": imageinfo_widthspace})
+
+        para_dict.update({"imageinfo_ProductFormat": "GEOTIFF"})
+        para_dict.update({"imageinfo_CompressionMethod": "None"})
+        para_dict.update({"imageinfo_ProductSize": str(self.file_size) + "MB"})  # TODO: total product size
+
+ get_scope = self.ImageHandler.get_scope(self.image_path)
+ point_upleft, point_upright, point_downleft, point_downright = get_scope[0], get_scope[1], get_scope[2], get_scope[3]
+ para_dict.update({"SpatialCoverageInformation_TopLeftLatitude": point_upleft[1]})
+ para_dict.update({"SpatialCoverageInformation_TopLeftLongitude": point_upleft[0]})
+ para_dict.update({"SpatialCoverageInformation_TopRightLatitude": point_upright[1]})
+ para_dict.update({"SpatialCoverageInformation_TopRightLongitude": point_upright[0]})
+ para_dict.update({"SpatialCoverageInformation_BottomLeftLatitude": point_downleft[1]})
+ para_dict.update({"SpatialCoverageInformation_BottomLeftLongitude": point_downleft[0]})
+ para_dict.update({"SpatialCoverageInformation_BottomRightLatitude": point_downright[1]})
+ para_dict.update({"SpatialCoverageInformation_BottomRightLongitude": point_downright[0]})
+ longitude_max = np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).max()
+ longitude_min = np.array([point_upleft[0], point_upright[0], point_downleft[0], point_downright[0]]).min()
+ latitude_max = np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).max()
+ latitude_min = np.array([point_upleft[1], point_upright[1], point_downleft[1], point_downright[1]]).min()
+ imageinfo_center_latitude = (latitude_max + latitude_min) / 2
+ imageinfo_center_longitude = (longitude_max + longitude_min) / 2
+ para_dict.update({"SpatialCoverageInformation_CenterLatitude": imageinfo_center_latitude})
+ para_dict.update({"SpatialCoverageInformation_CenterLongitude": imageinfo_center_longitude})
+
+ para_dict.update({"TimeCoverageInformation_StartTime": self.timeDict.get("startTime")})
+ para_dict.update({"TimeCoverageInformation_CenterTime": self.timeDict.get("centerTime")})
+ para_dict.update({"TimeCoverageInformation_EndTime": self.timeDict.get("endTime")})
+
+ para_dict.update({"CoordinateReferenceSystemInformation_EarthEllipsoid": "WGS84"})
+ para_dict.update({"CoordinateReferenceSystemInformation_MapProjection": "UTM"})
+ para_dict.update({"CoordinateReferenceSystemInformation_ZoneNo": "None"})
+
+ para_dict.update({"MetaInfo_Unit": "none"}) # 设置单位
+ para_dict.update({"MetaInfo_UnitDes": "无量纲"}) # 设置单位
+
+ # 补充ProductProductionInfo节信息
+ data_name = os.path.basename(self.image_path)
+ strs = data_name.split("_")
+ para_dict.update({"DataSources_DataSource_Satellite": strs[0]})
+ para_dict.update({"DataSources_DataSource_Sensor": strs[0]})
+
+ para_dict.update({"ObservationGeometry_SatelliteAzimuth": "None"})
+ para_dict.update({"ObservationGeometry_SatelliteRange": "None"})
+
+ para_dict.update({"ProductProductionInfo_BandSelection": "1"})
+ para_dict.update({"ProductProductionInfo_DataSourceDescription": "None"})
+ para_dict.update({"ProductProductionInfo_DataSourceProcessingDescription": "参考产品介绍PDF"})
+ productGentime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ para_dict.update({"ProductProductionInfo_ProductionDate": productGentime})
+ para_dict.update({"ProductProductionInfo_AuxiliaryDataDescription": ""})
+
+ # para_dict.update({"ProductPublishInfo_Processor": "德清院"}) # 生产者
+ # para_dict.update({"ProductPublishInfo_DistributionUnit": "none"}) # 分发单位
+ # para_dict.update({"ProductPublishInfo_ContactInformation": "none"}) # 联系信息
+ return para_dict
+
+ def get_productTime(self):
+ time_dict = {}
+ tree = ElementTree()
+ tree.parse(self.origin_xml)
+ root = tree.getroot()
+
+ platform = root.find("platform")
+ if platform is None:
+ centerTime = " "
+ else:
+ centerTime = platform.find("CenterTime").text.split(".")[0]
+
+ productInfo = root.find("imageinfo")
+ imagingTime = productInfo.find("imagingTime")
+ if imagingTime is None:
+ startTime = " "
+ endTime = " "
+ else:
+ startTime = imagingTime.find("start").text.split(".")[0]
+ endTime = imagingTime.find("end").text.split(".")[0]
+
+ time_dict.update({"startTime": startTime})
+ time_dict.update({"centerTime": centerTime})
+ time_dict.update({"endTime": endTime})
+ return time_dict
+
+    def get_file_size(self):
+        in_tif_paths = list(glob.glob(os.path.join(self.pack_path, '*.tif')))
+        in_tif_paths1 = list(glob.glob(os.path.join(self.pack_path, '*.tiff')))
+        in_tif_paths += in_tif_paths1
+        size = 0
+        for file in in_tif_paths:
+            fsize = os.path.getsize(file)  # size in bytes
+            size += fsize
+        return round(size / float(1024 * 1024), 2)
+
+
+class OrthoAzimuth:
+
+ @staticmethod
+ def FindInfomationFromJson(HeaderFile_dom_json, node_path_list):
+ """
+ 在Json文件中,按照指定路径解析出制定节点
+ """
+ result_node = HeaderFile_dom_json
+ for nodename in node_path_list:
+ result_node = result_node[nodename]
+ return result_node
+
+ @staticmethod
+ def get_Azimuth_incidence(Azimuth_path):
+ Azimuth_incidence = 0
+ if not os.path.exists(Azimuth_path):
+ return Azimuth_incidence
+ with open(Azimuth_path) as f:
+ Azimuth_incidence = f.readline()
+ return Azimuth_incidence
+
+    @staticmethod
+    def read_Azimuth_incidence(xml_path):
+        with open(xml_path, 'r', encoding='utf-8') as fp:
+            HeaderFile_dom_str = fp.read()
+            HeaderFile_dom = xmltodict.parse(HeaderFile_dom_str)  # convert the XML into a dict
+            HeaderFile_dom_json = json.loads(json.dumps(HeaderFile_dom))
+            node_path_list = ['Root', 'ProductProductionInfo', 'ObservationGeometry', 'SatelliteAzimuth']
+            Azimuth_incidence = OrthoAzimuth.FindInfomationFromJson(HeaderFile_dom_json, node_path_list)
+            return Azimuth_incidence
+
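+# read_Azimuth_incidence walks the xmltodict output with FindInfomationFromJson,
+# so it assumes the product XML nests roughly like this (a sketch, not an
+# authoritative schema):
+# <Root>
+#   <ProductProductionInfo>
+#     <ObservationGeometry>
+#       <SatelliteAzimuth>...</SatelliteAzimuth>
+#     </ObservationGeometry>
+#   </ProductProductionInfo>
+# </Root>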
+
+class CreateProductXml:
+    def __init__(self, par_dict, model_path, xml_path):
+        self.par_dict = par_dict
+        self.xml_path = xml_path
+        shutil.copy(model_path, xml_path)
+
+    def create_standard_xml(self):
+        """Write the values from the dictionary into the copied XML file."""
+        tree = ElementTree()
+        tree.parse(self.xml_path)  # product XML copied from the template
+        root = tree.getroot()
+
+ productinfo = root.find("ProductBasicInfo")
+ for key, value in self.par_dict.items():
+ if key.split("_")[0] == "imageinfo":
+ productinfo.find(key.split("_")[1]).text = str(value)
+ elif key.split("_")[0] == "SpatialCoverageInformation":
+ imageinfo = productinfo.find("SpatialCoverageInformation")
+ imageinfo.find(key.split("_")[1]).text = str(value)
+ elif key.split("_")[0] == "TimeCoverageInformation":
+ timeInfo = productinfo.find("TimeCoverageInformation")
+ timeInfo.find(key.split("_")[1]).text = str(value)
+ elif key.split("_")[0] == "CoordinateReferenceSystemInformation":
+ geoInfo = productinfo.find("CoordinateReferenceSystemInformation")
+ geoInfo.find(key.split("_")[1]).text = str(value)
+ elif key.split("_")[0] == "MetaInfo":
+ metaInfo = productinfo.find("MetaInfo")
+ metaInfo.find(key.split("_")[1]).text = str(value)
+        ProductProductionInfo = root.find("ProductProductionInfo")  # source data information
+ for key, value in self.par_dict.items():
+ if key.split("_")[0] == "DataSources":
+ dataSources = ProductProductionInfo.find("DataSources")
+ dataSource = dataSources.find("DataSource")
+ dataSource.find(key.split("_")[2]).text = str(value)
+ elif key.split("_")[0] == "ObservationGeometry":
+ ObservationGeometry = ProductProductionInfo.find("ObservationGeometry")
+ ObservationGeometry.find(key.split("_")[1]).text = str(value)
+ elif key.split("_")[0] == "ProductProductionInfo":
+ ProductProductionInfo.find(key.split("_")[1]).text = str(value)
+
+        # ProductPublishInfo = root.find("ProductPublishInfo")  # publisher information
+        # for key, value in self.par_dict.items():
+        #     if key.split("_")[0] == "ProductPublishInfo":
+        #         ProductPublishInfo.find(key.split("_")[1]).text = str(value)
+
+ tree.write(self.xml_path, encoding="utf-8", xml_declaration=True)
+
+if __name__ == '__main__':
+
+ image_path = r'D:\Micro\WorkSpace\test\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1B_h_h_L10000073024_db_RD_geo.tif'
+ origin_xml = r'D:\Micro\WorkSpace\Ortho\Temporary\package\GF3B_MYC_QPSI_003581_E120.6_N31.3_20220729_L1A_AHV_L10000073024.meta.xml'
+ tem_folder = r'D:\Micro\WorkSpace\test'
+ pack_path = r'D:\Micro\WorkSpace\Ortho\Temporary\package'
+ out_dem_path1 = os.path.join(tem_folder, "trans_dem_geo_projcs.tif")
+ out_dem_path2 = os.path.join(tem_folder, "trans_dem_projcs_geo.tif")
+ para_dict = CreateMetaDict(image_path, origin_xml, pack_path, out_dem_path1, out_dem_path2).calu_nature()
+
+ model_path = r'D:\Project\microproduct\Ortho\product.xml'
+ xml_path = r'D:\Micro\WorkSpace\test\test.xml'
+ CreateProductXml(para_dict, model_path, xml_path).create_standard_xml()
diff --git a/Ortho-NoS1GBM/tool/config/ConfigeHandle.py b/Ortho-NoS1GBM/tool/config/ConfigeHandle.py
new file mode 100644
index 0000000..05c9423
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/config/ConfigeHandle.py
@@ -0,0 +1,48 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :onestar
+@File :ConfigeHandle.py
+@Contact:https://blog.csdn.net/songlh1234/article/details/83316468
+@Author :SHJ
+@Date :2021/11/23 16:57
+@Version :1.0.0
+"""
+import os
+import configparser
+
+
+class Config:
+    """Read/write the initialization configuration file."""
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def get(para_name, option='config', config_name='config.ini'):
+        config = configparser.ConfigParser()
+        config_path = os.path.join(os.getcwd(), config_name)
+        config.read(config_path, encoding='utf-8')
+        exe_name = config.get(option, para_name)
+        return exe_name
+
+    def get_list(self, para_name, option='config', config_name='config.ini'):
+        config = configparser.ConfigParser()
+        config_path = os.path.join(os.getcwd(), config_name)
+        config.read(config_path, encoding='utf-8')
+        str_name = config.get(option, para_name)
+        # strip spaces and newlines
+        str_name = str(str_name).replace("\n", "").replace(' ', '')
+        # split into a list
+        name_list = str_name.split(',')
+        return name_list
+
+
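+# Config.get/get_list read config.ini from the current working directory. A
+# minimal sketch of the expected layout; apart from the default 'config'
+# section name, the keys below are assumptions:
+#
+# [config]
+# exe_name = SIMOrthoProgram.exe
+# debug = False
+# target_names = hh, hv,
+#                vh, vv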
+if __name__ == '__main__':
+ # c = Config()
+ # a = c.get('exe_name')
+ # b = bool(c.get('debug'))
+ # d = int(c.get('cover_threshold'))
+ # f = float(c.get('ndvi_threshold'))
+
+ print('done')
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/csv/csvHandle.py b/Ortho-NoS1GBM/tool/csv/csvHandle.py
new file mode 100644
index 0000000..e34c0c6
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/csv/csvHandle.py
@@ -0,0 +1,267 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project : microproduct
+@File : csvHandle.py
+@Function : Read/write CSV files
+@Contact :
+@Author:SHJ
+@Date:2022/11/6
+@Version:1.0.0
+"""
+import random
+import csv
+import logging
+import numpy as np
+from tool.algorithm.image.ImageHandle import ImageHandler
+from tool.algorithm.algtools.CoordinateTransformation import geo2imagexy
+from tool.algorithm.transforml1a.transHandle import TransImgL1A
+logger = logging.getLogger("mylog")
+
+
+class csvHandle:
+    def __init__(self, row=0, col=0):
+        self.imageHandler = ImageHandler()
+        self.row = row
+        self.col = col
+        self.img_flag = False
+        if row != 0 and col != 0:
+            self.roi_img = np.zeros((row, col), dtype=float)
+            self.img_flag = True
+
+    def get_roi_img(self):
+        if self.img_flag:
+            self.roi_img[self.roi_img == 0] = np.nan
+            return self.roi_img
+        else:
+            return np.array([])
+
+ @staticmethod
+ def readcsv(csv_path):
+ reader = csv.reader(open(csv_path, newline=''))
+ csv_list = []
+ for line_data in reader:
+ csv_list.append(line_data)
+ return csv_list[1:]
+
+    def trans_measuredata(self, meas_data, tif_path):
+        file_name = tif_path
+        dataset = self.imageHandler.get_dataset(file_name)
+        rows = self.imageHandler.get_img_height(file_name)
+        cols = self.imageHandler.get_img_width(file_name)
+        measdata_list = []
+        logger.info('[MEASURE DATA]')
+        for data in meas_data:
+            lon = float(data[1])
+            lat = float(data[2])
+            coord = geo2imagexy(dataset, lon, lat)
+            row = round(coord[1])
+            col = round(coord[0])
+
+            if 0 <= row < rows and 0 <= col < cols:
+                measdata_list.append([row, col, float(data[3])])
+                logger.info([row, col, float(data[3])])
+            else:
+                logger.warning("measure data: %s is beyond tif scope !", data)
+        return measdata_list
+
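+    # trans_measuredata indexes each CSV row as data[1]=lon, data[2]=lat and
+    # data[3]=value, so (after the header row) a row is assumed to look like:
+    #   1,118.91,31.46,0.35
+    # The sample values are illustrative only.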
+    def write_roi_img_data(self, points, type_id):
+        if self.img_flag:
+            for p in points:
+                r = p[0]
+                c = p[1]
+                if r < self.row and c < self.col:
+                    self.roi_img[r, c] = type_id
+
+
+    def trans_landCover_measuredata(self, meas_data, cuted_ori_sim_path, max_train_num=100000):
+        """
+        Collect every point inside the polygon regions as labelled training data.
+        :para meas_data: measured data read from the CSV
+        """
+        type_data = {}
+        n = 1
+        train_data_list = []
+        for data in meas_data:
+            for d in data:
+                if d == '':
+                    raise Exception('there is empty data!', data)
+
+            type_id = int(data[1])
+            type_name = data[2]
+            if type_id not in type_data.keys():
+                train_data_list.append([n, type_id, type_name, []])
+                type_data.update({type_id: type_name})
+                n += 1
+
+            pointList = self.__roiPolygonAnalysis(data[3])
+            for points in pointList:
+                roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
+                tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
+                if tr._mask is not None:
+                    points = tr.get_roi_points()
+                    for train_data in train_data_list:
+                        if train_data[1] == type_id:
+                            train_data[3] += points
+                    self.write_roi_img_data(points, type_id)
+        for train_data in train_data_list:
+            if train_data[3] == []:
+                raise Exception('there is empty data!', train_data)
+        if len(train_data_list) <= 1:
+            raise Exception('there is only one label type!', train_data_list)
+
+        for train_data in train_data_list:
+            logger.info(str(train_data[0]) + "," + str(train_data[2]) + "," + "num:" + str(len(train_data[3])))
+            max_num = max_train_num
+            if len(train_data[3]) > max_num:
+                logger.info("max number = " + str(max_num) + ", randomly select " + str(max_num) + " points as train data!")
+                train_data[3] = random.sample(train_data[3], max_num)
+
+        return train_data_list
+
+    def trans_landCover_measuredata_dic(self, meas_data, cuted_ori_sim_path, max_train_num=100000):
+        train_data_list = self.trans_landCover_measuredata(meas_data, cuted_ori_sim_path, max_train_num)
+        return self.trans_landCover_list2dic(train_data_list)
+
+ @staticmethod
+ def trans_landCover_list2dic(train_data_list):
+ ids = []
+ class_ids = []
+ ch_names = []
+ positions = []
+ for data in train_data_list:
+ if data[3] == []:
+ continue
+ ids.append(data[0])
+ class_ids.append(data[1])
+ ch_names.append(data[2])
+ positions.append(data[3])
+
+ train_data_dic = {}
+ train_data_dic.update({"ids": ids})
+ train_data_dic.update({"class_ids": class_ids})
+ train_data_dic.update({"ch_names": ch_names})
+ train_data_dic.update({"positions": positions})
+ return train_data_dic
+
+    @staticmethod
+    def __roiPolygonAnalysis(roiStr):
+        """
+        Convert a POLYGON string from the CSV into a list of rings.
+        :para roiStr: POLYGON string
+        :return pointList: list of polygon rings
+        """
+        pointList = []
+        strContent = roiStr.replace("POLYGON", "")
+        # parse the outline string into a two-dimensional array
+        bracketsList = []
+        strTemp = ''
+        strList = []
+        for c in strContent:
+            if c == '(':
+                bracketsList.append(c)
+                continue
+            elif c == ')':
+                if len(bracketsList) > 0:
+                    bracketsList.pop(0)
+                if len(strTemp) > 0:
+                    strList.append(strTemp)
+                    strTemp = ''
+            else:
+                strTemp += c
+        for item in strList:
+            if len(item) == 0:
+                continue
+            pTempList = item.split(',')
+            pList = []
+            for row in pTempList:
+                cells = row.split(' ')
+                if len(cells) != 2:
+                    continue
+                point = [float(cells[0]), float(cells[1])]
+                pList.append(point)
+            pointList.append(pList)
+        return pointList
+
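+    # Example of what __roiPolygonAnalysis returns for a single ring (values
+    # follow the POLYGON style used in newimage.csv):
+    #   __roiPolygonAnalysis('POLYGON((118.91 31.46,118.92 31.47))')
+    #   -> [[[118.91, 31.46], [118.92, 31.47]]]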
+    def class_landcover_list(self, csv_path):
+        """
+        Return the mappings built from the first three columns of the CSV.
+        """
+        reader = csv.reader(open(csv_path, newline=''))
+        class_list = []
+        type_id_name = {}
+        type_id_parent = {}
+        for line_data in reader:
+            class_list.append(line_data)  # class_list has four columns
+        for data in class_list[1:]:
+            type_parent = data[0]
+            type_id = int(data[1])
+            type_name = data[2]
+
+            if type_id not in type_id_name.keys():
+                type_id_name.update({type_id: type_name})
+                type_id_parent.update({type_id: type_parent})
+        return type_id_name, type_id_parent
+
+    def trans_VegePhenology_measdata_dic(self, meas_data, cuted_ori_sim_path):
+        """
+        Collect every point inside the polygon regions and split it into
+        training data and test data.
+        :para meas_data: measured data read from the CSV
+        """
+        train_data = []
+        test_data = []
+        type_data = {}
+
+        for data in meas_data:
+            data_use_type = data[0]
+            sar_img_name = data[1]
+            # strip the '.tar.gz' suffix (str.rstrip would strip characters, not the suffix)
+            name = sar_img_name[:-len('.tar.gz')] if sar_img_name.endswith('.tar.gz') else sar_img_name
+
+            if data_use_type == 'train':
+                phenology_id = int(data[2])
+                phenology_name = data[3]
+                if phenology_id not in type_data.keys():
+                    type_data.update({phenology_id: phenology_name})
+            else:
+                phenology_id = -1
+
+            pointList = self.__roiPolygonAnalysis(data[4])
+            l1a_points = []
+            for points in pointList:
+                roi_poly = [(float(lon), float(lat)) for (lon, lat) in points]
+                tr = TransImgL1A(cuted_ori_sim_path, roi_poly)
+                l1a_points = tr.get_roi_points()
+            if data_use_type == 'train':
+                train_data.append([name, phenology_id, l1a_points, type_data[phenology_id]])
+            elif data_use_type == 'test':
+                test_data.append([name, phenology_id, l1a_points])
+        type_map = []
+        for n, id in zip(range(len(type_data)), type_data):
+            type_map.append([n + 1, id, type_data[id]])
+
+        return train_data, test_data, type_map
+
+    @staticmethod
+    def vegePhenology_class_list(csv_path):
+        """
+        Return the type-id/type-name mapping read from the CSV.
+        """
+        reader = csv.reader(open(csv_path, newline=''))
+        class_list = []
+        type_id_name = {}
+        for line_data in reader:
+            class_list.append(line_data)  # class_list has four columns
+        for data in class_list[1:]:
+            type_id = data[2]
+            type_name = data[3]
+
+            if type_id not in type_id_name.keys():
+                if type_id.strip() != "":
+                    type_id_name.update({type_id: type_name})
+        return type_id_name
+
+# if __name__ == '__main__':
+ # csvh = csvHandle()
+ # csv_path = r"I:\preprocessed\VegetationPhenologyMeasureData_E118.9_N31.4.csv"
+ # data = csvh.trans_VegePhenology_measdata_dic(csvh.readcsv(csv_path),r"I:\preprocessed\GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422_RPC_ori_sim_preprocessed.tif")
+ # pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/file/fileHandle.py b/Ortho-NoS1GBM/tool/file/fileHandle.py
new file mode 100644
index 0000000..28ce04e
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/file/fileHandle.py
@@ -0,0 +1,88 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project : microproduct
+@File : fileHandle.py
+@Function : Create, delete, unpack and pack files
+@Contact :
+@Author:SHJ
+@Date:2022/11/6
+@Version:1.0.0
+"""
+import os
+import tarfile
+import shutil
+
+class fileHandle:
+ def __init__(self, debug_mode=False):
+ self.__debug_mode = debug_mode
+
+    def creat_dirs(self, path_list):
+        """
+        Create the directories in path_list, recreating existing ones unless in debug mode.
+        """
+        for path in path_list:
+            if os.path.exists(path):
+                if self.__debug_mode is True:
+                    continue
+                self.del_folder(path)
+                os.makedirs(path)
+            else:
+                os.makedirs(path)
+
+    def del_folder(self, dic):
+        """
+        Delete an entire directory tree.
+        """
+        if self.__debug_mode is True:
+            return
+        if os.path.isdir(dic):
+            shutil.rmtree(dic)
+
+    def del_file(self, path_data):
+        """
+        Delete only files; directories are kept but cleared recursively.
+        """
+        for i in os.listdir(path_data):  # everything directly under path_data
+            file_data = os.path.join(path_data, i)  # absolute path of the entry
+            if os.path.isfile(file_data) is True:  # delete files; recurse into directories
+                os.remove(file_data)
+            else:
+                self.del_file(file_data)
+
+    @staticmethod
+    def make_targz(output_filename, source_dir):
+        """
+        Pack a whole directory tree in one go; empty subdirectories are included.
+        To pack without compressing, change the "w:gz" mode to "w:" or "w".
+        :param output_filename: full path of the output archive, e.g. 'E:\\test.tar.gz'
+        :param source_dir: root directory to pack; 'E:\\testFfile\\' packs the folder's
+                           contents, 'E:\\testFfile' packs the folder itself
+        """
+        out_dir = os.path.split(output_filename)[0]
+        if os.path.exists(out_dir) is False:
+            os.makedirs(out_dir)
+        with tarfile.open(output_filename, "w:gz") as tar:
+            tar.add(source_dir, arcname=os.path.basename(source_dir))
+
+    @staticmethod
+    def de_targz(tar_gz_path, file_dir):
+        # strip the '.tar.gz' suffix (str.rstrip would strip characters, not the suffix)
+        name = os.path.split(tar_gz_path)[1]
+        if name.endswith('.tar.gz'):
+            name = name[:-len('.tar.gz')]
+        if os.path.exists(file_dir) is False:
+            os.makedirs(file_dir)
+        # unpack
+        with tarfile.open(tar_gz_path) as t:
+            t.extractall(path=file_dir)
+
+    @staticmethod
+    def copyfile2dir(srcfile, dir):  # copy a file into a directory
+        if not os.path.isfile(srcfile):
+            print("%s not exist!" % srcfile)
+        else:
+            fpath, fname = os.path.split(srcfile)  # split path and file name
+            if not os.path.exists(dir):
+                os.makedirs(dir)  # create the directory
+            shutil.copy(srcfile, os.path.join(dir, fname))  # copy the file
+
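+# A minimal usage sketch; the paths are assumptions, so the calls stay commented out:
+# fh = fileHandle()
+# fh.creat_dirs([r'D:\tmp\workspace'])
+# fileHandle.make_targz(r'D:\tmp\out.tar.gz', r'D:\tmp\workspace')
+# fileHandle.de_targz(r'D:\tmp\out.tar.gz', r'D:\tmp\unpacked')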
+# if __name__ == '__main__':
+#     file = fileHandle()
+#     file.del_folder(r"I:\preprocessed")
+#     pass
\ No newline at end of file
diff --git a/Ortho-NoS1GBM/tool/logs/logHandler.py b/Ortho-NoS1GBM/tool/logs/logHandler.py
new file mode 100644
index 0000000..b4cbad1
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/logs/logHandler.py
@@ -0,0 +1,90 @@
+# -*- coding: UTF-8 -*-
+"""
+@Project :microproduct
+@File :logHandler.py
+@Author :SHJ
+@Date :2021/9/6
+@Version :1.0.0
+"""
+import logging
+import os
+# from logging import handlers
+import time
+import datetime
+
+
+class LogHandler:
+ """
+ 生成日志
+ """
+ __logger = logging.getLogger("mylog")
+ __format_str = logging.Formatter("[%(asctime)s] [%(process)d] [%(levelname)s] - %(module)s.%(funcName)s "
+ "(%(filename)s:%(lineno)d) - %(message)s")
+ __log_path = None
+
+    @staticmethod
+    def init_log_handler(log_name):
+        """
+        Initialize logging.
+        :param log_name: path and base name for the log file
+        :return:
+        """
+        path = os.getcwd()
+        current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
+        LogHandler.__log_path = os.path.join(path, log_name + current_time + ".log")
+        para_dir = os.path.split(LogHandler.__log_path)
+        if not os.path.exists(para_dir[0]):
+            os.makedirs(para_dir[0])
+        # delete files older than seven days
+        LogHandler.delete_outdate_files(para_dir[0], 7)
+
+        # Method 1: plain logging
+        LOG_FORMAT = "[%(asctime)s] [%(process)d] [%(levelname)s]- %(message)s ---from: %(module)s.%(funcName)s" \
+                     " (%(filename)s:Line%(lineno)d) "
+        DATE_FORMAT = "%m/%d/%Y %H:%M:%S"
+        fp = logging.FileHandler(LogHandler.__log_path, encoding='utf-8')
+        fs = logging.StreamHandler()
+        logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT, handlers=[fp, fs])
+
+        # Method 2: rotating log (kept for reference, disabled)
+ # LogHandler.__logger.setLevel(logging.DEBUG)
+ # th = handlers.TimedRotatingFileHandler(filename=LogHandler.__log_path, when='S', interval=1,
+ # backupCount=2, encoding='utf-8')
+ # th.suffix = "%Y-%m-%d-%H-%M-%S.log"
+ # th.setFormatter(LogHandler.__format_str)
+ # th.setLevel(level=logging.DEBUG)
+
+ # console = logging.StreamHandler()
+ # console.setLevel(logging.INFO)
+ # LogHandler.__logger.addHandler(console)
+ # LogHandler.__logger.addHandler(th)
+
+    @staticmethod
+    def delete_outdate_files(path, date_interval=7):
+        """
+        Delete files under the directory created more than date_interval days ago.
+        """
+        current_time = time.strftime("%Y-%m-%d", time.localtime(time.time()))
+        current_timeList = current_time.split("-")
+        current_time_day = datetime.datetime(int(current_timeList[0]), int(current_timeList[1]),
+                                             int(current_timeList[2]))
+        for root, dirs, files in os.walk(path):
+            for item in files:
+                item_format = item.split(".", 2)
+                if len(item_format) > 1 and item_format[1] == "log":
+                    file_path = os.path.join(root, item)
+                    create_time = time.strftime("%Y-%m-%d", time.localtime((os.stat(file_path)).st_mtime))
+                    create_time_list = create_time.split("-")
+                    create_time_day = datetime.datetime(int(create_time_list[0]), int(create_time_list[1]),
+                                                        int(create_time_list[2]))
+                    time_difference = (current_time_day - create_time_day).days
+                    if time_difference > date_interval:
+                        os.remove(file_path)
+
+#
+# if __name__ == "__main__":
+# # eg2:
+# log_handler = LogHandler()
+# log_handler.init_log_handler(r"run_log\myrun1")
+# logging.warning("1")
+# print("done")
diff --git a/Ortho-NoS1GBM/tool/newimage.csv b/Ortho-NoS1GBM/tool/newimage.csv
new file mode 100644
index 0000000..6394f2b
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/newimage.csv
@@ -0,0 +1,47 @@
+sar_img_name,phenology_id,phenology_name,roi_polygon
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,10,water,"POLYGON((118.91799748 31.46893509,118.91762055 31.46674878,118.9210883 31.46637183,118.9210883 31.46855814,118.91799748 31.46893509))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,10,water,"POLYGON((118.90864966 31.46388396,118.90861196 31.46384627,118.90857427 31.46380857,118.90608654 31.46188613,118.90608654 31.46181074,118.90940351 31.46015216,118.91155201 31.46199921,118.90864966 31.46388396))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,10,water,"POLYGON((118.91898928 31.45778718,118.91893038 31.45478336,118.91893038 31.45472446,118.91898928 31.45472446,118.9246432 31.45472446,118.9247021 31.45472446,118.92499657 31.45872956,118.91898928 31.45778718))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89543794 31.46174102,118.89452125 31.4583153,118.8948831 31.45807405,118.89599277 31.46171689,118.89543794 31.46174102))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.8940629 31.45653006,118.89280848 31.45312847,118.8932427 31.45308022,118.89326683 31.45308022,118.89442475 31.45636119,118.8940629 31.45653006))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89132626 31.44844806,118.89046628 31.4453566,118.89079168 31.44540309,118.89081492 31.44542633,118.89083817 31.44544958,118.89086141 31.44547282,118.89172139 31.44840157,118.89132626 31.44844806))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89028034 31.4441944,118.89016413 31.44256732,118.89044304 31.44256732,118.89046628 31.44259056,118.89048953 31.4426138,118.89051277 31.44266029,118.8907452 31.44472901,118.89028034 31.4441944))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89000143 31.44193973,118.88997819 31.44193973,118.88997819 31.44189324,118.88923442 31.43399026,118.8894436 31.43403675,118.89018737 31.44191648,118.89000143 31.44193973))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.88897526 31.42114936,118.8888637 31.4205915,118.8888637 31.4205729,118.89468368 31.41977331,118.89470227 31.41977331,118.89472087 31.41977331,118.89499978 31.42042414,118.88897526 31.42114936))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.88025459 31.42230226,118.88010583 31.42153986,118.88594441 31.42088902,118.885963 31.42088902,118.8866138 31.42155845,118.88025459 31.42230226))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.87595933 31.42280433,118.87577339 31.42222788,118.87977114 31.42178159,118.87999427 31.42228367,118.87595933 31.42280433))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.89870564 31.41809418,118.89826403 31.41748983,118.89826403 31.41746659,118.89828727 31.41744335,118.90193638 31.41511894,118.90261042 31.41551409,118.89870564 31.41809418))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,20,build,"POLYGON((118.86530883 31.42443948,118.86630754 31.42325911,118.87384328 31.42257814,118.87388867 31.42257814,118.87397946 31.42253274,118.87411565 31.42253274,118.87493278 31.42307752,118.87488738 31.42312292,118.87479659 31.42312292,118.86530883 31.42443948))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.91830432 31.38444815,118.91828107 31.3844249,118.91828107 31.38440166,118.9183508 31.38433193,118.91958267 31.3826816,118.91960591 31.3826816,118.9223718 31.38421571,118.9223718 31.38423895,118.9223718 31.38426219,118.92146533 31.38628443,118.91830432 31.38444815))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93304021 31.37921823,118.93396992 31.37812576,118.93399316 31.37812576,118.93401641 31.37812576,118.93536449 31.37919499,118.93464396 31.38033395,118.93304021 31.37921823))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93629419 31.37882309,118.93627095 31.37882309,118.93627095 31.37879984,118.93701472 31.37756791,118.93752606 31.37780035,118.93752606 31.37782359,118.93666608 31.37905553,118.93629419 31.37882309))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93680553 31.38465734,118.9372239 31.38396002,118.93817686 31.38454112,118.9382001 31.38454112,118.93822334 31.38456437,118.93810713 31.38533142,118.93808389 31.38535467,118.93680553 31.38465734))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94384808 31.38312765,118.94420137 31.38271856,118.94421997 31.38271856,118.94423856 31.38271856,118.94505671 31.38325782,118.94453607 31.38387146,118.94384808 31.38312765))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94066848 31.37855322,118.94096598 31.37806975,118.94096598 31.37805115,118.94096598 31.37803256,118.94096598 31.37801396,118.94096598 31.37799537,118.94223039 31.37881356,118.94163538 31.37946439,118.94161678 31.37946439,118.94066848 31.37855322))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.93942267 31.3813797,118.9396272 31.38108218,118.9396458 31.38108218,118.94048254 31.38154706,118.94022222 31.38204913,118.93942267 31.3813797))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94446169 31.38115656,118.94479639 31.38069168,118.94576329 31.38136111,118.94546578 31.38193756,118.94446169 31.38115656))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.94968666 31.38693968,118.94968666 31.38690249,118.9498912 31.38660496,118.94990979 31.38660496,118.95063496 31.38701406,118.95033746 31.38740456,118.94968666 31.38693968))"
+GF3_SAY_QPSI_011444_E118.9_N31.4_20181012_L1A_AHV_L10003515422-ortho,30,road,"POLYGON((118.95310799 31.37931563,118.95310799 31.37929703,118.9537216 31.37885075,118.95374019 31.37883215,118.95379598 31.37879496,118.95463271 31.37931563,118.95418645 31.3798363,118.95310799 31.37931563))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,10,water,"POLYGON((118.91799748 31.46893509,118.91762055 31.46674878,118.9210883 31.46637183,118.9210883 31.46855814,118.91799748 31.46893509))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,10,water,"POLYGON((118.90864966 31.46388396,118.90861196 31.46384627,118.90857427 31.46380857,118.90608654 31.46188613,118.90608654 31.46181074,118.90940351 31.46015216,118.91155201 31.46199921,118.90864966 31.46388396))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,10,water,"POLYGON((118.91898928 31.45778718,118.91893038 31.45478336,118.91893038 31.45472446,118.91898928 31.45472446,118.9246432 31.45472446,118.9247021 31.45472446,118.92499657 31.45872956,118.91898928 31.45778718))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89543794 31.46174102,118.89452125 31.4583153,118.8948831 31.45807405,118.89599277 31.46171689,118.89543794 31.46174102))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.8940629 31.45653006,118.89280848 31.45312847,118.8932427 31.45308022,118.89326683 31.45308022,118.89442475 31.45636119,118.8940629 31.45653006))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89132626 31.44844806,118.89046628 31.4453566,118.89079168 31.44540309,118.89081492 31.44542633,118.89083817 31.44544958,118.89086141 31.44547282,118.89172139 31.44840157,118.89132626 31.44844806))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89028034 31.4441944,118.89016413 31.44256732,118.89044304 31.44256732,118.89046628 31.44259056,118.89048953 31.4426138,118.89051277 31.44266029,118.8907452 31.44472901,118.89028034 31.4441944))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89000143 31.44193973,118.88997819 31.44193973,118.88997819 31.44189324,118.88923442 31.43399026,118.8894436 31.43403675,118.89018737 31.44191648,118.89000143 31.44193973))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.88897526 31.42114936,118.8888637 31.4205915,118.8888637 31.4205729,118.89468368 31.41977331,118.89470227 31.41977331,118.89472087 31.41977331,118.89499978 31.42042414,118.88897526 31.42114936))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.88025459 31.42230226,118.88010583 31.42153986,118.88594441 31.42088902,118.885963 31.42088902,118.8866138 31.42155845,118.88025459 31.42230226))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.87595933 31.42280433,118.87577339 31.42222788,118.87977114 31.42178159,118.87999427 31.42228367,118.87595933 31.42280433))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.89870564 31.41809418,118.89826403 31.41748983,118.89826403 31.41746659,118.89828727 31.41744335,118.90193638 31.41511894,118.90261042 31.41551409,118.89870564 31.41809418))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,20,build,"POLYGON((118.86530883 31.42443948,118.86630754 31.42325911,118.87384328 31.42257814,118.87388867 31.42257814,118.87397946 31.42253274,118.87411565 31.42253274,118.87493278 31.42307752,118.87488738 31.42312292,118.87479659 31.42312292,118.86530883 31.42443948))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.91830432 31.38444815,118.91828107 31.3844249,118.91828107 31.38440166,118.9183508 31.38433193,118.91958267 31.3826816,118.91960591 31.3826816,118.9223718 31.38421571,118.9223718 31.38423895,118.9223718 31.38426219,118.92146533 31.38628443,118.91830432 31.38444815))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93304021 31.37921823,118.93396992 31.37812576,118.93399316 31.37812576,118.93401641 31.37812576,118.93536449 31.37919499,118.93464396 31.38033395,118.93304021 31.37921823))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93629419 31.37882309,118.93627095 31.37882309,118.93627095 31.37879984,118.93701472 31.37756791,118.93752606 31.37780035,118.93752606 31.37782359,118.93666608 31.37905553,118.93629419 31.37882309))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93680553 31.38465734,118.9372239 31.38396002,118.93817686 31.38454112,118.9382001 31.38454112,118.93822334 31.38456437,118.93810713 31.38533142,118.93808389 31.38535467,118.93680553 31.38465734))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94384808 31.38312765,118.94420137 31.38271856,118.94421997 31.38271856,118.94423856 31.38271856,118.94505671 31.38325782,118.94453607 31.38387146,118.94384808 31.38312765))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94066848 31.37855322,118.94096598 31.37806975,118.94096598 31.37805115,118.94096598 31.37803256,118.94096598 31.37801396,118.94096598 31.37799537,118.94223039 31.37881356,118.94163538 31.37946439,118.94161678 31.37946439,118.94066848 31.37855322))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.93942267 31.3813797,118.9396272 31.38108218,118.9396458 31.38108218,118.94048254 31.38154706,118.94022222 31.38204913,118.93942267 31.3813797))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94446169 31.38115656,118.94479639 31.38069168,118.94576329 31.38136111,118.94546578 31.38193756,118.94446169 31.38115656))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.94968666 31.38693968,118.94968666 31.38690249,118.9498912 31.38660496,118.94990979 31.38660496,118.95063496 31.38701406,118.95033746 31.38740456,118.94968666 31.38693968))"
+GF3_SAY_QPSI_013952_E118.9_N31.5_20190404_L1A_AHV_L10003923848-ortho,30,road,"POLYGON((118.95310799 31.37931563,118.95310799 31.37929703,118.9537216 31.37885075,118.95374019 31.37883215,118.95379598 31.37879496,118.95463271 31.37931563,118.95418645 31.3798363,118.95310799 31.37931563))"
diff --git a/Ortho-NoS1GBM/tool/testxmlreading.py b/Ortho-NoS1GBM/tool/testxmlreading.py
new file mode 100644
index 0000000..93e3742
--- /dev/null
+++ b/Ortho-NoS1GBM/tool/testxmlreading.py
@@ -0,0 +1,98 @@
+# -*- coding: UTF-8 -*-
+import os
+import xml.etree.ElementTree as ET
+import pandas as pd
+import csv
+
+def xml2csv(xmlpath):
+    tree_obj = ET.parse(xmlpath)
+
+    # list of all Element objects matching the Region tag
+    list_Region = tree_obj.findall("Region")
+    for Region in list_Region:
+        # the class name (phenology_name) sits in the Region tag's attributes
+        Region_dict = Region.attrib
+        phenology_name = Region_dict.get("name")
+        print(phenology_name)
+        list_GeometryDef = Region.findall("GeometryDef")
+        list_Polygon = list_GeometryDef[0].findall("Polygon")  # polygons of this class
+        for polygon in list_Polygon:
+            # the Coordinates text is space-separated, unlike the CSV format,
+            # e.g. POLYGON((119.035 31.51,119.035 31.50,119.033 31.50)) in the CSV
+            Coordinates_list = polygon.find('.//Coordinates').text.strip().split()
+            print("value")
+
+# append a row to the CSV
+def csvfile(csvpath, data):
+    with open(csvpath, 'a', newline='') as file:
+        writer = csv.writer(file)
+        # data example: data = ["This", "is", "a", "Test"]
+        writer.writerow(data)
+
+
+# Write the header row that defines the structure of the data,
+# e.g. student_header = ['name', 'age', 'major', 'minor']
+def csvcreateTitle(csvpath, data):
+    # 1. open a new CSV file (newline='' avoids blank rows on Windows)
+    with open(csvpath, 'w', newline='') as file:
+        # 2. create a CSV writer
+        writer = csv.writer(file)
+        # 3. write the header to the file
+        writer.writerow(data)
+
+# convert the coordinate pairs in the list into a POLYGON string
+def createcsv_roi_polygon(coordinates):
+    coord_str = ','.join([f'{coordinates[i]} {coordinates[i + 1]}' for i in range(0, len(coordinates), 2)])
+    # build the final POLYGON string
+    polygon_str = f'POLYGON(({coord_str}))'
+    print(polygon_str)
+    return polygon_str
+
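+# Example: createcsv_roi_polygon(['118.91', '31.46', '118.92', '31.47'])
+# prints and returns 'POLYGON((118.91 31.46,118.92 31.47))' (illustrative values).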
+def get_Azimuth_incidence(Azimuth_path):
+ Azimuth_incidence = 0
+ if not os.path.exists(Azimuth_path):
+ print('get Azimuth_incidence failed!')
+ return Azimuth_incidence
+ with open(Azimuth_path) as f:
+ Azimuth_incidence = f.readline()
+ return Azimuth_incidence
+
+# if __name__ == '__main__':
+# path = r"D:\micro\WorkSpace\Ortho1\Temporary\test.txt"
+# value = get_Azimuth_incidence(path)
+# print(value)
+if __name__ == '__main__':
+    xmlpath = r"E:\MicroWorkspace\S_SAR\AHV\landCover.xml"
+
+    tree_obj = ET.parse(xmlpath)
+    csv_header = ['sar_img_name', 'phenology_id', 'phenology_name', 'roi_polygon']
+    csvpath = r"E:\MicroWorkspace\S_SAR\AHV\newimage.csv"
+    # csvcreateTitle(csvpath, csv_header)
+    csvfile(csvpath, csv_header)
+    # list of all Element objects matching the Region tag
+    list_Region = tree_obj.findall("Region")
+    name_list = {}
+    count = 10
+    for Region in list_Region:
+        # the class name (phenology_name) sits in the Region tag's attributes
+        Region_dict = Region.attrib
+        phenology_name = Region_dict.get("name")
+
+        if phenology_name not in name_list.keys():
+            name_list.update({phenology_name: count})
+            count += 10
+        print(phenology_name)
+        # list_GeometryDef = Region.findall("GeometryDef")
+        list_Polygon = Region.findall(".//Polygon")  # polygons of this class
+
+        for polygon in list_Polygon:
+            # the Coordinates text is space-separated, unlike the CSV format
+            Coordinates_list = polygon.find('.//Coordinates').text.strip().split()
+            # write the coordinates and their polygon string into the .csv,
+            # e.g. POLYGON((119.035 31.51,119.035 31.50,119.033 31.50)) in the CSV
+            polygon_str = createcsv_roi_polygon(Coordinates_list)
+            data = ['0', name_list.get(phenology_name), phenology_name, polygon_str]
+            csvfile(csvpath, data)