diff --git a/.circleci/config.yml b/.circleci/config.yml
index 444447f..607ca5b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -77,7 +77,7 @@ jobs:
pwd
mkdir config build install
. /opt/conda/bin/activate root
- conda install --yes cython gdal h5py libgdal pytest numpy fftw scipy scons hdf4 hdf5 netcdf4 libgcc libstdcxx-ng cmake astropy
+ conda install --yes cython gdal h5py libgdal pytest numpy fftw scipy scons hdf4 hdf5 netcdf4 libgcc libstdcxx-ng cmake astropy opencv
yum install -y uuid-devel x11-devel motif-devel jq gcc-gfortran
ln -s /opt/conda/bin/cython /opt/conda/bin/cython3
cd /opt/conda/lib
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c526ec4..7529e8a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -18,6 +18,8 @@ find_package(Python 3.5 REQUIRED COMPONENTS Interpreter Development
find_package(FFTW REQUIRED)
find_package(Motif)
find_package(OpenMP REQUIRED COMPONENTS C CXX Fortran)
+find_package(OpenCV COMPONENTS core highgui imgproc)
+
# Find these, and create IMPORTED INTERFACE libraries for them if they exist
include(TargetGDAL)
include(TargetMotif)
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index e1c8031..4395db0 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -4,6 +4,7 @@ add_subdirectory(demUtils)
add_subdirectory(frameUtils)
#add_subdirectory(unwUtils)
add_subdirectory(downsample_unwrapper)
+add_subdirectory(geo_autoRIFT)
add_subdirectory(PyCuAmpcor)
add_subdirectory(splitSpectrum)
diff --git a/contrib/SConscript b/contrib/SConscript
index bfc08a5..11366b4 100644
--- a/contrib/SConscript
+++ b/contrib/SConscript
@@ -80,6 +80,4 @@ SConscript('splitSpectrum/SConscript')
SConscript('alos2filter/SConscript')
SConscript('alos2proc/SConscript')
SConscript('alos2proc_f/SConscript')
-
-if os.path.exists('geo_autoRIFT'):
- SConscript('geo_autoRIFT/SConscript')
+SConscript('geo_autoRIFT/SConscript')
diff --git a/contrib/geo_autoRIFT/CMakeLists.txt b/contrib/geo_autoRIFT/CMakeLists.txt
new file mode 100644
index 0000000..45b35cf
--- /dev/null
+++ b/contrib/geo_autoRIFT/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Early exit if prereqs not found
+if(NOT TARGET Python::NumPy
+OR NOT TARGET GDAL::GDAL
+OR NOT OpenCV_FOUND
+ )
+ return()
+endif()
+
+InstallSameDir(__init__.py)
+
+add_subdirectory(autoRIFT)
+add_subdirectory(geogrid)
diff --git a/contrib/geo_autoRIFT/README.md b/contrib/geo_autoRIFT/README.md
new file mode 100644
index 0000000..4e5a84d
--- /dev/null
+++ b/contrib/geo_autoRIFT/README.md
@@ -0,0 +1,240 @@
+# autoRIFT (autonomous Repeat Image Feature Tracking)
+
+
+
+[](https://www.python.org/)
+[](https://github.com/leiyangleon/autoRIFT/releases)
+[](https://github.com/leiyangleon/autoRIFT/blob/master/LICENSE)
+[](https://doi.org/10.5281/zenodo.3756192)
+
+
+
+**A Python module of a fast and intelligent algorithm for finding the pixel displacement between two images**
+
+**autoRIFT can be installed as a standalone Python module (only supports Cartesian coordinates) either manually or as a conda install (https://github.com/conda-forge/autorift-feedstock). To allow support for both Cartesian and radar coordinates, autoRIFT must be installed with the InSAR Scientific Computing Environment (ISCE: https://github.com/isce-framework/isce2)**
+
+**Use cases include all dense feature tracking applications, including the measurement of surface displacements occurring between two repeat satellite images as a result of glacier flow, large earthquake displacements, and landslides**
+
+**autoRIFT can be used for dense feature tracking between two images over a grid defined in an arbitrary geographic Cartesian (northing/easting) coordinate projection when used in combination with the sister Geogrid Python module (https://github.com/leiyangleon/Geogrid). Example applications include searching radar-coordinate imagery on a polar stereographic grid and searching Universal Transverse Mercator (UTM) imagery at a specified geographic Cartesian (northing/easting) coordinate grid**
+
+
+Copyright (C) 2019 California Institute of Technology. Government Sponsorship Acknowledged.
+
+Link: https://github.com/leiyangleon/autoRIFT
+
+Citation: https://doi.org/10.5281/zenodo.3756192
+
+
+## 1. Authors
+
+Alex Gardner (JPL/Caltech; alex.s.gardner@jpl.nasa.gov) first described the algorithm "auto-RIFT" in (Gardner et al., 2018) and developed the first version in MATLAB;
+
+Yang Lei (GPS/Caltech; ylei@caltech.edu) translated it to Python, further optimized it and incorporated it into the ISCE software;
+
+Piyush Agram (JPL/Caltech; piyush.agram@jpl.nasa.gov) set up the installation as a standalone module and further cleaned the code.
+
+Reference: Gardner, A. S., Moholdt, G., Scambos, T., Fahnstock, M., Ligtenberg, S., van den Broeke, M., & Nilsson, J. (2018). Increased West Antarctic and unchanged East Antarctic ice discharge over the last 7 years. The Cryosphere, 12(2), 521–547. https://doi.org/10.5194/tc-12-521-2018
+
+
+## 2. Acknowledgement
+
+This effort was funded by the NASA MEaSUREs program in contribution to the Inter-mission Time Series of Land Ice Velocity and Elevation (ITS_LIVE) project (https://its-live.jpl.nasa.gov/) and through Alex Gardner’s participation in the NASA NISAR Science Team
+
+
+## 3. Features
+
+* fast algorithm that finds displacement between the two images (where source and template image patches are extracted and correlated) using sparse search and iteratively progressive chip (template) sizes
+* for intelligent use, user can specify unstructured spatially-varying search centers (center of the source image patch), search offsets (center displacement of the template image patch compared to the source image patch), chip sizes (size of template image patch), search ranges (size of the source image patch)
+* faster than the conventional dense feature tracking "ampcor"/"denseampcor" algorithm included in ISCE by nearly 2 orders of magnitude
+* supports various preprocessing modes for specified image pair, e.g. either the raw image (both texture and topography) or the texture only (high-frequency components without the topography) can be used with various choices of the high-pass filter options
+* supports image data format of unsigned integer 8 (uint8; faster) and single-precision float (float32)
+* user can adjust all of the relevant parameters, as detailed below in the instructions
+* a Normalized Displacement Coherence (NDC) filter is developed (Gardner et al., 2018) to filter the chip displacement results based on displacement difference thresholds that are scaled to the local search range
+* a sparse integer displacement search is used to narrow the template search range and to eliminate areas of "low coherence" for fine decimal displacement search and following chip size iterations
+* novel nested grid design allows the chip size to increase progressively with each iteration where a smaller chip size failed
+* a light interpolation is done to fill the missing (unreliable) chip displacement results using median filtering and bicubic-mode interpolation (that can remove pixel discrepancy when using other modes) and an interpolation mask is returned
+* the core displacement estimator (normalized cross correlation) and image pre-processing call OpenCV's Python and/or C++ functions for efficiency
+* sub-pixel displacement estimation using a fast Gaussian pyramid upsampling algorithm that is robust to sub-pixel locking; the intelligent (chip size-dependent) selection of oversampling ratio is also supported for a tradeoff between efficiency and accuracy
+* the current version can be installed with the ISCE software (that supports both Cartesian and radar coordinates) or as a standalone Python module (Cartesian coordinates only)
+* when used in combination with the Geogrid Python module (https://github.com/leiyangleon/Geogrid), autoRIFT can be used for feature tracking between image pair over a grid defined in an arbitrary geographic Cartesian (northing/easting) coordinate projection
+* when the grid is provided in geographic Cartesian (northing/easting) coordinates, outputs are returned in geocoded GeoTIFF image file format with the same EPSG projection code as input search grid
+
+## 4. Possible Future Development
+
+* for radar (SAR) images, it is yet to include the complex correlation of the two images, i.e. the current version only uses the amplitude correlation
+* the current version works for single-core CPU while the multi-core parallelization or GPU implementation would be useful to extend
+
+
+## 5. Demo
+
+### 5.1 Optical image over regular grid in imaging coordinates
+
+
+
+***Output of "autoRIFT" module for a pair of Landsat-8 images (20170708-20170724; same as the Demo dataset at https://github.com/leiyangleon/Geogrid) in Greenland over a regular-spacing grid: (a) estimated horizontal pixel displacement, (b) estimated vertical pixel displacement, (c) chip size used, (d) light interpolation mask.***
+
+This is done by implementing the following command line:
+
+Standalone:
+
+ testautoRIFT.py -m I1 -s I2 -fo 1
+
+With ISCE:
+
+ testautoRIFT_ISCE.py -m I1 -s I2 -fo 1
+
+where "I1" and "I2" are the reference and test images as defined in the instructions below. The "-fo" option indicates whether or not to read optical image data.
+
+
+### 5.2 Optical image over user-defined geographic Cartesian coordinate grid
+
+
+
+***Output of "autoRIFT" module for a pair of Landsat-8 images (20170708-20170724; same as the Demo dataset at https://github.com/leiyangleon/Geogrid) in Greenland over user-defined geographic Cartesian (northing/easting) coordinate grid (same grid used in the Demo at https://github.com/leiyangleon/Geogrid): (a) estimated horizontal pixel displacement, (b) estimated vertical pixel displacement, (c) light interpolation mask, (d) chip size used. Notes: all maps are established exactly over the same geographic Cartesian coordinate grid from input.***
+
+This is done by implementing the following command line:
+
+Standalone:
+
+ testautoRIFT.py -m I1 -s I2 -g winlocname -o winoffname -sr winsrname -csmin wincsminname -csmax wincsmaxname -vx winro2vxname -vy winro2vyname -fo 1
+
+With ISCE:
+
+ testautoRIFT_ISCE.py -m I1 -s I2 -g winlocname -o winoffname -sr winsrname -csmin wincsminname -csmax wincsmaxname -vx winro2vxname -vy winro2vyname -fo 1
+
+where "I1" and "I2" are the reference and test images as defined in the instructions below, and the optional inputs "winlocname", "winoffname", "winsrname", "wincsminname", "wincsmaxname", "winro2vxname", "winro2vyname" are outputs from running "testGeogrid_ISCE.py" or "testGeogridOptical.py" as defined at https://github.com/leiyangleon/Geogrid. When "winlocname" (grid location) is specified, each of the rest optional input ("win * name") can be either used or omitted.
+
+
+
+
+***Final motion velocity results by combining outputs from "Geogrid" (i.e. matrix of conversion coefficients from the Demo at https://github.com/leiyangleon/Geogrid) and "autoRIFT" modules: (a) estimated motion velocity from Landsat-8 data (x-direction; in m/yr), (b) motion velocity from input data (x-direction; in m/yr), (c) estimated motion velocity from Landsat-8 data (y-direction; in m/yr), (d) motion velocity from input data (y-direction; in m/yr). Notes: all maps are established exactly over the same geographic Cartesian (northing/easting) coordinate grid from input.***
+
+
+### 5.3 Radar image over regular grid in imaging coordinates
+
+
+
+***Output of "autoRIFT" module for a pair of Sentinel-1A/B images (20170221-20170227; same as the Demo dataset at https://github.com/leiyangleon/Geogrid) at Jakobshavn Glacier of Greenland over a regular-spacing grid: (a) estimated range pixel displacement, (b) estimated azimuth pixel displacement, (c) chip size used, (d) light interpolation mask.***
+
+
+This is obtained by implementing the following command line:
+
+With ISCE:
+
+ testautoRIFT_ISCE.py -m I1 -s I2
+
+where "I1" and "I2" are the reference and test images as defined in the instructions below.
+
+
+### 5.4 Radar image over user-defined geographic Cartesian coordinate grid
+
+
+
+***Output of "autoRIFT" module for a pair of Sentinel-1A/B images (20170221-20170227; same as the Demo dataset at https://github.com/leiyangleon/Geogrid) at Jakobshavn Glacier of Greenland over user-defined geographic Cartesian (northing/easting) coordinate grid (same grid used in the Demo at https://github.com/leiyangleon/Geogrid): (a) estimated range pixel displacement, (b) estimated azimuth pixel displacement, (c) light interpolation mask, (d) chip size used. Notes: all maps are established exactly over the same geographic Cartesian coordinate grid from input.***
+
+This is done by implementing the following command line:
+
+With ISCE:
+
+ testautoRIFT_ISCE.py -m I1 -s I2 -g winlocname -o winoffname -sr winsrname -csmin wincsminname -csmax wincsmaxname -vx winro2vxname -vy winro2vyname
+
+where "I1" and "I2" are the reference and test images as defined in the instructions below, and the optional inputs "winlocname", "winoffname", "winsrname", "wincsminname", "wincsmaxname", "winro2vxname", "winro2vyname" are outputs from running "testGeogrid.py" as defined at https://github.com/leiyangleon/Geogrid. When "winlocname" (grid location) is specified, each of the rest optional input ("win * name") can be either used or omitted.
+
+**Runtime comparison for this test (on an OS X system with 2.9GHz Intel Core i7 processor and 16GB RAM):**
+* __autoRIFT (single core): 10 mins__
+* __Dense ampcor from ISCE (8 cores): 90 mins__
+
+
+
+
+***Final motion velocity results by combining outputs from "Geogrid" (i.e. matrix of conversion coefficients from the Demo at https://github.com/leiyangleon/Geogrid) and "autoRIFT" modules: (a) estimated motion velocity from Sentinel-1 data (x-direction; in m/yr), (b) motion velocity from input data (x-direction; in m/yr), (c) estimated motion velocity from Sentinel-1 data (y-direction; in m/yr), (d) motion velocity from input data (y-direction; in m/yr). Notes: all maps are established exactly over the same geographic Cartesian (northing/easting) coordinate grid from input.***
+
+
+## 6. Instructions
+
+**Note:**
+
+* When the grid is provided in geographic Cartesian (northing/easting) coordinates (geographic coordinates lat/lon is not supported), it is required to run the "Geogrid" module (https://github.com/leiyangleon/Geogrid) first before running "autoRIFT". In other words, the outputs from "testGeogrid_ISCE.py" or "testGeogridOptical.py" (a.k.a "winlocname", "winoffname", "winsrname", "wincsminname", "wincsmaxname", "winro2vxname", "winro2vyname") will serve as optional inputs for running "autoRIFT" at a geographic grid, which is required to generate the final motion velocity maps.
+* When the outputs from running the "Geogrid" module are not provided, a regular grid in the imaging coordinates will be automatically assigned
+
+**For quick use:**
+
+* Refer to the file "testautoRIFT.py" (standalone) and "testautoRIFT_ISCE.py" (with ISCE) for the usage of the module and modify it for your own purpose
+* Input files include the reference image (required), test image (required), and the outputs from running "testGeogrid_ISCE.py" or "testGeogridOptical.py" (optional; a.k.a "winlocname", "winoffname", "winsrname", "wincsminname", "wincsmaxname", "winro2vxname", "winro2vyname"). When "winlocname" (grid location) is specified, each of the rest optional input ("win * name") can be either used or omitted.
+* Output files include 1) estimated horizontal displacement (equivalent to range for radar), 2) estimated vertical displacement (equivalent to minus azimuth for radar), 3) light interpolation mask, 4) iteratively progressive chip size used.
+
+_Note: These four output files will be stored in a file named "offset.mat" that can be viewed in Python and MATLAB. When the grid is provided in geographic Cartesian (northing/easting) coordinates, a 4-band GeoTIFF with the same EPSG code as input grid will be created as well and named "offset.tif"; a 2-band GeoTIFF of the final converted motion velocity in geographic x- (easting) and y- (northing) coordinates will be created and named "velocity.tif". Also, it is possible to save the outputs in netCDF standard format by adding the "-nc" option to the "testautoRIFT.py" (standalone) and "testautoRIFT_ISCE.py" (with ISCE) command._
+
+**For modular use:**
+
+* In Python environment, type the following to import the "autoRIFT" module and initialize the "autoRIFT" object
+
+_With ISCE:_
+
+ import isce
+ from contrib.geo_autoRIFT.autoRIFT import autoRIFT_ISCE
+ obj = autoRIFT_ISCE()
+ obj.configure()
+
+_Standalone:_
+
+ from autoRIFT import autoRIFT
+ obj = autoRIFT()
+
+* The "autoRIFT" object has several inputs that have to be assigned (listed below; can also be obtained by referring to "testautoRIFT.py"):
+
+ ------------------input------------------
+ I1 reference image (extracted image patches defined as "source")
+ I2 test image (extracted image patches defined as "template"; displacement = motion vector of I2 relative to I1)
+ xGrid [units = integer image pixels] horizontal pixel index at each grid point
+ yGrid [units = integer image pixels] vertical pixel index at each grid point
+ (if xGrid and yGrid not provided, a regular grid spanning the entire image will be automatically set up, which is similar to the conventional ISCE module, "ampcor" or "denseampcor")
+ Dx0 [units = integer image pixels] horizontal "downstream" reach location (that specifies the horizontal pixel displacement of the template's search center relative to the source's) at each grid point
+ Dy0 [units = integer image pixels] vertical "downstream" reach location (that specifies the vertical pixel displacement of the template's search center relative to the source's) at each grid point
+ (if Dx0 and Dy0 not provided, an array with zero values will be automatically assigned and there will be no offsets of the search centers)
+
+* After the inputs are specified, run the module as below
+
+ obj.preprocess_filt_XXX() or obj.preprocess_db()
+ obj.uniform_data_type()
+ obj.runAutorift()
+
+where "XXX" can be "wal" for the Wallis filter, "hps" for the trivial high-pass filter, "sob" for the Sobel filter, "lap" for the Laplacian filter, and also a logarithmic operator without filtering is adopted for occasions where low-frequency components (i.e. topography) are desired, i.e. "obj.preprocess_db()".
+
+* The "autoRIFT" object has the following four outputs:
+
+ ------------------output------------------
+ Dx [units = decimal image pixels] estimated horizontal pixel displacement
+ Dy [units = decimal image pixels] estimated vertical pixel displacement
+ InterpMask [unitless; boolean data type] light interpolation mask
+ ChipSizeX [units = integer image pixels] iteratively progressive chip size in horizontal direction (different chip size allowed for vertical)
+
+* The "autoRIFT" object has many parameters that can be flexibly tweaked by the users for their own purpose (listed below; can also be obtained by referring to "geo_autoRIFT/autoRIFT/autoRIFT_ISCE.py"):
+
+ ------------------parameter list: general function------------------
+ ChipSizeMinX [units = integer image pixels] Minimum size (in horizontal direction) of the template (chip) to correlate (default = 32; could be scalar or array with same dimension as xGrid)
+ ChipSizeMaxX [units = integer image pixels] Maximum size (in horizontal direction) of the template (chip) to correlate (default = 64; could be scalar or array with same dimension as xGrid)
+ ChipSize0X [units = integer image pixels] Minimum acceptable size (in horizontal direction) of the template (chip) to correlate (default = 32)
+ ScaleChipSizeY [unitless; integer data type] Scaling factor to get the vertically-directed chip size in reference to the horizontally-directed size (default = 1)
+ SearchLimitX [units = integer image pixels] Range (in horizontal direction) to search for displacement in the source (default = 25; could be scalar or array with same dimension as xGrid; when provided in array, set its elements to 0 if excluded for finding displacement)
+ SearchLimitY [units = integer image pixels] Range (in vertical direction) to search for displacement in the source (default = 25; could be scalar or array with same dimension as xGrid; when provided in array, set its elements to 0 if excluded for finding displacement)
+ SkipSampleX [units = integer image pixels] Number of samples to skip between search windows in horizontal direction for automatically-created grid if not specified by the user (default = 32)
+ SkipSampleY [units = integer image pixels] Number of lines to skip between search windows in vertical direction for automatically-created grid if not specified by the user (default = 32)
+ minSearch [units = integer image pixels] Minimum search limit (default = 6)
+
+ ------------------parameter list: about Normalized Displacement Coherence (NDC) filter ------------------
+ FracValid Fraction of valid displacements (default = 8/25) to be multiplied by filter window size "FiltWidth^2" and then used for thresholding the number of chip displacements that have passed the "FracSearch" disparity filtering
+ FracSearch Fraction of search limit used as threshold for disparity filtering of the chip displacement difference that is normalized by the search limit (default = 0.25)
+ FiltWidth Disparity filter width (default = 5)
+ Iter Number of iterations (default = 3)
+ MadScalar Scalar to be multiplied by Mad used as threshold for disparity filtering of the chip displacement deviation from the median (default = 4)
+
+ ------------------parameter list: miscellaneous------------------
+ WallisFilterWidth Width of the filter to be used for the preprocessing (default = 21)
+ fillFiltWidth Light interpolation filling filter width (default = 3)
+ sparseSearchSampleRate downsampling rate for sparse search (default = 4)
+ BuffDistanceC Buffer coarse correlation mask by this many pixels for use as fine search mask (default = 8)
+ CoarseCorCutoff Coarse correlation search cutoff (default = 0.01)
+ OverSampleRatio Factor for pyramid upsampling for sub-pixel level offset refinement (default = 16; can be scalar or Python dictionary for intelligent use, i.e. chip size-dependent, such as {ChipSize_1:OverSampleRatio_1,..., ChipSize_N:OverSampleRatio_N})
+ DataTypeInput Image data type: 0 -> uint8, 1 -> float32 (default = 0)
+ zeroMask Force the margin (no data) to zeros which is useful for Wallis-filter-preprocessed images (default = None; 1 for Wallis filter)
diff --git a/contrib/geo_autoRIFT/SConscript b/contrib/geo_autoRIFT/SConscript
new file mode 100755
index 0000000..e49f703
--- /dev/null
+++ b/contrib/geo_autoRIFT/SConscript
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Copyright 2019, by the California Institute of Technology. ALL RIGHTS RESERVED.
+# United States Government Sponsorship acknowledged. Any commercial use must be
+# negotiated with the Office of Technology Transfer at the California Institute of
+# Technology. This software is subject to U.S. export control laws and regulations
+# and has been classified as EAR99. By accepting this software, the user agrees to
+# comply with all applicable U.S. export laws and regulations. User has the
+# responsibility to obtain export licenses, or other export authority as may be
+# required before exporting such information to foreign countries or providing
+# access to foreign persons.
+#
+# Author: Yang Lei
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+
+
+import os
+Import('envcontrib')
+package = 'geo_autoRIFT'
+envgeoAutorift = envcontrib.Clone()
+envgeoAutorift['PACKAGE'] = envcontrib['PACKAGE'] + '/' + package
+install = envcontrib['PRJ_SCONS_INSTALL'] + '/' + envgeoAutorift['PACKAGE']
+listFiles = ['__init__.py']
+envgeoAutorift.Install(install,listFiles)
+envgeoAutorift.Alias('install',install)
+Export('envgeoAutorift')
+
+autorift='autoRIFT/SConscript'
+SConscript(autorift)
+
+geogrid='geogrid/SConscript'
+SConscript(geogrid)
diff --git a/contrib/geo_autoRIFT/__init__.py b/contrib/geo_autoRIFT/__init__.py
new file mode 100755
index 0000000..f363046
--- /dev/null
+++ b/contrib/geo_autoRIFT/__init__.py
@@ -0,0 +1,37 @@
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Copyright 2019, by the California Institute of Technology. ALL RIGHTS RESERVED.
+# United States Government Sponsorship acknowledged. Any commercial use must be
+# negotiated with the Office of Technology Transfer at the California Institute of
+# Technology. This software is subject to U.S. export control laws and regulations
+# and has been classified as EAR99. By accepting this software, the user agrees to
+# comply with all applicable U.S. export laws and regulations. User has the
+# responsibility to obtain export licenses, or other export authority as may be
+# required before exporting such information to foreign countries or providing
+# access to foreign persons.
+#
+# Author: Yang Lei
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+
+
+def createAutorift(name=''):
+ from contrib.geo_autoRIFT.Autorift import Autorift
+ return Autorift(name=name)
+
+def createGeogrid(name=''):
+ from contrib.geo_autoRIFT.Geogrid import Geogrid
+ return Geogrid(name=name)
+
+
+def getFactoriesInfo():
+ return {'Autorift':
+ {
+ 'factory':'createAutorift'
+ },
+ 'Geogrid':
+ {
+ 'factory':'createGeogrid'
+ }
+ }
diff --git a/contrib/geo_autoRIFT/autoRIFT/CMakeLists.txt b/contrib/geo_autoRIFT/autoRIFT/CMakeLists.txt
new file mode 100644
index 0000000..55ec5a0
--- /dev/null
+++ b/contrib/geo_autoRIFT/autoRIFT/CMakeLists.txt
@@ -0,0 +1,18 @@
+Python_add_library(autoriftcore MODULE
+ bindings/autoriftcoremodule.cpp
+ )
+target_include_directories(autoriftcore PRIVATE
+ include
+ ${OpenCV_INCLUDE_DIRS}
+ )
+target_link_libraries(autoriftcore PRIVATE
+ Python::NumPy
+ ${OpenCV_LIBS}
+ )
+
+InstallSameDir(
+ autoriftcore
+ __init__.py
+ autoRIFT_ISCE.py
+ autoRIFT.py
+ )
diff --git a/contrib/geo_autoRIFT/autoRIFT/SConscript b/contrib/geo_autoRIFT/autoRIFT/SConscript
new file mode 100755
index 0000000..a9099d9
--- /dev/null
+++ b/contrib/geo_autoRIFT/autoRIFT/SConscript
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Author: Yang Lei
+# Copyright 2019, by the California Institute of Technology. ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
+# Any commercial use must be negotiated with the Office of Technology Transfer at the California Institute of Technology.
+#
+# This software may be subject to U.S. export control laws. By accepting this software, the user agrees to comply with all applicable U.S.
+# export laws and regulations. User has the responsibility to obtain export licenses, or other export authority as may be required before
+# exporting such information to foreign countries or providing access to foreign persons.
+#
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+import os
+
+Import('envgeoAutorift')
+envautorift = envgeoAutorift.Clone()
+package = envautorift['PACKAGE']
+project = 'autoRIFT'
+envautorift['PROJECT'] = project
+install = envautorift['PRJ_SCONS_INSTALL'] + '/' + package + '/' + project
+initFile = '__init__.py'
+
+
+listFiles = ['autoRIFT.py','autoRIFT_ISCE.py',initFile]
+envautorift.Install(install,listFiles)
+envautorift.Alias('install',install)
+Export('envautorift')
+bindingsScons = 'bindings/SConscript'
+SConscript(bindingsScons,variant_dir = envautorift['PRJ_SCONS_BUILD'] + '/' + package + '/' + project + '/bindings')
+includeScons = 'include/SConscript'
+SConscript(includeScons)
+#srcScons = 'src/SConscript'
+#SConscript(srcScons,variant_dir = envautorift['PRJ_SCONS_BUILD'] + '/' + package + '/' + project + '/src')
diff --git a/contrib/geo_autoRIFT/autoRIFT/__init__.py b/contrib/geo_autoRIFT/autoRIFT/__init__.py
new file mode 100755
index 0000000..08a6351
--- /dev/null
+++ b/contrib/geo_autoRIFT/autoRIFT/__init__.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+#def SplitRangeSpectrum():
+# from .splitSpectrum import PySplitRangeSpectrum
+# return PySplitRangeSpectrum()
+
+# should always work - standalone or with ISCE
+from .autoRIFT import autoRIFT
+
+try:
+ from .autoRIFT_ISCE import autoRIFT_ISCE
+except ImportError:
+ # this means ISCE support not available. Don't raise error. Allow standalone use
+ pass
diff --git a/contrib/geo_autoRIFT/autoRIFT/autoRIFT.py b/contrib/geo_autoRIFT/autoRIFT/autoRIFT.py
new file mode 100755
index 0000000..976a29b
--- /dev/null
+++ b/contrib/geo_autoRIFT/autoRIFT/autoRIFT.py
@@ -0,0 +1,1093 @@
+#!/usr/bin/env python3
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Copyright 2019 California Institute of Technology. ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# United States Government Sponsorship acknowledged. This software is subject to
+# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
+# (No [Export] License Required except when exporting to an embargoed country,
+# end user, or in support of a prohibited end use). By downloading this software,
+# the user agrees to comply with all applicable U.S. export laws and regulations.
+# The user has the responsibility to obtain export licenses, or other export
+# authority as may be required before exporting this software to any 'EAR99'
+# embargoed foreign country or citizen of those countries.
+#
+# Author: Yang Lei
+#
+# Note: this is based on the MATLAB code, "auto-RIFT", written by Alex Gardner,
+# and has been translated to Python and further optimized.
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+import pdb
+import subprocess
+import re
+import string
+import sys
+
+
+
+class autoRIFT:
+ '''
+ Class for mapping regular geographic grid on radar imagery.
+ '''
+
+
+
    def preprocess_filt_wal(self):
        '''
        Do the pre processing using wallis filter (10 min vs 15 min in Matlab).

        Wallis filtering normalizes each image to zero local mean and unit
        local standard deviation over a square window of width
        self.WallisFilterWidth. Operates in place on self.I1 and self.I2.
        '''
        import cv2
        import numpy as np
#        import scipy.io as sio

        # zeroMask acts as a request flag on entry: when it is not None it is
        # replaced by the boolean mask of zero-valued pixels of I1, captured
        # before filtering changes those values.
        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)

        # Box (averaging) kernel used to compute the local moments.
        kernel = np.ones((self.WallisFilterWidth,self.WallisFilterWidth), dtype=np.float32)

        # Local mean of I1.
        m = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)

        # Local mean of I1**2 (second raw moment).
        m2 = (self.I1)**2

        m2 = cv2.filter2D(m2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)

        # Local standard deviation with the sample (N/(N-1)) bias correction.
        s = np.sqrt(m2 - m**2) * np.sqrt(np.sum(kernel)/(np.sum(kernel)-1.0))

        self.I1 = (self.I1 - m) / s

#        pdb.set_trace()

        # Same normalization applied to the second image.
        m = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)

        m2 = (self.I2)**2

        m2 = cv2.filter2D(m2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)

        s = np.sqrt(m2 - m**2) * np.sqrt(np.sum(kernel)/(np.sum(kernel)-1.0))

        self.I2 = (self.I2 - m) / s
+
+
+
+# #### obsolete definition of "preprocess_filt_hps"
+# def preprocess_filt_hps(self):
+# '''
+# Do the pre processing using (orig - low-pass filter) = high-pass filter filter (3.9/5.3 min).
+# '''
+# import cv2
+# import numpy as np
+#
+# if self.zeroMask is not None:
+# self.zeroMask = (self.I1 == 0)
+#
+# kernel = np.ones((self.WallisFilterWidth,self.WallisFilterWidth), dtype=np.float32)
+#
+# lp = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)
+#
+# self.I1 = (self.I1 - lp)
+#
+# lp = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)
+#
+# self.I2 = (self.I2 - lp)
+
+
+ def preprocess_filt_hps(self):
+ '''
+ Do the pre processing using (orig - low-pass filter) = high-pass filter filter (3.9/5.3 min).
+ '''
+ import cv2
+ import numpy as np
+
+
+ if self.zeroMask is not None:
+ self.zeroMask = (self.I1 == 0)
+
+ kernel = -np.ones((self.WallisFilterWidth,self.WallisFilterWidth), dtype=np.float32)
+
+ kernel[int((self.WallisFilterWidth-1)/2),int((self.WallisFilterWidth-1)/2)] = kernel.size - 1
+
+ kernel = kernel / kernel.size
+
+# pdb.set_trace()
+
+ self.I1 = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)
+
+ self.I2 = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT)
+
+
+
+ def preprocess_db(self):
+ '''
+ Do the pre processing using db scale (4 min).
+ '''
+ import cv2
+ import numpy as np
+
+
+ if self.zeroMask is not None:
+ self.zeroMask = (self.I1 == 0)
+
+# pdb.set_trace()
+
+ self.I1 = 20.0 * np.log10(self.I1)
+
+ self.I2 = 20.0 * np.log10(self.I2)
+
+
+
+
    def preprocess_filt_sob(self):
        '''
        Do the pre processing using sobel filter (4.5/5.8 min).

        Builds separable Sobel derivative kernels of aperture
        self.WallisFilterWidth for x and y, sums them into one kernel, and
        convolves both images with it in place.
        '''
        import cv2
        import numpy as np

        # zeroMask as a request flag: when not None, capture the zero-valued
        # pixels of I1 before filtering.
        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)

        # getDerivKernels returns the two 1-D separable filter components;
        # np.outer reconstitutes the full 2-D derivative kernel.
        sobelx = cv2.getDerivKernels(1,0,self.WallisFilterWidth)

        kernelx = np.outer(sobelx[0],sobelx[1])

        sobely = cv2.getDerivKernels(0,1,self.WallisFilterWidth)

        kernely = np.outer(sobely[0],sobely[1])

        # Combined x+y derivative response obtained in a single convolution.
        kernel = kernelx + kernely

        self.I1 = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)

        self.I2 = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT)
+
+
+
+
+
+
+
+
+
    def preprocess_filt_lap(self):
        '''
        Do the pre processing using Laplacian filter (2.5 min / 4 min).

        Converts both images to dB scale (20*log10) and then applies an
        OpenCV Laplacian of aperture self.WallisFilterWidth, in place.
        '''
        import cv2
        import numpy as np

        # zeroMask as a request flag: when not None, capture the zero-valued
        # pixels of I1 before the transform.
        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)

        self.I1 = 20.0 * np.log10(self.I1)
        self.I1 = cv2.Laplacian(self.I1,-1,ksize=self.WallisFilterWidth,borderType=cv2.BORDER_CONSTANT)

        self.I2 = 20.0 * np.log10(self.I2)
        self.I2 = cv2.Laplacian(self.I2,-1,ksize=self.WallisFilterWidth,borderType=cv2.BORDER_CONSTANT)
+
+
+
+
+
+
+
+
+
+ def uniform_data_type(self):
+
+ import numpy as np
+
+ if self.DataType == 0:
+# validData = np.logical_not(np.isnan(self.I1))
+ validData = np.isfinite(self.I1)
+ S1 = np.std(self.I1[validData])*np.sqrt(self.I1[validData].size/(self.I1[validData].size-1.0))
+ M1 = np.mean(self.I1[validData])
+ self.I1 = (self.I1 - (M1 - 3*S1)) / (6*S1) * (2**8 - 0)
+
+# self.I1[np.logical_not(np.isfinite(self.I1))] = 0
+ self.I1 = np.round(np.clip(self.I1, 0, 255)).astype(np.uint8)
+
+# validData = np.logical_not(np.isnan(self.I2))
+ validData = np.isfinite(self.I2)
+ S2 = np.std(self.I2[validData])*np.sqrt(self.I2[validData].size/(self.I2[validData].size-1.0))
+ M2 = np.mean(self.I2[validData])
+ self.I2 = (self.I2 - (M2 - 3*S2)) / (6*S2) * (2**8 - 0)
+
+# self.I2[np.logical_not(np.isfinite(self.I2))] = 0
+ self.I2 = np.round(np.clip(self.I2, 0, 255)).astype(np.uint8)
+
+ if self.zeroMask is not None:
+ self.I1[self.zeroMask] = 0
+ self.I2[self.zeroMask] = 0
+ self.zeroMask = None
+
+ elif self.DataType == 1:
+ self.I1[np.logical_not(np.isfinite(self.I1))] = 0
+ self.I2[np.logical_not(np.isfinite(self.I2))] = 0
+ self.I1 = self.I1.astype(np.float32)
+ self.I2 = self.I2.astype(np.float32)
+
+ if self.zeroMask is not None:
+ self.I1[self.zeroMask] = 0
+ self.I2[self.zeroMask] = 0
+ self.zeroMask = None
+
+ else:
+ sys.exit('invalid data type for the image pair which must be unsigned integer 8 or 32-bit float')
+
+
+ def autorift(self):
+ '''
+ Do the actual processing.
+ '''
+ import numpy as np
+ import cv2
+ from scipy import ndimage
+
+
+ ChipSizeUniX = np.unique(np.append(np.unique(self.ChipSizeMinX), np.unique(self.ChipSizeMaxX)))
+ ChipSizeUniX = np.delete(ChipSizeUniX,np.where(ChipSizeUniX == 0)[0])
+
+ if np.any(np.mod(ChipSizeUniX,self.ChipSize0X) != 0):
+ sys.exit('chip sizes must be even integers of ChipSize0')
+
+ ChipRangeX = self.ChipSize0X * np.array([1,2,4,8,16,32,64],np.float32)
+# ChipRangeX = ChipRangeX[ChipRangeX < (2**8 - 1)]
+ if np.max(ChipSizeUniX) > np.max(ChipRangeX):
+ sys.exit('max each chip size is out of range')
+
+ ChipSizeUniX = ChipRangeX[(ChipRangeX >= np.min(ChipSizeUniX)) & (ChipRangeX <= np.max(ChipSizeUniX))]
+
+ maxScale = np.max(ChipSizeUniX) / self.ChipSize0X
+
+ if (np.mod(self.xGrid.shape[0],maxScale) != 0)|(np.mod(self.xGrid.shape[1],maxScale) != 0):
+ message = 'xgrid and ygrid have an incorect size ' + str(self.xGrid.shape) + ' for nested search, they must have dimensions that an interger multiple of ' + str(maxScale)
+ sys.exit(message)
+
+ self.xGrid = self.xGrid.astype(np.float32)
+ self.yGrid = self.yGrid.astype(np.float32)
+
+ if np.size(self.Dx0) == 1:
+ self.Dx0 = np.ones(self.xGrid.shape, np.float32) * np.round(self.Dx0)
+ else:
+ self.Dx0 = self.Dx0.astype(np.float32)
+ if np.size(self.Dy0) == 1:
+ self.Dy0 = np.ones(self.xGrid.shape, np.float32) * np.round(self.Dy0)
+ else:
+ self.Dy0 = self.Dy0.astype(np.float32)
+ if np.size(self.SearchLimitX) == 1:
+ self.SearchLimitX = np.ones(self.xGrid.shape, np.float32) * np.round(self.SearchLimitX)
+ else:
+ self.SearchLimitX = self.SearchLimitX.astype(np.float32)
+ if np.size(self.SearchLimitY) == 1:
+ self.SearchLimitY = np.ones(self.xGrid.shape, np.float32) * np.round(self.SearchLimitY)
+ else:
+ self.SearchLimitY = self.SearchLimitY.astype(np.float32)
+ if np.size(self.ChipSizeMinX) == 1:
+ self.ChipSizeMinX = np.ones(self.xGrid.shape, np.float32) * np.round(self.ChipSizeMinX)
+ else:
+ self.ChipSizeMinX = self.ChipSizeMinX.astype(np.float32)
+ if np.size(self.ChipSizeMaxX) == 1:
+ self.ChipSizeMaxX = np.ones(self.xGrid.shape, np.float32) * np.round(self.ChipSizeMaxX)
+ else:
+ self.ChipSizeMaxX = self.ChipSizeMaxX.astype(np.float32)
+
+ ChipSizeX = np.zeros(self.xGrid.shape, np.float32)
+ InterpMask = np.zeros(self.xGrid.shape, np.bool)
+ Dx = np.empty(self.xGrid.shape, dtype=np.float32)
+ Dx.fill(np.nan)
+ Dy = np.empty(self.xGrid.shape, dtype=np.float32)
+ Dy.fill(np.nan)
+
+ Flag = 3
+
+ for i in range(ChipSizeUniX.__len__()):
+
+ # Nested grid setup: chip size being ChipSize0X no need to resize, otherwise has to resize the arrays
+ if self.ChipSize0X != ChipSizeUniX[i]:
+ Scale = self.ChipSize0X / ChipSizeUniX[i]
+ dstShape = (int(self.xGrid.shape[0]*Scale),int(self.xGrid.shape[1]*Scale))
+ xGrid0 = cv2.resize(self.xGrid.astype(np.float32),dstShape[::-1],interpolation=cv2.INTER_AREA)
+ yGrid0 = cv2.resize(self.yGrid.astype(np.float32),dstShape[::-1],interpolation=cv2.INTER_AREA)
+
+ if np.mod(ChipSizeUniX[i],2) == 0:
+ xGrid0 = np.round(xGrid0+0.5)-0.5
+ yGrid0 = np.round(yGrid0+0.5)-0.5
+ else:
+ xGrid0 = np.round(xGrid0)
+ yGrid0 = np.round(yGrid0)
+
+ M0 = (ChipSizeX == 0) & (self.ChipSizeMinX <= ChipSizeUniX[i]) & (self.ChipSizeMaxX >= ChipSizeUniX[i])
+ M0 = colfilt(M0.copy(), (int(1/Scale*6), int(1/Scale*6)), 0)
+ M0 = cv2.resize(np.logical_not(M0).astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+
+ SearchLimitX0 = colfilt(self.SearchLimitX.copy(), (int(1/Scale), int(1/Scale)), 0) + colfilt(self.Dx0.copy(), (int(1/Scale), int(1/Scale)), 4)
+ SearchLimitY0 = colfilt(self.SearchLimitY.copy(), (int(1/Scale), int(1/Scale)), 0) + colfilt(self.Dy0.copy(), (int(1/Scale), int(1/Scale)), 4)
+ Dx00 = colfilt(self.Dx0.copy(), (int(1/Scale), int(1/Scale)), 2)
+ Dy00 = colfilt(self.Dy0.copy(), (int(1/Scale), int(1/Scale)), 2)
+
+ SearchLimitX0 = np.ceil(cv2.resize(SearchLimitX0,dstShape[::-1]))
+ SearchLimitY0 = np.ceil(cv2.resize(SearchLimitY0,dstShape[::-1]))
+ SearchLimitX0[M0] = 0
+ SearchLimitY0[M0] = 0
+ Dx00 = np.round(cv2.resize(Dx00,dstShape[::-1],interpolation=cv2.INTER_NEAREST))
+ Dy00 = np.round(cv2.resize(Dy00,dstShape[::-1],interpolation=cv2.INTER_NEAREST))
+# pdb.set_trace()
+ else:
+ SearchLimitX0 = self.SearchLimitX.copy()
+ SearchLimitY0 = self.SearchLimitY.copy()
+ Dx00 = self.Dx0.copy()
+ Dy00 = self.Dy0.copy()
+ xGrid0 = self.xGrid.copy()
+ yGrid0 = self.yGrid.copy()
+# M0 = (ChipSizeX == 0) & (self.ChipSizeMinX <= ChipSizeUniX[i]) & (self.ChipSizeMaxX >= ChipSizeUniX[i])
+# SearchLimitX0[np.logical_not(M0)] = 0
+# SearchLimitY0[np.logical_not(M0)] = 0
+
+ if np.logical_not(np.any(SearchLimitX0 != 0)):
+ continue
+
+ idxZero = (SearchLimitX0 <= 0) | (SearchLimitY0 <= 0)
+ SearchLimitX0[idxZero] = 0
+ SearchLimitY0[idxZero] = 0
+ SearchLimitX0[(np.logical_not(idxZero)) & (SearchLimitX0 < self.minSearch)] = self.minSearch
+ SearchLimitY0[(np.logical_not(idxZero)) & (SearchLimitY0 < self.minSearch)] = self.minSearch
+
+ if ((xGrid0.shape[0] - 2)/self.sparseSearchSampleRate < 5) | ((xGrid0.shape[1] - 2)/self.sparseSearchSampleRate < 5):
+ Flag = 2
+ return Flag
+
+ # Setup for coarse search: sparse sampling / resize
+ rIdxC = slice(self.sparseSearchSampleRate-1,xGrid0.shape[0],self.sparseSearchSampleRate)
+ cIdxC = slice(self.sparseSearchSampleRate-1,xGrid0.shape[1],self.sparseSearchSampleRate)
+ xGrid0C = xGrid0[rIdxC,cIdxC]
+ yGrid0C = yGrid0[rIdxC,cIdxC]
+
+# pdb.set_trace()
+
+ if np.remainder(self.sparseSearchSampleRate,2) == 0:
+ filtWidth = self.sparseSearchSampleRate + 1
+ else:
+ filtWidth = self.sparseSearchSampleRate
+
+ SearchLimitX0C = colfilt(SearchLimitX0.copy(), (int(filtWidth), int(filtWidth)), 0)
+ SearchLimitY0C = colfilt(SearchLimitY0.copy(), (int(filtWidth), int(filtWidth)), 0)
+ SearchLimitX0C = SearchLimitX0C[rIdxC,cIdxC]
+ SearchLimitY0C = SearchLimitY0C[rIdxC,cIdxC]
+
+ Dx0C = Dx00[rIdxC,cIdxC]
+ Dy0C = Dy00[rIdxC,cIdxC]
+
+ # Coarse search
+ SubPixFlag = False
+ ChipSizeXC = ChipSizeUniX[i]
+ ChipSizeYC = np.float32(np.round(ChipSizeXC*self.ScaleChipSizeY/2)*2)
+
+ if type(self.OverSampleRatio) is dict:
+ overSampleRatio = self.OverSampleRatio[ChipSizeUniX[i]]
+ else:
+ overSampleRatio = self.OverSampleRatio
+
+# pdb.set_trace()
+
+ if self.I1.dtype == np.uint8:
+ DxC, DyC = arImgDisp_u(self.I2.copy(), self.I1.copy(), xGrid0C.copy(), yGrid0C.copy(), ChipSizeXC, ChipSizeYC, SearchLimitX0C.copy(), SearchLimitY0C.copy(), Dx0C.copy(), Dy0C.copy(), SubPixFlag, overSampleRatio)
+ elif self.I1.dtype == np.float32:
+ DxC, DyC = arImgDisp_s(self.I2.copy(), self.I1.copy(), xGrid0C.copy(), yGrid0C.copy(), ChipSizeXC, ChipSizeYC, SearchLimitX0C.copy(), SearchLimitY0C.copy(), Dx0C.copy(), Dy0C.copy(), SubPixFlag, overSampleRatio)
+ else:
+ sys.exit('invalid data type for the image pair which must be unsigned integer 8 or 32-bit float')
+
+# pdb.set_trace()
+
+ # M0C is the mask for reliable estimates after coarse search, MC is the mask after disparity filtering, MC2 is the mask after area closing for fine search
+ M0C = np.logical_not(np.isnan(DxC))
+
+ DispFiltC = DISP_FILT()
+ if ChipSizeUniX[i] == ChipSizeUniX[0]:
+ DispFiltC.FracValid = self.FracValid
+ DispFiltC.FracSearch = self.FracSearch
+ DispFiltC.FiltWidth = self.FiltWidth
+ DispFiltC.Iter = self.Iter
+ DispFiltC.MadScalar = self.MadScalar
+ DispFiltC.Iter = DispFiltC.Iter - 1
+ MC = DispFiltC.filtDisp(DxC.copy(), DyC.copy(), SearchLimitX0C.copy(), SearchLimitY0C.copy(), M0C.copy(), overSampleRatio)
+
+ MC[np.logical_not(M0C)] = False
+
+ ROIC = (SearchLimitX0C > 0)
+ CoarseCorValidFac = np.sum(MC[ROIC]) / np.sum(M0C[ROIC])
+ if (CoarseCorValidFac < self.CoarseCorCutoff):
+ continue
+
+ MC2 = ndimage.distance_transform_edt(np.logical_not(MC)) < self.BuffDistanceC
+ dstShape = (int(MC2.shape[0]*self.sparseSearchSampleRate),int(MC2.shape[1]*self.sparseSearchSampleRate))
+
+ MC2 = cv2.resize(MC2.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+# pdb.set_trace()
+ if np.logical_not(np.all(MC2.shape == SearchLimitX0.shape)):
+ rowAdd = SearchLimitX0.shape[0] - MC2.shape[0]
+ colAdd = SearchLimitX0.shape[1] - MC2.shape[1]
+ if rowAdd>0:
+ MC2 = np.append(MC2,MC2[-rowAdd:,:],axis=0)
+ if colAdd>0:
+ MC2 = np.append(MC2,MC2[:,-colAdd:],axis=1)
+
+ SearchLimitX0[np.logical_not(MC2)] = 0
+ SearchLimitY0[np.logical_not(MC2)] = 0
+
+ # Fine Search
+ SubPixFlag = True
+ ChipSizeXF = ChipSizeUniX[i]
+ ChipSizeYF = np.float32(np.round(ChipSizeXF*self.ScaleChipSizeY/2)*2)
+# pdb.set_trace()
+ if self.I1.dtype == np.uint8:
+ DxF, DyF = arImgDisp_u(self.I2.copy(), self.I1.copy(), xGrid0.copy(), yGrid0.copy(), ChipSizeXF, ChipSizeYF, SearchLimitX0.copy(), SearchLimitY0.copy(), Dx00.copy(), Dy00.copy(), SubPixFlag, overSampleRatio)
+ elif self.I1.dtype == np.float32:
+ DxF, DyF = arImgDisp_s(self.I2.copy(), self.I1.copy(), xGrid0.copy(), yGrid0.copy(), ChipSizeXF, ChipSizeYF, SearchLimitX0.copy(), SearchLimitY0.copy(), Dx00.copy(), Dy00.copy(), SubPixFlag, overSampleRatio)
+ else:
+ sys.exit('invalid data type for the image pair which must be unsigned integer 8 or 32-bit float')
+
+# pdb.set_trace()
+
+ DispFiltF = DISP_FILT()
+ if ChipSizeUniX[i] == ChipSizeUniX[0]:
+ DispFiltF.FracValid = self.FracValid
+ DispFiltF.FracSearch = self.FracSearch
+ DispFiltF.FiltWidth = self.FiltWidth
+ DispFiltF.Iter = self.Iter
+ DispFiltF.MadScalar = self.MadScalar
+
+
+ M0 = DispFiltF.filtDisp(DxF.copy(), DyF.copy(), SearchLimitX0.copy(), SearchLimitY0.copy(), np.logical_not(np.isnan(DxF)), overSampleRatio)
+# pdb.set_trace()
+ DxF[np.logical_not(M0)] = np.nan
+ DyF[np.logical_not(M0)] = np.nan
+
+ # Light interpolation with median filtered values: DxFM (filtered) and DxF (unfiltered)
+ DxFM = colfilt(DxF.copy(), (self.fillFiltWidth, self.fillFiltWidth), 3)
+ DyFM = colfilt(DyF.copy(), (self.fillFiltWidth, self.fillFiltWidth), 3)
+
+ # M0 is mask for original valid estimates, MF is mask for filled ones, MM is mask where filtered ones exist for filling
+ MF = np.zeros(M0.shape, dtype=np.bool)
+ MM = np.logical_not(np.isnan(DxFM))
+
+ for j in range(3):
+ foo = MF | M0 # initial valid estimates
+ foo1 = (cv2.filter2D(foo.astype(np.float32),-1,np.ones((3,3)),borderType=cv2.BORDER_CONSTANT) >= 6) | foo # 1st area closing followed by the 2nd (part of the next line calling OpenCV)
+# pdb.set_trace()
+ fillIdx = np.logical_not(bwareaopen(np.logical_not(foo1).astype(np.uint8), 5)) & np.logical_not(foo) & MM
+ MF[fillIdx] = True
+ DxF[fillIdx] = DxFM[fillIdx]
+ DyF[fillIdx] = DyFM[fillIdx]
+
+ # Below is for replacing the valid estimates with the bicubic filtered values for robust and accurate estimation
+ if self.ChipSize0X == ChipSizeUniX[i]:
+ Dx = DxF
+ Dy = DyF
+ ChipSizeX[M0|MF] = ChipSizeUniX[i]
+ InterpMask[MF] = True
+# pdb.set_trace()
+ else:
+# pdb.set_trace()
+ Scale = ChipSizeUniX[i] / self.ChipSize0X
+ dstShape = (int(Dx.shape[0]/Scale),int(Dx.shape[1]/Scale))
+
+ # DxF0 (filtered) / Dx (unfiltered) is the result from earlier iterations, DxFM (filtered) / DxF (unfiltered) is that of the current iteration
+ # first colfilt nans within 2-by-2 area (otherwise 1 nan will contaminate all 4 points)
+ DxF0 = colfilt(Dx.copy(),(int(Scale+1),int(Scale+1)),2)
+ # then resize to half size using area (similar to averaging) to match the current iteration
+ DxF0 = cv2.resize(DxF0,dstShape[::-1],interpolation=cv2.INTER_AREA)
+ DyF0 = colfilt(Dy.copy(),(int(Scale+1),int(Scale+1)),2)
+ DyF0 = cv2.resize(DyF0,dstShape[::-1],interpolation=cv2.INTER_AREA)
+
+ # Note this DxFM is almost the same as DxFM (same variable) in the light interpolation (only slightly better); however, only small portion of it will be used later at locations specified by M0 and MF that are determined in the light interpolation. So even without the following two lines, the final Dx and Dy result is still the same.
+ # to fill out all of the missing values in DxF
+ DxFM = colfilt(DxF.copy(), (5,5), 3)
+ DyFM = colfilt(DyF.copy(), (5,5), 3)
+
+ # fill the current-iteration result with previously determined reliable estimates that are not searched in the current iteration
+ idx = np.isnan(DxF) & np.logical_not(np.isnan(DxF0))
+ DxFM[idx] = DxF0[idx]
+ DyFM[idx] = DyF0[idx]
+
+ # Strong interpolation: use filtered estimates wherever the unfiltered estimates do not exist
+ idx = np.isnan(DxF) & np.logical_not(np.isnan(DxFM))
+ DxF[idx] = DxFM[idx]
+ DyF[idx] = DyFM[idx]
+
+ dstShape = (Dx.shape[0],Dx.shape[1])
+ DxF = cv2.resize(DxF,dstShape[::-1],interpolation=cv2.INTER_CUBIC)
+ DyF = cv2.resize(DyF,dstShape[::-1],interpolation=cv2.INTER_CUBIC)
+ MF = cv2.resize(MF.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+ M0 = cv2.resize(M0.astype(np.uint8),dstShape[::-1],interpolation=cv2.INTER_NEAREST).astype(np.bool)
+
+ idxRaw = M0 & (ChipSizeX == 0)
+ idxFill = MF & (ChipSizeX == 0)
+ ChipSizeX[idxRaw | idxFill] = ChipSizeUniX[i]
+ InterpMask[idxFill] = True
+ Dx[idxRaw | idxFill] = DxF[idxRaw | idxFill]
+ Dy[idxRaw | idxFill] = DyF[idxRaw | idxFill]
+
+ Flag = 1
+ ChipSizeY = np.round(ChipSizeX * self.ScaleChipSizeY /2) * 2
+ self.Dx = Dx
+ self.Dy = Dy
+ self.InterpMask = InterpMask
+ self.Flag = Flag
+ self.ChipSizeX = ChipSizeX
+ self.ChipSizeY = ChipSizeY
+
+
+
+
+
+
+
+
+ def runAutorift(self):
+ '''
+ quick processing routine which calls autorift main function (user can define their own way by mimicing the workflow here).
+ '''
+ import numpy as np
+
+
+ # truncate the grid to fit the nested grid
+ if np.size(self.ChipSizeMaxX) == 1:
+ chopFactor = self.ChipSizeMaxX / self.ChipSize0X
+ else:
+ chopFactor = np.max(self.ChipSizeMaxX) / self.ChipSize0X
+ rlim = int(np.floor(self.xGrid.shape[0] / chopFactor) * chopFactor)
+ clim = int(np.floor(self.xGrid.shape[1] / chopFactor) * chopFactor)
+ self.origSize = self.xGrid.shape
+# pdb.set_trace()
+ self.xGrid = np.round(self.xGrid[0:rlim,0:clim]) + 0.5
+ self.yGrid = np.round(self.yGrid[0:rlim,0:clim]) + 0.5
+
+ # truncate the initial offset as well if they exist
+ if np.size(self.Dx0) != 1:
+ self.Dx0 = self.Dx0[0:rlim,0:clim]
+ self.Dy0 = self.Dy0[0:rlim,0:clim]
+
+ # truncate the search limits as well if they exist
+ if np.size(self.SearchLimitX) != 1:
+ self.SearchLimitX = self.SearchLimitX[0:rlim,0:clim]
+ self.SearchLimitY = self.SearchLimitY[0:rlim,0:clim]
+
+ # truncate the chip sizes as well if they exist
+ if np.size(self.ChipSizeMaxX) != 1:
+ self.ChipSizeMaxX = self.ChipSizeMaxX[0:rlim,0:clim]
+ self.ChipSizeMinX = self.ChipSizeMinX[0:rlim,0:clim]
+
+ # call autoRIFT main function
+ self.autorift()
+
+
+
+
+
+ def __init__(self):
+
+ super(autoRIFT, self).__init__()
+
+ ##Input related parameters
+ self.I1 = None
+ self.I2 = None
+ self.xGrid = None
+ self.yGrid = None
+ self.Dx0 = 0
+ self.Dy0 = 0
+ self.origSize = None
+ self.zeroMask = None
+
+ ##Output file
+ self.Dx = None
+ self.Dy = None
+ self.InterpMask = None
+ self.Flag = None
+ self.ChipSizeX = None
+ self.ChipSizeY = None
+
+ ##Parameter list
+ self.WallisFilterWidth = 21
+ self.ChipSizeMinX = 32
+ self.ChipSizeMaxX = 64
+ self.ChipSize0X = 32
+ self.ScaleChipSizeY = 1
+ self.SearchLimitX = 25
+ self.SearchLimitY = 25
+ self.SkipSampleX = 32
+ self.SkipSampleY = 32
+ self.fillFiltWidth = 3
+ self.minSearch = 6
+ self.sparseSearchSampleRate = 4
+ self.FracValid = 8/25
+ self.FracSearch = 0.25
+ self.FiltWidth = 5
+ self.Iter = 3
+ self.MadScalar = 4
+ self.BuffDistanceC = 8
+ self.CoarseCorCutoff = 0.01
+ self.OverSampleRatio = 16
+ self.DataType = 0
+
+
+
+
+
+
class AUTO_RIFT_CORE:
    """Thin holder for the opaque handle to the C++ autoriftcore object."""

    def __init__(self):
        # Pointer to the C/C++ core; populated via
        # autoriftcore.createAutoRiftCore_Py() by the arImgDisp_* drivers.
        self._autoriftcore = None
+
+
+
def arImgDisp_u(I1, I2, xGrid, yGrid, ChipSizeX, ChipSizeY, SearchLimitX, SearchLimitY, Dx0, Dy0, SubPixFlag, oversample):
    '''
    Pixel (or sub-pixel, when SubPixFlag is set) displacement search for a
    uint8 image pair, backed by the C++ autoriftcore module.

    Chips extracted from I2 at (xGrid, yGrid) with initial offsets
    (Dx0, Dy0) are matched within search windows of I1. Returns (Dx, Dy)
    in cartesian convention (y positive up); unmatched points are NaN.

    Fixes: np.int (removed in NumPy 1.24) replaced by the builtin int it
    aliased; np.lib.pad replaced by the documented np.pad; a dead
    "destroy if not None" check on a freshly constructed core removed.
    '''
    import numpy as np
    from . import autoriftcore

    # Fresh handle to the C++ core; released before returning.
    core = AUTO_RIFT_CORE()
    core._autoriftcore = autoriftcore.createAutoRiftCore_Py()

    # Input validation: grid-shaped parameters must be float32 (scalar or
    # array) and within sane ranges.
    if np.size(SearchLimitX) == 1:
        if np.logical_not(isinstance(SearchLimitX,np.float32) & isinstance(SearchLimitY,np.float32)):
            sys.exit('SearchLimit must be float')
    else:
        if np.logical_not((SearchLimitX.dtype == np.float32) & (SearchLimitY.dtype == np.float32)):
            sys.exit('SearchLimit must be float')

    if np.size(Dx0) == 1:
        if np.logical_not(isinstance(Dx0,np.float32) & isinstance(Dy0,np.float32)):
            sys.exit('Search offsets must be float')
    else:
        if np.logical_not((Dx0.dtype == np.float32) & (Dy0.dtype == np.float32)):
            sys.exit('Search offsets must be float')

    if np.size(ChipSizeX) == 1:
        if np.logical_not(isinstance(ChipSizeX,np.float32) & isinstance(ChipSizeY,np.float32)):
            sys.exit('ChipSize must be float')
    else:
        if np.logical_not((ChipSizeX.dtype == np.float32) & (ChipSizeY.dtype == np.float32)):
            sys.exit('ChipSize must be float')

    if np.any(np.mod(ChipSizeX,2) != 0) | np.any(np.mod(ChipSizeY,2) != 0):
        sys.exit('it is better to have ChipSize = even number')

    if np.any(np.mod(SearchLimitX,1) != 0) | np.any(np.mod(SearchLimitY,1) != 0):
        sys.exit('SearchLimit must be an integar value')

    if np.any(SearchLimitX < 0) | np.any(SearchLimitY < 0):
        sys.exit('SearchLimit cannot be negative')

    if np.any(np.mod(ChipSizeX,4) != 0) | np.any(np.mod(ChipSizeY,4) != 0):
        sys.exit('ChipSize should be evenly divisible by 4')

    # Broadcast any scalar parameter to a full float32 grid.
    if np.size(Dx0) == 1:
        Dx0 = np.ones(xGrid.shape, dtype=np.float32) * Dx0

    if np.size(Dy0) == 1:
        Dy0 = np.ones(xGrid.shape, dtype=np.float32) * Dy0

    if np.size(SearchLimitX) == 1:
        SearchLimitX = np.ones(xGrid.shape, dtype=np.float32) * SearchLimitX

    if np.size(SearchLimitY) == 1:
        SearchLimitY = np.ones(xGrid.shape, dtype=np.float32) * SearchLimitY

    if np.size(ChipSizeX) == 1:
        ChipSizeX = np.ones(xGrid.shape, dtype=np.float32) * ChipSizeX

    if np.size(ChipSizeY) == 1:
        ChipSizeY = np.ones(xGrid.shape, dtype=np.float32) * ChipSizeY

    # convert from cartesian X-Y to matrix X-Y: X no change, Y from up being positive to down being positive
    Dy0 = -Dy0

    # Pad both images so every chip/search window stays in bounds.
    SLx_max = np.max(SearchLimitX + np.abs(Dx0))
    Px = int(np.max(ChipSizeX)/2 + SLx_max + 2)
    SLy_max = np.max(SearchLimitY + np.abs(Dy0))
    Py = int(np.max(ChipSizeY)/2 + SLy_max + 2)

    I1 = np.pad(I1,((Py,Py),(Px,Px)),'constant')
    I2 = np.pad(I2,((Py,Py),(Px,Px)),'constant')

    # adjust center location by the padarray size and 0.5 is added because we need to extract the chip centered at X+1 with -chipsize/2:chipsize/2-1, which equivalently centers at X+0.5 (X is the original grid point location). So for even chipsize, always returns offset estimates at (X+0.5).
    xGrid += (Px + 0.5)
    yGrid += (Py + 0.5)

    Dx = np.empty(xGrid.shape,dtype=np.float32)
    Dx.fill(np.nan)
    Dy = Dx.copy()


    for jj in range(xGrid.shape[1]):
        if np.all(SearchLimitX[:,jj] == 0) & np.all(SearchLimitY[:,jj] == 0):
            continue
        # Column views: writes to Dx1/Dy1 land directly in Dx/Dy.
        Dx1 = Dx[:,jj]
        Dy1 = Dy[:,jj]
        for ii in range(xGrid.shape[0]):
            if (SearchLimitX[ii,jj] == 0) & (SearchLimitY[ii,jj] == 0):
                continue

            # remember motion terms Dx and Dy correspond to I1 relative to I2 (reference)
            clx = np.floor(ChipSizeX[ii,jj]/2)
            ChipRangeX = slice(int(-clx - Dx0[ii,jj] + xGrid[ii,jj]) , int(clx - Dx0[ii,jj] + xGrid[ii,jj]))
            cly = np.floor(ChipSizeY[ii,jj]/2)
            ChipRangeY = slice(int(-cly - Dy0[ii,jj] + yGrid[ii,jj]) , int(cly - Dy0[ii,jj] + yGrid[ii,jj]))
            ChipI = I2[ChipRangeY,ChipRangeX]

            SearchRangeX = slice(int(-clx - SearchLimitX[ii,jj] + xGrid[ii,jj]) , int(clx + SearchLimitX[ii,jj] - 1 + xGrid[ii,jj]))
            SearchRangeY = slice(int(-cly - SearchLimitY[ii,jj] + yGrid[ii,jj]) , int(cly + SearchLimitY[ii,jj] - 1 + yGrid[ii,jj]))
            RefI = I1[SearchRangeY,SearchRangeX]

            # Shift to non-negative values, then skip flat (information-free)
            # chips or reference windows.
            minChipI = np.min(ChipI)
            if minChipI < 0:
                ChipI = ChipI - minChipI
            if np.all(ChipI == ChipI[0,0]):
                continue

            minRefI = np.min(RefI)
            if minRefI < 0:
                RefI = RefI - minRefI
            if np.all(RefI == RefI[0,0]):
                continue

            if SubPixFlag:
                # call C++
                Dx1[ii], Dy1[ii] = np.float32(autoriftcore.arSubPixDisp_u_Py(core._autoriftcore,ChipI.shape[1],ChipI.shape[0],ChipI.ravel(),RefI.shape[1],RefI.shape[0],RefI.ravel(),oversample))
            else:
                # call C++
                Dx1[ii], Dy1[ii] = np.float32(autoriftcore.arPixDisp_u_Py(core._autoriftcore,ChipI.shape[1],ChipI.shape[0],ChipI.ravel(),RefI.shape[1],RefI.shape[0],RefI.ravel()))


    # add back 1) I1 (RefI) relative to I2 (ChipI) initial offset Dx0 and Dy0, and
    # 2) RefI relative to ChipI has a left/top boundary offset of -SearchLimitX and -SearchLimitY
    idx = np.logical_not(np.isnan(Dx))
    Dx[idx] += (Dx0[idx] - SearchLimitX[idx])
    Dy[idx] += (Dy0[idx] - SearchLimitY[idx])

    # convert from matrix X-Y to cartesian X-Y: X no change, Y from down being positive to up being positive
    Dy = -Dy

    autoriftcore.destroyAutoRiftCore_Py(core._autoriftcore)
    core._autoriftcore = None

    return Dx, Dy
+
+
+
+
+
+
+def arImgDisp_s(I1, I2, xGrid, yGrid, ChipSizeX, ChipSizeY, SearchLimitX, SearchLimitY, Dx0, Dy0, SubPixFlag, oversample):
+
+ import numpy as np
+ from . import autoriftcore
+
+ core = AUTO_RIFT_CORE()
+ if core._autoriftcore is not None:
+ autoriftcore.destroyAutoRiftCore_Py(core._autoriftcore)
+
+ core._autoriftcore = autoriftcore.createAutoRiftCore_Py()
+
+
+# if np.size(I1) == 1:
+# if np.logical_not(isinstance(I1,np.uint8) & isinstance(I2,np.uint8)):
+# sys.exit('input images must be uint8')
+# else:
+# if np.logical_not((I1.dtype == np.uint8) & (I2.dtype == np.uint8)):
+# sys.exit('input images must be uint8')
+
+ if np.size(SearchLimitX) == 1:
+ if np.logical_not(isinstance(SearchLimitX,np.float32) & isinstance(SearchLimitY,np.float32)):
+ sys.exit('SearchLimit must be float')
+ else:
+ if np.logical_not((SearchLimitX.dtype == np.float32) & (SearchLimitY.dtype == np.float32)):
+ sys.exit('SearchLimit must be float')
+
+ if np.size(Dx0) == 1:
+ if np.logical_not(isinstance(Dx0,np.float32) & isinstance(Dy0,np.float32)):
+ sys.exit('Search offsets must be float')
+ else:
+ if np.logical_not((Dx0.dtype == np.float32) & (Dy0.dtype == np.float32)):
+ sys.exit('Search offsets must be float')
+
+ if np.size(ChipSizeX) == 1:
+ if np.logical_not(isinstance(ChipSizeX,np.float32) & isinstance(ChipSizeY,np.float32)):
+ sys.exit('ChipSize must be float')
+ else:
+ if np.logical_not((ChipSizeX.dtype == np.float32) & (ChipSizeY.dtype == np.float32)):
+ sys.exit('ChipSize must be float')
+
+
+
+ if np.any(np.mod(ChipSizeX,2) != 0) | np.any(np.mod(ChipSizeY,2) != 0):
+# if np.any(np.mod(xGrid-0.5,1) == 0) & np.any(np.mod(yGrid-0.5,1) == 0):
+# sys.exit('for an even chip size ImgDisp returns displacements centered at pixel boundaries so xGrid and yGrid must an inter - 1/2 [example: if you want the velocity centered between pixel (1,1) and (2,2) then specify a grid center of (1.5, 1.5)]')
+# else:
+# xGrid = np.ceil(xGrid)
+# yGrid = np.ceil(yGrid)
+ sys.exit('it is better to have ChipSize = even number')
+
+ if np.any(np.mod(SearchLimitX,1) != 0) | np.any(np.mod(SearchLimitY,1) != 0):
+ sys.exit('SearchLimit must be an integar value')
+
+ if np.any(SearchLimitX < 0) | np.any(SearchLimitY < 0):
+ sys.exit('SearchLimit cannot be negative')
+
+ if np.any(np.mod(ChipSizeX,4) != 0) | np.any(np.mod(ChipSizeY,4) != 0):
+ sys.exit('ChipSize should be evenly divisible by 4')
+
+ if np.size(Dx0) == 1:
+ Dx0 = np.ones(xGrid.shape, dtype=np.float32) * Dx0
+
+ if np.size(Dy0) == 1:
+ Dy0 = np.ones(xGrid.shape, dtype=np.float32) * Dy0
+
+ if np.size(SearchLimitX) == 1:
+ SearchLimitX = np.ones(xGrid.shape, dtype=np.float32) * SearchLimitX
+
+ if np.size(SearchLimitY) == 1:
+ SearchLimitY = np.ones(xGrid.shape, dtype=np.float32) * SearchLimitY
+
+ if np.size(ChipSizeX) == 1:
+ ChipSizeX = np.ones(xGrid.shape, dtype=np.float32) * ChipSizeX
+
+ if np.size(ChipSizeY) == 1:
+ ChipSizeY = np.ones(xGrid.shape, dtype=np.float32) * ChipSizeY
+
+ # convert from cartesian X-Y to matrix X-Y: X no change, Y from up being positive to down being positive
+ Dy0 = -Dy0
+
+ SLx_max = np.max(SearchLimitX + np.abs(Dx0))
+ Px = np.int(np.max(ChipSizeX)/2 + SLx_max + 2)
+ SLy_max = np.max(SearchLimitY + np.abs(Dy0))
+ Py = np.int(np.max(ChipSizeY)/2 + SLy_max + 2)
+
+ I1 = np.lib.pad(I1,((Py,Py),(Px,Px)),'constant')
+ I2 = np.lib.pad(I2,((Py,Py),(Px,Px)),'constant')
+
+    # adjust the center location by the pad size; 0.5 is added because we extract the chip centered at X+1 using -chipsize/2:chipsize/2-1, which is equivalent to centering at X+0.5 (X is the original grid point location). So for an even chip size, offset estimates are always returned at (X+0.5).
+ xGrid += (Px + 0.5)
+ yGrid += (Py + 0.5)
+
+ Dx = np.empty(xGrid.shape,dtype=np.float32)
+ Dx.fill(np.nan)
+ Dy = Dx.copy()
+
+
+ for jj in range(xGrid.shape[1]):
+ if np.all(SearchLimitX[:,jj] == 0) & np.all(SearchLimitY[:,jj] == 0):
+ continue
+ Dx1 = Dx[:,jj]
+ Dy1 = Dy[:,jj]
+ for ii in range(xGrid.shape[0]):
+ if (SearchLimitX[ii,jj] == 0) & (SearchLimitY[ii,jj] == 0):
+ continue
+
+ # remember motion terms Dx and Dy correspond to I1 relative to I2 (reference)
+ clx = np.floor(ChipSizeX[ii,jj]/2)
+ ChipRangeX = slice(int(-clx - Dx0[ii,jj] + xGrid[ii,jj]) , int(clx - Dx0[ii,jj] + xGrid[ii,jj]))
+ cly = np.floor(ChipSizeY[ii,jj]/2)
+ ChipRangeY = slice(int(-cly - Dy0[ii,jj] + yGrid[ii,jj]) , int(cly - Dy0[ii,jj] + yGrid[ii,jj]))
+ ChipI = I2[ChipRangeY,ChipRangeX]
+
+ SearchRangeX = slice(int(-clx - SearchLimitX[ii,jj] + xGrid[ii,jj]) , int(clx + SearchLimitX[ii,jj] - 1 + xGrid[ii,jj]))
+ SearchRangeY = slice(int(-cly - SearchLimitY[ii,jj] + yGrid[ii,jj]) , int(cly + SearchLimitY[ii,jj] - 1 + yGrid[ii,jj]))
+ RefI = I1[SearchRangeY,SearchRangeX]
+
+ minChipI = np.min(ChipI)
+ if minChipI < 0:
+ ChipI = ChipI - minChipI
+ if np.all(ChipI == ChipI[0,0]):
+ continue
+
+ minRefI = np.min(RefI)
+ if minRefI < 0:
+ RefI = RefI - minRefI
+ if np.all(RefI == RefI[0,0]):
+ continue
+
+
+ if SubPixFlag:
+ # call C++
+ Dx1[ii], Dy1[ii] = np.float32(autoriftcore.arSubPixDisp_s_Py(core._autoriftcore,ChipI.shape[1],ChipI.shape[0],ChipI.ravel(),RefI.shape[1],RefI.shape[0],RefI.ravel(), oversample))
+# # call Python
+# Dx1[ii], Dy1[ii] = arSubPixDisp(ChipI,RefI)
+ else:
+ # call C++
+ Dx1[ii], Dy1[ii] = np.float32(autoriftcore.arPixDisp_s_Py(core._autoriftcore,ChipI.shape[1],ChipI.shape[0],ChipI.ravel(),RefI.shape[1],RefI.shape[0],RefI.ravel()))
+# # call Python
+# Dx1[ii], Dy1[ii] = arPixDisp(ChipI,RefI)
+
+
+    # add back 1) the initial offset (Dx0, Dy0) of I1 (RefI) relative to I2 (ChipI), and
+    # 2) the left/top boundary offset of RefI relative to ChipI, which is (-SearchLimitX, -SearchLimitY)
+ idx = np.logical_not(np.isnan(Dx))
+ Dx[idx] += (Dx0[idx] - SearchLimitX[idx])
+ Dy[idx] += (Dy0[idx] - SearchLimitY[idx])
+
+ # convert from matrix X-Y to cartesian X-Y: X no change, Y from down being positive to up being positive
+ Dy = -Dy
+
+ autoriftcore.destroyAutoRiftCore_Py(core._autoriftcore)
+ core._autoriftcore = None
+
+ return Dx, Dy
+
+
+
+
+def colfilt(A, kernelSize, option):
+
+ from skimage.util import view_as_windows as viewW
+ import numpy as np
+
+ A = np.lib.pad(A,((int((kernelSize[0]-1)/2),int((kernelSize[0]-1)/2)),(int((kernelSize[1]-1)/2),int((kernelSize[1]-1)/2))),mode='constant',constant_values=np.nan)
+
+ B = viewW(A, kernelSize).reshape(-1,kernelSize[0]*kernelSize[1]).T[:,::1]
+
+ output_size = (A.shape[0]-kernelSize[0]+1,A.shape[1]-kernelSize[1]+1)
+ C = np.zeros(output_size,dtype=A.dtype)
+ if option == 0:# max
+ C = np.nanmax(B,axis=0).reshape(output_size)
+ elif option == 1:# min
+ C = np.nanmin(B,axis=0).reshape(output_size)
+ elif option == 2:# mean
+ C = np.nanmean(B,axis=0).reshape(output_size)
+ elif option == 3:# median
+ C = np.nanmedian(B,axis=0).reshape(output_size)
+ elif option == 4:# range
+ C = np.nanmax(B,axis=0).reshape(output_size) - np.nanmin(B,axis=0).reshape(output_size)
+ elif option == 6:# MAD (Median Absolute Deviation)
+ m = B.shape[0]
+ D = np.abs(B - np.dot(np.ones((m,1),dtype=A.dtype), np.array([np.nanmedian(B,axis=0)])))
+ C = np.nanmedian(D,axis=0).reshape(output_size)
+ elif option[0] == 5:# displacement distance count with option[1] being the threshold
+ m = B.shape[0]
+ c = int(np.round((m + 1) / 2)-1)
+# c = 0
+ D = np.abs(B - np.dot(np.ones((m,1),dtype=A.dtype), np.array([B[c,:]])))
+ C = np.sum(D