[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P # pre-release
+ [-_\.]?
+ (?P(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P[0-9]+)?
+ )?
+ (?P # post release
+ (?:-(?P[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?Ppost|rev|r)
+ [-_\.]?
+ (?P[0-9]+)?
+ )
+ )?
+ (?P # dev release
+ [-_\.]?
+ (?Pdev)
+ [-_\.]?
+ (?P[0-9]+)?
+ )?
+ )
+ (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
class Version(_BaseVersion):
    """A parsed, normalized PEP 440 version.

    Parses *version* against ``VERSION_PATTERN``, stores the pieces in a
    ``_Version`` namedtuple and precomputes the comparison key consumed by
    the ``_BaseVersion`` rich-comparison operators.

    Raises
    ------
    InvalidVersion
        If *version* is not a valid PEP 440 version string.
    """

    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # A post release is spelled either "1.0-1" (post_n1) or
                # "1.0.post1"/"1.0.rev1" (post_n2); at most one matches.
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        # Fix: the format string had been mangled to "" so repr() returned
        # an empty string; restore the upstream "<Version('...')>" form.
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        """Return the normalized string form, e.g. ``1!1.0rc1.post2+local``."""
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release, rendered as letter+number, e.g. "a0", "rc1"
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        """The public version: everything before the local "+" segment."""
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        """Epoch and release segment only, without pre/post/dev/local parts."""
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        """The local version segment (text after "+"), or None if absent."""
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        """True when the version carries a dev or pre-release segment."""
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        """True when the version carries a post-release segment."""
        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+ if letter:
+ # We assume there is an implicit 0 in a pre-release if there is
+ # no numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower-case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume that if we are given a number but not given a letter,
+ # then this is using the implicit post release syntax (e.g., 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_seperators.split(local)
+ )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+ # When we compare a release version, we want to compare it with all of the
+ # trailing zeros removed. So we'll use a reverse the list, drop all the now
+ # leading zeros until we come to something non-zero, then take the rest,
+ # re-reverse it back into the correct order, and make it a tuple and use
+ # that for our sorting key.
+ release = tuple(
+ reversed(list(
+ itertools.dropwhile(
+ lambda x: x == 0,
+ reversed(release),
+ )
+ ))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre-segment, but we _only_ want to do this
+ # if there is no pre- or a post-segment. If we have one of those, then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ pre = -Infinity
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ pre = Infinity
+
+ # Versions without a post-segment should sort before those with one.
+ if post is None:
+ post = -Infinity
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ dev = Infinity
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ local = -Infinity
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alphanumeric segments sort before numeric segments
+ # - Alphanumeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ local = tuple(
+ (i, "") if isinstance(i, int) else (-Infinity, i)
+ for i in local
+ )
+
+ return epoch, release, pre, post, dev, local
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_ccallback.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_ccallback.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..3d86bfd
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_ccallback.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_deprecation_call.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_deprecation_call.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..e6632f1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_deprecation_call.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_deprecation_def.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_deprecation_def.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..0581d48
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_test_deprecation_def.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_testutils.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_testutils.py
new file mode 100644
index 0000000..df529d8
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_testutils.py
@@ -0,0 +1,143 @@
+"""
+Generic test utilities.
+
+"""
+
+import os
+import re
+import sys
+
+
+__all__ = ['PytestTester', 'check_free_memory']
+
+
class FPUModeChangeWarning(RuntimeWarning):
    """Warning about FPU mode change"""
    # NOTE(review): nothing in this module emits this warning; presumably
    # raised by FPU-mode checks elsewhere in the test machinery -- confirm.
    pass
+
+
class PytestTester(object):
    """
    Pytest test runner entry point.

    An instance wraps a dotted module name; calling it runs that module's
    test suite through ``pytest.main`` and reports success as a bool.
    """

    def __init__(self, module_name):
        # Dotted name of the module whose tests will be run.
        self.module_name = module_name

    def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
                 coverage=False, tests=None, parallel=None):
        """Run the tests; return True when pytest exits successfully."""
        import pytest

        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        args = ['--showlocals', '--tb=short']

        if doctests:
            raise ValueError("Doctests not supported")

        if extra_argv:
            args.extend(extra_argv)

        if verbose and int(verbose) > 1:
            # verbose=2 -> "-v", verbose=3 -> "-vv", ...
            args.append("-" + "v"*(int(verbose)-1))

        if coverage:
            args.append("--cov=" + module_path)

        if label == "fast":
            args.extend(["-m", "not slow"])
        elif label != "full":
            # Any other label is passed through as a pytest marker.
            args.extend(["-m", label])

        if tests is None:
            tests = [self.module_name]

        if parallel is not None and parallel > 1:
            if _pytest_has_xdist():
                args.extend(['-n', str(parallel)])
            else:
                import warnings
                warnings.warn('Could not run tests in parallel because '
                              'pytest-xdist plugin is not available.')

        args += ['--pyargs'] + list(tests)

        try:
            exit_code = pytest.main(args)
        except SystemExit as exc:
            # Older pytest versions may raise SystemExit instead of
            # returning the code.
            exit_code = exc.code

        return (exit_code == 0)
+
+
+def _pytest_has_xdist():
+ """
+ Check if the pytest-xdist plugin is installed, providing parallel tests
+ """
+ # Check xdist exists without importing, otherwise pytests emits warnings
+ from importlib.util import find_spec
+ return find_spec('xdist') is not None
+
+
def check_free_memory(free_mb):
    """
    Skip the current test (``pytest.skip``) unless *free_mb* megabytes of
    memory are available.  The ``SCIPY_AVAILABLE_MEM`` environment variable
    overrides automatic detection.
    """
    import pytest

    env_value = os.environ.get('SCIPY_AVAILABLE_MEM')
    if env_value is not None:
        mem_free = _parse_size(env_value)
        msg = '{0} MB memory required, but environment SCIPY_AVAILABLE_MEM={1}'.format(
            free_mb, env_value)
    else:
        mem_free = _get_mem_available()
        if mem_free is None:
            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
                        "variable to free memory in MB to run the test.")
        msg = '{0} MB memory required, but {1} MB available'.format(
            free_mb, mem_free/1e6)

    if mem_free < free_mb * 1e6:
        pytest.skip(msg)
+
+
+def _parse_size(size_str):
+ suffixes = {'': 1e6,
+ 'b': 1.0,
+ 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
+ 'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
+ 'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
+ m = re.match(r'^\s*(\d+)\s*({0})\s*$'.format('|'.join(suffixes.keys())),
+ size_str,
+ re.I)
+ if not m or m.group(2) not in suffixes:
+ raise ValueError("Invalid size string")
+
+ return float(m.group(1)) * suffixes[m.group(2)]
+
+
+def _get_mem_available():
+ """
+ Get information about memory available, not counting swap.
+ """
+ try:
+ import psutil
+ return psutil.virtual_memory().available
+ except (ImportError, AttributeError):
+ pass
+
+ if sys.platform.startswith('linux'):
+ info = {}
+ with open('/proc/meminfo', 'r') as f:
+ for line in f:
+ p = line.split()
+ info[p[0].strip(':').lower()] = float(p[1]) * 1e3
+
+ if 'memavailable' in info:
+ # Linux >= 3.14
+ return info['memavailable']
+ else:
+ return info['memfree'] + info['cached']
+
+ return None
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_threadsafety.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_threadsafety.py
new file mode 100644
index 0000000..692aea4
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_threadsafety.py
@@ -0,0 +1,58 @@
+import threading
+
+import scipy._lib.decorator
+
+
+__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
+
+
class ReentrancyError(RuntimeError):
    """Raised by ReentrancyLock when a thread re-enters a lock it holds."""
    pass
+
+
class ReentrancyLock(object):
    """
    A threading lock that forbids re-entry.

    Distinct threads are serialized as with an ordinary lock, but a nested
    acquisition by the thread already holding the lock raises
    ReentrancyError instead of succeeding.

    Usable as a context manager, or as a function decorator through the
    decorate() method.
    """

    def __init__(self, err_msg):
        self._rlock = threading.RLock()
        self._entered = False
        self._err_msg = err_msg

    def __enter__(self):
        self._rlock.acquire()
        if not self._entered:
            self._entered = True
            return
        # Same thread re-entered: undo the acquire and fail loudly.
        self._rlock.release()
        raise ReentrancyError(self._err_msg)

    def __exit__(self, type, value, traceback):
        self._entered = False
        self._rlock.release()

    def decorate(self, func):
        # Run every call of func under this lock; decorator.decorate keeps
        # the wrapped function's signature intact.
        def caller(func, *a, **kw):
            with self:
                return func(*a, **kw)
        return scipy._lib.decorator.decorate(func, caller)
+
+
def non_reentrant(err_msg=None):
    """
    Decorate a function with a threading lock and prevent reentrant calls.

    When *err_msg* is None a default message based on the function name
    is used.
    """
    def decorator(func):
        if err_msg is None:
            message = "%s is not re-entrant" % func.__name__
        else:
            message = err_msg
        return ReentrancyLock(message).decorate(func)
    return decorator
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_tmpdirs.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_tmpdirs.py
new file mode 100644
index 0000000..0f9fd54
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_tmpdirs.py
@@ -0,0 +1,86 @@
+''' Contexts for *with* statement providing temporary directories
+'''
+import os
+from contextlib import contextmanager
+from shutil import rmtree
+from tempfile import mkdtemp
+
+
@contextmanager
def tempdir():
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager.

    Upon exiting the context, the directory and everything contained
    in it are removed — also when the body raises, since the cleanup now
    runs in a ``finally`` block (previously an exception leaked the
    directory).

    Examples
    --------
    >>> import os
    >>> with tempdir() as tmpdir:
    ...     fname = os.path.join(tmpdir, 'example_file.txt')
    ...     with open(fname, 'wt') as fobj:
    ...         _ = fobj.write('a string\\n')
    >>> os.path.exists(tmpdir)
    False
    """
    d = mkdtemp()
    try:
        yield d
    finally:
        # Fix: without try/finally an exception in the with-body skipped
        # rmtree and leaked the temporary directory.
        rmtree(d)
+
+
@contextmanager
def in_tempdir():
    ''' Create, return, and change directory to a temporary directory

    The previous working directory is restored and the temporary directory
    removed on exit — also when the body raises, since cleanup now runs in
    a ``finally`` block (previously an exception left the process chdir'd
    into a leaked directory).

    Examples
    --------
    >>> import os
    >>> my_cwd = os.getcwd()
    >>> with in_tempdir() as tmpdir:
    ...     _ = open('test.txt', 'wt').write('some text')
    ...     assert os.path.isfile('test.txt')
    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
    >>> os.path.exists(tmpdir)
    False
    >>> os.getcwd() == my_cwd
    True
    '''
    pwd = os.getcwd()
    d = mkdtemp()
    try:
        os.chdir(d)
        yield d
    finally:
        # Restore the old cwd before deleting the directory we are in.
        os.chdir(pwd)
        rmtree(d)
+
+
@contextmanager
def in_dir(dir=None):
    """ Change directory to given directory for duration of ``with`` block

    Useful when you want to use `in_tempdir` for the final test, but
    you are still debugging: unlike `in_tempdir` nothing is deleted, so
    temporary outputs survive for inspection.  With no argument the
    current directory is used and no chdir happens at all:

    >>> with in_dir() as tmpdir:  # Use working directory by default
    ...     # do something complicated which might break
    ...     pass

    Once debugged, replace ``in_dir`` with ``in_tempdir`` again.

    The original working directory is now restored even when the body
    raises (``finally`` block); previously an exception left the process
    in *dir*.
    """
    cwd = os.getcwd()
    if dir is None:
        # No directory change requested; nothing to restore.
        yield cwd
        return
    os.chdir(dir)
    try:
        yield dir
    finally:
        os.chdir(cwd)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/LICENSE b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/LICENSE
new file mode 100644
index 0000000..5f2b90a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2018, Quansight-Labs
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/__init__.py
new file mode 100644
index 0000000..4cb4f79
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/__init__.py
@@ -0,0 +1,117 @@
+"""
+.. note:
+ If you are looking for overrides for NumPy-specific methods, see the
+ documentation for :obj:`unumpy`. This page explains how to write
+ back-ends and multimethods.
+
+``uarray`` is built around a back-end protocol and overridable multimethods.
+It is necessary to define multimethods for back-ends to be able to override them.
+See the documentation of :obj:`generate_multimethod` on how to write multimethods.
+
+
+
+Let's start with the simplest:
+
``__ua_domain__`` defines the back-end *domain*. The domain is a period-
separated string naming the modules you extend plus the submodule. For
+example, if a submodule ``module2.submodule`` extends ``module1``
+(i.e., it exposes dispatchables marked as types available in ``module1``),
+then the domain string should be ``"module1.module2.submodule"``.
+
+
+For the purpose of this demonstration, we'll be creating an object and setting
+its attributes directly. However, note that you can use a module or your own type
+as a backend as well.
+
+>>> class Backend: pass
+>>> be = Backend()
+>>> be.__ua_domain__ = "ua_examples"
+
+It might be useful at this point to sidetrack to the documentation of
+:obj:`generate_multimethod` to find out how to generate a multimethod
+overridable by :obj:`uarray`. Needless to say, writing a backend and
+creating multimethods are mostly orthogonal activities, and knowing
+one doesn't necessarily require knowledge of the other, although it
+is certainly helpful. We expect core API designers/specifiers to write the
+multimethods, and implementors to override them. But, as is often the case,
+similar people write both.
+
+Without further ado, here's an example multimethod:
+
+>>> import uarray as ua
+>>> from uarray import Dispatchable
+>>> def override_me(a, b):
+... return Dispatchable(a, int),
+>>> def override_replacer(args, kwargs, dispatchables):
+... return (dispatchables[0], args[1]), {}
+>>> overridden_me = ua.generate_multimethod(
+... override_me, override_replacer, "ua_examples"
+... )
+
+Next comes the part about overriding the multimethod. This requires
+the ``__ua_function__`` protocol, and the ``__ua_convert__``
+protocol. The ``__ua_function__`` protocol has the signature
+``(method, args, kwargs)`` where ``method`` is the passed
+multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables``
+is the list of converted dispatchables passed in.
+
+>>> def __ua_function__(method, args, kwargs):
+... return method.__name__, args, kwargs
+>>> be.__ua_function__ = __ua_function__
+
+The other protocol of interest is the ``__ua_convert__`` protocol. It has the
+signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
+between the formats should ideally be an ``O(1)`` operation, but it means that
+no memory copying should be involved, only views of the existing data.
+
+>>> def __ua_convert__(dispatchables, coerce):
+... for d in dispatchables:
+... if d.type is int:
+... if coerce and d.coercible:
+... yield str(d.value)
+... else:
+... yield d.value
+>>> be.__ua_convert__ = __ua_convert__
+
+Now that we have defined the backend, the next thing to do is to call the multimethod.
+
+>>> with ua.set_backend(be):
+... overridden_me(1, "2")
+('override_me', (1, '2'), {})
+
+Note that the marked type has no effect on the actual type of the passed object.
+We can also coerce the type of the input.
+
+>>> with ua.set_backend(be, coerce=True):
+... overridden_me(1, "2")
+... overridden_me(1.0, "2")
+('override_me', ('1', '2'), {})
+('override_me', ('1.0', '2'), {})
+
+Another feature is that if you remove ``__ua_convert__``, the arguments are not
+converted at all and it's up to the backend to handle that.
+
+>>> del be.__ua_convert__
+>>> with ua.set_backend(be):
+... overridden_me(1, "2")
+('override_me', (1, '2'), {})
+
+You also have the option to return ``NotImplemented``, in which case processing moves on
+to the next back-end, which, in this case, doesn't exist. The same applies to
+``__ua_convert__``.
+
+>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
+>>> with ua.set_backend(be):
+... overridden_me(1, "2")
+Traceback (most recent call last):
+ ...
+uarray.backend.BackendNotImplementedError: ...
+
+The last possibility is if we don't have ``__ua_convert__``, in which case the job is left
+up to ``__ua_function__``, but putting things back into arrays after conversion will not be
+possible.
+"""
+
+from ._backend import *
+
+__version__ = '0.5.1+49.g4c3f1d7.scipy'
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/_backend.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/_backend.py
new file mode 100644
index 0000000..af3c899
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/_backend.py
@@ -0,0 +1,426 @@
+import typing
+import inspect
+import functools
+from . import _uarray # type: ignore
+import copyreg # type: ignore
+import atexit
+import pickle
+
+ArgumentExtractorType = typing.Callable[..., typing.Tuple["Dispatchable", ...]]
+ArgumentReplacerType = typing.Callable[
+ [typing.Tuple, typing.Dict, typing.Tuple], typing.Tuple[typing.Tuple, typing.Dict]
+]
+
+from ._uarray import ( # type: ignore
+ BackendNotImplementedError,
+ _Function,
+ _SkipBackendContext,
+ _SetBackendContext,
+)
+
+__all__ = [
+ "set_backend",
+ "set_global_backend",
+ "skip_backend",
+ "register_backend",
+ "clear_backends",
+ "create_multimethod",
+ "generate_multimethod",
+ "_Function",
+ "BackendNotImplementedError",
+ "Dispatchable",
+ "wrap_single_convertor",
+ "all_of_type",
+ "mark_as",
+]
+
+
def unpickle_function(mod_name, qname):
    """Look up a function by module and qualified name (pickle helper).

    Raises pickle.UnpicklingError when the module or attribute is missing.
    """
    import importlib

    try:
        module = importlib.import_module(mod_name)
        return getattr(module, qname)
    except (ImportError, AttributeError) as e:
        from pickle import UnpicklingError

        raise UnpicklingError from e
+
+
def pickle_function(func):
    """copyreg reducer: pickle a function as its (module, qualname) pair.

    Refuses to pickle objects that would not round-trip back to the same
    function object.
    """
    mod_name = getattr(func, "__module__", None)
    qname = getattr(func, "__qualname__", None)

    try:
        roundtripped = unpickle_function(mod_name, qname)
    except pickle.UnpicklingError:
        roundtripped = None

    if roundtripped is not func:
        raise pickle.PicklingError(
            "Can't pickle {}: it's not the same object as {}".format(func, roundtripped)
        )

    return unpickle_function, (mod_name, qname)
+
+
+copyreg.pickle(_Function, pickle_function)
+atexit.register(_uarray.clear_all_globals)
+
+
def create_multimethod(*args, **kwargs):
    """
    Return a decorator that builds a multimethod from an argument extractor.

    All positional and keyword arguments other than the argument extractor
    itself are forwarded to :obj:`generate_multimethod`.

    See Also
    --------
    generate_multimethod
        Generates a multimethod.
    """

    def wrapper(extractor):
        return generate_multimethod(extractor, *args, **kwargs)

    return wrapper
+
+
def generate_multimethod(
    argument_extractor: ArgumentExtractorType,
    argument_replacer: ArgumentReplacerType,
    domain: str,
    default: typing.Optional[typing.Callable] = None,
):
    """
    Generates a multimethod.

    Parameters
    ----------
    argument_extractor : ArgumentExtractorType
        A callable with the multimethod's own signature that returns the
        dispatchable arguments, each wrapped in a :obj:`Dispatchable`.
    argument_replacer : ArgumentReplacerType
        A callable ``(args, kwargs, dispatchables) -> (args, kwargs)`` that
        re-inserts the (possibly converted) dispatchables into the
        argument lists.
    domain : str
        A string value indicating the domain of this multimethod.
    default : Optional[Callable], optional
        The default implementation, tried when no backend accepts the
        call; ``None`` (the default) means there is no fallback.

    Returns
    -------
    A ``_Function`` multimethod carrying the extractor's metadata (name,
    docstring, ...) via ``functools.update_wrapper``.

    See Also
    --------
    uarray
        See the module documentation for how to override the method by
        creating backends.
    """
    kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
    multimethod = _Function(
        argument_extractor,
        argument_replacer,
        domain,
        arg_defaults,
        kw_defaults,
        default,
    )

    return functools.update_wrapper(multimethod, argument_extractor)
+
+
def set_backend(backend, coerce=False, only=False):
    """
    A context manager that sets the preferred backend.

    Parameters
    ----------
    backend
        The backend to set.
    coerce
        Whether or not to coerce to a specific backend's types. Implies ``only``.
    only
        Whether or not this should be the last backend to try.

    See Also
    --------
    skip_backend: A context manager that allows skipping of backends.
    set_global_backend: Set a single, global backend for a domain.
    """
    key = ("set", coerce, only)
    try:
        # Contexts are cached per backend so repeated calls with the same
        # options reuse a single context object.
        return backend.__ua_cache__[key]
    except AttributeError:
        backend.__ua_cache__ = {}
    except KeyError:
        pass

    ctx = _SetBackendContext(backend, coerce, only)
    backend.__ua_cache__[key] = ctx
    return ctx
+
+
def skip_backend(backend):
    """
    A context manager that allows one to skip a given backend from processing
    entirely. This allows one to use another backend's code in a library that
    is also a consumer of the same backend.

    Parameters
    ----------
    backend
        The backend to skip.

    See Also
    --------
    set_backend: A context manager that allows setting of backends.
    set_global_backend: Set a single, global backend for a domain.
    """
    try:
        # One skip-context per backend, cached on the backend object.
        return backend.__ua_cache__["skip"]
    except AttributeError:
        backend.__ua_cache__ = {}
    except KeyError:
        pass

    ctx = _SkipBackendContext(backend)
    backend.__ua_cache__["skip"] = ctx
    return ctx
+
+
def get_defaults(f):
    """Collect default-value information from *f*'s signature.

    Returns ``(kw_defaults, arg_defaults, opts)``: a dict of every
    defaulted parameter, a tuple of defaults for positional parameters
    only, and the set of positional parameter names with defaults.
    """
    kw_defaults = {}
    arg_defaults = []
    opts = set()
    positional_kinds = (
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
    )
    for name, param in inspect.signature(f).parameters.items():
        if param.default is inspect.Parameter.empty:
            continue
        kw_defaults[name] = param.default
        if param.kind in positional_kinds:
            arg_defaults.append(param.default)
            opts.add(name)

    return kw_defaults, tuple(arg_defaults), opts
+
+
def set_global_backend(backend, coerce=False, only=False):
    """
    Replace the default backend for permanent use.

    The global backend is tried automatically in the list of backends,
    unless the ``only`` flag is set on a backend; it is the first backend
    tried outside a :obj:`set_backend` context manager.

    Note that this method is not thread-safe.

    .. warning::
        Library authors should not use this function in their code — that
        use-case is unsupported. It is meant for end users, or for a
        reference implementation if one exists.

    Parameters
    ----------
    backend
        The backend to register.

    See Also
    --------
    set_backend: A context manager that allows setting of backends.
    skip_backend: A context manager that allows skipping of backends.
    """
    _uarray.set_global_backend(backend, coerce, only)
+
+
def register_backend(backend):
    """
    Register a backend for permanent use.

    Registered backends are tried automatically in the list of backends,
    unless the ``only`` flag is set on a backend.

    Note that this method is not thread-safe.

    Parameters
    ----------
    backend
        The backend to register.
    """
    _uarray.register_backend(backend)
+
+
def clear_backends(domain, registered=True, globals=False):
    """
    De-register backends previously installed for *domain*.

    .. warning::
        Library authors should not use this function in their code — that
        use-case is unsupported. It is meant for end users only.

    .. warning::
        Do NOT use this method inside a multimethod call, or the
        program is likely to crash.

    Parameters
    ----------
    domain : Optional[str]
        The domain for which to de-register backends. ``None`` means
        de-register for all domains.
    registered : bool
        Whether or not to clear registered backends. See :obj:`register_backend`.
    globals : bool
        Whether or not to clear global backends. See :obj:`set_global_backend`.

    See Also
    --------
    register_backend : Register a backend globally.
    set_global_backend : Set a global backend.
    """
    _uarray.clear_backends(domain, registered, globals)
+
+
class Dispatchable:
    """
    A utility class which marks an argument with a specific dispatch type.

    Attributes
    ----------
    value
        The wrapped value.
    type
        The dispatch type the value is marked as.
    coercible
        Whether a backend may coerce the value (default True).

    See Also
    --------
    all_of_type
        Marks all unmarked parameters of a function.
    mark_as
        Allows one to create a utility function to mark as a given type.
    """

    def __init__(self, value, dispatch_type, coercible=True):
        self.value = value
        self.type = dispatch_type
        self.coercible = coercible

    def __getitem__(self, index):
        # Behave like the (type, value) pair for tuple-style access.
        pair = (self.type, self.value)
        return pair[index]

    def __str__(self):
        return "<{0}: type={1!r}, value={2!r}>".format(
            type(self).__name__, self.type, self.value
        )

    __repr__ = __str__
+
+
def mark_as(dispatch_type):
    """
    Create a helper that marks values as a given dispatch type.

    ``mark_as(int)(1)`` is equivalent to ``Dispatchable(1, int)``.
    """
    return functools.partial(Dispatchable, dispatch_type=dispatch_type)
+
+
def all_of_type(arg_type):
    """
    Decorator for argument extractors: every returned value that is not
    already a :obj:`Dispatchable` gets wrapped as
    ``Dispatchable(value, arg_type)``.
    """

    def outer(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            extracted = func(*args, **kwargs)
            return tuple(
                arg if isinstance(arg, Dispatchable)
                else Dispatchable(arg, arg_type)
                for arg in extracted
            )

        return inner

    return outer
+
+
def wrap_single_convertor(convert_single):
    """
    Lift a per-element ``__ua_convert__`` to operate on a whole sequence
    of dispatchables.  If any element converts to ``NotImplemented``, the
    whole operation is ``NotImplemented``.

    ``convert_single`` has the signature ``(value, type, coerce)``.
    """

    @functools.wraps(convert_single)
    def __ua_convert__(dispatchables, coerce):
        results = []
        for d in dispatchables:
            # Coercion is only attempted when both the caller requests it
            # and the dispatchable allows it.
            converted = convert_single(d.value, d.type, coerce and d.coercible)
            if converted is NotImplemented:
                return NotImplemented
            results.append(converted)

        return results

    return __ua_convert__
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/_uarray.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/_uarray.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..5ec401d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/_uarray.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/setup.py
new file mode 100644
index 0000000..e002ec9
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_uarray/setup.py
@@ -0,0 +1,30 @@
+
def pre_build_hook(build_ext, ext):
    """Adjust C++ compiler flags for the ``_uarray`` extension before build."""
    from scipy._build_utils.compiler_helper import (
        set_cxx_flags_hook, try_add_flag)
    cc = build_ext._cxx_compiler
    args = ext.extra_compile_args

    # Apply SciPy's common C++ flag defaults first.
    set_cxx_flags_hook(build_ext, ext)

    if cc.compiler_type == 'msvc':
        # MSVC needs /EHsc for standard C++ exception handling.
        args.append('/EHsc')
    else:
        # Hide symbols by default; only add the flag if the compiler takes it.
        try_add_flag(args, cc, '-fvisibility=hidden')
+
+
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``scipy._lib._uarray`` package."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('_uarray', parent_package, top_path)
    config.add_data_files('LICENSE')
    ext = config.add_extension('_uarray',
                               sources=['_uarray_dispatch.cxx'],
                               language='c++')
    # numpy.distutils invokes this hook on the extension before compiling.
    ext._pre_build_hook = pre_build_hook
    return config
+
+
# Allow building this subpackage standalone.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_util.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_util.py
new file mode 100644
index 0000000..1c35a8e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/_util.py
@@ -0,0 +1,478 @@
+import functools
+import operator
+import sys
+import warnings
+import numbers
+from collections import namedtuple
+import inspect
+import math
+
+import numpy as np
+
+try:
+ from numpy.random import Generator as Generator
+except ImportError:
+ class Generator(): # type: ignore[no-redef]
+ pass
+
+
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).

    Examples
    --------

    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan, nan, 21., 32.])

    Notice, it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.

    """
    # Exactly one of `fillvalue` / `f2` may be given: `f2` computes the
    # values where `cond` is False, otherwise `fillvalue` fills them.
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        else:
            fillvalue = np.nan
    else:
        if f2 is not None:
            raise ValueError("Only one of (fillvalue, f2) can be given.")

    arrays = np.broadcast_arrays(*arrays)
    # Evaluate f only on the elements where `cond` holds.
    temp = tuple(np.extract(cond, arr) for arr in arrays)
    # Smallest dtype able to represent all the input dtypes.
    tcode = np.mintypecode([a.dtype.char for a in arrays])
    out = np.full(np.shape(arrays[0]), fill_value=fillvalue, dtype=tcode)
    np.place(out, cond, f(*temp))
    if f2 is not None:
        temp = tuple(np.extract(~cond, arr) for arr in arrays)
        np.place(out, ~cond, f2(*temp))

    return out
+
+
+def _lazyselect(condlist, choicelist, arrays, default=0):
+ """
+ Mimic `np.select(condlist, choicelist)`.
+
+ Notice, it assumes that all `arrays` are of the same shape or can be
+ broadcasted together.
+
+ All functions in `choicelist` must accept array arguments in the order
+ given in `arrays` and must return an array of the same shape as broadcasted
+ `arrays`.
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> np.select([x <3, x > 3], [x**2, x**3], default=0)
+ array([ 0, 1, 4, 0, 64, 125])
+
+ >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
+ array([ 0., 1., 4., 0., 64., 125.])
+
+ >>> a = -np.ones_like(x)
+ >>> _lazyselect([x < 3, x > 3],
+ ... [lambda x, a: x**2, lambda x, a: a * x**3],
+ ... (x, a), default=np.nan)
+ array([ 0., 1., 4., nan, -64., -125.])
+
+ """
+ arrays = np.broadcast_arrays(*arrays)
+ tcode = np.mintypecode([a.dtype.char for a in arrays])
+ out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
+ for index in range(len(condlist)):
+ func, cond = choicelist[index], condlist[index]
+ if np.all(cond is False):
+ continue
+ cond, _ = np.broadcast_arrays(cond, arrays[0])
+ temp = tuple(np.extract(cond, arr) for arr in arrays)
+ np.place(out, cond, func(*temp))
+ return out
+
+
def _aligned_zeros(shape, dtype=float, order="C", align=None):
    """Allocate a new ndarray with aligned memory.

    Primary use case for this currently is working around a f2py issue
    in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
    not necessarily create arrays aligned up to it.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the array to allocate.
    dtype : dtype, optional
        Element type; its ``alignment`` is the default alignment.
    order : {'C', 'F'}, optional
        Memory layout of the result.
    align : int, optional
        Byte alignment for the data pointer; defaults to ``dtype.alignment``.
    """
    dtype = np.dtype(dtype)
    if align is None:
        align = dtype.alignment
    if not hasattr(shape, '__len__'):
        shape = (shape,)
    size = functools.reduce(operator.mul, shape) * dtype.itemsize
    # Over-allocate so that an offset with the requested alignment always
    # exists inside the buffer.
    buf = np.empty(size + align + 1, np.uint8)
    offset = buf.__array_interface__['data'][0] % align
    if offset != 0:
        offset = align - offset
    # Note: slices producing 0-size arrays do not necessarily change
    # data pointer --- so we use and allocate size+1
    buf = buf[offset:offset+size+1][:-1]
    data = np.ndarray(shape, dtype, buf, order=order)
    data.fill(0)
    return data
+
+
+def _prune_array(array):
+ """Return an array equivalent to the input array. If the input
+ array is a view of a much larger array, copy its contents to a
+ newly allocated array. Otherwise, return the input unchanged.
+ """
+ if array.base is not None and array.size < array.base.size // 2:
+ return array.copy()
+ return array
+
+
def prod(iterable):
    """
    Product of a sequence of numbers.

    Faster than np.prod for short lists like array shapes, and does
    not overflow if using Python integers.
    """
    # Left-fold with multiplication, starting from the identity element.
    return functools.reduce(operator.mul, iterable, 1)
+
+
def float_factorial(n: int) -> float:
    """Compute the factorial and return as a float

    Returns infinity when result is too large for a double
    """
    if n < 171:
        # 170! is the largest factorial representable as a float64.
        return float(math.factorial(n))
    return np.inf
+
+
class DeprecatedImport(object):
    """
    Deprecated import with redirection and warning.

    Examples
    --------
    Suppose you previously had in some module::

        from foo import spam

    If this has to be deprecated, do::

        spam = DeprecatedImport("foo.spam", "baz")

    to redirect users to use "baz" module instead.

    """

    def __init__(self, old_module_name, new_module_name):
        # Old (deprecated) dotted name, kept only for the warning message.
        self._old_name = old_module_name
        # New module: imported eagerly so attribute access can be proxied.
        self._new_name = new_module_name
        __import__(self._new_name)
        self._mod = sys.modules[self._new_name]

    def __dir__(self):
        # Make introspection (tab completion etc.) show the new module's names.
        return dir(self._mod)

    def __getattr__(self, name):
        # Warn on every attribute access, then delegate to the new module.
        warnings.warn("Module %s is deprecated, use %s instead"
                      % (self._old_name, self._new_name),
                      DeprecationWarning)
        return getattr(self._mod, name)
+
+
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance

    If seed is None (or np.random), return the RandomState singleton used
    by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    If seed is a new-style np.random.Generator, return it.
    Otherwise, raise ValueError.
    """
    if seed is None or seed is np.random:
        # The module-level RandomState singleton backing np.random functions.
        return np.random.mtrand._rand
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    if isinstance(seed, np.random.RandomState):
        return seed
    try:
        # Generator is only available in numpy >= 1.17
        if isinstance(seed, np.random.Generator):
            return seed
    except AttributeError:
        # Older numpy: np.random has no Generator attribute.
        pass
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
+
+
def _asarray_validated(a, check_finite=True,
                       sparse_ok=False, objects_ok=False, mask_ok=False,
                       as_inexact=False):
    """
    Helper function for SciPy argument validation.

    Many SciPy linear algebra functions do support arbitrary array-like
    input arguments. Examples of commonly unsupported inputs include
    matrices containing inf/nan, sparse matrix representations, and
    matrices with complicated elements.

    Parameters
    ----------
    a : array_like
        The array-like input.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    sparse_ok : bool, optional
        True if scipy sparse matrices are allowed.
    objects_ok : bool, optional
        True if arrays with dype('O') are allowed.
    mask_ok : bool, optional
        True if masked arrays are allowed.
    as_inexact : bool, optional
        True to convert the input array to a np.inexact dtype.

    Returns
    -------
    ret : ndarray
        The converted validated array.

    """
    if not sparse_ok:
        # Imported lazily to avoid a hard scipy.sparse dependency at import
        # time of this low-level helper module.
        import scipy.sparse
        if scipy.sparse.issparse(a):
            msg = ('Sparse matrices are not supported by this function. '
                   'Perhaps one of the scipy.sparse.linalg functions '
                   'would work instead.')
            raise ValueError(msg)
    if not mask_ok:
        if np.ma.isMaskedArray(a):
            raise ValueError('masked arrays are not supported')
    # asarray_chkfinite both converts and rejects inf/nan in one pass.
    toarray = np.asarray_chkfinite if check_finite else np.asarray
    a = toarray(a)
    if not objects_ok:
        # NOTE(review): identity comparison relies on np.dtype('O') being a
        # cached singleton — presumably intentional; verify against upstream.
        if a.dtype is np.dtype('O'):
            raise ValueError('object arrays are not supported')
    if as_inexact:
        if not np.issubdtype(a.dtype, np.inexact):
            # NOTE(review): np.float_ was removed in NumPy 2.0; np.float64 is
            # the modern spelling — confirm the supported NumPy range.
            a = toarray(a, dtype=np.float_)
    return a
+
+
+# Add a replacement for inspect.getfullargspec()/
+# The version below is borrowed from Django,
+# https://github.com/django/django/pull/4846.
+
+# Note an inconsistency between inspect.getfullargspec(func) and
+# inspect.signature(func). If `func` is a bound method, the latter does *not*
+# list `self` as a first argument, while the former *does*.
+# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
+# mimics `inspect.getfullargspec` but does not list `self`.
+#
+# This way, the caller code does not need to know whether it uses a legacy
+# .getfullargspec or a bright and shiny .signature.
+
# Result type mirroring inspect.FullArgSpec.
FullArgSpec = namedtuple('FullArgSpec',
                         ['args', 'varargs', 'varkw', 'defaults',
                          'kwonlyargs', 'kwonlydefaults', 'annotations'])

def getfullargspec_no_self(func):
    """inspect.getfullargspec replacement using inspect.signature.

    If func is a bound method, do not list the 'self' parameter.

    Parameters
    ----------
    func : callable
        A callable to inspect

    Returns
    -------
    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                              kwonlydefaults, annotations)

        NOTE: if the first argument of `func` is self, it is *not*, I repeat
        *not*, included in fullargspec.args.
        This is done for consistency between inspect.getargspec() under
        Python 2.x, and inspect.signature() under Python 3.x.

    """
    # inspect.signature already drops `self` for bound methods.
    sig = inspect.signature(func)
    args = [
        p.name for p in sig.parameters.values()
        if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
                      inspect.Parameter.POSITIONAL_ONLY]
    ]
    varargs = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_POSITIONAL
    ]
    # At most one *args parameter can exist.
    varargs = varargs[0] if varargs else None
    varkw = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_KEYWORD
    ]
    # At most one **kwargs parameter can exist.
    varkw = varkw[0] if varkw else None
    # getfullargspec reports no defaults as None, not ().
    defaults = tuple(
        p.default for p in sig.parameters.values()
        if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
            p.default is not p.empty)
    ) or None
    kwonlyargs = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.KEYWORD_ONLY
    ]
    kwdefaults = {p.name: p.default for p in sig.parameters.values()
                  if p.kind == inspect.Parameter.KEYWORD_ONLY and
                  p.default is not p.empty}
    annotations = {p.name: p.annotation for p in sig.parameters.values()
                   if p.annotation is not p.empty}
    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                       kwdefaults or None, annotations)
+
+
class MapWrapper(object):
    """
    Parallelisation wrapper for working with map-like callables, such as
    `multiprocessing.Pool.map`.

    Parameters
    ----------
    pool : int or map-like callable
        If `pool` is an integer, then it specifies the number of threads to
        use for parallelization. If ``int(pool) == 1``, then no parallel
        processing is used and the map builtin is used.
        If ``pool == -1``, then the pool will utilize all available CPUs.
        If `pool` is a map-like callable that follows the same
        calling sequence as the built-in map function, then this callable is
        used for parallelization.
    """
    def __init__(self, pool=1):
        # The underlying pool object (None when using the builtin map or a
        # user-supplied callable).
        self.pool = None
        # The map-like callable actually invoked by __call__.
        self._mapfunc = map
        # True only when this wrapper created the pool itself and is
        # therefore responsible for shutting it down.
        self._own_pool = False

        if callable(pool):
            # User supplied their own map-like callable; do not manage it.
            self.pool = pool
            self._mapfunc = self.pool
        else:
            from multiprocessing import Pool
            # user supplies a number
            if int(pool) == -1:
                # use as many processors as possible
                self.pool = Pool()
                self._mapfunc = self.pool.map
                self._own_pool = True
            elif int(pool) == 1:
                pass
            elif int(pool) > 1:
                # use the number of processors requested
                self.pool = Pool(processes=int(pool))
                self._mapfunc = self.pool.map
                self._own_pool = True
            else:
                raise RuntimeError("Number of workers specified must be -1,"
                                   " an int >= 1, or an object with a 'map' method")

    def __enter__(self):
        return self

    def terminate(self):
        # Only shut down pools this wrapper created.
        if self._own_pool:
            self.pool.terminate()

    def join(self):
        if self._own_pool:
            self.pool.join()

    def close(self):
        if self._own_pool:
            self.pool.close()

    def __exit__(self, exc_type, exc_value, traceback):
        # Stop accepting work, then stop the workers, on context exit.
        if self._own_pool:
            self.pool.close()
            self.pool.terminate()

    def __call__(self, func, iterable):
        # only accept one iterable because that's all Pool.map accepts
        try:
            return self._mapfunc(func, iterable)
        except TypeError as e:
            # wrong number of arguments
            raise TypeError("The map-like callable must be of the"
                            " form f(func, iterable)") from e
+
+
def rng_integers(gen, low, high=None, size=None, dtype='int64',
                 endpoint=False):
    """
    Return random integers from low (inclusive) to high (exclusive), or if
    endpoint=True, low (inclusive) to high (inclusive). Replaces
    `RandomState.randint` (with endpoint=False) and
    `RandomState.random_integers` (with endpoint=True).

    Return random integers from the "discrete uniform" distribution of the
    specified dtype. If high is None (the default), then results are from
    0 to low.

    Parameters
    ----------
    gen: {None, np.random.RandomState, np.random.Generator}
        Random number generator. If None, then the np.random.RandomState
        singleton is used.
    low: int or array-like of ints
        Lowest (signed) integers to be drawn from the distribution (unless
        high=None, in which case this parameter is 0 and this value is used
        for high).
    high: int or array-like of ints
        If provided, one above the largest (signed) integer to be drawn from
        the distribution (see above for behavior if high=None). If array-like,
        must contain integer values.
    size: None
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. Default is None, in which case a single value is
        returned.
    dtype: {str, dtype}, optional
        Desired dtype of the result. All dtypes are determined by their name,
        i.e., 'int64', 'int', etc, so byteorder is not available and a specific
        precision may have different C types depending on the platform.
        The default value is np.int_.
    endpoint: bool, optional
        If True, sample from the interval [low, high] instead of the default
        [low, high) Defaults to False.

    Returns
    -------
    out: int or ndarray of ints
        size-shaped array of random integers from the appropriate distribution,
        or a single such random int if size not provided.
    """
    if isinstance(gen, Generator):
        # New-style Generator natively supports the `endpoint` keyword.
        return gen.integers(low, high=high, size=size, dtype=dtype,
                            endpoint=endpoint)
    else:
        if gen is None:
            # default is RandomState singleton used by np.random.
            gen = np.random.mtrand._rand
        if endpoint:
            # inclusive of endpoint
            # remember that low and high can be arrays, so don't modify in
            # place
            if high is None:
                # Here `low` acts as the upper bound; widen it by one.
                return gen.randint(low + 1, size=size, dtype=dtype)
            if high is not None:
                return gen.randint(low, high=high + 1, size=size, dtype=dtype)

        # exclusive
        return gen.randint(low, high=high, size=size, dtype=dtype)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/decorator.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/decorator.py
new file mode 100644
index 0000000..7b92d53
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/decorator.py
@@ -0,0 +1,399 @@
+# ######################### LICENSE ############################ #
+
+# Copyright (c) 2005-2015, Michele Simionato
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+# Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# Redistributions in bytecode form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""
+Decorator module, see https://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+import re
+import sys
+import inspect
+import operator
+import itertools
+import collections
+
+from inspect import getfullargspec
+
+__version__ = '4.0.5'
+
+
def get_init(cls):
    # Return the class initializer; used when decorating a class-based caller.
    return cls.__init__


# getargspec has been deprecated in Python 3.5
ArgSpec = collections.namedtuple(
    'ArgSpec', 'args varargs varkw defaults')


def getargspec(f):
    """A replacement for inspect.getargspec"""
    spec = getfullargspec(f)
    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)


# Matches a "def <name>(" function header to extract the function name.
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
+
+
+# basic functionality
# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict, and
    methods update and make.
    """

    # Atomic get-and-increment provided by the GIL
    _compile_count = itertools.count()

    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            # BUG FIX: the '<lambda>' literal had been stripped, leaving a
            # comparison against '' that could never match a lambda's
            # __name__; restored per upstream decorator 4.0.5.
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                allargs = list(self.args)
                allshortargs = list(self.args)
                if self.varargs:
                    allargs.append('*' + self.varargs)
                    allshortargs.append('*' + self.varargs)
                elif self.kwonlyargs:
                    allargs.append('*')  # single star syntax
                for a in self.kwonlyargs:
                    allargs.append('%s=None' % a)
                    allshortargs.append('%s=%s' % (a, a))
                if self.varkw:
                    allargs.append('**' + self.varkw)
                    allshortargs.append('**' + self.varkw)
                self.signature = ', '.join(allargs)
                self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non-function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        try:
            frame = sys._getframe(3)
        except AttributeError:  # for IronPython and similar implementations
            callermodule = '?'
        else:
            callermodule = frame.f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self)  # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1)  # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'):  # add a newline just for safety
            src += '\n'  # this is needed in old versions of Python

        # Ensure each generated function has a unique filename for profilers
        # (such as cProfile) that depend on the tuple of (<filename>,
        # <line number>, <function name>) being unique.
        # BUG FIX: the '<decorator-gen-%d>' literal had been stripped,
        # leaving '' % (...) which raises TypeError at runtime; restored
        # per upstream decorator 4.0.5.
        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
        try:
            code = compile(src, filename, 'single')
            exec(code, evaldict)
        except:  # noqa: E722
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature, and body.
        evaldict is the evaluation dictionary. If addsource is true, an
        attribute __source__ is added to the result. The attributes attrs
        are added, if any.
        """
        if isinstance(obj, str):  # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip a right parens
            func = None
        else:  # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Indent the body one level under the generated "def" header.
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
+
+
def decorate(func, caller):
    """
    decorate(func, caller) decorates a function using a caller.
    """
    # The generated wrapper calls `caller(func, *args, **kwargs)` while
    # preserving func's signature, globals, and metadata.
    evaldict = func.__globals__.copy()
    evaldict['_call_'] = caller
    evaldict['_func_'] = func
    fun = FunctionMaker.create(
        func, "return _call_(_func_, %(shortsignature)s)",
        evaldict, __wrapped__=func)
    if hasattr(func, '__qualname__'):
        fun.__qualname__ = func.__qualname__
    return fun
+
+
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator"""
    if _func is not None:  # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    if inspect.isclass(caller):
        name = caller.__name__.lower()
        callerfunc = get_init(caller)
        doc = 'decorator(%s) converts functions/generators into ' \
              'factories of %s objects' % (caller.__name__, caller.__name__)
    elif inspect.isfunction(caller):
        # BUG FIX: the '<lambda>' literal had been stripped, leaving a
        # comparison against '' that could never match a lambda's __name__;
        # restored per upstream decorator 4.0.5.
        if caller.__name__ == '<lambda>':
            name = '_lambda_'
        else:
            name = caller.__name__
        callerfunc = caller
        doc = caller.__doc__
    else:  # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        callerfunc = caller.__call__.__func__
        doc = caller.__call__.__doc__
    evaldict = callerfunc.__globals__.copy()
    evaldict['_call_'] = caller
    evaldict['_decorate_'] = decorate
    return FunctionMaker.create(
        '%s(func)' % name, 'return _decorate_(func, _call_)',
        evaldict, doc=doc, module=caller.__module__,
        __wrapped__=caller)
+
+
+# ####################### contextmanager ####################### #
+
+try: # Python >= 3.2
+ from contextlib import _GeneratorContextManager
+except ImportError: # Python >= 2.5
+ from contextlib import GeneratorContextManager as _GeneratorContextManager
+
+
class ContextManager(_GeneratorContextManager):
    def __call__(self, func):
        """Context manager decorator"""
        # Wrap `func` so that every call runs inside `with self:`.
        return FunctionMaker.create(
            func, "with _self_: return _func_(%(shortsignature)s)",
            dict(_self_=self, _func_=func), __wrapped__=func)
+
+
# Adapt ContextManager.__init__ to _GeneratorContextManager.__init__, whose
# signature changed across Python versions; probe it via introspection.
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g(*a, **k))
    ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
    pass
elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g, a, k)
    ContextManager.__init__ = __init__

# Signature-preserving replacement for contextlib.contextmanager.
contextmanager = decorator(ContextManager)
+
+
+# ############################ dispatch_on ############################ #
+
def append(a, vancestors):
    """
    Append ``a`` to the list of the virtual ancestors, unless it is already
    included.
    """
    should_add = True
    for position, existing in enumerate(vancestors):
        if issubclass(existing, a):
            # An equally or more specific class is already present.
            should_add = False
            break
        if issubclass(a, existing):
            # ``a`` is more specific: it supersedes the current entry.
            vancestors[position] = a
            should_add = False
    if should_add:
        vancestors.append(a)
+
+
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)

    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))

    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""

        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)

        # Maps tuples of registered types to their implementations.
        typemap = {}

        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    # A "virtual" ancestor is a registered type the actual
                    # type subclasses without it being in its MRO (e.g. an
                    # ABC registration).
                    if issubclass(t, type_) and type_ not in t.__mro__:
                        append(type_, ra)
            return [set(ra) for ra in ras]

        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    va, = vas
                    # Merge the virtual ancestor into the MRO via a throwaway
                    # subclass of both.
                    mro = type('t', (t, va), {}).__mro__[1:]
                else:
                    mro = t.__mro__
                lists.append(mro[:-1])  # discard t and object
            return lists

        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)

            def dec(f):
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec

        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = [tuple(a.__name__ for a in anc)
                   for anc in itertools.product(*ancestors(*types))]
            return lst

        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try:  # fast path
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            # Walk the cross product of the ancestor chains, most specific
            # combination first.
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)

            # else call the default implementation
            return func(*args, **kw)

        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)

    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/deprecation.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/deprecation.py
new file mode 100644
index 0000000..6630a48
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/deprecation.py
@@ -0,0 +1,107 @@
+import functools
+import warnings
+
+__all__ = ["_deprecated"]
+
+
def _deprecated(msg, stacklevel=2):
    """Deprecate a function by emitting a warning on use."""
    def wrap(fun):
        if isinstance(fun, type):
            # Classes are not supported: warn the developer and return the
            # class unchanged.
            warnings.warn(
                "Trying to deprecate class {!r}".format(fun),
                category=RuntimeWarning, stacklevel=2)
            return fun

        @functools.wraps(fun)
        def call(*args, **kwargs):
            # Emit the deprecation message, then delegate to the original.
            warnings.warn(msg, category=DeprecationWarning,
                          stacklevel=stacklevel)
            return fun(*args, **kwargs)
        call.__doc__ = msg
        return call

    return wrap
+
+
class _DeprecationHelperStr(object):
    """
    Helper class used by deprecate_cython_api
    """
    def __init__(self, content, message):
        # The string this object stands in for (a __pyx_capi__ key).
        self._content = content
        # Deprecation message emitted when the key is looked up.
        self._message = message

    def __hash__(self):
        # Hash like the plain string so dict lookups by the original name
        # still find this key.
        return hash(self._content)

    def __eq__(self, other):
        res = (self._content == other)
        if res:
            # A successful comparison means someone is accessing the
            # deprecated name: warn them.
            warnings.warn(self._message, category=DeprecationWarning,
                          stacklevel=2)
        return res
+
+
def deprecate_cython_api(module, routine_name, new_name=None, message=None):
    """
    Deprecate an exported cdef function in a public Cython API module.

    Only functions can be deprecated; typedefs etc. cannot.

    Parameters
    ----------
    module : module
        Public Cython API module (e.g. scipy.linalg.cython_blas).
    routine_name : str
        Name of the routine to deprecate. May also be a fused-type
        routine (in which case its all specializations are deprecated).
    new_name : str
        New name to include in the deprecation warning message
    message : str
        Additional text in the deprecation warning message

    Examples
    --------
    Usually, this function would be used in the top-level of the
    module ``.pyx`` file:

    >>> from scipy._lib.deprecation import deprecate_cython_api
    >>> import scipy.linalg.cython_blas as mod
    >>> deprecate_cython_api(mod, "dgemm", "dgemm_new",
    ...                      message="Deprecated in Scipy 1.5.0")
    >>> del deprecate_cython_api, mod

    After this, Cython modules that use the deprecated function emit a
    deprecation warning when they are imported.

    """
    old_name = "{}.{}".format(module.__name__, routine_name)

    if new_name is None:
        depdoc = "`%s` is deprecated!" % old_name
    else:
        depdoc = "`%s` is deprecated, use `%s` instead!" % \
                 (old_name, new_name)

    if message is not None:
        depdoc += "\n" + message

    d = module.__pyx_capi__

    # Check if the function is a fused-type function with a mangled name
    # (Cython exports each specialization as "__pyx_fuse_<j><name>").
    j = 0
    has_fused = False
    while True:
        fused_name = "__pyx_fuse_{}{}".format(j, routine_name)
        if fused_name in d:
            has_fused = True
            # Replace the key with a warning-emitting stand-in string.
            d[_DeprecationHelperStr(fused_name, depdoc)] = d.pop(fused_name)
            j += 1
        else:
            break

    # If not, apply deprecation to the named routine
    if not has_fused:
        d[_DeprecationHelperStr(routine_name, depdoc)] = d.pop(routine_name)
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/doccer.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/doccer.py
new file mode 100644
index 0000000..cfaff2b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/doccer.py
@@ -0,0 +1,272 @@
+''' Utilities to allow inserting docstring fragments for common
+parameters into function and method docstrings'''
+
+import sys
+
+__all__ = ['docformat', 'inherit_docstring_from', 'indentcount_lines',
+ 'filldoc', 'unindent_dict', 'unindent_string', 'doc_replace']
+
+
+def docformat(docstring, docdict=None):
+    ''' Fill a function docstring from variables in dictionary
+
+    Adapt the indent of the inserted docs
+
+    Parameters
+    ----------
+    docstring : string
+        docstring from function, possibly with dict formatting strings
+    docdict : dict, optional
+        dictionary with keys that match the dict formatting strings
+        and values that are docstring fragments to be inserted. The
+        indentation of the inserted docstrings is set to match the
+        minimum indentation of the ``docstring`` by adding this
+        indentation to all lines of the inserted string, except the
+        first.
+
+    Returns
+    -------
+    outstring : string
+        string with requested ``docdict`` strings inserted
+
+    Examples
+    --------
+    >>> docformat(' Test string with %(value)s', {'value':'inserted value'})
+    ' Test string with inserted value'
+    >>> docstring = 'First line\\n Second line\\n %(value)s'
+    >>> inserted_string = "indented\\nstring"
+    >>> docdict = {'value': inserted_string}
+    >>> docformat(docstring, docdict)
+    'First line\\n Second line\\n indented\\n string'
+    '''
+    # Nothing to substitute into an empty/None docstring, and an empty
+    # docdict means the %-formatting below would be a no-op (or KeyError).
+    if not docstring:
+        return docstring
+    if docdict is None:
+        docdict = {}
+    if not docdict:
+        return docstring
+    lines = docstring.expandtabs().splitlines()
+    # Find the minimum indent of the main docstring, after first line
+    if len(lines) < 2:
+        icount = 0
+    else:
+        icount = indentcount_lines(lines[1:])
+    indent = ' ' * icount
+    # Insert this indent to dictionary docstrings
+    indented = {}
+    for name, dstr in docdict.items():
+        # NOTE: 'lines' is deliberately rebound here to the fragment's lines.
+        lines = dstr.expandtabs().splitlines()
+        try:
+            # First fragment line keeps its own indent; subsequent lines are
+            # shifted to match the target docstring's minimum indent.
+            newlines = [lines[0]]
+            for line in lines[1:]:
+                newlines.append(indent+line)
+            indented[name] = '\n'.join(newlines)
+        except IndexError:
+            # Empty fragment (splitlines() gave no lines): insert unchanged.
+            indented[name] = dstr
+    return docstring % indented
+
+
+def inherit_docstring_from(cls):
+    """
+    This decorator modifies the decorated function's docstring by
+    replacing occurrences of '%(super)s' with the docstring of the
+    method of the same name from the class `cls`.
+
+    If the decorated method has no docstring, it is simply given the
+    docstring of `cls`'s method.
+
+    Parameters
+    ----------
+    cls : Python class or instance
+        A class with a method with the same name as the decorated method.
+        The docstring of the method in this class replaces '%(super)s' in the
+        docstring of the decorated method.
+
+    Returns
+    -------
+    f : function
+        The decorator function that modifies the __doc__ attribute
+        of its argument.
+
+    Examples
+    --------
+    In the following, the docstring for Bar.func created using the
+    docstring of `Foo.func`.
+
+    >>> class Foo(object):
+    ...     def func(self):
+    ...         '''Do something useful.'''
+    ...         return
+    ...
+    >>> class Bar(Foo):
+    ...     @inherit_docstring_from(Foo)
+    ...     def func(self):
+    ...         '''%(super)s
+    ...         Do it fast.
+    ...         '''
+    ...         return
+    ...
+    >>> b = Bar()
+    >>> b.func.__doc__
+    'Do something useful.\n Do it fast.\n '
+
+    """
+    def _doc(func):
+        # Look up the same-named method on `cls` and grab its docstring.
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        func_docstring = func.__doc__
+        if func_docstring is None:
+            # No docstring on the decorated function: inherit wholesale.
+            func.__doc__ = cls_docstring
+        else:
+            # Substitute the parent docstring for every '%(super)s'.
+            new_docstring = func_docstring % dict(super=cls_docstring)
+            func.__doc__ = new_docstring
+        return func
+    return _doc
+
+
+def extend_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from corresponding method in `cls`.
+    It extends the 'Notes' section of that docstring to include
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        # Insert `notes` just before the References/Examples section, or at
+        # the end if neither section exists.
+        # NOTE(review): the leading whitespace in these needle strings must
+        # match the section-header indentation of `cls`'s docstrings -- confirm.
+        end_of_notes = cls_docstring.find(' References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find(' Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:end_of_notes] + notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
+
+
+def replace_notes_in_docstring(cls, notes):
+ """
+ This decorator replaces the decorated function's docstring
+ with the docstring from corresponding method in `cls`.
+ It replaces the 'Notes' section of that docstring with
+ the given `notes`.
+ """
+ def _doc(func):
+ cls_docstring = getattr(cls, func.__name__).__doc__
+ notes_header = ' Notes\n -----\n'
+ # If python is called with -OO option,
+ # there is no docstring
+ if cls_docstring is None:
+ return func
+ start_of_notes = cls_docstring.find(notes_header)
+ end_of_notes = cls_docstring.find(' References\n')
+ if end_of_notes == -1:
+ end_of_notes = cls_docstring.find(' Examples\n')
+ if end_of_notes == -1:
+ end_of_notes = len(cls_docstring)
+ func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] +
+ notes +
+ cls_docstring[end_of_notes:])
+ return func
+ return _doc
+
+
+def indentcount_lines(lines):
+    ''' Minimum indent for all lines in line list
+
+    Whitespace-only lines are ignored; returns 0 when there is no
+    non-blank line at all.
+
+    >>> lines = [' one', ' two', ' three']
+    >>> indentcount_lines(lines)
+    1
+    >>> lines = []
+    >>> indentcount_lines(lines)
+    0
+    >>> lines = [' one']
+    >>> indentcount_lines(lines)
+    1
+    >>> indentcount_lines([' '])
+    0
+    '''
+    # Start from the largest possible value and take the running minimum.
+    indentno = sys.maxsize
+    for line in lines:
+        stripped = line.lstrip()
+        if stripped:
+            # Indent of this line = chars removed by lstrip().
+            indentno = min(indentno, len(line) - len(stripped))
+    if indentno == sys.maxsize:
+        # Only blank/whitespace lines were seen.
+        return 0
+    return indentno
+
+
+def filldoc(docdict, unindent_params=True):
+    ''' Return docstring decorator using docdict variable dictionary
+
+    Parameters
+    ----------
+    docdict : dictionary
+        dictionary containing name, docstring fragment pairs
+    unindent_params : {False, True}, boolean, optional
+        If True, strip common indentation from all parameters in
+        docdict
+
+    Returns
+    -------
+    decfunc : function
+        decorator that applies dictionary to input function docstring
+
+    '''
+    # Normalize fragment indentation once, up front, so every decorated
+    # function shares the same canonicalized fragments.
+    if unindent_params:
+        docdict = unindent_dict(docdict)
+
+    def decorate(f):
+        # Substitute the fragments into f's docstring in place.
+        f.__doc__ = docformat(f.__doc__, docdict)
+        return f
+    return decorate
+
+
+def unindent_dict(docdict):
+    ''' Unindent all strings in a docdict
+
+    Returns a new dict; the input is not modified.
+    '''
+    can_dict = {}
+    for name, dstr in docdict.items():
+        can_dict[name] = unindent_string(dstr)
+    return can_dict
+
+
+def unindent_string(docstring):
+    ''' Set docstring to minimum indent for all lines, including first
+
+    >>> unindent_string(' two')
+    'two'
+    >>> unindent_string(' two\\n three')
+    'two\\n three'
+    '''
+    lines = docstring.expandtabs().splitlines()
+    icount = indentcount_lines(lines)
+    if icount == 0:
+        # Already flush left; return the original (preserves tabs/newlines).
+        return docstring
+    # Strip the common indent from every line, including the first.
+    return '\n'.join([line[icount:] for line in lines])
+
+
+def doc_replace(obj, oldval, newval):
+    """Decorator to take the docstring from obj, with oldval replaced by newval
+
+    Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)``
+
+    Parameters
+    ----------
+    obj: object
+        The object to take the docstring from.
+    oldval: string
+        The string to replace from the original docstring.
+    newval: string
+        The string to replace ``oldval`` with.
+    """
+    # __doc__ may be None for optimized Python (-OO)
+    doc = (obj.__doc__ or '').replace(oldval, newval)
+
+    def inner(func):
+        # Overwrite the decorated function's docstring with the edited copy.
+        func.__doc__ = doc
+        return func
+
+    return inner
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/messagestream.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/messagestream.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..719b821
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/messagestream.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/setup.py
new file mode 100644
index 0000000..391b575
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/setup.py
@@ -0,0 +1,60 @@
+import os
+
+
+def configuration(parent_package='',top_path=None):
+    """numpy.distutils configuration for the scipy._lib subpackage.
+
+    Declares the C extension modules built for scipy._lib and returns the
+    populated Configuration object.
+    """
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('_lib', parent_package, top_path)
+    config.add_data_files('tests/*.py')
+
+    # Shared C headers live under _lib/src.
+    include_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
+    depends = [os.path.join(include_dir, 'ccallback.h')]
+
+    config.add_extension("_ccallback_c",
+                         sources=["_ccallback_c.c"],
+                         depends=depends,
+                         include_dirs=[include_dir])
+
+    config.add_extension("_test_ccallback",
+                         sources=["src/_test_ccallback.c"],
+                         depends=depends,
+                         include_dirs=[include_dir])
+
+    config.add_extension("_fpumode",
+                         sources=["_fpumode.c"])
+
+    def get_messagestream_config(ext, build_dir):
+        # Generate a header file containing defines
+        config_cmd = config.get_config_cmd()
+        defines = []
+        # Probe for open_memstream() at build time (POSIX; absent on some
+        # platforms) and record the result in messagestream_config.h.
+        if config_cmd.check_func('open_memstream', decl=True, call=True):
+            defines.append(('HAVE_OPEN_MEMSTREAM', '1'))
+        target = os.path.join(os.path.dirname(__file__), 'src',
+                              'messagestream_config.h')
+        with open(target, 'w') as f:
+            for name, value in defines:
+                f.write('#define {0} {1}\n'.format(name, value))
+
+    depends = [os.path.join(include_dir, 'messagestream.h')]
+    # numpy.distutils accepts callables in `sources`: the function above runs
+    # at build time to generate the config header before compilation.
+    config.add_extension("messagestream",
+                         sources=["messagestream.c"] + [get_messagestream_config],
+                         depends=depends,
+                         include_dirs=[include_dir])
+
+    config.add_extension("_test_deprecation_call",
+                         sources=["_test_deprecation_call.c"],
+                         include_dirs=[include_dir])
+
+    config.add_extension("_test_deprecation_def",
+                         sources=["_test_deprecation_def.c"],
+                         include_dirs=[include_dir])
+
+    config.add_subpackage('_uarray')
+
+    return config
+
+
+if __name__ == '__main__':
+    # Allow building this subpackage standalone: python setup.py build_ext ...
+    from numpy.distutils.core import setup
+
+    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__gcutils.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__gcutils.py
new file mode 100644
index 0000000..80a76cf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__gcutils.py
@@ -0,0 +1,101 @@
+""" Test for assert_deallocated context manager and gc utilities
+"""
+import gc
+
+from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
+ ReferenceError, IS_PYPY)
+
+from numpy.testing import assert_equal
+
+import pytest
+
+
+def test_set_gc_state():
+    # set_gc_state must enable/disable collection regardless of prior state.
+    gc_status = gc.isenabled()
+    try:
+        for state in (True, False):
+            gc.enable()
+            set_gc_state(state)
+            assert_equal(gc.isenabled(), state)
+            gc.disable()
+            set_gc_state(state)
+            assert_equal(gc.isenabled(), state)
+    finally:
+        # Restore the interpreter-wide GC state for later tests.
+        if gc_status:
+            gc.enable()
+
+
+def test_gc_state():
+    # Test gc_state context manager
+    gc_status = gc.isenabled()
+    try:
+        for pre_state in (True, False):
+            set_gc_state(pre_state)
+            for with_state in (True, False):
+                # Check the gc state is with_state in with block
+                with gc_state(with_state):
+                    assert_equal(gc.isenabled(), with_state)
+                # And returns to previous state outside block
+                assert_equal(gc.isenabled(), pre_state)
+                # Even if the gc state is set explicitly within the block
+                with gc_state(with_state):
+                    assert_equal(gc.isenabled(), with_state)
+                    set_gc_state(not with_state)
+                assert_equal(gc.isenabled(), pre_state)
+    finally:
+        # Restore the interpreter-wide GC state for later tests.
+        if gc_status:
+            gc.enable()
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated():
+    # Ordinary use
+    class C(object):
+        def __init__(self, arg0, arg1, name='myname'):
+            self.name = name
+    # Run once with GC enabled and once disabled; behavior must match.
+    for gc_current in (True, False):
+        with gc_state(gc_current):
+            # We are deleting from with-block context, so that's OK
+            with assert_deallocated(C, 0, 2, 'another name') as c:
+                assert_equal(c.name, 'another name')
+                del c
+            # Or not using the thing in with-block context, also OK
+            with assert_deallocated(C, 0, 2, name='third name'):
+                pass
+            assert_equal(gc.isenabled(), gc_current)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_nodel():
+    class C(object):
+        pass
+    with pytest.raises(ReferenceError):
+        # Need to delete after using if in with-block context
+        # Note: assert_deallocated(C) needs to be assigned for the test
+        # to function correctly. It is assigned to c, but c itself is
+        # not referenced in the body of the with, it is only there for
+        # the refcount.
+        with assert_deallocated(C) as c:
+            pass
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_circular():
+    # A self-referencing instance can't die by refcount alone.
+    class C(object):
+        def __init__(self):
+            self._circular = self
+    with pytest.raises(ReferenceError):
+        # Circular reference, no automatic garbage collection
+        with assert_deallocated(C) as c:
+            del c
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_circular2():
+    # Same as above, but without ever binding the instance.
+    class C(object):
+        def __init__(self):
+            self._circular = self
+    with pytest.raises(ReferenceError):
+        # Still circular reference, no automatic garbage collection
+        with assert_deallocated(C):
+            pass
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__pep440.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__pep440.py
new file mode 100644
index 0000000..7f5b71c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__pep440.py
@@ -0,0 +1,67 @@
+from pytest import raises as assert_raises
+from scipy._lib._pep440 import Version, parse
+
+
+def test_main_versions():
+    # Equality and strict ordering of plain release versions.
+    assert Version('1.8.0') == Version('1.8.0')
+    for ver in ['1.9.0', '2.0.0', '1.8.1']:
+        assert Version('1.8.0') < Version(ver)
+
+    for ver in ['1.7.0', '1.7.1', '0.9.9']:
+        assert Version('1.8.0') > Version(ver)
+
+
+def test_version_1_point_10():
+    # regression test for gh-2998.
+    # Components must compare numerically, not lexicographically
+    # ('10' > '9' even though '10' < '9' as strings).
+    assert Version('1.9.0') < Version('1.10.0')
+    assert Version('1.11.0') < Version('1.11.1')
+    assert Version('1.11.0') == Version('1.11.0')
+    assert Version('1.99.11') < Version('1.99.12')
+
+
+def test_alpha_beta_rc():
+    # Pre-release ordering: a < b < rc < final release.
+    assert Version('1.8.0rc1') == Version('1.8.0rc1')
+    for ver in ['1.8.0', '1.8.0rc2']:
+        assert Version('1.8.0rc1') < Version(ver)
+
+    for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
+        assert Version('1.8.0rc1') > Version(ver)
+
+    assert Version('1.8.0b1') > Version('1.8.0a2')
+
+
+def test_dev_version():
+    # Implicit dev releases (no number) sort before everything at the
+    # same release, including pre-releases; local tags break equality ties.
+    assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']:
+        assert Version('1.9.0.dev+f16acvda') < Version(ver)
+
+    assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda')
+
+
+def test_dev_a_b_rc_mixed():
+    # A dev build of a pre-release sorts before the pre-release itself.
+    assert Version('1.9.0a2.dev+f16acvda') == Version('1.9.0a2.dev+f16acvda')
+    assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2')
+
+
+def test_dev0_version():
+    # Same as test_dev_version but with an explicit .dev0 number.
+    assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+        assert Version('1.9.0.dev0+f16acvda') < Version(ver)
+
+    assert Version('1.9.0.dev0+f16acvda') == Version('1.9.0.dev0+f16acvda')
+
+
+def test_dev0_a_b_rc_mixed():
+    # dev0 build of a pre-release sorts before the pre-release itself.
+    assert Version('1.9.0a2.dev0+f16acvda') == Version('1.9.0a2.dev0+f16acvda')
+    assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')
+
+
+def test_raises():
+    # Malformed identifiers must raise (InvalidVersion is a ValueError).
+    for ver in ['1,9.0', '1.7.x']:
+        assert_raises(ValueError, Version, ver)
+
+def test_legacy_version():
+    # Non-PEP-440 version identifiers always compare less. For NumPy this only
+    # occurs on dev builds prior to 1.10.0 which are unsupported anyway.
+    assert parse('invalid') < Version('0.0.0')
+    assert parse('1.9.0-f16acvda') < Version('1.0.0')
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__testutils.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__testutils.py
new file mode 100644
index 0000000..88db113
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__testutils.py
@@ -0,0 +1,32 @@
+import sys
+from scipy._lib._testutils import _parse_size, _get_mem_available
+import pytest
+
+
+def test__parse_size():
+    # Mapping of size strings -> expected bytes; SI prefixes are powers of
+    # 1000, the *ib forms are powers of 1024, bare numbers default to MB.
+    expected = {
+        '12': 12e6,
+        '12 b': 12,
+        '12k': 12e3,
+        ' 12 M ': 12e6,
+        ' 12 G ': 12e9,
+        ' 12Tb ': 12e12,
+        '12 Mib ': 12 * 1024.0**2,
+        '12Tib': 12 * 1024.0**4,
+    }
+
+    for inp, outp in sorted(expected.items()):
+        # NOTE: no entry currently maps to None, so the ValueError branch is
+        # unexercised; it is kept so invalid inputs can be added to the table.
+        if outp is None:
+            with pytest.raises(ValueError):
+                _parse_size(inp)
+        else:
+            assert _parse_size(inp) == outp
+
+
+def test__mem_available():
+    # May return None on non-Linux platforms
+    available = _get_mem_available()
+    if sys.platform.startswith('linux'):
+        # On Linux the value is read from /proc and must be a real number.
+        assert available >= 0
+    else:
+        assert available is None or available >= 0
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__threadsafety.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__threadsafety.py
new file mode 100644
index 0000000..87ae85e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__threadsafety.py
@@ -0,0 +1,51 @@
+import threading
+import time
+import traceback
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
+
+
+def test_parallel_threads():
+    # Check that ReentrancyLock serializes work in parallel threads.
+    #
+    # The test is not fully deterministic, and may succeed falsely if
+    # the timings go wrong.
+
+    lock = ReentrancyLock("failure")
+
+    # One-element list so the worker closures can mutate shared state.
+    failflag = [False]
+    exceptions_raised = []
+
+    def worker(k):
+        try:
+            with lock:
+                # If serialization failed, another worker left the flag set.
+                assert_(not failflag[0])
+                failflag[0] = True
+                time.sleep(0.1 * k)
+                assert_(failflag[0])
+                failflag[0] = False
+        except Exception:
+            exceptions_raised.append(traceback.format_exc(2))
+
+    threads = [threading.Thread(target=lambda k=k: worker(k))
+               for k in range(3)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+
+    # Failed asserts inside worker threads don't fail the test directly,
+    # so collect their tracebacks and assert on them here.
+    exceptions_raised = "\n".join(exceptions_raised)
+    assert_(not exceptions_raised, exceptions_raised)
+
+
+def test_reentering():
+    # Check that ReentrancyLock prevents re-entering from the same thread.
+
+    @non_reentrant()
+    def func(x):
+        # Direct recursion re-enters the lock and must raise.
+        return func(x)
+
+    assert_raises(ReentrancyError, func, 0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__util.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__util.py
new file mode 100644
index 0000000..e79575c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test__util.py
@@ -0,0 +1,242 @@
+from multiprocessing import Pool
+from multiprocessing.pool import Pool as PWL
+import os
+import math
+
+import numpy as np
+from numpy.testing import assert_equal, assert_
+import pytest
+from pytest import raises as assert_raises, deprecated_call
+
+import scipy
+from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper,
+ getfullargspec_no_self, FullArgSpec,
+ rng_integers)
+
+
+def test__aligned_zeros():
+    # Exercise _aligned_zeros over alignments, sizes, orders and dtypes.
+    niter = 10
+
+    def check(shape, dtype, order, align):
+        err_msg = repr((shape, dtype, order, align))
+        x = _aligned_zeros(shape, dtype, order, align=align)
+        if align is None:
+            # Default alignment is the dtype's natural alignment.
+            align = np.dtype(dtype).alignment
+        # The data pointer must sit on the requested boundary.
+        assert_equal(x.__array_interface__['data'][0] % align, 0)
+        if hasattr(shape, '__len__'):
+            assert_equal(x.shape, shape, err_msg)
+        else:
+            # Scalar shape argument becomes a 1-d array.
+            assert_equal(x.shape, (shape,), err_msg)
+        assert_equal(x.dtype, dtype)
+        if order == "C":
+            assert_(x.flags.c_contiguous, err_msg)
+        elif order == "F":
+            if x.size > 0:
+                # Size-0 arrays get invalid flags on NumPy 1.5
+                assert_(x.flags.f_contiguous, err_msg)
+        elif order is None:
+            assert_(x.flags.c_contiguous, err_msg)
+        else:
+            raise ValueError()
+
+    # try various alignments
+    for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
+        for n in [0, 1, 3, 11]:
+            for order in ["C", "F", None]:
+                for dtype in [np.uint8, np.float64]:
+                    for shape in [n, (1, 2, 3, n)]:
+                        for j in range(niter):
+                            check(shape, dtype, order, align)
+
+
+def test_check_random_state():
+    # If seed is None, return the RandomState singleton used by np.random.
+    # If seed is an int, return a new RandomState instance seeded with seed.
+    # If seed is already a RandomState instance, return it.
+    # Otherwise raise ValueError.
+    rsi = check_random_state(1)
+    assert_equal(type(rsi), np.random.RandomState)
+    rsi = check_random_state(rsi)
+    assert_equal(type(rsi), np.random.RandomState)
+    rsi = check_random_state(None)
+    assert_equal(type(rsi), np.random.RandomState)
+    assert_raises(ValueError, check_random_state, 'a')
+    if hasattr(np.random, 'Generator'):
+        # np.random.Generator is only available in NumPy >= 1.17
+        # Generators must be passed through unchanged, not wrapped.
+        rg = np.random.Generator(np.random.PCG64())
+        rsi = check_random_state(rg)
+        assert_equal(type(rsi), np.random.Generator)
+
+
+def test_getfullargspec_no_self():
+    # getfullargspec_no_self must drop the bound 'self' parameter from
+    # method signatures while preserving defaults and kw-only arguments.
+    p = MapWrapper(1)
+    argspec = getfullargspec_no_self(p.__init__)
+    assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [], None, {}))
+    argspec = getfullargspec_no_self(p.__call__)
+    assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None, [], None, {}))
+
+    class _rv_generic(object):
+        def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs):
+            return None
+
+    rv_obj = _rv_generic()
+    argspec = getfullargspec_no_self(rv_obj._rvs)
+    assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs', (2, 3), ['size'], {'size': None}, {}))
+
+
+def test_mapwrapper_serial():
+    # MapWrapper(1) must run serially via the builtin map, with no pool.
+    in_arg = np.arange(10.)
+    out_arg = np.sin(in_arg)
+
+    p = MapWrapper(1)
+    assert_(p._mapfunc is map)
+    assert_(p.pool is None)
+    assert_(p._own_pool is False)
+    out = list(p(np.sin, in_arg))
+    assert_equal(out, out_arg)
+
+    # Zero workers is invalid.
+    with assert_raises(RuntimeError):
+        p = MapWrapper(0)
+
+
+def test_pool():
+    # Sanity check that multiprocessing.Pool works in this environment.
+    with Pool(2) as p:
+        p.map(math.sin, [1,2,3, 4])
+
+
+def test_mapwrapper_parallel():
+    # MapWrapper(n>1) must create and own a process pool.
+    in_arg = np.arange(10.)
+    out_arg = np.sin(in_arg)
+
+    with MapWrapper(2) as p:
+        out = p(np.sin, in_arg)
+        assert_equal(list(out), out_arg)
+
+        assert_(p._own_pool is True)
+        assert_(isinstance(p.pool, PWL))
+        assert_(p._mapfunc is not None)
+
+    # the context manager should've closed the internal pool
+    # check that it has by asking it to calculate again.
+    with assert_raises(Exception) as excinfo:
+        p(np.sin, in_arg)
+
+    assert_(excinfo.type is ValueError)
+
+    # can also set a PoolWrapper up with a map-like callable instance
+    with Pool(2) as p:
+        q = MapWrapper(p.map)
+
+        assert_(q._own_pool is False)
+        q.close()
+
+        # closing the PoolWrapper shouldn't close the internal pool
+        # because it didn't create it
+        out = p.map(np.sin, in_arg)
+        assert_equal(list(out), out_arg)
+
+
+# get our custom ones and a few from the "import *" cases
+@pytest.mark.parametrize(
+    'key', ('ifft', 'diag', 'arccos', 'randn', 'rand', 'array'))
+def test_numpy_deprecation(key):
+    """Test that 'from numpy import *' functions are deprecated."""
+    if key in ('ifft', 'diag', 'arccos'):
+        arg = [1.0, 0.]
+    elif key == 'finfo':
+        # NOTE: 'finfo' is not in the parametrize list; this branch is kept
+        # so the key can be re-added without touching the body.
+        arg = float
+    else:
+        arg = 2
+    func = getattr(scipy, key)
+    match = r'scipy\.%s is deprecated.*2\.0\.0' % key
+    with deprecated_call(match=match) as dep:
+        func(arg)  # deprecated
+    # in case we catch more than one dep warning
+    fnames = [os.path.splitext(d.filename)[0] for d in dep.list]
+    basenames = [os.path.basename(fname) for fname in fnames]
+    # The warning must be attributed to this test module, not internals.
+    assert 'test__util' in basenames
+    if key in ('rand', 'randn'):
+        root = np.random
+    elif key == 'ifft':
+        root = np.fft
+    else:
+        root = np
+    func_np = getattr(root, key)
+    func_np(arg)  # not deprecated
+    assert func_np is not func
+    # classes should remain classes
+    if isinstance(func_np, type):
+        assert isinstance(func, type)
+
+
+def test_numpy_deprecation_functionality():
+    # Check that the deprecation wrappers don't break basic NumPy
+    # functionality
+    with deprecated_call():
+        x = scipy.array([1, 2, 3], dtype=scipy.float64)
+        assert x.dtype == scipy.float64
+        assert x.dtype == np.float64
+
+        x = scipy.finfo(scipy.float32)
+        assert x.eps == np.finfo(np.float32).eps
+
+        # The deprecated aliases must still be the real NumPy types.
+        assert scipy.float64 == np.float64
+        assert issubclass(np.float64, scipy.float64)
+
+
+def test_rng_integers():
+    """rng_integers must honor `endpoint` for both RandomState and Generator."""
+    rng = np.random.RandomState()
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # now try with np.random.Generator
+    try:
+        rng = np.random.default_rng()
+    except AttributeError:
+        # NumPy < 1.17 has no default_rng; the RandomState half above
+        # already ran, so just stop here.
+        return
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_bunch.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_bunch.py
new file mode 100644
index 0000000..f216b3e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_bunch.py
@@ -0,0 +1,163 @@
+
+import pytest
+import pickle
+from numpy.testing import assert_equal
+from scipy._lib._bunch import _make_tuple_bunch
+
+
+# `Result` is defined at the top level of the module so it can be
+# used to test pickling. x/y/z are tuple elements; w/beta are extra
+# (non-tuple) attributes.
+Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta'])
+
+
+class TestMakeTupleBunch:
+    """Tests for scipy._lib._bunch._make_tuple_bunch.
+
+    The generated classes behave as tuples over the primary fields while
+    exposing the extra fields as read-only attributes only.
+    """
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Tests with Result
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    def setup(self):
+        # Set up an instance of Result.
+        # NOTE(review): relies on pytest's nose-style `setup` hook (runs
+        # before each test method); deprecated in newer pytest in favor of
+        # `setup_method` -- confirm against the pinned pytest version.
+        self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)
+
+    def test_attribute_access(self):
+        assert_equal(self.result.x, 1)
+        assert_equal(self.result.y, 2)
+        assert_equal(self.result.z, 3)
+        assert_equal(self.result.w, 99)
+        assert_equal(self.result.beta, 0.5)
+
+    def test_indexing(self):
+        # Only the primary fields participate in tuple indexing.
+        assert_equal(self.result[0], 1)
+        assert_equal(self.result[1], 2)
+        assert_equal(self.result[2], 3)
+        assert_equal(self.result[-1], 3)
+        with pytest.raises(IndexError, match='index out of range'):
+            self.result[3]
+
+    def test_unpacking(self):
+        x0, y0, z0 = self.result
+        assert_equal((x0, y0, z0), (1, 2, 3))
+        assert_equal(self.result, (1, 2, 3))
+
+    def test_slice(self):
+        assert_equal(self.result[1:], (2, 3))
+        assert_equal(self.result[::2], (1, 3))
+        assert_equal(self.result[::-1], (3, 2, 1))
+
+    def test_len(self):
+        assert_equal(len(self.result), 3)
+
+    def test_repr(self):
+        # repr includes the extra fields even though they aren't in the tuple.
+        s = repr(self.result)
+        assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)')
+
+    def test_hash(self):
+        # Hash is that of the underlying (x, y, z) tuple only.
+        assert_equal(hash(self.result), hash((1, 2, 3)))
+
+    def test_pickle(self):
+        s = pickle.dumps(self.result)
+        obj = pickle.loads(s)
+        assert isinstance(obj, Result)
+        assert_equal(obj.x, self.result.x)
+        assert_equal(obj.y, self.result.y)
+        assert_equal(obj.z, self.result.z)
+        assert_equal(obj.w, self.result.w)
+        assert_equal(obj.beta, self.result.beta)
+
+    def test_read_only_existing(self):
+        with pytest.raises(AttributeError, match="can't set attribute"):
+            self.result.x = -1
+
+    def test_read_only_new(self):
+        with pytest.raises(AttributeError, match="can't set attribute"):
+            self.result.plate_of_shrimp = "lattice of coincidence"
+
+    def test_constructor_missing_parameter(self):
+        with pytest.raises(TypeError, match='missing'):
+            # `w` is missing.
+            Result(x=1, y=2, z=3, beta=0.75)
+
+    def test_constructor_incorrect_parameter(self):
+        with pytest.raises(TypeError, match='unexpected'):
+            # `foo` is not an existing field.
+            Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)
+
+    def test_module(self):
+        m = 'scipy._lib.tests.test_bunch'
+        assert_equal(Result.__module__, m)
+        assert_equal(self.result.__module__, m)
+
+    def test_extra_fields_per_instance(self):
+        # This test exists to ensure that instances of the same class
+        # store their own values for the extra fields. That is, the values
+        # are stored per instance and not in the class.
+        result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
+        result2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
+        assert_equal(result1.w, -1)
+        assert_equal(result1.beta, 0.0)
+        # The rest of these checks aren't essential, but let's check
+        # them anyway.
+        assert_equal(result1[:], (1, 2, 3))
+        assert_equal(result2.w, 99)
+        assert_equal(result2.beta, 1.0)
+        assert_equal(result2[:], (4, 5, 6))
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Other tests
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    def test_extra_field_names_is_optional(self):
+        Square = _make_tuple_bunch('Square', ['width', 'height'])
+        sq = Square(width=1, height=2)
+        assert_equal(sq.width, 1)
+        assert_equal(sq.height, 2)
+        s = repr(sq)
+        assert_equal(s, 'Square(width=1, height=2)')
+
+    def test_tuple_like(self):
+        Tup = _make_tuple_bunch('Tup', ['a', 'b'])
+        tu = Tup(a=1, b=2)
+        assert isinstance(tu, tuple)
+        assert isinstance(tu + (1,), tuple)
+
+    def test_explicit_module(self):
+        m = 'some.module.name'
+        Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m)
+        foo = Foo(x=1, a=355, b=113)
+        assert_equal(Foo.__module__, m)
+        assert_equal(foo.__module__, m)
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Argument validation
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    @pytest.mark.parametrize('args', [('123', ['a'], ['b']),
+                                      ('Foo', ['-3'], ['x']),
+                                      ('Foo', ['a'], ['+-*/'])])
+    def test_identifiers_not_allowed(self, args):
+        with pytest.raises(ValueError, match='identifiers'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
+                                      ('Foo', ['a', 'b'], ['b', 'x'])])
+    def test_repeated_field_names(self, args):
+        with pytest.raises(ValueError, match='Duplicate'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
+                                      ('Foo', ['a'], ['_x'])])
+    def test_leading_underscore_not_allowed(self, args):
+        with pytest.raises(ValueError, match='underscore'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
+                                      ('Foo', ['a'], ['or']),
+                                      ('and', ['a'], ['x'])])
+    def test_keyword_not_allowed_in_fields(self, args):
+        with pytest.raises(ValueError, match='keyword'):
+            _make_tuple_bunch(*args)
+
+    def test_at_least_one_field_name_required(self):
+        with pytest.raises(ValueError, match='at least one name'):
+            _make_tuple_bunch('Qwerty', [], ['a', 'b'])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_ccallback.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_ccallback.py
new file mode 100644
index 0000000..a35adce
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_ccallback.py
@@ -0,0 +1,197 @@
+from numpy.testing import assert_equal, assert_
+from pytest import raises as assert_raises
+
+import time
+import pytest
+import ctypes
+import threading
+from scipy._lib import _ccallback_c as _test_ccallback_cython
+from scipy._lib import _test_ccallback
+from scipy._lib._ccallback import LowLevelCallable
+
+try:
+ import cffi
+ HAVE_CFFI = True
+except ImportError:
+ HAVE_CFFI = False
+
+
+ERROR_VALUE = 2.0
+
+
+def callback_python(a, user_data=None):
+ if a == ERROR_VALUE:
+ raise ValueError("bad value")
+
+ if user_data is None:
+ return a + 1
+ else:
+ return a + user_data
+
+def _get_cffi_func(base, signature):
+ if not HAVE_CFFI:
+ pytest.skip("cffi not installed")
+
+ # Get function address
+ voidp = ctypes.cast(base, ctypes.c_void_p)
+ address = voidp.value
+
+ # Create corresponding cffi handle
+ ffi = cffi.FFI()
+ func = ffi.cast(signature, address)
+ return func
+
+
+def _get_ctypes_data():
+ value = ctypes.c_double(2.0)
+ return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
+
+
+def _get_cffi_data():
+ if not HAVE_CFFI:
+ pytest.skip("cffi not installed")
+ ffi = cffi.FFI()
+ return ffi.new('double *', 2.0)
+
+
+CALLERS = {
+ 'simple': _test_ccallback.test_call_simple,
+ 'nodata': _test_ccallback.test_call_nodata,
+ 'nonlocal': _test_ccallback.test_call_nonlocal,
+ 'cython': _test_ccallback_cython.test_call_cython,
+}
+
+# These functions have signatures known to the callers
+FUNCS = {
+ 'python': lambda: callback_python,
+ 'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
+ 'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1_cython"),
+ 'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
+ 'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
+ 'double (*)(double, int *, void *)'),
+ 'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
+ 'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1b_cython"),
+ 'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
+ 'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
+ 'double (*)(double, double, int *, void *)'),
+}
+
+# These functions have signatures the callers don't know
+BAD_FUNCS = {
+ 'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
+ 'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1bc_cython"),
+ 'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
+ 'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes,
+ 'double (*)(double, double, double, int *, void *)'),
+}
+
+USER_DATAS = {
+ 'ctypes': _get_ctypes_data,
+ 'cffi': _get_cffi_data,
+ 'capsule': _test_ccallback.test_get_data_capsule,
+}
+
+
+def test_callbacks():
+ def check(caller, func, user_data):
+ caller = CALLERS[caller]
+ func = FUNCS[func]()
+ user_data = USER_DATAS[user_data]()
+
+ if func is callback_python:
+ func2 = lambda x: func(x, 2.0)
+ else:
+ func2 = LowLevelCallable(func, user_data)
+ func = LowLevelCallable(func)
+
+ # Test basic call
+ assert_equal(caller(func, 1.0), 2.0)
+
+ # Test 'bad' value resulting to an error
+ assert_raises(ValueError, caller, func, ERROR_VALUE)
+
+ # Test passing in user_data
+ assert_equal(caller(func2, 1.0), 3.0)
+
+ for caller in sorted(CALLERS.keys()):
+ for func in sorted(FUNCS.keys()):
+ for user_data in sorted(USER_DATAS.keys()):
+ check(caller, func, user_data)
+
+
+def test_bad_callbacks():
+ def check(caller, func, user_data):
+ caller = CALLERS[caller]
+ user_data = USER_DATAS[user_data]()
+ func = BAD_FUNCS[func]()
+
+ if func is callback_python:
+ func2 = lambda x: func(x, 2.0)
+ else:
+ func2 = LowLevelCallable(func, user_data)
+ func = LowLevelCallable(func)
+
+ # Test that basic call fails
+ assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)
+
+ # Test that passing in user_data also fails
+ assert_raises(ValueError, caller, func2, 1.0)
+
+ # Test error message
+ llfunc = LowLevelCallable(func)
+ try:
+ caller(llfunc, 1.0)
+ except ValueError as err:
+ msg = str(err)
+ assert_(llfunc.signature in msg, msg)
+ assert_('double (double, double, int *, void *)' in msg, msg)
+
+ for caller in sorted(CALLERS.keys()):
+ for func in sorted(BAD_FUNCS.keys()):
+ for user_data in sorted(USER_DATAS.keys()):
+ check(caller, func, user_data)
+
+
+def test_signature_override():
+ caller = _test_ccallback.test_call_simple
+ func = _test_ccallback.test_get_plus1_capsule()
+
+ llcallable = LowLevelCallable(func, signature="bad signature")
+ assert_equal(llcallable.signature, "bad signature")
+ assert_raises(ValueError, caller, llcallable, 3)
+
+ llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
+ assert_equal(llcallable.signature, "double (double, int *, void *)")
+ assert_equal(caller(llcallable, 3), 4)
+
+
+def test_threadsafety():
+ def callback(a, caller):
+ if a <= 0:
+ return 1
+ else:
+ res = caller(lambda x: callback(x, caller), a - 1)
+ return 2*res
+
+ def check(caller):
+ caller = CALLERS[caller]
+
+ results = []
+
+ count = 10
+
+ def run():
+ time.sleep(0.01)
+ r = caller(lambda x: callback(x, caller), count)
+ results.append(r)
+
+ threads = [threading.Thread(target=run) for j in range(20)]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ assert_equal(results, [2.0**count]*len(threads))
+
+ for caller in CALLERS.keys():
+ check(caller)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_deprecation.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_deprecation.py
new file mode 100644
index 0000000..7910bd5
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_deprecation.py
@@ -0,0 +1,10 @@
+import pytest
+
+
+def test_cython_api_deprecation():
+ match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
+ "is deprecated, use `foo` instead!\n"
+ "Deprecated in Scipy 42.0.0")
+ with pytest.warns(DeprecationWarning, match=match):
+ from .. import _test_deprecation_call
+ assert _test_deprecation_call.call() == (1, 1)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_import_cycles.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_import_cycles.py
new file mode 100644
index 0000000..2eb5170
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_import_cycles.py
@@ -0,0 +1,52 @@
+import sys
+import subprocess
+
+
+MODULES = [
+ "scipy.cluster",
+ "scipy.cluster.vq",
+ "scipy.cluster.hierarchy",
+ "scipy.constants",
+ "scipy.fft",
+ "scipy.fftpack",
+ "scipy.fftpack.convolve",
+ "scipy.integrate",
+ "scipy.interpolate",
+ "scipy.io",
+ "scipy.io.arff",
+ "scipy.io.harwell_boeing",
+ "scipy.io.idl",
+ "scipy.io.matlab",
+ "scipy.io.netcdf",
+ "scipy.io.wavfile",
+ "scipy.linalg",
+ "scipy.linalg.blas",
+ "scipy.linalg.cython_blas",
+ "scipy.linalg.lapack",
+ "scipy.linalg.cython_lapack",
+ "scipy.linalg.interpolative",
+ "scipy.misc",
+ "scipy.ndimage",
+ "scipy.odr",
+ "scipy.optimize",
+ "scipy.signal",
+ "scipy.signal.windows",
+ "scipy.sparse",
+ "scipy.sparse.linalg",
+ "scipy.sparse.csgraph",
+ "scipy.spatial",
+ "scipy.spatial.distance",
+ "scipy.special",
+ "scipy.stats",
+ "scipy.stats.distributions",
+ "scipy.stats.mstats",
+]
+
+
+def test_modules_importable():
+ # Regression test for gh-6793.
+ # Check that all modules are importable in a new Python process.
+ # This is not necessarily true if there are import cycles present.
+ for module in MODULES:
+ cmd = 'import {}'.format(module)
+ subprocess.check_call([sys.executable, '-c', cmd])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_linear_assignment.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_linear_assignment.py
new file mode 100644
index 0000000..5ba729d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_linear_assignment.py
@@ -0,0 +1,100 @@
+from itertools import product
+
+from numpy.testing import assert_array_equal
+import numpy as np
+import pytest
+
+from scipy.optimize import linear_sum_assignment
+from scipy.sparse import csr_matrix, random
+from scipy.sparse.csgraph import min_weight_full_bipartite_matching
+
+
+# Tests that combine scipy.optimize.linear_sum_assignment and
+# scipy.sparse.csgraph.min_weight_full_bipartite_matching
+@pytest.mark.parametrize('solver_type,sign,test_case', product(
+ [(linear_sum_assignment, np.array),
+ (min_weight_full_bipartite_matching, csr_matrix)],
+ [-1, 1],
+ [
+ # Square
+ ([[400, 150, 400],
+ [400, 450, 600],
+ [300, 225, 300]],
+ [150, 400, 300]),
+
+ # Rectangular variant
+ ([[400, 150, 400, 1],
+ [400, 450, 600, 2],
+ [300, 225, 300, 3]],
+ [150, 2, 300]),
+
+ ([[10, 10, 8],
+ [9, 8, 1],
+ [9, 7, 4]],
+ [10, 1, 7]),
+
+        # Rectangular variant
+ ([[10, 10, 8, 11],
+ [9, 8, 1, 1],
+ [9, 7, 4, 10]],
+ [10, 1, 4]),
+
+        # Square
+ ([[10, float("inf"), float("inf")],
+ [float("inf"), float("inf"), 1],
+ [float("inf"), 7, float("inf")]],
+ [10, 1, 7]),
+ ])
+)
+def test_two_methods_give_expected_result_on_small_inputs(
+ solver_type, sign, test_case
+):
+ solver, array_type = solver_type
+ cost_matrix, expected_cost = test_case
+ maximize = sign == -1
+ cost_matrix = sign * array_type(cost_matrix)
+ expected_cost = sign * np.array(expected_cost)
+
+ row_ind, col_ind = solver(cost_matrix, maximize=maximize)
+ assert_array_equal(row_ind, np.sort(row_ind))
+ assert_array_equal(expected_cost,
+ np.array(cost_matrix[row_ind, col_ind]).flatten())
+
+ cost_matrix = cost_matrix.T
+ row_ind, col_ind = solver(cost_matrix, maximize=maximize)
+ assert_array_equal(row_ind, np.sort(row_ind))
+ assert_array_equal(np.sort(expected_cost),
+ np.sort(np.array(
+ cost_matrix[row_ind, col_ind])).flatten())
+
+
+def test_two_methods_give_same_result_on_many_sparse_inputs():
+ # As opposed to the test above, here we do not spell out the expected
+ # output; only assert that the two methods give the same result.
+ # Concretely, the below tests 100 cases of size 100x100, out of which
+ # 36 are infeasible.
+ np.random.seed(1234)
+ for _ in range(100):
+ lsa_raises = False
+ mwfbm_raises = False
+ sparse = random(100, 100, density=0.06,
+ data_rvs=lambda size: np.random.randint(1, 100, size))
+ # In csgraph, zeros correspond to missing edges, so we explicitly
+ # replace those with infinities
+ dense = np.full(sparse.shape, np.inf)
+ dense[sparse.row, sparse.col] = sparse.data
+ sparse = sparse.tocsr()
+ try:
+ row_ind, col_ind = linear_sum_assignment(dense)
+ lsa_cost = dense[row_ind, col_ind].sum()
+ except ValueError:
+ lsa_raises = True
+ try:
+ row_ind, col_ind = min_weight_full_bipartite_matching(sparse)
+ mwfbm_cost = sparse[row_ind, col_ind].sum()
+ except ValueError:
+ mwfbm_raises = True
+ # Ensure that if one method raises, so does the other one.
+ assert lsa_raises == mwfbm_raises
+ if not lsa_raises:
+ assert lsa_cost == mwfbm_cost
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_tmpdirs.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_tmpdirs.py
new file mode 100644
index 0000000..466f926
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_tmpdirs.py
@@ -0,0 +1,42 @@
+""" Test tmpdirs module """
+from os import getcwd
+from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists
+
+from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir
+
+from numpy.testing import assert_, assert_equal
+
+MY_PATH = abspath(__file__)
+MY_DIR = dirname(MY_PATH)
+
+
+def test_tempdir():
+ with tempdir() as tmpdir:
+ fname = pjoin(tmpdir, 'example_file.txt')
+ with open(fname, 'wt') as fobj:
+            fobj.write('a string\n')
+ assert_(not exists(tmpdir))
+
+
+def test_in_tempdir():
+ my_cwd = getcwd()
+ with in_tempdir() as tmpdir:
+ with open('test.txt', 'wt') as f:
+ f.write('some text')
+ assert_(isfile('test.txt'))
+ assert_(isfile(pjoin(tmpdir, 'test.txt')))
+ assert_(not exists(tmpdir))
+ assert_equal(getcwd(), my_cwd)
+
+
+def test_given_directory():
+ # Test InGivenDirectory
+ cwd = getcwd()
+ with in_dir() as tmpdir:
+ assert_equal(tmpdir, abspath(cwd))
+ assert_equal(tmpdir, abspath(getcwd()))
+ with in_dir(MY_DIR) as tmpdir:
+ assert_equal(tmpdir, MY_DIR)
+ assert_equal(realpath(MY_DIR), realpath(abspath(getcwd())))
+ # We were deleting the given directory! Check not so now.
+ assert_(isfile(MY_PATH))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_warnings.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_warnings.py
new file mode 100644
index 0000000..ca17744
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/tests/test_warnings.py
@@ -0,0 +1,121 @@
+"""
+Tests which scan for certain occurrences in the code, they may not find
+all of these occurrences but should catch almost all. This file was adapted
+from NumPy.
+"""
+
+
+import os
+from pathlib import Path
+import ast
+import tokenize
+
+import scipy
+
+import pytest
+
+
+class ParseCall(ast.NodeVisitor):
+ def __init__(self):
+ self.ls = []
+
+ def visit_Attribute(self, node):
+ ast.NodeVisitor.generic_visit(self, node)
+ self.ls.append(node.attr)
+
+ def visit_Name(self, node):
+ self.ls.append(node.id)
+
+class FindFuncs(ast.NodeVisitor):
+ def __init__(self, filename):
+ super().__init__()
+ self.__filename = filename
+ self.bad_filters = []
+ self.bad_stacklevels = []
+
+ def visit_Call(self, node):
+ p = ParseCall()
+ p.visit(node.func)
+ ast.NodeVisitor.generic_visit(self, node)
+
+ if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
+ if node.args[0].s == "ignore":
+ self.bad_filters.append(
+ "{}:{}".format(self.__filename, node.lineno))
+
+ if p.ls[-1] == 'warn' and (
+ len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+
+ if self.__filename == "_lib/tests/test_warnings.py":
+ # This file
+ return
+
+ # See if stacklevel exists:
+ if len(node.args) == 3:
+ return
+ args = {kw.arg for kw in node.keywords}
+ if "stacklevel" not in args:
+ self.bad_stacklevels.append(
+ "{}:{}".format(self.__filename, node.lineno))
+
+
+@pytest.fixture(scope="session")
+def warning_calls():
+ # combined "ignore" and stacklevel error
+ base = Path(scipy.__file__).parent
+
+ bad_filters = []
+ bad_stacklevels = []
+
+ for path in base.rglob("*.py"):
+ # use tokenize to auto-detect encoding on systems where no
+ # default encoding is defined (e.g., LANG='C')
+ with tokenize.open(str(path)) as file:
+ tree = ast.parse(file.read(), filename=str(path))
+ finder = FindFuncs(path.relative_to(base))
+ finder.visit(tree)
+ bad_filters.extend(finder.bad_filters)
+ bad_stacklevels.extend(finder.bad_stacklevels)
+
+ return bad_filters, bad_stacklevels
+
+
+@pytest.mark.slow
+def test_warning_calls_filters(warning_calls):
+ bad_filters, bad_stacklevels = warning_calls
+
+ # There is still one simplefilter occurrence in optimize.py that could be removed.
+ bad_filters = [item for item in bad_filters
+ if 'optimize.py' not in item]
+ # The filterwarnings calls in sparse are needed.
+ bad_filters = [item for item in bad_filters
+ if os.path.join('sparse', '__init__.py') not in item
+ and os.path.join('sparse', 'sputils.py') not in item]
+
+ if bad_filters:
+ raise AssertionError(
+ "warning ignore filter should not be used, instead, use\n"
+ "numpy.testing.suppress_warnings (in tests only);\n"
+ "found in:\n {}".format(
+ "\n ".join(bad_filters)))
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(reason="stacklevels currently missing")
+def test_warning_calls_stacklevels(warning_calls):
+ bad_filters, bad_stacklevels = warning_calls
+
+ msg = ""
+
+ if bad_filters:
+ msg += ("warning ignore filter should not be used, instead, use\n"
+ "numpy.testing.suppress_warnings (in tests only);\n"
+ "found in:\n {}".format("\n ".join(bad_filters)))
+ msg += "\n\n"
+
+ if bad_stacklevels:
+ msg += "warnings should have an appropriate stacklevel:\n {}".format(
+ "\n ".join(bad_stacklevels))
+
+ if msg:
+ raise AssertionError(msg)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/_lib/uarray.py b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/uarray.py
new file mode 100644
index 0000000..dd43450
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/_lib/uarray.py
@@ -0,0 +1,31 @@
+"""`uarray` provides functions for generating multimethods that dispatch to
+multiple different backends
+
+This should be imported, rather than `_uarray` so that an installed version could
+be used instead, if available. This means that users can call
+`uarray.set_backend` directly instead of going through SciPy.
+
+"""
+
+
+# Prefer an installed version of uarray, if available
+try:
+ import uarray as _uarray
+except ImportError:
+ _has_uarray = False
+else:
+ from scipy._lib._pep440 import Version as _Version
+
+ _has_uarray = _Version(_uarray.__version__) >= _Version("0.5")
+ del _uarray
+ del _Version
+
+
+if _has_uarray:
+ from uarray import *
+ from uarray import _Function
+else:
+ from ._uarray import *
+ from ._uarray import _Function
+
+del _has_uarray
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/__init__.py
new file mode 100644
index 0000000..8fe47ce
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/__init__.py
@@ -0,0 +1,29 @@
+"""
+=========================================
+Clustering package (:mod:`scipy.cluster`)
+=========================================
+
+.. currentmodule:: scipy.cluster
+
+:mod:`scipy.cluster.vq`
+
+Clustering algorithms are useful in information theory, target detection,
+communications, compression, and other areas. The `vq` module only
+supports vector quantization and the k-means algorithms.
+
+:mod:`scipy.cluster.hierarchy`
+
+The `hierarchy` module provides functions for hierarchical and
+agglomerative clustering. Its features include generating hierarchical
+clusters from distance matrices,
+calculating statistics on clusters, cutting linkages
+to generate flat clusters, and visualizing clusters with dendrograms.
+
+"""
+__all__ = ['vq', 'hierarchy']
+
+from . import vq, hierarchy
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_hierarchy.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_hierarchy.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..7f67bd4
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_hierarchy.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_optimal_leaf_ordering.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_optimal_leaf_ordering.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..2dc6d60
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_optimal_leaf_ordering.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_vq.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_vq.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..bc1c440
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/_vq.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/hierarchy.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/hierarchy.py
new file mode 100644
index 0000000..e33df6a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/hierarchy.py
@@ -0,0 +1,4169 @@
+"""
+Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
+========================================================
+
+.. currentmodule:: scipy.cluster.hierarchy
+
+These functions cut hierarchical clusterings into flat clusterings
+or find the roots of the forest formed by a cut by providing the flat
+cluster ids of each observation.
+
+.. autosummary::
+ :toctree: generated/
+
+ fcluster
+ fclusterdata
+ leaders
+
+These are routines for agglomerative clustering.
+
+.. autosummary::
+ :toctree: generated/
+
+ linkage
+ single
+ complete
+ average
+ weighted
+ centroid
+ median
+ ward
+
+These routines compute statistics on hierarchies.
+
+.. autosummary::
+ :toctree: generated/
+
+ cophenet
+ from_mlab_linkage
+ inconsistent
+ maxinconsts
+ maxdists
+ maxRstat
+ to_mlab_linkage
+
+Routines for visualizing flat clusters.
+
+.. autosummary::
+ :toctree: generated/
+
+ dendrogram
+
+These are data structures and routines for representing hierarchies as
+tree objects.
+
+.. autosummary::
+ :toctree: generated/
+
+ ClusterNode
+ leaves_list
+ to_tree
+ cut_tree
+ optimal_leaf_ordering
+
+These are predicates for checking the validity of linkage and
+inconsistency matrices as well as for checking isomorphism of two
+flat cluster assignments.
+
+.. autosummary::
+ :toctree: generated/
+
+ is_valid_im
+ is_valid_linkage
+ is_isomorphic
+ is_monotonic
+ correspond
+ num_obs_linkage
+
+Utility routines for plotting:
+
+.. autosummary::
+ :toctree: generated/
+
+ set_link_color_palette
+
+Utility classes:
+
+.. autosummary::
+ :toctree: generated/
+
+ DisjointSet -- data structure for incremental connectivity queries
+
+"""
+# Copyright (C) Damian Eads, 2007-2008. New BSD License.
+
+# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
+#
+# Author: Damian Eads
+# Date: September 22, 2007
+#
+# Copyright (c) 2007, 2008, Damian Eads
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# - Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the
+# following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# - Neither the name of the author nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import warnings
+import bisect
+from collections import deque
+
+import numpy as np
+from . import _hierarchy, _optimal_leaf_ordering
+import scipy.spatial.distance as distance
+from scipy._lib._disjoint_set import DisjointSet
+
+
+_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
+ 'median': 4, 'ward': 5, 'weighted': 6}
+_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
+
+__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete',
+ 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster',
+ 'fclusterdata', 'from_mlab_linkage', 'inconsistent',
+ 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage',
+ 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists',
+ 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering',
+ 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree',
+ 'ward', 'weighted', 'distance']
+
+
+class ClusterWarning(UserWarning):
+ pass
+
+
+def _warning(s):
+ warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3)
+
+
+def _copy_array_if_base_present(a):
+ """
+ Copy the array if its base points to a parent array.
+ """
+ if a.base is not None:
+ return a.copy()
+ elif np.issubsctype(a, np.float32):
+ return np.array(a, dtype=np.double)
+ else:
+ return a
+
+
+def _copy_arrays_if_base_present(T):
+ """
+ Accept a tuple of arrays T. Copies the array T[i] if its base array
+ points to an actual array. Otherwise, the reference is just copied.
+ This is useful if the arrays are being passed to a C function that
+ does not do proper striding.
+ """
+ l = [_copy_array_if_base_present(a) for a in T]
+ return l
+
+
+def _randdm(pnts):
+ """
+ Generate a random distance matrix stored in condensed form.
+
+ Parameters
+ ----------
+ pnts : int
+ The number of points in the distance matrix. Has to be at least 2.
+
+ Returns
+ -------
+ D : ndarray
+ A ``pnts * (pnts - 1) / 2`` sized vector is returned.
+ """
+ if pnts >= 2:
+ D = np.random.rand(pnts * (pnts - 1) / 2)
+ else:
+ raise ValueError("The number of points in the distance matrix "
+ "must be at least 2.")
+ return D
+
+
+def single(y):
+ """
+ Perform single/min/nearest linkage on the condensed distance matrix ``y``.
+
+ Parameters
+ ----------
+ y : ndarray
+ The upper triangular of the distance matrix. The result of
+ ``pdist`` is returned in this form.
+
+ Returns
+ -------
+ Z : ndarray
+ The linkage matrix.
+
+ See Also
+ --------
+ linkage: for advanced creation of hierarchical clusterings.
+ scipy.spatial.distance.pdist : pairwise distance metrics
+
+ Examples
+ --------
+ >>> from scipy.cluster.hierarchy import single, fcluster
+ >>> from scipy.spatial.distance import pdist
+
+ First, we need a toy dataset to play with::
+
+ x x x x
+ x x
+
+ x x
+ x x x x
+
+ >>> X = [[0, 0], [0, 1], [1, 0],
+ ... [0, 4], [0, 3], [1, 4],
+ ... [4, 0], [3, 0], [4, 1],
+ ... [4, 4], [3, 4], [4, 3]]
+
+ Then, we get a condensed distance matrix from this dataset:
+
+ >>> y = pdist(X)
+
+ Finally, we can perform the clustering:
+
+ >>> Z = single(y)
+ >>> Z
+ array([[ 0., 1., 1., 2.],
+ [ 2., 12., 1., 3.],
+ [ 3., 4., 1., 2.],
+ [ 5., 14., 1., 3.],
+ [ 6., 7., 1., 2.],
+ [ 8., 16., 1., 3.],
+ [ 9., 10., 1., 2.],
+ [11., 18., 1., 3.],
+ [13., 15., 2., 6.],
+ [17., 20., 2., 9.],
+ [19., 21., 2., 12.]])
+
+ The linkage matrix ``Z`` represents a dendrogram - see
+ `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+ contents.
+
+ We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+ each initial point would belong given a distance threshold:
+
+ >>> fcluster(Z, 0.9, criterion='distance')
+ array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32)
+ >>> fcluster(Z, 1, criterion='distance')
+ array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+ >>> fcluster(Z, 2, criterion='distance')
+ array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+ Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+ plot of the dendrogram.
+ """
+ return linkage(y, method='single', metric='euclidean')
+
+
+def complete(y):
+ """
+ Perform complete/max/farthest point linkage on a condensed distance matrix.
+
+ Parameters
+ ----------
+ y : ndarray
+ The upper triangular of the distance matrix. The result of
+ ``pdist`` is returned in this form.
+
+ Returns
+ -------
+ Z : ndarray
+ A linkage matrix containing the hierarchical clustering. See
+ the `linkage` function documentation for more information
+ on its structure.
+
+ See Also
+ --------
+ linkage: for advanced creation of hierarchical clusterings.
+ scipy.spatial.distance.pdist : pairwise distance metrics
+
+ Examples
+ --------
+ >>> from scipy.cluster.hierarchy import complete, fcluster
+ >>> from scipy.spatial.distance import pdist
+
+ First, we need a toy dataset to play with::
+
+ x x x x
+ x x
+
+ x x
+ x x x x
+
+ >>> X = [[0, 0], [0, 1], [1, 0],
+ ... [0, 4], [0, 3], [1, 4],
+ ... [4, 0], [3, 0], [4, 1],
+ ... [4, 4], [3, 4], [4, 3]]
+
+ Then, we get a condensed distance matrix from this dataset:
+
+ >>> y = pdist(X)
+
+ Finally, we can perform the clustering:
+
+ >>> Z = complete(y)
+ >>> Z
+ array([[ 0. , 1. , 1. , 2. ],
+ [ 3. , 4. , 1. , 2. ],
+ [ 6. , 7. , 1. , 2. ],
+ [ 9. , 10. , 1. , 2. ],
+ [ 2. , 12. , 1.41421356, 3. ],
+ [ 5. , 13. , 1.41421356, 3. ],
+ [ 8. , 14. , 1.41421356, 3. ],
+ [11. , 15. , 1.41421356, 3. ],
+ [16. , 17. , 4.12310563, 6. ],
+ [18. , 19. , 4.12310563, 6. ],
+ [20. , 21. , 5.65685425, 12. ]])
+
+ The linkage matrix ``Z`` represents a dendrogram - see
+ `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+ contents.
+
+ We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+ each initial point would belong given a distance threshold:
+
+ >>> fcluster(Z, 0.9, criterion='distance')
+ array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
+ >>> fcluster(Z, 1.5, criterion='distance')
+ array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+ >>> fcluster(Z, 4.5, criterion='distance')
+ array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
+ >>> fcluster(Z, 6, criterion='distance')
+ array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+ Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+ plot of the dendrogram.
+ """
+ return linkage(y, method='complete', metric='euclidean')
+
+
+def average(y):
+ """
+ Perform average/UPGMA linkage on a condensed distance matrix.
+
+ Parameters
+ ----------
+ y : ndarray
+ The upper triangular of the distance matrix. The result of
+ ``pdist`` is returned in this form.
+
+ Returns
+ -------
+ Z : ndarray
+ A linkage matrix containing the hierarchical clustering. See
+ `linkage` for more information on its structure.
+
+ See Also
+ --------
+ linkage: for advanced creation of hierarchical clusterings.
+ scipy.spatial.distance.pdist : pairwise distance metrics
+
+ Examples
+ --------
+ >>> from scipy.cluster.hierarchy import average, fcluster
+ >>> from scipy.spatial.distance import pdist
+
+ First, we need a toy dataset to play with::
+
+ x x x x
+ x x
+
+ x x
+ x x x x
+
+ >>> X = [[0, 0], [0, 1], [1, 0],
+ ... [0, 4], [0, 3], [1, 4],
+ ... [4, 0], [3, 0], [4, 1],
+ ... [4, 4], [3, 4], [4, 3]]
+
+ Then, we get a condensed distance matrix from this dataset:
+
+ >>> y = pdist(X)
+
+ Finally, we can perform the clustering:
+
+ >>> Z = average(y)
+ >>> Z
+ array([[ 0. , 1. , 1. , 2. ],
+ [ 3. , 4. , 1. , 2. ],
+ [ 6. , 7. , 1. , 2. ],
+ [ 9. , 10. , 1. , 2. ],
+ [ 2. , 12. , 1.20710678, 3. ],
+ [ 5. , 13. , 1.20710678, 3. ],
+ [ 8. , 14. , 1.20710678, 3. ],
+ [11. , 15. , 1.20710678, 3. ],
+ [16. , 17. , 3.39675184, 6. ],
+ [18. , 19. , 3.39675184, 6. ],
+ [20. , 21. , 4.09206523, 12. ]])
+
+ The linkage matrix ``Z`` represents a dendrogram - see
+ `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+ contents.
+
+ We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+ each initial point would belong given a distance threshold:
+
+ >>> fcluster(Z, 0.9, criterion='distance')
+ array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
+ >>> fcluster(Z, 1.5, criterion='distance')
+ array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+ >>> fcluster(Z, 4, criterion='distance')
+ array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
+ >>> fcluster(Z, 6, criterion='distance')
+ array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+ Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+ plot of the dendrogram.
+
+ """
+ return linkage(y, method='average', metric='euclidean')
+
+
def weighted(y):
    """
    Perform weighted/WPGMA linkage on the condensed distance matrix.

    See `linkage` for more information on the return
    structure and algorithm.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix. The result of
        ``pdist`` is returned in this form.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering. See
        `linkage` for more information on its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics

    Examples
    --------
    >>> from scipy.cluster.hierarchy import weighted, fcluster
    >>> from scipy.spatial.distance import pdist

    First, we need a toy dataset to play with::

        x x    x x
        x        x

        x        x
        x x    x x

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    Then, we get a condensed distance matrix from this dataset:

    >>> y = pdist(X)

    Finally, we can perform the clustering:

    >>> Z = weighted(y)
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 9.        , 11.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.20710678,  3.        ],
           [ 8.        , 13.        ,  1.20710678,  3.        ],
           [ 5.        , 14.        ,  1.20710678,  3.        ],
           [10.        , 15.        ,  1.20710678,  3.        ],
           [18.        , 19.        ,  3.05595762,  6.        ],
           [16.        , 17.        ,  3.32379407,  6.        ],
           [20.        , 21.        ,  4.06357713, 12.        ]])

    The linkage matrix ``Z`` represents a dendrogram - see
    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
    contents.

    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
    each initial point would belong given a distance threshold:

    >>> fcluster(Z, 0.9, criterion='distance')
    array([ 7,  8,  9,  1,  2,  3, 10, 11, 12,  4,  6,  5], dtype=int32)
    >>> fcluster(Z, 1.5, criterion='distance')
    array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32)
    >>> fcluster(Z, 4, criterion='distance')
    array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32)
    >>> fcluster(Z, 6, criterion='distance')
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)

    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
    plot of the dendrogram.

    """
    # Thin convenience wrapper: delegate to the generic implementation
    # with the WPGMA update rule and Euclidean observation metric.
    return linkage(y, method='weighted', metric='euclidean')
+
+
def centroid(y):
    """
    Perform centroid/UPGMC linkage.

    See `linkage` for more information on the input matrix,
    return structure, and algorithm.

    The following are common calling conventions:

    1. ``Z = centroid(y)``

       Performs centroid/UPGMC linkage on the condensed distance
       matrix ``y``.

    2. ``Z = centroid(X)``

       Performs centroid/UPGMC linkage on the observation matrix ``X``
       using Euclidean distance as the distance metric.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix. A condensed
        distance matrix is a flat array containing the upper
        triangular of the distance matrix. This is the form that
        ``pdist`` returns. Alternatively, a collection of
        m observation vectors in n dimensions may be passed as
        an m by n array.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering. See
        the `linkage` function documentation for more information
        on its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics

    Examples
    --------
    >>> from scipy.cluster.hierarchy import centroid, fcluster
    >>> from scipy.spatial.distance import pdist

    First, we need a toy dataset to play with::

        x x    x x
        x        x

        x        x
        x x    x x

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    Then, we get a condensed distance matrix from this dataset:

    >>> y = pdist(X)

    Finally, we can perform the clustering:

    >>> Z = centroid(y)
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.11803399,  3.        ],
           [ 5.        , 13.        ,  1.11803399,  3.        ],
           [ 8.        , 15.        ,  1.11803399,  3.        ],
           [11.        , 14.        ,  1.11803399,  3.        ],
           [18.        , 19.        ,  3.33333333,  6.        ],
           [16.        , 17.        ,  3.33333333,  6.        ],
           [20.        , 21.        ,  3.33333333, 12.        ]])

    The linkage matrix ``Z`` represents a dendrogram - see
    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
    contents.

    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
    each initial point would belong given a distance threshold:

    >>> fcluster(Z, 0.9, criterion='distance')
    array([ 7,  8,  9, 10, 11, 12,  1,  2,  3,  4,  5,  6], dtype=int32)
    >>> fcluster(Z, 1.1, criterion='distance')
    array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
    >>> fcluster(Z, 2, criterion='distance')
    array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
    >>> fcluster(Z, 4, criterion='distance')
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)

    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
    plot of the dendrogram.

    """
    # Thin convenience wrapper: delegate to the generic implementation
    # with the UPGMC update rule and Euclidean observation metric.
    return linkage(y, method='centroid', metric='euclidean')
+
+
def median(y):
    """
    Perform median/WPGMC linkage.

    See `linkage` for more information on the return structure
    and algorithm.

    The following are common calling conventions:

    1. ``Z = median(y)``

       Performs median/WPGMC linkage on the condensed distance matrix
       ``y``. See ``linkage`` for more information on the return
       structure and algorithm.

    2. ``Z = median(X)``

       Performs median/WPGMC linkage on the observation matrix ``X``
       using Euclidean distance as the distance metric. See `linkage`
       for more information on the return structure and algorithm.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix. A condensed
        distance matrix is a flat array containing the upper
        triangular of the distance matrix. This is the form that
        ``pdist`` returns. Alternatively, a collection of
        m observation vectors in n dimensions may be passed as
        an m by n array.

    Returns
    -------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics

    Examples
    --------
    >>> from scipy.cluster.hierarchy import median, fcluster
    >>> from scipy.spatial.distance import pdist

    First, we need a toy dataset to play with::

        x x    x x
        x        x

        x        x
        x x    x x

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    Then, we get a condensed distance matrix from this dataset:

    >>> y = pdist(X)

    Finally, we can perform the clustering:

    >>> Z = median(y)
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.11803399,  3.        ],
           [ 5.        , 13.        ,  1.11803399,  3.        ],
           [ 8.        , 15.        ,  1.11803399,  3.        ],
           [11.        , 14.        ,  1.11803399,  3.        ],
           [18.        , 19.        ,  3.        ,  6.        ],
           [16.        , 17.        ,  3.5       ,  6.        ],
           [20.        , 21.        ,  3.25      , 12.        ]])

    The linkage matrix ``Z`` represents a dendrogram - see
    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
    contents.

    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
    each initial point would belong given a distance threshold:

    >>> fcluster(Z, 0.9, criterion='distance')
    array([ 7,  8,  9, 10, 11, 12,  1,  2,  3,  4,  5,  6], dtype=int32)
    >>> fcluster(Z, 1.1, criterion='distance')
    array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
    >>> fcluster(Z, 2, criterion='distance')
    array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
    >>> fcluster(Z, 4, criterion='distance')
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)

    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
    plot of the dendrogram.

    """
    # Thin convenience wrapper: delegate to the generic implementation
    # with the WPGMC update rule and Euclidean observation metric.
    return linkage(y, method='median', metric='euclidean')
+
+
def ward(y):
    """
    Perform Ward's linkage on a condensed distance matrix.

    See `linkage` for more information on the return structure
    and algorithm.

    The following are common calling conventions:

    1. ``Z = ward(y)``
       Performs Ward's linkage on the condensed distance matrix ``y``.

    2. ``Z = ward(X)``
       Performs Ward's linkage on the observation matrix ``X`` using
       Euclidean distance as the distance metric.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix. A condensed
        distance matrix is a flat array containing the upper
        triangular of the distance matrix. This is the form that
        ``pdist`` returns. Alternatively, a collection of
        m observation vectors in n dimensions may be passed as
        an m by n array.

    Returns
    -------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix. See
        `linkage` for more information on the return structure and
        algorithm.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, fcluster
    >>> from scipy.spatial.distance import pdist

    First, we need a toy dataset to play with::

        x x    x x
        x        x

        x        x
        x x    x x

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    Then, we get a condensed distance matrix from this dataset:

    >>> y = pdist(X)

    Finally, we can perform the clustering:

    >>> Z = ward(y)
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.29099445,  3.        ],
           [ 5.        , 13.        ,  1.29099445,  3.        ],
           [ 8.        , 14.        ,  1.29099445,  3.        ],
           [11.        , 15.        ,  1.29099445,  3.        ],
           [16.        , 17.        ,  5.77350269,  6.        ],
           [18.        , 19.        ,  5.77350269,  6.        ],
           [20.        , 21.        ,  8.16496581, 12.        ]])

    The linkage matrix ``Z`` represents a dendrogram - see
    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
    contents.

    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
    each initial point would belong given a distance threshold:

    >>> fcluster(Z, 0.9, criterion='distance')
    array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12], dtype=int32)
    >>> fcluster(Z, 1.1, criterion='distance')
    array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
    >>> fcluster(Z, 3, criterion='distance')
    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
    >>> fcluster(Z, 9, criterion='distance')
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)

    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
    plot of the dendrogram.

    """
    # Thin convenience wrapper: delegate to the generic implementation
    # with the Ward variance-minimization rule and Euclidean metric.
    return linkage(y, method='ward', metric='euclidean')
+
+
def linkage(y, method='single', metric='euclidean', optimal_ordering=False):
    """
    Perform hierarchical/agglomerative clustering.

    The input y may be either a 1-D condensed distance matrix
    or a 2-D array of observation vectors.

    If y is a 1-D condensed distance matrix,
    then y must be a :math:`\\binom{n}{2}` sized
    vector, where n is the number of original observations paired
    in the distance matrix. The behavior of this function is very
    similar to the MATLAB linkage function.

    A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
    :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
    ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
    cluster with an index less than :math:`n` corresponds to one of
    the :math:`n` original observations. The distance between
    clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
    fourth value ``Z[i, 3]`` represents the number of original
    observations in the newly formed cluster.

    The following linkage methods are used to compute the distance
    :math:`d(s, t)` between two clusters :math:`s` and
    :math:`t`. The algorithm begins with a forest of clusters that
    have yet to be used in the hierarchy being formed. When two
    clusters :math:`s` and :math:`t` from this forest are combined
    into a single cluster :math:`u`, :math:`s` and :math:`t` are
    removed from the forest, and :math:`u` is added to the
    forest. When only one cluster remains in the forest, the algorithm
    stops, and this cluster becomes the root.

    A distance matrix is maintained at each iteration. The ``d[i,j]``
    entry corresponds to the distance between cluster :math:`i` and
    :math:`j` in the original forest.

    At each iteration, the algorithm must update the distance matrix
    to reflect the distance of the newly formed cluster u with the
    remaining clusters in the forest.

    Suppose there are :math:`|u|` original observations
    :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
    :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
    cluster :math:`v`. Recall, :math:`s` and :math:`t` are
    combined to form cluster :math:`u`. Let :math:`v` be any
    remaining cluster in the forest that is not :math:`u`.

    The following are methods for calculating the distance between the
    newly formed cluster :math:`u` and each :math:`v`.

      * method='single' assigns

        .. math::
           d(u,v) = \\min(dist(u[i],v[j]))

        for all points :math:`i` in cluster :math:`u` and
        :math:`j` in cluster :math:`v`. This is also known as the
        Nearest Point Algorithm.

      * method='complete' assigns

        .. math::
           d(u, v) = \\max(dist(u[i],v[j]))

        for all points :math:`i` in cluster u and :math:`j` in
        cluster :math:`v`. This is also known by the Farthest Point
        Algorithm or Voor Hees Algorithm.

      * method='average' assigns

        .. math::
           d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
                                   {(|u|*|v|)}

        for all points :math:`i` and :math:`j` where :math:`|u|`
        and :math:`|v|` are the cardinalities of clusters :math:`u`
        and :math:`v`, respectively. This is also called the UPGMA
        algorithm.

      * method='weighted' assigns

        .. math::
           d(u,v) = (dist(s,v) + dist(t,v))/2

        where cluster u was formed with cluster s and t and v
        is a remaining cluster in the forest (also called WPGMA).

      * method='centroid' assigns

        .. math::
           dist(s,t) = ||c_s-c_t||_2

        where :math:`c_s` and :math:`c_t` are the centroids of
        clusters :math:`s` and :math:`t`, respectively. When two
        clusters :math:`s` and :math:`t` are combined into a new
        cluster :math:`u`, the new centroid is computed over all the
        original objects in clusters :math:`s` and :math:`t`. The
        distance then becomes the Euclidean distance between the
        centroid of :math:`u` and the centroid of a remaining cluster
        :math:`v` in the forest. This is also known as the UPGMC
        algorithm.

      * method='median' assigns :math:`d(s,t)` like the ``centroid``
        method. When two clusters :math:`s` and :math:`t` are combined
        into a new cluster :math:`u`, the average of centroids s and t
        give the new centroid :math:`u`. This is also known as the
        WPGMC algorithm.

      * method='ward' uses the Ward variance minimization algorithm.
        The new entry :math:`d(u,v)` is computed as follows,

        .. math::

           d(u,v) = \\sqrt{\\frac{|v|+|s|}
                               {T}d(v,s)^2
                        + \\frac{|v|+|t|}
                               {T}d(v,t)^2
                        - \\frac{|v|}
                               {T}d(s,t)^2}

        where :math:`u` is the newly joined cluster consisting of
        clusters :math:`s` and :math:`t`, :math:`v` is an unused
        cluster in the forest, :math:`T=|v|+|s|+|t|`, and
        :math:`|*|` is the cardinality of its argument. This is also
        known as the incremental algorithm.

    Warning: When the minimum distance pair in the forest is chosen, there
    may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
    version.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix. A condensed distance matrix
        is a flat array containing the upper triangular of the distance matrix.
        This is the form that ``pdist`` returns. Alternatively, a collection of
        :math:`m` observation vectors in :math:`n` dimensions may be passed as
        an :math:`m` by :math:`n` array. All elements of the condensed distance
        matrix must be finite, i.e., no NaNs or infs.
    method : str, optional
        The linkage algorithm to use. See the ``Linkage Methods`` section below
        for full descriptions.
    metric : str or function, optional
        The distance metric to use in the case that y is a collection of
        observation vectors; ignored otherwise. See the ``pdist``
        function for a list of valid distance metrics. A custom distance
        function can also be used.
    optimal_ordering : bool, optional
        If True, the linkage matrix will be reordered so that the distance
        between successive leaves is minimal. This results in a more intuitive
        tree structure when the data are visualized. defaults to False, because
        this algorithm can be slow, particularly on large datasets [2]_. See
        also the `optimal_leaf_ordering` function.

        .. versionadded:: 1.0.0

    Returns
    -------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix.

    Notes
    -----
    1. For method 'single', an optimized algorithm based on minimum spanning
       tree is implemented. It has time complexity :math:`O(n^2)`.
       For methods 'complete', 'average', 'weighted' and 'ward', an algorithm
       called nearest-neighbors chain is implemented. It also has time
       complexity :math:`O(n^2)`.
       For other methods, a naive algorithm is implemented with :math:`O(n^3)`
       time complexity.
       All algorithms use :math:`O(n^2)` memory.
       Refer to [1]_ for details about the algorithms.
    2. Methods 'centroid', 'median', and 'ward' are correctly defined only if
       Euclidean pairwise metric is used. If `y` is passed as precomputed
       pairwise distances, then it is the user's responsibility to assure that
       these distances are in fact Euclidean, otherwise the produced result
       will be incorrect.

    See Also
    --------
    scipy.spatial.distance.pdist : pairwise distance metrics

    References
    ----------
    .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
           algorithms", :arXiv:`1109.2378v1`.
    .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal
           leaf ordering for hierarchical clustering", 2001. Bioinformatics
           :doi:`10.1093/bioinformatics/17.suppl_1.S22`

    Examples
    --------
    >>> from scipy.cluster.hierarchy import dendrogram, linkage
    >>> from matplotlib import pyplot as plt
    >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]

    >>> Z = linkage(X, 'ward')
    >>> fig = plt.figure(figsize=(25, 10))
    >>> dn = dendrogram(Z)

    >>> Z = linkage(X, 'single')
    >>> fig = plt.figure(figsize=(25, 10))
    >>> dn = dendrogram(Z)
    >>> plt.show()
    """
    if method not in _LINKAGE_METHODS:
        raise ValueError("Invalid method: {0}".format(method))

    # Work on a contiguous float64 array throughout.
    y = _convert_to_double(np.asarray(y, order='c'))

    if y.ndim == 1:
        # Already a condensed distance matrix: validate it as such.
        distance.is_valid_y(y, throw=True, name='y')
        [y] = _copy_arrays_if_base_present([y])
    elif y.ndim == 2:
        # Observation matrix: pairwise distances are computed below, but
        # first catch a square/symmetric/hollow input that was probably
        # meant to be a (condensed) distance matrix.
        if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
            raise ValueError("Method '{0}' requires the distance metric "
                             "to be Euclidean".format(method))
        if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
            if np.all(y >= 0) and np.allclose(y, y.T):
                _warning('The symmetric non-negative hollow observation '
                         'matrix looks suspiciously like an uncondensed '
                         'distance matrix')
        y = distance.pdist(y, metric)
    else:
        raise ValueError("`y` must be 1 or 2 dimensional.")

    if not np.all(np.isfinite(y)):
        raise ValueError("The condensed distance matrix must contain only "
                         "finite values.")

    n = int(distance.num_obs_y(y))
    method_code = _LINKAGE_METHODS[method]

    # Dispatch to the most efficient C implementation for the method
    # (see point 1 of the Notes section above).
    if method == 'single':
        result = _hierarchy.mst_single_linkage(y, n)
    elif method in ['complete', 'average', 'weighted', 'ward']:
        result = _hierarchy.nn_chain(y, n, method_code)
    else:
        result = _hierarchy.fast_linkage(y, n, method_code)

    if optimal_ordering:
        return optimal_leaf_ordering(result, y)
    else:
        return result
+
+
class ClusterNode(object):
    """
    A tree node class for representing a cluster.

    Leaf nodes correspond to original observations, while non-leaf nodes
    correspond to non-singleton clusters.

    The `to_tree` function converts a matrix returned by the linkage
    function into an easy-to-use tree representation.

    All parameter names are also attributes.

    Parameters
    ----------
    id : int
        The node id.
    left : ClusterNode instance, optional
        The left child tree node.
    right : ClusterNode instance, optional
        The right child tree node.
    dist : float, optional
        Distance for this cluster in the linkage matrix.
    count : int, optional
        The number of samples in this cluster.

    See Also
    --------
    to_tree : for converting a linkage matrix ``Z`` into a tree object.

    """

    def __init__(self, id, left=None, right=None, dist=0, count=1):
        if id < 0:
            raise ValueError('The id must be non-negative.')
        if dist < 0:
            raise ValueError('The distance must be non-negative.')
        # A node must have either zero children (leaf) or two children;
        # exactly one child is not a valid binary-tree shape.
        if (left is None) != (right is None):
            raise ValueError('Only full or proper binary trees are permitted.'
                             ' This node has one child.')
        if count < 1:
            raise ValueError('A cluster must contain at least one original '
                             'observation.')
        self.id = id
        self.left = left
        self.right = right
        self.dist = dist
        # For an internal node the count is derived from the children;
        # the `count` argument only applies to leaves.
        self.count = count if left is None else left.count + right.count

    @staticmethod
    def _assert_comparable(other):
        # Ordering and equality are only defined against other ClusterNodes.
        if not isinstance(other, ClusterNode):
            raise ValueError("Can't compare ClusterNode "
                             "to type {}".format(type(other)))

    def __lt__(self, node):
        self._assert_comparable(node)
        return self.dist < node.dist

    def __gt__(self, node):
        self._assert_comparable(node)
        return self.dist > node.dist

    def __eq__(self, node):
        self._assert_comparable(node)
        return self.dist == node.dist

    def get_id(self):
        """
        The identifier of the target node.

        For ``0 <= i < n``, `i` corresponds to original observation i.
        For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
        at iteration ``i-n``.

        Returns
        -------
        id : int
            The identifier of the target node.

        """
        return self.id

    def get_count(self):
        """
        The number of leaf nodes (original observations) belonging to
        the cluster node nd. If the target node is a leaf, 1 is
        returned.

        Returns
        -------
        get_count : int
            The number of leaf nodes below the target node.

        """
        return self.count

    def get_left(self):
        """
        Return a reference to the left child tree object.

        Returns
        -------
        left : ClusterNode
            The left child of the target node. If the node is a leaf,
            None is returned.

        """
        return self.left

    def get_right(self):
        """
        Return a reference to the right child tree object.

        Returns
        -------
        right : ClusterNode
            The right child of the target node. If the node is a leaf,
            None is returned.

        """
        return self.right

    def is_leaf(self):
        """
        Return True if the target node is a leaf.

        Returns
        -------
        leafness : bool
            True if the target node is a leaf node.

        """
        return self.left is None

    def pre_order(self, func=(lambda x: x.id)):
        """
        Perform pre-order traversal without recursive function calls.

        When a leaf node is first encountered, ``func`` is called with
        the leaf node as its argument, and its result is appended to
        the list.

        For example, the statement::

            ids = root.pre_order(lambda x: x.id)

        returns a list of the node ids corresponding to the leaf nodes
        of the tree as they appear from left to right.

        Parameters
        ----------
        func : function
            Applied to each leaf ClusterNode object in the pre-order
            traversal. Given the ``i``-th leaf node in the pre-order
            traversal ``n[i]``, the result of ``func(n[i])`` is stored in
            ``L[i]``. If not provided, the index of the original observation
            to which the node corresponds is used.

        Returns
        -------
        L : list
            The pre-order traversal.

        """
        # Iterative depth-first traversal with an explicit stack.
        # Pushing the right child before the left one means the left
        # subtree is popped (and therefore visited) first, so leaves are
        # emitted in left-to-right order.
        result = []
        stack = [self]
        while stack:
            node = stack.pop()
            if node.is_leaf():
                result.append(func(node))
            else:
                stack.append(node.right)
                stack.append(node.left)
        return result
+
+
# Module-level singletons: a bare leaf node and the metaclass of
# ClusterNode. NOTE(review): neither name is referenced elsewhere in this
# chunk -- presumably kept for backward compatibility with external
# consumers; confirm before removing.
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
+
+
def _order_cluster_tree(Z):
    """
    Return the internal (non-leaf) nodes of the cluster tree for ``Z``,
    sorted in ascending order of merge distance.

    Parameters
    ----------
    Z : scipy.cluster.linkage array
        The linkage matrix.

    Returns
    -------
    nodes : list
        A list of ClusterNode objects.
    """
    ordered = []
    pending = deque([to_tree(Z)])
    while pending:
        current = pending.popleft()
        if current.is_leaf():
            continue
        # Keep the result sorted as we go; ClusterNode.__lt__ compares
        # on the `dist` attribute.
        bisect.insort_left(ordered, current)
        pending.append(current.get_right())
        pending.append(current.get_left())
    return ordered
+
+
def cut_tree(Z, n_clusters=None, height=None):
    """
    Given a linkage matrix Z, return the cut tree.

    Parameters
    ----------
    Z : scipy.cluster.linkage array
        The linkage matrix.
    n_clusters : array_like, optional
        Number of clusters in the tree at the cut point.
    height : array_like, optional
        The height at which to cut the tree. Only possible for ultrametric
        trees.

    Returns
    -------
    cutree : array
        An array indicating group membership at each agglomeration step. I.e.,
        for a full cut tree, in the first column each data point is in its own
        cluster. At the next step, two nodes are merged. Finally, all
        singleton and non-singleton clusters are in one group. If `n_clusters`
        or `height` are given, the columns correspond to the columns of
        `n_clusters` or `height`.

    Examples
    --------
    >>> from scipy import cluster
    >>> np.random.seed(23)
    >>> X = np.random.randn(50, 4)
    >>> Z = cluster.hierarchy.ward(X)
    >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
    >>> cutree[:10]
    array([[0, 0],
           [1, 1],
           [2, 2],
           [3, 3],
           [3, 4],
           [2, 2],
           [0, 0],
           [1, 5],
           [3, 6],
           [4, 7]])

    """
    nobs = num_obs_linkage(Z)
    # Internal nodes sorted by merge distance (smallest first).
    # NOTE(review): this distance ordering matches the merge order only for
    # monotonic linkages -- confirm behavior for 'centroid'/'median' trees.
    nodes = _order_cluster_tree(Z)

    if height is not None and n_clusters is not None:
        raise ValueError("At least one of either height or n_clusters "
                         "must be None")
    elif height is None and n_clusters is None:  # return the full cut tree
        cols_idx = np.arange(nobs)
    elif height is not None:
        heights = np.array([x.dist for x in nodes])
        cols_idx = np.searchsorted(heights, height)
    else:
        # Convert requested cluster counts to merge-step indices: after
        # step i there are nobs - i clusters remaining.
        cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)

    try:
        n_cols = len(cols_idx)
    except TypeError:  # scalar
        n_cols = 1
        cols_idx = np.array([cols_idx])

    groups = np.zeros((n_cols, nobs), dtype=int)
    # Step 0: every observation is its own cluster.
    last_group = np.arange(nobs)
    if 0 in cols_idx:
        groups[0] = last_group

    for i, node in enumerate(nodes):
        # Observations merged at this step all take the smallest label
        # present in the merged cluster ...
        idx = node.pre_order()
        this_group = last_group.copy()
        this_group[idx] = last_group[idx].min()
        # ... and labels above the absorbed one shift down by one so the
        # labels stay dense (0..k-1).
        this_group[this_group > last_group[idx].max()] -= 1
        if i + 1 in cols_idx:
            groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group
        last_group = this_group

    return groups.T
+
+
def to_tree(Z, rd=False):
    """
    Convert a linkage matrix into an easy-to-use tree object.

    The reference to the root `ClusterNode` object is returned (by default).

    Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``,
    and ``count`` attribute. The left and right attributes point to
    ClusterNode objects that were combined to generate the cluster.
    If both are None then the `ClusterNode` object is a leaf node, its count
    must be 1, and its distance is meaningless but set to 0.

    *Note: This function is provided for the convenience of the library
    user. ClusterNodes are not used as input to any of the functions in this
    library.*

    Parameters
    ----------
    Z : ndarray
        The linkage matrix in proper form (see the `linkage`
        function documentation).
    rd : bool, optional
        When False (default), a reference to the root `ClusterNode` object is
        returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a
        reference to the root node while ``d`` is a list of `ClusterNode`
        objects - one per original entry in the linkage matrix plus entries
        for all clustering steps. If a cluster id is
        less than the number of samples ``n`` in the data that the linkage
        matrix describes, then it corresponds to a singleton cluster (leaf
        node).
        See `linkage` for more information on the assignment of cluster ids
        to clusters.

    Returns
    -------
    tree : ClusterNode or tuple (ClusterNode, list of ClusterNode)
        If ``rd`` is False, a `ClusterNode`.
        If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number
        of samples. See the description of `rd` above for more details.

    See Also
    --------
    linkage, is_valid_linkage, ClusterNode

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> x = np.random.rand(10).reshape(5, 2)
    >>> Z = hierarchy.linkage(x)
    >>> hierarchy.to_tree(Z)
    <scipy.cluster.hierarchy.ClusterNode object at ...
    >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
    >>> rootnode
    <scipy.cluster.hierarchy.ClusterNode object at ...
    >>> len(nodelist)
    9

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    # The number of original objects is one more than the number of rows
    # of Z (each row merges two clusters into one).
    n = Z.shape[0] + 1

    # Create a list full of None's to store the node objects
    d = [None] * (n * 2 - 1)

    # Create the nodes corresponding to the n original objects.
    for i in range(0, n):
        d[i] = ClusterNode(i)

    nd = None

    # Row i of Z forms cluster n + i from the two clusters it references.
    # NOTE(review): the checks below use `>` although an index equal to
    # i + n would also be a forward reference; presumably is_valid_linkage
    # above already rejects such rows -- confirm.
    for i in range(0, n - 1):
        fi = int(Z[i, 0])
        fj = int(Z[i, 1])
        if fi > i + n:
            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
                              'is used before it is formed. See row %d, '
                              'column 0') % fi)
        if fj > i + n:
            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
                              'is used before it is formed. See row %d, '
                              'column 1') % fj)
        nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
        #                ^ id   ^ left ^ right ^ dist
        # The count stored in column 3 must agree with the derived count.
        if Z[i, 3] != nd.count:
            raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
                              'incorrect.') % i)
        d[n + i] = nd

    if rd:
        return (nd, d)
    else:
        return nd
+
+
def optimal_leaf_ordering(Z, y, metric='euclidean'):
    """
    Given a linkage matrix Z and distance, reorder the cut tree.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix. See
        `linkage` for more information on the return structure and
        algorithm.
    y : ndarray
        The condensed distance matrix from which Z was generated.
        Alternatively, a collection of m observation vectors in n
        dimensions may be passed as an m by n array.
    metric : str or function, optional
        The distance metric to use in the case that y is a collection of
        observation vectors; ignored otherwise. See the ``pdist``
        function for a list of valid distance metrics. A custom distance
        function can also be used.

    Returns
    -------
    Z_ordered : ndarray
        A copy of the linkage matrix Z, reordered to minimize the distance
        between adjacent leaves.

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> np.random.seed(23)
    >>> X = np.random.randn(10,10)
    >>> Z = hierarchy.ward(X)
    >>> hierarchy.leaves_list(Z)
    array([0, 5, 3, 9, 6, 8, 1, 4, 2, 7], dtype=int32)
    >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X))
    array([3, 9, 0, 5, 8, 2, 7, 4, 1, 6], dtype=int32)

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    # Validate/normalize `y` exactly as `linkage` does: accept either a
    # condensed distance matrix or an observation matrix.
    y = _convert_to_double(np.asarray(y, order='c'))

    if y.ndim == 1:
        distance.is_valid_y(y, throw=True, name='y')
        [y] = _copy_arrays_if_base_present([y])
    elif y.ndim == 2:
        # Warn when the 2-D input looks like a square distance matrix
        # rather than observations.
        if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
            if np.all(y >= 0) and np.allclose(y, y.T):
                _warning('The symmetric non-negative hollow observation '
                         'matrix looks suspiciously like an uncondensed '
                         'distance matrix')
        y = distance.pdist(y, metric)
    else:
        raise ValueError("`y` must be 1 or 2 dimensional.")

    if not np.all(np.isfinite(y)):
        raise ValueError("The condensed distance matrix must contain only "
                         "finite values.")

    return _optimal_leaf_ordering.optimal_leaf_ordering(Z, y)
+
+
+def _convert_to_bool(X):
+ if X.dtype != bool:
+ X = X.astype(bool)
+ if not X.flags.contiguous:
+ X = X.copy()
+ return X
+
+
+def _convert_to_double(X):
+ if X.dtype != np.double:
+ X = X.astype(np.double)
+ if not X.flags.contiguous:
+ X = X.copy()
+ return X
+
+
def cophenet(Z, Y=None):
    """
    Calculate the cophenetic distances between each observation in
    the hierarchical clustering defined by the linkage ``Z``.

    Suppose ``p`` and ``q`` are original observations in
    disjoint clusters ``s`` and ``t``, respectively and
    ``s`` and ``t`` are joined by a direct parent cluster
    ``u``. The cophenetic distance between observations
    ``i`` and ``j`` is simply the distance between
    clusters ``s`` and ``t``.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as an array
        (see `linkage` function).
    Y : ndarray (optional)
        Calculates the cophenetic correlation coefficient ``c`` of a
        hierarchical clustering defined by the linkage matrix `Z`
        of a set of :math:`n` observations in :math:`m`
        dimensions. `Y` is the condensed distance matrix from which
        `Z` was generated.

    Returns
    -------
    c : ndarray
        The cophentic correlation distance (if ``Y`` is passed).
    d : ndarray
        The cophenetic distance matrix in condensed form. The
        :math:`ij` th entry is the cophenetic distance between
        original observations :math:`i` and :math:`j`.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    scipy.spatial.distance.squareform: transforming condensed matrices into square ones.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import single, cophenet
    >>> from scipy.spatial.distance import pdist, squareform

    Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance
    between two points of ``X`` is the distance between the largest two
    distinct clusters that each of the points:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    ``X`` corresponds to this dataset ::

        x x x x
        x x

        x x
        x x x x

    >>> Z = single(pdist(X))
    >>> Z
    array([[ 0., 1., 1., 2.],
           [ 2., 12., 1., 3.],
           [ 3., 4., 1., 2.],
           [ 5., 14., 1., 3.],
           [ 6., 7., 1., 2.],
           [ 8., 16., 1., 3.],
           [ 9., 10., 1., 2.],
           [11., 18., 1., 3.],
           [13., 15., 2., 6.],
           [17., 20., 2., 9.],
           [19., 21., 2., 12.]])
    >>> cophenet(Z)
    array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2.,
           2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2.,
           2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
           1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.])

    The output of the `scipy.cluster.hierarchy.cophenet` method is
    represented in condensed form. We can use
    `scipy.spatial.distance.squareform` to see the output as a
    regular matrix (where each element ``ij`` denotes the cophenetic distance
    between each ``i``, ``j`` pair of points in ``X``):

    >>> squareform(cophenet(Z))
    array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
           [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
           [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
           [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.],
           [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.],
           [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.],
           [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.],
           [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.],
           [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.],
           [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.],
           [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.],
           [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]])

    In this example, the cophenetic distance between points on ``X`` that are
    very close (i.e., in the same corner) is 1. For other pairs of points is 2,
    because the points will be located in clusters at different
    corners - thus, the distance between these clusters will be larger.

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    Zs = Z.shape
    # A linkage over n observations has n - 1 rows.
    n = Zs[0] + 1

    # Output buffer: one cophenetic distance per pair of original
    # observations (condensed form, length n*(n-1)/2).
    zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
    # Since the C code does not support striding using strides.
    # The dimensions are used instead.
    Z = _convert_to_double(Z)

    _hierarchy.cophenetic_distances(Z, zz, int(n))
    if Y is None:
        return zz

    Y = np.asarray(Y, order='c')
    distance.is_valid_y(Y, throw=True, name='Y')

    # Pearson correlation between the input condensed distances Y and the
    # cophenetic distances zz, computed explicitly from centered vectors.
    z = zz.mean()
    y = Y.mean()
    Yy = Y - y
    Zz = zz - z
    numerator = (Yy * Zz)
    denomA = Yy**2
    denomB = Zz**2
    c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
    return (c, zz)
+
+
def inconsistent(Z, d=2):
    r"""
    Calculate inconsistency statistics on a linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
        clustering). See `linkage` documentation for more information on its
        form.
    d : int, optional
        The number of links up to `d` levels below each non-singleton cluster.

    Returns
    -------
    R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
        statistics for the non-singleton cluster ``i``. The link statistics are
        computed over the link heights for links :math:`d` levels below the
        cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
        deviation of the link heights, respectively; ``R[i,2]`` is the number
        of links included in the calculation; and ``R[i,3]`` is the
        inconsistency coefficient,

        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]}

    Notes
    -----
    This function behaves similarly to the MATLAB(TM) ``inconsistent``
    function.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import inconsistent, linkage
    >>> from matplotlib import pyplot as plt
    >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
    >>> Z = linkage(X, 'ward')
    >>> print(Z)
    [[ 5. 6. 0. 2. ]
     [ 2. 7. 0. 2. ]
     [ 0. 4. 1. 2. ]
     [ 1. 8. 1.15470054 3. ]
     [ 9. 10. 2.12132034 4. ]
     [ 3. 12. 4.11096096 5. ]
     [11. 13. 14.07183949 8. ]]
    >>> inconsistent(Z)
    array([[ 0. , 0. , 1. , 0. ],
           [ 0. , 0. , 1. , 0. ],
           [ 1. , 0. , 1. , 0. ],
           [ 0.57735027, 0.81649658, 2. , 0.70710678],
           [ 1.04044011, 1.06123822, 3. , 1.01850858],
           [ 3.11614065, 1.40688837, 2. , 0.70710678],
           [ 6.44583366, 6.76770586, 3. , 1.12682288]])

    """
    Z = np.asarray(Z, order='c')

    Zs = Z.shape
    is_valid_linkage(Z, throw=True, name='Z')
    # `d` counts whole tree levels, so it must be a nonnegative integer
    # (float inputs like 2.0 are accepted; 2.5 is rejected).
    if (not d == np.floor(d)) or d < 0:
        raise ValueError('The second argument d must be a nonnegative '
                         'integer value.')

    # Since the C code does not support striding using strides.
    # The dimensions are used instead.
    [Z] = _copy_arrays_if_base_present([Z])

    # One statistics row per non-singleton cluster (n - 1 of them).
    n = Zs[0] + 1
    R = np.zeros((n - 1, 4), dtype=np.double)

    _hierarchy.inconsistent(Z, R, int(n), int(d))
    return R
+
+
def from_mlab_linkage(Z):
    """
    Convert a linkage matrix generated by MATLAB(TM) to a new
    linkage matrix compatible with this module.

    The conversion does two things:

    * the indices are converted from ``1..N`` to ``0..(N-1)`` form,
      and

    * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the
      number of original observations (leaves) in the non-singleton
      cluster ``i``.

    This function is useful when loading in linkages from legacy data
    files generated by MATLAB.

    Parameters
    ----------
    Z : ndarray
        A linkage matrix generated by MATLAB(TM).

    Returns
    -------
    ZS : ndarray
        A linkage matrix compatible with ``scipy.cluster.hierarchy``.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    to_mlab_linkage: transform from SciPy to MATLAB format.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage

    Given a linkage matrix in MATLAB format ``mZ``, we can use
    `scipy.cluster.hierarchy.from_mlab_linkage` to import
    it into SciPy format:

    >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1],
    ...                [10, 11, 1], [3, 13, 1.29099445],
    ...                [6, 14, 1.29099445],
    ...                [9, 15, 1.29099445],
    ...                [12, 16, 1.29099445],
    ...                [17, 18, 5.77350269],
    ...                [19, 20, 5.77350269],
    ...                [21, 22, 8.16496581]])

    >>> Z = from_mlab_linkage(mZ)
    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 2. , 12. , 1.29099445, 3. ],
           [ 5. , 13. , 1.29099445, 3. ],
           [ 8. , 14. , 1.29099445, 3. ],
           [ 11. , 15. , 1.29099445, 3. ],
           [ 16. , 17. , 5.77350269, 6. ],
           [ 18. , 19. , 5.77350269, 6. ],
           [ 20. , 21. , 8.16496581, 12. ]])

    As expected, the linkage matrix ``Z`` returned includes an
    additional column counting the number of original samples in
    each cluster. Also, all cluster indices are reduced by 1
    (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing).

    """
    Z = np.asarray(Z, dtype=np.double, order='c')
    Zs = Z.shape

    # If it's empty, return it.
    if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
        return Z.copy()

    if len(Zs) != 2:
        raise ValueError("The linkage array must be rectangular.")

    # If it contains no rows, return it.
    if Zs[0] == 0:
        return Z.copy()

    Zpart = Z.copy()
    # NOTE(review): with `and`, this only rejects input when *both* the
    # minimum index differs from 1 and the maximum differs from 2*N; an
    # `or` would validate each bound independently — confirm intent.
    if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
        raise ValueError('The format of the indices is not 1..N')

    # Shift cluster indices from MATLAB's 1-based to 0-based form, then
    # let the C helper fill in the observation-count column.
    Zpart[:, 0:2] -= 1.0
    CS = np.zeros((Zs[0],), dtype=np.double)
    _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
    return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
+
+
def to_mlab_linkage(Z):
    """
    Convert a linkage matrix to a MATLAB(TM) compatible one.

    Converts a linkage matrix ``Z`` generated by the linkage function
    of this module to a MATLAB(TM) compatible one. The return linkage
    matrix has the last column removed and the cluster indices are
    converted to ``1..N`` indexing.

    Parameters
    ----------
    Z : ndarray
        A linkage matrix generated by ``scipy.cluster.hierarchy``.

    Returns
    -------
    to_mlab_linkage : ndarray
        A linkage matrix compatible with MATLAB(TM)'s hierarchical
        clustering functions.

        The return linkage matrix has the last column removed
        and the cluster indices are converted to ``1..N`` indexing.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    from_mlab_linkage: transform from Matlab to SciPy format.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage
    >>> from scipy.spatial.distance import pdist

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))
    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 2. , 12. , 1.29099445, 3. ],
           [ 5. , 13. , 1.29099445, 3. ],
           [ 8. , 14. , 1.29099445, 3. ],
           [11. , 15. , 1.29099445, 3. ],
           [16. , 17. , 5.77350269, 6. ],
           [18. , 19. , 5.77350269, 6. ],
           [20. , 21. , 8.16496581, 12. ]])

    After a linkage matrix ``Z`` has been created, we can use
    `scipy.cluster.hierarchy.to_mlab_linkage` to convert it
    into MATLAB format:

    >>> mZ = to_mlab_linkage(Z)
    >>> mZ
    array([[ 1. , 2. , 1. ],
           [ 4. , 5. , 1. ],
           [ 7. , 8. , 1. ],
           [ 10. , 11. , 1. ],
           [ 3. , 13. , 1.29099445],
           [ 6. , 14. , 1.29099445],
           [ 9. , 15. , 1.29099445],
           [ 12. , 16. , 1.29099445],
           [ 17. , 18. , 5.77350269],
           [ 19. , 20. , 5.77350269],
           [ 21. , 22. , 8.16496581]])

    The new linkage matrix ``mZ`` uses 1-indexing for all the
    clusters (instead of 0-indexing). Also, the last column of
    the original linkage matrix has been dropped.

    """
    Z = np.asarray(Z, order='c', dtype=np.double)
    Zs = Z.shape
    # Empty input (no linkage rows) round-trips unchanged, skipping validation.
    if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
        return Z.copy()
    is_valid_linkage(Z, throw=True, name='Z')

    # Drop the observation-count column and shift cluster indices to
    # MATLAB's 1-based convention.
    ZP = Z[:, 0:3].copy()
    ZP[:, 0:2] += 1.0

    return ZP
+
+
def is_monotonic(Z):
    """
    Return True if the linkage passed is monotonic.

    The linkage is monotonic if for every cluster :math:`s` and :math:`t`
    joined, the distance between them is no less than the distance
    between any previously joined clusters.

    Parameters
    ----------
    Z : ndarray
        The linkage matrix to check for monotonicity.

    Returns
    -------
    b : bool
        A boolean indicating whether the linkage is monotonic.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import median, ward, is_monotonic
    >>> from scipy.spatial.distance import pdist

    By definition, some hierarchical clustering algorithms - such as
    `scipy.cluster.hierarchy.ward` - produce monotonic assignments of
    samples to clusters; however, this is not always true for other
    hierarchical methods - e.g. `scipy.cluster.hierarchy.median`.

    Given a linkage matrix ``Z`` (as the result of a hierarchical clustering
    method) we can test programmatically whether it has the monotonicity
    property or not, using `scipy.cluster.hierarchy.is_monotonic`:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))
    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 2. , 12. , 1.29099445, 3. ],
           [ 5. , 13. , 1.29099445, 3. ],
           [ 8. , 14. , 1.29099445, 3. ],
           [11. , 15. , 1.29099445, 3. ],
           [16. , 17. , 5.77350269, 6. ],
           [18. , 19. , 5.77350269, 6. ],
           [20. , 21. , 8.16496581, 12. ]])
    >>> is_monotonic(Z)
    True

    >>> Z = median(pdist(X))
    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 2. , 12. , 1.11803399, 3. ],
           [ 5. , 13. , 1.11803399, 3. ],
           [ 8. , 15. , 1.11803399, 3. ],
           [11. , 14. , 1.11803399, 3. ],
           [18. , 19. , 3. , 6. ],
           [16. , 17. , 3.5 , 6. ],
           [20. , 21. , 3.25 , 12. ]])
    >>> is_monotonic(Z)
    False

    Note that this method is equivalent to just verifying that the distances
    in the third column of the linkage matrix appear in a monotonically
    increasing order.

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    # Monotonic iff each merge distance is no smaller than the previous one,
    # i.e. column 2 is non-decreasing down the rows.
    return (Z[1:, 2] >= Z[:-1, 2]).all()
+
+
def is_valid_im(R, warning=False, throw=False, name=None):
    """Return True if the inconsistency matrix passed is valid.

    It must be a :math:`n` by 4 array of doubles. The standard
    deviations ``R[:,1]`` must be nonnegative. The link counts
    ``R[:,2]`` must be positive and no greater than :math:`n-1`.

    Parameters
    ----------
    R : ndarray
        The inconsistency matrix to check for validity.
    warning : bool, optional
        When True, issues a Python warning if the linkage
        matrix passed is invalid.
    throw : bool, optional
        When True, throws a Python exception if the linkage
        matrix passed is invalid.
    name : str, optional
        This string refers to the variable name of the invalid
        linkage matrix.

    Returns
    -------
    b : bool
        True if the inconsistency matrix is valid.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    inconsistent: for the creation of a inconsistency matrix.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im
    >>> from scipy.spatial.distance import pdist

    Given a data set ``X``, we can apply a clustering method to obtain a
    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
    be also used to obtain the inconsistency matrix ``R`` associated to
    this clustering process:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))
    >>> R = inconsistent(Z)
    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 2. , 12. , 1.29099445, 3. ],
           [ 5. , 13. , 1.29099445, 3. ],
           [ 8. , 14. , 1.29099445, 3. ],
           [11. , 15. , 1.29099445, 3. ],
           [16. , 17. , 5.77350269, 6. ],
           [18. , 19. , 5.77350269, 6. ],
           [20. , 21. , 8.16496581, 12. ]])
    >>> R
    array([[1. , 0. , 1. , 0. ],
           [1. , 0. , 1. , 0. ],
           [1. , 0. , 1. , 0. ],
           [1. , 0. , 1. , 0. ],
           [1.14549722, 0.20576415, 2. , 0.70710678],
           [1.14549722, 0.20576415, 2. , 0.70710678],
           [1.14549722, 0.20576415, 2. , 0.70710678],
           [1.14549722, 0.20576415, 2. , 0.70710678],
           [2.78516386, 2.58797734, 3. , 1.15470054],
           [2.78516386, 2.58797734, 3. , 1.15470054],
           [6.57065706, 1.38071187, 3. , 1.15470054]])

    Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that
    ``R`` is correct:

    >>> is_valid_im(R)
    True

    However, if ``R`` is wrongly constructed (e.g., one of the standard
    deviations is set to a negative value), then the check will fail:

    >>> R[-1,1] = R[-1,1] * -1
    >>> is_valid_im(R)
    False

    """
    R = np.asarray(R, order='c')
    valid = True
    name_str = "%r " % name if name else ''
    try:
        # `asarray` above already yields an ndarray; this defensive guard
        # uses `isinstance` (PEP 8) instead of a direct `type(...)`
        # comparison so ndarray subclasses are also accepted.
        if not isinstance(R, np.ndarray):
            raise TypeError('Variable %spassed as inconsistency matrix is not '
                            'a numpy array.' % name_str)
        if R.dtype != np.double:
            raise TypeError('Inconsistency matrix %smust contain doubles '
                            '(double).' % name_str)
        if len(R.shape) != 2:
            raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
                             'be two-dimensional).' % name_str)
        if R.shape[1] != 4:
            raise ValueError('Inconsistency matrix %smust have 4 columns.' %
                             name_str)
        if R.shape[0] < 1:
            raise ValueError('Inconsistency matrix %smust have at least one '
                             'row.' % name_str)
        # Column 0: mean link heights; column 1: standard deviations;
        # column 2: link counts. None of these may be negative.
        if (R[:, 0] < 0).any():
            raise ValueError('Inconsistency matrix %scontains negative link '
                             'height means.' % name_str)
        if (R[:, 1] < 0).any():
            raise ValueError('Inconsistency matrix %scontains negative link '
                             'height standard deviations.' % name_str)
        if (R[:, 2] < 0).any():
            raise ValueError('Inconsistency matrix %scontains negative link '
                             'counts.' % name_str)
    except Exception as e:
        # Invalid: re-raise, warn, or just report False depending on flags.
        if throw:
            raise
        if warning:
            _warning(str(e))
        valid = False

    return valid
+
+
def is_valid_linkage(Z, warning=False, throw=False, name=None):
    """
    Check the validity of a linkage matrix.

    A linkage matrix is valid if it is a 2-D array (type double)
    with :math:`n` rows and 4 columns. The first two columns must contain
    indices between 0 and :math:`2n-1`. For a given row ``i``, the following
    two expressions have to hold:

    .. math::

        0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
        0 \\leq Z[i,1] \\leq i+n-1

    I.e., a cluster cannot join another cluster unless the cluster being joined
    has been generated.

    Parameters
    ----------
    Z : array_like
        Linkage matrix.
    warning : bool, optional
        When True, issues a Python warning if the linkage
        matrix passed is invalid.
    throw : bool, optional
        When True, throws a Python exception if the linkage
        matrix passed is invalid.
    name : str, optional
        This string refers to the variable name of the invalid
        linkage matrix.

    Returns
    -------
    b : bool
        True if the inconsistency matrix is valid.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, is_valid_linkage
    >>> from scipy.spatial.distance import pdist

    All linkage matrices generated by the clustering methods in this module
    will be valid (i.e., they will have the appropriate dimensions and the two
    required expressions will hold for all the rows).

    We can check this using `scipy.cluster.hierarchy.is_valid_linkage`:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))
    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 2. , 12. , 1.29099445, 3. ],
           [ 5. , 13. , 1.29099445, 3. ],
           [ 8. , 14. , 1.29099445, 3. ],
           [11. , 15. , 1.29099445, 3. ],
           [16. , 17. , 5.77350269, 6. ],
           [18. , 19. , 5.77350269, 6. ],
           [20. , 21. , 8.16496581, 12. ]])
    >>> is_valid_linkage(Z)
    True

    However, if we create a linkage matrix in a wrong way - or if we modify
    a valid one in a way that any of the required expressions don't hold
    anymore, then the check will fail:

    >>> Z[3][1] = 20    # the cluster number 20 is not defined at this point
    >>> is_valid_linkage(Z)
    False

    """
    Z = np.asarray(Z, order='c')
    valid = True
    name_str = "%r " % name if name else ''
    try:
        # `asarray` above already yields an ndarray; this defensive guard
        # uses `isinstance` (PEP 8) instead of a direct `type(...)`
        # comparison so ndarray subclasses are also accepted.
        if not isinstance(Z, np.ndarray):
            raise TypeError('Passed linkage argument %sis not a valid array.' %
                            name_str)
        if Z.dtype != np.double:
            raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
        if len(Z.shape) != 2:
            raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
                             'two-dimensional).' % name_str)
        if Z.shape[1] != 4:
            raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
        if Z.shape[0] == 0:
            raise ValueError('Linkage must be computed on at least two '
                             'observations.')
        n = Z.shape[0]
        if n > 1:
            # Columns 0-1 are cluster indices, column 2 distances, column 3
            # observation counts; all must be nonnegative.
            if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
                raise ValueError('Linkage %scontains negative indices.' %
                                 name_str)
            if (Z[:, 2] < 0).any():
                raise ValueError('Linkage %scontains negative distances.' %
                                 name_str)
            if (Z[:, 3] < 0).any():
                raise ValueError('Linkage %scontains negative counts.' %
                                 name_str)
        if _check_hierarchy_uses_cluster_before_formed(Z):
            raise ValueError('Linkage %suses non-singleton cluster before '
                             'it is formed.' % name_str)
        if _check_hierarchy_uses_cluster_more_than_once(Z):
            raise ValueError('Linkage %suses the same cluster more than once.'
                             % name_str)
    except Exception as e:
        # Invalid: re-raise, warn, or just report False depending on flags.
        if throw:
            raise
        if warning:
            _warning(str(e))
        valid = False

    return valid
+
+
+def _check_hierarchy_uses_cluster_before_formed(Z):
+ n = Z.shape[0] + 1
+ for i in range(0, n - 1):
+ if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
+ return True
+ return False
+
+
+def _check_hierarchy_uses_cluster_more_than_once(Z):
+ n = Z.shape[0] + 1
+ chosen = set([])
+ for i in range(0, n - 1):
+ if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
+ return True
+ chosen.add(Z[i, 0])
+ chosen.add(Z[i, 1])
+ return False
+
+
+def _check_hierarchy_not_all_clusters_used(Z):
+ n = Z.shape[0] + 1
+ chosen = set([])
+ for i in range(0, n - 1):
+ chosen.add(int(Z[i, 0]))
+ chosen.add(int(Z[i, 1]))
+ must_chosen = set(range(0, 2 * n - 2))
+ return len(must_chosen.difference(chosen)) > 0
+
+
def num_obs_linkage(Z):
    """
    Return the number of original observations of the linkage matrix passed.

    Parameters
    ----------
    Z : ndarray
        The linkage matrix on which to perform the operation.

    Returns
    -------
    n : int
        The number of original observations in the linkage.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, num_obs_linkage
    >>> from scipy.spatial.distance import pdist

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))

    ``Z`` is a linkage matrix obtained after using the Ward clustering method
    with ``X``, a dataset with 12 data points.

    >>> num_obs_linkage(Z)
    12

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    # A linkage over n observations always has exactly n - 1 merge rows.
    return (Z.shape[0] + 1)
+
+
def correspond(Z, Y):
    """
    Check for correspondence between linkage and condensed distance matrices.

    They must have the same number of original observations for
    the check to succeed.

    This function is useful as a sanity check in algorithms that make
    extensive use of linkage and distance matrices that must
    correspond to the same set of original observations.

    Parameters
    ----------
    Z : array_like
        The linkage matrix to check for correspondence.
    Y : array_like
        The condensed distance matrix to check for correspondence.

    Returns
    -------
    b : bool
        A boolean indicating whether the linkage matrix and distance
        matrix could possibly correspond to one another.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, correspond
    >>> from scipy.spatial.distance import pdist

    This method can be used to check if a given linkage matrix ``Z`` has been
    obtained from the application of a cluster method over a dataset ``X``:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]
    >>> X_condensed = pdist(X)
    >>> Z = ward(X_condensed)

    Here, we can compare ``Z`` and ``X`` (in condensed form):

    >>> correspond(Z, X_condensed)
    True

    """
    # Validate both encodings first; each implies an observation count,
    # and they correspond exactly when those counts agree.
    is_valid_linkage(Z, throw=True)
    distance.is_valid_y(Y, throw=True)
    Z = np.asarray(Z, order='c')
    Y = np.asarray(Y, order='c')
    return distance.num_obs_y(Y) == num_obs_linkage(Z)
+
+
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
    """
    Form flat clusters from the hierarchical clustering defined by
    the given linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded with the matrix returned
        by the `linkage` function.
    t : scalar
        For criteria 'inconsistent', 'distance' or 'monocrit',
        this is the threshold to apply when forming flat clusters.
        For 'maxclust' or 'maxclust_monocrit' criteria,
        this would be max number of clusters requested.
    criterion : str, optional
        The criterion to use in forming flat clusters. This can
        be any of the following values:

          ``inconsistent`` :
              If a cluster node and all its
              descendants have an inconsistent value less than or equal
              to `t`, then all its leaf descendants belong to the
              same flat cluster. When no non-singleton cluster meets
              this criterion, every node is assigned to its own
              cluster. (Default)

          ``distance`` :
              Forms flat clusters so that the original
              observations in each flat cluster have no greater a
              cophenetic distance than `t`.

          ``maxclust`` :
              Finds a minimum threshold ``r`` so that
              the cophenetic distance between any two original
              observations in the same flat cluster is no more than
              ``r`` and no more than `t` flat clusters are formed.

          ``monocrit`` :
              Forms a flat cluster from a cluster node c
              with index i when ``monocrit[j] <= t``.

              For example, to threshold on the maximum mean distance
              as computed in the inconsistency matrix R with a
              threshold of 0.8 do::

                  MR = maxRstat(Z, R, 3)
                  fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)

          ``maxclust_monocrit`` :
              Forms a flat cluster from a
              non-singleton cluster node ``c`` when ``monocrit[i] <=
              r`` for all cluster indices ``i`` below and including
              ``c``. ``r`` is minimized such that no more than ``t``
              flat clusters are formed. monocrit must be
              monotonic. For example, to minimize the threshold t on
              maximum inconsistency values so that no more than 3 flat
              clusters are formed, do::

                  MI = maxinconsts(Z, R)
                  fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)

    depth : int, optional
        The maximum depth to perform the inconsistency calculation.
        It has no meaning for the other criteria. Default is 2.
    R : ndarray, optional
        The inconsistency matrix to use for the 'inconsistent'
        criterion. This matrix is computed if not provided.
    monocrit : ndarray, optional
        An array of length n-1. `monocrit[i]` is the
        statistics upon which non-singleton i is thresholded. The
        monocrit vector must be monotonic, i.e., given a node c with
        index i, for all node indices j corresponding to nodes
        below c, ``monocrit[i] >= monocrit[j]``.

    Returns
    -------
    fcluster : ndarray
        An array of length ``n``. ``T[i]`` is the flat cluster number to
        which original observation ``i`` belongs.

    See Also
    --------
    linkage : for information about hierarchical clustering methods work.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, fcluster
    >>> from scipy.spatial.distance import pdist

    All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward`
    generate a linkage matrix ``Z`` as their output:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))

    >>> Z
    array([[ 0. , 1. , 1. , 2. ],
           [ 3. , 4. , 1. , 2. ],
           [ 6. , 7. , 1. , 2. ],
           [ 9. , 10. , 1. , 2. ],
           [ 2. , 12. , 1.29099445, 3. ],
           [ 5. , 13. , 1.29099445, 3. ],
           [ 8. , 14. , 1.29099445, 3. ],
           [11. , 15. , 1.29099445, 3. ],
           [16. , 17. , 5.77350269, 6. ],
           [18. , 19. , 5.77350269, 6. ],
           [20. , 21. , 8.16496581, 12. ]])

    This matrix represents a dendrogram, where the first and second elements
    are the two clusters merged at each step, the third element is the
    distance between these clusters, and the fourth element is the size of
    the new cluster - the number of original data points included.

    `scipy.cluster.hierarchy.fcluster` can be used to flatten the
    dendrogram, obtaining as a result an assignation of the original data
    points to single clusters.

    This assignation mostly depends on a distance threshold ``t`` - the maximum
    inter-cluster distance allowed:

    >>> fcluster(Z, t=0.9, criterion='distance')
    array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)

    >>> fcluster(Z, t=1.1, criterion='distance')
    array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)

    >>> fcluster(Z, t=3, criterion='distance')
    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)

    >>> fcluster(Z, t=9, criterion='distance')
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)

    In the first case, the threshold ``t`` is too small to allow any two
    samples in the data to form a cluster, so 12 different clusters are
    returned.

    In the second case, the threshold is large enough to allow the first
    4 points to be merged with their nearest neighbors. So, here, only 8
    clusters are returned.

    The third case, with a much higher threshold, allows for up to 8 data
    points to be connected - so 4 clusters are returned here.

    Lastly, the threshold of the fourth case is large enough to allow for
    all data points to be merged together - so a single cluster is returned.

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    n = Z.shape[0] + 1
    # Output: one flat-cluster label per original observation, filled in
    # by the C routines below.
    T = np.zeros((n,), dtype='i')

    # Since the C code does not support striding using strides.
    # The dimensions are used instead.
    [Z] = _copy_arrays_if_base_present([Z])

    # Dispatch to the C implementation matching the requested criterion.
    if criterion == 'inconsistent':
        if R is None:
            R = inconsistent(Z, depth)
        else:
            R = np.asarray(R, order='c')
            is_valid_im(R, throw=True, name='R')
            # Since the C code does not support striding using strides.
            # The dimensions are used instead.
            [R] = _copy_arrays_if_base_present([R])
        _hierarchy.cluster_in(Z, R, T, float(t), int(n))
    elif criterion == 'distance':
        _hierarchy.cluster_dist(Z, T, float(t), int(n))
    elif criterion == 'maxclust':
        _hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
    elif criterion == 'monocrit':
        [monocrit] = _copy_arrays_if_base_present([monocrit])
        _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
    elif criterion == 'maxclust_monocrit':
        [monocrit] = _copy_arrays_if_base_present([monocrit])
        _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
    else:
        raise ValueError('Invalid cluster formation criterion: %s'
                         % str(criterion))
    return T
+
+
def fclusterdata(X, t, criterion='inconsistent',
                 metric='euclidean', depth=2, method='single', R=None):
    """
    Cluster observation data using a given metric.

    Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions), using the euclidean
    distance metric to calculate distances between original observations,
    performs hierarchical clustering using the single linkage algorithm,
    and forms flat clusters using the inconsistency method with `t` as the
    cut-off threshold.

    A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is
    the index of the flat cluster to which the original observation ``i``
    belongs.

    Parameters
    ----------
    X : (N, M) ndarray
        N by M data matrix with N observations in M dimensions.
    t : scalar
        For criteria 'inconsistent', 'distance' or 'monocrit',
        this is the threshold to apply when forming flat clusters.
        For 'maxclust' or 'maxclust_monocrit' criteria,
        this would be max number of clusters requested.
    criterion : str, optional
        Specifies the criterion for forming flat clusters. Valid
        values are 'inconsistent' (default), 'distance', or 'maxclust'
        cluster formation algorithms. See `fcluster` for descriptions.
    metric : str or function, optional
        The distance metric for calculating pairwise distances. See
        ``distance.pdist`` for descriptions and linkage to verify
        compatibility with the linkage method.
    depth : int, optional
        The maximum depth for the inconsistency calculation. See
        `inconsistent` for more information.
    method : str, optional
        The linkage method to use (single, complete, average,
        weighted, median centroid, ward). See `linkage` for more
        information. Default is "single".
    R : ndarray, optional
        The inconsistency matrix. It will be computed if necessary
        if it is not passed.

    Returns
    -------
    fclusterdata : ndarray
        A vector of length n. T[i] is the flat cluster number to
        which original observation i belongs.

    See Also
    --------
    scipy.spatial.distance.pdist : pairwise distance metrics

    Notes
    -----
    This function is similar to the MATLAB function ``clusterdata``.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import fclusterdata

    This is a convenience method that abstracts all the steps to perform in a
    typical SciPy's hierarchical clustering workflow.

    * Transform the input data into a condensed matrix with `scipy.spatial.distance.pdist`.

    * Apply a clustering method.

    * Obtain flat clusters at a user defined distance threshold ``t`` using `scipy.cluster.hierarchy.fcluster`.

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> fclusterdata(X, t=1)
    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)

    The output here (for the dataset ``X``, distance threshold ``t``, and the
    default settings) is four clusters with three data points each.

    """
    X = np.asarray(X, order='c', dtype=np.double)

    # np.asarray always returns a base ndarray, so only the rank needs
    # checking here (the old ``type(X) != np.ndarray`` test was dead code).
    if X.ndim != 2:
        raise TypeError('The observation matrix X must be an n by m numpy '
                        'array.')

    # pdist -> linkage -> (inconsistent, if needed) -> fcluster pipeline.
    Y = distance.pdist(X, metric=metric)
    Z = linkage(Y, method=method)
    if R is None:
        R = inconsistent(Z, d=depth)
    else:
        R = np.asarray(R, order='c')
    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
    return T
+
+
def leaves_list(Z):
    """
    Return a list of leaf node ids.

    The return corresponds to the observation vector index as it appears
    in the tree from left to right. Z is a linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. `Z` is
        a linkage matrix. See `linkage` for more information.

    Returns
    -------
    leaves_list : ndarray
        The list of leaf node ids.

    See Also
    --------
    dendrogram: for information about dendrogram structure.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
    >>> from scipy.spatial.distance import pdist
    >>> from matplotlib import pyplot as plt

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))

    The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
    encodes the structure of the clustering performed.
    `scipy.cluster.hierarchy.leaves_list` shows the mapping between
    indices in the ``X`` dataset and leaves in the dendrogram:

    >>> leaves_list(Z)
    array([ 2,  0,  1,  5,  3,  4,  8,  6,  7, 11,  9, 10], dtype=int32)

    >>> fig = plt.figure(figsize=(25, 10))
    >>> dn = dendrogram(Z)
    >>> plt.show()

    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    # n singleton observations for n-1 merge rows.
    n = Z.shape[0] + 1
    [Z] = _copy_arrays_if_base_present([Z])
    # Buffer for the pre-order leaf traversal, filled in by the C code.
    order = np.zeros((n,), dtype='i')
    _hierarchy.prelist(Z, order, int(n))
    return order
+
+
+# Maps number of leaves to text size.
+#
+# p <= 20, size="12"
+# 20 < p <= 30, size="10"
+# 30 < p <= 50, size="8"
+# 50 < p <= np.inf, size="6"
+
+_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
+_drotation = {20: 0, 40: 45, np.inf: 90}
+_dtextsortedkeys = list(_dtextsizes.keys())
+_dtextsortedkeys.sort()
+_drotationsortedkeys = list(_drotation.keys())
+_drotationsortedkeys.sort()
+
+
+def _remove_dups(L):
+ """
+ Remove duplicates AND preserve the original order of the elements.
+
+ The set class is not guaranteed to do this.
+ """
+ seen_before = set([])
+ L2 = []
+ for i in L:
+ if i not in seen_before:
+ seen_before.add(i)
+ L2.append(i)
+ return L2
+
+
def _get_tick_text_size(p):
    """Return the leaf-label font size for a dendrogram with p leaves."""
    # First threshold >= p wins; np.inf guarantees a match.
    for bound in _dtextsortedkeys:
        if p <= bound:
            return _dtextsizes[bound]
+
+
def _get_tick_rotation(p):
    """Return the leaf-label rotation (degrees) for p leaves."""
    # First threshold >= p wins; np.inf guarantees a match.
    for bound in _drotationsortedkeys:
        if p <= bound:
            return _drotation[bound]
+
+
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
                     no_labels, color_list, leaf_font_size=None,
                     leaf_rotation=None, contraction_marks=None,
                     ax=None, above_threshold_color='C0'):
    """
    Render precomputed dendrogram geometry with matplotlib.

    ``icoords``/``dcoords`` hold one x/y coordinate quadruple per U-shaped
    link (as computed by `dendrogram`), ``ivl`` the leaf labels, ``mh`` the
    maximum linkage height, and ``color_list`` one color per link.  Draws on
    ``ax`` when given, otherwise on pylab's current axes.
    """
    # Import matplotlib here so that it's not imported unless dendrograms
    # are plotted. Raise an informative error if importing fails.
    try:
        # if an axis is provided, don't use pylab at all
        if ax is None:
            import matplotlib.pylab
        import matplotlib.patches
        import matplotlib.collections
    except ImportError as e:
        raise ImportError("You must install the matplotlib library to plot "
                          "the dendrogram. Use no_plot=True to calculate the "
                          "dendrogram without plotting.") from e

    if ax is None:
        ax = matplotlib.pylab.gca()
        # if we're using pylab, we want to trigger a draw at the end
        trigger_redraw = True
    else:
        trigger_redraw = False

    # Independent variable plot width: 10 units per leaf, so leaf centers
    # fall at 5, 15, 25, ... (matching iv_ticks below).
    ivw = len(ivl) * 10
    # Dependent variable plot height: max link height padded by 5%.
    dvw = mh + mh * 0.05

    iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
    if orientation in ('top', 'bottom'):
        if orientation == 'top':
            ax.set_ylim([0, dvw])
            ax.set_xlim([0, ivw])
        else:
            # 'bottom': root at the bottom, so the y axis is inverted.
            ax.set_ylim([dvw, 0])
            ax.set_xlim([0, ivw])

        xlines = icoords
        ylines = dcoords
        if no_labels:
            ax.set_xticks([])
            ax.set_xticklabels([])
        else:
            ax.set_xticks(iv_ticks)

            if orientation == 'top':
                ax.xaxis.set_ticks_position('bottom')
            else:
                ax.xaxis.set_ticks_position('top')

            # Make the tick marks invisible because they cover up the links
            for line in ax.get_xticklines():
                line.set_visible(False)

            # Fall back to size/rotation derived from the leaf count when
            # the caller did not specify them.
            leaf_rot = (float(_get_tick_rotation(len(ivl)))
                        if (leaf_rotation is None) else leaf_rotation)
            leaf_font = (float(_get_tick_text_size(len(ivl)))
                         if (leaf_font_size is None) else leaf_font_size)
            ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)

    elif orientation in ('left', 'right'):
        if orientation == 'left':
            # 'left': root at the left, so the x axis is inverted.
            ax.set_xlim([dvw, 0])
            ax.set_ylim([0, ivw])
        else:
            ax.set_xlim([0, dvw])
            ax.set_ylim([0, ivw])

        # Horizontal orientations swap the coordinate roles.
        xlines = dcoords
        ylines = icoords
        if no_labels:
            ax.set_yticks([])
            ax.set_yticklabels([])
        else:
            ax.set_yticks(iv_ticks)

            if orientation == 'left':
                ax.yaxis.set_ticks_position('right')
            else:
                ax.yaxis.set_ticks_position('left')

            # Make the tick marks invisible because they cover up the links
            for line in ax.get_yticklines():
                line.set_visible(False)

            leaf_font = (float(_get_tick_text_size(len(ivl)))
                         if (leaf_font_size is None) else leaf_font_size)

            if leaf_rotation is not None:
                ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
            else:
                ax.set_yticklabels(ivl, size=leaf_font)

    # Let's use collections instead. This way there is a separate legend item
    # for each tree grouping, rather than stupidly one for each line segment.
    colors_used = _remove_dups(color_list)
    color_to_lines = {}
    for color in colors_used:
        color_to_lines[color] = []
    for (xline, yline, color) in zip(xlines, ylines, color_list):
        color_to_lines[color].append(list(zip(xline, yline)))

    colors_to_collections = {}
    # Construct the collections.
    for color in colors_used:
        coll = matplotlib.collections.LineCollection(color_to_lines[color],
                                                     colors=(color,))
        colors_to_collections[color] = coll

    # Add all the groupings below the color threshold.
    for color in colors_used:
        if color != above_threshold_color:
            ax.add_collection(colors_to_collections[color])
    # If there's a grouping of links above the color threshold, it goes last.
    if above_threshold_color in colors_to_collections:
        ax.add_collection(colors_to_collections[above_threshold_color])

    # Draw the contraction marks (from truncation) as small ellipses along
    # the links, oriented to match the plot direction.
    if contraction_marks is not None:
        Ellipse = matplotlib.patches.Ellipse
        for (x, y) in contraction_marks:
            if orientation in ('left', 'right'):
                e = Ellipse((y, x), width=dvw / 100, height=1.0)
            else:
                e = Ellipse((x, y), width=1.0, height=dvw / 100)
            ax.add_artist(e)
            e.set_clip_box(ax.bbox)
            e.set_alpha(0.5)
            e.set_facecolor('k')

    if trigger_redraw:
        matplotlib.pylab.draw_if_interactive()
+
+
# 'C0' is reserved for links above the color threshold (see
# `dendrogram`'s ``above_threshold_color`` default), so the cycling
# palette for below-threshold links starts at C1.
_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9')
# Module-global palette, rebound by set_link_color_palette().
_link_line_colors = list(_link_line_colors_default)
+
+
def set_link_color_palette(palette):
    """
    Set list of matplotlib color codes for use by dendrogram.

    Note that this palette is global (i.e., setting it once changes the colors
    for all subsequent calls to `dendrogram`) and that it affects only the
    colors below ``color_threshold``.

    Note that `dendrogram` also accepts a custom coloring function through its
    ``link_color_func`` keyword, which is more flexible and non-global.

    Parameters
    ----------
    palette : list of str or None
        A list of matplotlib color codes. The order of the color codes is the
        order in which the colors are cycled through when color thresholding in
        the dendrogram.

        If ``None``, resets the palette to its default (which are matplotlib
        default colors C1 to C9).

    Returns
    -------
    None

    Raises
    ------
    TypeError
        If `palette` is neither None nor a list/tuple, or if any of its
        elements is not a string.

    See Also
    --------
    dendrogram

    Notes
    -----
    Ability to reset the palette with ``None`` added in SciPy 0.17.0.

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
    ...                    400., 754., 564., 138., 219., 869., 669.])
    >>> Z = hierarchy.linkage(ytdist, 'single')
    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
    >>> dn['color_list']
    ['C1', 'C0', 'C0', 'C0', 'C0']
    >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
    >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b')
    >>> dn['color_list']
    ['c', 'b', 'b', 'b', 'b']
    >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
    ...                           above_threshold_color='k')
    >>> dn['color_list']
    ['c', 'm', 'm', 'k', 'k']

    Now, reset the color palette to its default:

    >>> hierarchy.set_link_color_palette(None)

    """
    if palette is None:
        # reset to its default
        palette = _link_line_colors_default
    elif not isinstance(palette, (list, tuple)):
        # isinstance (rather than an exact type check) also accepts
        # list/tuple subclasses, which behave identically here.
        raise TypeError("palette must be a list or tuple")

    if not all(isinstance(p, str) for p in palette):
        raise TypeError("all palette list elements must be color strings")

    global _link_line_colors
    _link_line_colors = palette
+
+
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
               get_leaves=True, orientation='top', labels=None,
               count_sort=False, distance_sort=False, show_leaf_counts=True,
               no_plot=False, no_labels=False, leaf_font_size=None,
               leaf_rotation=None, leaf_label_func=None,
               show_contracted=False, link_color_func=None, ax=None,
               above_threshold_color='C0'):
    """
    Plot the hierarchical clustering as a dendrogram.

    The dendrogram illustrates how each cluster is
    composed by drawing a U-shaped link between a non-singleton
    cluster and its children. The top of the U-link indicates a
    cluster merge. The two legs of the U-link indicate which clusters
    were merged. The length of the two legs of the U-link represents
    the distance between the child clusters. It is also the
    cophenetic distance between original observations in the two
    children clusters.

    Parameters
    ----------
    Z : ndarray
        The linkage matrix encoding the hierarchical clustering to
        render as a dendrogram. See the ``linkage`` function for more
        information on the format of ``Z``.
    p : int, optional
        The ``p`` parameter for ``truncate_mode``.
    truncate_mode : str, optional
        The dendrogram can be hard to read when the original
        observation matrix from which the linkage is derived is
        large. Truncation is used to condense the dendrogram. There
        are several modes:

        ``None``
          No truncation is performed (default).
          Note: ``'none'`` is an alias for ``None`` that's kept for
          backward compatibility.

        ``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are the
          only non-leaf nodes in the linkage; they correspond to rows
          ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
          contracted into leaf nodes.

        ``'level'``
          No more than ``p`` levels of the dendrogram tree are displayed.
          A "level" includes all nodes with ``p`` merges from the last merge.

          Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
          backward compatibility.

    color_threshold : double, optional
        For brevity, let :math:`t` be the ``color_threshold``.
        Colors all the descendent links below a cluster node
        :math:`k` the same color if :math:`k` is the first node below
        the cut threshold :math:`t`. All links connecting nodes with
        distances greater than or equal to the threshold are colored
        with the default matplotlib color ``'C0'``. If :math:`t` is less
        than or equal to zero, all nodes are colored ``'C0'``.
        If ``color_threshold`` is None or 'default',
        corresponding with MATLAB(TM) behavior, the threshold is set to
        ``0.7*max(Z[:,2])``.

    get_leaves : bool, optional
        Includes a list ``R['leaves']=H`` in the result
        dictionary. For each :math:`i`, ``H[i] == j``, cluster node
        ``j`` appears in position ``i`` in the left-to-right traversal
        of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
    orientation : str, optional
        The direction to plot the dendrogram, which can be any
        of the following strings:

        ``'top'``
          Plots the root at the top, and plot descendent links going downwards.
          (default).

        ``'bottom'``
          Plots the root at the bottom, and plot descendent links going
          upwards.

        ``'left'``
          Plots the root at the left, and plot descendent links going right.

        ``'right'``
          Plots the root at the right, and plot descendent links going left.

    labels : ndarray, optional
        By default, ``labels`` is None so the index of the original observation
        is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized
        sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the
        text to put under the :math:`i` th leaf node only if it corresponds to
        an original observation and not a non-singleton cluster.
    count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) n's
        two descendent links are plotted is determined by this
        parameter, which can be any of the following values:

        ``False``
          Nothing is done.

        ``'ascending'`` or ``True``
          The child with the minimum number of original objects in its cluster
          is plotted first.

        ``'descending'``
          The child with the maximum number of original objects in its cluster
          is plotted first.

        Note, ``distance_sort`` and ``count_sort`` cannot both be True.
    distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) n's
        two descendent links are plotted is determined by this
        parameter, which can be any of the following values:

        ``False``
          Nothing is done.

        ``'ascending'`` or ``True``
          The child with the minimum distance between its direct descendents is
          plotted first.

        ``'descending'``
          The child with the maximum distance between its direct descendents is
          plotted first.

        Note ``distance_sort`` and ``count_sort`` cannot both be True.
    show_leaf_counts : bool, optional
         When True, leaf nodes representing :math:`k>1` original
         observation are labeled with the number of observations they
         contain in parentheses.
    no_plot : bool, optional
        When True, the final rendering is not performed. This is
        useful if only the data structures computed for the rendering
        are needed or if matplotlib is not available.
    no_labels : bool, optional
        When True, no labels appear next to the leaf nodes in the
        rendering of the dendrogram.
    leaf_rotation : double, optional
        Specifies the angle (in degrees) to rotate the leaf
        labels. When unspecified, the rotation is based on the number of
        nodes in the dendrogram (default is 0).
    leaf_font_size : int, optional
        Specifies the font size (in points) of the leaf labels. When
        unspecified, the size based on the number of nodes in the
        dendrogram.
    leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, for each
        leaf with cluster index :math:`k < 2n-1`. The function
        is expected to return a string with the label for the
        leaf.

        Indices :math:`k < n` correspond to original observations
        while indices :math:`k \\geq n` correspond to non-singleton
        clusters.

        For example, to label singletons with their node id and
        non-singletons with their id, count, and inconsistency
        coefficient, simply do::

            # First define the leaf label function.
            def llf(id):
                if id < n:
                    return str(id)
                else:
                    return '[%d %d %1.2f]' % (id, count, R[n-id,3])
            # The text for the leaf nodes is going to be big so force
            # a rotation of 90 degrees.
            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)

    show_contracted : bool, optional
        When True the heights of non-singleton nodes contracted
        into a leaf node are plotted as crosses along the link
        connecting that leaf node. This really is only useful when
        truncation is used (see ``truncate_mode`` parameter).
    link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
        corresponding to each U-shaped link it will paint. The function is
        expected to return the color to paint the link, encoded as a matplotlib
        color string code. For example::

            dendrogram(Z, link_color_func=lambda k: colors[k])

        colors the direct links below each untruncated non-singleton node
        ``k`` using ``colors[k]``.
    ax : matplotlib Axes instance, optional
        If None and `no_plot` is not True, the dendrogram will be plotted
        on the current axes.  Otherwise if `no_plot` is not True the
        dendrogram will be plotted on the given ``Axes`` instance. This can be
        useful if the dendrogram is part of a more complex figure.
    above_threshold_color : str, optional
        This matplotlib color string sets the color of the links above the
        color_threshold. The default is ``'C0'``.

    Returns
    -------
    R : dict
        A dictionary of data structures computed to render the
        dendrogram. It has the following keys:

        ``'color_list'``
          A list of color names. The k'th element represents the color of the
          k'th link.

        ``'icoord'`` and ``'dcoord'``
          Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
          where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
          where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
          ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.

        ``'ivl'``
          A list of labels corresponding to the leaf nodes.

        ``'leaves'``
          For each i, ``H[i] == j``, cluster node ``j`` appears in position
          ``i`` in the left-to-right traversal of the leaves, where
          :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
          ``i``-th leaf node corresponds to an original observation.
          Otherwise, it corresponds to a non-singleton cluster.

        ``'leaves_color_list'``
          A list of color names. The k'th element represents the color of the
          k'th leaf.

    See Also
    --------
    linkage, set_link_color_palette

    Notes
    -----
    It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise
    crossings appear in the dendrogram.

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> import matplotlib.pyplot as plt

    A very basic example:

    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
    ...                    400., 754., 564., 138., 219., 869., 669.])
    >>> Z = hierarchy.linkage(ytdist, 'single')
    >>> plt.figure()
    >>> dn = hierarchy.dendrogram(Z)

    Now, plot in given axes, improve the color scheme and use both vertical and
    horizontal orientations:

    >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
    >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
    >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
    ...                            orientation='top')
    >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],
    ...                            above_threshold_color='#bcbddc',
    ...                            orientation='right')
    >>> hierarchy.set_link_color_palette(None)  # reset to default after use
    >>> plt.show()

    """
    # This feature was thought about but never implemented (still useful?):
    #
    #         ... = dendrogram(..., leaves_order=None)
    #
    #         Plots the leaves in the order specified by a vector of
    #         original observation indices. If the vector contains duplicates
    #         or results in a crossing, an exception will be thrown. Passing
    #         None orders leaf nodes based on the order they appear in the
    #         pre-order traversal.
    Z = np.asarray(Z, order='c')

    if orientation not in ["top", "left", "bottom", "right"]:
        raise ValueError("orientation must be one of 'top', 'left', "
                         "'bottom', or 'right'")

    if labels is not None and Z.shape[0] + 1 != len(labels):
        raise ValueError("Dimensions of Z and labels must be consistent.")

    is_valid_linkage(Z, throw=True, name='Z')
    Zs = Z.shape
    # n = number of original observations (one more than merge rows).
    n = Zs[0] + 1
    if type(p) in (int, float):
        p = int(p)
    else:
        raise TypeError('The second argument must be a number')

    if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
        # 'mlab' and 'mtica' are kept working for backwards compat.
        raise ValueError('Invalid truncation mode.')

    if truncate_mode == 'lastp' or truncate_mode == 'mlab':
        # p outside the valid range disables truncation.
        if p > n or p == 0:
            p = n

    if truncate_mode == 'mtica':
        # 'mtica' is an alias
        truncate_mode = 'level'

    if truncate_mode == 'level':
        if p <= 0:
            p = np.inf

    if get_leaves:
        lvs = []
    else:
        lvs = None

    # Accumulators that _dendrogram_calculate_info fills in during its
    # recursive traversal; current_color/currently_below_threshold are
    # single-element lists so the recursion can mutate them in place.
    icoord_list = []
    dcoord_list = []
    color_list = []
    current_color = [0]
    currently_below_threshold = [False]
    ivl = []  # list of leaves

    if color_threshold is None or (isinstance(color_threshold, str) and
                                   color_threshold == 'default'):
        color_threshold = max(Z[:, 2]) * 0.7

    R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
         'leaves': lvs, 'color_list': color_list}

    # Empty list will be filled in _dendrogram_calculate_info
    contraction_marks = [] if show_contracted else None

    # Traverse the tree from the root, whose node id is 2n - 2.
    _dendrogram_calculate_info(
        Z=Z, p=p,
        truncate_mode=truncate_mode,
        color_threshold=color_threshold,
        get_leaves=get_leaves,
        orientation=orientation,
        labels=labels,
        count_sort=count_sort,
        distance_sort=distance_sort,
        show_leaf_counts=show_leaf_counts,
        i=2*n - 2,
        iv=0.0,
        ivl=ivl,
        n=n,
        icoord_list=icoord_list,
        dcoord_list=dcoord_list,
        lvs=lvs,
        current_color=current_color,
        color_list=color_list,
        currently_below_threshold=currently_below_threshold,
        leaf_label_func=leaf_label_func,
        contraction_marks=contraction_marks,
        link_color_func=link_color_func,
        above_threshold_color=above_threshold_color)

    if not no_plot:
        mh = max(Z[:, 2])
        _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
                         no_labels, color_list,
                         leaf_font_size=leaf_font_size,
                         leaf_rotation=leaf_rotation,
                         contraction_marks=contraction_marks,
                         ax=ax,
                         above_threshold_color=above_threshold_color)

    R["leaves_color_list"] = _get_leaves_color_list(R)

    return R
+
+
+def _get_leaves_color_list(R):
+ leaves_color_list = [None] * len(R['leaves'])
+ for link_x, link_y, link_color in zip(R['icoord'],
+ R['dcoord'],
+ R['color_list']):
+ for (xi, yi) in zip(link_x, link_y):
+ if yi == 0.0: # if yi is 0.0, the point is a leaf
+ # xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`)
+ # index of leaves are 0, 1, 2, 3, ... as below
+ leaf_index = (int(xi) - 5) // 10
+ # each leaf has a same color of its link.
+ leaves_color_list[leaf_index] = link_color
+ return leaves_color_list
+
+
+def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
+ i, labels):
+ # If the leaf id structure is not None and is a list then the caller
+ # to dendrogram has indicated that cluster id's corresponding to the
+ # leaf nodes should be recorded.
+
+ if lvs is not None:
+ lvs.append(int(i))
+
+ # If leaf node labels are to be displayed...
+ if ivl is not None:
+ # If a leaf_label_func has been provided, the label comes from the
+ # string returned from the leaf_label_func, which is a function
+ # passed to dendrogram.
+ if leaf_label_func:
+ ivl.append(leaf_label_func(int(i)))
+ else:
+ # Otherwise, if the dendrogram caller has passed a labels list
+ # for the leaf nodes, use it.
+ if labels is not None:
+ ivl.append(labels[int(i - n)])
+ else:
+ # Otherwise, use the id as the label for the leaf.x
+ ivl.append(str(int(i)))
+
+
+def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
+ i, labels, show_leaf_counts):
+ # If the leaf id structure is not None and is a list then the caller
+ # to dendrogram has indicated that cluster id's corresponding to the
+ # leaf nodes should be recorded.
+
+ if lvs is not None:
+ lvs.append(int(i))
+ if ivl is not None:
+ if leaf_label_func:
+ ivl.append(leaf_label_func(int(i)))
+ else:
+ if show_leaf_counts:
+ ivl.append("(" + str(int(Z[i - n, 3])) + ")")
+ else:
+ ivl.append("")
+
+
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
    """Collect contraction marks for both children of non-singleton i."""
    # Columns 0 and 1 of row i - n hold the two child cluster ids.
    for child_col in (0, 1):
        child = int(Z[i - n, child_col])
        _append_contraction_marks_sub(Z, iv, child, n, contraction_marks)
+
+
+def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
+ if i >= n:
+ contraction_marks.append((iv, Z[i - n, 2]))
+ _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
+ _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
+
+
def _dendrogram_calculate_info(Z, p, truncate_mode,
                               color_threshold=np.inf, get_leaves=True,
                               orientation='top', labels=None,
                               count_sort=False, distance_sort=False,
                               show_leaf_counts=False, i=-1, iv=0.0,
                               ivl=[], n=0, icoord_list=[], dcoord_list=[],
                               lvs=None, mhr=False,
                               current_color=[], color_list=[],
                               currently_below_threshold=[],
                               leaf_label_func=None, level=0,
                               contraction_marks=None,
                               link_color_func=None,
                               above_threshold_color='C0'):
    """
    Calculate the endpoints of the links as well as the labels for the
    dendrogram rooted at the node with index i. iv is the independent
    variable value to plot the left-most leaf node below the root node i
    (if orientation='top', this would be the left-most x value where the
    plotting of this root node i and its descendents should begin).

    ivl is a list to store the labels of the leaf nodes. The leaf_label_func
    is called whenever ivl != None, labels == None, and
    leaf_label_func != None. When ivl != None and labels != None, the
    labels list is used only for labeling the leaf nodes. When
    ivl == None, no labels are generated for leaf nodes.

    When get_leaves==True, a list of leaves is built as they are visited
    in the dendrogram.

    NOTE(review): the mutable default arguments (``ivl=[]``,
    ``icoord_list=[]``, ``dcoord_list=[]``, ``current_color=[]``,
    ``color_list=[]``, ``currently_below_threshold=[]``) would be shared
    across calls that rely on the defaults; `dendrogram` always passes
    them explicitly, but this is a latent pitfall. The ``mhr`` parameter
    is never referenced in the body.

    Returns a tuple with l being the independent variable coordinate that
    corresponds to the midpoint of cluster to the left of cluster i if
    i is non-singleton, otherwise the independent coordinate of the leaf
    node if i is a leaf node.

    Returns
    -------
    A tuple (left, w, h, md), where:

        * left is the independent variable coordinate of the center of the
          the U of the subtree

        * w is the amount of space used for the subtree (in independent
          variable units)

        * h is the height of the subtree in dependent variable units

        * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including
          the target node.

    """
    if n == 0:
        raise ValueError("Invalid singleton cluster count n.")

    if i == -1:
        raise ValueError("Invalid root cluster index i.")

    if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
        if 2*n - p > i >= n:
            d = Z[i - n, 2]
            _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
                                           leaf_label_func, i, labels,
                                           show_leaf_counts)
            if contraction_marks is not None:
                _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
            return (iv + 5.0, 10.0, 0.0, d)
        elif i < n:
            _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
                                        leaf_label_func, i, labels)
            return (iv + 5.0, 10.0, 0.0, 0.0)
    elif truncate_mode == 'level':
        # Contract this subtree into a leaf once the recursion depth
        # exceeds p levels.
        if i > n and level > p:
            d = Z[i - n, 2]
            _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
                                           leaf_label_func, i, labels,
                                           show_leaf_counts)
            if contraction_marks is not None:
                _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
            return (iv + 5.0, 10.0, 0.0, d)
        elif i < n:
            _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
                                        leaf_label_func, i, labels)
            return (iv + 5.0, 10.0, 0.0, 0.0)
    elif truncate_mode in ('mlab',):
        msg = "Mode 'mlab' is deprecated in scipy 0.19.0 (it never worked)."
        warnings.warn(msg, DeprecationWarning)

    # Otherwise, only truncate if we have a leaf node.
    #
    # Only place leaves if they correspond to original observations.
    if i < n:
        _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
                                    leaf_label_func, i, labels)
        return (iv + 5.0, 10.0, 0.0, 0.0)

    # !!! Otherwise, we don't have a leaf node, so work on plotting a
    # non-leaf node.
    # Actual indices of a and b
    aa = int(Z[i - n, 0])
    ab = int(Z[i - n, 1])
    if aa >= n:
        # The number of singletons below cluster a
        na = Z[aa - n, 3]
        # The distance between a's two direct children.
        da = Z[aa - n, 2]
    else:
        na = 1
        da = 0.0
    if ab >= n:
        nb = Z[ab - n, 3]
        db = Z[ab - n, 2]
    else:
        nb = 1
        db = 0.0

    if count_sort == 'ascending' or count_sort == True:
        # If a has a count greater than b, it and its descendents should
        # be drawn to the right. Otherwise, to the left.
        if na > nb:
            # The cluster index to draw to the left (ua) will be ab
            # and the one to draw to the right (ub) will be aa
            ua = ab
            ub = aa
        else:
            ua = aa
            ub = ab
    elif count_sort == 'descending':
        # If a has a count less than or equal to b, it and its
        # descendents should be drawn to the left. Otherwise, to
        # the right.
        if na > nb:
            ua = aa
            ub = ab
        else:
            ua = ab
            ub = aa
    elif distance_sort == 'ascending' or distance_sort == True:
        # If a has a distance greater than b, it and its descendents should
        # be drawn to the right. Otherwise, to the left.
        if da > db:
            ua = ab
            ub = aa
        else:
            ua = aa
            ub = ab
    elif distance_sort == 'descending':
        # If a has a distance less than or equal to b, it and its
        # descendents should be drawn to the left. Otherwise, to
        # the right.
        if da > db:
            ua = aa
            ub = ab
        else:
            ua = ab
            ub = aa
    else:
        ua = aa
        ub = ab

    # Updated iv variable and the amount of space used.
    (uiva, uwa, uah, uamd) = \
        _dendrogram_calculate_info(
            Z=Z, p=p,
            truncate_mode=truncate_mode,
            color_threshold=color_threshold,
            get_leaves=get_leaves,
            orientation=orientation,
            labels=labels,
            count_sort=count_sort,
            distance_sort=distance_sort,
            show_leaf_counts=show_leaf_counts,
            i=ua, iv=iv, ivl=ivl, n=n,
            icoord_list=icoord_list,
            dcoord_list=dcoord_list, lvs=lvs,
            current_color=current_color,
            color_list=color_list,
            currently_below_threshold=currently_below_threshold,
            leaf_label_func=leaf_label_func,
            level=level + 1, contraction_marks=contraction_marks,
            link_color_func=link_color_func,
            above_threshold_color=above_threshold_color)

    h = Z[i - n, 2]
    if h >= color_threshold or color_threshold <= 0:
        c = above_threshold_color

        # Leaving the colored-below-threshold region: advance the
        # palette index so the next region gets a fresh color.
        if currently_below_threshold[0]:
            current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
        currently_below_threshold[0] = False
    else:
        currently_below_threshold[0] = True
        c = _link_line_colors[current_color[0]]

    # Right subtree starts where the left subtree's space (uwa) ends.
    (uivb, uwb, ubh, ubmd) = \
        _dendrogram_calculate_info(
            Z=Z, p=p,
            truncate_mode=truncate_mode,
            color_threshold=color_threshold,
            get_leaves=get_leaves,
            orientation=orientation,
            labels=labels,
            count_sort=count_sort,
            distance_sort=distance_sort,
            show_leaf_counts=show_leaf_counts,
            i=ub, iv=iv + uwa, ivl=ivl, n=n,
            icoord_list=icoord_list,
            dcoord_list=dcoord_list, lvs=lvs,
            current_color=current_color,
            color_list=color_list,
            currently_below_threshold=currently_below_threshold,
            leaf_label_func=leaf_label_func,
            level=level + 1, contraction_marks=contraction_marks,
            link_color_func=link_color_func,
            above_threshold_color=above_threshold_color)

    max_dist = max(uamd, ubmd, h)

    # The U-shaped link for node i: two uprights at the children's
    # midpoints joined by the crossbar at height h.
    icoord_list.append([uiva, uiva, uivb, uivb])
    dcoord_list.append([uah, h, h, ubh])
    if link_color_func is not None:
        v = link_color_func(int(i))
        if not isinstance(v, str):
            raise TypeError("link_color_func must return a matplotlib "
                            "color string!")
        color_list.append(v)
    else:
        color_list.append(c)

    return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
+
+
def is_isomorphic(T1, T2):
    """
    Determine if two different cluster assignments are equivalent.

    Two flat cluster assignments are isomorphic when one can be obtained
    from the other by a one-to-one relabeling of the cluster ids.

    Parameters
    ----------
    T1 : array_like
        An assignment of singleton cluster ids to flat cluster ids.
    T2 : array_like
        An assignment of singleton cluster ids to flat cluster ids.

    Returns
    -------
    b : bool
        Whether the flat cluster assignments `T1` and `T2` are
        equivalent.

    Raises
    ------
    ValueError
        If `T1` or `T2` is not one-dimensional, or if they differ in length.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    fcluster: for the creation of flat cluster assignments.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic
    >>> from scipy.cluster.hierarchy import single, complete
    >>> from scipy.spatial.distance import pdist

    Two flat cluster assignments can be isomorphic if they represent the same
    cluster assignment, with different labels.

    For example, we can use the `scipy.cluster.hierarchy.single` method
    and flatten the output to four clusters:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = single(pdist(X))
    >>> T = fcluster(Z, 1, criterion='distance')
    >>> T
    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)

    We can then do the same using the
    `scipy.cluster.hierarchy.complete` method:

    >>> Z = complete(pdist(X))
    >>> T_ = fcluster(Z, 1.5, criterion='distance')
    >>> T_
    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)

    As we can see, in both cases we obtain four clusters and all the data
    points are distributed in the same way - the only thing that changes
    are the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
    cluster assignments are isomorphic:

    >>> is_isomorphic(T, T_)
    True

    """
    # np.asarray always returns an ndarray, so no explicit type check is
    # needed afterwards (the old ``type(T1) != np.ndarray`` guards were
    # unreachable dead code).
    T1 = np.asarray(T1, order='c')
    T2 = np.asarray(T2, order='c')

    if T1.ndim != 1:
        raise ValueError('T1 must be one-dimensional.')
    if T2.ndim != 1:
        raise ValueError('T2 must be one-dimensional.')
    if T1.shape[0] != T2.shape[0]:
        raise ValueError('T1 and T2 must have the same number of elements.')

    # Incrementally build a bijection between the two label sets; fail as
    # soon as any pair contradicts the mapping seen so far.
    d1 = {}  # forward map: T1 label -> T2 label
    d2 = {}  # inverse map: T2 label -> T1 label
    for a, b in zip(T1, T2):
        if a in d1:
            if b not in d2:
                return False  # known T1 label paired with an unseen T2 label
            if d1[a] != b or d2[b] != a:
                return False  # mapping must be consistent in both directions
        elif b in d2:
            return False  # T2 label already claimed by a different T1 label
        else:
            d1[a] = b
            d2[b] = a
    return True
+
+
def maxdists(Z):
    """
    Return the maximum distance between any non-singleton cluster.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. See
        ``linkage`` for more information.

    Returns
    -------
    maxdists : ndarray
        A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
        the maximum distance between any cluster (including
        singletons) below and including the node with index i. More
        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
        set of all node indices below and including node i.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    is_monotonic: for testing for monotonicity of a linkage matrix.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import median, maxdists
    >>> from scipy.spatial.distance import pdist

    Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
    computes for each new cluster generated (i.e., for each row of the linkage
    matrix) what is the maximum distance between any two child clusters.

    Due to the nature of hierarchical clustering, in many cases this is going
    to be just the distance between the two child clusters that were merged
    to form the current one - that is, Z[:,2].

    However, for non-monotonic cluster assignments such as
    `scipy.cluster.hierarchy.median` clustering this is not always the
    case: There may be cluster formations where the distance between the two
    clusters merged is smaller than the distance between their children.

    We can see this in an example:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = median(pdist(X))
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.11803399,  3.        ],
           [ 5.        , 13.        ,  1.11803399,  3.        ],
           [ 8.        , 15.        ,  1.11803399,  3.        ],
           [11.        , 14.        ,  1.11803399,  3.        ],
           [18.        , 19.        ,  3.        ,  6.        ],
           [16.        , 17.        ,  3.5       ,  6.        ],
           [20.        , 21.        ,  3.25      , 12.        ]])
    >>> maxdists(Z)
    array([1.        , 1.        , 1.        , 1.        , 1.11803399,
           1.11803399, 1.11803399, 1.11803399, 3.        , 3.5       ,
           3.5       ])

    Note that while the distance between the two clusters merged when creating the
    last cluster is 3.25, there are two children (clusters 16 and 17) whose distance
    is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in
    this case.

    """
    # The C routine requires a contiguous double-precision linkage matrix.
    Z = np.asarray(Z, order='c', dtype=np.double)
    is_valid_linkage(Z, throw=True, name='Z')

    num_obs = Z.shape[0] + 1  # number of original observations
    max_dists = np.zeros(num_obs - 1)  # one entry per non-singleton cluster
    # Pass contiguous copies (not views) into the C code.
    [Z] = _copy_arrays_if_base_present([Z])
    _hierarchy.get_max_dist_for_each_cluster(Z, max_dists, int(num_obs))
    return max_dists
+
+
def maxinconsts(Z, R):
    """
    Return the maximum inconsistency coefficient for each
    non-singleton cluster and its children.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. See
        `linkage` for more information.
    R : ndarray
        The inconsistency matrix.

    Returns
    -------
    MI : ndarray
        A monotonic ``(n-1)``-sized numpy array of doubles.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    inconsistent: for the creation of a inconsistency matrix.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
    >>> from scipy.spatial.distance import pdist

    Given a data set ``X``, we can apply a clustering method to obtain a
    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
    be also used to obtain the inconsistency matrix ``R`` associated to
    this clustering process:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = median(pdist(X))
    >>> R = inconsistent(Z)
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.11803399,  3.        ],
           [ 5.        , 13.        ,  1.11803399,  3.        ],
           [ 8.        , 15.        ,  1.11803399,  3.        ],
           [11.        , 14.        ,  1.11803399,  3.        ],
           [18.        , 19.        ,  3.        ,  6.        ],
           [16.        , 17.        ,  3.5       ,  6.        ],
           [20.        , 21.        ,  3.25      , 12.        ]])
    >>> R
    array([[1.        , 0.        , 1.        , 0.        ],
           [1.        , 0.        , 1.        , 0.        ],
           [1.        , 0.        , 1.        , 0.        ],
           [1.        , 0.        , 1.        , 0.        ],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.74535599, 1.08655358, 3.        , 1.15470054],
           [1.91202266, 1.37522872, 3.        , 1.15470054],
           [3.25      , 0.25      , 3.        , 0.        ]])

    Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute
    the maximum value of the inconsistency statistic (the last column of
    ``R``) for each non-singleton cluster and its children:

    >>> maxinconsts(Z, R)
    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
           1.15470054])

    """
    Z = np.asarray(Z, order='c')
    R = np.asarray(R, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    is_valid_im(R, throw=True, name='R')

    if Z.shape[0] != R.shape[0]:
        raise ValueError("The inconsistency matrix and linkage matrix each "
                         "have a different number of rows.")

    num_obs = Z.shape[0] + 1  # number of original observations
    max_incons = np.zeros(num_obs - 1)
    # Pass contiguous copies (not views) into the C code.
    [Z, R] = _copy_arrays_if_base_present([Z, R])
    # Column 3 of R holds the inconsistency coefficient.
    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, max_incons,
                                               int(num_obs), 3)
    return max_incons
+
+
def maxRstat(Z, R, i):
    """
    Return the maximum statistic for each non-singleton cluster and its
    children.

    Parameters
    ----------
    Z : array_like
        The hierarchical clustering encoded as a matrix. See `linkage` for more
        information.
    R : array_like
        The inconsistency matrix.
    i : int
        The column of `R` to use as the statistic.

    Returns
    -------
    MR : ndarray
        Calculates the maximum statistic for the i'th column of the
        inconsistency matrix `R` for each non-singleton cluster
        node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
        ``Q(j)`` the set of all node ids corresponding to nodes below
        and including ``j``.

    See Also
    --------
    linkage: for a description of what a linkage matrix is.
    inconsistent: for the creation of a inconsistency matrix.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
    >>> from scipy.spatial.distance import pdist

    Given a data set ``X``, we can apply a clustering method to obtain a
    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
    be also used to obtain the inconsistency matrix ``R`` associated to
    this clustering process:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = median(pdist(X))
    >>> R = inconsistent(Z)
    >>> R
    array([[1.        , 0.        , 1.        , 0.        ],
           [1.        , 0.        , 1.        , 0.        ],
           [1.        , 0.        , 1.        , 0.        ],
           [1.        , 0.        , 1.        , 0.        ],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.05901699, 0.08346263, 2.        , 0.70710678],
           [1.74535599, 1.08655358, 3.        , 1.15470054],
           [1.91202266, 1.37522872, 3.        , 1.15470054],
           [3.25      , 0.25      , 3.        , 0.        ]])

    `scipy.cluster.hierarchy.maxRstat` can be used to compute
    the maximum value of each column of ``R``, for each non-singleton
    cluster and its children:

    >>> maxRstat(Z, R, 0)
    array([1.        , 1.        , 1.        , 1.        , 1.05901699,
           1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
           3.25      ])
    >>> maxRstat(Z, R, 1)
    array([0.        , 0.        , 0.        , 0.        , 0.08346263,
           0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
           1.37522872])
    >>> maxRstat(Z, R, 3)
    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
           1.15470054])

    """
    Z = np.asarray(Z, order='c')
    R = np.asarray(R, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    is_valid_im(R, throw=True, name='R')
    # Strict type check: also rejects bools and numpy integer scalars.
    if type(i) is not int:
        raise TypeError('The third argument must be an integer.')
    if not 0 <= i <= 3:
        raise ValueError('i must be an integer between 0 and 3 inclusive.')

    if Z.shape[0] != R.shape[0]:
        raise ValueError("The inconsistency matrix and linkage matrix each "
                         "have a different number of rows.")

    num_obs = Z.shape[0] + 1  # number of original observations
    max_stats = np.zeros(num_obs - 1)
    # Pass contiguous copies (not views) into the C code.
    [Z, R] = _copy_arrays_if_base_present([Z, R])
    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, max_stats,
                                               int(num_obs), i)
    return max_stats
+
+
def leaders(Z, T):
    """
    Return the root nodes in a hierarchical clustering.

    Returns the root nodes in a hierarchical clustering corresponding
    to a cut defined by a flat cluster assignment vector ``T``. See
    the ``fcluster`` function for more information on the format of ``T``.

    For each flat cluster :math:`j` of the :math:`k` flat clusters
    represented in the n-sized flat cluster assignment vector ``T``,
    this function finds the lowest cluster node :math:`i` in the linkage
    tree Z, such that:

      * leaf descendants belong only to flat cluster j
        (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
        :math:`S(i)` is the set of leaf ids of descendant leaf nodes
        with cluster node :math:`i`)

      * there does not exist a leaf that is not a descendant with
        :math:`i` that also belongs to cluster :math:`j`
        (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
        this condition is violated, ``T`` is not a valid cluster
        assignment vector, and an exception will be thrown.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. See
        `linkage` for more information.
    T : ndarray
        The flat cluster assignment vector.

    Returns
    -------
    L : ndarray
        The leader linkage node id's stored as a k-element 1-D array,
        where ``k`` is the number of flat clusters found in ``T``.

        ``L[j]=i`` is the linkage cluster node id that is the
        leader of flat cluster with id M[j]. If ``i < n``, ``i``
        corresponds to an original observation, otherwise it
        corresponds to a non-singleton cluster.

    M : ndarray
        The leader linkage node id's stored as a k-element 1-D array, where
        ``k`` is the number of flat clusters found in ``T``. This allows the
        set of flat cluster ids to be any arbitrary set of ``k`` integers.

        For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
        id 8's leader is linkage node 2.

    See Also
    --------
    fcluster: for the creation of flat cluster assignments.

    Examples
    --------
    >>> from scipy.cluster.hierarchy import ward, fcluster, leaders
    >>> from scipy.spatial.distance import pdist

    Given a linkage matrix ``Z`` - obtained after applying a clustering method
    to a dataset ``X`` - and a flat cluster assignment array ``T``:

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    >>> Z = ward(pdist(X))
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 9.        , 10.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.29099445,  3.        ],
           [ 5.        , 13.        ,  1.29099445,  3.        ],
           [ 8.        , 14.        ,  1.29099445,  3.        ],
           [11.        , 15.        ,  1.29099445,  3.        ],
           [16.        , 17.        ,  5.77350269,  6.        ],
           [18.        , 19.        ,  5.77350269,  6.        ],
           [20.        , 21.        ,  8.16496581, 12.        ]])

    >>> T = fcluster(Z, 3, criterion='distance')
    >>> T
    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)

    `scipy.cluster.hierarchy.leaders` returns the indices of the nodes
    in the dendrogram that are the leaders of each flat cluster:

    >>> L, M = leaders(Z, T)
    >>> L
    array([16, 17, 18, 19], dtype=int32)

    (remember that indices 0-11 point to the 12 data points in ``X``,
    whereas indices 12-22 point to the 11 rows of ``Z``)

    `scipy.cluster.hierarchy.leaders` also returns the indices of
    the flat clusters in ``T``:

    >>> M
    array([1, 2, 3, 4], dtype=int32)

    """
    Z = np.asarray(Z, order='c')
    T = np.asarray(T, order='c')
    # The C routine requires 32-bit integer cluster ids.
    if type(T) != np.ndarray or T.dtype != 'i':
        raise TypeError('T must be a one-dimensional numpy array of integers.')
    is_valid_linkage(Z, throw=True, name='Z')
    if len(T) != Z.shape[0] + 1:
        raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')

    cluster_ids = np.unique(T)  # distinct flat cluster ids
    k = len(cluster_ids)
    L = np.zeros(k, dtype='i')
    M = np.zeros(k, dtype='i')
    num_obs = Z.shape[0] + 1
    # Pass contiguous copies (not views) into the C code.
    [Z, T] = _copy_arrays_if_base_present([Z, T])
    # A non-negative status is the id of the linkage node at which T was
    # found to be inconsistent.
    status = _hierarchy.leaders(Z, T, L, M, int(k), int(num_obs))
    if status >= 0:
        raise ValueError(('T is not a valid assignment vector. Error found '
                          'when examining linkage node %d (< 2n-1).') % status)
    return (L, M)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/setup.py
new file mode 100644
index 0000000..e667d71
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/setup.py
@@ -0,0 +1,27 @@
# NOTE(review): DEFINE_MACROS is not passed to any extension below; kept
# because other build scripts may import it — confirm before removing.
DEFINE_MACROS = [("SCIPY_PY3K", None)]


def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for ``scipy.cluster``.

    Registers the test data directory and the three single-source C
    extension modules, each compiled against the NumPy headers.
    """
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    config = Configuration('cluster', parent_package, top_path)

    config.add_data_dir('tests')

    # Each extension is built from exactly one C file named after the
    # module; the original spelled this out three times (with redundant
    # parentheses around the source strings).
    for ext_name in ('_vq', '_hierarchy', '_optimal_leaf_ordering'):
        config.add_extension(ext_name,
                             sources=[ext_name + '.c'],
                             include_dirs=[get_numpy_include_dirs()])

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/hierarchy_test_data.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/hierarchy_test_data.py
new file mode 100644
index 0000000..7d874ca
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/hierarchy_test_data.py
@@ -0,0 +1,145 @@
+from numpy import array
+
+
# 30 observations in 3-D, in three groups of 10 (coordinates clustered
# roughly around 0.5, 1.7 and 3.0 respectively).
Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
             [7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
             [6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
             [9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
             [1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
             [9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
             [6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
             [9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
             [6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
             [5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
             [1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
             [2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
             [1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
             [1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
             [1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
             [2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
             [1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
             [1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
             [1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
             [1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
             [3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
             [3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
             [3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
             [2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
             [3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
             [2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
             [2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
             [2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
             [2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
             [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])

# Condensed distance matrix for 6 observations (15 = 6*5/2 pairwise
# distances), used as linkage input below.
ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
                564., 138., 219., 869., 669.])

# Expected linkage matrices for ytdist under each method
# (columns: left child, right child, merge distance, cluster size).
linkage_ytdist_single = array([[2., 5., 138., 2.],
                               [3., 4., 219., 2.],
                               [0., 7., 255., 3.],
                               [1., 8., 268., 4.],
                               [6., 9., 295., 6.]])

linkage_ytdist_complete = array([[2., 5., 138., 2.],
                                 [3., 4., 219., 2.],
                                 [1., 6., 400., 3.],
                                 [0., 7., 412., 3.],
                                 [8., 9., 996., 6.]])

linkage_ytdist_average = array([[2., 5., 138., 2.],
                                [3., 4., 219., 2.],
                                [0., 7., 333.5, 3.],
                                [1., 6., 347.5, 3.],
                                [8., 9., 680.77777778, 6.]])

linkage_ytdist_weighted = array([[2., 5., 138., 2.],
                                 [3., 4., 219., 2.],
                                 [0., 7., 333.5, 3.],
                                 [1., 6., 347.5, 3.],
                                 [8., 9., 670.125, 6.]])

# the optimal leaf ordering of linkage_ytdist_single
linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
                                   [4., 3., 219., 2.],
                                   [7., 0., 255., 3.],
                                   [1., 8., 268., 4.],
                                   [6., 9., 295., 6.]])

# Six 2-D observations used for the centroid/median/ward linkage fixtures.
X = array([[1.43054825, -7.5693489],
           [6.95887839, 6.82293382],
           [2.87137846, -9.68248579],
           [7.87974764, -6.05485803],
           [8.24018364, -6.09495602],
           [7.39020262, 8.54004355]])

linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
                            [1., 5., 1.77045373, 2.],
                            [0., 2., 2.55760419, 2.],
                            [6., 8., 6.43614494, 4.],
                            [7., 9., 15.17363237, 6.]])

linkage_X_median = array([[3., 4., 0.36265956, 2.],
                          [1., 5., 1.77045373, 2.],
                          [0., 2., 2.55760419, 2.],
                          [6., 8., 6.43614494, 4.],
                          [7., 9., 15.17363237, 6.]])

linkage_X_ward = array([[3., 4., 0.36265956, 2.],
                        [1., 5., 1.77045373, 2.],
                        [0., 2., 2.55760419, 2.],
                        [6., 8., 9.10208346, 4.],
                        [7., 9., 24.7784379, 6.]])

# the optimal leaf ordering of linkage_X_ward
linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
                            [5., 1., 1.77045373, 2.],
                            [2., 0., 2.55760419, 2.],
                            [6., 8., 9.10208346, 4.],
                            [7., 9., 24.7784379, 6.]])

# Expected inconsistency matrices keyed by depth; presumably computed from
# the ytdist linkage above — verify against the tests that use them.
inconsistent_ytdist = {
    1: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [255., 0., 1., 0.],
              [268., 0., 1., 0.],
              [295., 0., 1., 0.]]),
    2: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [237., 25.45584412, 2., 0.70710678],
              [261.5, 9.19238816, 2., 0.70710678],
              [233.66666667, 83.9424406, 3., 0.7306594]]),
    3: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [237., 25.45584412, 2., 0.70710678],
              [247.33333333, 25.38372182, 3., 0.81417007],
              [239., 69.36377537, 4., 0.80733783]]),
    4: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [237., 25.45584412, 2., 0.70710678],
              [247.33333333, 25.38372182, 3., 0.81417007],
              [235., 60.73302232, 5., 0.98793042]])}

# Expected fcluster() labelings (30 labels each, matching a 30-observation
# dataset — presumably Q_X) keyed by threshold, one dict per criterion.
fcluster_inconsistent = {
    0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1])}

fcluster_distance = {
    0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
                1, 1, 1, 2, 1, 1, 1, 1, 1]),
    1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1])}

fcluster_maxclust = {
    8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
                1, 1, 1, 3, 1, 1, 1, 1, 2]),
    4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1])}
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_disjoint_set.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_disjoint_set.py
new file mode 100644
index 0000000..8728810
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_disjoint_set.py
@@ -0,0 +1,201 @@
+import pytest
+from pytest import raises as assert_raises
+import numpy as np
+import collections
+from scipy.cluster.hierarchy import DisjointSet
+import string
+
+
def generate_random_token():
    """Yield an endless, reproducible stream of hashable tokens.

    The pool mixes ints, floats, ASCII letters and None; each yielded item
    is either a single token or a 2-tuple of tokens, so DisjointSet keys
    of several different types get exercised.
    """
    k = len(string.ascii_letters)
    pool = list(np.arange(k, dtype=int))
    pool += list(np.arange(k, dtype=float))
    pool += list(string.ascii_letters)
    pool += [None] * k
    rng = np.random.RandomState(seed=0)  # fixed seed for reproducibility

    while True:
        size = rng.randint(1, 3)  # 1 or 2 tokens per item
        picked = rng.choice(pool, size)
        if size == 1:
            yield picked[0]
        else:
            yield tuple(picked)
+
+
def get_elements(n):
    """Return the first ``n`` distinct tokens from the seeded stream."""
    # OrderedDict is deterministic without difficulty of comparing numpy ints
    seen = collections.OrderedDict()
    for token in generate_random_token():
        if token not in seen:
            seen[token] = len(seen)
        if len(seen) >= n:
            break
    return list(seen.keys())
+
+
def test_init():
    # Construction from an iterable records both count and insertion order.
    num = 10
    elements = get_elements(num)
    dis = DisjointSet(elements)
    assert dis.n_subsets == num
    assert list(dis) == elements
+
+
def test_len():
    # len() counts elements, and grows when a new element is added.
    num = 10
    elements = get_elements(num)
    dis = DisjointSet(elements)
    assert len(dis) == num

    dis.add("dummy")
    assert len(dis) == num + 1
+
+
@pytest.mark.parametrize("n", [10, 100])
def test_contains(n):
    # Membership holds for every inserted element and for nothing else.
    elements = get_elements(n)
    dis = DisjointSet(elements)
    for element in elements:
        assert element in dis

    assert "dummy" not in dis
+
+
@pytest.mark.parametrize("n", [10, 100])
def test_add(n):
    # Incremental add() must reproduce bulk construction exactly.
    elements = get_elements(n)
    reference = DisjointSet(elements)

    built = DisjointSet()
    for count, element in enumerate(elements, start=1):
        built.add(element)
        assert len(built) == count

        # adding an existing element again must be a no-op
        built.add(element)
        assert len(built) == count

    assert list(reference) == list(built)
+
+
def test_element_not_present():
    # Lookup, merge and connected all raise KeyError for unknown elements.
    elements = get_elements(n=10)
    dis = DisjointSet(elements)

    with assert_raises(KeyError):
        dis["dummy"]

    for method in (dis.merge, dis.connected):
        with assert_raises(KeyError):
            method(elements[0], "dummy")
+
+
@pytest.mark.parametrize("direction", ["forwards", "backwards"])
@pytest.mark.parametrize("n", [10, 100])
def test_linear_union_sequence(n, direction):
    # Merging a chain of neighbors collapses everything into one subset,
    # with the root determined by the merge order.
    elements = get_elements(n)
    dis = DisjointSet(elements)
    assert elements == list(dis)

    order = list(range(n - 1))
    if direction == "backwards":
        order.reverse()

    for step, idx in enumerate(order):
        assert not dis.connected(elements[idx], elements[idx + 1])
        assert dis.merge(elements[idx], elements[idx + 1])
        assert dis.connected(elements[idx], elements[idx + 1])
        assert dis.n_subsets == n - 1 - step

    roots = [dis[element] for element in elements]
    expected_root = elements[0] if direction == "forwards" else elements[-2]
    assert all(root == expected_root for root in roots)
    # Everything already connected: a further merge is a no-op.
    assert not dis.merge(elements[0], elements[-1])
+
+
@pytest.mark.parametrize("n", [10, 100])
def test_self_unions(n):
    # Merging an element with itself never changes anything.
    elements = get_elements(n)
    dis = DisjointSet(elements)

    for element in elements:
        assert dis.connected(element, element)
        assert not dis.merge(element, element)
        assert dis.connected(element, element)
        assert dis.n_subsets == len(elements)

    assert elements == list(dis)
    assert [dis[element] for element in elements] == elements
+
+
@pytest.mark.parametrize("order", ["ab", "ba"])
@pytest.mark.parametrize("n", [10, 100])
def test_equal_size_ordering(n, order):
    # When both subsets have equal size, the root is the element that was
    # inserted earlier, regardless of argument order.
    elements = get_elements(n)
    dis = DisjointSet(elements)

    rng = np.random.RandomState(seed=0)
    indices = np.arange(n)
    rng.shuffle(indices)

    for pos in range(0, len(indices), 2):
        i, j = indices[pos], indices[pos + 1]
        a, b = elements[i], elements[j]
        if order == "ab":
            assert dis.merge(a, b)
        else:
            assert dis.merge(b, a)

        winner = elements[min(i, j)]
        assert dis[a] == winner
        assert dis[b] == winner
+
+
@pytest.mark.parametrize("kmax", [5, 10])
def test_binary_tree(kmax):
    # Merge blocks pairwise, doubling the block size each round, and check
    # that roots match the start of each merged block after every round.
    n = 2**kmax
    elements = get_elements(n)
    dis = DisjointSet(elements)
    rng = np.random.RandomState(seed=0)

    for span in 2**np.arange(kmax):
        for start in range(0, n, 2 * span):
            r1, r2 = rng.randint(0, span, size=2)
            a, b = elements[start + r1], elements[start + span + r2]
            assert not dis.connected(a, b)
            assert dis.merge(a, b)
            assert dis.connected(a, b)

        assert elements == list(dis)
        roots = [dis[element] for element in elements]
        expected_indices = np.arange(n) - np.arange(n) % (2 * span)
        assert roots == [elements[i] for i in expected_indices]
+
+
@pytest.mark.parametrize("n", [10, 100])
def test_subsets(n):
    # subset()/subsets() must agree with a brute-force grouping by root.
    elements = get_elements(n)
    dis = DisjointSet(elements)

    rng = np.random.RandomState(seed=0)
    for i, j in rng.randint(0, n, (n, 2)):
        x, y = elements[i], elements[j]

        same_root = {element for element in dis
                     if {dis[element]} == {dis[x]}}
        assert same_root == dis.subset(x)

        by_root = {dis[element]: set() for element in dis}
        for element in dis:
            by_root[dis[element]].add(element)
        assert list(by_root.values()) == dis.subsets()

        dis.merge(x, y)
        assert dis.subset(x) == dis.subset(y)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_hierarchy.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_hierarchy.py
new file mode 100644
index 0000000..2591d8b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_hierarchy.py
@@ -0,0 +1,1091 @@
+#
+# Author: Damian Eads
+# Date: April 17, 2008
+#
+# Copyright (C) 2008 Damian Eads
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.cluster.hierarchy
+from scipy.cluster.hierarchy import (
+ ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
+ num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
+ is_isomorphic, single, leaders,
+ correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
+ is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
+ set_link_color_palette, cut_tree, optimal_leaf_ordering,
+ _order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
+from scipy.spatial.distance import pdist
+from scipy.cluster._hierarchy import Heap
+
+from . import hierarchy_test_data
+
+
+# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
+# check if it's available
+try:
+ import matplotlib # type: ignore[import]
+ # and set the backend to be Agg (no gui)
+ matplotlib.use('Agg')
+ # before importing pyplot
+ import matplotlib.pyplot as plt # type: ignore[import]
+ have_matplotlib = True
+except Exception:
+ have_matplotlib = False
+
+
+class TestLinkage(object):
+ def test_linkage_non_finite_elements_in_distance_matrix(self):
+ # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
+ # Exception expected.
+ y = np.zeros((6,))
+ y[0] = np.nan
+ assert_raises(ValueError, linkage, y)
+
+ def test_linkage_empty_distance_matrix(self):
+ # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
+ y = np.zeros((0,))
+ assert_raises(ValueError, linkage, y)
+
+ def test_linkage_tdist(self):
+ for method in ['single', 'complete', 'average', 'weighted']:
+ self.check_linkage_tdist(method)
+
+ def check_linkage_tdist(self, method):
+ # Tests linkage(Y, method) on the tdist data set.
+ Z = linkage(hierarchy_test_data.ytdist, method)
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
+ assert_allclose(Z, expectedZ, atol=1e-10)
+
+ def test_linkage_X(self):
+ for method in ['centroid', 'median', 'ward']:
+ self.check_linkage_q(method)
+
+ def check_linkage_q(self, method):
+ # Tests linkage(Y, method) on the Q data set.
+ Z = linkage(hierarchy_test_data.X, method)
+ expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
+ assert_allclose(Z, expectedZ, atol=1e-06)
+
+ y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
+ metric="euclidean")
+ Z = linkage(y, method)
+ assert_allclose(Z, expectedZ, atol=1e-06)
+
+ def test_compare_with_trivial(self):
+ rng = np.random.RandomState(0)
+ n = 20
+ X = rng.rand(n, 2)
+ d = pdist(X)
+
+ for method, code in _LINKAGE_METHODS.items():
+ Z_trivial = _hierarchy.linkage(d, n, code)
+ Z = linkage(d, method)
+ assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
+
+ def test_optimal_leaf_ordering(self):
+ Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
+ assert_allclose(Z, expectedZ, atol=1e-10)
+
+
+class TestLinkageTies(object):
+ _expectations = {
+ 'single': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 1.41421356, 3]]),
+ 'complete': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 2.82842712, 3]]),
+ 'average': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 2.12132034, 3]]),
+ 'weighted': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 2.12132034, 3]]),
+ 'centroid': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 2.12132034, 3]]),
+ 'median': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 2.12132034, 3]]),
+ 'ward': np.array([[0, 1, 1.41421356, 2],
+ [2, 3, 2.44948974, 3]]),
+ }
+
+ def test_linkage_ties(self):
+ for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
+ self.check_linkage_ties(method)
+
+ def check_linkage_ties(self, method):
+ X = np.array([[-1, -1], [0, 0], [1, 1]])
+ Z = linkage(X, method=method)
+ expectedZ = self._expectations[method]
+ assert_allclose(Z, expectedZ, atol=1e-06)
+
+
+class TestInconsistent(object):
+ def test_inconsistent_tdist(self):
+ for depth in hierarchy_test_data.inconsistent_ytdist:
+ self.check_inconsistent_tdist(depth)
+
+ def check_inconsistent_tdist(self, depth):
+ Z = hierarchy_test_data.linkage_ytdist_single
+ assert_allclose(inconsistent(Z, depth),
+ hierarchy_test_data.inconsistent_ytdist[depth])
+
+
+class TestCopheneticDistance(object):
+ def test_linkage_cophenet_tdist_Z(self):
+ # Tests cophenet(Z) on tdist data set.
+ expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
+ 295, 138, 219, 295, 295])
+ Z = hierarchy_test_data.linkage_ytdist_single
+ M = cophenet(Z)
+ assert_allclose(M, expectedM, atol=1e-10)
+
+ def test_linkage_cophenet_tdist_Z_Y(self):
+ # Tests cophenet(Z, Y) on tdist data set.
+ Z = hierarchy_test_data.linkage_ytdist_single
+ (c, M) = cophenet(Z, hierarchy_test_data.ytdist)
+ expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
+ 295, 138, 219, 295, 295])
+ expectedc = 0.639931296433393415057366837573
+ assert_allclose(c, expectedc, atol=1e-10)
+ assert_allclose(M, expectedM, atol=1e-10)
+
+
+class TestMLabLinkageConversion(object):
+ def test_mlab_linkage_conversion_empty(self):
+ # Tests from/to_mlab_linkage on empty linkage array.
+ X = np.asarray([])
+ assert_equal(from_mlab_linkage([]), X)
+ assert_equal(to_mlab_linkage([]), X)
+
+ def test_mlab_linkage_conversion_single_row(self):
+ # Tests from/to_mlab_linkage on linkage array with single row.
+ Z = np.asarray([[0., 1., 3., 2.]])
+ Zm = [[1, 2, 3]]
+ assert_equal(from_mlab_linkage(Zm), Z)
+ assert_equal(to_mlab_linkage(Z), Zm)
+
+ def test_mlab_linkage_conversion_multiple_rows(self):
+ # Tests from/to_mlab_linkage on linkage array with multiple rows.
+ Zm = np.asarray([[3, 6, 138], [4, 5, 219],
+ [1, 8, 255], [2, 9, 268], [7, 10, 295]])
+ Z = np.array([[2., 5., 138., 2.],
+ [3., 4., 219., 2.],
+ [0., 7., 255., 3.],
+ [1., 8., 268., 4.],
+ [6., 9., 295., 6.]],
+ dtype=np.double)
+ assert_equal(from_mlab_linkage(Zm), Z)
+ assert_equal(to_mlab_linkage(Z), Zm)
+
+
+class TestFcluster(object):
+ def test_fclusterdata(self):
+ for t in hierarchy_test_data.fcluster_inconsistent:
+ self.check_fclusterdata(t, 'inconsistent')
+ for t in hierarchy_test_data.fcluster_distance:
+ self.check_fclusterdata(t, 'distance')
+ for t in hierarchy_test_data.fcluster_maxclust:
+ self.check_fclusterdata(t, 'maxclust')
+
+ def check_fclusterdata(self, t, criterion):
+ # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
+ expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
+ X = hierarchy_test_data.Q_X
+ T = fclusterdata(X, criterion=criterion, t=t)
+ assert_(is_isomorphic(T, expectedT))
+
+ def test_fcluster(self):
+ for t in hierarchy_test_data.fcluster_inconsistent:
+ self.check_fcluster(t, 'inconsistent')
+ for t in hierarchy_test_data.fcluster_distance:
+ self.check_fcluster(t, 'distance')
+ for t in hierarchy_test_data.fcluster_maxclust:
+ self.check_fcluster(t, 'maxclust')
+
+ def check_fcluster(self, t, criterion):
+ # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
+ expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
+ Z = single(hierarchy_test_data.Q_X)
+ T = fcluster(Z, criterion=criterion, t=t)
+ assert_(is_isomorphic(T, expectedT))
+
+ def test_fcluster_monocrit(self):
+ for t in hierarchy_test_data.fcluster_distance:
+ self.check_fcluster_monocrit(t)
+ for t in hierarchy_test_data.fcluster_maxclust:
+ self.check_fcluster_maxclust_monocrit(t)
+
+ def check_fcluster_monocrit(self, t):
+ expectedT = hierarchy_test_data.fcluster_distance[t]
+ Z = single(hierarchy_test_data.Q_X)
+ T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
+ assert_(is_isomorphic(T, expectedT))
+
+ def check_fcluster_maxclust_monocrit(self, t):
+ expectedT = hierarchy_test_data.fcluster_maxclust[t]
+ Z = single(hierarchy_test_data.Q_X)
+ T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
+ assert_(is_isomorphic(T, expectedT))
+
+
+class TestLeaders(object):
+ def test_leaders_single(self):
+ # Tests leaders using a flat clustering generated by single linkage.
+ X = hierarchy_test_data.Q_X
+ Y = pdist(X)
+ Z = linkage(Y)
+ T = fcluster(Z, criterion='maxclust', t=3)
+ Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
+ L = leaders(Z, T)
+ assert_equal(L, Lright)
+
+
+class TestIsIsomorphic(object):
+ def test_is_isomorphic_1(self):
+ # Tests is_isomorphic on test case #1 (one flat cluster, different labelings)
+ a = [1, 1, 1]
+ b = [2, 2, 2]
+ assert_(is_isomorphic(a, b))
+ assert_(is_isomorphic(b, a))
+
+ def test_is_isomorphic_2(self):
+ # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
+ a = [1, 7, 1]
+ b = [2, 3, 2]
+ assert_(is_isomorphic(a, b))
+ assert_(is_isomorphic(b, a))
+
+ def test_is_isomorphic_3(self):
+ # Tests is_isomorphic on test case #3 (no flat clusters)
+ a = []
+ b = []
+ assert_(is_isomorphic(a, b))
+
+ def test_is_isomorphic_4A(self):
+ # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
+ a = [1, 2, 3]
+ b = [1, 3, 2]
+ assert_(is_isomorphic(a, b))
+ assert_(is_isomorphic(b, a))
+
+ def test_is_isomorphic_4B(self):
+ # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
+ a = [1, 2, 3, 3]
+ b = [1, 3, 2, 3]
+ assert_(is_isomorphic(a, b) == False)
+ assert_(is_isomorphic(b, a) == False)
+
+ def test_is_isomorphic_4C(self):
+ # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
+ a = [7, 2, 3]
+ b = [6, 3, 2]
+ assert_(is_isomorphic(a, b))
+ assert_(is_isomorphic(b, a))
+
+ def test_is_isomorphic_5(self):
+ # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
+ # clusters, random permutation of the labeling).
+ for nc in [2, 3, 5]:
+ self.help_is_isomorphic_randperm(1000, nc)
+
+ def test_is_isomorphic_6(self):
+ # Tests is_isomorphic on test case #6 (1000 observations, 2/3/5 random
+ # clusters, random permutation of the labeling, slightly
+ # nonisomorphic.)
+ for nc in [2, 3, 5]:
+ self.help_is_isomorphic_randperm(1000, nc, True, 5)
+
+ def test_is_isomorphic_7(self):
+ # Regression test for gh-6271
+ assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
+
+ def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
+ for k in range(3):
+ a = np.int_(np.random.rand(nobs) * nclusters)
+ b = np.zeros(a.size, dtype=np.int_)
+ P = np.random.permutation(nclusters)
+ for i in range(0, a.shape[0]):
+ b[i] = P[a[i]]
+ if noniso:
+ Q = np.random.permutation(nobs)
+ b[Q[0:nerrors]] += 1
+ b[Q[0:nerrors]] %= nclusters
+ assert_(is_isomorphic(a, b) == (not noniso))
+ assert_(is_isomorphic(b, a) == (not noniso))
+
+
+class TestIsValidLinkage(object):
+ def test_is_valid_linkage_various_size(self):
+ for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
+ (1, 4, True), (2, 4, True)]:
+ self.check_is_valid_linkage_various_size(nrow, ncol, valid)
+
+ def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
+ # Tests is_valid_linkage(Z) with linkage matrices of various sizes
+ Z = np.asarray([[0, 1, 3.0, 2, 5],
+ [3, 2, 4.0, 3, 3]], dtype=np.double)
+ Z = Z[:nrow, :ncol]
+ assert_(is_valid_linkage(Z) == valid)
+ if not valid:
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+ def test_is_valid_linkage_int_type(self):
+ # Tests is_valid_linkage(Z) with integer type.
+ Z = np.asarray([[0, 1, 3.0, 2],
+ [3, 2, 4.0, 3]], dtype=int)
+ assert_(is_valid_linkage(Z) == False)
+ assert_raises(TypeError, is_valid_linkage, Z, throw=True)
+
+ def test_is_valid_linkage_empty(self):
+ # Tests is_valid_linkage(Z) with empty linkage.
+ Z = np.zeros((0, 4), dtype=np.double)
+ assert_(is_valid_linkage(Z) == False)
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+ def test_is_valid_linkage_4_and_up(self):
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
+ # sizes 4 and 15 (step size 3).
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ assert_(is_valid_linkage(Z) == True)
+
+ def test_is_valid_linkage_4_and_up_neg_index_left(self):
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
+ # sizes 4 and 15 (step size 3) with negative indices (left).
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ Z[i//2,0] = -2
+ assert_(is_valid_linkage(Z) == False)
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+ def test_is_valid_linkage_4_and_up_neg_index_right(self):
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
+ # sizes 4 and 15 (step size 3) with negative indices (right).
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ Z[i//2,1] = -2
+ assert_(is_valid_linkage(Z) == False)
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+ def test_is_valid_linkage_4_and_up_neg_dist(self):
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
+ # sizes 4 and 15 (step size 3) with negative distances.
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ Z[i//2,2] = -0.5
+ assert_(is_valid_linkage(Z) == False)
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+ def test_is_valid_linkage_4_and_up_neg_counts(self):
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
+ # sizes 4 and 15 (step size 3) with negative counts.
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ Z[i//2,3] = -2
+ assert_(is_valid_linkage(Z) == False)
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+
+class TestIsValidInconsistent(object):
+ def test_is_valid_im_int_type(self):
+ # Tests is_valid_im(R) with integer type.
+ R = np.asarray([[0, 1, 3.0, 2],
+ [3, 2, 4.0, 3]], dtype=int)
+ assert_(is_valid_im(R) == False)
+ assert_raises(TypeError, is_valid_im, R, throw=True)
+
+ def test_is_valid_im_various_size(self):
+ for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
+ (1, 4, True), (2, 4, True)]:
+ self.check_is_valid_im_various_size(nrow, ncol, valid)
+
+ def check_is_valid_im_various_size(self, nrow, ncol, valid):
+ # Tests is_valid_im(R) with inconsistency matrices of various sizes
+ R = np.asarray([[0, 1, 3.0, 2, 5],
+ [3, 2, 4.0, 3, 3]], dtype=np.double)
+ R = R[:nrow, :ncol]
+ assert_(is_valid_im(R) == valid)
+ if not valid:
+ assert_raises(ValueError, is_valid_im, R, throw=True)
+
+ def test_is_valid_im_empty(self):
+ # Tests is_valid_im(R) with empty inconsistency matrix.
+ R = np.zeros((0, 4), dtype=np.double)
+ assert_(is_valid_im(R) == False)
+ assert_raises(ValueError, is_valid_im, R, throw=True)
+
+ def test_is_valid_im_4_and_up(self):
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+ # (step size 3).
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ R = inconsistent(Z)
+ assert_(is_valid_im(R) == True)
+
+ def test_is_valid_im_4_and_up_neg_index_left(self):
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+ # (step size 3) with negative link height means.
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ R = inconsistent(Z)
+ R[i//2,0] = -2.0
+ assert_(is_valid_im(R) == False)
+ assert_raises(ValueError, is_valid_im, R, throw=True)
+
+ def test_is_valid_im_4_and_up_neg_index_right(self):
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+ # (step size 3) with negative link height standard deviations.
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ R = inconsistent(Z)
+ R[i//2,1] = -2.0
+ assert_(is_valid_im(R) == False)
+ assert_raises(ValueError, is_valid_im, R, throw=True)
+
+ def test_is_valid_im_4_and_up_neg_dist(self):
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+ # (step size 3) with negative link counts.
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ R = inconsistent(Z)
+ R[i//2,2] = -0.5
+ assert_(is_valid_im(R) == False)
+ assert_raises(ValueError, is_valid_im, R, throw=True)
+
+
+class TestNumObsLinkage(object):
+ def test_num_obs_linkage_empty(self):
+ # Tests num_obs_linkage(Z) with empty linkage.
+ Z = np.zeros((0, 4), dtype=np.double)
+ assert_raises(ValueError, num_obs_linkage, Z)
+
+ def test_num_obs_linkage_1x4(self):
+ # Tests num_obs_linkage(Z) on linkage over 2 observations.
+ Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
+ assert_equal(num_obs_linkage(Z), 2)
+
+ def test_num_obs_linkage_2x4(self):
+ # Tests num_obs_linkage(Z) on linkage over 3 observations.
+ Z = np.asarray([[0, 1, 3.0, 2],
+ [3, 2, 4.0, 3]], dtype=np.double)
+ assert_equal(num_obs_linkage(Z), 3)
+
+ def test_num_obs_linkage_4_and_up(self):
+ # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
+ # 4 and 15 (step size 3).
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ assert_equal(num_obs_linkage(Z), i)
+
+
+class TestLeavesList(object):
+ def test_leaves_list_1x4(self):
+ # Tests leaves_list(Z) on a 1x4 linkage.
+ Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
+ to_tree(Z)
+ assert_equal(leaves_list(Z), [0, 1])
+
+ def test_leaves_list_2x4(self):
+ # Tests leaves_list(Z) on a 2x4 linkage.
+ Z = np.asarray([[0, 1, 3.0, 2],
+ [3, 2, 4.0, 3]], dtype=np.double)
+ to_tree(Z)
+ assert_equal(leaves_list(Z), [0, 1, 2])
+
+ def test_leaves_list_Q(self):
+ for method in ['single', 'complete', 'average', 'weighted', 'centroid',
+ 'median', 'ward']:
+ self.check_leaves_list_Q(method)
+
+ def check_leaves_list_Q(self, method):
+ # Tests leaves_list(Z) on the Q data set
+ X = hierarchy_test_data.Q_X
+ Z = linkage(X, method)
+ node = to_tree(Z)
+ assert_equal(node.pre_order(), leaves_list(Z))
+
+ def test_Q_subtree_pre_order(self):
+ # Tests that pre_order() works when called on sub-trees.
+ X = hierarchy_test_data.Q_X
+ Z = linkage(X, 'single')
+ node = to_tree(Z)
+ assert_equal(node.pre_order(), (node.get_left().pre_order()
+ + node.get_right().pre_order()))
+
+
+class TestCorrespond(object):
+ def test_correspond_empty(self):
+ # Tests correspond(Z, y) with empty linkage and condensed distance matrix.
+ y = np.zeros((0,))
+ Z = np.zeros((0,4))
+ assert_raises(ValueError, correspond, Z, y)
+
+ def test_correspond_2_and_up(self):
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
+ # different sizes.
+ for i in range(2, 4):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ assert_(correspond(Z, y))
+ for i in range(4, 15, 3):
+ y = np.random.rand(i*(i-1)//2)
+ Z = linkage(y)
+ assert_(correspond(Z, y))
+
+ def test_correspond_4_and_up(self):
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
+ # different sizes. Correspondence should be false.
+ for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
+ list(zip(list(range(3, 5)), list(range(2, 4))))):
+ y = np.random.rand(i*(i-1)//2)
+ y2 = np.random.rand(j*(j-1)//2)
+ Z = linkage(y)
+ Z2 = linkage(y2)
+ assert_equal(correspond(Z, y2), False)
+ assert_equal(correspond(Z2, y), False)
+
+ def test_correspond_4_and_up_2(self):
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
+ # different sizes. Correspondence should be false.
+ for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
+ list(zip(list(range(2, 7)), list(range(16, 21))))):
+ y = np.random.rand(i*(i-1)//2)
+ y2 = np.random.rand(j*(j-1)//2)
+ Z = linkage(y)
+ Z2 = linkage(y2)
+ assert_equal(correspond(Z, y2), False)
+ assert_equal(correspond(Z2, y), False)
+
+ def test_num_obs_linkage_multi_matrix(self):
+ # Tests num_obs_linkage with observation matrices of multiple sizes.
+ for n in range(2, 10):
+ X = np.random.rand(n, 4)
+ Y = pdist(X)
+ Z = linkage(Y)
+ assert_equal(num_obs_linkage(Z), n)
+
+
+class TestIsMonotonic(object):
+ def test_is_monotonic_empty(self):
+ # Tests is_monotonic(Z) on an empty linkage.
+ Z = np.zeros((0, 4))
+ assert_raises(ValueError, is_monotonic, Z)
+
+ def test_is_monotonic_1x4(self):
+ # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
+ Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
+ assert_equal(is_monotonic(Z), True)
+
+ def test_is_monotonic_2x4_T(self):
+ # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
+ Z = np.asarray([[0, 1, 0.3, 2],
+ [2, 3, 0.4, 3]], dtype=np.double)
+ assert_equal(is_monotonic(Z), True)
+
+ def test_is_monotonic_2x4_F(self):
+ # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
+ Z = np.asarray([[0, 1, 0.4, 2],
+ [2, 3, 0.3, 3]], dtype=np.double)
+ assert_equal(is_monotonic(Z), False)
+
+ def test_is_monotonic_3x4_T(self):
+ # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
+ Z = np.asarray([[0, 1, 0.3, 2],
+ [2, 3, 0.4, 2],
+ [4, 5, 0.6, 4]], dtype=np.double)
+ assert_equal(is_monotonic(Z), True)
+
+ def test_is_monotonic_3x4_F1(self):
+ # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
+ Z = np.asarray([[0, 1, 0.3, 2],
+ [2, 3, 0.2, 2],
+ [4, 5, 0.6, 4]], dtype=np.double)
+ assert_equal(is_monotonic(Z), False)
+
+ def test_is_monotonic_3x4_F2(self):
+ # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
+ Z = np.asarray([[0, 1, 0.8, 2],
+ [2, 3, 0.4, 2],
+ [4, 5, 0.6, 4]], dtype=np.double)
+ assert_equal(is_monotonic(Z), False)
+
+ def test_is_monotonic_3x4_F3(self):
+ # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
+ Z = np.asarray([[0, 1, 0.3, 2],
+ [2, 3, 0.4, 2],
+ [4, 5, 0.2, 4]], dtype=np.double)
+ assert_equal(is_monotonic(Z), False)
+
+ def test_is_monotonic_tdist_linkage1(self):
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
+ # tdist data set. Expecting True.
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+ assert_equal(is_monotonic(Z), True)
+
+ def test_is_monotonic_tdist_linkage2(self):
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
+ # tdist data set. Perturbing. Expecting False.
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+ Z[2,2] = 0.0
+ assert_equal(is_monotonic(Z), False)
+
+ def test_is_monotonic_Q_linkage(self):
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
+ # Q data set. Expecting True.
+ X = hierarchy_test_data.Q_X
+ Z = linkage(X, 'single')
+ assert_equal(is_monotonic(Z), True)
+
+
+class TestMaxDists(object):
+ def test_maxdists_empty_linkage(self):
+ # Tests maxdists(Z) on empty linkage. Expecting exception.
+ Z = np.zeros((0, 4), dtype=np.double)
+ assert_raises(ValueError, maxdists, Z)
+
+ def test_maxdists_one_cluster_linkage(self):
+ # Tests maxdists(Z) on linkage with one cluster.
+ Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+ MD = maxdists(Z)
+ expectedMD = calculate_maximum_distances(Z)
+ assert_allclose(MD, expectedMD, atol=1e-15)
+
+ def test_maxdists_Q_linkage(self):
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+ self.check_maxdists_Q_linkage(method)
+
+ def check_maxdists_Q_linkage(self, method):
+ # Tests maxdists(Z) on the Q data set
+ X = hierarchy_test_data.Q_X
+ Z = linkage(X, method)
+ MD = maxdists(Z)
+ expectedMD = calculate_maximum_distances(Z)
+ assert_allclose(MD, expectedMD, atol=1e-15)
+
+
+class TestMaxInconsts(object):
+ def test_maxinconsts_empty_linkage(self):
+ # Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
+ Z = np.zeros((0, 4), dtype=np.double)
+ R = np.zeros((0, 4), dtype=np.double)
+ assert_raises(ValueError, maxinconsts, Z, R)
+
+ def test_maxinconsts_difrow_linkage(self):
+ # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
+ # different numbers of clusters. Expecting exception.
+ Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+ R = np.random.rand(2, 4)
+ assert_raises(ValueError, maxinconsts, Z, R)
+
+ def test_maxinconsts_one_cluster_linkage(self):
+ # Tests maxinconsts(Z, R) on linkage with one cluster.
+ Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+ R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
+ MD = maxinconsts(Z, R)
+ expectedMD = calculate_maximum_inconsistencies(Z, R)
+ assert_allclose(MD, expectedMD, atol=1e-15)
+
+ def test_maxinconsts_Q_linkage(self):
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+ self.check_maxinconsts_Q_linkage(method)
+
+ def check_maxinconsts_Q_linkage(self, method):
+ # Tests maxinconsts(Z, R) on the Q data set
+ X = hierarchy_test_data.Q_X
+ Z = linkage(X, method)
+ R = inconsistent(Z)
+ MD = maxinconsts(Z, R)
+ expectedMD = calculate_maximum_inconsistencies(Z, R)
+ assert_allclose(MD, expectedMD, atol=1e-15)
+
+
+class TestMaxRStat(object):
+ def test_maxRstat_invalid_index(self):
+ for i in [3.3, -1, 4]:
+ self.check_maxRstat_invalid_index(i)
+
+ def check_maxRstat_invalid_index(self, i):
+ # Tests maxRstat(Z, R, i). Expecting exception.
+ Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+ R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
+ if isinstance(i, int):
+ assert_raises(ValueError, maxRstat, Z, R, i)
+ else:
+ assert_raises(TypeError, maxRstat, Z, R, i)
+
+ def test_maxRstat_empty_linkage(self):
+ for i in range(4):
+ self.check_maxRstat_empty_linkage(i)
+
+ def check_maxRstat_empty_linkage(self, i):
+ # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
+ Z = np.zeros((0, 4), dtype=np.double)
+ R = np.zeros((0, 4), dtype=np.double)
+ assert_raises(ValueError, maxRstat, Z, R, i)
+
+ def test_maxRstat_difrow_linkage(self):
+ for i in range(4):
+ self.check_maxRstat_difrow_linkage(i)
+
+ def check_maxRstat_difrow_linkage(self, i):
+ # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
+ # different numbers of clusters. Expecting exception.
+ Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+ R = np.random.rand(2, 4)
+ assert_raises(ValueError, maxRstat, Z, R, i)
+
+ def test_maxRstat_one_cluster_linkage(self):
+ for i in range(4):
+ self.check_maxRstat_one_cluster_linkage(i)
+
+ def check_maxRstat_one_cluster_linkage(self, i):
+ # Tests maxRstat(Z, R, i) on linkage with one cluster.
+ Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+ R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
+ MD = maxRstat(Z, R, 1)
+ expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
+ assert_allclose(MD, expectedMD, atol=1e-15)
+
+ def test_maxRstat_Q_linkage(self):
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+ for i in range(4):
+ self.check_maxRstat_Q_linkage(method, i)
+
+ def check_maxRstat_Q_linkage(self, method, i):
+ # Tests maxRstat(Z, R, i) on the Q data set
+ X = hierarchy_test_data.Q_X
+ Z = linkage(X, method)
+ R = inconsistent(Z)
+ MD = maxRstat(Z, R, 1)
+ expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
+ assert_allclose(MD, expectedMD, atol=1e-15)
+
+
+class TestDendrogram(object):
+ def test_dendrogram_single_linkage_tdist(self):
+ # Tests dendrogram calculation on single linkage of the tdist data set.
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+ R = dendrogram(Z, no_plot=True)
+ leaves = R["leaves"]
+ assert_equal(leaves, [2, 5, 1, 0, 3, 4])
+
+ def test_valid_orientation(self):
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+ assert_raises(ValueError, dendrogram, Z, orientation="foo")
+
+ def test_labels_as_array_or_list(self):
+ # test for gh-12418
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+ labels = np.array([1, 3, 2, 6, 4, 5])
+ result1 = dendrogram(Z, labels=labels, no_plot=True)
+ result2 = dendrogram(Z, labels=labels.tolist(), no_plot=True)
+ assert result1 == result2
+
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+ def test_valid_label_size(self):
+ link = np.array([
+ [0, 1, 1.0, 4],
+ [2, 3, 1.0, 5],
+ [4, 5, 2.0, 6],
+ ])
+ plt.figure()
+ with pytest.raises(ValueError) as exc_info:
+ dendrogram(link, labels=list(range(100)))
+ assert "Dimensions of Z and labels must be consistent."\
+ in str(exc_info.value)
+
+ with pytest.raises(
+ ValueError,
+ match="Dimensions of Z and labels must be consistent."):
+ dendrogram(link, labels=[])
+
+ plt.close()
+
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+ def test_dendrogram_plot(self):
+ for orientation in ['top', 'bottom', 'left', 'right']:
+ self.check_dendrogram_plot(orientation)
+
+ def check_dendrogram_plot(self, orientation):
+ # Tests dendrogram plotting.
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+ expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
+ 'dcoord': [[0.0, 138.0, 138.0, 0.0],
+ [0.0, 219.0, 219.0, 0.0],
+ [0.0, 255.0, 255.0, 219.0],
+ [0.0, 268.0, 268.0, 255.0],
+ [138.0, 295.0, 295.0, 268.0]],
+ 'icoord': [[5.0, 5.0, 15.0, 15.0],
+ [45.0, 45.0, 55.0, 55.0],
+ [35.0, 35.0, 50.0, 50.0],
+ [25.0, 25.0, 42.5, 42.5],
+ [10.0, 10.0, 33.75, 33.75]],
+ 'ivl': ['2', '5', '1', '0', '3', '4'],
+ 'leaves': [2, 5, 1, 0, 3, 4],
+ 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
+ }
+
+ fig = plt.figure()
+ ax = fig.add_subplot(221)
+
+ # test that dendrogram accepts ax keyword
+ R1 = dendrogram(Z, ax=ax, orientation=orientation)
+ assert_equal(R1, expected)
+
+ # test that dendrogram accepts and handle the leaf_font_size and
+ # leaf_rotation keywords
+ dendrogram(Z, ax=ax, orientation=orientation,
+ leaf_font_size=20, leaf_rotation=90)
+ testlabel = (
+ ax.get_xticklabels()[0]
+ if orientation in ['top', 'bottom']
+ else ax.get_yticklabels()[0]
+ )
+ assert_equal(testlabel.get_rotation(), 90)
+ assert_equal(testlabel.get_size(), 20)
+ dendrogram(Z, ax=ax, orientation=orientation,
+ leaf_rotation=90)
+ testlabel = (
+ ax.get_xticklabels()[0]
+ if orientation in ['top', 'bottom']
+ else ax.get_yticklabels()[0]
+ )
+ assert_equal(testlabel.get_rotation(), 90)
+ dendrogram(Z, ax=ax, orientation=orientation,
+ leaf_font_size=20)
+ testlabel = (
+ ax.get_xticklabels()[0]
+ if orientation in ['top', 'bottom']
+ else ax.get_yticklabels()[0]
+ )
+ assert_equal(testlabel.get_size(), 20)
+ plt.close()
+
+ # test plotting to gca (will import pylab)
+ R2 = dendrogram(Z, orientation=orientation)
+ plt.close()
+ assert_equal(R2, expected)
+
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+ def test_dendrogram_truncate_mode(self):
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+
+ R = dendrogram(Z, 2, 'lastp', show_contracted=True)
+ plt.close()
+ assert_equal(R, {'color_list': ['C0'],
+ 'dcoord': [[0.0, 295.0, 295.0, 0.0]],
+ 'icoord': [[5.0, 5.0, 15.0, 15.0]],
+ 'ivl': ['(2)', '(4)'],
+ 'leaves': [6, 9],
+ 'leaves_color_list': ['C0', 'C0'],
+ })
+
+ R = dendrogram(Z, 2, 'mtica', show_contracted=True)
+ plt.close()
+ assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
+ 'dcoord': [[0.0, 138.0, 138.0, 0.0],
+ [0.0, 255.0, 255.0, 0.0],
+ [0.0, 268.0, 268.0, 255.0],
+ [138.0, 295.0, 295.0, 268.0]],
+ 'icoord': [[5.0, 5.0, 15.0, 15.0],
+ [35.0, 35.0, 45.0, 45.0],
+ [25.0, 25.0, 40.0, 40.0],
+ [10.0, 10.0, 32.5, 32.5]],
+ 'ivl': ['2', '5', '1', '0', '(2)'],
+ 'leaves': [2, 5, 1, 0, 7],
+ 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
+ })
+
+ def test_dendrogram_colors(self):
+ # Tests dendrogram plots with alternate colors
+ Z = linkage(hierarchy_test_data.ytdist, 'single')
+
+ set_link_color_palette(['c', 'm', 'y', 'k'])
+ R = dendrogram(Z, no_plot=True,
+ above_threshold_color='g', color_threshold=250)
+ set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
+
+ color_list = R['color_list']
+ assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
+
+ # reset color palette (global list)
+ set_link_color_palette(None)
+
+
+def calculate_maximum_distances(Z):
+ # Used for testing correctness of maxdists.
+ n = Z.shape[0] + 1
+ B = np.zeros((n-1,))
+ q = np.zeros((3,))
+ for i in range(0, n - 1):
+ q[:] = 0.0
+ left = Z[i, 0]
+ right = Z[i, 1]
+ if left >= n:
+ q[0] = B[int(left) - n]
+ if right >= n:
+ q[1] = B[int(right) - n]
+ q[2] = Z[i, 2]
+ B[i] = q.max()
+ return B
+
+
+def calculate_maximum_inconsistencies(Z, R, k=3):
+ # Used for testing correctness of maxinconsts.
+ n = Z.shape[0] + 1
+ B = np.zeros((n-1,))
+ q = np.zeros((3,))
+ for i in range(0, n - 1):
+ q[:] = 0.0
+ left = Z[i, 0]
+ right = Z[i, 1]
+ if left >= n:
+ q[0] = B[int(left) - n]
+ if right >= n:
+ q[1] = B[int(right) - n]
+ q[2] = R[i, k]
+ B[i] = q.max()
+ return B
+
+
+def within_tol(a, b, tol):
+ return np.abs(a - b).max() < tol
+
+
+def test_unsupported_uncondensed_distance_matrix_linkage_warning():
+ assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
+
+
+def test_euclidean_linkage_value_error():
+ for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
+ assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
+ method=method, metric='cityblock')
+
+
+def test_2x2_linkage():
+ Z1 = linkage([1], method='single', metric='euclidean')
+ Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
+ assert_allclose(Z1, Z2)
+
+
+def test_node_compare():
+ np.random.seed(23)
+ nobs = 50
+ X = np.random.randn(nobs, 4)
+ Z = scipy.cluster.hierarchy.ward(X)
+ tree = to_tree(Z)
+ assert_(tree > tree.get_left())
+ assert_(tree.get_right() > tree.get_left())
+ assert_(tree.get_right() == tree.get_right())
+ assert_(tree.get_right() != tree.get_left())
+
+
+def test_cut_tree():
+ np.random.seed(23)
+ nobs = 50
+ X = np.random.randn(nobs, 4)
+ Z = scipy.cluster.hierarchy.ward(X)
+ cutree = cut_tree(Z)
+
+ assert_equal(cutree[:, 0], np.arange(nobs))
+ assert_equal(cutree[:, -1], np.zeros(nobs))
+ assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
+
+ assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
+ assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
+ assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
+
+ nodes = _order_cluster_tree(Z)
+ heights = np.array([node.dist for node in nodes])
+
+ assert_equal(cutree[:, np.searchsorted(heights, [5])],
+ cut_tree(Z, height=5))
+ assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
+ cut_tree(Z, height=[5, 10]))
+ assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
+ cut_tree(Z, height=[10, 5]))
+
+
+def test_optimal_leaf_ordering():
+ # test with the distance vector y
+ Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
+ hierarchy_test_data.ytdist)
+ expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
+ assert_allclose(Z, expectedZ, atol=1e-10)
+
+ # test with the observation matrix X
+ Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
+ hierarchy_test_data.X)
+ expectedZ = hierarchy_test_data.linkage_X_ward_olo
+ assert_allclose(Z, expectedZ, atol=1e-06)
+
+
+def test_Heap():
+ values = np.array([2, -1, 0, -1.5, 3])
+ heap = Heap(values)
+
+ pair = heap.get_min()
+ assert_equal(pair['key'], 3)
+ assert_equal(pair['value'], -1.5)
+
+ heap.remove_min()
+ pair = heap.get_min()
+ assert_equal(pair['key'], 1)
+ assert_equal(pair['value'], -1)
+
+ heap.change_value(1, 2.5)
+ pair = heap.get_min()
+ assert_equal(pair['key'], 2)
+ assert_equal(pair['value'], 0)
+
+ heap.remove_min()
+ heap.remove_min()
+
+ heap.change_value(1, 10)
+ pair = heap.get_min()
+ assert_equal(pair['key'], 4)
+ assert_equal(pair['value'], 3)
+
+ heap.remove_min()
+ pair = heap.get_min()
+ assert_equal(pair['key'], 1)
+ assert_equal(pair['value'], 10)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_vq.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_vq.py
new file mode 100644
index 0000000..37eee56
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/tests/test_vq.py
@@ -0,0 +1,311 @@
+
+import warnings
+import sys
+
+import numpy as np
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+ assert_allclose, assert_equal, assert_,
+ suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
+ ClusterError, _krandinit)
+from scipy.cluster import _vq
+from scipy.sparse.sputils import matrix
+
+
+TESTDATA_2D = np.array([
+ -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
+ -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
+ 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
+ -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
+ -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
+ -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
+ 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
+ -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
+ -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
+ -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
+ 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
+ -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
+ 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
+ -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
+ 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
+ -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
+ 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
+ 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
+ -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
+ 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
+ -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
+ -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
+ -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
+ 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
+ -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
+ 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
+ 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
+ -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
+ 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
+ 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
+ -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
+ -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
+ 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
+ -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
+ -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
+ -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
+ 2.11]).reshape((200, 2))
+
+
+# Global data
+X = np.array([[3.0, 3], [4, 3], [4, 2],
+ [9, 2], [5, 1], [6, 2], [9, 4],
+ [5, 2], [5, 4], [7, 4], [6, 5]])
+
+CODET1 = np.array([[3.0000, 3.0000],
+ [6.2000, 4.0000],
+ [5.8000, 1.8000]])
+
+CODET2 = np.array([[11.0/3, 8.0/3],
+ [6.7500, 4.2500],
+ [6.2500, 1.7500]])
+
+LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
+
+
+class TestWhiten(object):
+ def test_whiten(self):
+ desired = np.array([[5.08738849, 2.97091878],
+ [3.19909255, 0.69660580],
+ [4.51041982, 0.02640918],
+ [4.38567074, 0.95120889],
+ [2.32191480, 1.63195503]])
+ for tp in np.array, matrix:
+ obs = tp([[0.98744510, 0.82766775],
+ [0.62093317, 0.19406729],
+ [0.87545741, 0.00735733],
+ [0.85124403, 0.26499712],
+ [0.45067590, 0.45464607]])
+ assert_allclose(whiten(obs), desired, rtol=1e-5)
+
+ def test_whiten_zero_std(self):
+ desired = np.array([[0., 1.0, 2.86666544],
+ [0., 1.0, 1.32460034],
+ [0., 1.0, 3.74382172]])
+ for tp in np.array, matrix:
+ obs = tp([[0., 1., 0.74109533],
+ [0., 1., 0.34243798],
+ [0., 1., 0.96785929]])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_allclose(whiten(obs), desired, rtol=1e-5)
+ assert_equal(len(w), 1)
+ assert_(issubclass(w[-1].category, RuntimeWarning))
+
+ def test_whiten_not_finite(self):
+ for tp in np.array, matrix:
+ for bad_value in np.nan, np.inf, -np.inf:
+ obs = tp([[0.98744510, bad_value],
+ [0.62093317, 0.19406729],
+ [0.87545741, 0.00735733],
+ [0.85124403, 0.26499712],
+ [0.45067590, 0.45464607]])
+ assert_raises(ValueError, whiten, obs)
+
+
+class TestVq(object):
+ def test_py_vq(self):
+ initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+ for tp in np.array, matrix:
+ label1 = py_vq(tp(X), tp(initc))[0]
+ assert_array_equal(label1, LABEL1)
+
+ def test_vq(self):
+ initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+ for tp in np.array, matrix:
+ label1, dist = _vq.vq(tp(X), tp(initc))
+ assert_array_equal(label1, LABEL1)
+ tlabel1, tdist = vq(tp(X), tp(initc))
+
+ def test_vq_1d(self):
+ # Test special rank 1 vq algo, python implementation.
+ data = X[:, 0]
+ initc = data[:3]
+ a, b = _vq.vq(data, initc)
+ ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
+ assert_array_equal(a, ta)
+ assert_array_equal(b, tb)
+
+ def test__vq_sametype(self):
+ a = np.array([1.0, 2.0], dtype=np.float64)
+ b = a.astype(np.float32)
+ assert_raises(TypeError, _vq.vq, a, b)
+
+ def test__vq_invalid_type(self):
+ a = np.array([1, 2], dtype=int)
+ assert_raises(TypeError, _vq.vq, a, a)
+
+ def test_vq_large_nfeat(self):
+ X = np.random.rand(20, 20)
+ code_book = np.random.rand(3, 20)
+
+ codes0, dis0 = _vq.vq(X, code_book)
+ codes1, dis1 = py_vq(X, code_book)
+ assert_allclose(dis0, dis1, 1e-5)
+ assert_array_equal(codes0, codes1)
+
+ X = X.astype(np.float32)
+ code_book = code_book.astype(np.float32)
+
+ codes0, dis0 = _vq.vq(X, code_book)
+ codes1, dis1 = py_vq(X, code_book)
+ assert_allclose(dis0, dis1, 1e-5)
+ assert_array_equal(codes0, codes1)
+
+ def test_vq_large_features(self):
+ X = np.random.rand(10, 5) * 1000000
+ code_book = np.random.rand(2, 5) * 1000000
+
+ codes0, dis0 = _vq.vq(X, code_book)
+ codes1, dis1 = py_vq(X, code_book)
+ assert_allclose(dis0, dis1, 1e-5)
+ assert_array_equal(codes0, codes1)
+
+
+class TestKMean(object):
+ def test_large_features(self):
+ # Generate a data set with large values, and run kmeans on it
+ # (regression test for gh-1077).
+ d = 300
+ n = 100
+
+ m1 = np.random.randn(d)
+ m2 = np.random.randn(d)
+ x = 10000 * np.random.randn(n, d) - 20000 * m1
+ y = 10000 * np.random.randn(n, d) + 20000 * m2
+
+ data = np.empty((x.shape[0] + y.shape[0], d), np.double)
+ data[:x.shape[0]] = x
+ data[x.shape[0]:] = y
+
+ kmeans(data, 2)
+
+ def test_kmeans_simple(self):
+ np.random.seed(54321)
+ initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+ for tp in np.array, matrix:
+ code1 = kmeans(tp(X), tp(initc), iter=1)[0]
+ assert_array_almost_equal(code1, CODET2)
+
+ def test_kmeans_lost_cluster(self):
+ # This will cause kmeans to have a cluster with no points.
+ data = TESTDATA_2D
+ initk = np.array([[-1.8127404, -0.67128041],
+ [2.04621601, 0.07401111],
+ [-2.31149087, -0.05160469]])
+
+ kmeans(data, initk)
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ "One of the clusters is empty. Re-run kmeans with a "
+ "different initialization")
+ kmeans2(data, initk, missing='warn')
+
+ assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
+
+ def test_kmeans2_simple(self):
+ np.random.seed(12345678)
+ initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+ for tp in np.array, matrix:
+ code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
+ code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
+
+ assert_array_almost_equal(code1, CODET1)
+ assert_array_almost_equal(code2, CODET2)
+
+ def test_kmeans2_rank1(self):
+ data = TESTDATA_2D
+ data1 = data[:, 0]
+
+ initc = data1[:3]
+ code = initc.copy()
+ kmeans2(data1, code, iter=1)[0]
+ kmeans2(data1, code, iter=2)[0]
+
+ def test_kmeans2_rank1_2(self):
+ data = TESTDATA_2D
+ data1 = data[:, 0]
+ kmeans2(data1, 2, iter=1)
+
+ def test_kmeans2_high_dim(self):
+ # test kmeans2 when the number of dimensions exceeds the number
+ # of input points
+ data = TESTDATA_2D
+ data = data.reshape((20, 20))[:10]
+ kmeans2(data, 2)
+
+ def test_kmeans2_init(self):
+ np.random.seed(12345)
+ data = TESTDATA_2D
+
+ kmeans2(data, 3, minit='points')
+ kmeans2(data[:, :1], 3, minit='points') # special case (1-D)
+
+ kmeans2(data, 3, minit='++')
+ kmeans2(data[:, :1], 3, minit='++') # special case (1-D)
+
+ # minit='random' can give warnings, filter those
+ with suppress_warnings() as sup:
+ sup.filter(message="One of the clusters is empty. Re-run.")
+ kmeans2(data, 3, minit='random')
+ kmeans2(data[:, :1], 3, minit='random') # special case (1-D)
+
+ @pytest.mark.skipif(sys.platform == 'win32',
+ reason='Fails with MemoryError in Wine.')
+ def test_krandinit(self):
+ data = TESTDATA_2D
+ datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]]
+ k = int(1e6)
+ for data in datas:
+ np.random.seed(1234)
+ init = _krandinit(data, k)
+ orig_cov = np.cov(data, rowvar=0)
+ init_cov = np.cov(init, rowvar=0)
+ assert_allclose(orig_cov, init_cov, atol=1e-2)
+
+ def test_kmeans2_empty(self):
+ # Regression test for gh-1032.
+ assert_raises(ValueError, kmeans2, [], 2)
+
+ def test_kmeans_0k(self):
+ # Regression test for gh-1073: fail when k arg is 0.
+ assert_raises(ValueError, kmeans, X, 0)
+ assert_raises(ValueError, kmeans2, X, 0)
+ assert_raises(ValueError, kmeans2, X, np.array([]))
+
+ def test_kmeans_large_thres(self):
+ # Regression test for gh-1774
+ x = np.array([1, 2, 3, 4, 10], dtype=float)
+ res = kmeans(x, 1, thresh=1e16)
+ assert_allclose(res[0], np.array([4.]))
+ assert_allclose(res[1], 2.3999999999999999)
+
+ def test_kmeans2_kpp_low_dim(self):
+ # Regression test for gh-11462
+ prev_res = np.array([[-1.95266667, 0.898],
+ [-3.153375, 3.3945]])
+ np.random.seed(42)
+ res, _ = kmeans2(TESTDATA_2D, 2, minit='++')
+ assert_allclose(res, prev_res)
+
+ def test_kmeans2_kpp_high_dim(self):
+ # Regression test for gh-11462
+ n_dim = 100
+ size = 10
+ centers = np.vstack([5 * np.ones(n_dim),
+ -5 * np.ones(n_dim)])
+ np.random.seed(42)
+ data = np.vstack([
+ np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
+ np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
+ ])
+ res, _ = kmeans2(data, 2, minit='++')
+ assert_array_almost_equal(res, centers, decimal=0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/cluster/vq.py b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/vq.py
new file mode 100644
index 0000000..cd5ff85
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/cluster/vq.py
@@ -0,0 +1,757 @@
+"""
+K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
+====================================================================
+
+Provides routines for k-means clustering, generating code books
+from k-means models and quantizing vectors by comparing them with
+centroids in a code book.
+
+.. autosummary::
+ :toctree: generated/
+
+ whiten -- Normalize a group of observations so each feature has unit variance
+ vq -- Calculate code book membership of a set of observation vectors
+ kmeans -- Perform k-means on a set of observation vectors forming k clusters
+ kmeans2 -- A different implementation of k-means with more methods
+ -- for initializing centroids
+
+Background information
+----------------------
+The k-means algorithm takes as input the number of clusters to
+generate, k, and a set of observation vectors to cluster. It
+returns a set of centroids, one for each of the k clusters. An
+observation vector is classified with the cluster number or
+centroid index of the centroid closest to it.
+
+A vector v belongs to cluster i if it is closer to centroid i than
+any other centroid. If v belongs to i, we say centroid i is the
+dominating centroid of v. The k-means algorithm tries to
+minimize distortion, which is defined as the sum of the squared distances
+between each observation vector and its dominating centroid.
+The minimization is achieved by iteratively reclassifying
+the observations into clusters and recalculating the centroids until
+a configuration is reached in which the centroids are stable. One can
+also define a maximum number of iterations.
+
+Since vector quantization is a natural application for k-means,
+information theory terminology is often used. The centroid index
+or cluster index is also referred to as a "code" and the table
+mapping codes to centroids and, vice versa, is often referred to as a
+"code book". The result of k-means, a set of centroids, can be
+used to quantize vectors. Quantization aims to find an encoding of
+vectors that reduces the expected distortion.
+
+All routines expect obs to be an M by N array, where the rows are
+the observation vectors. The codebook is a k by N array, where the
+ith row is the centroid of code word i. The observation vectors
+and centroids have the same feature dimension.
+
+As an example, suppose we wish to compress a 24-bit color image
+(each pixel is represented by one byte for red, one for blue, and
+one for green) before sending it over the web. By using a smaller
+8-bit encoding, we can reduce the amount of data by two
+thirds. Ideally, the colors for each of the 256 possible 8-bit
+encoding values should be chosen to minimize distortion of the
+color. Running k-means with k=256 generates a code book of 256
+codes, which fills up all possible 8-bit sequences. Instead of
+sending a 3-byte value for each pixel, the 8-bit centroid index
+(or code word) of the dominating centroid is transmitted. The code
+book is also sent over the wire so each 8-bit code can be
+translated back to a 24-bit pixel value representation. If the
+image of interest was of an ocean, we would expect many 24-bit
+blues to be represented by 8-bit codes. If it was an image of a
+human face, more flesh-tone colors would be represented in the
+code book.
+
+"""
+import warnings
+import numpy as np
+from collections import deque
+from scipy._lib._util import _asarray_validated
+from scipy.spatial.distance import cdist
+
+from . import _vq
+
+__docformat__ = 'restructuredtext'
+
+__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
+
+
+class ClusterError(Exception):
+ pass
+
+
+def whiten(obs, check_finite=True):
+ """
+ Normalize a group of observations on a per feature basis.
+
+ Before running k-means, it is beneficial to rescale each feature
+ dimension of the observation set by its standard deviation (i.e. "whiten"
+ it - as in "white noise" where each frequency has equal power).
+ Each feature is divided by its standard deviation across all observations
+ to give it unit variance.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Each row of the array is an observation. The
+ columns are the features seen during each observation.
+
+ >>> # f0 f1 f2
+ >>> obs = [[ 1., 1., 1.], #o0
+ ... [ 2., 2., 2.], #o1
+ ... [ 3., 3., 3.], #o2
+ ... [ 4., 4., 4.]] #o3
+
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ result : ndarray
+ Contains the values in `obs` scaled by the standard deviation
+ of each column.
+
+ Examples
+ --------
+ >>> from scipy.cluster.vq import whiten
+ >>> features = np.array([[1.9, 2.3, 1.7],
+ ... [1.5, 2.5, 2.2],
+ ... [0.8, 0.6, 1.7,]])
+ >>> whiten(features)
+ array([[ 4.17944278, 2.69811351, 7.21248917],
+ [ 3.29956009, 2.93273208, 9.33380951],
+ [ 1.75976538, 0.7038557 , 7.21248917]])
+
+ """
+ obs = _asarray_validated(obs, check_finite=check_finite)
+ std_dev = obs.std(axis=0)
+ zero_std_mask = std_dev == 0
+ if zero_std_mask.any():
+ std_dev[zero_std_mask] = 1.0
+ warnings.warn("Some columns have standard deviation zero. "
+ "The values of these columns will not change.",
+ RuntimeWarning)
+ return obs / std_dev
+
+
+def vq(obs, code_book, check_finite=True):
+ """
+ Assign codes from a code book to observations.
+
+ Assigns a code from a code book to each observation. Each
+ observation vector in the 'M' by 'N' `obs` array is compared with the
+ centroids in the code book and assigned the code of the closest
+ centroid.
+
+ The features in `obs` should have unit variance, which can be
+ achieved by passing them through the whiten function. The code
+ book can be created with the k-means algorithm or a different
+ encoding algorithm.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Each row of the 'M' x 'N' array is an observation. The columns are
+ the "features" seen during each observation. The features must be
+ whitened first using the whiten function or something equivalent.
+ code_book : ndarray
+ The code book is usually generated using the k-means algorithm.
+ Each row of the array holds a different code, and the columns are
+ the features of the code.
+
+ >>> # f0 f1 f2 f3
+ >>> code_book = [
+ ... [ 1., 2., 3., 4.], #c0
+ ... [ 1., 2., 3., 4.], #c1
+ ... [ 1., 2., 3., 4.]] #c2
+
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ code : ndarray
+ A length M array holding the code book index for each observation.
+ dist : ndarray
+ The distortion (distance) between the observation and its nearest
+ code.
+
+ Examples
+ --------
+ >>> from numpy import array
+ >>> from scipy.cluster.vq import vq
+ >>> code_book = array([[1.,1.,1.],
+ ... [2.,2.,2.]])
+ >>> features = array([[ 1.9,2.3,1.7],
+ ... [ 1.5,2.5,2.2],
+ ... [ 0.8,0.6,1.7]])
+ >>> vq(features,code_book)
+ (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
+
+ """
+ obs = _asarray_validated(obs, check_finite=check_finite)
+ code_book = _asarray_validated(code_book, check_finite=check_finite)
+ ct = np.common_type(obs, code_book)
+
+ c_obs = obs.astype(ct, copy=False)
+ c_code_book = code_book.astype(ct, copy=False)
+
+ if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32):
+ return _vq.vq(c_obs, c_code_book)
+ return py_vq(obs, code_book, check_finite=False)
+
+
+def py_vq(obs, code_book, check_finite=True):
+ """ Python version of vq algorithm.
+
+ The algorithm computes the Euclidean distance between each
+ observation and every frame in the code_book.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Expects a rank 2 array. Each row is one observation.
+ code_book : ndarray
+ Code book to use. Same format as obs. Should have same number of
+ features (e.g., columns) as obs.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ code : ndarray
+ code[i] gives the label of the ith observation; its code is
+ code_book[code[i]].
+ min_dist : ndarray
+ min_dist[i] gives the distance between the ith observation and its
+ corresponding code.
+
+ Notes
+ -----
+ This function is slower than the C version but works for
+ all input types. If the inputs have the wrong types for the
+ C versions of the function, this one is called as a last resort.
+
+ It is about 20 times slower than the C version.
+
+ """
+ obs = _asarray_validated(obs, check_finite=check_finite)
+ code_book = _asarray_validated(code_book, check_finite=check_finite)
+
+ if obs.ndim != code_book.ndim:
+ raise ValueError("Observation and code_book should have the same rank")
+
+ if obs.ndim == 1:
+ obs = obs[:, np.newaxis]
+ code_book = code_book[:, np.newaxis]
+
+ dist = cdist(obs, code_book)
+ code = dist.argmin(axis=1)
+ min_dist = dist[np.arange(len(code)), code]
+ return code, min_dist
+
+
+# py_vq2 was equivalent to py_vq
+py_vq2 = np.deprecate(py_vq, old_name='py_vq2', new_name='py_vq')
+
+
+def _kmeans(obs, guess, thresh=1e-5):
+ """ "raw" version of k-means.
+
+ Returns
+ -------
+ code_book
+ The lowest distortion codebook found.
+ avg_dist
+ The average distance an observation is from a code in the book.
+ Lower means the code_book matches the data better.
+
+ See Also
+ --------
+ kmeans : wrapper around k-means
+
+ Examples
+ --------
+ Note: not whitened in this example.
+
+ >>> from numpy import array
+ >>> from scipy.cluster.vq import _kmeans
+ >>> features = array([[ 1.9,2.3],
+ ... [ 1.5,2.5],
+ ... [ 0.8,0.6],
+ ... [ 0.4,1.8],
+ ... [ 1.0,1.0]])
+ >>> book = array((features[0],features[2]))
+ >>> _kmeans(features,book)
+ (array([[ 1.7 , 2.4 ],
+ [ 0.73333333, 1.13333333]]), 0.40563916697728591)
+
+ """
+
+ code_book = np.asarray(guess)
+ diff = np.inf
+ prev_avg_dists = deque([diff], maxlen=2)
+ while diff > thresh:
+ # compute membership and distances between obs and code_book
+ obs_code, distort = vq(obs, code_book, check_finite=False)
+ prev_avg_dists.append(distort.mean(axis=-1))
+ # recalc code_book as centroids of associated obs
+ code_book, has_members = _vq.update_cluster_means(obs, obs_code,
+ code_book.shape[0])
+ code_book = code_book[has_members]
+ diff = prev_avg_dists[0] - prev_avg_dists[1]
+
+ return code_book, prev_avg_dists[1]
+
+
+def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
+ """
+ Performs k-means on a set of observation vectors forming k clusters.
+
+ The k-means algorithm adjusts the classification of the observations
+ into clusters and updates the cluster centroids until the position of
+ the centroids is stable over successive iterations. In this
+ implementation of the algorithm, the stability of the centroids is
+ determined by comparing the absolute value of the change in the average
+ Euclidean distance between the observations and their corresponding
+ centroids against a threshold. This yields
+ a code book mapping centroids to codes and vice versa.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Each row of the M by N array is an observation vector. The
+ columns are the features seen during each observation.
+ The features must be whitened first with the `whiten` function.
+
+ k_or_guess : int or ndarray
+ The number of centroids to generate. A code is assigned to
+ each centroid, which is also the row index of the centroid
+ in the code_book matrix generated.
+
+ The initial k centroids are chosen by randomly selecting
+ observations from the observation matrix. Alternatively,
+ passing a k by N array specifies the initial k centroids.
+
+ iter : int, optional
+ The number of times to run k-means, returning the codebook
+ with the lowest distortion. This argument is ignored if
+ initial centroids are specified with an array for the
+ ``k_or_guess`` parameter. This parameter does not represent the
+ number of iterations of the k-means algorithm.
+
+ thresh : float, optional
+ Terminates the k-means algorithm if the change in
+ distortion since the last k-means iteration is less than
+ or equal to threshold.
+
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ codebook : ndarray
+ A k by N array of k centroids. The ith centroid
+ codebook[i] is represented with the code i. The centroids
+ and codes generated represent the lowest distortion seen,
+ not necessarily the globally minimal distortion.
+
+ distortion : float
+ The mean (non-squared) Euclidean distance between the observations
+ passed and the centroids generated. Note the difference to the standard
+ definition of distortion in the context of the k-means algorithm, which
+ is the sum of the squared distances.
+
+ See Also
+ --------
+ kmeans2 : a different implementation of k-means clustering
+ with more methods for generating initial centroids but without
+ using a distortion change threshold as a stopping criterion.
+
+ whiten : must be called prior to passing an observation matrix
+ to kmeans.
+
+ Examples
+ --------
+ >>> from numpy import array
+ >>> from scipy.cluster.vq import vq, kmeans, whiten
+ >>> import matplotlib.pyplot as plt
+ >>> features = array([[ 1.9,2.3],
+ ... [ 1.5,2.5],
+ ... [ 0.8,0.6],
+ ... [ 0.4,1.8],
+ ... [ 0.1,0.1],
+ ... [ 0.2,1.8],
+ ... [ 2.0,0.5],
+ ... [ 0.3,1.5],
+ ... [ 1.0,1.0]])
+ >>> whitened = whiten(features)
+ >>> book = np.array((whitened[0],whitened[2]))
+ >>> kmeans(whitened,book)
+ (array([[ 2.3110306 , 2.86287398], # random
+ [ 0.93218041, 1.24398691]]), 0.85684700941625547)
+
+ >>> from numpy import random
+ >>> random.seed((1000,2000))
+ >>> codes = 3
+ >>> kmeans(whitened,codes)
+ (array([[ 2.3110306 , 2.86287398], # random
+ [ 1.32544402, 0.65607529],
+ [ 0.40782893, 2.02786907]]), 0.5196582527686241)
+
+ >>> # Create 50 datapoints in two clusters a and b
+ >>> pts = 50
+ >>> a = np.random.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
+ >>> b = np.random.multivariate_normal([30, 10],
+ ... [[10, 2], [2, 1]],
+ ... size=pts)
+ >>> features = np.concatenate((a, b))
+ >>> # Whiten data
+ >>> whitened = whiten(features)
+ >>> # Find 2 clusters in the data
+ >>> codebook, distortion = kmeans(whitened, 2)
+ >>> # Plot whitened data and cluster centers in red
+ >>> plt.scatter(whitened[:, 0], whitened[:, 1])
+ >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
+ >>> plt.show()
+ """
+ obs = _asarray_validated(obs, check_finite=check_finite)
+ if iter < 1:
+ raise ValueError("iter must be at least 1, got %s" % iter)
+
+ # Determine whether a count (scalar) or an initial guess (array) was passed.
+ if not np.isscalar(k_or_guess):
+ guess = _asarray_validated(k_or_guess, check_finite=check_finite)
+ if guess.size < 1:
+ raise ValueError("Asked for 0 clusters. Initial book was %s" %
+ guess)
+ return _kmeans(obs, guess, thresh=thresh)
+
+ # k_or_guess is a scalar, now verify that it's an integer
+ k = int(k_or_guess)
+ if k != k_or_guess:
+ raise ValueError("If k_or_guess is a scalar, it must be an integer.")
+ if k < 1:
+ raise ValueError("Asked for %d clusters." % k)
+
+ # initialize best distance value to a large value
+ best_dist = np.inf
+ for i in range(iter):
+ # the initial code book is randomly selected from observations
+ guess = _kpoints(obs, k)
+ book, dist = _kmeans(obs, guess, thresh=thresh)
+ if dist < best_dist:
+ best_book = book
+ best_dist = dist
+ return best_book, best_dist
+
+
+def _kpoints(data, k):
+ """Pick k points at random in data (one row = one observation).
+
+ Parameters
+ ----------
+ data : ndarray
+ Expect a rank 1 or 2 array. Rank 1 is assumed to describe one
+ dimensional data, rank 2 multidimensional data, in which case one
+ row is one observation.
+ k : int
+ Number of samples to generate.
+
+ Returns
+ -------
+ x : ndarray
+ A 'k' by 'N' array containing the initial centroids
+
+ """
+ idx = np.random.choice(data.shape[0], size=k, replace=False)
+ return data[idx]
+
+
def _krandinit(data, k):
    """Returns k samples of a random variable whose parameters depend on data.

    More precisely, it returns k observations sampled from a Gaussian random
    variable whose mean and covariances are the ones estimated from the data.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
        data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.

    Returns
    -------
    x : ndarray
        A 'k' by 'N' array containing the initial centroids

    """
    mu = data.mean(axis=0)

    if data.ndim == 1:
        # 1-D data: the "covariance" is a scalar variance, so draw k
        # standard normals and scale by the standard deviation.
        cov = np.cov(data)
        x = np.random.randn(k)
        x *= np.sqrt(cov)
    elif data.shape[1] > data.shape[0]:
        # initialize when the covariance matrix is rank deficient
        # (more dimensions than observations): a Cholesky factorization of
        # the sample covariance would fail, so sample within the row space
        # of the centered data using its (economy-size) SVD instead.
        _, s, vh = np.linalg.svd(data - mu, full_matrices=False)
        x = np.random.randn(k, s.size)
        # s[:, None] * vh / sqrt(n - 1) is a factor F with F.T @ F == cov.
        sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
        x = x.dot(sVh)
    else:
        cov = np.atleast_2d(np.cov(data, rowvar=False))

        # k rows, d cols (one row = one obs)
        # Generate k sample of a random variable ~ Gaussian(mu, cov)
        # via the Cholesky factor: randn @ L.T has covariance L @ L.T = cov.
        x = np.random.randn(k, mu.size)
        x = x.dot(np.linalg.cholesky(cov).T)

    # Shift the zero-mean samples so they are centered on the data mean.
    x += mu
    return x
+
+
+def _kpp(data, k):
+ """ Picks k points in the data based on the kmeans++ method.
+
+ Parameters
+ ----------
+ data : ndarray
+ Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+ data, rank 2 multidimensional data, in which case one
+ row is one observation.
+ k : int
+ Number of samples to generate.
+
+ Returns
+ -------
+ init : ndarray
+ A 'k' by 'N' containing the initial centroids.
+
+ References
+ ----------
+ .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+ careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+ on Discrete Algorithms, 2007.
+ """
+
+ dims = data.shape[1] if len(data.shape) > 1 else 1
+ init = np.ndarray((k, dims))
+
+ for i in range(k):
+ if i == 0:
+ init[i, :] = data[np.random.randint(data.shape[0])]
+
+ else:
+ D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
+ probs = D2/D2.sum()
+ cumprobs = probs.cumsum()
+ r = np.random.rand()
+ init[i, :] = data[np.searchsorted(cumprobs, r)]
+
+ return init
+
+
# Map from the `minit` string accepted by `kmeans2` to the corresponding
# code-book initialization routine ('matrix' is handled separately).
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
+
+
+def _missing_warn():
+ """Print a warning when called."""
+ warnings.warn("One of the clusters is empty. "
+ "Re-run kmeans with a different initialization.")
+
+
def _missing_raise():
    """Signal an empty cluster by raising a ClusterError."""
    msg = ("One of the clusters is empty. "
           "Re-run kmeans with a different initialization.")
    raise ClusterError(msg)
+
+
# Map from the `missing` string accepted by `kmeans2` to the handler invoked
# when an iteration produces an empty cluster.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
+
+
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids. Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
        'M' array of 'M' 1-D observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate. If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is
        interpreted as initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iters parameter to
        the kmeans function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization. Available methods are 'random',
        'points', '++' and 'matrix':

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        'points': choose k observations (rows) at random from data for
        the initial centroids.

        '++': choose k observations accordingly to the kmeans++ method
        (careful seeding)

        'matrix': interpret the k parameter as a k by M (or length k
        array for 1-D data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters. Available methods are
        'warn' and 'raise':

        'warn': give a warning and continue.

        'raise': raise a ClusterError and terminate the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the
        ith observation is closest to.

    See Also
    --------
    kmeans

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
       on Discrete Algorithms, 2007.

    Examples
    --------
    >>> from scipy.cluster.vq import kmeans2
    >>> import matplotlib.pyplot as plt

    Create z, an array with shape (100, 2) containing a mixture of samples
    from three multivariate normal distributions.

    >>> np.random.seed(12345678)
    >>> a = np.random.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
    >>> b = np.random.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
    >>> c = np.random.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
    >>> z = np.concatenate((a, b, c))
    >>> np.random.shuffle(z)

    Compute three clusters.

    >>> centroid, label = kmeans2(z, 3, minit='points')
    >>> centroid
    array([[-0.35770296,  5.31342524],
           [ 2.32210289, -0.50551972],
           [ 6.17653859,  4.16719247]])

    How many points are in each cluster?

    >>> counts = np.bincount(label)
    >>> counts
    array([52, 27, 21])

    Plot the clusters.

    >>> w0 = z[label == 0]
    >>> w1 = z[label == 1]
    >>> w2 = z[label == 2]
    >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
    >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
    >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
    >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
    >>> plt.axis('equal')
    >>> plt.legend(shadow=True)
    >>> plt.show()

    """
    if int(iter) < 1:
        raise ValueError("Invalid iter (%s), "
                         "must be a positive integer." % iter)
    try:
        miss_meth = _valid_miss_meth[missing]
    except KeyError as e:
        raise ValueError("Unknown missing method %r" % (missing,)) from e

    data = _asarray_validated(data, check_finite=check_finite)
    if data.ndim == 1:
        d = 1
    elif data.ndim == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 is not supported.")

    if data.size < 1:
        raise ValueError("Empty input is not supported.")

    # If k is not a single value, it should be compatible with data's shape
    if minit == 'matrix' or not np.isscalar(k):
        # k is the initial code book itself: validate it and use it as-is.
        code_book = np.array(k, copy=True)
        if data.ndim != code_book.ndim:
            raise ValueError("k array doesn't match data rank")
        nc = len(code_book)
        if data.ndim > 1 and code_book.shape[1] != d:
            raise ValueError("k array doesn't match data dimension")
    else:
        # k is a cluster count: validate it, then build the initial code
        # book with the requested initialization method.  These steps must
        # stay inside this branch -- running them for the 'matrix' case
        # would reject 'matrix' as an unknown init method and clobber a
        # user-supplied code book.
        nc = int(k)

        if nc < 1:
            raise ValueError("Cannot ask kmeans2 for %d clusters"
                             " (k was %s)" % (nc, k))
        elif nc != k:
            warnings.warn("k was not an integer, was converted.")

        try:
            init_meth = _valid_init_meth[minit]
        except KeyError as e:
            raise ValueError("Unknown init method %r" % (minit,)) from e
        else:
            code_book = init_meth(data, k)

    for i in range(iter):
        # Compute the nearest neighbor for each obs using the current code book
        label = vq(data, code_book)[0]
        # Update the code book by computing centroids
        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            miss_meth()
            # Set the empty clusters to their previous positions
            new_code_book[~has_members] = code_book[~has_members]
        code_book = new_code_book

    return code_book, label
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/conftest.py b/dem-S-SAR/ISCEApp/_internal/scipy/conftest.py
new file mode 100644
index 0000000..dae9dbc
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/conftest.py
@@ -0,0 +1,55 @@
+# Pytest customization
+import os
+import pytest
+import warnings
+
+from distutils.version import LooseVersion
+import numpy as np
+from scipy._lib._fpumode import get_fpu_mode
+from scipy._lib._testutils import FPUModeChangeWarning
+
+
def pytest_configure(config):
    """Register scipy's custom pytest markers with the test configuration."""
    for marker in (
            "slow: Tests that are very slow.",
            "xslow: mark test as extremely slow (not run unless explicitly requested)",
            "xfail_on_32bit: mark test as failing on 32-bit platforms",
    ):
        config.addinivalue_line("markers", marker)
+
+
def _get_mark(item, name):
    """Return the mark `name` attached to test `item`, or None if absent.

    pytest renamed ``get_marker`` to ``get_closest_marker`` in 3.6.0, so
    pick whichever accessor the installed pytest version provides.
    """
    # NOTE(review): distutils.version.LooseVersion is deprecated since
    # Python 3.10 and distutils is removed in 3.12 -- revisit this version
    # comparison when pytest < 3.6 support can be dropped.
    if LooseVersion(pytest.__version__) >= LooseVersion("3.6.0"):
        mark = item.get_closest_marker(name)
    else:
        mark = item.get_marker(name)
    return mark
+
+
def pytest_runtest_setup(item):
    """Skip or xfail a test based on scipy's custom markers.

    * ``xslow``: skipped unless the SCIPY_XSLOW environment variable is set
      to a non-zero integer.
    * ``xfail_on_32bit``: expected failure on 32-bit platforms.
    """
    mark = _get_mark(item, "xslow")
    if mark is not None:
        try:
            v = int(os.environ.get('SCIPY_XSLOW', '0'))
        except ValueError:
            # A non-integer SCIPY_XSLOW value counts as "not enabled".
            v = False
        if not v:
            pytest.skip("very slow test; set environment variable SCIPY_XSLOW=1 to run it")
    mark = _get_mark(item, 'xfail_on_32bit')
    # np.intp is pointer-sized, so itemsize < 8 identifies a 32-bit platform.
    if mark is not None and np.intp(0).itemsize < 8:
        pytest.xfail('Fails on our 32-bit test platform(s): %s' % (mark.args[0],))
+
+
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """
    Check FPU mode was not changed during the test.

    Autouse fixture: records the FPU mode before each test runs and warns
    (FPUModeChangeWarning) if the mode differs afterwards, which indicates
    the test changed floating-point state without restoring it.
    """
    old_mode = get_fpu_mode()
    # Everything before the yield runs at test setup; everything after runs
    # at teardown, once the test body has finished.
    yield
    new_mode = get_fpu_mode()

    if old_mode != new_mode:
        warnings.warn("FPU mode changed from {0:#x} to {1:#x} during "
                      "the test".format(old_mode, new_mode),
                      category=FPUModeChangeWarning, stacklevel=0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/__init__.py
new file mode 100644
index 0000000..562d7fd
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/constants/__init__.py
@@ -0,0 +1,338 @@
+r"""
+==================================
+Constants (:mod:`scipy.constants`)
+==================================
+
+.. currentmodule:: scipy.constants
+
+Physical and mathematical constants and units.
+
+
+Mathematical constants
+======================
+
+================ =================================================================
+``pi`` Pi
+``golden`` Golden ratio
+``golden_ratio`` Golden ratio
+================ =================================================================
+
+
+Physical constants
+==================
+
+=========================== =================================================================
+``c`` speed of light in vacuum
+``speed_of_light`` speed of light in vacuum
+``mu_0`` the magnetic constant :math:`\mu_0`
+``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
+``h`` the Planck constant :math:`h`
+``Planck`` the Planck constant :math:`h`
+``hbar`` :math:`\hbar = h/(2\pi)`
+``G`` Newtonian constant of gravitation
+``gravitational_constant`` Newtonian constant of gravitation
+``g`` standard acceleration of gravity
+``e`` elementary charge
+``elementary_charge`` elementary charge
+``R`` molar gas constant
+``gas_constant`` molar gas constant
+``alpha`` fine-structure constant
+``fine_structure`` fine-structure constant
+``N_A`` Avogadro constant
+``Avogadro`` Avogadro constant
+``k`` Boltzmann constant
+``Boltzmann`` Boltzmann constant
+``sigma`` Stefan-Boltzmann constant :math:`\sigma`
+``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
+``Wien`` Wien displacement law constant
+``Rydberg`` Rydberg constant
+``m_e`` electron mass
+``electron_mass`` electron mass
+``m_p`` proton mass
+``proton_mass`` proton mass
+``m_n`` neutron mass
+``neutron_mass`` neutron mass
+=========================== =================================================================
+
+
+Constants database
+------------------
+
+In addition to the above variables, :mod:`scipy.constants` also contains the
+2018 CODATA recommended values [CODATA2018]_ database containing more physical
+constants.
+
+.. autosummary::
+ :toctree: generated/
+
+ value -- Value in physical_constants indexed by key
+ unit -- Unit in physical_constants indexed by key
+ precision -- Relative precision in physical_constants indexed by key
+ find -- Return list of physical_constant keys with a given string
+ ConstantWarning -- Constant sought not in newest CODATA data set
+
+.. data:: physical_constants
+
+ Dictionary of physical constants, of the format
+ ``physical_constants[name] = (value, unit, uncertainty)``.
+
+Available constants:
+
+====================================================================== ====
+%(constant_names)s
+====================================================================== ====
+
+
+Units
+=====
+
+SI prefixes
+-----------
+
+============ =================================================================
+``yotta`` :math:`10^{24}`
+``zetta`` :math:`10^{21}`
+``exa`` :math:`10^{18}`
+``peta`` :math:`10^{15}`
+``tera`` :math:`10^{12}`
+``giga`` :math:`10^{9}`
+``mega`` :math:`10^{6}`
+``kilo`` :math:`10^{3}`
+``hecto`` :math:`10^{2}`
+``deka`` :math:`10^{1}`
+``deci`` :math:`10^{-1}`
+``centi`` :math:`10^{-2}`
+``milli`` :math:`10^{-3}`
+``micro`` :math:`10^{-6}`
+``nano`` :math:`10^{-9}`
+``pico`` :math:`10^{-12}`
+``femto`` :math:`10^{-15}`
+``atto`` :math:`10^{-18}`
+``zepto`` :math:`10^{-21}`
+============ =================================================================
+
+Binary prefixes
+---------------
+
+============ =================================================================
+``kibi`` :math:`2^{10}`
+``mebi`` :math:`2^{20}`
+``gibi`` :math:`2^{30}`
+``tebi`` :math:`2^{40}`
+``pebi`` :math:`2^{50}`
+``exbi`` :math:`2^{60}`
+``zebi`` :math:`2^{70}`
+``yobi`` :math:`2^{80}`
+============ =================================================================
+
+Mass
+----
+
+================= ============================================================
+``gram`` :math:`10^{-3}` kg
+``metric_ton`` :math:`10^{3}` kg
+``grain`` one grain in kg
+``lb``            one pound (avoirdupois) in kg
+``pound``         one pound (avoirdupois) in kg
+``blob`` one inch version of a slug in kg (added in 1.0.0)
+``slinch`` one inch version of a slug in kg (added in 1.0.0)
+``slug`` one slug in kg (added in 1.0.0)
+``oz`` one ounce in kg
+``ounce`` one ounce in kg
+``stone`` one stone in kg
+``grain`` one grain in kg
+``long_ton`` one long ton in kg
+``short_ton`` one short ton in kg
+``troy_ounce`` one Troy ounce in kg
+``troy_pound`` one Troy pound in kg
+``carat`` one carat in kg
+``m_u`` atomic mass constant (in kg)
+``u`` atomic mass constant (in kg)
+``atomic_mass`` atomic mass constant (in kg)
+================= ============================================================
+
+Angle
+-----
+
+================= ============================================================
+``degree`` degree in radians
+``arcmin`` arc minute in radians
+``arcminute`` arc minute in radians
+``arcsec`` arc second in radians
+``arcsecond`` arc second in radians
+================= ============================================================
+
+
+Time
+----
+
+================= ============================================================
+``minute`` one minute in seconds
+``hour`` one hour in seconds
+``day`` one day in seconds
+``week`` one week in seconds
+``year`` one year (365 days) in seconds
+``Julian_year`` one Julian year (365.25 days) in seconds
+================= ============================================================
+
+
+Length
+------
+
+===================== ============================================================
+``inch`` one inch in meters
+``foot`` one foot in meters
+``yard`` one yard in meters
+``mile`` one mile in meters
+``mil`` one mil in meters
+``pt`` one point in meters
+``point`` one point in meters
+``survey_foot`` one survey foot in meters
+``survey_mile`` one survey mile in meters
+``nautical_mile`` one nautical mile in meters
+``fermi`` one Fermi in meters
+``angstrom`` one Angstrom in meters
+``micron`` one micron in meters
+``au`` one astronomical unit in meters
+``astronomical_unit`` one astronomical unit in meters
+``light_year`` one light year in meters
+``parsec`` one parsec in meters
+===================== ============================================================
+
+Pressure
+--------
+
+================= ============================================================
+``atm`` standard atmosphere in pascals
+``atmosphere`` standard atmosphere in pascals
+``bar`` one bar in pascals
+``torr`` one torr (mmHg) in pascals
+``mmHg`` one torr (mmHg) in pascals
+``psi`` one psi in pascals
+================= ============================================================
+
+Area
+----
+
+================= ============================================================
+``hectare`` one hectare in square meters
+``acre`` one acre in square meters
+================= ============================================================
+
+
+Volume
+------
+
+=================== ========================================================
+``liter`` one liter in cubic meters
+``litre`` one liter in cubic meters
+``gallon`` one gallon (US) in cubic meters
+``gallon_US`` one gallon (US) in cubic meters
+``gallon_imp`` one gallon (UK) in cubic meters
+``fluid_ounce`` one fluid ounce (US) in cubic meters
+``fluid_ounce_US`` one fluid ounce (US) in cubic meters
+``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
+``bbl`` one barrel in cubic meters
+``barrel`` one barrel in cubic meters
+=================== ========================================================
+
+Speed
+-----
+
+================== ==========================================================
+``kmh`` kilometers per hour in meters per second
+``mph`` miles per hour in meters per second
+``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
+``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
+``knot`` one knot in meters per second
+================== ==========================================================
+
+
+Temperature
+-----------
+
+===================== =======================================================
+``zero_Celsius`` zero of Celsius scale in Kelvin
+``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
+===================== =======================================================
+
+.. autosummary::
+ :toctree: generated/
+
+ convert_temperature
+
+Energy
+------
+
+==================== =======================================================
+``eV`` one electron volt in Joules
+``electron_volt`` one electron volt in Joules
+``calorie`` one calorie (thermochemical) in Joules
+``calorie_th`` one calorie (thermochemical) in Joules
+``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
+``erg`` one erg in Joules
+``Btu`` one British thermal unit (International Steam Table) in Joules
+``Btu_IT`` one British thermal unit (International Steam Table) in Joules
+``Btu_th`` one British thermal unit (thermochemical) in Joules
+``ton_TNT`` one ton of TNT in Joules
+==================== =======================================================
+
+Power
+-----
+
+==================== =======================================================
+``hp`` one horsepower in watts
+``horsepower`` one horsepower in watts
+==================== =======================================================
+
+Force
+-----
+
+==================== =======================================================
+``dyn`` one dyne in newtons
+``dyne`` one dyne in newtons
+``lbf`` one pound force in newtons
+``pound_force`` one pound force in newtons
+``kgf`` one kilogram force in newtons
+``kilogram_force`` one kilogram force in newtons
+==================== =======================================================
+
+Optics
+------
+
+.. autosummary::
+ :toctree: generated/
+
+ lambda2nu
+ nu2lambda
+
+References
+==========
+
+.. [CODATA2018] CODATA Recommended Values of the Fundamental
+ Physical Constants 2018.
+
+ https://physics.nist.gov/cuu/Constants/
+
+"""
# Modules contributed by BasSw (wegwerp@gmail.com)
from .codata import *
from .constants import *
from .codata import _obsolete_constants

# Build the "Available constants" table substituted into the module
# docstring above.  Keying each entry on the lowercased name gives a
# case-insensitive sort; obsolete CODATA entries are excluded.
_constant_names = [(_k.lower(), _k, _v)
                   for _k, _v in physical_constants.items()
                   if _k not in _obsolete_constants]
# Each row: name padded to a 66-character column, then value and unit.
_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])),
                                                 _x[2][0], _x[2][1])
                             for _x in sorted(_constant_names)])
if __doc__:
    # __doc__ is None under `python -OO`; skip the substitution then.
    __doc__ = __doc__ % dict(constant_names=_constant_names)

del _constant_names

__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/codata.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/codata.py
new file mode 100644
index 0000000..0fee777
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/constants/codata.py
@@ -0,0 +1,1754 @@
+# Compiled by Charles Harris, dated October 3, 2002
+# updated to 2002 values by BasSw, 2006
+# Updated to 2006 values by Vincent Davis June 2010
+# Updated to 2014 values by Joseph Booker, 2015
+# Updated to 2018 values by Jakob Jakobson, 2019
+
+"""
+Fundamental Physical Constants
+------------------------------
+
+These constants are taken from CODATA Recommended Values of the Fundamental
+Physical Constants 2018.
+
+Object
+------
+physical_constants : dict
+ A dictionary containing physical constants. Keys are the names of physical
+ constants, values are tuples (value, units, precision).
+
+Functions
+---------
+value(key):
+    Returns the value of the physical constant named by `key`.
+unit(key):
+    Returns the units of the physical constant named by `key`.
+precision(key):
+    Returns the relative precision of the physical constant named by `key`.
+find(sub):
+    Prints or returns a list of the keys containing the string `sub`;
+    by default all keys are considered.
+
+Source
+------
+The values of the constants provided at this site are recommended for
+international use by CODATA and are the latest available. Termed the "2018
+CODATA recommended values," they are generally recognized worldwide for use in
+all fields of science and technology. The values became available on 20 May
+2019 and replaced the 2014 CODATA set. Also available is an introduction to the
+constants for non-experts at
+
+https://physics.nist.gov/cuu/Constants/introduction.html
+
+References
+----------
+Theoretical and experimental publications relevant to the fundamental constants
+and closely related precision measurements published since the mid 1980s, but
+also including many older papers of particular interest, some of which date
+back to the 1800s. To search the bibliography, visit
+
+https://physics.nist.gov/cuu/Constants/
+
+"""
+import warnings
+from math import pi, sqrt
+
+__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find',
+ 'ConstantWarning']
+
+"""
+Source: https://physics.nist.gov/cuu/Constants/
+
+The values of the constants provided at this site are recommended for
+international use by CODATA and are the latest available. Termed the "2018
+CODATA recommended values," they are generally recognized worldwide for use in
+all fields of science and technology. The values became available on 20 May
+2019 and replaced the 2014 CODATA set.
+"""
+
+#
+# Source: https://physics.nist.gov/cuu/Constants/
+#
+
+# The txtYYYY blocks below are the raw CODATA tables, one constant per line,
+# in the whitespace-aligned column layout shown here.  The values are
+# authoritative reference data; do not reformat or re-align these lines --
+# downstream parsing may depend on the exact column layout.
+# Quantity                                             Value                 Uncertainty          Unit
+# ---------------------------------------------------- --------------------- -------------------- -------------
+txt2002 = """\
+Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K
+atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3
+atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m
+atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1
+atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2
+atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1
+atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T
+deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1
+deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3
+deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092
+deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4
+deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045
+deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11
+electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1
+electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1
+electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1
+electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038
+electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85
+electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3
+electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071
+electron to shielded helion magn. moment ratio 864.058 255 0.000 010
+electron-deuteron magn. moment ratio -2143.923 493 0.000 023
+electron-muon magn. moment ratio 206.766 9894 0.000 0054
+electron-neutron magn. moment ratio 960.920 50 0.000 23
+electron-proton magn. moment ratio -658.210 6862 0.000 0066
+magn. constant 12.566 370 614...e-7 0 N A^-2
+magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb
+muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1
+muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3
+muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23
+muon-proton magn. moment ratio -3.183 345 118 0.000 000 089
+neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1
+neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1
+neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1
+neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3
+neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45
+neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16
+neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3
+neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16
+proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1
+proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1
+proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1
+proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3
+proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028
+proton magn. shielding correction 25.689e-6 0.015e-6
+proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34
+shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1
+shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1
+shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1
+shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3
+shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025
+shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012
+shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033
+shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1
+shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1
+shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1
+shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3
+shielded proton magn. moment to nuclear magneton ratio 2.792 775 604 0.000 000 030
+{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m"""
+
+txt2006 = """\
+lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m
+alpha particle-electron mass ratio 7294.299 5365 0.000 0031
+alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg
+alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J
+alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV
+alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u
+alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1
+alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41
+Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m
+atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg
+atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J
+atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV
+atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV
+atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h
+atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz
+atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1
+atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J
+atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K
+atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg
+atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3
+atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s
+atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C
+atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3
+atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A
+atomic unit of electric dipole mom. 8.478 352 81 e-30 0.000 000 21 e-30 C m
+atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1
+atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2
+atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1
+atomic unit of electric potential 27.211 383 86 0.000 000 68 V
+atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2
+atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J
+atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N
+atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m
+atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1
+atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T
+atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2
+atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg
+atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1
+atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1
+atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s
+atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1
+Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1
+Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1
+Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1
+Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1
+Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1
+Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1
+Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m
+Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1
+Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1
+Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1
+Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1
+characteristic impedance of vacuum 376.730 313 461... (exact) ohm
+classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m
+Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m
+Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m
+conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S
+conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
+conventional value of von Klitzing constant 25 812.807 (exact) ohm
+Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m
+deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4
+deuteron-electron mass ratio 3670.482 9654 0.000 0016
+deuteron g factor 0.857 438 2308 0.000 000 0072
+deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1
+deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3
+deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072
+deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg
+deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J
+deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV
+deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u
+deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11
+deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024
+deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22
+deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m
+electric constant 8.854 187 817... e-12 (exact) F m^-1
+electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1
+electron-deuteron mag. mom. ratio -2143.923 498 0.000 018
+electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4
+electron g factor -2.002 319 304 3622 0.000 000 000 0015
+electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1
+electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1
+electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1
+electron mag. mom. anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3
+electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74
+electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80
+electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg
+electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J
+electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV
+electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u
+electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1
+electron-muon mag. mom. ratio 206.766 9877 0.000 0052
+electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3
+electron-neutron mag. mom. ratio 960.920 50 0.000 23
+electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4
+electron-proton mag. mom. ratio -658.210 6848 0.000 0054
+electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4
+electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4
+electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4
+electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
+electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
+electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J
+electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u
+electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h
+electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz
+electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1
+electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J
+electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K
+electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg
+elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C
+elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1
+Faraday constant 96 485.3399 0.0024 C mol^-1
+Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1
+Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2
+fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3
+first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2
+first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1
+hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u
+hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV
+Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J
+Hartree energy in eV 27.211 383 86 0.000 000 68 eV
+hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz
+hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1
+hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J
+hartree-kelvin relationship 3.157 7465 e5 0.000 0055 e5 K
+hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg
+helion-electron mass ratio 5495.885 2765 0.000 0052
+helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg
+helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J
+helion mass energy equivalent in MeV 2808.391 383 0.000 070 MeV
+helion mass in u 3.014 932 2473 0.000 000 0026 u
+helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1
+helion-proton mass ratio 2.993 152 6713 0.000 000 0026
+hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u
+hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV
+hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h
+hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
+hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J
+hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K
+hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg
+inverse fine-structure constant 137.035 999 679 0.000 000 094
+inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u
+inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV
+inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h
+inverse meter-hertz relationship 299 792 458 (exact) Hz
+inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J
+inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K
+inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg
+inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm
+Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1
+joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u
+joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV
+joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h
+joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz
+joule-inverse meter relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1
+joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K
+joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
+kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u
+kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV
+kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h
+kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz
+kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1
+kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J
+kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg
+kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u
+kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV
+kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h
+kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz
+kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1
+kilogram-joule relationship 8.987 551 787... e16 (exact) J
+kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K
+lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m
+Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3
+mag. constant 12.566 370 614... e-7 (exact) N A^-2
+mag. flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb
+molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1
+molar mass constant 1 e-3 (exact) kg mol^-1
+molar mass of carbon-12 12 e-3 (exact) kg mol^-1
+molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1
+molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1
+molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1
+Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m
+muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m
+muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m
+muon-electron mass ratio 206.768 2823 0.000 0052
+muon g factor -2.002 331 8414 0.000 000 0012
+muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1
+muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3
+muon mag. mom. to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3
+muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23
+muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg
+muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J
+muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV
+muon mass in u 0.113 428 9256 0.000 000 0029 u
+muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1
+muon-neutron mass ratio 0.112 454 5167 0.000 000 0029
+muon-proton mag. mom. ratio -3.183 345 137 0.000 000 085
+muon-proton mass ratio 0.112 609 5261 0.000 000 0029
+muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2
+natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s
+natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s
+natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J
+natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV
+natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m
+natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg
+natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1
+natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c
+natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s
+natural unit of velocity 299 792 458 (exact) m s^-1
+neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m
+neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m
+neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
+neutron-electron mass ratio 1838.683 6605 0.000 0011
+neutron g factor -3.826 085 45 0.000 000 90
+neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1
+neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1
+neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1
+neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45
+neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg
+neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J
+neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV
+neutron mass in u 1.008 664 915 97 0.000 000 000 43 u
+neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1
+neutron-muon mass ratio 8.892 484 09 0.000 000 23
+neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
+neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46
+neutron-tau mass ratio 0.528 740 0.000 086
+neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
+Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2
+nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1
+nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1
+nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1
+nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1
+nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1
+Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s
+Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s
+Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s
+Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s
+Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm
+Planck length 1.616 252 e-35 0.000 081 e-35 m
+Planck mass 2.176 44 e-8 0.000 11 e-8 kg
+Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV
+Planck temperature 1.416 785 e32 0.000 071 e32 K
+Planck time 5.391 24 e-44 0.000 27 e-44 s
+proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1
+proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m
+proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m
+proton-electron mass ratio 1836.152 672 47 0.000 000 80
+proton g factor 5.585 694 713 0.000 000 046
+proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1
+proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1
+proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1
+proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3
+proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023
+proton mag. shielding correction 25.694 e-6 0.014 e-6
+proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg
+proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J
+proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV
+proton mass in u 1.007 276 466 77 0.000 000 000 10 u
+proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1
+proton-muon mass ratio 8.880 243 39 0.000 000 23
+proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34
+proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46
+proton rms charge radius 0.8768 e-15 0.0069 e-15 m
+proton-tau mass ratio 0.528 012 0.000 086
+quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1
+quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1
+Rydberg constant 10 973 731.568 527 0.000 073 m^-1
+Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz
+Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV
+Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J
+Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044
+Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044
+second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K
+shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1
+shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1
+shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1
+shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025
+shielded helion to proton mag. mom. ratio -0.761 766 558 0.000 000 011
+shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
+shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1
+shielded proton gyromag. ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1
+shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1
+shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030
+speed of light in vacuum 299 792 458 (exact) m s^-1
+standard acceleration of gravity 9.806 65 (exact) m s^-2
+standard atmosphere 101 325 (exact) Pa
+Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4
+tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m
+tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m
+tau-electron mass ratio 3477.48 0.57
+tau mass 3.167 77 e-27 0.000 52 e-27 kg
+tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J
+tau mass energy equivalent in MeV 1776.99 0.29 MeV
+tau mass in u 1.907 68 0.000 31 u
+tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1
+tau-muon mass ratio 16.8183 0.0027
+tau-neutron mass ratio 1.891 29 0.000 31
+tau-proton mass ratio 1.893 90 0.000 31
+Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2
+triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3
+triton-electron mass ratio 5496.921 5269 0.000 0051
+triton g factor 5.957 924 896 0.000 000 076
+triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1
+triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3
+triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038
+triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg
+triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J
+triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV
+triton mass in u 3.015 500 7134 0.000 000 0025 u
+triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1
+triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37
+triton-proton mag. mom. ratio 1.066 639 908 0.000 000 010
+triton-proton mass ratio 2.993 717 0309 0.000 000 0025
+unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg
+von Klitzing constant 25 812.807 557 0.000 018 ohm
+weak mixing angle 0.222 55 0.000 56
+Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1
+Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K"""
+
+txt2010 = """\
+{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m
+alpha particle-electron mass ratio 7294.299 5361 0.000 0029
+alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg
+alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J
+alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV
+alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u
+alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1
+alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36
+Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m
+atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg
+atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J
+atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV
+atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV
+atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h
+atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz
+atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1
+atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J
+atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K
+atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg
+atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3
+atomic unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s
+atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C
+atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3
+atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A
+atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m
+atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1
+atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2
+atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1
+atomic unit of electric potential 27.211 385 05 0.000 000 60 V
+atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2
+atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J
+atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N
+atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m
+atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1
+atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T
+atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2
+atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg
+atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1
+atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1
+atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s
+atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1
+Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1
+Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1
+Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1
+Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1
+Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1
+Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1
+Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m
+Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1
+Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1
+Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1
+Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1
+characteristic impedance of vacuum 376.730 313 461... (exact) ohm
+classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m
+Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m
+Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m
+conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S
+conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
+conventional value of von Klitzing constant 25 812.807 (exact) ohm
+Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m
+deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4
+deuteron-electron mass ratio 3670.482 9652 0.000 0015
+deuteron g factor 0.857 438 2308 0.000 000 0072
+deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1
+deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3
+deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072
+deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg
+deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J
+deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV
+deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u
+deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11
+deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024
+deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18
+deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m
+electric constant 8.854 187 817... e-12 (exact) F m^-1
+electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1
+electron-deuteron mag. mom. ratio -2143.923 498 0.000 018
+electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4
+electron g factor -2.002 319 304 361 53 0.000 000 000 000 53
+electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1
+electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1
+electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4
+electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1
+electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3
+electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27
+electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75
+electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg
+electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J
+electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV
+electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u
+electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1
+electron-muon mag. mom. ratio 206.766 9896 0.000 0052
+electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3
+electron-neutron mag. mom. ratio 960.920 50 0.000 23
+electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4
+electron-proton mag. mom. ratio -658.210 6848 0.000 0054
+electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4
+electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4
+electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4
+electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
+electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
+electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4
+electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J
+electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u
+electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h
+electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz
+electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1
+electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J
+electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K
+electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg
+elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C
+elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1
+Faraday constant 96 485.3365 0.0021 C mol^-1
+Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1
+Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2
+fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3
+first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2
+first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1
+hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u
+hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV
+Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J
+Hartree energy in eV 27.211 385 05 0.000 000 60 eV
+hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 e15 Hz
+hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1
+hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J
+hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K
+hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg
+helion-electron mass ratio 5495.885 2754 0.000 0050
+helion g factor -4.255 250 613 0.000 000 050
+helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1
+helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3
+helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025
+helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg
+helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J
+helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV
+helion mass in u 3.014 932 2468 0.000 000 0025 u
+helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1
+helion-proton mass ratio 2.993 152 6707 0.000 000 0025
+hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u
+hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV
+hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h
+hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
+hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J
+hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K
+hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg
+inverse fine-structure constant 137.035 999 074 0.000 000 044
+inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u
+inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV
+inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h
+inverse meter-hertz relationship 299 792 458 (exact) Hz
+inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J
+inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K
+inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg
+inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm
+Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1
+joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u
+joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV
+joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h
+joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz
+joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1
+joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K
+joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
+kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u
+kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV
+kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h
+kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz
+kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1
+kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J
+kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg
+kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u
+kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV
+kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h
+kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz
+kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1
+kilogram-joule relationship 8.987 551 787... e16 (exact) J
+kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K
+lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m
+Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3
+Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3
+mag. constant 12.566 370 614... e-7 (exact) N A^-2
+mag. flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb
+molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1
+molar mass constant 1 e-3 (exact) kg mol^-1
+molar mass of carbon-12 12 e-3 (exact) kg mol^-1
+molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1
+molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1
+molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1
+Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m
+muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m
+muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m
+muon-electron mass ratio 206.768 2843 0.000 0052
+muon g factor -2.002 331 8418 0.000 000 0013
+muon mag. mom. -4.490 448 07 e-26 0.000 000 15 e-26 J T^-1
+muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3
+muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3
+muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22
+muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg
+muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J
+muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV
+muon mass in u 0.113 428 9267 0.000 000 0029 u
+muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1
+muon-neutron mass ratio 0.112 454 5177 0.000 000 0028
+muon-proton mag. mom. ratio -3.183 345 107 0.000 000 084
+muon-proton mass ratio 0.112 609 5272 0.000 000 0028
+muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2
+natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s
+natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s
+natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J
+natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV
+natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m
+natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg
+natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1
+natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c
+natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s
+natural unit of velocity 299 792 458 (exact) m s^-1
+neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m
+neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m
+neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
+neutron-electron mass ratio 1838.683 6605 0.000 0011
+neutron g factor -3.826 085 45 0.000 000 90
+neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1
+neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1
+neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1
+neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45
+neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg
+neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J
+neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV
+neutron mass in u 1.008 664 916 00 0.000 000 000 43 u
+neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1
+neutron-muon mass ratio 8.892 484 00 0.000 000 22
+neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
+neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30
+neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13
+neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42
+neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45
+neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45
+neutron-tau mass ratio 0.528 790 0.000 048
+neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
+Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2
+nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1
+nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1
+nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1
+nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1
+nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1
+Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s
+Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s
+Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s
+Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s
+Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm
+Planck length 1.616 199 e-35 0.000 097 e-35 m
+Planck mass 2.176 51 e-8 0.000 13 e-8 kg
+Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV
+Planck temperature 1.416 833 e32 0.000 085 e32 K
+Planck time 5.391 06 e-44 0.000 32 e-44 s
+proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1
+proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m
+proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m
+proton-electron mass ratio 1836.152 672 45 0.000 000 75
+proton g factor 5.585 694 713 0.000 000 046
+proton gyromag. ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1
+proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1
+proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1
+proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3
+proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023
+proton mag. shielding correction 25.694 e-6 0.014 e-6
+proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg
+proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J
+proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV
+proton mass in u 1.007 276 466 812 0.000 000 000 090 u
+proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1
+proton-muon mass ratio 8.880 243 31 0.000 000 22
+proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34
+proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45
+proton rms charge radius 0.8775 e-15 0.0051 e-15 m
+proton-tau mass ratio 0.528 063 0.000 048
+quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1
+quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1
+Rydberg constant 10 973 731.568 539 0.000 055 m^-1
+Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz
+Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV
+Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J
+Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023
+Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023
+second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K
+shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1
+shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1
+shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1
+shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025
+shielded helion to proton mag. mom. ratio -0.761 766 558 0.000 000 011
+shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
+shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1
+shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1
+shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1
+shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030
+speed of light in vacuum 299 792 458 (exact) m s^-1
+standard acceleration of gravity 9.806 65 (exact) m s^-2
+standard atmosphere 101 325 (exact) Pa
+standard-state pressure 100 000 (exact) Pa
+Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4
+tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m
+tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m
+tau-electron mass ratio 3477.15 0.31
+tau mass 3.167 47 e-27 0.000 29 e-27 kg
+tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J
+tau mass energy equivalent in MeV 1776.82 0.16 MeV
+tau mass in u 1.907 49 0.000 17 u
+tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1
+tau-muon mass ratio 16.8167 0.0015
+tau-neutron mass ratio 1.891 11 0.000 17
+tau-proton mass ratio 1.893 72 0.000 17
+Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2
+triton-electron mass ratio 5496.921 5267 0.000 0050
+triton g factor 5.957 924 896 0.000 000 076
+triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1
+triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3
+triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038
+triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg
+triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J
+triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV
+triton mass in u 3.015 500 7134 0.000 000 0025 u
+triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1
+triton-proton mass ratio 2.993 717 0308 0.000 000 0025
+unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg
+von Klitzing constant 25 812.807 4434 0.000 0084 ohm
+weak mixing angle 0.2223 0.0021
+Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1
+Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K"""
+
+txt2014 = """\
+{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m
+alpha particle-electron mass ratio 7294.299 541 36 0.000 000 24
+alpha particle mass 6.644 657 230 e-27 0.000 000 082 e-27 kg
+alpha particle mass energy equivalent 5.971 920 097 e-10 0.000 000 073 e-10 J
+alpha particle mass energy equivalent in MeV 3727.379 378 0.000 023 MeV
+alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u
+alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1
+alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 36
+Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m
+atomic mass constant 1.660 539 040 e-27 0.000 000 020 e-27 kg
+atomic mass constant energy equivalent 1.492 418 062 e-10 0.000 000 018 e-10 J
+atomic mass constant energy equivalent in MeV 931.494 0954 0.000 0057 MeV
+atomic mass unit-electron volt relationship 931.494 0954 e6 0.000 0057 e6 eV
+atomic mass unit-hartree relationship 3.423 177 6902 e7 0.000 000 0016 e7 E_h
+atomic mass unit-hertz relationship 2.252 342 7206 e23 0.000 000 0010 e23 Hz
+atomic mass unit-inverse meter relationship 7.513 006 6166 e14 0.000 000 0034 e14 m^-1
+atomic mass unit-joule relationship 1.492 418 062 e-10 0.000 000 018 e-10 J
+atomic mass unit-kelvin relationship 1.080 954 38 e13 0.000 000 62 e13 K
+atomic mass unit-kilogram relationship 1.660 539 040 e-27 0.000 000 020 e-27 kg
+atomic unit of 1st hyperpolarizability 3.206 361 329 e-53 0.000 000 020 e-53 C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability 6.235 380 085 e-65 0.000 000 077 e-65 C^4 m^4 J^-3
+atomic unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s
+atomic unit of charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C
+atomic unit of charge density 1.081 202 3770 e12 0.000 000 0067 e12 C m^-3
+atomic unit of current 6.623 618 183 e-3 0.000 000 041 e-3 A
+atomic unit of electric dipole mom. 8.478 353 552 e-30 0.000 000 052 e-30 C m
+atomic unit of electric field 5.142 206 707 e11 0.000 000 032 e11 V m^-1
+atomic unit of electric field gradient 9.717 362 356 e21 0.000 000 060 e21 V m^-2
+atomic unit of electric polarizability 1.648 777 2731 e-41 0.000 000 0011 e-41 C^2 m^2 J^-1
+atomic unit of electric potential 27.211 386 02 0.000 000 17 V
+atomic unit of electric quadrupole mom. 4.486 551 484 e-40 0.000 000 028 e-40 C m^2
+atomic unit of energy 4.359 744 650 e-18 0.000 000 054 e-18 J
+atomic unit of force 8.238 723 36 e-8 0.000 000 10 e-8 N
+atomic unit of length 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m
+atomic unit of mag. dipole mom. 1.854 801 999 e-23 0.000 000 011 e-23 J T^-1
+atomic unit of mag. flux density 2.350 517 550 e5 0.000 000 014 e5 T
+atomic unit of magnetizability 7.891 036 5886 e-29 0.000 000 0090 e-29 J T^-2
+atomic unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg
+atomic unit of mom.um 1.992 851 882 e-24 0.000 000 024 e-24 kg m s^-1
+atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1
+atomic unit of time 2.418 884 326509e-17 0.000 000 000014e-17 s
+atomic unit of velocity 2.187 691 262 77 e6 0.000 000 000 50 e6 m s^-1
+Avogadro constant 6.022 140 857 e23 0.000 000 074 e23 mol^-1
+Bohr magneton 927.400 9994 e-26 0.000 0057 e-26 J T^-1
+Bohr magneton in eV/T 5.788 381 8012 e-5 0.000 000 0026 e-5 eV T^-1
+Bohr magneton in Hz/T 13.996 245 042 e9 0.000 000 086 e9 Hz T^-1
+Bohr magneton in inverse meters per tesla 46.686 448 14 0.000 000 29 m^-1 T^-1
+Bohr magneton in K/T 0.671 714 05 0.000 000 39 K T^-1
+Bohr radius 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m
+Boltzmann constant 1.380 648 52 e-23 0.000 000 79 e-23 J K^-1
+Boltzmann constant in eV/K 8.617 3303 e-5 0.000 0050 e-5 eV K^-1
+Boltzmann constant in Hz/K 2.083 6612 e10 0.000 0012 e10 Hz K^-1
+Boltzmann constant in inverse meters per kelvin 69.503 457 0.000 040 m^-1 K^-1
+characteristic impedance of vacuum 376.730 313 461... (exact) ohm
+classical electron radius 2.817 940 3227 e-15 0.000 000 0019 e-15 m
+Compton wavelength 2.426 310 2367 e-12 0.000 000 0011 e-12 m
+Compton wavelength over 2 pi 386.159 267 64 e-15 0.000 000 18 e-15 m
+conductance quantum 7.748 091 7310 e-5 0.000 000 0018 e-5 S
+conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
+conventional value of von Klitzing constant 25 812.807 (exact) ohm
+Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m
+deuteron-electron mag. mom. ratio -4.664 345 535 e-4 0.000 000 026 e-4
+deuteron-electron mass ratio 3670.482 967 85 0.000 000 13
+deuteron g factor 0.857 438 2311 0.000 000 0048
+deuteron mag. mom. 0.433 073 5040 e-26 0.000 000 0036 e-26 J T^-1
+deuteron mag. mom. to Bohr magneton ratio 0.466 975 4554 e-3 0.000 000 0026 e-3
+deuteron mag. mom. to nuclear magneton ratio 0.857 438 2311 0.000 000 0048
+deuteron mass 3.343 583 719 e-27 0.000 000 041 e-27 kg
+deuteron mass energy equivalent 3.005 063 183 e-10 0.000 000 037 e-10 J
+deuteron mass energy equivalent in MeV 1875.612 928 0.000 012 MeV
+deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u
+deuteron molar mass 2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11
+deuteron-proton mag. mom. ratio 0.307 012 2077 0.000 000 0015
+deuteron-proton mass ratio 1.999 007 500 87 0.000 000 000 19
+deuteron rms charge radius 2.1413 e-15 0.0025 e-15 m
+electric constant 8.854 187 817... e-12 (exact) F m^-1
+electron charge to mass quotient -1.758 820 024 e11 0.000 000 011 e11 C kg^-1
+electron-deuteron mag. mom. ratio -2143.923 499 0.000 012
+electron-deuteron mass ratio 2.724 437 107 484 e-4 0.000 000 000 096 e-4
+electron g factor -2.002 319 304 361 82 0.000 000 000 000 52
+electron gyromag. ratio 1.760 859 644 e11 0.000 000 011 e11 s^-1 T^-1
+electron gyromag. ratio over 2 pi 28 024.951 64 0.000 17 MHz T^-1
+electron-helion mass ratio 1.819 543 074 854 e-4 0.000 000 000 088 e-4
+electron mag. mom. -928.476 4620 e-26 0.000 0057 e-26 J T^-1
+electron mag. mom. anomaly 1.159 652 180 91 e-3 0.000 000 000 26 e-3
+electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 91 0.000 000 000 000 26
+electron mag. mom. to nuclear magneton ratio -1838.281 972 34 0.000 000 17
+electron mass 9.109 383 56 e-31 0.000 000 11 e-31 kg
+electron mass energy equivalent 8.187 105 65 e-14 0.000 000 10 e-14 J
+electron mass energy equivalent in MeV 0.510 998 9461 0.000 000 0031 MeV
+electron mass in u 5.485 799 090 70 e-4 0.000 000 000 16 e-4 u
+electron molar mass 5.485 799 090 70 e-7 0.000 000 000 16 e-7 kg mol^-1
+electron-muon mag. mom. ratio 206.766 9880 0.000 0046
+electron-muon mass ratio 4.836 331 70 e-3 0.000 000 11 e-3
+electron-neutron mag. mom. ratio 960.920 50 0.000 23
+electron-neutron mass ratio 5.438 673 4428 e-4 0.000 000 0027 e-4
+electron-proton mag. mom. ratio -658.210 6866 0.000 0020
+electron-proton mass ratio 5.446 170 213 52 e-4 0.000 000 000 52 e-4
+electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4
+electron to alpha particle mass ratio 1.370 933 554 798 e-4 0.000 000 000 045 e-4
+electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
+electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
+electron-triton mass ratio 1.819 200 062 203 e-4 0.000 000 000 084 e-4
+electron volt 1.602 176 6208 e-19 0.000 000 0098 e-19 J
+electron volt-atomic mass unit relationship 1.073 544 1105 e-9 0.000 000 0066 e-9 u
+electron volt-hartree relationship 3.674 932 248 e-2 0.000 000 023 e-2 E_h
+electron volt-hertz relationship 2.417 989 262 e14 0.000 000 015 e14 Hz
+electron volt-inverse meter relationship 8.065 544 005 e5 0.000 000 050 e5 m^-1
+electron volt-joule relationship 1.602 176 6208 e-19 0.000 000 0098 e-19 J
+electron volt-kelvin relationship 1.160 452 21 e4 0.000 000 67 e4 K
+electron volt-kilogram relationship 1.782 661 907 e-36 0.000 000 011 e-36 kg
+elementary charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C
+elementary charge over h 2.417 989 262 e14 0.000 000 015 e14 A J^-1
+Faraday constant 96 485.332 89 0.000 59 C mol^-1
+Faraday constant for conventional electric current 96 485.3251 0.0012 C_90 mol^-1
+Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2
+fine-structure constant 7.297 352 5664 e-3 0.000 000 0017 e-3
+first radiation constant 3.741 771 790 e-16 0.000 000 046 e-16 W m^2
+first radiation constant for spectral radiance 1.191 042 953 e-16 0.000 000 015 e-16 W m^2 sr^-1
+hartree-atomic mass unit relationship 2.921 262 3197 e-8 0.000 000 0013 e-8 u
+hartree-electron volt relationship 27.211 386 02 0.000 000 17 eV
+Hartree energy 4.359 744 650 e-18 0.000 000 054 e-18 J
+Hartree energy in eV 27.211 386 02 0.000 000 17 eV
+hartree-hertz relationship 6.579 683 920 711 e15 0.000 000 000 039 e15 Hz
+hartree-inverse meter relationship 2.194 746 313 702 e7 0.000 000 000 013 e7 m^-1
+hartree-joule relationship 4.359 744 650 e-18 0.000 000 054 e-18 J
+hartree-kelvin relationship 3.157 7513 e5 0.000 0018 e5 K
+hartree-kilogram relationship 4.850 870 129 e-35 0.000 000 060 e-35 kg
+helion-electron mass ratio 5495.885 279 22 0.000 000 27
+helion g factor -4.255 250 616 0.000 000 050
+helion mag. mom. -1.074 617 522 e-26 0.000 000 014 e-26 J T^-1
+helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3
+helion mag. mom. to nuclear magneton ratio -2.127 625 308 0.000 000 025
+helion mass 5.006 412 700 e-27 0.000 000 062 e-27 kg
+helion mass energy equivalent 4.499 539 341 e-10 0.000 000 055 e-10 J
+helion mass energy equivalent in MeV 2808.391 586 0.000 017 MeV
+helion mass in u 3.014 932 246 73 0.000 000 000 12 u
+helion molar mass 3.014 932 246 73 e-3 0.000 000 000 12 e-3 kg mol^-1
+helion-proton mass ratio 2.993 152 670 46 0.000 000 000 29
+hertz-atomic mass unit relationship 4.439 821 6616 e-24 0.000 000 0020 e-24 u
+hertz-electron volt relationship 4.135 667 662 e-15 0.000 000 025 e-15 eV
+hertz-hartree relationship 1.5198298460088 e-16 0.0000000000090e-16 E_h
+hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
+hertz-joule relationship 6.626 070 040 e-34 0.000 000 081 e-34 J
+hertz-kelvin relationship 4.799 2447 e-11 0.000 0028 e-11 K
+hertz-kilogram relationship 7.372 497 201 e-51 0.000 000 091 e-51 kg
+inverse fine-structure constant 137.035 999 139 0.000 000 031
+inverse meter-atomic mass unit relationship 1.331 025 049 00 e-15 0.000 000 000 61 e-15 u
+inverse meter-electron volt relationship 1.239 841 9739 e-6 0.000 000 0076 e-6 eV
+inverse meter-hartree relationship 4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h
+inverse meter-hertz relationship 299 792 458 (exact) Hz
+inverse meter-joule relationship 1.986 445 824 e-25 0.000 000 024 e-25 J
+inverse meter-kelvin relationship 1.438 777 36 e-2 0.000 000 83 e-2 K
+inverse meter-kilogram relationship 2.210 219 057 e-42 0.000 000 027 e-42 kg
+inverse of conductance quantum 12 906.403 7278 0.000 0029 ohm
+Josephson constant 483 597.8525 e9 0.0030 e9 Hz V^-1
+joule-atomic mass unit relationship 6.700 535 363 e9 0.000 000 082 e9 u
+joule-electron volt relationship 6.241 509 126 e18 0.000 000 038 e18 eV
+joule-hartree relationship 2.293 712 317 e17 0.000 000 028 e17 E_h
+joule-hertz relationship 1.509 190 205 e33 0.000 000 019 e33 Hz
+joule-inverse meter relationship 5.034 116 651 e24 0.000 000 062 e24 m^-1
+joule-kelvin relationship 7.242 9731 e22 0.000 0042 e22 K
+joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
+kelvin-atomic mass unit relationship 9.251 0842 e-14 0.000 0053 e-14 u
+kelvin-electron volt relationship 8.617 3303 e-5 0.000 0050 e-5 eV
+kelvin-hartree relationship 3.166 8105 e-6 0.000 0018 e-6 E_h
+kelvin-hertz relationship 2.083 6612 e10 0.000 0012 e10 Hz
+kelvin-inverse meter relationship 69.503 457 0.000 040 m^-1
+kelvin-joule relationship 1.380 648 52 e-23 0.000 000 79 e-23 J
+kelvin-kilogram relationship 1.536 178 65 e-40 0.000 000 88 e-40 kg
+kilogram-atomic mass unit relationship 6.022 140 857 e26 0.000 000 074 e26 u
+kilogram-electron volt relationship 5.609 588 650 e35 0.000 000 034 e35 eV
+kilogram-hartree relationship 2.061 485 823 e34 0.000 000 025 e34 E_h
+kilogram-hertz relationship 1.356 392 512 e50 0.000 000 017 e50 Hz
+kilogram-inverse meter relationship 4.524 438 411 e41 0.000 000 056 e41 m^-1
+kilogram-joule relationship 8.987 551 787... e16 (exact) J
+kilogram-kelvin relationship 6.509 6595 e39 0.000 0037 e39 K
+lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m
+Loschmidt constant (273.15 K, 100 kPa) 2.651 6467 e25 0.000 0015 e25 m^-3
+Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7811 e25 0.000 0015 e25 m^-3
+mag. constant 12.566 370 614... e-7 (exact) N A^-2
+mag. flux quantum 2.067 833 831 e-15 0.000 000 013 e-15 Wb
+molar gas constant 8.314 4598 0.000 0048 J mol^-1 K^-1
+molar mass constant 1 e-3 (exact) kg mol^-1
+molar mass of carbon-12 12 e-3 (exact) kg mol^-1
+molar Planck constant 3.990 312 7110 e-10 0.000 000 0018 e-10 J s mol^-1
+molar Planck constant times c 0.119 626 565 582 0.000 000 000 054 J m mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa) 22.710 947 e-3 0.000 013 e-3 m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 962 e-3 0.000 013 e-3 m^3 mol^-1
+molar volume of silicon 12.058 832 14 e-6 0.000 000 61 e-6 m^3 mol^-1
+Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m
+muon Compton wavelength 11.734 441 11 e-15 0.000 000 26 e-15 m
+muon Compton wavelength over 2 pi 1.867 594 308 e-15 0.000 000 042 e-15 m
+muon-electron mass ratio 206.768 2826 0.000 0046
+muon g factor -2.002 331 8418 0.000 000 0013
+muon mag. mom. -4.490 448 26 e-26 0.000 000 10 e-26 J T^-1
+muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3
+muon mag. mom. to Bohr magneton ratio -4.841 970 48 e-3 0.000 000 11 e-3
+muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 20
+muon mass 1.883 531 594 e-28 0.000 000 048 e-28 kg
+muon mass energy equivalent 1.692 833 774 e-11 0.000 000 043 e-11 J
+muon mass energy equivalent in MeV 105.658 3745 0.000 0024 MeV
+muon mass in u 0.113 428 9257 0.000 000 0025 u
+muon molar mass 0.113 428 9257 e-3 0.000 000 0025 e-3 kg mol^-1
+muon-neutron mass ratio 0.112 454 5167 0.000 000 0025
+muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071
+muon-proton mass ratio 0.112 609 5262 0.000 000 0025
+muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2
+natural unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s
+natural unit of action in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s
+natural unit of energy 8.187 105 65 e-14 0.000 000 10 e-14 J
+natural unit of energy in MeV 0.510 998 9461 0.000 000 0031 MeV
+natural unit of length 386.159 267 64 e-15 0.000 000 18 e-15 m
+natural unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg
+natural unit of mom.um 2.730 924 488 e-22 0.000 000 034 e-22 kg m s^-1
+natural unit of mom.um in MeV/c 0.510 998 9461 0.000 000 0031 MeV/c
+natural unit of time 1.288 088 667 12 e-21 0.000 000 000 58 e-21 s
+natural unit of velocity 299 792 458 (exact) m s^-1
+neutron Compton wavelength 1.319 590 904 81 e-15 0.000 000 000 88 e-15 m
+neutron Compton wavelength over 2 pi 0.210 019 415 36 e-15 0.000 000 000 14 e-15 m
+neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
+neutron-electron mass ratio 1838.683 661 58 0.000 000 90
+neutron g factor -3.826 085 45 0.000 000 90
+neutron gyromag. ratio 1.832 471 72 e8 0.000 000 43 e8 s^-1 T^-1
+neutron gyromag. ratio over 2 pi 29.164 6933 0.000 0069 MHz T^-1
+neutron mag. mom. -0.966 236 50 e-26 0.000 000 23 e-26 J T^-1
+neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45
+neutron mass 1.674 927 471 e-27 0.000 000 021 e-27 kg
+neutron mass energy equivalent 1.505 349 739 e-10 0.000 000 019 e-10 J
+neutron mass energy equivalent in MeV 939.565 4133 0.000 0058 MeV
+neutron mass in u 1.008 664 915 88 0.000 000 000 49 u
+neutron molar mass 1.008 664 915 88 e-3 0.000 000 000 49 e-3 kg mol^-1
+neutron-muon mass ratio 8.892 484 08 0.000 000 20
+neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
+neutron-proton mass difference 2.305 573 77 e-30 0.000 000 85 e-30
+neutron-proton mass difference energy equivalent 2.072 146 37 e-13 0.000 000 76 e-13
+neutron-proton mass difference energy equivalent in MeV 1.293 332 05 0.000 000 48
+neutron-proton mass difference in u 0.001 388 449 00 0.000 000 000 51
+neutron-proton mass ratio 1.001 378 418 98 0.000 000 000 51
+neutron-tau mass ratio 0.528 790 0.000 048
+neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
+Newtonian constant of gravitation 6.674 08 e-11 0.000 31 e-11 m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c 6.708 61 e-39 0.000 31 e-39 (GeV/c^2)^-2
+nuclear magneton 5.050 783 699 e-27 0.000 000 031 e-27 J T^-1
+nuclear magneton in eV/T 3.152 451 2550 e-8 0.000 000 0015 e-8 eV T^-1
+nuclear magneton in inverse meters per tesla 2.542 623 432 e-2 0.000 000 016 e-2 m^-1 T^-1
+nuclear magneton in K/T 3.658 2690 e-4 0.000 0021 e-4 K T^-1
+nuclear magneton in MHz/T 7.622 593 285 0.000 000 047 MHz T^-1
+Planck constant 6.626 070 040 e-34 0.000 000 081 e-34 J s
+Planck constant in eV s 4.135 667 662 e-15 0.000 000 025 e-15 eV s
+Planck constant over 2 pi 1.054 571 800 e-34 0.000 000 013 e-34 J s
+Planck constant over 2 pi in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s
+Planck constant over 2 pi times c in MeV fm 197.326 9788 0.000 0012 MeV fm
+Planck length 1.616 229 e-35 0.000 038 e-35 m
+Planck mass 2.176 470 e-8 0.000 051 e-8 kg
+Planck mass energy equivalent in GeV 1.220 910 e19 0.000 029 e19 GeV
+Planck temperature 1.416 808 e32 0.000 033 e32 K
+Planck time 5.391 16 e-44 0.000 13 e-44 s
+proton charge to mass quotient 9.578 833 226 e7 0.000 000 059 e7 C kg^-1
+proton Compton wavelength 1.321 409 853 96 e-15 0.000 000 000 61 e-15 m
+proton Compton wavelength over 2 pi 0.210 308910109e-15 0.000 000 000097e-15 m
+proton-electron mass ratio 1836.152 673 89 0.000 000 17
+proton g factor 5.585 694 702 0.000 000 017
+proton gyromag. ratio 2.675 221 900 e8 0.000 000 018 e8 s^-1 T^-1
+proton gyromag. ratio over 2 pi 42.577 478 92 0.000 000 29 MHz T^-1
+proton mag. mom. 1.410 606 7873 e-26 0.000 000 0097 e-26 J T^-1
+proton mag. mom. to Bohr magneton ratio 1.521 032 2053 e-3 0.000 000 0046 e-3
+proton mag. mom. to nuclear magneton ratio 2.792 847 3508 0.000 000 0085
+proton mag. shielding correction 25.691 e-6 0.011 e-6
+proton mass 1.672 621 898 e-27 0.000 000 021 e-27 kg
+proton mass energy equivalent 1.503 277 593 e-10 0.000 000 018 e-10 J
+proton mass energy equivalent in MeV 938.272 0813 0.000 0058 MeV
+proton mass in u 1.007 276 466 879 0.000 000 000 091 u
+proton molar mass 1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1
+proton-muon mass ratio 8.880 243 38 0.000 000 20
+proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34
+proton-neutron mass ratio 0.998 623 478 44 0.000 000 000 51
+proton rms charge radius 0.8751 e-15 0.0061 e-15 m
+proton-tau mass ratio 0.528 063 0.000 048
+quantum of circulation 3.636 947 5486 e-4 0.000 000 0017 e-4 m^2 s^-1
+quantum of circulation times 2 7.273 895 0972 e-4 0.000 000 0033 e-4 m^2 s^-1
+Rydberg constant 10 973 731.568 508 0.000 065 m^-1
+Rydberg constant times c in Hz 3.289 841 960 355 e15 0.000 000 000 019 e15 Hz
+Rydberg constant times hc in eV 13.605 693 009 0.000 000 084 eV
+Rydberg constant times hc in J 2.179 872 325 e-18 0.000 000 027 e-18 J
+Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7084 0.000 0014
+Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8714 0.000 0014
+second radiation constant 1.438 777 36 e-2 0.000 000 83 e-2 m K
+shielded helion gyromag. ratio 2.037 894 585 e8 0.000 000 027 e8 s^-1 T^-1
+shielded helion gyromag. ratio over 2 pi 32.434 099 66 0.000 000 43 MHz T^-1
+shielded helion mag. mom. -1.074 553 080 e-26 0.000 000 014 e-26 J T^-1
+shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio -2.127 497 720 0.000 000 025
+shielded helion to proton mag. mom. ratio -0.761 766 5603 0.000 000 0092
+shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
+shielded proton gyromag. ratio 2.675 153 171 e8 0.000 000 033 e8 s^-1 T^-1
+shielded proton gyromag. ratio over 2 pi 42.576 385 07 0.000 000 53 MHz T^-1
+shielded proton mag. mom. 1.410 570 547 e-26 0.000 000 018 e-26 J T^-1
+shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio 2.792 775 600 0.000 000 030
+speed of light in vacuum 299 792 458 (exact) m s^-1
+standard acceleration of gravity 9.806 65 (exact) m s^-2
+standard atmosphere 101 325 (exact) Pa
+standard-state pressure 100 000 (exact) Pa
+Stefan-Boltzmann constant 5.670 367 e-8 0.000 013 e-8 W m^-2 K^-4
+tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m
+tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m
+tau-electron mass ratio 3477.15 0.31
+tau mass 3.167 47 e-27 0.000 29 e-27 kg
+tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J
+tau mass energy equivalent in MeV 1776.82 0.16 MeV
+tau mass in u 1.907 49 0.000 17 u
+tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1
+tau-muon mass ratio 16.8167 0.0015
+tau-neutron mass ratio 1.891 11 0.000 17
+tau-proton mass ratio 1.893 72 0.000 17
+Thomson cross section 0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2
+triton-electron mass ratio 5496.921 535 88 0.000 000 26
+triton g factor 5.957 924 920 0.000 000 028
+triton mag. mom. 1.504 609 503 e-26 0.000 000 012 e-26 J T^-1
+triton mag. mom. to Bohr magneton ratio 1.622 393 6616 e-3 0.000 000 0076 e-3
+triton mag. mom. to nuclear magneton ratio 2.978 962 460 0.000 000 014
+triton mass 5.007 356 665 e-27 0.000 000 062 e-27 kg
+triton mass energy equivalent 4.500 387 735 e-10 0.000 000 055 e-10 J
+triton mass energy equivalent in MeV 2808.921 112 0.000 017 MeV
+triton mass in u 3.015 500 716 32 0.000 000 000 11 u
+triton molar mass 3.015 500 716 32 e-3 0.000 000 000 11 e-3 kg mol^-1
+triton-proton mass ratio 2.993 717 033 48 0.000 000 000 22
+unified atomic mass unit 1.660 539 040 e-27 0.000 000 020 e-27 kg
+von Klitzing constant 25 812.807 4555 0.000 0059 ohm
+weak mixing angle 0.2223 0.0021
+Wien frequency displacement law constant 5.878 9238 e10 0.000 0034 e10 Hz K^-1
+Wien wavelength displacement law constant 2.897 7729 e-3 0.000 0017 e-3 m K"""
+
+txt2018 = """\
+alpha particle-electron mass ratio 7294.299 541 42 0.000 000 24
+alpha particle mass 6.644 657 3357 e-27 0.000 000 0020 e-27 kg
+alpha particle mass energy equivalent 5.971 920 1914 e-10 0.000 000 0018 e-10 J
+alpha particle mass energy equivalent in MeV 3727.379 4066 0.000 0011 MeV
+alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u
+alpha particle molar mass 4.001 506 1777 e-3 0.000 000 0012 e-3 kg mol^-1
+alpha particle-proton mass ratio 3.972 599 690 09 0.000 000 000 22
+alpha particle relative atomic mass 4.001 506 179 127 0.000 000 000 063
+Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m
+atomic mass constant 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg
+atomic mass constant energy equivalent 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J
+atomic mass constant energy equivalent in MeV 931.494 102 42 0.000 000 28 MeV
+atomic mass unit-electron volt relationship 9.314 941 0242 e8 0.000 000 0028 e8 eV
+atomic mass unit-hartree relationship 3.423 177 6874 e7 0.000 000 0010 e7 E_h
+atomic mass unit-hertz relationship 2.252 342 718 71 e23 0.000 000 000 68 e23 Hz
+atomic mass unit-inverse meter relationship 7.513 006 6104 e14 0.000 000 0023 e14 m^-1
+atomic mass unit-joule relationship 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J
+atomic mass unit-kelvin relationship 1.080 954 019 16 e13 0.000 000 000 33 e13 K
+atomic mass unit-kilogram relationship 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg
+atomic unit of 1st hyperpolarizability 3.206 361 3061 e-53 0.000 000 0015 e-53 C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability 6.235 379 9905 e-65 0.000 000 0038 e-65 C^4 m^4 J^-3
+atomic unit of action 1.054 571 817... e-34 (exact) J s
+atomic unit of charge 1.602 176 634 e-19 (exact) C
+atomic unit of charge density 1.081 202 384 57 e12 0.000 000 000 49 e12 C m^-3
+atomic unit of current 6.623 618 237 510 e-3 0.000 000 000 013 e-3 A
+atomic unit of electric dipole mom. 8.478 353 6255 e-30 0.000 000 0013 e-30 C m
+atomic unit of electric field 5.142 206 747 63 e11 0.000 000 000 78 e11 V m^-1
+atomic unit of electric field gradient 9.717 362 4292 e21 0.000 000 0029 e21 V m^-2
+atomic unit of electric polarizability 1.648 777 274 36 e-41 0.000 000 000 50 e-41 C^2 m^2 J^-1
+atomic unit of electric potential 27.211 386 245 988 0.000 000 000 053 V
+atomic unit of electric quadrupole mom. 4.486 551 5246 e-40 0.000 000 0014 e-40 C m^2
+atomic unit of energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J
+atomic unit of force 8.238 723 4983 e-8 0.000 000 0012 e-8 N
+atomic unit of length 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m
+atomic unit of mag. dipole mom. 1.854 802 015 66 e-23 0.000 000 000 56 e-23 J T^-1
+atomic unit of mag. flux density 2.350 517 567 58 e5 0.000 000 000 71 e5 T
+atomic unit of magnetizability 7.891 036 6008 e-29 0.000 000 0048 e-29 J T^-2
+atomic unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg
+atomic unit of momentum 1.992 851 914 10 e-24 0.000 000 000 30 e-24 kg m s^-1
+atomic unit of permittivity 1.112 650 055 45 e-10 0.000 000 000 17 e-10 F m^-1
+atomic unit of time 2.418 884 326 5857 e-17 0.000 000 000 0047 e-17 s
+atomic unit of velocity 2.187 691 263 64 e6 0.000 000 000 33 e6 m s^-1
+Avogadro constant 6.022 140 76 e23 (exact) mol^-1
+Bohr magneton 9.274 010 0783 e-24 0.000 000 0028 e-24 J T^-1
+Bohr magneton in eV/T 5.788 381 8060 e-5 0.000 000 0017 e-5 eV T^-1
+Bohr magneton in Hz/T 1.399 624 493 61 e10 0.000 000 000 42 e10 Hz T^-1
+Bohr magneton in inverse meter per tesla 46.686 447 783 0.000 000 014 m^-1 T^-1
+Bohr magneton in K/T 0.671 713 815 63 0.000 000 000 20 K T^-1
+Bohr radius 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m
+Boltzmann constant 1.380 649 e-23 (exact) J K^-1
+Boltzmann constant in eV/K 8.617 333 262... e-5 (exact) eV K^-1
+Boltzmann constant in Hz/K 2.083 661 912... e10 (exact) Hz K^-1
+Boltzmann constant in inverse meter per kelvin 69.503 480 04... (exact) m^-1 K^-1
+classical electron radius 2.817 940 3262 e-15 0.000 000 0013 e-15 m
+Compton wavelength 2.426 310 238 67 e-12 0.000 000 000 73 e-12 m
+conductance quantum 7.748 091 729... e-5 (exact) S
+conventional value of ampere-90 1.000 000 088 87... (exact) A
+conventional value of coulomb-90 1.000 000 088 87... (exact) C
+conventional value of farad-90 0.999 999 982 20... (exact) F
+conventional value of henry-90 1.000 000 017 79... (exact) H
+conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
+conventional value of ohm-90 1.000 000 017 79... (exact) ohm
+conventional value of volt-90 1.000 000 106 66... (exact) V
+conventional value of von Klitzing constant 25 812.807 (exact) ohm
+conventional value of watt-90 1.000 000 195 53... (exact) W
+Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m
+deuteron-electron mag. mom. ratio -4.664 345 551 e-4 0.000 000 012 e-4
+deuteron-electron mass ratio 3670.482 967 88 0.000 000 13
+deuteron g factor 0.857 438 2338 0.000 000 0022
+deuteron mag. mom. 4.330 735 094 e-27 0.000 000 011 e-27 J T^-1
+deuteron mag. mom. to Bohr magneton ratio 4.669 754 570 e-4 0.000 000 012 e-4
+deuteron mag. mom. to nuclear magneton ratio 0.857 438 2338 0.000 000 0022
+deuteron mass 3.343 583 7724 e-27 0.000 000 0010 e-27 kg
+deuteron mass energy equivalent 3.005 063 231 02 e-10 0.000 000 000 91 e-10 J
+deuteron mass energy equivalent in MeV 1875.612 942 57 0.000 000 57 MeV
+deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u
+deuteron molar mass 2.013 553 212 05 e-3 0.000 000 000 61 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio -0.448 206 53 0.000 000 11
+deuteron-proton mag. mom. ratio 0.307 012 209 39 0.000 000 000 79
+deuteron-proton mass ratio 1.999 007 501 39 0.000 000 000 11
+deuteron relative atomic mass 2.013 553 212 745 0.000 000 000 040
+deuteron rms charge radius 2.127 99 e-15 0.000 74 e-15 m
+electron charge to mass quotient -1.758 820 010 76 e11 0.000 000 000 53 e11 C kg^-1
+electron-deuteron mag. mom. ratio -2143.923 4915 0.000 0056
+electron-deuteron mass ratio 2.724 437 107 462 e-4 0.000 000 000 096 e-4
+electron g factor -2.002 319 304 362 56 0.000 000 000 000 35
+electron gyromag. ratio 1.760 859 630 23 e11 0.000 000 000 53 e11 s^-1 T^-1
+electron gyromag. ratio in MHz/T 28 024.951 4242 0.000 0085 MHz T^-1
+electron-helion mass ratio 1.819 543 074 573 e-4 0.000 000 000 079 e-4
+electron mag. mom. -9.284 764 7043 e-24 0.000 000 0028 e-24 J T^-1
+electron mag. mom. anomaly 1.159 652 181 28 e-3 0.000 000 000 18 e-3
+electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 28 0.000 000 000 000 18
+electron mag. mom. to nuclear magneton ratio -1838.281 971 88 0.000 000 11
+electron mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg
+electron mass energy equivalent 8.187 105 7769 e-14 0.000 000 0025 e-14 J
+electron mass energy equivalent in MeV 0.510 998 950 00 0.000 000 000 15 MeV
+electron mass in u 5.485 799 090 65 e-4 0.000 000 000 16 e-4 u
+electron molar mass 5.485 799 0888 e-7 0.000 000 0017 e-7 kg mol^-1
+electron-muon mag. mom. ratio 206.766 9883 0.000 0046
+electron-muon mass ratio 4.836 331 69 e-3 0.000 000 11 e-3
+electron-neutron mag. mom. ratio 960.920 50 0.000 23
+electron-neutron mass ratio 5.438 673 4424 e-4 0.000 000 0026 e-4
+electron-proton mag. mom. ratio -658.210 687 89 0.000 000 20
+electron-proton mass ratio 5.446 170 214 87 e-4 0.000 000 000 33 e-4
+electron relative atomic mass 5.485 799 090 65 e-4 0.000 000 000 16 e-4
+electron-tau mass ratio 2.875 85 e-4 0.000 19 e-4
+electron to alpha particle mass ratio 1.370 933 554 787 e-4 0.000 000 000 045 e-4
+electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
+electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
+electron-triton mass ratio 1.819 200 062 251 e-4 0.000 000 000 090 e-4
+electron volt 1.602 176 634 e-19 (exact) J
+electron volt-atomic mass unit relationship 1.073 544 102 33 e-9 0.000 000 000 32 e-9 u
+electron volt-hartree relationship 3.674 932 217 5655 e-2 0.000 000 000 0071 e-2 E_h
+electron volt-hertz relationship 2.417 989 242... e14 (exact) Hz
+electron volt-inverse meter relationship 8.065 543 937... e5 (exact) m^-1
+electron volt-joule relationship 1.602 176 634 e-19 (exact) J
+electron volt-kelvin relationship 1.160 451 812... e4 (exact) K
+electron volt-kilogram relationship 1.782 661 921... e-36 (exact) kg
+elementary charge 1.602 176 634 e-19 (exact) C
+elementary charge over h-bar 1.519 267 447... e15 (exact) A J^-1
+Faraday constant 96 485.332 12... (exact) C mol^-1
+Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2
+fine-structure constant 7.297 352 5693 e-3 0.000 000 0011 e-3
+first radiation constant 3.741 771 852... e-16 (exact) W m^2
+first radiation constant for spectral radiance 1.191 042 972... e-16 (exact) W m^2 sr^-1
+hartree-atomic mass unit relationship 2.921 262 322 05 e-8 0.000 000 000 88 e-8 u
+hartree-electron volt relationship 27.211 386 245 988 0.000 000 000 053 eV
+Hartree energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J
+Hartree energy in eV 27.211 386 245 988 0.000 000 000 053 eV
+hartree-hertz relationship 6.579 683 920 502 e15 0.000 000 000 013 e15 Hz
+hartree-inverse meter relationship 2.194 746 313 6320 e7 0.000 000 000 0043 e7 m^-1
+hartree-joule relationship 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J
+hartree-kelvin relationship 3.157 750 248 0407 e5 0.000 000 000 0061 e5 K
+hartree-kilogram relationship 4.850 870 209 5432 e-35 0.000 000 000 0094 e-35 kg
+helion-electron mass ratio 5495.885 280 07 0.000 000 24
+helion g factor -4.255 250 615 0.000 000 050
+helion mag. mom. -1.074 617 532 e-26 0.000 000 013 e-26 J T^-1
+helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3
+helion mag. mom. to nuclear magneton ratio -2.127 625 307 0.000 000 025
+helion mass 5.006 412 7796 e-27 0.000 000 0015 e-27 kg
+helion mass energy equivalent 4.499 539 4125 e-10 0.000 000 0014 e-10 J
+helion mass energy equivalent in MeV 2808.391 607 43 0.000 000 85 MeV
+helion mass in u 3.014 932 247 175 0.000 000 000 097 u
+helion molar mass 3.014 932 246 13 e-3 0.000 000 000 91 e-3 kg mol^-1
+helion-proton mass ratio 2.993 152 671 67 0.000 000 000 13
+helion relative atomic mass 3.014 932 247 175 0.000 000 000 097
+helion shielding shift 5.996 743 e-5 0.000 010 e-5
+hertz-atomic mass unit relationship 4.439 821 6652 e-24 0.000 000 0013 e-24 u
+hertz-electron volt relationship 4.135 667 696... e-15 (exact) eV
+hertz-hartree relationship 1.519 829 846 0570 e-16 0.000 000 000 0029 e-16 E_h
+hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
+hertz-joule relationship 6.626 070 15 e-34 (exact) J
+hertz-kelvin relationship 4.799 243 073... e-11 (exact) K
+hertz-kilogram relationship 7.372 497 323... e-51 (exact) kg
+hyperfine transition frequency of Cs-133 9 192 631 770 (exact) Hz
+inverse fine-structure constant 137.035 999 084 0.000 000 021
+inverse meter-atomic mass unit relationship 1.331 025 050 10 e-15 0.000 000 000 40 e-15 u
+inverse meter-electron volt relationship 1.239 841 984... e-6 (exact) eV
+inverse meter-hartree relationship 4.556 335 252 9120 e-8 0.000 000 000 0088 e-8 E_h
+inverse meter-hertz relationship 299 792 458 (exact) Hz
+inverse meter-joule relationship 1.986 445 857... e-25 (exact) J
+inverse meter-kelvin relationship 1.438 776 877... e-2 (exact) K
+inverse meter-kilogram relationship 2.210 219 094... e-42 (exact) kg
+inverse of conductance quantum 12 906.403 72... (exact) ohm
+Josephson constant 483 597.848 4... e9 (exact) Hz V^-1
+joule-atomic mass unit relationship 6.700 535 2565 e9 0.000 000 0020 e9 u
+joule-electron volt relationship 6.241 509 074... e18 (exact) eV
+joule-hartree relationship 2.293 712 278 3963 e17 0.000 000 000 0045 e17 E_h
+joule-hertz relationship 1.509 190 179... e33 (exact) Hz
+joule-inverse meter relationship 5.034 116 567... e24 (exact) m^-1
+joule-kelvin relationship 7.242 970 516... e22 (exact) K
+joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
+kelvin-atomic mass unit relationship 9.251 087 3014 e-14 0.000 000 0028 e-14 u
+kelvin-electron volt relationship 8.617 333 262... e-5 (exact) eV
+kelvin-hartree relationship 3.166 811 563 4556 e-6 0.000 000 000 0061 e-6 E_h
+kelvin-hertz relationship 2.083 661 912... e10 (exact) Hz
+kelvin-inverse meter relationship 69.503 480 04... (exact) m^-1
+kelvin-joule relationship 1.380 649 e-23 (exact) J
+kelvin-kilogram relationship 1.536 179 187... e-40 (exact) kg
+kilogram-atomic mass unit relationship 6.022 140 7621 e26 0.000 000 0018 e26 u
+kilogram-electron volt relationship 5.609 588 603... e35 (exact) eV
+kilogram-hartree relationship 2.061 485 788 7409 e34 0.000 000 000 0040 e34 E_h
+kilogram-hertz relationship 1.356 392 489... e50 (exact) Hz
+kilogram-inverse meter relationship 4.524 438 335... e41 (exact) m^-1
+kilogram-joule relationship 8.987 551 787... e16 (exact) J
+kilogram-kelvin relationship 6.509 657 260... e39 (exact) K
+lattice parameter of silicon 5.431 020 511 e-10 0.000 000 089 e-10 m
+lattice spacing of ideal Si (220) 1.920 155 716 e-10 0.000 000 032 e-10 m
+Loschmidt constant (273.15 K, 100 kPa) 2.651 645 804... e25 (exact) m^-3
+Loschmidt constant (273.15 K, 101.325 kPa) 2.686 780 111... e25 (exact) m^-3
+luminous efficacy 683 (exact) lm W^-1
+mag. flux quantum 2.067 833 848... e-15 (exact) Wb
+molar gas constant 8.314 462 618... (exact) J mol^-1 K^-1
+molar mass constant 0.999 999 999 65 e-3 0.000 000 000 30 e-3 kg mol^-1
+molar mass of carbon-12 11.999 999 9958 e-3 0.000 000 0036 e-3 kg mol^-1
+molar Planck constant 3.990 312 712... e-10 (exact) J Hz^-1 mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa) 22.710 954 64... e-3 (exact) m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 969 54... e-3 (exact) m^3 mol^-1
+molar volume of silicon 1.205 883 199 e-5 0.000 000 060 e-5 m^3 mol^-1
+Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m
+muon Compton wavelength 1.173 444 110 e-14 0.000 000 026 e-14 m
+muon-electron mass ratio 206.768 2830 0.000 0046
+muon g factor -2.002 331 8418 0.000 000 0013
+muon mag. mom. -4.490 448 30 e-26 0.000 000 10 e-26 J T^-1
+muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3
+muon mag. mom. to Bohr magneton ratio -4.841 970 47 e-3 0.000 000 11 e-3
+muon mag. mom. to nuclear magneton ratio -8.890 597 03 0.000 000 20
+muon mass 1.883 531 627 e-28 0.000 000 042 e-28 kg
+muon mass energy equivalent 1.692 833 804 e-11 0.000 000 038 e-11 J
+muon mass energy equivalent in MeV 105.658 3755 0.000 0023 MeV
+muon mass in u 0.113 428 9259 0.000 000 0025 u
+muon molar mass 1.134 289 259 e-4 0.000 000 025 e-4 kg mol^-1
+muon-neutron mass ratio 0.112 454 5170 0.000 000 0025
+muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071
+muon-proton mass ratio 0.112 609 5264 0.000 000 0025
+muon-tau mass ratio 5.946 35 e-2 0.000 40 e-2
+natural unit of action 1.054 571 817... e-34 (exact) J s
+natural unit of action in eV s 6.582 119 569... e-16 (exact) eV s
+natural unit of energy 8.187 105 7769 e-14 0.000 000 0025 e-14 J
+natural unit of energy in MeV 0.510 998 950 00 0.000 000 000 15 MeV
+natural unit of length 3.861 592 6796 e-13 0.000 000 0012 e-13 m
+natural unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg
+natural unit of momentum 2.730 924 530 75 e-22 0.000 000 000 82 e-22 kg m s^-1
+natural unit of momentum in MeV/c 0.510 998 950 00 0.000 000 000 15 MeV/c
+natural unit of time 1.288 088 668 19 e-21 0.000 000 000 39 e-21 s
+natural unit of velocity 299 792 458 (exact) m s^-1
+neutron Compton wavelength 1.319 590 905 81 e-15 0.000 000 000 75 e-15 m
+neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
+neutron-electron mass ratio 1838.683 661 73 0.000 000 89
+neutron g factor -3.826 085 45 0.000 000 90
+neutron gyromag. ratio 1.832 471 71 e8 0.000 000 43 e8 s^-1 T^-1
+neutron gyromag. ratio in MHz/T 29.164 6931 0.000 0069 MHz T^-1
+neutron mag. mom. -9.662 3651 e-27 0.000 0023 e-27 J T^-1
+neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45
+neutron mass 1.674 927 498 04 e-27 0.000 000 000 95 e-27 kg
+neutron mass energy equivalent 1.505 349 762 87 e-10 0.000 000 000 86 e-10 J
+neutron mass energy equivalent in MeV 939.565 420 52 0.000 000 54 MeV
+neutron mass in u 1.008 664 915 95 0.000 000 000 49 u
+neutron molar mass 1.008 664 915 60 e-3 0.000 000 000 57 e-3 kg mol^-1
+neutron-muon mass ratio 8.892 484 06 0.000 000 20
+neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
+neutron-proton mass difference 2.305 574 35 e-30 0.000 000 82 e-30 kg
+neutron-proton mass difference energy equivalent 2.072 146 89 e-13 0.000 000 74 e-13 J
+neutron-proton mass difference energy equivalent in MeV 1.293 332 36 0.000 000 46 MeV
+neutron-proton mass difference in u 1.388 449 33 e-3 0.000 000 49 e-3 u
+neutron-proton mass ratio 1.001 378 419 31 0.000 000 000 49
+neutron relative atomic mass 1.008 664 915 95 0.000 000 000 49
+neutron-tau mass ratio 0.528 779 0.000 036
+neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
+Newtonian constant of gravitation 6.674 30 e-11 0.000 15 e-11 m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c 6.708 83 e-39 0.000 15 e-39 (GeV/c^2)^-2
+nuclear magneton 5.050 783 7461 e-27 0.000 000 0015 e-27 J T^-1
+nuclear magneton in eV/T 3.152 451 258 44 e-8 0.000 000 000 96 e-8 eV T^-1
+nuclear magneton in inverse meter per tesla 2.542 623 413 53 e-2 0.000 000 000 78 e-2 m^-1 T^-1
+nuclear magneton in K/T 3.658 267 7756 e-4 0.000 000 0011 e-4 K T^-1
+nuclear magneton in MHz/T 7.622 593 2291 0.000 000 0023 MHz T^-1
+Planck constant 6.626 070 15 e-34 (exact) J Hz^-1
+Planck constant in eV/Hz 4.135 667 696... e-15 (exact) eV Hz^-1
+Planck length 1.616 255 e-35 0.000 018 e-35 m
+Planck mass 2.176 434 e-8 0.000 024 e-8 kg
+Planck mass energy equivalent in GeV 1.220 890 e19 0.000 014 e19 GeV
+Planck temperature 1.416 784 e32 0.000 016 e32 K
+Planck time 5.391 247 e-44 0.000 060 e-44 s
+proton charge to mass quotient 9.578 833 1560 e7 0.000 000 0029 e7 C kg^-1
+proton Compton wavelength 1.321 409 855 39 e-15 0.000 000 000 40 e-15 m
+proton-electron mass ratio 1836.152 673 43 0.000 000 11
+proton g factor 5.585 694 6893 0.000 000 0016
+proton gyromag. ratio 2.675 221 8744 e8 0.000 000 0011 e8 s^-1 T^-1
+proton gyromag. ratio in MHz/T 42.577 478 518 0.000 000 018 MHz T^-1
+proton mag. mom. 1.410 606 797 36 e-26 0.000 000 000 60 e-26 J T^-1
+proton mag. mom. to Bohr magneton ratio 1.521 032 202 30 e-3 0.000 000 000 46 e-3
+proton mag. mom. to nuclear magneton ratio 2.792 847 344 63 0.000 000 000 82
+proton mag. shielding correction 2.5689 e-5 0.0011 e-5
+proton mass 1.672 621 923 69 e-27 0.000 000 000 51 e-27 kg
+proton mass energy equivalent 1.503 277 615 98 e-10 0.000 000 000 46 e-10 J
+proton mass energy equivalent in MeV 938.272 088 16 0.000 000 29 MeV
+proton mass in u 1.007 276 466 621 0.000 000 000 053 u
+proton molar mass 1.007 276 466 27 e-3 0.000 000 000 31 e-3 kg mol^-1
+proton-muon mass ratio 8.880 243 37 0.000 000 20
+proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34
+proton-neutron mass ratio 0.998 623 478 12 0.000 000 000 49
+proton relative atomic mass 1.007 276 466 621 0.000 000 000 053
+proton rms charge radius 8.414 e-16 0.019 e-16 m
+proton-tau mass ratio 0.528 051 0.000 036
+quantum of circulation 3.636 947 5516 e-4 0.000 000 0011 e-4 m^2 s^-1
+quantum of circulation times 2 7.273 895 1032 e-4 0.000 000 0022 e-4 m^2 s^-1
+reduced Compton wavelength 3.861 592 6796 e-13 0.000 000 0012 e-13 m
+reduced muon Compton wavelength 1.867 594 306 e-15 0.000 000 042 e-15 m
+reduced neutron Compton wavelength 2.100 194 1552 e-16 0.000 000 0012 e-16 m
+reduced Planck constant 1.054 571 817... e-34 (exact) J s
+reduced Planck constant in eV s 6.582 119 569... e-16 (exact) eV s
+reduced Planck constant times c in MeV fm 197.326 980 4... (exact) MeV fm
+reduced proton Compton wavelength 2.103 089 103 36 e-16 0.000 000 000 64 e-16 m
+reduced tau Compton wavelength 1.110 538 e-16 0.000 075 e-16 m
+Rydberg constant 10 973 731.568 160 0.000 021 m^-1
+Rydberg constant times c in Hz 3.289 841 960 2508 e15 0.000 000 000 0064 e15 Hz
+Rydberg constant times hc in eV 13.605 693 122 994 0.000 000 000 026 eV
+Rydberg constant times hc in J 2.179 872 361 1035 e-18 0.000 000 000 0042 e-18 J
+Sackur-Tetrode constant (1 K, 100 kPa) -1.151 707 537 06 0.000 000 000 45
+Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 870 523 58 0.000 000 000 45
+second radiation constant 1.438 776 877... e-2 (exact) m K
+shielded helion gyromag. ratio 2.037 894 569 e8 0.000 000 024 e8 s^-1 T^-1
+shielded helion gyromag. ratio in MHz/T 32.434 099 42 0.000 000 38 MHz T^-1
+shielded helion mag. mom. -1.074 553 090 e-26 0.000 000 013 e-26 J T^-1
+shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio -2.127 497 719 0.000 000 025
+shielded helion to proton mag. mom. ratio -0.761 766 5618 0.000 000 0089
+shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
+shielded proton gyromag. ratio 2.675 153 151 e8 0.000 000 029 e8 s^-1 T^-1
+shielded proton gyromag. ratio in MHz/T 42.576 384 74 0.000 000 46 MHz T^-1
+shielded proton mag. mom. 1.410 570 560 e-26 0.000 000 015 e-26 J T^-1
+shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio 2.792 775 599 0.000 000 030
+shielding difference of d and p in HD 2.0200 e-8 0.0020 e-8
+shielding difference of t and p in HT 2.4140 e-8 0.0020 e-8
+speed of light in vacuum 299 792 458 (exact) m s^-1
+standard acceleration of gravity 9.806 65 (exact) m s^-2
+standard atmosphere 101 325 (exact) Pa
+standard-state pressure 100 000 (exact) Pa
+Stefan-Boltzmann constant 5.670 374 419... e-8 (exact) W m^-2 K^-4
+tau Compton wavelength 6.977 71 e-16 0.000 47 e-16 m
+tau-electron mass ratio 3477.23 0.23
+tau energy equivalent 1776.86 0.12 MeV
+tau mass 3.167 54 e-27 0.000 21 e-27 kg
+tau mass energy equivalent 2.846 84 e-10 0.000 19 e-10 J
+tau mass in u 1.907 54 0.000 13 u
+tau molar mass 1.907 54 e-3 0.000 13 e-3 kg mol^-1
+tau-muon mass ratio 16.8170 0.0011
+tau-neutron mass ratio 1.891 15 0.000 13
+tau-proton mass ratio 1.893 76 0.000 13
+Thomson cross section 6.652 458 7321 e-29 0.000 000 0060 e-29 m^2
+triton-electron mass ratio 5496.921 535 73 0.000 000 27
+triton g factor 5.957 924 931 0.000 000 012
+triton mag. mom. 1.504 609 5202 e-26 0.000 000 0030 e-26 J T^-1
+triton mag. mom. to Bohr magneton ratio 1.622 393 6651 e-3 0.000 000 0032 e-3
+triton mag. mom. to nuclear magneton ratio 2.978 962 4656 0.000 000 0059
+triton mass 5.007 356 7446 e-27 0.000 000 0015 e-27 kg
+triton mass energy equivalent 4.500 387 8060 e-10 0.000 000 0014 e-10 J
+triton mass energy equivalent in MeV 2808.921 132 98 0.000 000 85 MeV
+triton mass in u 3.015 500 716 21 0.000 000 000 12 u
+triton molar mass 3.015 500 715 17 e-3 0.000 000 000 92 e-3 kg mol^-1
+triton-proton mass ratio 2.993 717 034 14 0.000 000 000 15
+triton relative atomic mass 3.015 500 716 21 0.000 000 000 12
+triton to proton mag. mom. ratio 1.066 639 9191 0.000 000 0021
+unified atomic mass unit 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg
+vacuum electric permittivity 8.854 187 8128 e-12 0.000 000 0013 e-12 F m^-1
+vacuum mag. permeability 1.256 637 062 12 e-6 0.000 000 000 19 e-6 N A^-2
+von Klitzing constant 25 812.807 45... (exact) ohm
+weak mixing angle 0.222 90 0.000 30
+Wien frequency displacement law constant 5.878 925 757... e10 (exact) Hz K^-1
+Wien wavelength displacement law constant 2.897 771 955... e-3 (exact) m K
+W to Z mass ratio 0.881 53 0.000 17 """
+
# -----------------------------------------------------------------------------

# Master table: constant name -> (value, units, uncertainty).  Filled below
# release-by-release, with later CODATA releases overriding earlier entries.
physical_constants = {}
+
+
def parse_constants_2002to2014(d):
    """Parse a CODATA 2002-2014 fixed-width table into a constants dict.

    Parameters
    ----------
    d : str
        Newline-separated table text.  Columns are: name [0:55),
        value [55:77), uncertainty [77:99), units [99:).

    Returns
    -------
    dict
        Maps constant name to ``(value, units, uncertainty)``;
        an ``(exact)`` uncertainty becomes ``0.0``.
    """
    table = {}
    for row in d.split('\n'):
        # Spaces are digit grouping; '...' marks truncated digits of
        # exactly-defined values — both are stripped before float().
        raw_value = row[55:77].replace(' ', '').replace('...', '')
        raw_uncert = row[77:99].replace(' ', '').replace('(exact)', '0')
        table[row[:55].rstrip()] = (float(raw_value), row[99:].rstrip(),
                                    float(raw_uncert))
    return table
+
def parse_constants_2018toXXXX(d):
    """Parse a CODATA 2018+ fixed-width table into a constants dict.

    Same format as :func:`parse_constants_2002to2014` but with the wider
    columns used from the 2018 release on: name [0:60), value [60:85),
    uncertainty [85:110), units [110:).

    Returns
    -------
    dict
        Maps constant name to ``(value, units, uncertainty)``;
        an ``(exact)`` uncertainty becomes ``0.0``.
    """
    table = {}
    for row in d.split('\n'):
        # Spaces are digit grouping; '...' marks truncated digits of
        # exactly-defined values — both are stripped before float().
        raw_value = row[60:85].replace(' ', '').replace('...', '')
        raw_uncert = row[85:110].replace(' ', '').replace('(exact)', '0')
        table[row[:60].rstrip()] = (float(raw_value), row[110:].rstrip(),
                                    float(raw_uncert))
    return table
+
+
# Parse each CODATA release with the column layout of that vintage of the
# NIST table (2002-2014 share one layout; 2018 widened the columns).
_physical_constants_2002 = parse_constants_2002to2014(txt2002)
_physical_constants_2006 = parse_constants_2002to2014(txt2006)
_physical_constants_2010 = parse_constants_2002to2014(txt2010)
_physical_constants_2014 = parse_constants_2002to2014(txt2014)
_physical_constants_2018 = parse_constants_2018toXXXX(txt2018)


# Merge in chronological order so the newest release wins on duplicate keys,
# while keys dropped by newer releases remain available (marked obsolete below).
physical_constants.update(_physical_constants_2002)
physical_constants.update(_physical_constants_2006)
physical_constants.update(_physical_constants_2010)
physical_constants.update(_physical_constants_2014)
physical_constants.update(_physical_constants_2018)
# The authoritative data set; `find` and obsolescence checks refer to it.
_current_constants = _physical_constants_2018
_current_codata = "CODATA 2018"
+
# Constants present in an older CODATA release but absent from the current one.
_obsolete_constants = {}
for k in physical_constants:
    if k not in _current_constants:
        _obsolete_constants[k] = True

# Aliases bridging spelling changes between releases.
_aliases = {}
# 2002 wrote 'magn.' where later releases write 'mag.'.
for k in _physical_constants_2002:
    if 'magn.' in k:
        _aliases[k] = k.replace('magn.', 'mag.')
# 2006+ spell out 'momentum'; older keys used the 'mom.um' abbreviation.
for _table in (_physical_constants_2006, _physical_constants_2018):
    for k in _table:
        if 'momentum' in k:
            _aliases[k] = k.replace('momentum', 'mom.um')

# CODATA 2018: renamed and no longer exact; use as aliases
_aliases['mag. constant'] = 'vacuum mag. permeability'
_aliases['electric constant'] = 'vacuum electric permittivity'
+
+
class ConstantWarning(DeprecationWarning):
    """Warning emitted when accessing a constant that is no longer part of
    the current CODATA data set."""
+
+
def _check_obsolete(key):
    """Warn with `ConstantWarning` if *key* left the current CODATA set.

    Obsolete keys that still serve as aliases are considered valid and
    do not warn.
    """
    if key not in _obsolete_constants or key in _aliases:
        return
    warnings.warn("Constant '%s' is not in current %s data set" % (
        key, _current_codata), ConstantWarning)
+
+
def value(key):
    """
    Return the value stored in `physical_constants` for *key*.

    Parameters
    ----------
    key : str
        Name of the constant, as listed in `physical_constants`.

    Returns
    -------
    float
        Recommended value of the constant.

    Examples
    --------
    >>> from scipy import constants
    >>> constants.value(u'elementary charge')
    1.602176634e-19

    """
    # Emit a ConstantWarning when the key is obsolete in the current release.
    _check_obsolete(key)
    val, _units, _uncert = physical_constants[key]
    return val
+
+
def unit(key):
    """
    Return the units string stored in `physical_constants` for *key*.

    Parameters
    ----------
    key : str
        Name of the constant, as listed in `physical_constants`.

    Returns
    -------
    str
        Units of the constant, as given in the CODATA table.

    Examples
    --------
    >>> from scipy import constants
    >>> constants.unit(u'proton mass')
    'kg'

    """
    # Emit a ConstantWarning when the key is obsolete in the current release.
    _check_obsolete(key)
    _val, units, _uncert = physical_constants[key]
    return units
+
+
def precision(key):
    """
    Return the relative precision of the constant *key*.

    Parameters
    ----------
    key : str
        Name of the constant, as listed in `physical_constants`.

    Returns
    -------
    float
        Absolute uncertainty divided by the value, i.e. the relative
        standard uncertainty of the constant.

    Examples
    --------
    >>> from scipy import constants
    >>> constants.precision(u'proton mass')
    5.1e-37

    """
    # Emit a ConstantWarning when the key is obsolete in the current release.
    _check_obsolete(key)
    val, _units, uncert = physical_constants[key]
    return uncert / val
+
+
def find(sub=None, disp=False):
    """
    Return a sorted list of physical_constant keys containing a given string.

    Parameters
    ----------
    sub : str, unicode
        Sub-string to search keys for (case-insensitive). By default,
        return all keys.
    disp : bool
        If True, print the keys that are found and return None.
        Otherwise, return the list of keys without printing anything.

    Returns
    -------
    keys : list or None
        If `disp` is False, the sorted list of matching keys is returned.
        Otherwise, None is returned.

    Examples
    --------
    >>> from scipy.constants import find, physical_constants

    Which keys in the ``physical_constants`` dictionary contain 'boltzmann'?

    >>> find('boltzmann')
    ['Boltzmann constant',
     'Boltzmann constant in Hz/K',
     'Boltzmann constant in eV/K',
     'Boltzmann constant in inverse meter per kelvin',
     'Stefan-Boltzmann constant']

    Get the constant called 'Boltzmann constant in Hz/K':

    >>> physical_constants['Boltzmann constant in Hz/K']
    (20836619120.0, 'Hz K^-1', 0.0)

    """
    # Only the current CODATA release is searched — obsolete keys are hidden.
    if sub is None:
        matches = sorted(_current_constants)
    else:
        needle = sub.lower()
        matches = sorted(k for k in _current_constants if needle in k.lower())

    if not disp:
        return matches
    for key in matches:
        print(key)
+
+
# Shorthand copies used when computing the derived exact values below.
c = value('speed of light in vacuum')
mu0 = value('vacuum mag. permeability')
epsilon0 = value('vacuum electric permittivity')

# Table is lacking some digits for exact values: calculate from definition
exact_values = {
    'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0),
    'kilogram-joule relationship': (c * c, 'J', 0.0),
    'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0),

    # The following derived quantities are no longer exact (CODATA2018):
    # specify separately
    # Z0 = sqrt(mu0/epsilon0); its uncertainty is first-order propagation:
    # dZ0 = Z0 * (dmu0/mu0 + depsilon0/epsilon0) / 2
    'characteristic impedance of vacuum': (
        sqrt(mu0 / epsilon0), 'ohm',
        sqrt(mu0 / epsilon0) * 0.5 * (
            physical_constants['vacuum mag. permeability'][2] / mu0
            + physical_constants['vacuum electric permittivity'][2] / epsilon0))
}

# sanity check
# Recomputed values must agree with the parsed table, and values we claim
# exact must also be exact in the table.
for key in exact_values:
    val = physical_constants[key][0]
    if abs(exact_values[key][0] - val) / val > 1e-9:
        raise ValueError("Constants.codata: exact values too far off.")
    if exact_values[key][2] == 0 and physical_constants[key][2] != 0:
        raise ValueError("Constants.codata: value not exact")

physical_constants.update(exact_values)
+
# Keys not in the current data set whose aliases must remain addressable.
# NOTE(review): the name suggests these are exercised by the test-suite —
# confirm against scipy/constants/tests before pruning.
_tested_keys = ['natural unit of velocity',
                'natural unit of action',
                'natural unit of action in eV s',
                'natural unit of mass',
                'natural unit of energy',
                'natural unit of energy in MeV',
                'natural unit of mom.um',
                'natural unit of mom.um in MeV/c',
                'natural unit of length',
                'natural unit of time']

# finally, insert aliases for values
# Iterate over a snapshot (list(...)) because dangling aliases are deleted
# from _aliases while looping.
for k, v in list(_aliases.items()):
    if v in _current_constants or v in _tested_keys:
        physical_constants[k] = physical_constants[v]
    else:
        del _aliases[k]
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/constants.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/constants.py
new file mode 100644
index 0000000..6f61c88
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/constants/constants.py
@@ -0,0 +1,305 @@
+"""
+Collection of physical constants and conversion factors.
+
+Most constants are in SI units, so you can do
+print('10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots')
+
+The list is not meant to be comprehensive, but just convenient for everyday use.
+"""
+"""
+BasSw 2006
+physical constants: imported from CODATA
+unit conversion: see e.g., NIST special publication 811
+Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
+Some constants exist in a few variants, which are marked with suffixes.
+The ones without any suffix should be the most common ones.
+"""
+
+import math as _math
+from .codata import value as _cd
+import numpy as _np
+
# mathematical constants
pi = _math.pi
golden_ratio = (1 + _math.sqrt(5)) / 2  # positive root of x**2 = x + 1
golden = golden_ratio
+
# SI prefixes (decimal, yotta = 10**24 down to yocto = 10**-24)
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
yocto = 1e-24  # was missing; completes the ladder symmetrically with yotta

# binary prefixes (IEC, powers of 2)
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
+
# physical constants
# All values come from the CODATA table via codata.value (SI units).
c = speed_of_light = _cd('speed of light in vacuum')  # m s^-1
mu_0 = _cd('vacuum mag. permeability')  # N A^-2
epsilon_0 = _cd('vacuum electric permittivity')  # F m^-1
h = Planck = _cd('Planck constant')
hbar = h / (2 * pi)  # reduced Planck constant
G = gravitational_constant = _cd('Newtonian constant of gravitation')
g = _cd('standard acceleration of gravity')  # m s^-2
e = elementary_charge = _cd('elementary charge')
R = gas_constant = _cd('molar gas constant')
alpha = fine_structure = _cd('fine-structure constant')
N_A = Avogadro = _cd('Avogadro constant')
k = Boltzmann = _cd('Boltzmann constant')
sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')  # W m^-2 K^-4
Wien = _cd('Wien wavelength displacement law constant')  # m K
Rydberg = _cd('Rydberg constant')
+
# mass in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain # avoirdupois
blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0)
slug = blob / 12 # lbf*s**2/foot (added in 1.0.0)
oz = ounce = pound / 16
stone = 14 * pound
long_ton = 2240 * pound
short_ton = 2000 * pound

troy_ounce = 480 * grain # only for metals / gems
troy_pound = 12 * troy_ounce
carat = 200e-6

# fundamental particle masses from the CODATA table
m_e = electron_mass = _cd('electron mass')
m_p = proton_mass = _cd('proton mass')
m_n = neutron_mass = _cd('neutron mass')
m_u = u = atomic_mass = _cd('atomic mass constant')
+
# angle in rad
degree = pi / 180  # one degree of arc in radians
arcmin = arcminute = degree / 60
arcsec = arcsecond = arcmin / 60
+
# time in second
minute = 60.0
hour = 60.0 * minute
day = 24.0 * hour
week = 7.0 * day
year = 365.0 * day      # common (non-leap) calendar year
Julian_year = 365.25 * day  # astronomical convention
+
# length in meter
inch = 0.0254
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
mil = inch / 1000
pt = point = inch / 72 # typography
survey_foot = 1200.0 / 3937  # US survey definition, 1200/3937 m
survey_mile = 5280 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870700.0
light_year = Julian_year * c  # distance light travels in one Julian year
parsec = au / arcsec  # one au subtending one arcsecond
+
# pressure in pascal
atm = atmosphere = _cd('standard atmosphere')
bar = 1e5
torr = mmHg = atm / 760
psi = pound * g / (inch * inch)  # pound-force per square inch

# area in meter**2
hectare = 1e4
acre = 43560 * foot**2

# volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231 * inch**3 # US
# pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42 * gallon_US # for oil

gallon_imp = 4.54609e-3 # UK
fluid_ounce_imp = gallon_imp / 160

# speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
mach = speed_of_sound = 340.5 # approx value at 15 degrees in 1 atm. Is this a common value?
knot = nautical_mile / hour
+
# temperature in kelvin
zero_Celsius = 273.15           # 0 degrees Celsius expressed in kelvin
degree_Fahrenheit = 1 / 1.8     # size of one Fahrenheit step; only for differences
+
# energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184  # thermochemical calorie (the '_th' suffix)
calorie_IT = 4.1868  # International Table calorie
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
# Wh = watt_hour

# power in watt
hp = horsepower = 550 * foot * pound * g

# force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
+
+# functions for conversions that are not linear
+
+
def convert_temperature(val, old_scale, new_scale):
    """
    Convert a temperature between the Celsius, Kelvin, Fahrenheit, and
    Rankine scales.

    Parameters
    ----------
    val : array_like
        Temperature value(s) expressed in `old_scale`.

    old_scale: str
        Scale of the input value(s): Celsius ('Celsius', 'celsius', 'C'
        or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), Fahrenheit
        ('Fahrenheit', 'fahrenheit', 'F' or 'f'), or Rankine
        ('Rankine', 'rankine', 'R', 'r').

    new_scale: str
        Scale of the output value(s); same choices as `old_scale`.

    Returns
    -------
    res : float or array of floats
        Converted temperature value(s) expressed in `new_scale`.

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy.constants import convert_temperature
    >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin')
    array([ 233.15,  313.15])

    """
    # Normalize through kelvin: first map `old_scale` input onto kelvin ...
    src = old_scale.lower()
    if src in ('celsius', 'c'):
        kelvin = _np.asanyarray(val) + zero_Celsius
    elif src in ('kelvin', 'k'):
        kelvin = _np.asanyarray(val)
    elif src in ('fahrenheit', 'f'):
        kelvin = (_np.asanyarray(val) - 32) * 5 / 9 + zero_Celsius
    elif src in ('rankine', 'r'):
        kelvin = _np.asanyarray(val) * 5 / 9
    else:
        raise NotImplementedError("%s scale is unsupported: supported scales "
                                  "are Celsius, Kelvin, Fahrenheit, and "
                                  "Rankine" % old_scale)

    # ... then map kelvin onto `new_scale`.
    dst = new_scale.lower()
    if dst in ('celsius', 'c'):
        return kelvin - zero_Celsius
    if dst in ('kelvin', 'k'):
        return kelvin
    if dst in ('fahrenheit', 'f'):
        return (kelvin - zero_Celsius) * 9 / 5 + 32
    if dst in ('rankine', 'r'):
        return kelvin * 9 / 5
    raise NotImplementedError("'%s' scale is unsupported: supported "
                              "scales are 'Celsius', 'Kelvin', "
                              "'Fahrenheit', and 'Rankine'" % new_scale)
+
+
+# optics
+
+
def lambda2nu(lambda_):
    """
    Convert wavelength to optical frequency.

    Parameters
    ----------
    lambda_ : array_like
        Wavelength(s) to be converted, in meters.

    Returns
    -------
    nu : float or array of floats
        Equivalent optical frequency, ``c / lambda``, with
        c = 299792458.0 the (vacuum) speed of light in meters/second.

    Examples
    --------
    >>> from scipy.constants import lambda2nu, speed_of_light
    >>> lambda2nu(np.array((1, speed_of_light)))
    array([  2.99792458e+08,   1.00000000e+00])

    """
    wavelength = _np.asanyarray(lambda_)
    return c / wavelength
+
+
def nu2lambda(nu):
    """
    Convert optical frequency to wavelength.

    Parameters
    ----------
    nu : array_like
        Optical frequency(ies) to be converted, in Hz.

    Returns
    -------
    lambda : float or array of floats
        Equivalent wavelength(s), ``c / nu``, with c = 299792458.0 the
        (vacuum) speed of light in meters/second.

    Examples
    --------
    >>> from scipy.constants import nu2lambda, speed_of_light
    >>> nu2lambda(np.array((1, speed_of_light)))
    array([  2.99792458e+08,   1.00000000e+00])

    """
    frequency = _np.asanyarray(nu)
    return c / frequency
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/setup.py
new file mode 100644
index 0000000..3e8916d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/constants/setup.py
@@ -0,0 +1,11 @@
+
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for scipy.constants."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('constants', parent_package, top_path)
    # Ship the test files alongside the installed package.
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/test_codata.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/test_codata.py
new file mode 100644
index 0000000..996fc9b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/test_codata.py
@@ -0,0 +1,57 @@
+from scipy.constants import constants, codata, find, value, ConstantWarning
+from numpy.testing import (assert_equal, assert_, assert_almost_equal,
+ suppress_warnings)
+
+
def test_find():
    # A query that matches exactly one key.
    found = find('weak mixing', disp=False)
    assert_equal(found, ['weak mixing angle'])

    # A query that matches nothing yields an empty list.
    found = find('qwertyuiop', disp=False)
    assert_equal(found, [])

    # A query matching a whole family; find() returns the keys sorted.
    expected = ['natural unit of velocity',
                'natural unit of action',
                'natural unit of action in eV s',
                'natural unit of mass',
                'natural unit of energy',
                'natural unit of energy in MeV',
                'natural unit of momentum',
                'natural unit of momentum in MeV/c',
                'natural unit of length',
                'natural unit of time']
    assert_equal(find('natural unit', disp=False), sorted(expected))
+
+
def test_basic_table_parse():
    # The module-level shortcuts must agree with the parsed CODATA table.
    key = 'speed of light in vacuum'
    assert_equal(codata.value(key), constants.c)
    assert_equal(codata.value(key), constants.speed_of_light)
+
+
def test_basic_lookup():
    # Value plus unit combine into the canonical SI rendering.
    rendered = '%d %s' % (codata.c, codata.unit('speed of light in vacuum'))
    assert_equal(rendered, '299792458 m s^-1')
+
+
def test_find_all():
    # Without a sub-string, find() lists the whole current data set.
    all_keys = codata.find(disp=False)
    assert_(len(all_keys) > 300)
+
+
def test_find_single():
    # First sorted match for a prefix query.
    matches = codata.find('Wien freq', disp=False)
    assert_equal(matches[0], 'Wien frequency displacement law constant')
+
+
def test_2002_vs_2006():
    # The 2002 spelling 'magn.' is aliased onto the 2006+ spelling 'mag.'.
    assert_almost_equal(codata.value('magn. flux quantum'),
                        codata.value('mag. flux quantum'))
+
+
def test_exact_values():
    # Check that updating stored values with exact ones worked.
    with suppress_warnings() as sup:
        sup.filter(ConstantWarning)
        for key in codata.exact_values:
            exact = codata.exact_values[key][0]
            assert_((exact - value(key)) / value(key) == 0)
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/test_constants.py b/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/test_constants.py
new file mode 100644
index 0000000..8d7461d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/constants/tests/test_constants.py
@@ -0,0 +1,35 @@
+from numpy.testing import assert_equal, assert_allclose
+import scipy.constants as sc
+
+
def test_convert_temperature():
    # C/K/F conversions involve only exact offsets/factors: compare exactly.
    assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0)
    assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'),
                 [273.15, 273.15])
    assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'),
                 [-273.15, -273.15])
    assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15])
    assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'),
                 [32, 32])
    assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32])
    # Rankine conversions pick up float rounding: compare with a tight atol.
    assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67],
                    rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'),
                    [0., 0.], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'),
                    [32., 32.], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'),
                    [491.67, 491.67], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'),
                    [491.67, 491.67], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
                    [273.15, 0.], rtol=0., atol=1e-13)
+
+
def test_lambda_to_nu():
    # nu = c/lambda: a wavelength of c meters maps to 1 Hz and vice versa.
    assert_equal(sc.lambda2nu([sc.speed_of_light, 1]), [1, sc.speed_of_light])
+
+
def test_nu_to_lambda():
    # lambda = c/nu: a frequency of c Hz maps to 1 m and vice versa.
    assert_equal(sc.nu2lambda([sc.speed_of_light, 1]), [1, sc.speed_of_light])
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/__init__.py
new file mode 100644
index 0000000..4b060ff
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/__init__.py
@@ -0,0 +1,98 @@
+"""
+==============================================
+Discrete Fourier transforms (:mod:`scipy.fft`)
+==============================================
+
+.. currentmodule:: scipy.fft
+
+Fast Fourier Transforms (FFTs)
+==============================
+
+.. autosummary::
+ :toctree: generated/
+
+ fft - Fast (discrete) Fourier Transform (FFT)
+ ifft - Inverse FFT
+ fft2 - 2-D FFT
+ ifft2 - 2-D inverse FFT
+ fftn - N-D FFT
+ ifftn - N-D inverse FFT
+ rfft - FFT of strictly real-valued sequence
+ irfft - Inverse of rfft
+ rfft2 - 2-D FFT of real sequence
+ irfft2 - Inverse of rfft2
+ rfftn - N-D FFT of real sequence
+ irfftn - Inverse of rfftn
+ hfft - FFT of a Hermitian sequence (real spectrum)
+ ihfft - Inverse of hfft
+ hfft2 - 2-D FFT of a Hermitian sequence
+ ihfft2 - Inverse of hfft2
+ hfftn - N-D FFT of a Hermitian sequence
+ ihfftn - Inverse of hfftn
+
+Discrete Sine and Cosine Transforms (DST and DCT)
+=================================================
+.. autosummary::
+ :toctree: generated/
+
+ dct - Discrete cosine transform
+ idct - Inverse discrete cosine transform
+ dctn - N-D Discrete cosine transform
+ idctn - N-D Inverse discrete cosine transform
+ dst - Discrete sine transform
+ idst - Inverse discrete sine transform
+ dstn - N-D Discrete sine transform
+ idstn - N-D Inverse discrete sine transform
+
+Helper functions
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ fftshift - Shift the zero-frequency component to the center of the spectrum
+ ifftshift - The inverse of `fftshift`
+ fftfreq - Return the Discrete Fourier Transform sample frequencies
+ rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
+ next_fast_len - Find the optimal length to zero-pad an FFT for speed
+ set_workers - Context manager to set default number of workers
+ get_workers - Get the current default number of workers
+
+Backend control
+===============
+
+.. autosummary::
+ :toctree: generated/
+
+ set_backend - Context manager to set the backend within a fixed scope
+ skip_backend - Context manager to skip a backend within a fixed scope
+ set_global_backend - Sets the global fft backend
+ register_backend - Register a backend for permanent use
+
+"""
+
+from ._basic import (
+ fft, ifft, fft2, ifft2, fftn, ifftn,
+ rfft, irfft, rfft2, irfft2, rfftn, irfftn,
+ hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)
+from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn
+from ._helper import next_fast_len
+from ._backend import (set_backend, skip_backend, set_global_backend,
+ register_backend)
+from numpy.fft import fftfreq, rfftfreq, fftshift, ifftshift
+from ._pocketfft.helper import set_workers, get_workers
+
# Public API of scipy.fft, grouped by family: complex FFTs, real FFTs,
# Hermitian FFTs, helpers, DCT/DST, and backend/worker control.
__all__ = [
    'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
    'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
    'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',
    'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
    'next_fast_len',
    'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',
    'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',
    'get_workers', 'set_workers',
]
+
+
# Expose a `scipy.fft.test()` runner without leaking the helper class itself.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_backend.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_backend.py
new file mode 100644
index 0000000..fa47386
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_backend.py
@@ -0,0 +1,180 @@
+import scipy._lib.uarray as ua
+from . import _pocketfft
+
+
class _ScipyBackend:
    """The default fft backend, dispatching to ``scipy.fft._pocketfft``.

    Notes
    -----
    The domain is ``numpy.scipy.fft`` rather than ``scipy`` because
    ``uarray`` will eventually treat domains hierarchically: a single
    backend installed for ``numpy`` could then also serve
    ``numpy.scipy.fft``.
    """
    __ua_domain__ = "numpy.scipy.fft"

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # Look the multimethod up by name in the pocketfft implementation;
        # an unknown name defers to lower-priority backends.
        implementation = getattr(_pocketfft, method.__name__, None)
        if implementation is None:
            return NotImplemented
        return implementation(*args, **kwargs)
+
+
# Backends addressable by name through the public set/register/skip functions.
_named_backends = {
    'scipy': _ScipyBackend,
}
+
+
def _backend_from_arg(backend):
    """Resolve a backend name or object and validate its uarray domain."""
    # A string selects one of the built-in named backends.
    if isinstance(backend, str):
        try:
            backend = _named_backends[backend]
        except KeyError as err:
            raise ValueError('Unknown backend {}'.format(backend)) from err

    # Whatever we dispatch to must claim the numpy.scipy.fft domain.
    if backend.__ua_domain__ != 'numpy.scipy.fft':
        raise ValueError('Backend does not implement "numpy.scipy.fft"')

    return backend
+
+
def set_global_backend(backend):
    """Install *backend* as the global fft backend.

    The global backend outranks registered backends but is outranked by
    context-local backends installed with `set_backend`.

    Parameters
    ----------
    backend: {object, 'scipy'}
        The backend to use: either the name of a known backend
        ({'scipy'}) or an object implementing the uarray protocol.

    Raises
    ------
    ValueError: If the backend does not implement ``numpy.scipy.fft``.

    Notes
    -----
    This overwrites the previously set global backend, which by default
    is the SciPy implementation.

    Examples
    --------
    We can set the global fft backend:

    >>> from scipy.fft import fft, set_global_backend
    >>> set_global_backend("scipy")  # Sets global backend. "scipy" is the default backend.
    >>> fft([1])  # Calls the global backend
    array([1.+0.j])
    """
    validated = _backend_from_arg(backend)
    ua.set_global_backend(validated)
+
+
def register_backend(backend):
    """
    Register *backend* for permanent use.

    Registered backends have the lowest priority: they are only tried
    after the global backend.

    Parameters
    ----------
    backend: {object, 'scipy'}
        The backend to use: either the name of a known backend
        ({'scipy'}) or an object implementing the uarray protocol.

    Raises
    ------
    ValueError: If the backend does not implement ``numpy.scipy.fft``.

    Examples
    --------
    We can register a new fft backend:

    >>> from scipy.fft import fft, register_backend, set_global_backend
    >>> class NoopBackend:  # Define an invalid Backend
    ...     __ua_domain__ = "numpy.scipy.fft"
    ...     def __ua_function__(self, func, args, kwargs):
    ...         return NotImplemented
    >>> set_global_backend(NoopBackend())  # Set the invalid backend as global
    >>> register_backend("scipy")  # Register a new backend
    >>> fft([1])  # The registered backend is called because the global backend returns `NotImplemented`
    array([1.+0.j])
    >>> set_global_backend("scipy")  # Restore global backend to default

    """
    validated = _backend_from_arg(backend)
    ua.register_backend(validated)
+
+
def set_backend(backend, coerce=False, only=False):
    """Context manager that installs *backend* with the highest priority.

    On entering the ``with`` statement the backend is first in line for
    dispatch; on exit the previous state is restored.

    Parameters
    ----------
    backend: {object, 'scipy'}
        The backend to use: either the name of a known backend
        ({'scipy'}) or an object implementing the uarray protocol.
    coerce: bool, optional
        Whether to allow expensive conversions of the ``x`` parameter,
        e.g. copying a NumPy array to the GPU for a CuPy backend.
        Implies ``only``.
    only: bool, optional
        If True and this backend returns ``NotImplemented``, raise a
        BackendNotImplemented error immediately instead of consulting
        lower-priority backends.

    Examples
    --------
    >>> import scipy.fft as fft
    >>> with fft.set_backend('scipy', only=True):
    ...     fft.fft([1])  # Always calls the scipy implementation
    array([1.+0.j])
    """
    validated = _backend_from_arg(backend)
    return ua.set_backend(validated, coerce=coerce, only=only)
+
+
def skip_backend(backend):
    """Context manager that hides *backend* within a fixed scope.

    Inside the ``with`` statement the given backend — whether installed
    locally or globally — is never consulted; on exit it is considered
    again.

    Parameters
    ----------
    backend: {object, 'scipy'}
        The backend to skip: either the name of a known backend
        ({'scipy'}) or an object implementing the uarray protocol.

    Examples
    --------
    >>> import scipy.fft as fft
    >>> fft.fft([1])  # Calls default SciPy backend
    array([1.+0.j])
    >>> with fft.skip_backend('scipy'):  # We explicitly skip the SciPy backend
    ...     fft.fft([1])                 # leaving no implementation available
    Traceback (most recent call last):
        ...
    BackendNotImplementedError: No selected backends had an implementation ...
    """
    validated = _backend_from_arg(backend)
    return ua.skip_backend(validated)
+
+
+set_global_backend('scipy')
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_basic.py
new file mode 100644
index 0000000..c108208
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_basic.py
@@ -0,0 +1,1617 @@
+from scipy._lib.uarray import generate_multimethod, Dispatchable
+import numpy as np
+
+
+def _x_replacer(args, kwargs, dispatchables):
+ """
+ uarray argument replacer to replace the transform input array (``x``)
+ """
+ if len(args) > 0:
+ return (dispatchables[0],) + args[1:], kwargs
+ kw = kwargs.copy()
+ kw['x'] = dispatchables[0]
+ return args, kw
+
+
+def _dispatch(func):
+ """
+ Function annotation that creates a uarray multimethod from the function
+ """
+ return generate_multimethod(func, _x_replacer, domain="numpy.scipy.fft")
+
+
+@_dispatch
+def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 1-D discrete Fourier Transform.
+
+ This function computes the 1-D *n*-point discrete Fourier
+ Transform (DFT) with the efficient Fast Fourier Transform (FFT)
+ algorithm [1]_.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, can be complex.
+ n : int, optional
+ Length of the transformed axis of the output.
+ If `n` is smaller than the length of the input, the input is cropped.
+ If it is larger, the input is padded with zeros. If `n` is not given,
+ the length of the input along the axis specified by `axis` is used.
+ axis : int, optional
+ Axis over which to compute the FFT. If not given, the last axis is
+ used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode. Default is "backward", meaning no normalization on
+ the forward transforms and scaling by ``1/n`` on the `ifft`.
+ "forward" instead applies the ``1/n`` factor on the forward tranform.
+ For ``norm="ortho"``, both directions are scaled by ``1/sqrt(n)``.
+
+ .. versionadded:: 1.6.0
+ ``norm={"forward", "backward"}`` options were added
+
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See the notes below for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``. See below for more
+ details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is larger than the last axis of `x`.
+
+ See Also
+ --------
+ ifft : The inverse of `fft`.
+ fft2 : The 2-D FFT.
+ fftn : The N-D FFT.
+ rfftn : The N-D FFT of real input.
+ fftfreq : Frequency bins for given FFT parameters.
+ next_fast_len : Size to pad input to for most efficient transforms
+
+ Notes
+ -----
+
+ FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform
+ (DFT) can be calculated efficiently, by using symmetries in the calculated
+ terms. The symmetry is highest when `n` is a power of 2, and the transform
+ is therefore most efficient for these sizes. For poorly factorizable sizes,
+ `scipy.fft` uses Bluestein's algorithm [2]_ and so is never worse than
+ O(`n` log `n`). Further performance improvements may be seen by zero-padding
+ the input using `next_fast_len`.
+
+ If ``x`` is a 1d array, then the `fft` is equivalent to ::
+
+ y[k] = np.sum(x * np.exp(-2j * np.pi * k * np.arange(n)/n))
+
+ The frequency term ``f=k/n`` is found at ``y[k]``. At ``y[n/2]`` we reach
+ the Nyquist frequency and wrap around to the negative-frequency terms. So,
+ for an 8-point transform, the frequencies of the result are
+ [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the
+ zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3],
+ use `fftshift`.
+
+ Transforms can be done in single, double, or extended precision (long
+ double) floating point. Half precision inputs will be converted to single
+ precision and non-floating-point inputs will be converted to double
+ precision.
+
+ If the data type of ``x`` is real, a "real FFT" algorithm is automatically
+ used, which roughly halves the computation time. To increase efficiency
+ a little further, use `rfft`, which does the same calculation, but only
+ outputs half of the symmetrical spectrum. If the data are both real and
+ symmetrical, the `dct` can again double the efficiency, by generating
+ half of the spectrum from half of the signal.
+
+ When ``overwrite_x=True`` is specified, the memory referenced by ``x`` may
+ be used by the implementation in any way. This may include reusing the
+ memory for the result, but this is in no way guaranteed. You should not
+ rely on the contents of ``x`` after the transform as this may change in
+ future without warning.
+
+ The ``workers`` argument specifies the maximum number of parallel jobs to
+ split the FFT computation into. This will execute independent 1-D
+ FFTs within ``x``. So, ``x`` must be at least 2-D and the
+ non-transformed axes must be large enough to split into chunks. If ``x`` is
+ too small, fewer jobs may be used than requested.
+
+ References
+ ----------
+ .. [1] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+ machine calculation of complex Fourier series," *Math. Comput.*
+ 19: 297-301.
+ .. [2] Bluestein, L., 1970, "A linear filtering approach to the
+ computation of discrete Fourier transform". *IEEE Transactions on
+ Audio and Electroacoustics.* 18 (4): 451-455.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> scipy.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+ array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
+ 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
+ -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
+ 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
+
+ In this example, real input has an FFT which is Hermitian, i.e., symmetric
+ in the real part and anti-symmetric in the imaginary part:
+
+ >>> from scipy.fft import fft, fftfreq, fftshift
+ >>> import matplotlib.pyplot as plt
+ >>> t = np.arange(256)
+ >>> sp = fftshift(fft(np.sin(t)))
+ >>> freq = fftshift(fftfreq(t.shape[-1]))
+ >>> plt.plot(freq, sp.real, freq, sp.imag)
+ [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.show()
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 1-D inverse discrete Fourier Transform.
+
+ This function computes the inverse of the 1-D *n*-point
+ discrete Fourier transform computed by `fft`. In other words,
+ ``ifft(fft(x)) == x`` to within numerical accuracy.
+
+ The input should be ordered in the same way as is returned by `fft`,
+ i.e.,
+
+ * ``x[0]`` should contain the zero frequency term,
+ * ``x[1:n//2]`` should contain the positive-frequency terms,
+ * ``x[n//2 + 1:]`` should contain the negative-frequency terms, in
+ increasing order starting from the most negative frequency.
+
+ For an even number of input points, ``x[n//2]`` represents the sum of
+ the values at the positive and negative Nyquist frequencies, as the two
+ are aliased together. See `fft` for details.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, can be complex.
+ n : int, optional
+ Length of the transformed axis of the output.
+ If `n` is smaller than the length of the input, the input is cropped.
+ If it is larger, the input is padded with zeros. If `n` is not given,
+ the length of the input along the axis specified by `axis` is used.
+ See notes about padding issues.
+ axis : int, optional
+ Axis over which to compute the inverse DFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is larger than the last axis of `x`.
+
+ See Also
+ --------
+ fft : The 1-D (forward) FFT, of which `ifft` is the inverse.
+ ifft2 : The 2-D inverse FFT.
+ ifftn : The N-D inverse FFT.
+
+ Notes
+ -----
+ If the input parameter `n` is larger than the size of the input, the input
+ is padded by appending zeros at the end. Even though this is the common
+ approach, it might lead to surprising results. If a different padding is
+ desired, it must be performed before calling `ifft`.
+
+ If ``x`` is a 1-D array, then the `ifft` is equivalent to ::
+
+ y[k] = np.sum(x * np.exp(2j * np.pi * k * np.arange(n)/n)) / len(x)
+
+ As with `fft`, `ifft` has support for all floating point types and is
+ optimized for real input.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> scipy.fft.ifft([0, 4, 0, 0])
+ array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
+
+ Create and plot a band-limited signal with random phases:
+
+ >>> import matplotlib.pyplot as plt
+ >>> t = np.arange(400)
+ >>> n = np.zeros((400,), dtype=complex)
+ >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
+ >>> s = scipy.fft.ifft(n)
+ >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
+ [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.legend(('real', 'imaginary'))
+
+ >>> plt.show()
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 1-D discrete Fourier Transform for real input.
+
+ This function computes the 1-D *n*-point discrete Fourier
+ Transform (DFT) of a real-valued array by means of an efficient algorithm
+ called the Fast Fourier Transform (FFT).
+
+ Parameters
+ ----------
+ x : array_like
+ Input array
+ n : int, optional
+ Number of points along transformation axis in the input to use.
+ If `n` is smaller than the length of the input, the input is cropped.
+ If it is larger, the input is padded with zeros. If `n` is not given,
+ the length of the input along the axis specified by `axis` is used.
+ axis : int, optional
+ Axis over which to compute the FFT. If not given, the last axis is
+ used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+ If `n` is odd, the length is ``(n+1)/2``.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is larger than the last axis of `x`.
+
+ See Also
+ --------
+ irfft : The inverse of `rfft`.
+ fft : The 1-D FFT of general (complex) input.
+ fftn : The N-D FFT.
+ rfft2 : The 2-D FFT of real input.
+ rfftn : The N-D FFT of real input.
+
+ Notes
+ -----
+ When the DFT is computed for purely real input, the output is
+ Hermitian-symmetric, i.e., the negative frequency terms are just the complex
+ conjugates of the corresponding positive-frequency terms, and the
+ negative-frequency terms are therefore redundant. This function does not
+ compute the negative frequency terms, and the length of the transformed
+ axis of the output is therefore ``n//2 + 1``.
+
+ When ``X = rfft(x)`` and fs is the sampling frequency, ``X[0]`` contains
+ the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
+
+ If `n` is even, ``A[-1]`` contains the term representing both positive
+ and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
+ real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
+ the largest positive frequency (fs/2*(n-1)/n), and is complex in the
+ general case.
+
+ If the input `x` contains an imaginary part, it is silently discarded.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> scipy.fft.fft([0, 1, 0, 0])
+ array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
+ >>> scipy.fft.rfft([0, 1, 0, 0])
+ array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
+
+ Notice how the final element of the `fft` output is the complex conjugate
+ of the second element, for real input. For `rfft`, this symmetry is
+ exploited to compute only the non-negative frequency terms.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Computes the inverse of `rfft`.
+
+ This function computes the inverse of the 1-D *n*-point
+ discrete Fourier Transform of real input computed by `rfft`.
+ In other words, ``irfft(rfft(x), len(x)) == x`` to within numerical
+ accuracy. (See Notes below for why ``len(x)`` is necessary here.)
+
+ The input is expected to be in the form returned by `rfft`, i.e., the
+ real zero-frequency term followed by the complex positive frequency terms
+ in order of increasing frequency. Since the discrete Fourier Transform of
+ real input is Hermitian-symmetric, the negative frequency terms are taken
+ to be the complex conjugates of the corresponding positive frequency terms.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ n : int, optional
+ Length of the transformed axis of the output.
+ For `n` output points, ``n//2+1`` input points are necessary. If the
+ input is longer than this, it is cropped. If it is shorter than this,
+ it is padded with zeros. If `n` is not given, it is taken to be
+ ``2*(m-1)``, where ``m`` is the length of the input along the axis
+ specified by `axis`.
+ axis : int, optional
+ Axis over which to compute the inverse FFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ The length of the transformed axis is `n`, or, if `n` is not given,
+ ``2*(m-1)`` where ``m`` is the length of the transformed axis of the
+ input. To get an odd number of output points, `n` must be specified.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is larger than the last axis of `x`.
+
+ See Also
+ --------
+ rfft : The 1-D FFT of real input, of which `irfft` is inverse.
+ fft : The 1-D FFT.
+ irfft2 : The inverse of the 2-D FFT of real input.
+ irfftn : The inverse of the N-D FFT of real input.
+
+ Notes
+ -----
+ Returns the real valued `n`-point inverse discrete Fourier transform
+ of `x`, where `x` contains the non-negative frequency terms of a
+ Hermitian-symmetric sequence. `n` is the length of the result, not the
+ input.
+
+ If you specify an `n` such that `x` must be zero-padded or truncated, the
+ extra/removed values will be added/removed at high frequencies. One can
+ thus resample a series to `m` points via Fourier interpolation by:
+ ``a_resamp = irfft(rfft(a), m)``.
+
+ The default value of `n` assumes an even output length. By the Hermitian
+ symmetry, the last imaginary component must be 0 and so is ignored. To
+ avoid losing information, the correct length of the real input *must* be
+ given.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> scipy.fft.ifft([1, -1j, -1, 1j])
+ array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
+ >>> scipy.fft.irfft([1, -1j, -1])
+ array([0., 1., 0., 0.])
+
+ Notice how the last term in the input to the ordinary `ifft` is the
+ complex conjugate of the second term, and the output has zero imaginary
+ part everywhere. When calling `irfft`, the negative frequencies are not
+ specified, and the output array is purely real.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
+ spectrum.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ n : int, optional
+ Length of the transformed axis of the output. For `n` output
+ points, ``n//2 + 1`` input points are necessary. If the input is
+ longer than this, it is cropped. If it is shorter than this, it is
+ padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``,
+ where ``m`` is the length of the input along the axis specified by
+ `axis`.
+ axis : int, optional
+ Axis over which to compute the FFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See `fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ The length of the transformed axis is `n`, or, if `n` is not given,
+ ``2*m - 2``, where ``m`` is the length of the transformed axis of
+ the input. To get an odd number of output points, `n` must be
+ specified, for instance, as ``2*m - 1`` in the typical case,
+
+ Raises
+ ------
+ IndexError
+ If `axis` is larger than the last axis of `x`.
+
+ See also
+ --------
+ rfft : Compute the 1-D FFT for real input.
+ ihfft : The inverse of `hfft`.
+ hfftn : Compute the N-D FFT of a Hermitian signal.
+
+ Notes
+ -----
+ `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+ opposite case: here the signal has Hermitian symmetry in the time
+ domain and is real in the frequency domain. So, here, it's `hfft`, for
+ which you must supply the length of the result if it is to be odd.
+ * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+ * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+ Examples
+ --------
+ >>> from scipy.fft import fft, hfft
+ >>> a = 2 * np.pi * np.arange(10) / 10
+ >>> signal = np.cos(a) + 3j * np.sin(3 * a)
+ >>> fft(signal).round(10)
+ array([ -0.+0.j, 5.+0.j, -0.+0.j, 15.-0.j, 0.+0.j, 0.+0.j,
+ -0.+0.j, -15.-0.j, 0.+0.j, 5.+0.j])
+ >>> hfft(signal[:6]).round(10) # Input first half of signal
+ array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.])
+ >>> hfft(signal, 10) # Input entire signal and truncate
+ array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.])
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ n : int, optional
+ Length of the inverse FFT, the number of points along
+ transformation axis in the input to use. If `n` is smaller than
+ the length of the input, the input is cropped. If it is larger,
+ the input is padded with zeros. If `n` is not given, the length of
+ the input along the axis specified by `axis` is used.
+ axis : int, optional
+ Axis over which to compute the inverse FFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See `fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ The length of the transformed axis is ``n//2 + 1``.
+
+ See also
+ --------
+ hfft, irfft
+
+ Notes
+ -----
+ `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+ opposite case: here, the signal has Hermitian symmetry in the time
+ domain and is real in the frequency domain. So, here, it's `hfft`, for
+ which you must supply the length of the result if it is to be odd:
+ * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+ * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+ Examples
+ --------
+ >>> from scipy.fft import ifft, ihfft
+ >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
+ >>> ifft(spectrum)
+ array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
+ >>> ihfft(spectrum)
+ array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the N-D discrete Fourier Transform.
+
+ This function computes the N-D discrete Fourier Transform over
+ any number of axes in an M-D array by means of the Fast Fourier
+ Transform (FFT).
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, can be complex.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ This corresponds to ``n`` for ``fft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ if `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `x`,
+ as explained in the parameters section above.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ ifftn : The inverse of `fftn`, the inverse N-D FFT.
+ fft : The 1-D FFT, with definitions and conventions used.
+ rfftn : The N-D FFT of real input.
+ fft2 : The 2-D FFT.
+ fftshift : Shifts zero-frequency terms to centre of array.
+
+ Notes
+ -----
+ The output, analogously to `fft`, contains the term for zero frequency in
+ the low-order corner of all axes, the positive frequency terms in the
+ first half of all axes, the term for the Nyquist frequency in the middle
+ of all axes and the negative frequency terms in the second half of all
+ axes, in order of decreasingly negative frequency.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.mgrid[:3, :3, :3][0]
+ >>> scipy.fft.fftn(x, axes=(1, 2))
+ array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[ 9.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[18.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]]])
+ >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1))
+ array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[-2.+0.j, -2.+0.j, -2.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]]])
+
+ >>> import matplotlib.pyplot as plt
+ >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+ ... 2 * np.pi * np.arange(200) / 34)
+ >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+ >>> FS = scipy.fft.fftn(S)
+ >>> plt.imshow(np.log(np.abs(scipy.fft.fftshift(FS))**2))
+ <matplotlib.image.AxesImage object at 0x...>
+ >>> plt.show()
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the N-D inverse discrete Fourier Transform.
+
+ This function computes the inverse of the N-D discrete
+ Fourier Transform over any number of axes in an M-D array by
+ means of the Fast Fourier Transform (FFT). In other words,
+ ``ifftn(fftn(x)) == x`` to within numerical accuracy.
+
+ The input, analogously to `ifft`, should be ordered in the same way as is
+ returned by `fftn`, i.e., it should have the term for zero frequency
+ in all axes in the low-order corner, the positive frequency terms in the
+ first half of all axes, the term for the Nyquist frequency in the middle
+ of all axes and the negative frequency terms in the second half of all
+ axes, in order of decreasingly negative frequency.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, can be complex.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ This corresponds to ``n`` for ``ifft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ if `s` is not given, the shape of the input along the axes specified
+ by `axes` is used. See notes for issue on `ifft` zero padding.
+ axes : sequence of ints, optional
+ Axes over which to compute the IFFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` or `x`,
+ as explained in the parameters section above.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ fftn : The forward N-D FFT, of which `ifftn` is the inverse.
+ ifft : The 1-D inverse FFT.
+ ifft2 : The 2-D inverse FFT.
+ ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+ of array.
+
+ Notes
+ -----
+ Zero-padding, analogously with `ifft`, is performed by appending zeros to
+ the input along the specified dimension. Although this is the common
+ approach, it might lead to surprising results. If another form of zero
+ padding is desired, it must be performed before `ifftn` is called.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.eye(4)
+ >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,))
+ array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
+
+
+ Create and plot an image with band-limited frequency content:
+
+ >>> import matplotlib.pyplot as plt
+ >>> n = np.zeros((200,200), dtype=complex)
+ >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+ >>> im = scipy.fft.ifftn(n).real
+ >>> plt.imshow(im)
+ <matplotlib.image.AxesImage object at 0x...>
+ >>> plt.show()
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 2-D discrete Fourier Transform
+
+ This function computes the N-D discrete Fourier Transform
+ over any axes in an M-D array by means of the
+ Fast Fourier Transform (FFT). By default, the transform is computed over
+ the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, can be complex
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ This corresponds to ``n`` for ``fft(x, n)``.
+ Along each axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ if `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last two axes are
+ used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or the last two axes if `axes` is not given.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length, or `axes` not given and
+ ``len(s) != 2``.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ ifft2 : The inverse 2-D FFT.
+ fft : The 1-D FFT.
+ fftn : The N-D FFT.
+ fftshift : Shifts zero-frequency terms to the center of the array.
+ For 2-D input, swaps first and third quadrants, and second
+ and fourth quadrants.
+
+ Notes
+ -----
+ `fft2` is just `fftn` with a different default for `axes`.
+
+ The output, analogously to `fft`, contains the term for zero frequency in
+ the low-order corner of the transformed axes, the positive frequency terms
+ in the first half of these axes, the term for the Nyquist frequency in the
+ middle of the axes and the negative frequency terms in the second half of
+ the axes, in order of decreasingly negative frequency.
+
+ See `fftn` for details and a plotting example, and `fft` for
+ definitions and conventions used.
+
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.mgrid[:5, :5][0]
+ >>> scipy.fft.fft2(x)
+ array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary
+ 0. +0.j , 0. +0.j ],
+ [-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ]])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 2-D inverse discrete Fourier Transform.
+
+ This function computes the inverse of the 2-D discrete Fourier
+ Transform over any number of axes in an M-D array by means of
+ the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(x)) == x``
+ to within numerical accuracy. By default, the inverse transform is
+ computed over the last two axes of the input array.
+
+ The input, analogously to `ifft`, should be ordered in the same way as is
+ returned by `fft2`, i.e., it should have the term for zero frequency
+ in the low-order corner of the two axes, the positive frequency terms in
+ the first half of these axes, the term for the Nyquist frequency in the
+ middle of the axes and the negative frequency terms in the second half of
+ both axes, in order of decreasingly negative frequency.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, can be complex.
+ s : sequence of ints, optional
+ Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+ ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
+ Along each axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used. See notes for issue on `ifft` zero padding.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last two
+ axes are used.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or the last two axes if `axes` is not given.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length, or `axes` not given and
+ ``len(s) != 2``.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ fft2 : The forward 2-D FFT, of which `ifft2` is the inverse.
+ ifftn : The inverse of the N-D FFT.
+ fft : The 1-D FFT.
+ ifft : The 1-D inverse FFT.
+
+ Notes
+ -----
+ `ifft2` is just `ifftn` with a different default for `axes`.
+
+ See `ifftn` for details and a plotting example, and `fft` for
+ definition and conventions used.
+
+ Zero-padding, analogously with `ifft`, is performed by appending zeros to
+ the input along the specified dimension. Although this is the common
+ approach, it might lead to surprising results. If another form of zero
+ padding is desired, it must be performed before `ifft2` is called.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = 4 * np.eye(4)
+ >>> scipy.fft.ifft2(x)
+ array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
+ [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+ [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the N-D discrete Fourier Transform for real input.
+
+ This function computes the N-D discrete Fourier Transform over
+ any number of axes in an M-D real array by means of the Fast
+ Fourier Transform (FFT). By default, all axes are transformed, with the
+ real transform performed over the last axis, while the remaining
+ transforms are complex.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, taken to be real.
+ s : sequence of ints, optional
+ Shape (length along each transformed axis) to use from the input.
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+ for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `x`,
+ as explained in the parameters section above.
+ The length of the last axis transformed will be ``s[-1]//2+1``,
+ while the remaining transformed axes will have lengths according to
+ `s`, or unchanged from the input.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT
+ of real input.
+ fft : The 1-D FFT, with definitions and conventions used.
+ rfft : The 1-D FFT of real input.
+ fftn : The N-D FFT.
+ rfft2 : The 2-D FFT of real input.
+
+ Notes
+ -----
+ The transform for real input is performed over the last transformation
+ axis, as by `rfft`, then the transform over the remaining axes is
+ performed as by `fftn`. The order of the output is as for `rfft` for the
+ final transformation axis, and as for `fftn` for the remaining
+ transformation axes.
+
+ See `fft` for details, definitions and conventions used.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.ones((2, 2, 2))
+ >>> scipy.fft.rfftn(x)
+ array([[[8.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
+
+ >>> scipy.fft.rfftn(x, axes=(2, 0))
+ array([[[4.+0.j, 0.+0.j], # may vary
+ [4.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 2-D FFT of a real array.
+
+ Parameters
+ ----------
+ x : array
+ Input array, taken to be real.
+ s : sequence of ints, optional
+ Shape of the FFT.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The result of the real 2-D FFT.
+
+ See Also
+ --------
+ irfft2 : The inverse of the 2-D FFT of real input.
+ rfft : The 1-D FFT of real input.
+ rfftn : Compute the N-D discrete Fourier Transform for real
+ input.
+
+ Notes
+ -----
+ This is really just `rfftn` with different default behavior.
+ For more details see `rfftn`.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Computes the inverse of `rfftn`
+
+ This function computes the inverse of the N-D discrete
+ Fourier Transform for real input over any number of axes in an
+ M-D array by means of the Fast Fourier Transform (FFT). In
+ other words, ``irfftn(rfftn(x), x.shape) == x`` to within numerical
+ accuracy. (The ``x.shape`` is necessary like ``len(x)`` is for `irfft`,
+ and for the same reason.)
+
+ The input should be ordered in the same way as is returned by `rfftn`,
+ i.e., as for `irfft` for the final transformation axis, and as for `ifftn`
+ along all the other axes.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+ number of input points used along this axis, except for the last axis,
+ where ``s[-1]//2+1`` points of the input are used.
+ Along any axis, if the shape indicated by `s` is smaller than that of
+ the input, the input is cropped. If it is larger, the input is padded
+ with zeros. If `s` is not given, the shape of the input along the axes
+ specified by axes is used. Except for the last axis which is taken to be
+ ``2*(m-1)``, where ``m`` is the length of the input along that axis.
+ axes : sequence of ints, optional
+ Axes over which to compute the inverse FFT. If not given, the last
+ `len(s)` axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` or `x`,
+ as explained in the parameters section above.
+ The length of each transformed axis is as given by the corresponding
+ element of `s`, or the length of the input in every axis except for the
+ last one if `s` is not given. In the final transformed axis the length
+ of the output when `s` is not given is ``2*(m-1)``, where ``m`` is the
+ length of the final transformed axis of the input. To get an odd
+ number of output points in the final axis, `s` must be specified.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ rfftn : The forward N-D FFT of real input,
+ of which `irfftn` is the inverse.
+ fft : The 1-D FFT, with definitions and conventions used.
+ irfft : The inverse of the 1-D FFT of real input.
+ irfft2 : The inverse of the 2-D FFT of real input.
+
+ Notes
+ -----
+ See `fft` for definitions and conventions used.
+
+ See `rfft` for definitions and conventions used for real input.
+
+ The default value of `s` assumes an even output length in the final
+ transformation axis. When performing the final complex to real
+ transformation, the Hermitian symmetry requires that the last imaginary
+ component along that axis must be 0 and so it is ignored. To avoid losing
+ information, the correct length of the real input *must* be given.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.zeros((3, 2, 2))
+ >>> x[0, 0, 0] = 3 * 2 * 2
+ >>> scipy.fft.irfftn(x)
+ array([[[1., 1.],
+ [1., 1.]],
+ [[1., 1.],
+ [1., 1.]],
+ [[1., 1.],
+ [1., 1.]]])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Computes the inverse of `rfft2`
+
+ Parameters
+ ----------
+ x : array_like
+ The input array
+ s : sequence of ints, optional
+ Shape of the real output to the inverse FFT.
+ axes : sequence of ints, optional
+ The axes over which to compute the inverse fft.
+ Default is the last two axes.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The result of the inverse real 2-D FFT.
+
+ See Also
+ --------
+ rfft2 : The 2-D FFT of real input.
+ irfft : The inverse of the 1-D FFT of real input.
+ irfftn : The inverse of the N-D FFT of real input.
+
+ Notes
+ -----
+ This is really `irfftn` with different defaults.
+ For more details see `irfftn`.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the N-D FFT of Hermitian symmetric complex input, i.e., a
+ signal with a real spectrum.
+
+ This function computes the N-D discrete Fourier Transform for a
+ Hermitian symmetric complex input over any number of axes in an
+ M-D array by means of the Fast Fourier Transform (FFT). In other
+ words, ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy. (``s``
+ here is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1``, this is necessary
+ for the same reason ``x.shape`` would be necessary for `irfft`.)
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+ number of input points used along this axis, except for the last axis,
+ where ``s[-1]//2+1`` points of the input are used.
+ Along any axis, if the shape indicated by `s` is smaller than that of
+ the input, the input is cropped. If it is larger, the input is padded
+ with zeros. If `s` is not given, the shape of the input along the axes
+ specified by axes is used. Except for the last axis which is taken to be
+ ``2*(m-1)`` where ``m`` is the length of the input along that axis.
+ axes : sequence of ints, optional
+ Axes over which to compute the inverse FFT. If not given, the last
+ `len(s)` axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` or `x`,
+ as explained in the parameters section above.
+ The length of each transformed axis is as given by the corresponding
+ element of `s`, or the length of the input in every axis except for the
+ last one if `s` is not given. In the final transformed axis the length
+ of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
+ length of the final transformed axis of the input. To get an odd
+ number of output points in the final axis, `s` must be specified.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`.
+ fft : The 1-D FFT, with definitions and conventions used.
+ rfft : Forward FFT of real input.
+
+ Notes
+ -----
+
+ For a 1-D signal ``x`` to have a real spectrum, it must satisfy
+ the Hermitian property::
+
+ x[i] == np.conj(x[-i]) for all i
+
+ This generalizes into higher dimensions by reflecting over each axis in
+ turn::
+
+ x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...]) for all i, j, k, ...
+
+ This should not be confused with a Hermitian matrix, for which the
+ transpose is its own conjugate::
+
+ x[i, j] == np.conj(x[j, i]) for all i, j
+
+
+ The default value of `s` assumes an even output length in the final
+ transformation axis. When performing the final complex to real
+ transformation, the Hermitian symmetry requires that the last imaginary
+ component along that axis must be 0 and so it is ignored. To avoid losing
+ information, the correct length of the real input *must* be given.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.ones((3, 2, 2))
+ >>> scipy.fft.hfftn(x)
+ array([[[12., 0.],
+ [ 0., 0.]],
+ [[ 0., 0.],
+ [ 0., 0.]],
+ [[ 0., 0.],
+ [ 0., 0.]]])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 2-D FFT of a Hermitian complex array.
+
+ Parameters
+ ----------
+ x : array
+ Input array, taken to be Hermitian complex.
+ s : sequence of ints, optional
+ Shape of the real output.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See `fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The real result of the 2-D Hermitian complex real FFT.
+
+ See Also
+ --------
+ hfftn : Compute the N-D discrete Fourier Transform for Hermitian
+ complex input.
+
+ Notes
+ -----
+ This is really just `hfftn` with different default behavior.
+ For more details see `hfftn`.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the N-D inverse discrete Fourier Transform for a real
+ spectrum.
+
+ This function computes the N-D inverse discrete Fourier Transform
+ over any number of axes in an M-D real array by means of the Fast
+ Fourier Transform (FFT). By default, all axes are transformed, with the
+ real transform performed over the last axis, while the remaining transforms
+ are complex.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array, taken to be real.
+ s : sequence of ints, optional
+ Shape (length along each transformed axis) to use from the input.
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `x`,
+ as explained in the parameters section above.
+ The length of the last axis transformed will be ``s[-1]//2+1``,
+ while the remaining transformed axes will have lengths according to
+ `s`, or unchanged from the input.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `x`.
+
+ See Also
+ --------
+ hfftn : The forward N-D FFT of Hermitian input.
+ hfft : The 1-D FFT of Hermitian input.
+ fft : The 1-D FFT, with definitions and conventions used.
+ fftn : The N-D FFT.
+ hfft2 : The 2-D FFT of Hermitian input.
+
+ Notes
+ -----
+
+ The transform for real input is performed over the last transformation
+ axis, as by `ihfft`, then the transform over the remaining axes is
+ performed as by `ifftn`. The order of the output is the positive part of
+ the Hermitian output signal, in the same format as `rfft`.
+
+ Examples
+ --------
+ >>> import scipy.fft
+ >>> x = np.ones((2, 2, 2))
+ >>> scipy.fft.ihfftn(x)
+ array([[[1.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
+ >>> scipy.fft.ihfftn(x, axes=(2, 0))
+ array([[[1.+0.j, 0.+0.j], # may vary
+ [1.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+ plan=None):
+ """
+ Compute the 2-D inverse FFT of a real spectrum.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array
+ s : sequence of ints, optional
+ Shape of the real input to the inverse FFT.
+ axes : sequence of ints, optional
+ The axes over which to compute the inverse fft.
+ Default is the last two axes.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see `fft`). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ See :func:`fft` for more details.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+ plan: object, optional
+ This argument is reserved for passing in a precomputed plan provided
+ by downstream FFT vendors. It is currently not used in SciPy.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ out : ndarray
+ The result of the inverse real 2-D FFT.
+
+ See Also
+ --------
+ ihfftn : Compute the inverse of the N-D FFT of Hermitian input.
+
+ Notes
+ -----
+ This is really `ihfftn` with different defaults.
+ For more details see `ihfftn`.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_debug_backends.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_debug_backends.py
new file mode 100644
index 0000000..c9647c5
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_debug_backends.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+class NumPyBackend:
+ """Backend that uses numpy.fft"""
+ __ua_domain__ = "numpy.scipy.fft"
+
+ @staticmethod
+ def __ua_function__(method, args, kwargs):
+ kwargs.pop("overwrite_x", None)
+
+ fn = getattr(np.fft, method.__name__, None)
+ return (NotImplemented if fn is None
+ else fn(*args, **kwargs))
+
+
+class EchoBackend:
+ """Backend that just prints the __ua_function__ arguments"""
+ __ua_domain__ = "numpy.scipy.fft"
+
+ @staticmethod
+ def __ua_function__(method, args, kwargs):
+ print(method, args, kwargs, sep='\n')
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_helper.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_helper.py
new file mode 100644
index 0000000..676b397
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_helper.py
@@ -0,0 +1,99 @@
+from functools import update_wrapper, lru_cache
+
+from ._pocketfft import helper as _helper
+
+
+def next_fast_len(target, real=False):
+ """Find the next fast size of input data to ``fft``, for zero-padding, etc.
+
+ SciPy's FFT algorithms gain their speed by a recursive divide and conquer
+ strategy. This relies on efficient functions for small prime factors of the
+ input length. Thus, the transforms are fastest when using composites of the
+ prime factors handled by the fft implementation. If there are efficient
+ functions for all radices <= `n`, then the result will be a number `x`
+ >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth
+ numbers)
+
+ Parameters
+ ----------
+ target : int
+ Length to start searching from. Must be a positive integer.
+ real : bool, optional
+ True if the FFT involves real input or output (e.g., `rfft` or `hfft`
+ but not `fft`). Defaults to False.
+
+ Returns
+ -------
+ out : int
+ The smallest fast length greater than or equal to ``target``.
+
+ Notes
+ -----
+ The result of this function may change in future as performance
+ considerations change, for example, if new prime factors are added.
+
+ Calling `fft` or `ifft` with real input data performs an ``'R2C'``
+ transform internally.
+
+ Examples
+ --------
+ On a particular machine, an FFT of prime length takes 11.4 ms:
+
+ >>> from scipy import fft
+ >>> min_len = 93059 # prime length is worst case for speed
+ >>> a = np.random.randn(min_len)
+ >>> b = fft.fft(a)
+
+ Zero-padding to the next regular length reduces computation time to
+ 1.6 ms, a speedup of 7.3 times:
+
+ >>> fft.next_fast_len(min_len, real=True)
+ 93312
+ >>> b = fft.fft(a, 93312)
+
+ Rounding up to the next power of 2 is not optimal, taking 3.0 ms to
+ compute; 1.9 times longer than the size given by ``next_fast_len``:
+
+ >>> b = fft.fft(a, 131072)
+
+ """
+ pass
+
+
+# Directly wrap the c-function good_size but take the docstring etc., from the
+# next_fast_len function above
+next_fast_len = update_wrapper(lru_cache()(_helper.good_size), next_fast_len)
+next_fast_len.__wrapped__ = _helper.good_size
+
+
+def _init_nd_shape_and_axes(x, shape, axes):
+ """Handle shape and axes arguments for N-D transforms.
+
+ Returns the shape and axes in a standard form, taking into account negative
+ values and checking for various potential errors.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ shape : int or array_like of ints or None
+ The shape of the result. If both `shape` and `axes` (see below) are
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+ not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+ If `shape` is -1, the size of the corresponding dimension of `x` is
+ used.
+ axes : int or array_like of ints or None
+ Axes along which the calculation is computed.
+ The default is over all axes.
+ Negative indices are automatically converted to their positive
+ counterparts.
+
+ Returns
+ -------
+ shape : array
+ The shape of the result. It is a 1-D integer array.
+ axes : array
+ The axes of the result. It is a 1-D integer array.
+
+ """
+ return _helper._init_nd_shape_and_axes(x, shape, axes)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/LICENSE.md b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/LICENSE.md
new file mode 100644
index 0000000..1b5163d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/LICENSE.md
@@ -0,0 +1,25 @@
+Copyright (C) 2010-2019 Max-Planck-Society
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+* Neither the name of the copyright holder nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/__init__.py
new file mode 100644
index 0000000..0671484
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/__init__.py
@@ -0,0 +1,9 @@
+""" FFT backend using pypocketfft """
+
+from .basic import *
+from .realtransforms import *
+from .helper import *
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/basic.py
new file mode 100644
index 0000000..443f6b3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/basic.py
@@ -0,0 +1,297 @@
+"""
+Discrete Fourier Transforms - basic.py
+"""
+import numpy as np
+import functools
+from . import pypocketfft as pfft
+from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
+ _fix_shape, _fix_shape_1d, _normalization,
+ _workers)
+
+def c2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
+ workers=None, *, plan=None):
+ """ Return discrete Fourier transform of real or complex sequence. """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ tmp = _asfarray(x)
+ overwrite_x = overwrite_x or _datacopied(tmp, x)
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+
+ if n is not None:
+ tmp, copied = _fix_shape_1d(tmp, n, axis)
+ overwrite_x = overwrite_x or copied
+ elif tmp.shape[axis] < 1:
+ raise ValueError("invalid number of data points ({0}) specified"
+ .format(tmp.shape[axis]))
+
+ out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None)
+
+ return pfft.c2c(tmp, (axis,), forward, norm, out, workers)
+
+
+fft = functools.partial(c2c, True)
+fft.__name__ = 'fft'
+ifft = functools.partial(c2c, False)
+ifft.__name__ = 'ifft'
+
+
+def r2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
+ workers=None, *, plan=None):
+ """
+ Discrete Fourier transform of a real sequence.
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ tmp = _asfarray(x)
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+
+ if not np.isrealobj(tmp):
+ raise TypeError("x must be a real sequence")
+
+ if n is not None:
+ tmp, _ = _fix_shape_1d(tmp, n, axis)
+ elif tmp.shape[axis] < 1:
+ raise ValueError("invalid number of data points ({0}) specified"
+ .format(tmp.shape[axis]))
+
+ # Note: overwrite_x is not utilised
+ return pfft.r2c(tmp, (axis,), forward, norm, None, workers)
+
+
+rfft = functools.partial(r2c, True)
+rfft.__name__ = 'rfft'
+ihfft = functools.partial(r2c, False)
+ihfft.__name__ = 'ihfft'
+
+
+def c2r(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
+ workers=None, *, plan=None):
+ """
+ Return inverse discrete Fourier transform of real sequence x.
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ tmp = _asfarray(x)
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+
+ # TODO: Optimize for hermitian and real?
+ if np.isrealobj(tmp):
+ tmp = tmp + 0.j
+
+ # Last axis utilizes hermitian symmetry
+ if n is None:
+ n = (tmp.shape[axis] - 1) * 2
+ if n < 1:
+ raise ValueError("Invalid number of data points ({0}) specified"
+ .format(n))
+ else:
+ tmp, _ = _fix_shape_1d(tmp, (n//2) + 1, axis)
+
+ # Note: overwrite_x is not utilized
+ return pfft.c2r(tmp, (axis,), n, forward, norm, None, workers)
+
+
+hfft = functools.partial(c2r, True)
+hfft.__name__ = 'hfft'
+irfft = functools.partial(c2r, False)
+irfft.__name__ = 'irfft'
+
+
+def fft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+ *, plan=None):
+ """
+ 2-D discrete Fourier transform.
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ return fftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def ifft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+ *, plan=None):
+ """
+ 2-D discrete inverse Fourier transform of real or complex sequence.
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ return ifftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def rfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+ *, plan=None):
+ """
+ 2-D discrete Fourier transform of a real sequence
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ return rfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def irfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+ *, plan=None):
+ """
+ 2-D discrete inverse Fourier transform of a real sequence
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ return irfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def hfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+ *, plan=None):
+ """
+ 2-D discrete Fourier transform of a Hermitian sequence
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ return hfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def ihfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+ *, plan=None):
+ """
+ 2-D discrete inverse Fourier transform of a Hermitian sequence
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ return ihfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def c2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None, *, plan=None):
+ """
+ Return multidimensional discrete Fourier transform.
+ """
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ tmp = _asfarray(x)
+
+ shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+ overwrite_x = overwrite_x or _datacopied(tmp, x)
+ workers = _workers(workers)
+
+ if len(axes) == 0:
+ return x
+
+ tmp, copied = _fix_shape(tmp, shape, axes)
+ overwrite_x = overwrite_x or copied
+
+ norm = _normalization(norm, forward)
+ out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None)
+
+ return pfft.c2c(tmp, axes, forward, norm, out, workers)
+
+
+fftn = functools.partial(c2cn, True)
+fftn.__name__ = 'fftn'
+ifftn = functools.partial(c2cn, False)
+ifftn.__name__ = 'ifftn'
+
+def r2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None, *, plan=None):
+ """Return multidimensional discrete Fourier transform of real input"""
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ tmp = _asfarray(x)
+
+ if not np.isrealobj(tmp):
+ raise TypeError("x must be a real sequence")
+
+ shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+ tmp, _ = _fix_shape(tmp, shape, axes)
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+
+ if len(axes) == 0:
+ raise ValueError("at least 1 axis must be transformed")
+
+ # Note: overwrite_x is not utilized
+ return pfft.r2c(tmp, axes, forward, norm, None, workers)
+
+
+rfftn = functools.partial(r2cn, True)
+rfftn.__name__ = 'rfftn'
+ihfftn = functools.partial(r2cn, False)
+ihfftn.__name__ = 'ihfftn'
+
+
+def c2rn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None, *, plan=None):
+ """Multidimensional inverse discrete fourier transform with real output"""
+ if plan is not None:
+ raise NotImplementedError('Passing a precomputed plan is not yet '
+ 'supported by scipy.fft functions')
+ tmp = _asfarray(x)
+
+ # TODO: Optimize for hermitian and real?
+ if np.isrealobj(tmp):
+ tmp = tmp + 0.j
+
+ noshape = s is None
+ shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+
+ if len(axes) == 0:
+ raise ValueError("at least 1 axis must be transformed")
+
+ if noshape:
+ shape[-1] = (x.shape[axes[-1]] - 1) * 2
+
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+
+ # Last axis utilizes hermitian symmetry
+ lastsize = shape[-1]
+ shape[-1] = (shape[-1] // 2) + 1
+
+ tmp, _ = _fix_shape(tmp, shape, axes)
+
+ # Note: overwrite_x is not utilized
+ return pfft.c2r(tmp, axes, lastsize, forward, norm, None, workers)
+
+
+hfftn = functools.partial(c2rn, True)
+hfftn.__name__ = 'hfftn'
+irfftn = functools.partial(c2rn, False)
+irfftn.__name__ = 'irfftn'
+
+
+def r2r_fftpack(forward, x, n=None, axis=-1, norm=None, overwrite_x=False):
+ """FFT of a real sequence, returning fftpack half complex format"""
+ tmp = _asfarray(x)
+ overwrite_x = overwrite_x or _datacopied(tmp, x)
+ norm = _normalization(norm, forward)
+ workers = _workers(None)
+
+ if tmp.dtype.kind == 'c':
+ raise TypeError('x must be a real sequence')
+
+ if n is not None:
+ tmp, copied = _fix_shape_1d(tmp, n, axis)
+ overwrite_x = overwrite_x or copied
+ elif tmp.shape[axis] < 1:
+ raise ValueError("invalid number of data points ({0}) specified"
+ .format(tmp.shape[axis]))
+
+ out = (tmp if overwrite_x else None)
+
+ return pfft.r2r_fftpack(tmp, (axis,), forward, forward, norm, out, workers)
+
+
+rfft_fftpack = functools.partial(r2r_fftpack, True)
+rfft_fftpack.__name__ = 'rfft_fftpack'
+irfft_fftpack = functools.partial(r2r_fftpack, False)
+irfft_fftpack.__name__ = 'irfft_fftpack'
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/helper.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/helper.py
new file mode 100644
index 0000000..7a06d89
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/helper.py
@@ -0,0 +1,214 @@
+from numbers import Number
+import operator
+import os
+import threading
+import contextlib
+
+import numpy as np
+# good_size is exposed (and used) from this import
+from .pypocketfft import good_size
+
+_config = threading.local()
+_cpu_count = os.cpu_count()
+
+
+def _iterable_of_int(x, name=None):
+ """Convert ``x`` to an iterable sequence of int
+
+ Parameters
+ ----------
+ x : value, or sequence of values, convertible to int
+ name : str, optional
+ Name of the argument being converted, only used in the error message
+
+ Returns
+ -------
+ y : ``List[int]``
+ """
+ if isinstance(x, Number):
+ x = (x,)
+
+ try:
+ x = [operator.index(a) for a in x]
+ except TypeError as e:
+ name = name or "value"
+ raise ValueError("{} must be a scalar or iterable of integers"
+ .format(name)) from e
+
+ return x
+
+
+def _init_nd_shape_and_axes(x, shape, axes):
+ """Handles shape and axes arguments for nd transforms"""
+ noshape = shape is None
+ noaxes = axes is None
+
+ if not noaxes:
+ axes = _iterable_of_int(axes, 'axes')
+ axes = [a + x.ndim if a < 0 else a for a in axes]
+
+ if any(a >= x.ndim or a < 0 for a in axes):
+ raise ValueError("axes exceeds dimensionality of input")
+ if len(set(axes)) != len(axes):
+ raise ValueError("all axes must be unique")
+
+ if not noshape:
+ shape = _iterable_of_int(shape, 'shape')
+
+ if axes and len(axes) != len(shape):
+ raise ValueError("when given, axes and shape arguments"
+ " have to be of the same length")
+ if noaxes:
+ if len(shape) > x.ndim:
+ raise ValueError("shape requires more axes than are present")
+ axes = range(x.ndim - len(shape), x.ndim)
+
+ shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)]
+ elif noaxes:
+ shape = list(x.shape)
+ axes = range(x.ndim)
+ else:
+ shape = [x.shape[a] for a in axes]
+
+ if any(s < 1 for s in shape):
+ raise ValueError(
+ "invalid number of data points ({0}) specified".format(shape))
+
+ return shape, axes
+
+
+def _asfarray(x):
+ """
+ Convert to array with floating or complex dtype.
+
+ float16 values are also promoted to float32.
+ """
+ if not hasattr(x, "dtype"):
+ x = np.asarray(x)
+
+ if x.dtype == np.float16:
+ return np.asarray(x, np.float32)
+ elif x.dtype.kind not in 'fc':
+ return np.asarray(x, np.float64)
+
+ # Require native byte order
+ dtype = x.dtype.newbyteorder('=')
+ # Always align input
+ copy = not x.flags['ALIGNED']
+ return np.array(x, dtype=dtype, copy=copy)
+
+def _datacopied(arr, original):
+ """
+ Strict check for `arr` not sharing any data with `original`,
+ under the assumption that arr = asarray(original)
+ """
+ if arr is original:
+ return False
+ if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
+ return False
+ return arr.base is None
+
+
+def _fix_shape(x, shape, axes):
+ """Internal auxiliary function for _raw_fft, _raw_fftnd."""
+ must_copy = False
+
+ # Build an nd slice with the dimensions to be read from x
+ index = [slice(None)]*x.ndim
+ for n, ax in zip(shape, axes):
+ if x.shape[ax] >= n:
+ index[ax] = slice(0, n)
+ else:
+ index[ax] = slice(0, x.shape[ax])
+ must_copy = True
+
+ index = tuple(index)
+
+ if not must_copy:
+ return x[index], False
+
+ s = list(x.shape)
+ for n, axis in zip(shape, axes):
+ s[axis] = n
+
+ z = np.zeros(s, x.dtype)
+ z[index] = x[index]
+ return z, True
+
+
+def _fix_shape_1d(x, n, axis):
+ if n < 1:
+ raise ValueError(
+ "invalid number of data points ({0}) specified".format(n))
+
+ return _fix_shape(x, (n,), (axis,))
+
+
+_NORM_MAP = {None: 0, 'backward': 0, 'ortho': 1, 'forward': 2}
+
+
+def _normalization(norm, forward):
+ """Returns the pypocketfft normalization mode from the norm argument"""
+ try:
+ inorm = _NORM_MAP[norm]
+ return inorm if forward else (2 - inorm)
+ except KeyError:
+ raise ValueError(
+ f'Invalid norm value {norm!r}, should '
+ 'be "backward", "ortho" or "forward"') from None
+
+
+def _workers(workers):
+ if workers is None:
+ return getattr(_config, 'default_workers', 1)
+
+ if workers < 0:
+ if workers >= -_cpu_count:
+ workers += 1 + _cpu_count
+ else:
+ raise ValueError("workers value out of range; got {}, must not be"
+ " less than {}".format(workers, -_cpu_count))
+ elif workers == 0:
+ raise ValueError("workers must not be zero")
+
+ return workers
+
+
+@contextlib.contextmanager
+def set_workers(workers):
+ """Context manager for the default number of workers used in `scipy.fft`
+
+ Parameters
+ ----------
+ workers : int
+ The default number of workers to use
+
+ Examples
+ --------
+ >>> from scipy import fft, signal
+ >>> x = np.random.randn(128, 64)
+ >>> with fft.set_workers(4):
+ ... y = signal.fftconvolve(x, x)
+
+ """
+ old_workers = get_workers()
+ _config.default_workers = _workers(operator.index(workers))
+ try:
+ yield
+ finally:
+ _config.default_workers = old_workers
+
+
+def get_workers():
+ """Returns the default number of workers within the current context
+
+ Examples
+ --------
+ >>> from scipy import fft
+ >>> fft.get_workers()
+ 1
+ >>> with fft.set_workers(4):
+ ... fft.get_workers()
+ 4
+ """
+ return getattr(_config, 'default_workers', 1)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/pypocketfft.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/pypocketfft.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..2d37e1a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/pypocketfft.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/realtransforms.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/realtransforms.py
new file mode 100644
index 0000000..435b278
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/realtransforms.py
@@ -0,0 +1,110 @@
+import numpy as np
+from . import pypocketfft as pfft
+from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
+ _fix_shape, _fix_shape_1d, _normalization, _workers)
+import functools
+
+
+def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None,
+ overwrite_x=False, workers=None):
+ """Forward or backward 1-D DCT/DST
+
+ Parameters
+ ----------
+ forward: bool
+ Transform direction (determines type and normalisation)
+ transform: {pypocketfft.dct, pypocketfft.dst}
+ The transform to perform
+ """
+ tmp = _asfarray(x)
+ overwrite_x = overwrite_x or _datacopied(tmp, x)
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+
+ if not forward:
+ if type == 2:
+ type = 3
+ elif type == 3:
+ type = 2
+
+ if n is not None:
+ tmp, copied = _fix_shape_1d(tmp, n, axis)
+ overwrite_x = overwrite_x or copied
+ elif tmp.shape[axis] < 1:
+ raise ValueError("invalid number of data points ({0}) specified"
+ .format(tmp.shape[axis]))
+
+ out = (tmp if overwrite_x else None)
+
+ # For complex input, transform real and imaginary components separably
+ if np.iscomplexobj(x):
+ out = np.empty_like(tmp) if out is None else out
+ transform(tmp.real, type, (axis,), norm, out.real, workers)
+ transform(tmp.imag, type, (axis,), norm, out.imag, workers)
+ return out
+
+ return transform(tmp, type, (axis,), norm, out, workers)
+
+
+dct = functools.partial(_r2r, True, pfft.dct)
+dct.__name__ = 'dct'
+idct = functools.partial(_r2r, False, pfft.dct)
+idct.__name__ = 'idct'
+
+dst = functools.partial(_r2r, True, pfft.dst)
+dst.__name__ = 'dst'
+idst = functools.partial(_r2r, False, pfft.dst)
+idst.__name__ = 'idst'
+
+
+def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None,
+ overwrite_x=False, workers=None):
+ """Forward or backward nd DCT/DST
+
+ Parameters
+ ----------
+ forward: bool
+ Transform direction (determines type and normalisation)
+ transform: {pypocketfft.dct, pypocketfft.dst}
+ The transform to perform
+ """
+ tmp = _asfarray(x)
+
+ shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+ overwrite_x = overwrite_x or _datacopied(tmp, x)
+
+ if len(axes) == 0:
+ return x
+
+ tmp, copied = _fix_shape(tmp, shape, axes)
+ overwrite_x = overwrite_x or copied
+
+ if not forward:
+ if type == 2:
+ type = 3
+ elif type == 3:
+ type = 2
+
+ norm = _normalization(norm, forward)
+ workers = _workers(workers)
+ out = (tmp if overwrite_x else None)
+
+ # For complex input, transform real and imaginary components separably
+ if np.iscomplexobj(x):
+ out = np.empty_like(tmp) if out is None else out
+ transform(tmp.real, type, axes, norm, out.real, workers)
+ transform(tmp.imag, type, axes, norm, out.imag, workers)
+ return out
+
+ return transform(tmp, type, axes, norm, out, workers)
+
+
+dctn = functools.partial(_r2rn, True, pfft.dct)
+dctn.__name__ = 'dctn'
+idctn = functools.partial(_r2rn, False, pfft.dct)
+idctn.__name__ = 'idctn'
+
+dstn = functools.partial(_r2rn, True, pfft.dst)
+dstn.__name__ = 'dstn'
+idstn = functools.partial(_r2rn, False, pfft.dst)
+idstn.__name__ = 'idstn'
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/setup.py
new file mode 100644
index 0000000..7e44565
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/setup.py
@@ -0,0 +1,49 @@
+
+def pre_build_hook(build_ext, ext):
+ from scipy._build_utils.compiler_helper import (
+ set_cxx_flags_hook, try_add_flag, try_compile, has_flag)
+ cc = build_ext._cxx_compiler
+ args = ext.extra_compile_args
+
+ set_cxx_flags_hook(build_ext, ext)
+
+ if cc.compiler_type == 'msvc':
+ args.append('/EHsc')
+ else:
+ # Use pthreads if available
+        has_pthreads = try_compile(cc, code='#include <pthread.h>\n'
+ 'int main(int argc, char **argv) {}')
+ if has_pthreads:
+ ext.define_macros.append(('POCKETFFT_PTHREADS', None))
+ if has_flag(cc, '-pthread'):
+ args.append('-pthread')
+ ext.extra_link_args.append('-pthread')
+ else:
+ raise RuntimeError("Build failed: System has pthreads header "
+ "but could not compile with -pthread option")
+
+ # Don't export library symbols
+ try_add_flag(args, cc, '-fvisibility=hidden')
+
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ import pybind11
+ include_dirs = [pybind11.get_include(True), pybind11.get_include(False)]
+
+ config = Configuration('_pocketfft', parent_package, top_path)
+ ext = config.add_extension('pypocketfft',
+ sources=['pypocketfft.cxx'],
+ depends=['pocketfft_hdronly.h'],
+ include_dirs=include_dirs,
+ language='c++')
+ ext._pre_build_hook = pre_build_hook
+
+ config.add_data_files('LICENSE.md')
+ config.add_data_dir('tests')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/test_basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/test_basic.py
new file mode 100644
index 0000000..e440dee
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/test_basic.py
@@ -0,0 +1,1022 @@
+# Created by Pearu Peterson, September 2002
+
+from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
+ assert_array_almost_equal_nulp, assert_array_less,
+ assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn,
+ rfft, irfft, rfftn, irfftn, fft2,
+ hfft, ihfft, hfftn, ihfftn)
+
+from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
+ swapaxes, cdouble)
+import numpy as np
+import numpy.fft
+from numpy.random import rand
+
+# "large" composite numbers supported by FFT._PYPOCKETFFT
+LARGE_COMPOSITE_SIZES = [
+ 2**13,
+ 2**5 * 3**5,
+ 2**3 * 3**3 * 5**2,
+]
+SMALL_COMPOSITE_SIZES = [
+ 2,
+ 2*3*5,
+ 2*2*3*3,
+]
+# prime
+LARGE_PRIME_SIZES = [
+ 2011
+]
+SMALL_PRIME_SIZES = [
+ 29
+]
+
+
+def _assert_close_in_norm(x, y, rtol, size, rdt):
+ # helper function for testing
+ err_msg = "size: %s rdt: %s" % (size, rdt)
+ assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
+
+
+def random(size):
+ return rand(*size)
+
+def swap_byteorder(arr):
+ """Returns the same array with swapped byteorder"""
+ dtype = arr.dtype.newbyteorder('S')
+ return arr.astype(dtype)
+
+def get_mat(n):
+ data = arange(n)
+ data = add.outer(data, data)
+ return data
+
+
+def direct_dft(x):
+ x = asarray(x)
+ n = len(x)
+ y = zeros(n, dtype=cdouble)
+ w = -arange(n)*(2j*pi/n)
+ for i in range(n):
+ y[i] = dot(exp(i*w), x)
+ return y
+
+
+def direct_idft(x):
+ x = asarray(x)
+ n = len(x)
+ y = zeros(n, dtype=cdouble)
+ w = arange(n)*(2j*pi/n)
+ for i in range(n):
+ y[i] = dot(exp(i*w), x)/n
+ return y
+
+
+def direct_dftn(x):
+ x = asarray(x)
+ for axis in range(len(x.shape)):
+ x = fft(x, axis=axis)
+ return x
+
+
+def direct_idftn(x):
+ x = asarray(x)
+ for axis in range(len(x.shape)):
+ x = ifft(x, axis=axis)
+ return x
+
+
+def direct_rdft(x):
+ x = asarray(x)
+ n = len(x)
+ w = -arange(n)*(2j*pi/n)
+ y = zeros(n//2+1, dtype=cdouble)
+ for i in range(n//2+1):
+ y[i] = dot(exp(i*w), x)
+ return y
+
+
+def direct_irdft(x, n):
+ x = asarray(x)
+ x1 = zeros(n, dtype=cdouble)
+ for i in range(n//2+1):
+ x1[i] = x[i]
+ if i > 0 and 2*i < n:
+ x1[n-i] = np.conj(x[i])
+ return direct_idft(x1).real
+
+
+def direct_rdftn(x):
+ return fftn(rfft(x), axes=range(x.ndim - 1))
+
+
+class _TestFFTBase(object):
+ def setup_method(self):
+ self.cdt = None
+ self.rdt = None
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
+ y = fft(x)
+ assert_equal(y.dtype, self.cdt)
+ y1 = direct_dft(x)
+ assert_array_almost_equal(y,y1)
+ x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
+ assert_array_almost_equal(fft(x),direct_dft(x))
+
+ def test_n_argument_real(self):
+ x1 = np.array([1,2,3,4], dtype=self.rdt)
+ x2 = np.array([1,2,3,4], dtype=self.rdt)
+ y = fft([x1,x2],n=4)
+ assert_equal(y.dtype, self.cdt)
+ assert_equal(y.shape,(2,4))
+ assert_array_almost_equal(y[0],direct_dft(x1))
+ assert_array_almost_equal(y[1],direct_dft(x2))
+
+ def _test_n_argument_complex(self):
+ x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
+ x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
+ y = fft([x1,x2],n=4)
+ assert_equal(y.dtype, self.cdt)
+ assert_equal(y.shape,(2,4))
+ assert_array_almost_equal(y[0],direct_dft(x1))
+ assert_array_almost_equal(y[1],direct_dft(x2))
+
+ def test_djbfft(self):
+ for i in range(2,14):
+ n = 2**i
+ x = np.arange(n)
+ y = fft(x.astype(complex))
+ y2 = numpy.fft.fft(x)
+ assert_array_almost_equal(y,y2)
+ y = fft(x)
+ assert_array_almost_equal(y,y2)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, fft, [])
+ assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
+
+
+class TestLongDoubleFFT(_TestFFTBase):
+ def setup_method(self):
+ self.cdt = np.longcomplex
+ self.rdt = np.longdouble
+
+
+class TestDoubleFFT(_TestFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+
+
+class TestSingleFFT(_TestFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+
+
+class TestFloat16FFT(object):
+
+ def test_1_argument_real(self):
+ x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+ y = fft(x1, n=4)
+ assert_equal(y.dtype, np.complex64)
+ assert_equal(y.shape, (4, ))
+ assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
+
+ def test_n_argument_real(self):
+ x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+ x2 = np.array([1, 2, 3, 4], dtype=np.float16)
+ y = fft([x1, x2], n=4)
+ assert_equal(y.dtype, np.complex64)
+ assert_equal(y.shape, (2, 4))
+ assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
+ assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
+
+
+class _TestIFFTBase(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
+ y = ifft(x)
+ y1 = direct_idft(x)
+ assert_equal(y.dtype, self.cdt)
+ assert_array_almost_equal(y,y1)
+
+ x = np.array([1,2,3,4+0j,5], self.cdt)
+ assert_array_almost_equal(ifft(x),direct_idft(x))
+
+ def test_definition_real(self):
+ x = np.array([1,2,3,4,1,2,3,4], self.rdt)
+ y = ifft(x)
+ assert_equal(y.dtype, self.cdt)
+ y1 = direct_idft(x)
+ assert_array_almost_equal(y,y1)
+
+ x = np.array([1,2,3,4,5], dtype=self.rdt)
+ assert_equal(y.dtype, self.cdt)
+ assert_array_almost_equal(ifft(x),direct_idft(x))
+
+ def test_djbfft(self):
+ for i in range(2,14):
+ n = 2**i
+ x = np.arange(n)
+ y = ifft(x.astype(self.cdt))
+ y2 = numpy.fft.ifft(x)
+ assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
+ y = ifft(x)
+ assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
+
+ def test_random_complex(self):
+ for size in [1,51,111,100,200,64,128,256,1024]:
+ x = random([size]).astype(self.cdt)
+ x = random([size]).astype(self.cdt) + 1j*x
+ y1 = ifft(fft(x))
+ y2 = fft(ifft(x))
+ assert_equal(y1.dtype, self.cdt)
+ assert_equal(y2.dtype, self.cdt)
+ assert_array_almost_equal(y1, x)
+ assert_array_almost_equal(y2, x)
+
+ def test_random_real(self):
+ for size in [1,51,111,100,200,64,128,256,1024]:
+ x = random([size]).astype(self.rdt)
+ y1 = ifft(fft(x))
+ y2 = fft(ifft(x))
+ assert_equal(y1.dtype, self.cdt)
+ assert_equal(y2.dtype, self.cdt)
+ assert_array_almost_equal(y1, x)
+ assert_array_almost_equal(y2, x)
+
+ def test_size_accuracy(self):
+ # Sanity check for the accuracy for prime and non-prime sized inputs
+ for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+ np.random.seed(1234)
+ x = np.random.rand(size).astype(self.rdt)
+ y = ifft(fft(x))
+ _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+ y = fft(ifft(x))
+ _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+
+ x = (x + 1j*np.random.rand(size)).astype(self.cdt)
+ y = ifft(fft(x))
+ _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+ y = fft(ifft(x))
+ _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, ifft, [])
+ assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
+
+
+@pytest.mark.skipif(np.longdouble is np.float64,
+ reason="Long double is aliased to double")
+class TestLongDoubleIFFT(_TestIFFTBase):
+ def setup_method(self):
+ self.cdt = np.longcomplex
+ self.rdt = np.longdouble
+ self.rtol = 1e-10
+ self.atol = 1e-10
+
+
+class TestDoubleIFFT(_TestIFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+ self.rtol = 1e-10
+ self.atol = 1e-10
+
+
+class TestSingleIFFT(_TestIFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+ self.rtol = 1e-5
+ self.atol = 1e-4
+
+
+class _TestRFFTBase(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
+ x = np.array(t, dtype=self.rdt)
+ y = rfft(x)
+ y1 = direct_rdft(x)
+ assert_array_almost_equal(y,y1)
+ assert_equal(y.dtype, self.cdt)
+
+ def test_djbfft(self):
+ for i in range(2,14):
+ n = 2**i
+ x = np.arange(n)
+ y1 = np.fft.rfft(x)
+ y = rfft(x)
+ assert_array_almost_equal(y,y1)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, rfft, [])
+ assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
+
+ def test_complex_input(self):
+ x = np.zeros(10, dtype=self.cdt)
+ with assert_raises(TypeError, match="x must be a real sequence"):
+ rfft(x)
+
+ # See gh-5790
+ class MockSeries(object):
+ def __init__(self, data):
+ self.data = np.asarray(data)
+
+ def __getattr__(self, item):
+ try:
+ return getattr(self.data, item)
+ except AttributeError as e:
+ raise AttributeError(("'MockSeries' object "
+ "has no attribute '{attr}'".
+ format(attr=item))) from e
+
+ def test_non_ndarray_with_dtype(self):
+ x = np.array([1., 2., 3., 4., 5.])
+ xs = _TestRFFTBase.MockSeries(x)
+
+ expected = [1, 2, 3, 4, 5]
+ rfft(xs)
+
+ # Data should not have been overwritten
+ assert_equal(x, expected)
+ assert_equal(xs.data, expected)
+
+@pytest.mark.skipif(np.longfloat is np.float64,
+ reason="Long double is aliased to double")
+class TestRFFTLongDouble(_TestRFFTBase):
+ def setup_method(self):
+ self.cdt = np.longcomplex
+ self.rdt = np.longfloat
+
+
+class TestRFFTDouble(_TestRFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+
+
+class TestRFFTSingle(_TestRFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+
+
+class _TestIRFFTBase(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x1 = [1,2+3j,4+1j,1+2j,3+4j]
+ x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
+ x1 = x1_1[:5]
+ x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
+ x2 = x2_1[:5]
+
+ def _test(x, xr):
+ y = irfft(np.array(x, dtype=self.cdt), n=len(xr))
+ y1 = direct_irdft(x, len(xr))
+ assert_equal(y.dtype, self.rdt)
+ assert_array_almost_equal(y,y1, decimal=self.ndec)
+ assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
+
+ _test(x1, x1_1)
+ _test(x2, x2_1)
+
+ def test_djbfft(self):
+ for i in range(2,14):
+ n = 2**i
+ x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2)
+ x[0] = 0
+ if n % 2 == 0:
+ x[-1] = np.real(x[-1])
+ y1 = np.fft.irfft(x)
+ y = irfft(x)
+ assert_array_almost_equal(y,y1)
+
+ def test_random_real(self):
+ for size in [1,51,111,100,200,64,128,256,1024]:
+ x = random([size]).astype(self.rdt)
+ y1 = irfft(rfft(x), n=size)
+ y2 = rfft(irfft(x, n=(size*2-1)))
+ assert_equal(y1.dtype, self.rdt)
+ assert_equal(y2.dtype, self.cdt)
+ assert_array_almost_equal(y1, x, decimal=self.ndec,
+ err_msg="size=%d" % size)
+ assert_array_almost_equal(y2, x, decimal=self.ndec,
+ err_msg="size=%d" % size)
+
+ def test_size_accuracy(self):
+ # Sanity check for the accuracy for prime and non-prime sized inputs
+ if self.rdt == np.float32:
+ rtol = 1e-5
+ elif self.rdt == np.float64:
+ rtol = 1e-10
+
+ for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+ np.random.seed(1234)
+ x = np.random.rand(size).astype(self.rdt)
+ y = irfft(rfft(x), len(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+ y = rfft(irfft(x, 2 * len(x) - 1))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, irfft, [])
+ assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
+
+
+# self.ndec is bogus; we should have a assert_array_approx_equal for number of
+# significant digits
+
+@pytest.mark.skipif(np.longfloat is np.float64,
+ reason="Long double is aliased to double")
+class TestIRFFTLongDouble(_TestIRFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+ self.ndec = 14
+
+
+class TestIRFFTDouble(_TestIRFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+ self.ndec = 14
+
+
+class TestIRFFTSingle(_TestIRFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+ self.ndec = 5
+
+
+class Testfft2(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_regression_244(self):
+ """FFT returns wrong result with axes parameter."""
+ # fftn (and hence fft2) used to break when both axes and shape were
+ # used
+ x = numpy.ones((4, 4, 2))
+ y = fft2(x, s=(8, 8), axes=(-3, -2))
+ y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
+ assert_array_almost_equal(y, y_r)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, fft2, [[]])
+ assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
+
+
+class TestFftnSingle(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ y = fftn(np.array(x, np.float32))
+ assert_(y.dtype == np.complex64,
+ msg="double precision output with single precision")
+
+ y_r = np.array(fftn(x), np.complex64)
+ assert_array_almost_equal_nulp(y, y_r)
+
+ @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+ def test_size_accuracy_small(self, size):
+ x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+ y1 = fftn(x.real.astype(np.float32))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 2000)
+
+ @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+ def test_size_accuracy_large(self, size):
+ x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+ y1 = fftn(x.real.astype(np.float32))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 2000)
+
+ def test_definition_float16(self):
+ x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ y = fftn(np.array(x, np.float16))
+ assert_equal(y.dtype, np.complex64)
+ y_r = np.array(fftn(x), np.complex64)
+ assert_array_almost_equal_nulp(y, y_r)
+
+ @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+ def test_float16_input_small(self, size):
+ x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+ y1 = fftn(x.real.astype(np.float16))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 5e5)
+
+ @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+ def test_float16_input_large(self, size):
+ x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+ y1 = fftn(x.real.astype(np.float16))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 2e6)
+
+
+class TestFftn(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ y = fftn(x)
+ assert_array_almost_equal(y, direct_dftn(x))
+
+ x = random((20, 26))
+ assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+ x = random((5, 4, 3, 20))
+ assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+ def test_axes_argument(self):
+ # plane == ji_plane, x== kji_space
+ plane1 = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ plane2 = [[10, 11, 12],
+ [13, 14, 15],
+ [16, 17, 18]]
+ plane3 = [[19, 20, 21],
+ [22, 23, 24],
+ [25, 26, 27]]
+ ki_plane1 = [[1, 2, 3],
+ [10, 11, 12],
+ [19, 20, 21]]
+ ki_plane2 = [[4, 5, 6],
+ [13, 14, 15],
+ [22, 23, 24]]
+ ki_plane3 = [[7, 8, 9],
+ [16, 17, 18],
+ [25, 26, 27]]
+ jk_plane1 = [[1, 10, 19],
+ [4, 13, 22],
+ [7, 16, 25]]
+ jk_plane2 = [[2, 11, 20],
+ [5, 14, 23],
+ [8, 17, 26]]
+ jk_plane3 = [[3, 12, 21],
+ [6, 15, 24],
+ [9, 18, 27]]
+ kj_plane1 = [[1, 4, 7],
+ [10, 13, 16], [19, 22, 25]]
+ kj_plane2 = [[2, 5, 8],
+ [11, 14, 17], [20, 23, 26]]
+ kj_plane3 = [[3, 6, 9],
+ [12, 15, 18], [21, 24, 27]]
+ ij_plane1 = [[1, 4, 7],
+ [2, 5, 8],
+ [3, 6, 9]]
+ ij_plane2 = [[10, 13, 16],
+ [11, 14, 17],
+ [12, 15, 18]]
+ ij_plane3 = [[19, 22, 25],
+ [20, 23, 26],
+ [21, 24, 27]]
+ ik_plane1 = [[1, 10, 19],
+ [2, 11, 20],
+ [3, 12, 21]]
+ ik_plane2 = [[4, 13, 22],
+ [5, 14, 23],
+ [6, 15, 24]]
+ ik_plane3 = [[7, 16, 25],
+ [8, 17, 26],
+ [9, 18, 27]]
+ ijk_space = [jk_plane1, jk_plane2, jk_plane3]
+ ikj_space = [kj_plane1, kj_plane2, kj_plane3]
+ jik_space = [ik_plane1, ik_plane2, ik_plane3]
+ jki_space = [ki_plane1, ki_plane2, ki_plane3]
+ kij_space = [ij_plane1, ij_plane2, ij_plane3]
+ x = array([plane1, plane2, plane3])
+
+ assert_array_almost_equal(fftn(x),
+ fftn(x, axes=(-3, -2, -1))) # kji_space
+ assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
+ assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
+ y = fftn(x, axes=(2, 1, 0)) # ijk_space
+ assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
+ y = fftn(x, axes=(2, 0, 1)) # ikj_space
+ assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
+ fftn(ikj_space))
+ y = fftn(x, axes=(1, 2, 0)) # jik_space
+ assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
+ fftn(jik_space))
+ y = fftn(x, axes=(1, 0, 2)) # jki_space
+ assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
+ y = fftn(x, axes=(0, 2, 1)) # kij_space
+ assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
+
+ y = fftn(x, axes=(-2, -1)) # ji_plane
+ assert_array_almost_equal(fftn(plane1), y[0])
+ assert_array_almost_equal(fftn(plane2), y[1])
+ assert_array_almost_equal(fftn(plane3), y[2])
+
+ y = fftn(x, axes=(1, 2)) # ji_plane
+ assert_array_almost_equal(fftn(plane1), y[0])
+ assert_array_almost_equal(fftn(plane2), y[1])
+ assert_array_almost_equal(fftn(plane3), y[2])
+
+ y = fftn(x, axes=(-3, -2)) # kj_plane
+ assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
+ assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
+ assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
+
+ y = fftn(x, axes=(-3, -1)) # ki_plane
+ assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
+ assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
+ assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
+
+ y = fftn(x, axes=(-1, -2)) # ij_plane
+ assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
+ assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
+ assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
+
+ y = fftn(x, axes=(-1, -3)) # ik_plane
+ assert_array_almost_equal(fftn(ik_plane1),
+ swapaxes(y[:, 0, :], -1, -2))
+ assert_array_almost_equal(fftn(ik_plane2),
+ swapaxes(y[:, 1, :], -1, -2))
+ assert_array_almost_equal(fftn(ik_plane3),
+ swapaxes(y[:, 2, :], -1, -2))
+
+ y = fftn(x, axes=(-2, -3)) # jk_plane
+ assert_array_almost_equal(fftn(jk_plane1),
+ swapaxes(y[:, :, 0], -1, -2))
+ assert_array_almost_equal(fftn(jk_plane2),
+ swapaxes(y[:, :, 1], -1, -2))
+ assert_array_almost_equal(fftn(jk_plane3),
+ swapaxes(y[:, :, 2], -1, -2))
+
+ y = fftn(x, axes=(-1,)) # i_line
+ for i in range(3):
+ for j in range(3):
+ assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
+ y = fftn(x, axes=(-2,)) # j_line
+ for i in range(3):
+ for j in range(3):
+ assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
+ y = fftn(x, axes=(0,)) # k_line
+ for i in range(3):
+ for j in range(3):
+ assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
+
+ y = fftn(x, axes=()) # point
+ assert_array_almost_equal(y, x)
+
+ def test_shape_argument(self):
+ small_x = [[1, 2, 3],
+ [4, 5, 6]]
+ large_x1 = [[1, 2, 3, 0],
+ [4, 5, 6, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]]
+
+ y = fftn(small_x, s=(4, 4))
+ assert_array_almost_equal(y, fftn(large_x1))
+
+ y = fftn(small_x, s=(3, 4))
+ assert_array_almost_equal(y, fftn(large_x1[:-1]))
+
+ def test_shape_axes_argument(self):
+ small_x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ large_x1 = array([[1, 2, 3, 0],
+ [4, 5, 6, 0],
+ [7, 8, 9, 0],
+ [0, 0, 0, 0]])
+ y = fftn(small_x, s=(4, 4), axes=(-2, -1))
+ assert_array_almost_equal(y, fftn(large_x1))
+ y = fftn(small_x, s=(4, 4), axes=(-1, -2))
+
+ assert_array_almost_equal(y, swapaxes(
+ fftn(swapaxes(large_x1, -1, -2)), -1, -2))
+
+ def test_shape_axes_argument2(self):
+ # Change shape of the last axis
+ x = numpy.random.random((10, 5, 3, 7))
+ y = fftn(x, axes=(-1,), s=(8,))
+ assert_array_almost_equal(y, fft(x, axis=-1, n=8))
+
+ # Change shape of an arbitrary axis which is not the last one
+ x = numpy.random.random((10, 5, 3, 7))
+ y = fftn(x, axes=(-2,), s=(8,))
+ assert_array_almost_equal(y, fft(x, axis=-2, n=8))
+
+ # Change shape of axes: cf #244, where shape and axes were mixed up
+ x = numpy.random.random((4, 4, 2))
+ y = fftn(x, axes=(-3, -2), s=(8, 8))
+ assert_array_almost_equal(y,
+ numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
+
+ def test_shape_argument_more(self):
+ x = zeros((4, 4, 2))
+ with assert_raises(ValueError,
+ match="shape requires more axes than are present"):
+ fftn(x, s=(8, 8, 2, 1))
+
+ def test_invalid_sizes(self):
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[1, 0\]\) specified"):
+ fftn([[]])
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[4, -3\]\) specified"):
+ fftn([[1, 1], [2, 2]], (4, -3))
+
+ def test_no_axes(self):
+ x = numpy.random.random((2,2,2))
+ assert_allclose(fftn(x, axes=[]), x, atol=1e-7)
+
+
+class TestIfftn(object):
+ dtype = None
+ cdtype = None
+
+ def setup_method(self):
+ np.random.seed(1234)
+
+ @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+ [(np.float64, np.complex128, 2000),
+ (np.float32, np.complex64, 3500)])
+ def test_definition(self, dtype, cdtype, maxnlp):
+ x = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]], dtype=dtype)
+ y = ifftn(x)
+ assert_equal(y.dtype, cdtype)
+ assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
+
+ x = random((20, 26))
+ assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+ x = random((5, 4, 3, 20))
+ assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+ @pytest.mark.parametrize('maxnlp', [2000, 3500])
+ @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+ def test_random_complex(self, maxnlp, size):
+ x = random([size, size]) + 1j*random([size, size])
+ assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
+ assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
+
+ def test_invalid_sizes(self):
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[1, 0\]\) specified"):
+ ifftn([[]])
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[4, -3\]\) specified"):
+ ifftn([[1, 1], [2, 2]], (4, -3))
+
+ def test_no_axes(self):
+ x = numpy.random.random((2,2,2))
+ assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
+
+class TestRfftn(object):
+ dtype = None
+ cdtype = None
+
+ def setup_method(self):
+ np.random.seed(1234)
+
+ @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+ [(np.float64, np.complex128, 2000),
+ (np.float32, np.complex64, 3500)])
+ def test_definition(self, dtype, cdtype, maxnlp):
+ x = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]], dtype=dtype)
+ y = rfftn(x)
+ assert_equal(y.dtype, cdtype)
+ assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp)
+
+ x = random((20, 26))
+ assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
+
+ x = random((5, 4, 3, 20))
+ assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
+
+ @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+ def test_random(self, size):
+ x = random([size, size])
+ assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10)
+
+ @pytest.mark.parametrize('func', [rfftn, irfftn])
+ def test_invalid_sizes(self, func):
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[1, 0\]\) specified"):
+ func([[]])
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[4, -3\]\) specified"):
+ func([[1, 1], [2, 2]], (4, -3))
+
+ @pytest.mark.parametrize('func', [rfftn, irfftn])
+ def test_no_axes(self, func):
+ with assert_raises(ValueError,
+ match="at least 1 axis must be transformed"):
+ func([], axes=[])
+
+ def test_complex_input(self):
+ with assert_raises(TypeError, match="x must be a real sequence"):
+ rfftn(np.zeros(10, dtype=np.complex64))
+
+
+class FakeArray(object):
+ def __init__(self, data):
+ self._data = data
+ self.__array_interface__ = data.__array_interface__
+
+
+class FakeArray2(object):
+ def __init__(self, data):
+ self._data = data
+
+ def __array__(self):
+ return self._data
+
+# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
+# relied upon by users except for overwrite_x = False
+class TestOverwrite(object):
+ """Check input overwrite behavior of the FFT functions."""
+
+ real_dtypes = [np.float32, np.float64, np.longfloat]
+ dtypes = real_dtypes + [np.complex64, np.complex128, np.longcomplex]
+ fftsizes = [8, 16, 32]
+
+ def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
+ x2 = x.copy()
+ for fake in [lambda x: x, FakeArray, FakeArray2]:
+ routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
+
+ sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+ routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+ if not should_overwrite:
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+ def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
+ fftsize, overwrite_x):
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ data = np.random.randn(*shape)
+ data = data.astype(dtype)
+
+ should_overwrite = (overwrite_x
+ and dtype in overwritable_dtypes
+ and fftsize <= shape[axis])
+ self._check(data, routine, fftsize, axis,
+ overwrite_x=overwrite_x,
+ should_overwrite=should_overwrite)
+
+ @pytest.mark.parametrize('dtype', dtypes)
+ @pytest.mark.parametrize('fftsize', fftsizes)
+ @pytest.mark.parametrize('overwrite_x', [True, False])
+ @pytest.mark.parametrize('shape,axes', [((16,), -1),
+ ((16, 2), 0),
+ ((2, 16), 1)])
+ def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
+ overwritable = (np.longcomplex, np.complex128, np.complex64)
+ self._check_1d(fft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+ self._check_1d(ifft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+
+ @pytest.mark.parametrize('dtype', real_dtypes)
+ @pytest.mark.parametrize('fftsize', fftsizes)
+ @pytest.mark.parametrize('overwrite_x', [True, False])
+ @pytest.mark.parametrize('shape,axes', [((16,), -1),
+ ((16, 2), 0),
+ ((2, 16), 1)])
+ def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
+ overwritable = self.real_dtypes
+ self._check_1d(irfft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+ self._check_1d(rfft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+
+ def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
+ overwrite_x):
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ data = np.random.randn(*shape)
+ data = data.astype(dtype)
+
+ def fftshape_iter(shp):
+ if len(shp) <= 0:
+ yield ()
+ else:
+ for j in (shp[0]//2, shp[0], shp[0]*2):
+ for rest in fftshape_iter(shp[1:]):
+ yield (j,) + rest
+
+ def part_shape(shape, axes):
+ if axes is None:
+ return shape
+ else:
+ return tuple(np.take(shape, axes))
+
+ def should_overwrite(data, shape, axes):
+ s = part_shape(data.shape, axes)
+ return (overwrite_x and
+ np.prod(shape) <= np.prod(s)
+ and dtype in overwritable_dtypes)
+
+ for fftshape in fftshape_iter(part_shape(shape, axes)):
+ self._check(data, routine, fftshape, axes,
+ overwrite_x=overwrite_x,
+ should_overwrite=should_overwrite(data, fftshape, axes))
+ if data.ndim > 1:
+ # check fortran order
+ self._check(data.T, routine, fftshape, axes,
+ overwrite_x=overwrite_x,
+ should_overwrite=should_overwrite(
+ data.T, fftshape, axes))
+
+ @pytest.mark.parametrize('dtype', dtypes)
+ @pytest.mark.parametrize('overwrite_x', [True, False])
+ @pytest.mark.parametrize('shape,axes', [((16,), None),
+ ((16,), (0,)),
+ ((16, 2), (0,)),
+ ((2, 16), (1,)),
+ ((8, 16), None),
+ ((8, 16), (0, 1)),
+ ((8, 16, 2), (0, 1)),
+ ((8, 16, 2), (1, 2)),
+ ((8, 16, 2), (0,)),
+ ((8, 16, 2), (1,)),
+ ((8, 16, 2), (2,)),
+ ((8, 16, 2), None),
+ ((8, 16, 2), (0, 1, 2))])
+ def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
+ overwritable = (np.longcomplex, np.complex128, np.complex64)
+ self._check_nd_one(fftn, dtype, shape, axes, overwritable,
+ overwrite_x)
+ self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
+ overwrite_x)
+
+
+@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
+ rfft, irfft, rfftn, irfftn])
+def test_invalid_norm(func):
+ x = np.arange(10, dtype=float)
+ with assert_raises(ValueError,
+ match='Invalid norm value \'o\', should be'
+ ' "backward", "ortho" or "forward"'):
+ func(x, norm='o')
+
+
+@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
+ irfft, irfftn, hfft, hfftn])
+def test_swapped_byte_order_complex(func):
+ rng = np.random.RandomState(1234)
+ x = rng.rand(10) + 1j * rng.rand(10)
+ assert_allclose(func(swap_byteorder(x)), func(x))
+
+
+@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn])
+def test_swapped_byte_order_real(func):
+ rng = np.random.RandomState(1234)
+ x = rng.rand(10)
+ assert_allclose(func(swap_byteorder(x)), func(x))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/test_real_transforms.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/test_real_transforms.py
new file mode 100644
index 0000000..b5d4846
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_pocketfft/tests/test_real_transforms.py
@@ -0,0 +1,487 @@
+from os.path import join, dirname
+
+import numpy as np
+from numpy.testing import (
+ assert_array_almost_equal, assert_equal, assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.fft._pocketfft.realtransforms import (
+ dct, idct, dst, idst, dctn, idctn, dstn, idstn)
+
+fftpack_test_dir = join(dirname(__file__), '..', '..', '..', 'fftpack', 'tests')
+
+MDATA_COUNT = 8
+FFTWDATA_COUNT = 14
+
+def is_longdouble_binary_compatible():
+ try:
+ one = np.frombuffer(
+ b'\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x00\x00\x00\x00\x00',
+ dtype=' decimal
+dec_map = {
+ # DCT
+ (dct, np.double, 1): 13,
+ (dct, np.float32, 1): 6,
+
+ (dct, np.double, 2): 14,
+ (dct, np.float32, 2): 5,
+
+ (dct, np.double, 3): 14,
+ (dct, np.float32, 3): 5,
+
+ (dct, np.double, 4): 13,
+ (dct, np.float32, 4): 6,
+
+ # IDCT
+ (idct, np.double, 1): 14,
+ (idct, np.float32, 1): 6,
+
+ (idct, np.double, 2): 14,
+ (idct, np.float32, 2): 5,
+
+ (idct, np.double, 3): 14,
+ (idct, np.float32, 3): 5,
+
+ (idct, np.double, 4): 14,
+ (idct, np.float32, 4): 6,
+
+ # DST
+ (dst, np.double, 1): 13,
+ (dst, np.float32, 1): 6,
+
+ (dst, np.double, 2): 14,
+ (dst, np.float32, 2): 6,
+
+ (dst, np.double, 3): 14,
+ (dst, np.float32, 3): 7,
+
+ (dst, np.double, 4): 13,
+ (dst, np.float32, 4): 6,
+
+ # IDST
+ (idst, np.double, 1): 14,
+ (idst, np.float32, 1): 6,
+
+ (idst, np.double, 2): 14,
+ (idst, np.float32, 2): 6,
+
+ (idst, np.double, 3): 14,
+ (idst, np.float32, 3): 6,
+
+ (idst, np.double, 4): 14,
+ (idst, np.float32, 4): 6,
+}
+
+for k,v in dec_map.copy().items():
+ if k[1] == np.double:
+ dec_map[(k[0], np.longdouble, k[2])] = v
+ elif k[1] == np.float32:
+ dec_map[(k[0], int, k[2])] = v
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+class TestDCT:
+ def test_definition(self, rdt, type, fftwdata_size):
+ x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)
+ y = dct(x, type=type)
+ assert_equal(y.dtype, dt)
+ dec = dec_map[(dct, rdt, type)]
+ assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))
+
+ @pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64])
+ def test_axis(self, rdt, type, size):
+ nt = 2
+ dec = dec_map[(dct, rdt, type)]
+ x = np.random.randn(nt, size)
+ y = dct(x, type=type)
+ for j in range(nt):
+ assert_array_almost_equal(y[j], dct(x[j], type=type),
+ decimal=dec)
+
+ x = x.T
+ y = dct(x, axis=0, type=type)
+ for j in range(nt):
+ assert_array_almost_equal(y[:,j], dct(x[:,j], type=type),
+ decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct1_definition_ortho(rdt, mdata_x):
+ # Test orthonormal mode.
+ dec = dec_map[(dct, rdt, 1)]
+ x = np.array(mdata_x, dtype=rdt)
+ dt = np.result_type(np.float32, rdt)
+ y = dct(x, norm='ortho', type=1)
+ y2 = naive_dct1(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct2_definition_matlab(mdata_xy, rdt):
+ # Test correspondence with MATLAB (orthonormal mode).
+ dt = np.result_type(np.float32, rdt)
+ x = np.array(mdata_xy[0], dtype=dt)
+
+ yr = mdata_xy[1]
+ y = dct(x, norm="ortho", type=2)
+ dec = dec_map[(dct, rdt, 2)]
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y, yr, decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct3_definition_ortho(mdata_x, rdt):
+ # Test orthonormal mode.
+ x = np.array(mdata_x, dtype=rdt)
+ dt = np.result_type(np.float32, rdt)
+ y = dct(x, norm='ortho', type=2)
+ xi = dct(y, norm="ortho", type=3)
+ dec = dec_map[(dct, rdt, 3)]
+ assert_equal(xi.dtype, dt)
+ assert_array_almost_equal(xi, x, decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct4_definition_ortho(mdata_x, rdt):
+ # Test orthonormal mode.
+ x = np.array(mdata_x, dtype=rdt)
+ dt = np.result_type(np.float32, rdt)
+ y = dct(x, norm='ortho', type=4)
+ y2 = naive_dct4(x, norm='ortho')
+ dec = dec_map[(dct, rdt, 4)]
+ assert_equal(y.dtype, dt)
+ assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+def test_idct_definition(fftwdata_size, rdt, type):
+ xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)
+ x = idct(yr, type=type)
+ dec = dec_map[(idct, rdt, type)]
+ assert_equal(x.dtype, dt)
+ assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+def test_definition(fftwdata_size, rdt, type):
+ xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)
+ y = dst(xr, type=type)
+ dec = dec_map[(dst, rdt, type)]
+ assert_equal(y.dtype, dt)
+ assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dst1_definition_ortho(rdt, mdata_x):
+ # Test orthonormal mode.
+ dec = dec_map[(dst, rdt, 1)]
+ x = np.array(mdata_x, dtype=rdt)
+ dt = np.result_type(np.float32, rdt)
+ y = dst(x, norm='ortho', type=1)
+ y2 = naive_dst1(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dst4_definition_ortho(rdt, mdata_x):
+ # Test orthonormal mode.
+ dec = dec_map[(dst, rdt, 4)]
+ x = np.array(mdata_x, dtype=rdt)
+ dt = np.result_type(np.float32, rdt)
+ y = dst(x, norm='ortho', type=4)
+ y2 = naive_dst4(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y, y2, decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+def test_idst_definition(fftwdata_size, rdt, type):
+ xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)
+ x = idst(yr, type=type)
+ dec = dec_map[(idst, rdt, type)]
+ assert_equal(x.dtype, dt)
+ assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
+
+
+@pytest.mark.parametrize('routine', [dct, dst, idct, idst])
+@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longfloat])
+@pytest.mark.parametrize('shape, axis', [
+ ((16,), -1), ((16, 2), 0), ((2, 16), 1)
+])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+@pytest.mark.parametrize('overwrite_x', [True, False])
+@pytest.mark.parametrize('norm', [None, 'ortho'])
+def test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x):
+ # Check input overwrite behavior
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ x = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ x = np.random.randn(*shape)
+ x = x.astype(dtype)
+ x2 = x.copy()
+ routine(x2, type, None, axis, norm, overwrite_x=overwrite_x)
+
+ sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+ routine.__name__, x.dtype, x.shape, None, axis, overwrite_x)
+ if not overwrite_x:
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+
+class Test_DCTN_IDCTN(object):
+ dec = 14
+ dct_type = [1, 2, 3, 4]
+ norms = [None, 'backward', 'ortho', 'forward']
+ rstate = np.random.RandomState(1234)
+ shape = (32, 16)
+ data = rstate.randn(*shape)
+
+ @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+ (dstn, idstn)])
+ @pytest.mark.parametrize('axes', [None,
+ 1, (1,), [1],
+ 0, (0,), [0],
+ (0, 1), [0, 1],
+ (-2, -1), [-2, -1]])
+ @pytest.mark.parametrize('dct_type', dct_type)
+ @pytest.mark.parametrize('norm', ['ortho'])
+ def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
+ tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
+ tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
+ assert_array_almost_equal(self.data, tmp, decimal=12)
+
+ @pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)])
+ @pytest.mark.parametrize('dct_type', dct_type)
+ @pytest.mark.parametrize('norm', norms)
+ def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm):
+ y1 = funcn(self.data, type=dct_type, axes=None, norm=norm)
+ y2 = ref_2d(func, self.data, type=dct_type, norm=norm)
+ assert_array_almost_equal(y1, y2, decimal=11)
+
+ @pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)])
+ @pytest.mark.parametrize('dct_type', dct_type)
+ @pytest.mark.parametrize('norm', norms)
+ def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm):
+ fdata = dctn(self.data, type=dct_type, norm=norm)
+ y1 = funcn(fdata, type=dct_type, norm=norm)
+ y2 = ref_2d(func, fdata, type=dct_type, norm=norm)
+ assert_array_almost_equal(y1, y2, decimal=11)
+
+ @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+ (dstn, idstn)])
+ def test_axes_and_shape(self, fforward, finverse):
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ fforward(self.data, s=self.data.shape[0], axes=(0, 1))
+
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ fforward(self.data, s=self.data.shape, axes=0)
+
+ @pytest.mark.parametrize('fforward', [dctn, dstn])
+ def test_shape(self, fforward):
+ tmp = fforward(self.data, s=(128, 128), axes=None)
+ assert_equal(tmp.shape, (128, 128))
+
+ @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+ (dstn, idstn)])
+ @pytest.mark.parametrize('axes', [1, (1,), [1],
+ 0, (0,), [0]])
+ def test_shape_is_none_with_axes(self, fforward, finverse, axes):
+ tmp = fforward(self.data, s=None, axes=axes, norm='ortho')
+ tmp = finverse(tmp, s=None, axes=axes, norm='ortho')
+ assert_array_almost_equal(self.data, tmp, decimal=self.dec)
+
+
+@pytest.mark.parametrize('func', [dct, dctn, idct, idctn,
+ dst, dstn, idst, idstn])
+def test_swapped_byte_order(func):
+ rng = np.random.RandomState(1234)
+ x = rng.rand(10)
+ swapped_dt = x.dtype.newbyteorder('S')
+ assert_allclose(func(x.astype(swapped_dt)), func(x))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/_realtransforms.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_realtransforms.py
new file mode 100644
index 0000000..89e2d83
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/_realtransforms.py
@@ -0,0 +1,620 @@
+from ._basic import _dispatch
+from scipy._lib.uarray import Dispatchable
+import numpy as np
+
+__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
+
+
+@_dispatch
+def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None):
+ """
+ Return multidimensional Discrete Cosine Transform along the specified axes.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DCT (see Notes). Default type is 2.
+ s : int or array_like of ints or None, optional
+ The shape of the result. If both `s` and `axes` (see below) are None,
+ `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
+ ``scipy.take(x.shape, axes, axis=0)``.
+ If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+ If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+ ``s[i]``.
+ If any element of `s` is -1, the size of the corresponding dimension of
+ `x` is used.
+ axes : int or array_like of ints or None, optional
+ Axes over which the DCT is computed. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ y : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ idctn : Inverse multidimensional DCT
+
+ Notes
+ -----
+ For full details of the DCT types and normalization modes, as well as
+ references, see `dct`.
+
+ Examples
+ --------
+ >>> from scipy.fft import dctn, idctn
+ >>> y = np.random.randn(16, 16)
+ >>> np.allclose(y, idctn(dctn(y)))
+ True
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None):
+ """
+    Return multidimensional Inverse Discrete Cosine Transform along the specified axes.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DCT (see Notes). Default type is 2.
+ s : int or array_like of ints or None, optional
+ The shape of the result. If both `s` and `axes` (see below) are
+ None, `s` is ``x.shape``; if `s` is None but `axes` is
+ not None, then `s` is ``scipy.take(x.shape, axes, axis=0)``.
+ If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+ If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+ ``s[i]``.
+ If any element of `s` is -1, the size of the corresponding dimension of
+ `x` is used.
+ axes : int or array_like of ints or None, optional
+ Axes over which the IDCT is computed. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ y : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ dctn : multidimensional DCT
+
+ Notes
+ -----
+ For full details of the IDCT types and normalization modes, as well as
+ references, see `idct`.
+
+ Examples
+ --------
+ >>> from scipy.fft import dctn, idctn
+ >>> y = np.random.randn(16, 16)
+ >>> np.allclose(y, idctn(dctn(y)))
+ True
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None):
+ """
+ Return multidimensional Discrete Sine Transform along the specified axes.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DST (see Notes). Default type is 2.
+ s : int or array_like of ints or None, optional
+ The shape of the result. If both `s` and `axes` (see below) are None,
+ `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
+ ``scipy.take(x.shape, axes, axis=0)``.
+ If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+ If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+ ``s[i]``.
+        If any element of `s` is -1, the size of the corresponding dimension
+ of `x` is used.
+ axes : int or array_like of ints or None, optional
+ Axes over which the DST is computed. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ y : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ idstn : Inverse multidimensional DST
+
+ Notes
+ -----
+ For full details of the DST types and normalization modes, as well as
+ references, see `dst`.
+
+ Examples
+ --------
+ >>> from scipy.fft import dstn, idstn
+ >>> y = np.random.randn(16, 16)
+ >>> np.allclose(y, idstn(dstn(y)))
+ True
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+ workers=None):
+ """
+    Return multidimensional Inverse Discrete Sine Transform along the specified axes.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DST (see Notes). Default type is 2.
+ s : int or array_like of ints or None, optional
+ The shape of the result. If both `s` and `axes` (see below) are None,
+ `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
+ ``scipy.take(x.shape, axes, axis=0)``.
+ If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+ If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+ ``s[i]``.
+ If any element of `s` is -1, the size of the corresponding dimension of
+ `x` is used.
+ axes : int or array_like of ints or None, optional
+ Axes over which the IDST is computed. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ y : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ dstn : multidimensional DST
+
+ Notes
+ -----
+ For full details of the IDST types and normalization modes, as well as
+ references, see `idst`.
+
+ Examples
+ --------
+ >>> from scipy.fft import dstn, idstn
+ >>> y = np.random.randn(16, 16)
+ >>> np.allclose(y, idstn(dstn(y)))
+ True
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None):
+ r"""Return the Discrete Cosine Transform of arbitrary type sequence x.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DCT (see Notes). Default type is 2.
+ n : int, optional
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+ default results in ``n = x.shape[axis]``.
+ axis : int, optional
+ Axis along which the dct is computed; the default is over the
+ last axis (i.e., ``axis=-1``).
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ y : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ idct : Inverse DCT
+
+ Notes
+ -----
+ For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
+ MATLAB ``dct(x)``.
+
+ For ``norm="backward"``, there is no scaling on `dct` and the `idct` is
+ scaled by ``1/N`` where ``N`` is the "logical" size of the DCT. For
+ ``norm="forward"`` the ``1/N`` normalization is applied to the forward
+ `dct` instead and the `idct` is unnormalized. For ``norm='ortho'`` both
+ directions are scaled by the same factor of ``1/sqrt(N)``.
+
+ There are, theoretically, 8 types of the DCT, only the first 4 types are
+    implemented in SciPy. 'The' DCT generally refers to DCT type 2, and 'the'
+ Inverse DCT generally refers to DCT type 3.
+
+ **Type I**
+
+ There are several definitions of the DCT-I; we use the following
+ (for ``norm="backward"``)
+
+ .. math::
+
+ y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
+ \frac{\pi k n}{N-1} \right)
+
+ If ``norm='ortho'``, ``x[0]`` and ``x[N-1]`` are multiplied by a scaling
+ factor of :math:`\sqrt{2}`, and ``y[k]`` is multiplied by a scaling factor
+ ``f``
+
+ .. math::
+
+ f = \begin{cases}
+ \frac{1}{2}\sqrt{\frac{1}{N-1}} & \text{if }k=0\text{ or }N-1, \\
+ \frac{1}{2}\sqrt{\frac{2}{N-1}} & \text{otherwise} \end{cases}
+
+ .. note::
+ The DCT-I is only supported for input size > 1.
+
+ **Type II**
+
+ There are several definitions of the DCT-II; we use the following
+ (for ``norm="backward"``)
+
+ .. math::
+
+ y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)
+
+ If ``norm="ortho"``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+ .. math::
+ f = \begin{cases}
+ \sqrt{\frac{1}{4N}} & \text{if }k=0, \\
+ \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
+
+ which makes the corresponding matrix of coefficients orthonormal
+ (``O @ O.T = np.eye(N)``).
+
+ **Type III**
+
+ There are several definitions, we use the following (for
+ ``norm="backward"``)
+
+ .. math::
+
+ y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+ or, for ``norm="ortho"``
+
+ .. math::
+
+ y_k = \frac{x_0}{\sqrt{N}} + \sqrt{\frac{2}{N}} \sum_{n=1}^{N-1} x_n
+ \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+ The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
+ to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
+ the orthonormalized DCT-II.
+
+ **Type IV**
+
+ There are several definitions of the DCT-IV; we use the following
+ (for ``norm="backward"``)
+
+ .. math::
+
+ y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)
+
+ If ``norm="ortho"``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+ .. math::
+
+ f = \frac{1}{\sqrt{2N}}
+
+ References
+ ----------
+ .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
+ Makhoul, `IEEE Transactions on acoustics, speech and signal
+ processing` vol. 28(1), pp. 27-34,
+ :doi:`10.1109/TASSP.1980.1163351` (1980).
+ .. [2] Wikipedia, "Discrete cosine transform",
+ https://en.wikipedia.org/wiki/Discrete_cosine_transform
+
+ Examples
+ --------
+ The Type 1 DCT is equivalent to the FFT (though faster) for real,
+ even-symmetrical inputs. The output is also real and even-symmetrical.
+ Half of the FFT input is used to generate half of the FFT output:
+
+ >>> from scipy.fft import fft, dct
+ >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
+ array([ 30., -8., 6., -2., 6., -8.])
+ >>> dct(np.array([4., 3., 5., 10.]), 1)
+ array([ 30., -8., 6., -2.])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
+ workers=None):
+ """
+ Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DCT (see Notes). Default type is 2.
+ n : int, optional
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+ default results in ``n = x.shape[axis]``.
+ axis : int, optional
+ Axis along which the idct is computed; the default is over the
+ last axis (i.e., ``axis=-1``).
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ idct : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ dct : Forward DCT
+
+ Notes
+ -----
+ For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
+ MATLAB ``idct(x)``.
+
+ 'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III.
+
+ The IDCT is equivalent to a normal DCT except for the normalization and
+ type. DCT type 1 and 4 are their own inverse and DCTs 2 and 3 are each
+ other's inverses.
+
+ Examples
+ --------
+ The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
+ inputs. The output is also real and even-symmetrical. Half of the IFFT
+ input is used to generate half of the IFFT output:
+
+ >>> from scipy.fft import ifft, idct
+ >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real
+ array([ 4., 3., 5., 10., 5., 3.])
+ >>> idct(np.array([ 30., -8., 6., -2.]), 1)
+ array([ 4., 3., 5., 10.])
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None):
+ r"""Return the Discrete Sine Transform of arbitrary type sequence x.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DST (see Notes). Default type is 2.
+ n : int, optional
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+ default results in ``n = x.shape[axis]``.
+ axis : int, optional
+ Axis along which the dst is computed; the default is over the
+ last axis (i.e., ``axis=-1``).
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ dst : ndarray of reals
+ The transformed input array.
+
+ See Also
+ --------
+ idst : Inverse DST
+
+ Notes
+ -----
+ For a single dimension array ``x``.
+
+ For ``norm="backward"``, there is no scaling on the `dst` and the `idst` is
+ scaled by ``1/N`` where ``N`` is the "logical" size of the DST. For
+ ``norm='ortho'`` both directions are scaled by the same factor
+ ``1/sqrt(N)``.
+
+ There are, theoretically, 8 types of the DST for different combinations of
+ even/odd boundary conditions and boundary off sets [1]_, only the first
+ 4 types are implemented in SciPy.
+
+ **Type I**
+
+ There are several definitions of the DST-I; we use the following for
+ ``norm="backward"``. DST-I assumes the input is odd around :math:`n=-1` and
+ :math:`n=N`.
+
+ .. math::
+
+ y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)
+
+ Note that the DST-I is only supported for input size > 1.
+ The (unnormalized) DST-I is its own inverse, up to a factor :math:`2(N+1)`.
+ The orthonormalized DST-I is exactly its own inverse.
+
+ **Type II**
+
+ There are several definitions of the DST-II; we use the following for
+ ``norm="backward"``. DST-II assumes the input is odd around :math:`n=-1/2` and
+ :math:`n=N-1/2`; the output is odd around :math:`k=-1` and even around :math:`k=N-1`
+
+ .. math::
+
+ y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)
+
+ if ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+ .. math::
+
+ f = \begin{cases}
+ \sqrt{\frac{1}{4N}} & \text{if }k = 0, \\
+ \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
+
+ **Type III**
+
+ There are several definitions of the DST-III, we use the following (for
+ ``norm="backward"``). DST-III assumes the input is odd around :math:`n=-1` and
+ even around :math:`n=N-1`
+
+ .. math::
+
+ y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
+ \frac{\pi(2k+1)(n+1)}{2N}\right)
+
+ The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
+ to a factor :math:`2N`. The orthonormalized DST-III is exactly the inverse of the
+ orthonormalized DST-II.
+
+ **Type IV**
+
+ There are several definitions of the DST-IV, we use the following (for
+ ``norm="backward"``). DST-IV assumes the input is odd around :math:`n=-0.5` and
+ even around :math:`n=N-0.5`
+
+ .. math::
+
+ y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)
+
+ The (unnormalized) DST-IV is its own inverse, up to a factor :math:`2N`. The
+ orthonormalized DST-IV is exactly its own inverse.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Discrete sine transform",
+ https://en.wikipedia.org/wiki/Discrete_sine_transform
+
+ """
+ return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
+ workers=None):
+ """
+ Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ type : {1, 2, 3, 4}, optional
+ Type of the DST (see Notes). Default type is 2.
+ n : int, optional
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+ default results in ``n = x.shape[axis]``.
+ axis : int, optional
+ Axis along which the idst is computed; the default is over the
+ last axis (i.e., ``axis=-1``).
+ norm : {"backward", "ortho", "forward"}, optional
+ Normalization mode (see Notes). Default is "backward".
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+ workers : int, optional
+ Maximum number of workers to use for parallel computation. If negative,
+ the value wraps around from ``os.cpu_count()``.
+ See :func:`~scipy.fft.fft` for more details.
+
+ Returns
+ -------
+ idst : ndarray of real
+ The transformed input array.
+
+ See Also
+ --------
+ dst : Forward DST
+
+ Notes
+ -----
+
+ 'The' IDST is the IDST-II, which is the same as the normalized DST-III.
+
+ The IDST is equivalent to a normal DST except for the normalization and
+ type. DST type 1 and 4 are their own inverse and DSTs 2 and 3 are each
+ other's inverses.
+
+ """
+ return (Dispatchable(x, np.ndarray),)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/setup.py
new file mode 100644
index 0000000..300faec
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/setup.py
@@ -0,0 +1,12 @@
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('fft', parent_package, top_path)
+ config.add_subpackage('_pocketfft')
+ config.add_data_dir('tests')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/mock_backend.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/mock_backend.py
new file mode 100644
index 0000000..9f41bfb
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/mock_backend.py
@@ -0,0 +1,56 @@
+import numpy as np
+
+class _MockFunction:
+ def __init__(self, return_value = None):
+ self.number_calls = 0
+ self.return_value = return_value
+ self.last_args = ([], {})
+
+ def __call__(self, *args, **kwargs):
+ self.number_calls += 1
+ self.last_args = (args, kwargs)
+ return self.return_value
+
+
+fft = _MockFunction(np.random.random(10))
+fft2 = _MockFunction(np.random.random(10))
+fftn = _MockFunction(np.random.random(10))
+
+ifft = _MockFunction(np.random.random(10))
+ifft2 = _MockFunction(np.random.random(10))
+ifftn = _MockFunction(np.random.random(10))
+
+rfft = _MockFunction(np.random.random(10))
+rfft2 = _MockFunction(np.random.random(10))
+rfftn = _MockFunction(np.random.random(10))
+
+irfft = _MockFunction(np.random.random(10))
+irfft2 = _MockFunction(np.random.random(10))
+irfftn = _MockFunction(np.random.random(10))
+
+hfft = _MockFunction(np.random.random(10))
+hfft2 = _MockFunction(np.random.random(10))
+hfftn = _MockFunction(np.random.random(10))
+
+ihfft = _MockFunction(np.random.random(10))
+ihfft2 = _MockFunction(np.random.random(10))
+ihfftn = _MockFunction(np.random.random(10))
+
+dct = _MockFunction(np.random.random(10))
+idct = _MockFunction(np.random.random(10))
+dctn = _MockFunction(np.random.random(10))
+idctn = _MockFunction(np.random.random(10))
+
+dst = _MockFunction(np.random.random(10))
+idst = _MockFunction(np.random.random(10))
+dstn = _MockFunction(np.random.random(10))
+idstn = _MockFunction(np.random.random(10))
+
+
+__ua_domain__ = "numpy.scipy.fft"
+
+
+def __ua_function__(method, args, kwargs):
+ fn = globals().get(method.__name__)
+ return (fn(*args, **kwargs) if fn is not None
+ else NotImplemented)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_backend.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_backend.py
new file mode 100644
index 0000000..ae485f2
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_backend.py
@@ -0,0 +1,87 @@
+import numpy as np
+import scipy.fft
+from scipy.fft import set_backend
+from scipy.fft import _pocketfft
+from scipy.fft.tests import mock_backend
+
+from numpy.testing import assert_allclose, assert_equal
+import pytest
+
+fnames = ('fft', 'fft2', 'fftn',
+ 'ifft', 'ifft2', 'ifftn',
+ 'rfft', 'rfft2', 'rfftn',
+ 'irfft', 'irfft2', 'irfftn',
+ 'dct', 'idct', 'dctn', 'idctn',
+ 'dst', 'idst', 'dstn', 'idstn')
+
+np_funcs = (np.fft.fft, np.fft.fft2, np.fft.fftn,
+ np.fft.ifft, np.fft.ifft2, np.fft.ifftn,
+ np.fft.rfft, np.fft.rfft2, np.fft.rfftn,
+ np.fft.irfft, np.fft.irfft2, np.fft.irfftn,
+ np.fft.hfft, _pocketfft.hfft2, _pocketfft.hfftn, # np has no hfftn
+ np.fft.ihfft, _pocketfft.ihfft2, _pocketfft.ihfftn,
+ _pocketfft.dct, _pocketfft.idct, _pocketfft.dctn, _pocketfft.idctn,
+ _pocketfft.dst, _pocketfft.idst, _pocketfft.dstn, _pocketfft.idstn)
+
+funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
+ scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
+ scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
+ scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
+ scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
+ scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn,
+ scipy.fft.dct, scipy.fft.idct, scipy.fft.dctn, scipy.fft.idctn,
+ scipy.fft.dst, scipy.fft.idst, scipy.fft.dstn, scipy.fft.idstn)
+
+mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
+ mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
+ mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
+ mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
+ mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
+ mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn,
+ mock_backend.dct, mock_backend.idct, mock_backend.dctn, mock_backend.idctn,
+ mock_backend.dst, mock_backend.idst, mock_backend.dstn, mock_backend.idstn)
+
+
+@pytest.mark.parametrize("func, np_func, mock", zip(funcs, np_funcs, mocks))
+def test_backend_call(func, np_func, mock):
+ x = np.arange(20).reshape((10,2))
+ answer = np_func(x)
+ assert_allclose(func(x), answer, atol=1e-10)
+
+ with set_backend(mock_backend, only=True):
+ mock.number_calls = 0
+ y = func(x)
+ assert_equal(y, mock.return_value)
+ assert_equal(mock.number_calls, 1)
+
+ assert_allclose(func(x), answer, atol=1e-10)
+
+
+plan_funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
+ scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
+ scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
+ scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
+ scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
+ scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn)
+
+plan_mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
+ mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
+ mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
+ mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
+ mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
+ mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn)
+
+
+@pytest.mark.parametrize("func, mock", zip(plan_funcs, plan_mocks))
+def test_backend_plan(func, mock):
+ x = np.arange(20).reshape((10, 2))
+
+ with pytest.raises(NotImplementedError, match='precomputed plan'):
+ func(x, plan='foo')
+
+ with set_backend(mock_backend, only=True):
+ mock.number_calls = 0
+ y = func(x, plan='foo')
+ assert_equal(y, mock.return_value)
+ assert_equal(mock.number_calls, 1)
+ assert_equal(mock.last_args[1]['plan'], 'foo')
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_fft_function.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_fft_function.py
new file mode 100644
index 0000000..7c7fec1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_fft_function.py
@@ -0,0 +1,46 @@
+import numpy as np
+import subprocess
+import sys
+
+TEST_BODY = r"""
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy
+import sys
+import pytest
+
+if hasattr(scipy, 'fft'):
+ raise AssertionError("scipy.fft should require an explicit import")
+
+np.random.seed(1234)
+x = np.random.randn(10) + 1j * np.random.randn(10)
+X = np.fft.fft(x)
+# Callable before scipy.fft is imported
+with pytest.deprecated_call(match=r'2\.0\.0'):
+ y = scipy.ifft(X)
+assert_allclose(y, x)
+
+# Callable after scipy.fft is imported
+import scipy.fft
+with pytest.deprecated_call(match=r'2\.0\.0'):
+ y = scipy.ifft(X)
+assert_allclose(y, x)
+
+"""
+
+def test_fft_function():
+ # Historically, scipy.fft was an alias for numpy.fft.fft
+ # Ensure there are no conflicts with the FFT module (gh-10253)
+
+ # Test must run in a subprocess so scipy.fft is not already imported
+ subprocess.check_call([sys.executable, '-c', TEST_BODY])
+
+ # scipy.fft is the correct module
+ from scipy import fft
+ assert not callable(fft)
+ assert fft.__name__ == 'scipy.fft'
+
+ from scipy import ifft
+ assert ifft.__wrapped__ is np.fft.ifft
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_helper.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_helper.py
new file mode 100644
index 0000000..22757a0
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_helper.py
@@ -0,0 +1,301 @@
+from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes
+from numpy.testing import assert_equal, assert_array_equal
+from pytest import raises as assert_raises
+import pytest
+import numpy as np
+import sys
+
+_5_smooth_numbers = [
+ 2, 3, 4, 5, 6, 8, 9, 10,
+ 2 * 3 * 5,
+ 2**3 * 3**5,
+ 2**3 * 3**3 * 5**2,
+]
+
+def test_next_fast_len():
+ for n in _5_smooth_numbers:
+ assert_equal(next_fast_len(n), n)
+
+
+def _assert_n_smooth(x, n):
+ x_orig = x
+ if n < 2:
+ assert False
+
+ while True:
+ q, r = divmod(x, 2)
+ if r != 0:
+ break
+ x = q
+
+ for d in range(3, n+1, 2):
+ while True:
+ q, r = divmod(x, d)
+ if r != 0:
+ break
+ x = q
+
+ assert x == 1, \
+ 'x={} is not {}-smooth, remainder={}'.format(x_orig, n, x)
+
+
+class TestNextFastLen(object):
+
+ def test_next_fast_len(self):
+ np.random.seed(1234)
+
+ def nums():
+ for j in range(1, 1000):
+ yield j
+ yield 2**5 * 3**5 * 4**5 + 1
+
+ for n in nums():
+ m = next_fast_len(n)
+ _assert_n_smooth(m, 11)
+ assert m == next_fast_len(n, False)
+
+ m = next_fast_len(n, True)
+ _assert_n_smooth(m, 5)
+
+ def test_np_integers(self):
+ ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]
+ for ityp in ITYPES:
+ x = ityp(12345)
+ testN = next_fast_len(x)
+ assert_equal(testN, next_fast_len(int(x)))
+
+ def testnext_fast_len_small(self):
+ hams = {
+ 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
+ 16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000
+ }
+ for x, y in hams.items():
+ assert_equal(next_fast_len(x, True), y)
+
+ @pytest.mark.xfail(sys.maxsize < 2**32,
+ reason="Hamming Numbers too large for 32-bit",
+ raises=ValueError, strict=True)
+ def testnext_fast_len_big(self):
+ hams = {
+ 510183360: 510183360, 510183360 + 1: 512000000,
+ 511000000: 512000000,
+ 854296875: 854296875, 854296875 + 1: 859963392,
+ 196608000000: 196608000000, 196608000000 + 1: 196830000000,
+ 8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
+ 206391214080000: 206391214080000,
+ 206391214080000 + 1: 206624260800000,
+ 470184984576000: 470184984576000,
+ 470184984576000 + 1: 470715894135000,
+ 7222041363087360: 7222041363087360,
+ 7222041363087360 + 1: 7230196133913600,
+ # power of 5 5**23
+ 11920928955078125: 11920928955078125,
+ 11920928955078125 - 1: 11920928955078125,
+ # power of 3 3**34
+ 16677181699666569: 16677181699666569,
+ 16677181699666569 - 1: 16677181699666569,
+ # power of 2 2**54
+ 18014398509481984: 18014398509481984,
+ 18014398509481984 - 1: 18014398509481984,
+ # above this, int(ceil(n)) == int(ceil(n+1))
+ 19200000000000000: 19200000000000000,
+ 19200000000000000 + 1: 19221679687500000,
+ 288230376151711744: 288230376151711744,
+ 288230376151711744 + 1: 288325195312500000,
+ 288325195312500000 - 1: 288325195312500000,
+ 288325195312500000: 288325195312500000,
+ 288325195312500000 + 1: 288555831593533440,
+ }
+ for x, y in hams.items():
+ assert_equal(next_fast_len(x, True), y)
+
+ def test_keyword_args(self):
+ assert next_fast_len(11, real=True) == 12
+ assert next_fast_len(target=7, real=False) == 7
+
+
+class Test_init_nd_shape_and_axes(object):
+
+ def test_py_0d_defaults(self):
+ x = np.array(4)
+ shape = None
+ axes = None
+
+ shape_expected = np.array([])
+ axes_expected = np.array([])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_0d_defaults(self):
+ x = np.array(7.)
+ shape = None
+ axes = None
+
+ shape_expected = np.array([])
+ axes_expected = np.array([])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_py_1d_defaults(self):
+ x = np.array([1, 2, 3])
+ shape = None
+ axes = None
+
+ shape_expected = np.array([3])
+ axes_expected = np.array([0])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_1d_defaults(self):
+ x = np.arange(0, 1, .1)
+ shape = None
+ axes = None
+
+ shape_expected = np.array([10])
+ axes_expected = np.array([0])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_py_2d_defaults(self):
+ x = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8]])
+ shape = None
+ axes = None
+
+ shape_expected = np.array([2, 4])
+ axes_expected = np.array([0, 1])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_2d_defaults(self):
+ x = np.arange(0, 1, .1).reshape(5, 2)
+ shape = None
+ axes = None
+
+ shape_expected = np.array([5, 2])
+ axes_expected = np.array([0, 1])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_5d_defaults(self):
+ x = np.zeros([6, 2, 5, 3, 4])
+ shape = None
+ axes = None
+
+ shape_expected = np.array([6, 2, 5, 3, 4])
+ axes_expected = np.array([0, 1, 2, 3, 4])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_5d_set_shape(self):
+ x = np.zeros([6, 2, 5, 3, 4])
+ shape = [10, -1, -1, 1, 4]
+ axes = None
+
+ shape_expected = np.array([10, 2, 5, 1, 4])
+ axes_expected = np.array([0, 1, 2, 3, 4])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_5d_set_axes(self):
+ x = np.zeros([6, 2, 5, 3, 4])
+ shape = None
+ axes = [4, 1, 2]
+
+ shape_expected = np.array([4, 2, 5])
+ axes_expected = np.array([4, 1, 2])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_np_5d_set_shape_axes(self):
+ x = np.zeros([6, 2, 5, 3, 4])
+ shape = [10, -1, 2]
+ axes = [1, 0, 3]
+
+ shape_expected = np.array([10, 6, 2])
+ axes_expected = np.array([1, 0, 3])
+
+ shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+ assert_equal(shape_res, shape_expected)
+ assert_equal(axes_res, axes_expected)
+
+ def test_shape_axes_subset(self):
+ x = np.zeros((2, 3, 4, 5))
+ shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)
+
+ assert_array_equal(shape, [5, 5, 5])
+ assert_array_equal(axes, [1, 2, 3])
+
+ def test_errors(self):
+ x = np.zeros(1)
+ with assert_raises(ValueError, match="axes must be a scalar or "
+ "iterable of integers"):
+ _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])
+
+ with assert_raises(ValueError, match="axes must be a scalar or "
+ "iterable of integers"):
+ _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.])
+
+ with assert_raises(ValueError,
+ match="axes exceeds dimensionality of input"):
+ _init_nd_shape_and_axes(x, shape=None, axes=[1])
+
+ with assert_raises(ValueError,
+ match="axes exceeds dimensionality of input"):
+ _init_nd_shape_and_axes(x, shape=None, axes=[-2])
+
+ with assert_raises(ValueError,
+ match="all axes must be unique"):
+ _init_nd_shape_and_axes(x, shape=None, axes=[0, 0])
+
+ with assert_raises(ValueError, match="shape must be a scalar or "
+ "iterable of integers"):
+ _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)
+
+ with assert_raises(ValueError, match="shape must be a scalar or "
+ "iterable of integers"):
+ _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None)
+
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ _init_nd_shape_and_axes(np.zeros([1, 1, 1, 1]),
+ shape=[1, 2, 3], axes=[1])
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[0\]\) specified"):
+ _init_nd_shape_and_axes(x, shape=[0], axes=None)
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[-2\]\) specified"):
+ _init_nd_shape_and_axes(x, shape=-2, axes=None)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_multithreading.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_multithreading.py
new file mode 100644
index 0000000..e771aff
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_multithreading.py
@@ -0,0 +1,83 @@
+from scipy import fft
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+import multiprocessing
+import os
+
+
+@pytest.fixture(scope='module')
+def x():
+ return np.random.randn(512, 128) # Must be large enough to qualify for mt
+
+
+@pytest.mark.parametrize("func", [
+ fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn,
+ fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, fft.irfftn,
+ fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn,
+ fft.dct, fft.idct, fft.dctn, fft.idctn,
+ fft.dst, fft.idst, fft.dstn, fft.idstn,
+])
+@pytest.mark.parametrize("workers", [2, -1])
+def test_threaded_same(x, func, workers):
+ expected = func(x, workers=1)
+ actual = func(x, workers=workers)
+ assert_allclose(actual, expected)
+
+
+def _mt_fft(x):
+ return fft.fft(x, workers=2)
+
+
+def test_mixed_threads_processes(x):
+ # Test that the fft threadpool is safe to use before & after fork
+
+ expect = fft.fft(x, workers=2)
+
+ with multiprocessing.Pool(2) as p:
+ res = p.map(_mt_fft, [x for _ in range(4)])
+
+ for r in res:
+ assert_allclose(r, expect)
+
+ fft.fft(x, workers=2)
+
+
+def test_invalid_workers(x):
+ cpus = os.cpu_count()
+
+ fft.ifft([1], workers=-cpus)
+
+ with pytest.raises(ValueError, match='workers must not be zero'):
+ fft.fft(x, workers=0)
+
+ with pytest.raises(ValueError, match='workers value out of range'):
+ fft.ifft(x, workers=-cpus-1)
+
+
+def test_set_get_workers():
+ cpus = os.cpu_count()
+ assert fft.get_workers() == 1
+ with fft.set_workers(4):
+ assert fft.get_workers() == 4
+
+ with fft.set_workers(-1):
+ assert fft.get_workers() == cpus
+
+ assert fft.get_workers() == 4
+
+ assert fft.get_workers() == 1
+
+ with fft.set_workers(-cpus):
+ assert fft.get_workers() == 1
+
+
+def test_set_workers_invalid():
+
+ with pytest.raises(ValueError, match='workers must not be zero'):
+ with fft.set_workers(0):
+ pass
+
+ with pytest.raises(ValueError, match='workers value out of range'):
+ with fft.set_workers(-os.cpu_count()-1):
+ pass
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_numpy.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_numpy.py
new file mode 100644
index 0000000..f8ca24d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_numpy.py
@@ -0,0 +1,364 @@
+import queue
+import threading
+import multiprocessing
+import numpy as np
+import pytest
+from numpy.random import random
+from numpy.testing import (
+ assert_array_almost_equal, assert_array_equal, assert_allclose
+ )
+from pytest import raises as assert_raises
+import scipy.fft as fft
+
+def fft1(x):
+ L = len(x)
+ phase = -2j*np.pi*(np.arange(L)/float(L))
+ phase = np.arange(L).reshape(-1, 1) * phase
+ return np.sum(x*np.exp(phase), axis=1)
+
+
+class TestFFTShift(object):
+
+ def test_fft_n(self):
+ assert_raises(ValueError, fft.fft, [1, 2, 3], 0)
+
+
+class TestFFT1D(object):
+
+ def test_identity(self):
+ maxlen = 512
+ x = random(maxlen) + 1j*random(maxlen)
+ xr = random(maxlen)
+ for i in range(1,maxlen):
+ assert_array_almost_equal(fft.ifft(fft.fft(x[0:i])), x[0:i],
+ decimal=12)
+ assert_array_almost_equal(fft.irfft(fft.rfft(xr[0:i]),i),
+ xr[0:i], decimal=12)
+
+ def test_fft(self):
+ x = random(30) + 1j*random(30)
+ expect = fft1(x)
+ assert_array_almost_equal(expect, fft.fft(x))
+ assert_array_almost_equal(expect, fft.fft(x, norm="backward"))
+ assert_array_almost_equal(expect / np.sqrt(30),
+ fft.fft(x, norm="ortho"))
+ assert_array_almost_equal(expect / 30, fft.fft(x, norm="forward"))
+
+ def test_ifft(self):
+ x = random(30) + 1j*random(30)
+ assert_array_almost_equal(x, fft.ifft(fft.fft(x)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x, fft.ifft(fft.fft(x, norm=norm), norm=norm))
+
+ def test_fft2(self):
+ x = random((30, 20)) + 1j*random((30, 20))
+ expect = fft.fft(fft.fft(x, axis=1), axis=0)
+ assert_array_almost_equal(expect, fft.fft2(x))
+ assert_array_almost_equal(expect, fft.fft2(x, norm="backward"))
+ assert_array_almost_equal(expect / np.sqrt(30 * 20),
+ fft.fft2(x, norm="ortho"))
+ assert_array_almost_equal(expect / (30 * 20),
+ fft.fft2(x, norm="forward"))
+
+ def test_ifft2(self):
+ x = random((30, 20)) + 1j*random((30, 20))
+ expect = fft.ifft(fft.ifft(x, axis=1), axis=0)
+ assert_array_almost_equal(expect, fft.ifft2(x))
+ assert_array_almost_equal(expect, fft.ifft2(x, norm="backward"))
+ assert_array_almost_equal(expect * np.sqrt(30 * 20),
+ fft.ifft2(x, norm="ortho"))
+ assert_array_almost_equal(expect * (30 * 20),
+ fft.ifft2(x, norm="forward"))
+
+ def test_fftn(self):
+ x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+ expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0)
+ assert_array_almost_equal(expect, fft.fftn(x))
+ assert_array_almost_equal(expect, fft.fftn(x, norm="backward"))
+ assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
+ fft.fftn(x, norm="ortho"))
+ assert_array_almost_equal(expect / (30 * 20 * 10),
+ fft.fftn(x, norm="forward"))
+
+ def test_ifftn(self):
+ x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+ expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0)
+ assert_array_almost_equal(expect, fft.ifftn(x))
+ assert_array_almost_equal(expect, fft.ifftn(x, norm="backward"))
+ assert_array_almost_equal(fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+ fft.ifftn(x, norm="ortho"))
+ assert_array_almost_equal(expect * (30 * 20 * 10),
+ fft.ifftn(x, norm="forward"))
+
+ def test_rfft(self):
+ x = random(29)
+ for n in [x.size, 2*x.size]:
+ for norm in [None, "backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
+ fft.rfft(x, n=n, norm=norm))
+ assert_array_almost_equal(fft.rfft(x, n=n) / np.sqrt(n),
+ fft.rfft(x, n=n, norm="ortho"))
+
+ def test_irfft(self):
+ x = random(30)
+ assert_array_almost_equal(x, fft.irfft(fft.rfft(x)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x, fft.irfft(fft.rfft(x, norm=norm), norm=norm))
+
+ def test_rfft2(self):
+ x = random((30, 20))
+ expect = fft.fft2(x)[:, :11]
+ assert_array_almost_equal(expect, fft.rfft2(x))
+ assert_array_almost_equal(expect, fft.rfft2(x, norm="backward"))
+ assert_array_almost_equal(expect / np.sqrt(30 * 20),
+ fft.rfft2(x, norm="ortho"))
+ assert_array_almost_equal(expect / (30 * 20),
+ fft.rfft2(x, norm="forward"))
+
+ def test_irfft2(self):
+ x = random((30, 20))
+ assert_array_almost_equal(x, fft.irfft2(fft.rfft2(x)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x, fft.irfft2(fft.rfft2(x, norm=norm), norm=norm))
+
+ def test_rfftn(self):
+ x = random((30, 20, 10))
+ expect = fft.fftn(x)[:, :, :6]
+ assert_array_almost_equal(expect, fft.rfftn(x))
+ assert_array_almost_equal(expect, fft.rfftn(x, norm="backward"))
+ assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
+ fft.rfftn(x, norm="ortho"))
+ assert_array_almost_equal(expect / (30 * 20 * 10),
+ fft.rfftn(x, norm="forward"))
+
+ def test_irfftn(self):
+ x = random((30, 20, 10))
+ assert_array_almost_equal(x, fft.irfftn(fft.rfftn(x)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x, fft.irfftn(fft.rfftn(x, norm=norm), norm=norm))
+
+ def test_hfft(self):
+ x = random(14) + 1j*random(14)
+ x_herm = np.concatenate((random(1), x, random(1)))
+ x = np.concatenate((x_herm, x[::-1].conj()))
+ expect = fft.fft(x)
+ assert_array_almost_equal(expect, fft.hfft(x_herm))
+ assert_array_almost_equal(expect, fft.hfft(x_herm, norm="backward"))
+ assert_array_almost_equal(expect / np.sqrt(30),
+ fft.hfft(x_herm, norm="ortho"))
+ assert_array_almost_equal(expect / 30,
+ fft.hfft(x_herm, norm="forward"))
+
+ def test_ihfft(self):
+ x = random(14) + 1j*random(14)
+ x_herm = np.concatenate((random(1), x, random(1)))
+ x = np.concatenate((x_herm, x[::-1].conj()))
+ assert_array_almost_equal(x_herm, fft.ihfft(fft.hfft(x_herm)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x_herm, fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm))
+
+ def test_hfft2(self):
+ x = random((30, 20))
+ assert_array_almost_equal(x, fft.hfft2(fft.ihfft2(x)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x, fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm))
+
+ def test_ihfft2(self):
+ x = random((30, 20))
+ expect = fft.ifft2(x)[:, :11]
+ assert_array_almost_equal(expect, fft.ihfft2(x))
+ assert_array_almost_equal(expect, fft.ihfft2(x, norm="backward"))
+ assert_array_almost_equal(expect * np.sqrt(30 * 20),
+ fft.ihfft2(x, norm="ortho"))
+ assert_array_almost_equal(expect * (30 * 20),
+ fft.ihfft2(x, norm="forward"))
+
+ def test_hfftn(self):
+ x = random((30, 20, 10))
+ assert_array_almost_equal(x, fft.hfftn(fft.ihfftn(x)))
+ for norm in ["backward", "ortho", "forward"]:
+ assert_array_almost_equal(
+ x, fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm))
+
+ def test_ihfftn(self):
+ x = random((30, 20, 10))
+ expect = fft.ifftn(x)[:, :, :6]
+ assert_array_almost_equal(expect, fft.ihfftn(x))
+ assert_array_almost_equal(expect, fft.ihfftn(x, norm="backward"))
+ assert_array_almost_equal(expect * np.sqrt(30 * 20 * 10),
+ fft.ihfftn(x, norm="ortho"))
+ assert_array_almost_equal(expect * (30 * 20 * 10),
+ fft.ihfftn(x, norm="forward"))
+
+ @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn,
+ fft.rfftn, fft.irfftn,
+ fft.hfftn, fft.ihfftn])
+ def test_axes(self, op):
+ x = random((30, 20, 10))
+ axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
+ for a in axes:
+ op_tr = op(np.transpose(x, a))
+ tr_op = np.transpose(op(x, axes=a), a)
+ assert_array_almost_equal(op_tr, tr_op)
+
+ @pytest.mark.parametrize("op", [fft.fft2, fft.ifft2,
+ fft.rfft2, fft.irfft2,
+ fft.hfft2, fft.ihfft2,
+ fft.fftn, fft.ifftn,
+ fft.rfftn, fft.irfftn,
+ fft.hfftn, fft.ihfftn])
+ def test_axes_subset_with_shape(self, op):
+ x = random((16, 8, 4))
+ axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]
+ for a in axes:
+ # different shape on the first two axes
+ shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]
+ for ax in range(x.ndim)])
+ # transform only the first two axes
+ op_tr = op(np.transpose(x, a), s=shape[:2], axes=(0, 1))
+ tr_op = np.transpose(op(x, s=shape[:2], axes=a[:2]), a)
+ assert_array_almost_equal(op_tr, tr_op)
+
+ def test_all_1d_norm_preserving(self):
+ # verify that round-trip transforms are norm-preserving
+ x = random(30)
+ x_norm = np.linalg.norm(x)
+ n = x.size * 2
+ func_pairs = [(fft.fft, fft.ifft),
+ (fft.rfft, fft.irfft),
+ # hfft: order so the first function takes x.size samples
+ # (necessary for comparison to x_norm above)
+ (fft.ihfft, fft.hfft),
+ ]
+ for forw, back in func_pairs:
+ for n in [x.size, 2*x.size]:
+ for norm in ['backward', 'ortho', 'forward']:
+ tmp = forw(x, n=n, norm=norm)
+ tmp = back(tmp, n=n, norm=norm)
+ assert_array_almost_equal(x_norm,
+ np.linalg.norm(tmp))
+
+ @pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
+ np.longdouble])
+ def test_dtypes(self, dtype):
+ # make sure that all input precisions are accepted
+ x = random(30).astype(dtype)
+ assert_array_almost_equal(fft.ifft(fft.fft(x)), x)
+ assert_array_almost_equal(fft.irfft(fft.rfft(x)), x)
+ assert_array_almost_equal(fft.hfft(fft.ihfft(x), len(x)), x)
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ [np.float32, np.float64, np.longfloat,
+ np.complex64, np.complex128, np.longcomplex])
+@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
+@pytest.mark.parametrize(
+ "fft",
+ [fft.fft, fft.fft2, fft.fftn,
+ fft.ifft, fft.ifft2, fft.ifftn])
+def test_fft_with_order(dtype, order, fft):
+ # Check that FFT/IFFT produces identical results for C, Fortran and
+ # non contiguous arrays
+ rng = np.random.RandomState(42)
+ X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+ if order == 'F':
+ Y = np.asfortranarray(X)
+ else:
+ # Make a non contiguous array
+ Y = X[::-1]
+ X = np.ascontiguousarray(X[::-1])
+
+ if fft.__name__.endswith('fft'):
+ for axis in range(3):
+ X_res = fft(X, axis=axis)
+ Y_res = fft(Y, axis=axis)
+ assert_array_almost_equal(X_res, Y_res)
+ elif fft.__name__.endswith(('fft2', 'fftn')):
+ axes = [(0, 1), (1, 2), (0, 2)]
+ if fft.__name__.endswith('fftn'):
+ axes.extend([(0,), (1,), (2,), None])
+ for ax in axes:
+ X_res = fft(X, axes=ax)
+ Y_res = fft(Y, axes=ax)
+ assert_array_almost_equal(X_res, Y_res)
+ else:
+ raise ValueError
+
+
+class TestFFTThreadSafe(object):
+ threads = 16
+ input_shape = (800, 200)
+
+ def _test_mtsame(self, func, *args):
+ def worker(args, q):
+ q.put(func(*args))
+
+ q = queue.Queue()
+ expected = func(*args)
+
+ # Spin off a bunch of threads to call the same function simultaneously
+ t = [threading.Thread(target=worker, args=(args, q))
+ for i in range(self.threads)]
+ [x.start() for x in t]
+
+ [x.join() for x in t]
+ # Make sure all threads returned the correct value
+ for i in range(self.threads):
+ assert_array_equal(q.get(timeout=5), expected,
+ 'Function returned wrong value in multithreaded context')
+
+ def test_fft(self):
+ a = np.ones(self.input_shape, dtype=np.complex128)
+ self._test_mtsame(fft.fft, a)
+
+ def test_ifft(self):
+ a = np.full(self.input_shape, 1+0j)
+ self._test_mtsame(fft.ifft, a)
+
+ def test_rfft(self):
+ a = np.ones(self.input_shape)
+ self._test_mtsame(fft.rfft, a)
+
+ def test_irfft(self):
+ a = np.full(self.input_shape, 1+0j)
+ self._test_mtsame(fft.irfft, a)
+
+ def test_hfft(self):
+ a = np.ones(self.input_shape, np.complex64)
+ self._test_mtsame(fft.hfft, a)
+
+ def test_ihfft(self):
+ a = np.ones(self.input_shape)
+ self._test_mtsame(fft.ihfft, a)
+
+
+@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft])
+def test_multiprocess(func):
+ # Test that fft still works after fork (gh-10422)
+
+ with multiprocessing.Pool(2) as p:
+ res = p.map(func, [np.ones(100) for _ in range(4)])
+
+ expect = func(np.ones(100))
+ for x in res:
+ assert_allclose(x, expect)
+
+
+class TestIRFFTN(object):
+
+ def test_not_last_axis_success(self):
+ ar, ai = np.random.random((2, 16, 8, 32))
+ a = ar + 1j*ai
+
+ axes = (-2,)
+
+ # Should not raise error
+ fft.irfftn(a, axes=axes)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_real_transforms.py b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_real_transforms.py
new file mode 100644
index 0000000..d0656bc
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fft/tests/test_real_transforms.py
@@ -0,0 +1,144 @@
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+import pytest
+
+from scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn
+import scipy.fft as fft
+from scipy import fftpack
+
+# scipy.fft wraps the fftpack versions but with normalized inverse transforms.
+# So, the forward transforms and definitions are already thoroughly tested in
+# fftpack/test_real_transforms.py
+
+
+@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("n", [2, 3, 4, 5, 10, 16])
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+def test_identity_1d(forward, backward, type, n, axis, norm):
+ # Test the identity f^-1(f(x)) == x
+ x = np.random.rand(n, n)
+
+ y = forward(x, type, axis=axis, norm=norm)
+ z = backward(y, type, axis=axis, norm=norm)
+ assert_allclose(z, x)
+
+ pad = [(0, 0)] * 2
+ pad[axis] = (0, 4)
+
+ y2 = np.pad(y, pad, mode='edge')
+ z2 = backward(y2, type, n, axis, norm)
+ assert_allclose(z2, x)
+
+
+@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64,
+ np.complex64, np.complex128])
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+@pytest.mark.parametrize("overwrite_x", [True, False])
+def test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm,
+ overwrite_x):
+ # Test the identity f^-1(f(x)) == x
+ x = np.random.rand(7, 8)
+ x_orig = x.copy()
+
+ y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x)
+ y_orig = y.copy()
+ z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x)
+ if not overwrite_x:
+ assert_allclose(z, x, rtol=1e-6, atol=1e-6)
+ assert_array_equal(x, x_orig)
+ assert_array_equal(y, y_orig)
+ else:
+ assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)
+
+
+@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("shape, axes",
+ [
+ ((4, 4), 0),
+ ((4, 4), 1),
+ ((4, 4), None),
+ ((4, 4), (0, 1)),
+ ((10, 12), None),
+ ((10, 12), (0, 1)),
+ ((4, 5, 6), None),
+ ((4, 5, 6), 1),
+ ((4, 5, 6), (0, 2)),
+ ])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+def test_identity_nd(forward, backward, type, shape, axes, norm):
+ # Test the identity f^-1(f(x)) == x
+
+ x = np.random.random(shape)
+
+ if axes is not None:
+ shape = np.take(shape, axes)
+
+ y = forward(x, type, axes=axes, norm=norm)
+ z = backward(y, type, axes=axes, norm=norm)
+ assert_allclose(z, x)
+
+ if axes is None:
+ pad = [(0, 4)] * x.ndim
+ elif isinstance(axes, int):
+ pad = [(0, 0)] * x.ndim
+ pad[axes] = (0, 4)
+ else:
+ pad = [(0, 0)] * x.ndim
+
+ for a in axes:
+ pad[a] = (0, 4)
+
+ y2 = np.pad(y, pad, mode='edge')
+ z2 = backward(y2, type, shape, axes, norm)
+ assert_allclose(z2, x)
+
+
+@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("shape, axes",
+ [
+ ((4, 5), 0),
+ ((4, 5), 1),
+ ((4, 5), None),
+ ])
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64,
+ np.complex64, np.complex128])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+@pytest.mark.parametrize("overwrite_x", [False, True])
+def test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype,
+ norm, overwrite_x):
+ # Test the identity f^-1(f(x)) == x
+
+ x = np.random.random(shape).astype(dtype)
+ x_orig = x.copy()
+
+ if axes is not None:
+ shape = np.take(shape, axes)
+
+ y = forward(x, type, axes=axes, norm=norm)
+ y_orig = y.copy()
+ z = backward(y, type, axes=axes, norm=norm)
+ if overwrite_x:
+ assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)
+ else:
+ assert_allclose(z, x, rtol=1e-6, atol=1e-6)
+ assert_array_equal(x, x_orig)
+ assert_array_equal(y, y_orig)
+
+
+@pytest.mark.parametrize("func", ['dct', 'dst', 'dctn', 'dstn'])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+def test_fftpack_equivalience(func, type, norm):
+ x = np.random.rand(8, 16)
+ fft_res = getattr(fft, func)(x, type, norm=norm)
+ fftpack_res = getattr(fftpack, func)(x, type, norm=norm)
+
+ assert_allclose(fft_res, fftpack_res)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/__init__.py
new file mode 100644
index 0000000..af4eceb
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/__init__.py
@@ -0,0 +1,101 @@
+"""
+=========================================================
+Legacy discrete Fourier transforms (:mod:`scipy.fftpack`)
+=========================================================
+
+.. warning::
+
+ This submodule is now considered legacy, new code should use
+ :mod:`scipy.fft`.
+
+Fast Fourier Transforms (FFTs)
+==============================
+
+.. autosummary::
+ :toctree: generated/
+
+ fft - Fast (discrete) Fourier Transform (FFT)
+ ifft - Inverse FFT
+ fft2 - 2-D FFT
+ ifft2 - 2-D inverse FFT
+ fftn - N-D FFT
+ ifftn - N-D inverse FFT
+ rfft - FFT of strictly real-valued sequence
+ irfft - Inverse of rfft
+ dct - Discrete cosine transform
+ idct - Inverse discrete cosine transform
+ dctn - N-D Discrete cosine transform
+ idctn - N-D Inverse discrete cosine transform
+ dst - Discrete sine transform
+ idst - Inverse discrete sine transform
+ dstn - N-D Discrete sine transform
+ idstn - N-D Inverse discrete sine transform
+
+Differential and pseudo-differential operators
+==============================================
+
+.. autosummary::
+ :toctree: generated/
+
+ diff - Differentiation and integration of periodic sequences
+ tilbert - Tilbert transform: cs_diff(x,h,h)
+ itilbert - Inverse Tilbert transform: sc_diff(x,h,h)
+ hilbert - Hilbert transform: cs_diff(x,inf,inf)
+ ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf)
+ cs_diff - cosh/sinh pseudo-derivative of periodic sequences
+ sc_diff - sinh/cosh pseudo-derivative of periodic sequences
+ ss_diff - sinh/sinh pseudo-derivative of periodic sequences
+ cc_diff - cosh/cosh pseudo-derivative of periodic sequences
+ shift - Shift periodic sequences
+
+Helper functions
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ fftshift - Shift the zero-frequency component to the center of the spectrum
+ ifftshift - The inverse of `fftshift`
+ fftfreq - Return the Discrete Fourier Transform sample frequencies
+ rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
+ next_fast_len - Find the optimal length to zero-pad an FFT for speed
+
+Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions
+exposed by ``fftpack``; importing them from ``numpy`` should be preferred.
+
+Convolutions (:mod:`scipy.fftpack.convolve`)
+============================================
+
+.. module:: scipy.fftpack.convolve
+
+.. autosummary::
+ :toctree: generated/
+
+ convolve
+ convolve_z
+ init_convolution_kernel
+ destroy_convolve_cache
+
+"""
+
+
+__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
+ 'fft2','ifft2',
+ 'diff',
+ 'tilbert','itilbert','hilbert','ihilbert',
+ 'sc_diff','cs_diff','cc_diff','ss_diff',
+ 'shift',
+ 'fftfreq', 'rfftfreq',
+ 'fftshift', 'ifftshift',
+ 'next_fast_len',
+ 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
+ ]
+
+from .basic import *
+from .pseudo_diffs import *
+from .helper import *
+from .realtransforms import *
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/basic.py
new file mode 100644
index 0000000..db3fb42
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/basic.py
@@ -0,0 +1,424 @@
+"""
+Discrete Fourier Transforms - basic.py
+"""
+# Created by Pearu Peterson, August,September 2002
+__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
+ 'fft2','ifft2']
+
+from scipy.fft import _pocketfft
+from .helper import _good_shape
+
+
+def fft(x, n=None, axis=-1, overwrite_x=False):
+ """
+ Return discrete Fourier transform of real or complex sequence.
+
+ The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
+
+ ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.
+
+ Parameters
+ ----------
+ x : array_like
+ Array to Fourier transform.
+ n : int, optional
+ Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+ default results in ``n = x.shape[axis]``.
+ axis : int, optional
+ Axis along which the fft's are computed; the default is over the
+ last axis (i.e., ``axis=-1``).
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+
+ Returns
+ -------
+ z : complex ndarray
+ with the elements::
+
+ [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even
+ [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd
+
+ where::
+
+ y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
+
+ See Also
+ --------
+ ifft : Inverse FFT
+ rfft : FFT of a real sequence
+
+ Notes
+ -----
+ The packing of the result is "standard": If ``A = fft(a, n)``, then
+ ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the
+ positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency
+ terms, in order of decreasingly negative frequency. So ,for an 8-point
+ transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1].
+ To rearrange the fft output so that the zero-frequency component is
+ centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`.
+
+ Both single and double precision routines are implemented. Half precision
+ inputs will be converted to single precision. Non-floating-point inputs
+ will be converted to double precision. Long-double precision inputs are
+ not supported.
+
+ This function is most efficient when `n` is a power of two, and least
+ efficient when `n` is prime.
+
+ Note that if ``x`` is real-valued, then ``A[j] == A[n-j].conjugate()``.
+ If ``x`` is real-valued and ``n`` is even, then ``A[n/2]`` is real.
+
+ If the data type of `x` is real, a "real FFT" algorithm is automatically
+ used, which roughly halves the computation time. To increase efficiency
+ a little further, use `rfft`, which does the same calculation, but only
+ outputs half of the symmetrical spectrum. If the data is both real and
+ symmetrical, the `dct` can again double the efficiency by generating
+ half of the spectrum from half of the signal.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fft, ifft
+ >>> x = np.arange(5)
+ >>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy.
+ True
+
+ """
+ return _pocketfft.fft(x, n, axis, None, overwrite_x)
+
+
+def ifft(x, n=None, axis=-1, overwrite_x=False):
+ """
+ Return discrete inverse Fourier transform of real or complex sequence.
+
+ The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
+
+ ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
+
+ Parameters
+ ----------
+ x : array_like
+ Transformed data to invert.
+ n : int, optional
+ Length of the inverse Fourier transform. If ``n < x.shape[axis]``,
+ `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded.
+ The default results in ``n = x.shape[axis]``.
+ axis : int, optional
+ Axis along which the ifft's are computed; the default is over the
+ last axis (i.e., ``axis=-1``).
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+
+ Returns
+ -------
+ ifft : ndarray of floats
+ The inverse discrete Fourier transform.
+
+ See Also
+ --------
+ fft : Forward FFT
+
+ Notes
+ -----
+ Both single and double precision routines are implemented. Half precision
+ inputs will be converted to single precision. Non-floating-point inputs
+ will be converted to double precision. Long-double precision inputs are
+ not supported.
+
+ This function is most efficient when `n` is a power of two, and least
+ efficient when `n` is prime.
+
+ If the data type of `x` is real, a "real IFFT" algorithm is automatically
+ used, which roughly halves the computation time.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fft, ifft
+ >>> import numpy as np
+ >>> x = np.arange(5)
+ >>> np.allclose(ifft(fft(x)), x, atol=1e-15) # within numerical accuracy.
+ True
+
+ """
+ return _pocketfft.ifft(x, n, axis, None, overwrite_x)
+
+
+def rfft(x, n=None, axis=-1, overwrite_x=False):
+ """
+ Discrete Fourier transform of a real sequence.
+
+ Parameters
+ ----------
+ x : array_like, real-valued
+ The data to transform.
+ n : int, optional
+ Defines the length of the Fourier transform. If `n` is not specified
+ (the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``,
+ `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
+ axis : int, optional
+ The axis along which the transform is applied. The default is the
+ last axis.
+ overwrite_x : bool, optional
+ If set to true, the contents of `x` can be overwritten. Default is
+ False.
+
+ Returns
+ -------
+ z : real ndarray
+ The returned real array contains::
+
+ [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even
+ [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd
+
+ where::
+
+ y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
+ j = 0..n-1
+
+ See Also
+ --------
+ fft, irfft, scipy.fft.rfft
+
+ Notes
+ -----
+ Within numerical accuracy, ``y == rfft(irfft(y))``.
+
+ Both single and double precision routines are implemented. Half precision
+ inputs will be converted to single precision. Non-floating-point inputs
+ will be converted to double precision. Long-double precision inputs are
+ not supported.
+
+ To get an output with a complex datatype, consider using the newer
+ function `scipy.fft.rfft`.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fft, rfft
+ >>> a = [9, -9, 1, 3]
+ >>> fft(a)
+ array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j])
+ >>> rfft(a)
+ array([ 4., 8., 12., 16.])
+
+ """
+ return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x)
+
+
+def irfft(x, n=None, axis=-1, overwrite_x=False):
+ """
+ Return inverse discrete Fourier transform of real sequence x.
+
+ The contents of `x` are interpreted as the output of the `rfft`
+ function.
+
+ Parameters
+ ----------
+ x : array_like
+ Transformed data to invert.
+ n : int, optional
+ Length of the inverse Fourier transform.
+ If n < x.shape[axis], x is truncated.
+ If n > x.shape[axis], x is zero-padded.
+ The default results in n = x.shape[axis].
+ axis : int, optional
+ Axis along which the ifft's are computed; the default is over
+ the last axis (i.e., axis=-1).
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed; the default is False.
+
+ Returns
+ -------
+ irfft : ndarray of floats
+ The inverse discrete Fourier transform.
+
+ See Also
+ --------
+ rfft, ifft, scipy.fft.irfft
+
+ Notes
+ -----
+ The returned real array contains::
+
+ [y(0),y(1),...,y(n-1)]
+
+ where for n is even::
+
+ y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
+ * exp(sqrt(-1)*j*k* 2*pi/n)
+ + c.c. + x[0] + (-1)**(j) x[n-1])
+
+ and for n is odd::
+
+ y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
+ * exp(sqrt(-1)*j*k* 2*pi/n)
+ + c.c. + x[0])
+
+ c.c. denotes complex conjugate of preceding expression.
+
+ For details on input parameters, see `rfft`.
+
+ To process (conjugate-symmetric) frequency-domain data with a complex
+ datatype, consider using the newer function `scipy.fft.irfft`.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import rfft, irfft
+ >>> a = [1.0, 2.0, 3.0, 4.0, 5.0]
+ >>> irfft(a)
+ array([ 2.6 , -3.16405192, 1.24398433, -1.14955713, 1.46962473])
+ >>> irfft(rfft(a))
+ array([1., 2., 3., 4., 5.])
+
+ """
+ return _pocketfft.irfft_fftpack(x, n, axis, None, overwrite_x)
+
+
+def fftn(x, shape=None, axes=None, overwrite_x=False):
+ """
+ Return multidimensional discrete Fourier transform.
+
+ The returned array contains::
+
+ y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
+ x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
+
+ where d = len(x.shape) and n = x.shape.
+
+ Parameters
+ ----------
+ x : array_like
+ The (N-D) array to transform.
+ shape : int or array_like of ints or None, optional
+ The shape of the result. If both `shape` and `axes` (see below) are
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+ not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+ If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
+ If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
+ length ``shape[i]``.
+ If any element of `shape` is -1, the size of the corresponding
+ dimension of `x` is used.
+ axes : int or array_like of ints or None, optional
+ The axes of `x` (`y` if `shape` is not None) along which the
+ transform is applied.
+ The default is over all axes.
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed. Default is False.
+
+ Returns
+ -------
+ y : complex-valued N-D NumPy array
+ The (N-D) DFT of the input array.
+
+ See Also
+ --------
+ ifftn
+
+ Notes
+ -----
+ If ``x`` is real-valued, then
+ ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``.
+
+ Both single and double precision routines are implemented. Half precision
+ inputs will be converted to single precision. Non-floating-point inputs
+ will be converted to double precision. Long-double precision inputs are
+ not supported.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fftn, ifftn
+ >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
+ >>> np.allclose(y, fftn(ifftn(y)))
+ True
+
+ """
+ shape = _good_shape(x, shape, axes)
+ return _pocketfft.fftn(x, shape, axes, None, overwrite_x)
+
+
+def ifftn(x, shape=None, axes=None, overwrite_x=False):
+ """
+ Return inverse multidimensional discrete Fourier transform.
+
+ The sequence can be of an arbitrary type.
+
+ The returned array contains::
+
+ y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
+ x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)
+
+ where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.
+
+ For description of parameters see `fftn`.
+
+ See Also
+ --------
+ fftn : for detailed information.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fftn, ifftn
+ >>> import numpy as np
+ >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
+ >>> np.allclose(y, ifftn(fftn(y)))
+ True
+
+ """
+ shape = _good_shape(x, shape, axes)
+ return _pocketfft.ifftn(x, shape, axes, None, overwrite_x)
+
+
+def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
+ """
+ 2-D discrete Fourier transform.
+
+ Return the 2-D discrete Fourier transform of the 2-D argument
+ `x`.
+
+ See Also
+ --------
+ fftn : for detailed information.
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fft2, ifft2
+ >>> y = np.mgrid[:5, :5][0]
+ >>> y
+ array([[0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4]])
+ >>> np.allclose(y, ifft2(fft2(y)))
+ True
+ """
+ return fftn(x,shape,axes,overwrite_x)
+
+
+def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
+ """
+ 2-D discrete inverse Fourier transform of real or complex sequence.
+
+ Return inverse 2-D discrete Fourier transform of
+ arbitrary type sequence x.
+
+ See `ifft` for more information.
+
+ See Also
+ --------
+ fft2, ifft
+
+ Examples
+ --------
+ >>> from scipy.fftpack import fft2, ifft2
+ >>> y = np.mgrid[:5, :5][0]
+ >>> y
+ array([[0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4]])
+ >>> np.allclose(y, fft2(ifft2(y)))
+ True
+
+ """
+ return ifftn(x,shape,axes,overwrite_x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/convolve.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/convolve.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..074ab4d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/convolve.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/helper.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/helper.py
new file mode 100644
index 0000000..74f9a68
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/helper.py
@@ -0,0 +1,109 @@
+import operator
+from numpy.fft.helper import fftshift, ifftshift, fftfreq
+import scipy.fft._pocketfft.helper as _helper
+import numpy as np
+__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']
+
+
def rfftfreq(n, d=1.0):
    """DFT sample frequencies (for usage with rfft, irfft).

    The returned float array contains the frequency bins in
    cycles/unit (with zero at the start) given a window length `n` and a
    sample spacing `d`::

      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n)   if n is even
      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n)   if n is odd

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing. Default is 1.

    Returns
    -------
    out : ndarray
        The array of length `n`, containing the sample frequencies.

    Raises
    ------
    ValueError
        If `n` is negative.

    Examples
    --------
    >>> from scipy import fftpack
    >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
    >>> sig_fft = fftpack.rfft(sig)
    >>> freq = fftpack.rfftfreq(sig_fft.size, d=0.1)
    >>> freq
    array([ 0.  ,  1.25,  1.25,  2.5 ,  2.5 ,  3.75,  3.75,  5.  ])

    """
    n = operator.index(n)
    if n < 0:
        raise ValueError("n = %s is not valid. "
                         "n must be a nonnegative integer." % n)
    # Bin indices come in pairs (cos/sin of the same frequency) in
    # fftpack's rfft packing, hence the integer halving.
    bin_indices = np.arange(1, n + 1, dtype=int) // 2
    return bin_indices / float(n * d)
+
+
def next_fast_len(target):
    """
    Find the next fast size of input data to `fft`, for zero-padding, etc.

    SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
    returns the next composite of the prime factors 2, 3, and 5 which is
    greater than or equal to `target` (also known as 5-smooth numbers,
    regular numbers, or Hamming numbers).

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.

    Returns
    -------
    out : int
        The first 5-smooth number greater than or equal to `target`.

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    An FFT of prime length is the worst case for speed; zero-padding to
    the next 5-smooth length can give a large speedup:

    >>> from scipy import fftpack
    >>> fftpack.helper.next_fast_len(10007)
    10125
    >>> a = np.random.randn(10007)
    >>> b = fftpack.fft(a, 10125)

    """
    # `real=True` restricts the search to the regular (5-smooth) sizes
    # used by the real-input transforms, which keeps the result
    # backwards compatible with the old FFTPACK behaviour.
    return _helper.good_size(target, real=True)
+
+
+def _good_shape(x, shape, axes):
+ """Ensure that shape argument is valid for scipy.fftpack
+
+ scipy.fftpack does not support len(shape) < x.ndim when axes is not given.
+ """
+ if shape and not axes:
+ shape = _helper._iterable_of_int(shape, 'shape')
+ if len(shape) != np.ndim(x):
+ raise ValueError("when given, axes and shape arguments"
+ " have to be of the same length")
+ return shape
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/pseudo_diffs.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/pseudo_diffs.py
new file mode 100644
index 0000000..b8ef40e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/pseudo_diffs.py
@@ -0,0 +1,551 @@
+"""
+Differential and pseudo-differential operators.
+"""
+# Created by Pearu Peterson, September 2002
+
+__all__ = ['diff',
+ 'tilbert','itilbert','hilbert','ihilbert',
+ 'cs_diff','cc_diff','sc_diff','ss_diff',
+ 'shift']
+
+from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
+from . import convolve
+
+from scipy.fft._pocketfft.helper import _datacopied
+
+
# Kernel cache for `diff`; it is bound to the function below as a
# default argument and then removed from the module namespace.
_cache = {}


def diff(x,order=1,period=None, _cache=_cache):
    """
    Return kth derivative (or integral) of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

      y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j
      y_0 = 0 if order is not 0.

    Parameters
    ----------
    x : array_like
        Input array.
    order : int, optional
        The order of differentiation. Default order is 1. If order is
        negative, then integration is carried out under the assumption
        that ``x_0 == 0``.
    period : float, optional
        The assumed period of the sequence. Default is ``2*pi``.

    Notes
    -----
    If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within
    numerical accuracy).

    For odd order and even ``len(x)``, the Nyquist mode is taken zero.

    """
    tmp = asarray(x)
    if order == 0:
        return tmp
    if iscomplexobj(tmp):
        # Apply the operator to the real and imaginary parts separately.
        return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period)
    if period is not None:
        # Rescale the wavenumbers so the kernel can assume a 2*pi period.
        c = 2*pi/period
    else:
        c = 1.0
    n = len(x)
    omega = _cache.get((n,order,c))
    if omega is None:
        # Crude eviction policy: once more than 20 kernels are cached,
        # drop them all before computing a new one.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k,order=order,c=c):
            # Magnitude of the Fourier multiplier (c*k)**order; k == 0
            # maps to 0 so the mean is annihilated (y_0 = 0 above).
            # NOTE(review): the i**order factor appears to be encoded via
            # `d=order` / `swap_real_imag` in convolve — confirm against
            # convolve.init_convolution_kernel's documentation.
            if k:
                return pow(c*k,order)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=order,
                                                 zero_nyquist=1)
        _cache[(n,order,c)] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=order % 2,
                             overwrite_x=overwrite_x)


# Drop the module-level name; the dict stays alive as the default
# argument of `diff` above.
del _cache
+
+
# Kernel cache for `tilbert`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def tilbert(x, h, period=None, _cache=_cache):
    """
    Return h-Tilbert transform of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        The input array to transform.
    h : float
        Defines the parameter of the Tilbert transform.
    period : float, optional
        The assumed period of the sequence. Default period is ``2*pi``.

    Returns
    -------
    tilbert : ndarray
        The result of the transform.

    Notes
    -----
    If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd, then
    ``tilbert(itilbert(x)) == x``.

    If ``2 * pi * h / period`` is approximately 10 or larger, then
    numerically ``tilbert == hilbert``
    (theoretically oo-Tilbert == Hilbert).

    For even ``len(x)``, the Nyquist mode of ``x`` is taken zero.

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return tilbert(tmp.real, h, period) + \
            1j * tilbert(tmp.imag, h, period)

    if period is not None:
        # Fold the period into h so the kernel can assume 2*pi.
        h = h * 2 * pi / period

    n = len(x)
    omega = _cache.get((n, h))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k, h=h):
            # coth(h*k) = 1/tanh(h*k); the zero mode is annihilated.
            if k:
                return 1.0/tanh(h*k)

            return 0

        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[(n,h)] = omega

    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
# Kernel cache for `itilbert`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def itilbert(x,h,period=None, _cache=_cache):
    """
    Return inverse h-Tilbert transform of a periodic sequence x.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
        y_0 = 0

    For more details, see `tilbert`.

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return itilbert(tmp.real,h,period) + \
            1j*itilbert(tmp.imag,h,period)
    if period is not None:
        # Fold the period into h so the kernel can assume 2*pi.
        h = h*2*pi/period
    n = len(x)
    omega = _cache.get((n,h))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k,h=h):
            # -tanh(h*k) is the inverse of the coth multiplier in tilbert.
            if k:
                return -tanh(h*k)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[(n,h)] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
# Kernel cache for `hilbert`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def hilbert(x, _cache=_cache):
    """
    Return Hilbert transform of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sqrt(-1)*sign(j) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        The input array, should be periodic.
    _cache : dict, optional
        Dictionary that contains the kernel used to do a convolution with.

    Returns
    -------
    y : ndarray
        The transformed input.

    See Also
    --------
    scipy.signal.hilbert : Compute the analytic signal, using the Hilbert
                           transform.

    Notes
    -----
    If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``.

    For even len(x), the Nyquist mode of x is taken zero.

    The sign of the returned transform does not have a factor -1 that is more
    often than not found in the definition of the Hilbert transform. Note also
    that `scipy.signal.hilbert` does have an extra -1 factor compared to this
    function.

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return hilbert(tmp.real)+1j*hilbert(tmp.imag)
    n = len(x)
    omega = _cache.get(n)
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k):
            # sign(k) multiplier; the zero mode is annihilated (y_0 = 0).
            if k > 0:
                return 1.0
            elif k < 0:
                return -1.0
            return 0.0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[n] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
def ihilbert(x):
    """
    Return inverse Hilbert transform of a periodic sequence x.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = -sqrt(-1)*sign(j) * x_j
        y_0 = 0

    """
    # The inverse multiplier is exactly the negation of the forward
    # Hilbert multiplier, so simply negate the forward transform.
    forward = hilbert(x)
    return -forward
+
+
# Kernel cache for `cs_diff`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def cs_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a, b : float
        Defines the parameters of the cosh/sinh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence. Default period is ``2*pi``.

    Returns
    -------
    cs_diff : ndarray
        Pseudo-derivative of periodic sequence `x`.

    Notes
    -----
    For even len(`x`), the Nyquist mode of `x` is taken as zero.

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return cs_diff(tmp.real,a,b,period) + \
            1j*cs_diff(tmp.imag,a,b,period)
    if period is not None:
        # Rescale both parameters so the kernel can assume a 2*pi period.
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k,a=a,b=b):
            # -cosh(a*k)/sinh(b*k) multiplier; the zero mode is annihilated.
            if k:
                return -cosh(a*k)/sinh(b*k)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[(n,a,b)] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
# Kernel cache for `sc_diff`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def sc_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        Input array.
    a,b : float
        Defines the parameters of the sinh/cosh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence x. Default is 2*pi.

    Notes
    -----
    ``sc_diff(cs_diff(x,a,b),b,a) == x``
    For even ``len(x)``, the Nyquist mode of x is taken as zero.

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return sc_diff(tmp.real,a,b,period) + \
            1j*sc_diff(tmp.imag,a,b,period)
    if period is not None:
        # Rescale both parameters so the kernel can assume a 2*pi period.
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k,a=a,b=b):
            # sinh(a*k)/cosh(b*k) multiplier; the zero mode is annihilated.
            if k:
                return sinh(a*k)/cosh(b*k)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[(n,a,b)] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
# Kernel cache for `ss_diff`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def ss_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
        y_0 = a/b * x_0

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a,b
        Defines the parameters of the sinh/sinh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Notes
    -----
    ``ss_diff(ss_diff(x,a,b),b,a) == x``

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return ss_diff(tmp.real,a,b,period) + \
            1j*ss_diff(tmp.imag,a,b,period)
    if period is not None:
        # Rescale both parameters so the kernel can assume a 2*pi period.
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k,a=a,b=b):
            if k:
                return sinh(a*k)/sinh(b*k)
            # Zero mode is scaled, not annihilated: y_0 = a/b * x_0.
            return float(a)/b
        omega = convolve.init_convolution_kernel(n,kernel)
        _cache[(n,a,b)] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
# Kernel cache for `cc_diff`; bound as a default argument below, then
# removed from the module namespace.
_cache = {}


def cc_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a,b : float
        Defines the parameters of the cosh/cosh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Returns
    -------
    cc_diff : ndarray
        Pseudo-derivative of periodic sequence `x`.

    Notes
    -----
    ``cc_diff(cc_diff(x,a,b),b,a) == x``

    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform the real and imaginary parts independently.
        return cc_diff(tmp.real,a,b,period) + \
            1j*cc_diff(tmp.imag,a,b,period)
    if period is not None:
        # Rescale both parameters so the kernel can assume a 2*pi period.
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        def kernel(k,a=a,b=b):
            # Multiplier is nonzero for every mode, including k == 0.
            return cosh(a*k)/cosh(b*k)
        omega = convolve.init_convolution_kernel(n,kernel)
        _cache[(n,a,b)] = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
+
+
# Kernel cache for `shift`; bound as a default argument below, then
# removed from the module namespace. Each entry holds a pair of
# kernels (real and imaginary parts of the shift multiplier).
_cache = {}


def shift(x, a, period=None, _cache=_cache):
    """
    Shift periodic sequence x by a: y(u) = x(u+a).

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_j

    Parameters
    ----------
    x : array_like
        The array to shift.
    a : float
        Defines the amount of shift.
    period : float, optional
        The period of the sequences x and y. Default period is ``2*pi``.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Shift the real and imaginary parts independently.
        return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period)
    if period is not None:
        # Rescale the shift so the kernels can assume a 2*pi period.
        a = a*2*pi/period
    n = len(x)
    omega = _cache.get((n,a))
    if omega is None:
        # Crude eviction: empty the cache once it holds > 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()

        # The complex multiplier exp(i*a*k) is split into its real (cos)
        # and imaginary (sin) parts; convolve_z combines the two real
        # convolutions below.
        def kernel_real(k,a=a):
            return cos(a*k)

        def kernel_imag(k,a=a):
            return sin(a*k)
        omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0,
                                                      zero_nyquist=0)
        omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1,
                                                      zero_nyquist=0)
        _cache[(n,a)] = omega_real,omega_imag
    else:
        omega_real,omega_imag = omega
    # Safe to work in place only when `asarray` already made a copy.
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve_z(tmp,omega_real,omega_imag,
                               overwrite_x=overwrite_x)


# Drop the module-level name; the cache lives on as the default argument.
del _cache
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/realtransforms.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/realtransforms.py
new file mode 100644
index 0000000..dc17b83
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/realtransforms.py
@@ -0,0 +1,588 @@
+"""
+Real spectrum transforms (DCT, DST, MDCT)
+"""
+
+__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
+
+from scipy.fft import _pocketfft
+from .helper import _good_shape
+
# DCT/DST type of the inverse transform for each forward type:
# types 2 and 3 are inverses of each other; types 1 and 4 are self-inverse.
_inverse_typemap = {1: 1, 2: 3, 3: 2, 4: 4}
+
+
def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """
    Return multidimensional Discrete Cosine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see `dct` for definitions). Default type is 2.
    shape : int or array_like of ints or None, optional
        Shape of the result. If both `shape` and `axes` are None, `shape`
        is ``x.shape``; if `shape` is None but `axes` is not, `shape` is
        ``scipy.take(x.shape, axes, axis=0)``. Dimensions are zero-padded
        or truncated to match `shape`; a -1 entry keeps the corresponding
        dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes along which the DCT is computed. Default is all axes.
    norm : {None, 'ortho'}, optional
        Normalization mode (see `dct`). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idctn : Inverse multidimensional DCT

    Examples
    --------
    >>> from scipy.fftpack import dctn, idctn
    >>> y = np.random.randn(16, 16)
    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
    True

    """
    # Enforce the fftpack-specific restriction on `shape` length, then
    # delegate the actual transform to scipy.fft's pocketfft backend.
    checked_shape = _good_shape(x, shape, axes)
    return _pocketfft.dctn(x, type, checked_shape, axes, norm, overwrite_x)
+
+
def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """
    Return multidimensional inverse Discrete Cosine Transform along the
    specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see `dct` for definitions). Default type is 2.
    shape : int or array_like of ints or None, optional
        Shape of the result. If both `shape` and `axes` are None, `shape`
        is ``x.shape``; if `shape` is None but `axes` is not, `shape` is
        ``scipy.take(x.shape, axes, axis=0)``. Dimensions are zero-padded
        or truncated to match `shape`; a -1 entry keeps the corresponding
        dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes along which the IDCT is computed. Default is all axes.
    norm : {None, 'ortho'}, optional
        Normalization mode (see `idct`). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    dctn : multidimensional DCT

    Examples
    --------
    >>> from scipy.fftpack import dctn, idctn
    >>> y = np.random.randn(16, 16)
    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
    True

    """
    # The inverse of DCT type t is the forward DCT of the mapped type,
    # so translate and delegate to the forward N-D transform.
    inverse_type = _inverse_typemap[type]
    checked_shape = _good_shape(x, shape, axes)
    return _pocketfft.dctn(x, inverse_type, checked_shape, axes, norm,
                           overwrite_x)
+
+
def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """
    Return multidimensional Discrete Sine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see `dst` for definitions). Default type is 2.
    shape : int or array_like of ints or None, optional
        Shape of the result. If both `shape` and `axes` are None, `shape`
        is ``x.shape``; if `shape` is None but `axes` is not, `shape` is
        ``scipy.take(x.shape, axes, axis=0)``. Dimensions are zero-padded
        or truncated to match `shape`; a -1 entry keeps the corresponding
        dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes along which the DST is computed. Default is all axes.
    norm : {None, 'ortho'}, optional
        Normalization mode (see `dst`). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idstn : Inverse multidimensional DST

    Examples
    --------
    >>> from scipy.fftpack import dstn, idstn
    >>> y = np.random.randn(16, 16)
    >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
    True

    """
    # Enforce the fftpack-specific restriction on `shape` length, then
    # delegate the actual transform to scipy.fft's pocketfft backend.
    checked_shape = _good_shape(x, shape, axes)
    return _pocketfft.dstn(x, type, checked_shape, axes, norm, overwrite_x)
+
+
def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """
    Return multidimensional inverse Discrete Sine Transform along the
    specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see `dst` for definitions). Default type is 2.
    shape : int or array_like of ints or None, optional
        Shape of the result. If both `shape` and `axes` are None, `shape`
        is ``x.shape``; if `shape` is None but `axes` is not, `shape` is
        ``scipy.take(x.shape, axes, axis=0)``. Dimensions are zero-padded
        or truncated to match `shape`; a -1 entry keeps the corresponding
        dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes along which the IDST is computed. Default is all axes.
    norm : {None, 'ortho'}, optional
        Normalization mode (see `idst`). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    dstn : multidimensional DST

    Examples
    --------
    >>> from scipy.fftpack import dstn, idstn
    >>> y = np.random.randn(16, 16)
    >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
    True

    """
    # The inverse of DST type t is the forward DST of the mapped type,
    # so translate and delegate to the forward N-D transform.
    inverse_type = _inverse_typemap[type]
    checked_shape = _good_shape(x, shape, axes)
    return _pocketfft.dstn(x, inverse_type, checked_shape, axes, norm,
                           overwrite_x)
+
+
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    r"""
    Return the Discrete Cosine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; default is the last axis.
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idct : Inverse DCT

    Notes
    -----
    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal
    to MATLAB ``dct(x)``.

    There are, theoretically, 8 types of the DCT; only the first 4 types
    are implemented in scipy. 'The' DCT generally refers to DCT type 2,
    and 'the' Inverse DCT to DCT type 3. With ``norm=None`` the
    transforms are unnormalized:

    - **Type I** (input size must be > 1):
      :math:`y_k = x_0 + (-1)^k x_{N-1} +
      2\sum_{n=1}^{N-2} x_n \cos(\pi k n/(N-1))`
    - **Type II**:
      :math:`y_k = 2\sum_{n=0}^{N-1} x_n \cos(\pi k(2n+1)/(2N))`
    - **Type III** (inverse of type II up to a factor ``2N``):
      :math:`y_k = x_0 + 2\sum_{n=1}^{N-1} x_n \cos(\pi(2k+1)n/(2N))`
    - **Type IV** (added in 1.2.0):
      :math:`y_k = 2\sum_{n=0}^{N-1} x_n \cos(\pi(2k+1)(2n+1)/(4N))`

    With ``norm='ortho'`` each type is scaled so the transform matrix is
    orthonormal; orthonormalization for DCT-I was added in 1.2.0.

    References
    ----------
    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
           Makhoul, `IEEE Transactions on acoustics, speech and signal
           processing` vol. 28(1), pp. 27-34,
           :doi:`10.1109/TASSP.1980.1163351` (1980).
    .. [2] Wikipedia, "Discrete cosine transform",
           https://en.wikipedia.org/wiki/Discrete_cosine_transform

    Examples
    --------
    The Type 1 DCT is equivalent to the FFT (though faster) for real,
    even-symmetrical inputs:

    >>> from scipy.fftpack import fft, dct
    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
    >>> dct(np.array([4., 3., 5., 10.]), 1)
    array([ 30.,  -8.,   6.,  -2.])

    """
    # Thin legacy wrapper: the computation lives in scipy.fft's
    # pocketfft backend, whose norm=None convention matches fftpack's.
    return _pocketfft.dct(x, type=type, n=n, axis=axis, norm=norm,
                          overwrite_x=overwrite_x)
+
+
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idct is computed; default is the last axis.
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    idct : ndarray of real
        The transformed input array.

    See Also
    --------
    dct : Forward DCT

    Notes
    -----
    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal
    to MATLAB ``idct(x)``.

    'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3.
    IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of
    type 3, IDCT of type 3 is the DCT of type 2, and IDCT of type 4 is
    the DCT of type 4. For the definitions of these types, see `dct`.

    Examples
    --------
    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
    inputs:

    >>> from scipy.fftpack import ifft, idct
    >>> ifft(np.array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])).real
    array([  4.,   3.,   5.,  10.,   5.,   3.])
    >>> idct(np.array([ 30.,  -8.,   6.,  -2.]), 1) / 6
    array([  4.,   3.,   5.,  10.])

    """
    # An inverse DCT is the forward DCT of the mapped type; inline the
    # translation and delegate to the pocketfft backend.
    return _pocketfft.dct(x, _inverse_typemap[type], n, axis, norm,
                          overwrite_x)
+
+
def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    r"""
    Return the Discrete Sine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dst is computed; default is the last axis.
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    dst : ndarray of reals
        The transformed input array.

    See Also
    --------
    idst : Inverse DST

    Notes
    -----
    There are, theoretically, 8 types of the DST for different
    combinations of even/odd boundary conditions and boundary offsets
    [1]_; only the first 4 types are implemented in scipy. With
    ``norm=None`` the transforms are unnormalized:

    - **Type I** (input size must be > 1; self-inverse up to ``2(N+1)``):
      :math:`y_k = 2\sum_{n=0}^{N-1} x_n \sin(\pi(k+1)(n+1)/(N+1))`
    - **Type II**:
      :math:`y_k = 2\sum_{n=0}^{N-1} x_n \sin(\pi(k+1)(2n+1)/(2N))`
    - **Type III** (inverse of type II up to a factor ``2N``; added in
      0.11.0):
      :math:`y_k = (-1)^k x_{N-1} +
      2\sum_{n=0}^{N-2} x_n \sin(\pi(2k+1)(n+1)/(2N))`
    - **Type IV** (self-inverse up to ``2N``; added in 1.2.0):
      :math:`y_k = 2\sum_{n=0}^{N-1} x_n \sin(\pi(2k+1)(2n+1)/(4N))`

    With ``norm='ortho'`` each type is scaled so the transform matrix is
    orthonormal.

    References
    ----------
    .. [1] Wikipedia, "Discrete sine transform",
           https://en.wikipedia.org/wiki/Discrete_sine_transform

    """
    # Thin legacy wrapper: the computation lives in scipy.fft's
    # pocketfft backend, whose norm=None convention matches fftpack's.
    return _pocketfft.dst(x, type=type, n=n, axis=axis, norm=norm,
                          overwrite_x=overwrite_x)
+
+
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idst is computed; default is the last axis.
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    idst : ndarray of real
        The transformed input array.

    See Also
    --------
    dst : Forward DST

    Notes
    -----
    'The' IDST is the IDST of type 2, which is the same as DST of type 3.
    IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of
    type 3, and IDST of type 3 is the DST of type 2. For the definitions
    of these types, see `dst`.

    .. versionadded:: 0.11.0

    """
    # An inverse DST is the forward DST of the mapped type; inline the
    # translation and delegate to the pocketfft backend.
    return _pocketfft.dst(x, _inverse_typemap[type], n, axis, norm,
                          overwrite_x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/setup.py
new file mode 100644
index 0000000..226b8cf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/setup.py
@@ -0,0 +1,16 @@
+# Created by Pearu Peterson, August 2002
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration('fftpack',parent_package, top_path)
+
+ config.add_data_dir('tests')
+
+ config.add_extension('convolve', sources=['convolve.c'])
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/Makefile b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/Makefile
new file mode 100644
index 0000000..39fdb58
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/Makefile
@@ -0,0 +1,13 @@
+CC = gcc
+LD = gcc
+
+fftw_single: fftw_dct.c
+ $(CC) -W -Wall -DDCT_TEST_USE_SINGLE $< -o $@ -lfftw3f
+
+fftw_double: fftw_dct.c
+ $(CC) -W -Wall $< -o $@ -lfftw3
+
+clean:
+ rm -f fftw_single
+ rm -f fftw_double
+ rm -f *.o
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_dct.c b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_dct.c
new file mode 100644
index 0000000..688eeb5
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_dct.c
@@ -0,0 +1,150 @@
+#include
+#include
+
+#include
+
+#if DCT_TEST_PRECISION == 1
+typedef float float_prec;
+#define PF "%.7f"
+#define FFTW_PLAN fftwf_plan
+#define FFTW_MALLOC fftwf_malloc
+#define FFTW_FREE fftwf_free
+#define FFTW_PLAN_CREATE fftwf_plan_r2r_1d
+#define FFTW_EXECUTE fftwf_execute
+#define FFTW_DESTROY_PLAN fftwf_destroy_plan
+#define FFTW_CLEANUP fftwf_cleanup
+#elif DCT_TEST_PRECISION == 2
+typedef double float_prec;
+#define PF "%.18f"
+#define FFTW_PLAN fftw_plan
+#define FFTW_MALLOC fftw_malloc
+#define FFTW_FREE fftw_free
+#define FFTW_PLAN_CREATE fftw_plan_r2r_1d
+#define FFTW_EXECUTE fftw_execute
+#define FFTW_DESTROY_PLAN fftw_destroy_plan
+#define FFTW_CLEANUP fftw_cleanup
+#elif DCT_TEST_PRECISION == 3
+typedef long double float_prec;
+#define PF "%.18Lf"
+#define FFTW_PLAN fftwl_plan
+#define FFTW_MALLOC fftwl_malloc
+#define FFTW_FREE fftwl_free
+#define FFTW_PLAN_CREATE fftwl_plan_r2r_1d
+#define FFTW_EXECUTE fftwl_execute
+#define FFTW_DESTROY_PLAN fftwl_destroy_plan
+#define FFTW_CLEANUP fftwl_cleanup
+#else
+#error DCT_TEST_PRECISION must be a number 1-3
+#endif
+
+
+enum type {
+ DCT_I = 1,
+ DCT_II = 2,
+ DCT_III = 3,
+ DCT_IV = 4,
+ DST_I = 5,
+ DST_II = 6,
+ DST_III = 7,
+ DST_IV = 8,
+};
+
+int gen(int type, int sz)
+{
+ float_prec *a, *b;
+ FFTW_PLAN p;
+ int i, tp;
+
+ a = FFTW_MALLOC(sizeof(*a) * sz);
+ if (a == NULL) {
+ fprintf(stderr, "failure\n");
+ exit(EXIT_FAILURE);
+ }
+ b = FFTW_MALLOC(sizeof(*b) * sz);
+ if (b == NULL) {
+ fprintf(stderr, "failure\n");
+ exit(EXIT_FAILURE);
+ }
+
+ switch(type) {
+ case DCT_I:
+ tp = FFTW_REDFT00;
+ break;
+ case DCT_II:
+ tp = FFTW_REDFT10;
+ break;
+ case DCT_III:
+ tp = FFTW_REDFT01;
+ break;
+ case DCT_IV:
+ tp = FFTW_REDFT11;
+ break;
+ case DST_I:
+ tp = FFTW_RODFT00;
+ break;
+ case DST_II:
+ tp = FFTW_RODFT10;
+ break;
+ case DST_III:
+ tp = FFTW_RODFT01;
+ break;
+ case DST_IV:
+ tp = FFTW_RODFT11;
+ break;
+ default:
+ fprintf(stderr, "unknown type\n");
+ exit(EXIT_FAILURE);
+ }
+
+ switch(type) {
+ case DCT_I:
+ case DCT_II:
+ case DCT_III:
+ case DCT_IV:
+ for(i=0; i < sz; ++i) {
+ a[i] = i;
+ }
+ break;
+ case DST_I:
+ case DST_II:
+ case DST_III:
+ case DST_IV:
+/* TODO: what should we do for dst's?*/
+ for(i=0; i < sz; ++i) {
+ a[i] = i;
+ }
+ break;
+ default:
+ fprintf(stderr, "unknown type\n");
+ exit(EXIT_FAILURE);
+ }
+
+ p = FFTW_PLAN_CREATE(sz, a, b, tp, FFTW_ESTIMATE);
+ FFTW_EXECUTE(p);
+ FFTW_DESTROY_PLAN(p);
+
+ for(i=0; i < sz; ++i) {
+ printf(PF"\n", b[i]);
+ }
+ FFTW_FREE(b);
+ FFTW_FREE(a);
+
+ return 0;
+}
+
+int main(int argc, char* argv[])
+{
+ int n, tp;
+
+ if (argc < 3) {
+ fprintf(stderr, "missing argument: program type n\n");
+ exit(EXIT_FAILURE);
+ }
+ tp = atoi(argv[1]);
+ n = atoi(argv[2]);
+
+ gen(tp, n);
+ FFTW_CLEANUP();
+
+ return 0;
+}
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_double_ref.npz b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_double_ref.npz
new file mode 100644
index 0000000..ee6dcb7
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_double_ref.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_longdouble_ref.npz b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_longdouble_ref.npz
new file mode 100644
index 0000000..cc53e6a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_longdouble_ref.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_single_ref.npz b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_single_ref.npz
new file mode 100644
index 0000000..8953d33
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/fftw_single_ref.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gen_fftw_ref.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gen_fftw_ref.py
new file mode 100644
index 0000000..f520daf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gen_fftw_ref.py
@@ -0,0 +1,74 @@
+from subprocess import Popen, PIPE, STDOUT
+
+import numpy as np
+
+SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024]
+
+
+def gen_data(dt):
+ arrays = {}
+
+ if dt == np.float128:
+ pg = './fftw_longdouble'
+ elif dt == np.double:
+ pg = './fftw_double'
+ elif dt == np.float32:
+ pg = './fftw_single'
+ else:
+ raise ValueError("unknown: %s" % dt)
+ # Generate test data using FFTW for reference
+ for type in [1, 2, 3, 4, 5, 6, 7, 8]:
+ arrays[type] = {}
+ for sz in SZ:
+ a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT)
+ st = [i.decode('ascii').strip() for i in a.stdout.readlines()]
+ arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt)
+
+ return arrays
+
+
+# generate single precision data
+data = gen_data(np.float32)
+filename = 'fftw_single_ref'
+# Save ref data into npz format
+d = {'sizes': SZ}
+for type in [1, 2, 3, 4]:
+ for sz in SZ:
+ d['dct_%d_%d' % (type, sz)] = data[type][sz]
+
+d['sizes'] = SZ
+for type in [5, 6, 7, 8]:
+ for sz in SZ:
+ d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
+np.savez(filename, **d)
+
+
+# generate double precision data
+data = gen_data(np.float64)
+filename = 'fftw_double_ref'
+# Save ref data into npz format
+d = {'sizes': SZ}
+for type in [1, 2, 3, 4]:
+ for sz in SZ:
+ d['dct_%d_%d' % (type, sz)] = data[type][sz]
+
+d['sizes'] = SZ
+for type in [5, 6, 7, 8]:
+ for sz in SZ:
+ d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
+np.savez(filename, **d)
+
+# generate long double precision data
+data = gen_data(np.float128)
+filename = 'fftw_longdouble_ref'
+# Save ref data into npz format
+d = {'sizes': SZ}
+for type in [1, 2, 3, 4]:
+ for sz in SZ:
+ d['dct_%d_%d' % (type, sz)] = data[type][sz]
+
+d['sizes'] = SZ
+for type in [5, 6, 7, 8]:
+ for sz in SZ:
+ d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
+np.savez(filename, **d)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gendata.m b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gendata.m
new file mode 100644
index 0000000..6c231df
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gendata.m
@@ -0,0 +1,21 @@
+x0 = linspace(0, 10, 11);
+x1 = linspace(0, 10, 15);
+x2 = linspace(0, 10, 16);
+x3 = linspace(0, 10, 17);
+
+x4 = randn(32, 1);
+x5 = randn(64, 1);
+x6 = randn(128, 1);
+x7 = randn(256, 1);
+
+y0 = dct(x0);
+y1 = dct(x1);
+y2 = dct(x2);
+y3 = dct(x3);
+y4 = dct(x4);
+y5 = dct(x5);
+y6 = dct(x6);
+y7 = dct(x7);
+
+save('test.mat', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', ...
+ 'y0', 'y1', 'y2', 'y3', 'y4', 'y5', 'y6', 'y7');
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gendata.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gendata.py
new file mode 100644
index 0000000..7914a1f
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/gendata.py
@@ -0,0 +1,6 @@
+import numpy as np
+from scipy.io import loadmat
+
+m = loadmat('test.mat', squeeze_me=True, struct_as_record=True,
+ mat_dtype=True)
+np.savez('test.npz', **m)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test.npz b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test.npz
new file mode 100644
index 0000000..f90294b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_basic.py
new file mode 100644
index 0000000..669286d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_basic.py
@@ -0,0 +1,858 @@
+# Created by Pearu Peterson, September 2002
+
+from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
+ assert_array_almost_equal_nulp, assert_array_less)
+import pytest
+from pytest import raises as assert_raises
+from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
+
+from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
+ swapaxes, double, cdouble)
+import numpy as np
+import numpy.fft
+from numpy.random import rand
+
+# "large" composite numbers supported by FFTPACK
+LARGE_COMPOSITE_SIZES = [
+ 2**13,
+ 2**5 * 3**5,
+ 2**3 * 3**3 * 5**2,
+]
+SMALL_COMPOSITE_SIZES = [
+ 2,
+ 2*3*5,
+ 2*2*3*3,
+]
+# prime
+LARGE_PRIME_SIZES = [
+ 2011
+]
+SMALL_PRIME_SIZES = [
+ 29
+]
+
+
+def _assert_close_in_norm(x, y, rtol, size, rdt):
+ # helper function for testing
+ err_msg = "size: %s rdt: %s" % (size, rdt)
+ assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
+
+
+def random(size):
+ return rand(*size)
+
+
+def get_mat(n):
+ data = arange(n)
+ data = add.outer(data, data)
+ return data
+
+
+def direct_dft(x):
+ x = asarray(x)
+ n = len(x)
+ y = zeros(n, dtype=cdouble)
+ w = -arange(n)*(2j*pi/n)
+ for i in range(n):
+ y[i] = dot(exp(i*w), x)
+ return y
+
+
+def direct_idft(x):
+ x = asarray(x)
+ n = len(x)
+ y = zeros(n, dtype=cdouble)
+ w = arange(n)*(2j*pi/n)
+ for i in range(n):
+ y[i] = dot(exp(i*w), x)/n
+ return y
+
+
+def direct_dftn(x):
+ x = asarray(x)
+ for axis in range(len(x.shape)):
+ x = fft(x, axis=axis)
+ return x
+
+
+def direct_idftn(x):
+ x = asarray(x)
+ for axis in range(len(x.shape)):
+ x = ifft(x, axis=axis)
+ return x
+
+
+def direct_rdft(x):
+ x = asarray(x)
+ n = len(x)
+ w = -arange(n)*(2j*pi/n)
+ r = zeros(n, dtype=double)
+ for i in range(n//2+1):
+ y = dot(exp(i*w), x)
+ if i:
+ r[2*i-1] = y.real
+ if 2*i < n:
+ r[2*i] = y.imag
+ else:
+ r[0] = y.real
+ return r
+
+
+def direct_irdft(x):
+ x = asarray(x)
+ n = len(x)
+ x1 = zeros(n, dtype=cdouble)
+ for i in range(n//2+1):
+ if i:
+ if 2*i < n:
+ x1[i] = x[2*i-1] + 1j*x[2*i]
+ x1[n-i] = x[2*i-1] - 1j*x[2*i]
+ else:
+ x1[i] = x[2*i-1]
+ else:
+ x1[0] = x[0]
+ return direct_idft(x1).real
+
+
+class _TestFFTBase(object):
+ def setup_method(self):
+ self.cdt = None
+ self.rdt = None
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
+ y = fft(x)
+ assert_equal(y.dtype, self.cdt)
+ y1 = direct_dft(x)
+ assert_array_almost_equal(y,y1)
+ x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
+ assert_array_almost_equal(fft(x),direct_dft(x))
+
+ def test_n_argument_real(self):
+ x1 = np.array([1,2,3,4], dtype=self.rdt)
+ x2 = np.array([1,2,3,4], dtype=self.rdt)
+ y = fft([x1,x2],n=4)
+ assert_equal(y.dtype, self.cdt)
+ assert_equal(y.shape,(2,4))
+ assert_array_almost_equal(y[0],direct_dft(x1))
+ assert_array_almost_equal(y[1],direct_dft(x2))
+
+ def _test_n_argument_complex(self):
+ x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
+ x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
+ y = fft([x1,x2],n=4)
+ assert_equal(y.dtype, self.cdt)
+ assert_equal(y.shape,(2,4))
+ assert_array_almost_equal(y[0],direct_dft(x1))
+ assert_array_almost_equal(y[1],direct_dft(x2))
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, fft, [])
+ assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
+
+
+class TestDoubleFFT(_TestFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+
+
+class TestSingleFFT(_TestFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+
+ @pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved")
+ def test_notice(self):
+ pass
+
+
+class TestFloat16FFT(object):
+
+ def test_1_argument_real(self):
+ x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+ y = fft(x1, n=4)
+ assert_equal(y.dtype, np.complex64)
+ assert_equal(y.shape, (4, ))
+ assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
+
+ def test_n_argument_real(self):
+ x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+ x2 = np.array([1, 2, 3, 4], dtype=np.float16)
+ y = fft([x1, x2], n=4)
+ assert_equal(y.dtype, np.complex64)
+ assert_equal(y.shape, (2, 4))
+ assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
+ assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
+
+
+class _TestIFFTBase(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
+ y = ifft(x)
+ y1 = direct_idft(x)
+ assert_equal(y.dtype, self.cdt)
+ assert_array_almost_equal(y,y1)
+
+ x = np.array([1,2,3,4+0j,5], self.cdt)
+ assert_array_almost_equal(ifft(x),direct_idft(x))
+
+ def test_definition_real(self):
+ x = np.array([1,2,3,4,1,2,3,4], self.rdt)
+ y = ifft(x)
+ assert_equal(y.dtype, self.cdt)
+ y1 = direct_idft(x)
+ assert_array_almost_equal(y,y1)
+
+ x = np.array([1,2,3,4,5], dtype=self.rdt)
+ assert_equal(y.dtype, self.cdt)
+ assert_array_almost_equal(ifft(x),direct_idft(x))
+
+ def test_random_complex(self):
+ for size in [1,51,111,100,200,64,128,256,1024]:
+ x = random([size]).astype(self.cdt)
+ x = random([size]).astype(self.cdt) + 1j*x
+ y1 = ifft(fft(x))
+ y2 = fft(ifft(x))
+ assert_equal(y1.dtype, self.cdt)
+ assert_equal(y2.dtype, self.cdt)
+ assert_array_almost_equal(y1, x)
+ assert_array_almost_equal(y2, x)
+
+ def test_random_real(self):
+ for size in [1,51,111,100,200,64,128,256,1024]:
+ x = random([size]).astype(self.rdt)
+ y1 = ifft(fft(x))
+ y2 = fft(ifft(x))
+ assert_equal(y1.dtype, self.cdt)
+ assert_equal(y2.dtype, self.cdt)
+ assert_array_almost_equal(y1, x)
+ assert_array_almost_equal(y2, x)
+
+ def test_size_accuracy(self):
+ # Sanity check for the accuracy for prime and non-prime sized inputs
+ if self.rdt == np.float32:
+ rtol = 1e-5
+ elif self.rdt == np.float64:
+ rtol = 1e-10
+
+ for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+ np.random.seed(1234)
+ x = np.random.rand(size).astype(self.rdt)
+ y = ifft(fft(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+ y = fft(ifft(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+ x = (x + 1j*np.random.rand(size)).astype(self.cdt)
+ y = ifft(fft(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+ y = fft(ifft(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, ifft, [])
+ assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
+
+
+class TestDoubleIFFT(_TestIFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+
+
+class TestSingleIFFT(_TestIFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+
+
+class _TestRFFTBase(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
+ x = np.array(t, dtype=self.rdt)
+ y = rfft(x)
+ y1 = direct_rdft(x)
+ assert_array_almost_equal(y,y1)
+ assert_equal(y.dtype, self.rdt)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, rfft, [])
+ assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
+
+ # See gh-5790
+ class MockSeries(object):
+ def __init__(self, data):
+ self.data = np.asarray(data)
+
+ def __getattr__(self, item):
+ try:
+ return getattr(self.data, item)
+ except AttributeError as e:
+ raise AttributeError(("'MockSeries' object "
+ "has no attribute '{attr}'".
+ format(attr=item))) from e
+
+ def test_non_ndarray_with_dtype(self):
+ x = np.array([1., 2., 3., 4., 5.])
+ xs = _TestRFFTBase.MockSeries(x)
+
+ expected = [1, 2, 3, 4, 5]
+ rfft(xs)
+
+ # Data should not have been overwritten
+ assert_equal(x, expected)
+ assert_equal(xs.data, expected)
+
+ def test_complex_input(self):
+ assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64))
+
+
+class TestRFFTDouble(_TestRFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+
+
+class TestRFFTSingle(_TestRFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+
+
+class _TestIRFFTBase(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x1 = [1,2,3,4,1,2,3,4]
+ x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
+ x2 = [1,2,3,4,1,2,3,4,5]
+ x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
+
+ def _test(x, xr):
+ y = irfft(np.array(x, dtype=self.rdt))
+ y1 = direct_irdft(x)
+ assert_equal(y.dtype, self.rdt)
+ assert_array_almost_equal(y,y1, decimal=self.ndec)
+ assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
+
+ _test(x1, x1_1)
+ _test(x2, x2_1)
+
+ def test_random_real(self):
+ for size in [1,51,111,100,200,64,128,256,1024]:
+ x = random([size]).astype(self.rdt)
+ y1 = irfft(rfft(x))
+ y2 = rfft(irfft(x))
+ assert_equal(y1.dtype, self.rdt)
+ assert_equal(y2.dtype, self.rdt)
+ assert_array_almost_equal(y1, x, decimal=self.ndec,
+ err_msg="size=%d" % size)
+ assert_array_almost_equal(y2, x, decimal=self.ndec,
+ err_msg="size=%d" % size)
+
+ def test_size_accuracy(self):
+ # Sanity check for the accuracy for prime and non-prime sized inputs
+ if self.rdt == np.float32:
+ rtol = 1e-5
+ elif self.rdt == np.float64:
+ rtol = 1e-10
+
+ for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+ np.random.seed(1234)
+ x = np.random.rand(size).astype(self.rdt)
+ y = irfft(rfft(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+ y = rfft(irfft(x))
+ _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, irfft, [])
+ assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
+
+ def test_complex_input(self):
+ assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64))
+
+
+# self.ndec is bogus; we should have a assert_array_approx_equal for number of
+# significant digits
+
+class TestIRFFTDouble(_TestIRFFTBase):
+ def setup_method(self):
+ self.cdt = np.cdouble
+ self.rdt = np.double
+ self.ndec = 14
+
+
+class TestIRFFTSingle(_TestIRFFTBase):
+ def setup_method(self):
+ self.cdt = np.complex64
+ self.rdt = np.float32
+ self.ndec = 5
+
+
+class Testfft2(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_regression_244(self):
+ """FFT returns wrong result with axes parameter."""
+ # fftn (and hence fft2) used to break when both axes and shape were
+ # used
+ x = numpy.ones((4, 4, 2))
+ y = fft2(x, shape=(8, 8), axes=(-3, -2))
+ y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
+ assert_array_almost_equal(y, y_r)
+
+ def test_invalid_sizes(self):
+ assert_raises(ValueError, fft2, [[]])
+ assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
+
+
+class TestFftnSingle(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ y = fftn(np.array(x, np.float32))
+ assert_(y.dtype == np.complex64,
+ msg="double precision output with single precision")
+
+ y_r = np.array(fftn(x), np.complex64)
+ assert_array_almost_equal_nulp(y, y_r)
+
+ @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+ def test_size_accuracy_small(self, size):
+ x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+ y1 = fftn(x.real.astype(np.float32))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 2000)
+
+ @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+ def test_size_accuracy_large(self, size):
+ x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+ y1 = fftn(x.real.astype(np.float32))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 2000)
+
+ def test_definition_float16(self):
+ x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ y = fftn(np.array(x, np.float16))
+ assert_equal(y.dtype, np.complex64)
+ y_r = np.array(fftn(x), np.complex64)
+ assert_array_almost_equal_nulp(y, y_r)
+
+ @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+ def test_float16_input_small(self, size):
+ x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+ y1 = fftn(x.real.astype(np.float16))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 5e5)
+
+ @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+ def test_float16_input_large(self, size):
+ x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+ y1 = fftn(x.real.astype(np.float16))
+ y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+ assert_equal(y1.dtype, np.complex64)
+ assert_array_almost_equal_nulp(y1, y2, 2e6)
+
+
+class TestFftn(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_definition(self):
+ x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ y = fftn(x)
+ assert_array_almost_equal(y, direct_dftn(x))
+
+ x = random((20, 26))
+ assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+ x = random((5, 4, 3, 20))
+ assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+ def test_axes_argument(self):
+ # plane == ji_plane, x== kji_space
+ plane1 = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ plane2 = [[10, 11, 12],
+ [13, 14, 15],
+ [16, 17, 18]]
+ plane3 = [[19, 20, 21],
+ [22, 23, 24],
+ [25, 26, 27]]
+ ki_plane1 = [[1, 2, 3],
+ [10, 11, 12],
+ [19, 20, 21]]
+ ki_plane2 = [[4, 5, 6],
+ [13, 14, 15],
+ [22, 23, 24]]
+ ki_plane3 = [[7, 8, 9],
+ [16, 17, 18],
+ [25, 26, 27]]
+ jk_plane1 = [[1, 10, 19],
+ [4, 13, 22],
+ [7, 16, 25]]
+ jk_plane2 = [[2, 11, 20],
+ [5, 14, 23],
+ [8, 17, 26]]
+ jk_plane3 = [[3, 12, 21],
+ [6, 15, 24],
+ [9, 18, 27]]
+ kj_plane1 = [[1, 4, 7],
+ [10, 13, 16], [19, 22, 25]]
+ kj_plane2 = [[2, 5, 8],
+ [11, 14, 17], [20, 23, 26]]
+ kj_plane3 = [[3, 6, 9],
+ [12, 15, 18], [21, 24, 27]]
+ ij_plane1 = [[1, 4, 7],
+ [2, 5, 8],
+ [3, 6, 9]]
+ ij_plane2 = [[10, 13, 16],
+ [11, 14, 17],
+ [12, 15, 18]]
+ ij_plane3 = [[19, 22, 25],
+ [20, 23, 26],
+ [21, 24, 27]]
+ ik_plane1 = [[1, 10, 19],
+ [2, 11, 20],
+ [3, 12, 21]]
+ ik_plane2 = [[4, 13, 22],
+ [5, 14, 23],
+ [6, 15, 24]]
+ ik_plane3 = [[7, 16, 25],
+ [8, 17, 26],
+ [9, 18, 27]]
+ ijk_space = [jk_plane1, jk_plane2, jk_plane3]
+ ikj_space = [kj_plane1, kj_plane2, kj_plane3]
+ jik_space = [ik_plane1, ik_plane2, ik_plane3]
+ jki_space = [ki_plane1, ki_plane2, ki_plane3]
+ kij_space = [ij_plane1, ij_plane2, ij_plane3]
+ x = array([plane1, plane2, plane3])
+
+ assert_array_almost_equal(fftn(x),
+ fftn(x, axes=(-3, -2, -1))) # kji_space
+ assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
+ assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
+ y = fftn(x, axes=(2, 1, 0)) # ijk_space
+ assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
+ y = fftn(x, axes=(2, 0, 1)) # ikj_space
+ assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
+ fftn(ikj_space))
+ y = fftn(x, axes=(1, 2, 0)) # jik_space
+ assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
+ fftn(jik_space))
+ y = fftn(x, axes=(1, 0, 2)) # jki_space
+ assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
+ y = fftn(x, axes=(0, 2, 1)) # kij_space
+ assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
+
+ y = fftn(x, axes=(-2, -1)) # ji_plane
+ assert_array_almost_equal(fftn(plane1), y[0])
+ assert_array_almost_equal(fftn(plane2), y[1])
+ assert_array_almost_equal(fftn(plane3), y[2])
+
+ y = fftn(x, axes=(1, 2)) # ji_plane
+ assert_array_almost_equal(fftn(plane1), y[0])
+ assert_array_almost_equal(fftn(plane2), y[1])
+ assert_array_almost_equal(fftn(plane3), y[2])
+
+ y = fftn(x, axes=(-3, -2)) # kj_plane
+ assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
+ assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
+ assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
+
+ y = fftn(x, axes=(-3, -1)) # ki_plane
+ assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
+ assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
+ assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
+
+ y = fftn(x, axes=(-1, -2)) # ij_plane
+ assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
+ assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
+ assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
+
+ y = fftn(x, axes=(-1, -3)) # ik_plane
+ assert_array_almost_equal(fftn(ik_plane1),
+ swapaxes(y[:, 0, :], -1, -2))
+ assert_array_almost_equal(fftn(ik_plane2),
+ swapaxes(y[:, 1, :], -1, -2))
+ assert_array_almost_equal(fftn(ik_plane3),
+ swapaxes(y[:, 2, :], -1, -2))
+
+ y = fftn(x, axes=(-2, -3)) # jk_plane
+ assert_array_almost_equal(fftn(jk_plane1),
+ swapaxes(y[:, :, 0], -1, -2))
+ assert_array_almost_equal(fftn(jk_plane2),
+ swapaxes(y[:, :, 1], -1, -2))
+ assert_array_almost_equal(fftn(jk_plane3),
+ swapaxes(y[:, :, 2], -1, -2))
+
+ y = fftn(x, axes=(-1,)) # i_line
+ for i in range(3):
+ for j in range(3):
+ assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
+ y = fftn(x, axes=(-2,)) # j_line
+ for i in range(3):
+ for j in range(3):
+ assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
+ y = fftn(x, axes=(0,)) # k_line
+ for i in range(3):
+ for j in range(3):
+ assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
+
+ y = fftn(x, axes=()) # point
+ assert_array_almost_equal(y, x)
+
+ def test_shape_argument(self):
+ small_x = [[1, 2, 3],
+ [4, 5, 6]]
+ large_x1 = [[1, 2, 3, 0],
+ [4, 5, 6, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]]
+
+ y = fftn(small_x, shape=(4, 4))
+ assert_array_almost_equal(y, fftn(large_x1))
+
+ y = fftn(small_x, shape=(3, 4))
+ assert_array_almost_equal(y, fftn(large_x1[:-1]))
+
+ def test_shape_axes_argument(self):
+ small_x = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ large_x1 = array([[1, 2, 3, 0],
+ [4, 5, 6, 0],
+ [7, 8, 9, 0],
+ [0, 0, 0, 0]])
+ y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
+ assert_array_almost_equal(y, fftn(large_x1))
+ y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
+
+ assert_array_almost_equal(y, swapaxes(
+ fftn(swapaxes(large_x1, -1, -2)), -1, -2))
+
+ def test_shape_axes_argument2(self):
+ # Change shape of the last axis
+ x = numpy.random.random((10, 5, 3, 7))
+ y = fftn(x, axes=(-1,), shape=(8,))
+ assert_array_almost_equal(y, fft(x, axis=-1, n=8))
+
+ # Change shape of an arbitrary axis which is not the last one
+ x = numpy.random.random((10, 5, 3, 7))
+ y = fftn(x, axes=(-2,), shape=(8,))
+ assert_array_almost_equal(y, fft(x, axis=-2, n=8))
+
+ # Change shape of axes: cf #244, where shape and axes were mixed up
+ x = numpy.random.random((4, 4, 2))
+ y = fftn(x, axes=(-3, -2), shape=(8, 8))
+ assert_array_almost_equal(y,
+ numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
+
+ def test_shape_argument_more(self):
+ x = zeros((4, 4, 2))
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ fftn(x, shape=(8, 8, 2, 1))
+
+ def test_invalid_sizes(self):
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[1, 0\]\) specified"):
+ fftn([[]])
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[4, -3\]\) specified"):
+ fftn([[1, 1], [2, 2]], (4, -3))
+
+
+class TestIfftn(object):
+ dtype = None
+ cdtype = None
+
+ def setup_method(self):
+ np.random.seed(1234)
+
+ @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+ [(np.float64, np.complex128, 2000),
+ (np.float32, np.complex64, 3500)])
+ def test_definition(self, dtype, cdtype, maxnlp):
+ x = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]], dtype=dtype)
+ y = ifftn(x)
+ assert_equal(y.dtype, cdtype)
+ assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
+
+ x = random((20, 26))
+ assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+ x = random((5, 4, 3, 20))
+ assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+ @pytest.mark.parametrize('maxnlp', [2000, 3500])
+ @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+ def test_random_complex(self, maxnlp, size):
+ x = random([size, size]) + 1j*random([size, size])
+ assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
+ assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
+
+ def test_invalid_sizes(self):
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[1, 0\]\) specified"):
+ ifftn([[]])
+
+ with assert_raises(ValueError,
+ match="invalid number of data points"
+ r" \(\[4, -3\]\) specified"):
+ ifftn([[1, 1], [2, 2]], (4, -3))
+
+
+class FakeArray(object):
+ def __init__(self, data):
+ self._data = data
+ self.__array_interface__ = data.__array_interface__
+
+
+class FakeArray2(object):
+ def __init__(self, data):
+ self._data = data
+
+ def __array__(self):
+ return self._data
+
+
+class TestOverwrite(object):
+ """Check input overwrite behavior of the FFT functions."""
+
+ real_dtypes = (np.float32, np.float64)
+ dtypes = real_dtypes + (np.complex64, np.complex128)
+ fftsizes = [8, 16, 32]
+
+ def _check(self, x, routine, fftsize, axis, overwrite_x):
+ x2 = x.copy()
+ for fake in [lambda x: x, FakeArray, FakeArray2]:
+ routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
+
+ sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+ routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+ if not overwrite_x:
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+ def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
+ fftsize, overwrite_x):
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ data = np.random.randn(*shape)
+ data = data.astype(dtype)
+
+ self._check(data, routine, fftsize, axis,
+ overwrite_x=overwrite_x)
+
+ @pytest.mark.parametrize('dtype', dtypes)
+ @pytest.mark.parametrize('fftsize', fftsizes)
+ @pytest.mark.parametrize('overwrite_x', [True, False])
+ @pytest.mark.parametrize('shape,axes', [((16,), -1),
+ ((16, 2), 0),
+ ((2, 16), 1)])
+ def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
+ overwritable = (np.complex128, np.complex64)
+ self._check_1d(fft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+ self._check_1d(ifft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+
+ @pytest.mark.parametrize('dtype', real_dtypes)
+ @pytest.mark.parametrize('fftsize', fftsizes)
+ @pytest.mark.parametrize('overwrite_x', [True, False])
+ @pytest.mark.parametrize('shape,axes', [((16,), -1),
+ ((16, 2), 0),
+ ((2, 16), 1)])
+ def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
+ overwritable = self.real_dtypes
+ self._check_1d(irfft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+ self._check_1d(rfft, dtype, shape, axes, overwritable,
+ fftsize, overwrite_x)
+
+ def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
+ overwrite_x):
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ data = np.random.randn(*shape)
+ data = data.astype(dtype)
+
+ def fftshape_iter(shp):
+ if len(shp) <= 0:
+ yield ()
+ else:
+ for j in (shp[0]//2, shp[0], shp[0]*2):
+ for rest in fftshape_iter(shp[1:]):
+ yield (j,) + rest
+
+ if axes is None:
+ part_shape = shape
+ else:
+ part_shape = tuple(np.take(shape, axes))
+
+ for fftshape in fftshape_iter(part_shape):
+ self._check(data, routine, fftshape, axes,
+ overwrite_x=overwrite_x)
+ if data.ndim > 1:
+ self._check(data.T, routine, fftshape, axes,
+ overwrite_x=overwrite_x)
+
+ @pytest.mark.parametrize('dtype', dtypes)
+ @pytest.mark.parametrize('overwrite_x', [True, False])
+ @pytest.mark.parametrize('shape,axes', [((16,), None),
+ ((16,), (0,)),
+ ((16, 2), (0,)),
+ ((2, 16), (1,)),
+ ((8, 16), None),
+ ((8, 16), (0, 1)),
+ ((8, 16, 2), (0, 1)),
+ ((8, 16, 2), (1, 2)),
+ ((8, 16, 2), (0,)),
+ ((8, 16, 2), (1,)),
+ ((8, 16, 2), (2,)),
+ ((8, 16, 2), None),
+ ((8, 16, 2), (0, 1, 2))])
+ def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
+ overwritable = (np.complex128, np.complex64)
+ self._check_nd_one(fftn, dtype, shape, axes, overwritable,
+ overwrite_x)
+ self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
+ overwrite_x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_helper.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_helper.py
new file mode 100644
index 0000000..7aebd9b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_helper.py
@@ -0,0 +1,54 @@
+# Created by Pearu Peterson, September 2002
+
+__usage__ = """
+Build fftpack:
+ python setup_fftpack.py build
+Run tests if scipy is installed:
+ python -c 'import scipy;scipy.fftpack.test()'
+Run tests if fftpack is not installed:
+ python tests/test_helper.py []
+"""
+
+from numpy.testing import assert_array_almost_equal
+from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq
+
+from numpy import pi, random
+
+class TestFFTShift(object):
+
+ def test_definition(self):
+ x = [0,1,2,3,4,-4,-3,-2,-1]
+ y = [-4,-3,-2,-1,0,1,2,3,4]
+ assert_array_almost_equal(fftshift(x),y)
+ assert_array_almost_equal(ifftshift(y),x)
+ x = [0,1,2,3,4,-5,-4,-3,-2,-1]
+ y = [-5,-4,-3,-2,-1,0,1,2,3,4]
+ assert_array_almost_equal(fftshift(x),y)
+ assert_array_almost_equal(ifftshift(y),x)
+
+ def test_inverse(self):
+ for n in [1,4,9,100,211]:
+ x = random.random((n,))
+ assert_array_almost_equal(ifftshift(fftshift(x)),x)
+
+
+class TestFFTFreq(object):
+
+ def test_definition(self):
+ x = [0,1,2,3,4,-4,-3,-2,-1]
+ assert_array_almost_equal(9*fftfreq(9),x)
+ assert_array_almost_equal(9*pi*fftfreq(9,pi),x)
+ x = [0,1,2,3,4,-5,-4,-3,-2,-1]
+ assert_array_almost_equal(10*fftfreq(10),x)
+ assert_array_almost_equal(10*pi*fftfreq(10,pi),x)
+
+
+class TestRFFTFreq(object):
+
+ def test_definition(self):
+ x = [0,1,1,2,2,3,3,4,4]
+ assert_array_almost_equal(9*rfftfreq(9),x)
+ assert_array_almost_equal(9*pi*rfftfreq(9,pi),x)
+ x = [0,1,1,2,2,3,3,4,4,5]
+ assert_array_almost_equal(10*rfftfreq(10),x)
+ assert_array_almost_equal(10*pi*rfftfreq(10,pi),x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_import.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_import.py
new file mode 100644
index 0000000..b108bf2
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_import.py
@@ -0,0 +1,31 @@
+"""Test possibility of patching fftpack with pyfftw.
+
+No module source outside of scipy.fftpack should contain an import of
+the form `from scipy.fftpack import ...`, so that a simple replacement
+of scipy.fftpack by the corresponding fftw interface completely swaps
+the two FFT implementations.
+
+Because this simply inspects source files, we only need to run the test
+on one version of Python.
+"""
+
+
+from pathlib import Path
+import re
+import tokenize
+from numpy.testing import assert_
+import scipy
+
+class TestFFTPackImport(object):
+ def test_fftpack_import(self):
+ base = Path(scipy.__file__).parent
+ regexp = r"\s*from.+\.fftpack import .*\n"
+ for path in base.rglob("*.py"):
+ if base / "fftpack" in path.parents:
+ continue
+ # use tokenize to auto-detect encoding on systems where no
+ # default encoding is defined (e.g., LANG='C')
+ with tokenize.open(str(path)) as file:
+ assert_(all(not re.fullmatch(regexp, line)
+ for line in file),
+ "{0} contains an import from fftpack".format(path))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_pseudo_diffs.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_pseudo_diffs.py
new file mode 100644
index 0000000..8e50358
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_pseudo_diffs.py
@@ -0,0 +1,380 @@
+# Created by Pearu Peterson, September 2002
+
+__usage__ = """
+Build fftpack:
+ python setup_fftpack.py build
+Run tests if scipy is installed:
+ python -c 'import scipy;scipy.fftpack.test()'
+Run tests if fftpack is not installed:
+ python tests/test_pseudo_diffs.py []
+"""
+
+from numpy.testing import (assert_equal, assert_almost_equal,
+ assert_array_almost_equal)
+from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,
+ ihilbert, shift, fftfreq, cs_diff, sc_diff,
+ ss_diff, cc_diff)
+
+import numpy as np
+from numpy import arange, sin, cos, pi, exp, tanh, sum, sign
+from numpy.random import random
+
+
+def direct_diff(x,k=1,period=None):
+ fx = fft(x)
+ n = len(fx)
+ if period is None:
+ period = 2*pi
+ w = fftfreq(n)*2j*pi/period*n
+ if k < 0:
+ w = 1 / w**k
+ w[0] = 0.0
+ else:
+ w = w**k
+ if n > 2000:
+ w[250:n-250] = 0.0
+ return ifft(w*fx).real
+
+
+def direct_tilbert(x,h=1,period=None):
+ fx = fft(x)
+ n = len(fx)
+ if period is None:
+ period = 2*pi
+ w = fftfreq(n)*h*2*pi/period*n
+ w[0] = 1
+ w = 1j/tanh(w)
+ w[0] = 0j
+ return ifft(w*fx)
+
+
+def direct_itilbert(x,h=1,period=None):
+ fx = fft(x)
+ n = len(fx)
+ if period is None:
+ period = 2*pi
+ w = fftfreq(n)*h*2*pi/period*n
+ w = -1j*tanh(w)
+ return ifft(w*fx)
+
+
+def direct_hilbert(x):
+ fx = fft(x)
+ n = len(fx)
+ w = fftfreq(n)*n
+ w = 1j*sign(w)
+ return ifft(w*fx)
+
+
+def direct_ihilbert(x):
+ return -direct_hilbert(x)
+
+
+def direct_shift(x,a,period=None):
+ n = len(x)
+ if period is None:
+ k = fftfreq(n)*1j*n
+ else:
+ k = fftfreq(n)*2j*pi/period*n
+ return ifft(fft(x)*exp(k*a)).real
+
+
+class TestDiff(object):
+
+ def test_definition(self):
+ for n in [16,17,64,127,32]:
+ x = arange(n)*2*pi/n
+ assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))
+ assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))
+ assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))
+ assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))
+ assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))
+ assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))
+ assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))
+ assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))
+ assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))
+ assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))
+ assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))
+ assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))
+ assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))
+ assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))
+ for k in range(5):
+ assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))
+ assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))
+
+ def test_period(self):
+ for n in [17,64]:
+ x = arange(n)/float(n)
+ assert_array_almost_equal(diff(sin(2*pi*x),period=1),
+ 2*pi*cos(2*pi*x))
+ assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),
+ -(2*pi)**3*cos(2*pi*x))
+
+ def test_sin(self):
+ for n in [32,64,77]:
+ x = arange(n)*2*pi/n
+ assert_array_almost_equal(diff(sin(x)),cos(x))
+ assert_array_almost_equal(diff(cos(x)),-sin(x))
+ assert_array_almost_equal(diff(sin(x),2),-sin(x))
+ assert_array_almost_equal(diff(sin(x),4),sin(x))
+ assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))
+ assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))
+
+ def test_expr(self):
+ for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:
+ x = arange(n)*2*pi/n
+ f = sin(x)*cos(4*x)+exp(sin(3*x))
+ df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
+ ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
+ - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
+ d1 = diff(f)
+ assert_array_almost_equal(d1,df)
+ assert_array_almost_equal(diff(df),ddf)
+ assert_array_almost_equal(diff(f,2),ddf)
+ assert_array_almost_equal(diff(ddf,-1),df)
+
+ def test_expr_large(self):
+ for n in [2048,4096]:
+ x = arange(n)*2*pi/n
+ f = sin(x)*cos(4*x)+exp(sin(3*x))
+ df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
+ ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
+ - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
+ assert_array_almost_equal(diff(f),df)
+ assert_array_almost_equal(diff(df),ddf)
+ assert_array_almost_equal(diff(ddf,-1),df)
+ assert_array_almost_equal(diff(f,2),ddf)
+
+ def test_int(self):
+ n = 64
+ x = arange(n)*2*pi/n
+ assert_array_almost_equal(diff(sin(x),-1),-cos(x))
+ assert_array_almost_equal(diff(sin(x),-2),-sin(x))
+ assert_array_almost_equal(diff(sin(x),-4),sin(x))
+ assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))
+
+ def test_random_even(self):
+ for k in [0,2,4,6]:
+ for n in [60,32,64,56,55]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ # zeroing Nyquist mode:
+ f = diff(diff(f,1),-1)
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(diff(diff(f,k),-k),f)
+ assert_array_almost_equal(diff(diff(f,-k),k),f)
+
+ def test_random_odd(self):
+ for k in [0,1,2,3,4,5,6]:
+ for n in [33,65,55]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(diff(diff(f,k),-k),f)
+ assert_array_almost_equal(diff(diff(f,-k),k),f)
+
+ def test_zero_nyquist(self):
+ for k in [0,1,2,3,4,5,6]:
+ for n in [32,33,64,56,55]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ # zeroing Nyquist mode:
+ f = diff(diff(f,1),-1)
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(diff(diff(f,k),-k),f)
+ assert_array_almost_equal(diff(diff(f,-k),k),f)
+
+
+class TestTilbert(object):
+
+ def test_definition(self):
+ for h in [0.1,0.5,1,5.5,10]:
+ for n in [16,17,64,127]:
+ x = arange(n)*2*pi/n
+ y = tilbert(sin(x),h)
+ y1 = direct_tilbert(sin(x),h)
+ assert_array_almost_equal(y,y1)
+ assert_array_almost_equal(tilbert(sin(x),h),
+ direct_tilbert(sin(x),h))
+ assert_array_almost_equal(tilbert(sin(2*x),h),
+ direct_tilbert(sin(2*x),h))
+
+ def test_random_even(self):
+ for h in [0.1,0.5,1,5.5,10]:
+ for n in [32,64,56]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)
+
+ def test_random_odd(self):
+ for h in [0.1,0.5,1,5.5,10]:
+ for n in [33,65,55]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(itilbert(tilbert(f,h),h),f)
+ assert_array_almost_equal(tilbert(itilbert(f,h),h),f)
+
+
+class TestITilbert(object):
+
+ def test_definition(self):
+ for h in [0.1,0.5,1,5.5,10]:
+ for n in [16,17,64,127]:
+ x = arange(n)*2*pi/n
+ y = itilbert(sin(x),h)
+ y1 = direct_itilbert(sin(x),h)
+ assert_array_almost_equal(y,y1)
+ assert_array_almost_equal(itilbert(sin(x),h),
+ direct_itilbert(sin(x),h))
+ assert_array_almost_equal(itilbert(sin(2*x),h),
+ direct_itilbert(sin(2*x),h))
+
+
+class TestHilbert(object):
+
+ def test_definition(self):
+ for n in [16,17,64,127]:
+ x = arange(n)*2*pi/n
+ y = hilbert(sin(x))
+ y1 = direct_hilbert(sin(x))
+ assert_array_almost_equal(y,y1)
+ assert_array_almost_equal(hilbert(sin(2*x)),
+ direct_hilbert(sin(2*x)))
+
+ def test_tilbert_relation(self):
+ for n in [16,17,64,127]:
+ x = arange(n)*2*pi/n
+ f = sin(x)+cos(2*x)*sin(x)
+ y = hilbert(f)
+ y1 = direct_hilbert(f)
+ assert_array_almost_equal(y,y1)
+ y2 = tilbert(f,h=10)
+ assert_array_almost_equal(y,y2)
+
+ def test_random_odd(self):
+ for n in [33,65,55]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(ihilbert(hilbert(f)),f)
+ assert_array_almost_equal(hilbert(ihilbert(f)),f)
+
+ def test_random_even(self):
+ for n in [32,64,56]:
+ f = random((n,))
+ af = sum(f,axis=0)/n
+ f = f-af
+ # zeroing Nyquist mode:
+ f = diff(diff(f,1),-1)
+ assert_almost_equal(sum(f,axis=0),0.0)
+ assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
+ assert_array_almost_equal(hilbert(ihilbert(f)),f)
+
+
+class TestIHilbert(object):
+
+ def test_definition(self):
+ for n in [16,17,64,127]:
+ x = arange(n)*2*pi/n
+ y = ihilbert(sin(x))
+ y1 = direct_ihilbert(sin(x))
+ assert_array_almost_equal(y,y1)
+ assert_array_almost_equal(ihilbert(sin(2*x)),
+ direct_ihilbert(sin(2*x)))
+
+ def test_itilbert_relation(self):
+ for n in [16,17,64,127]:
+ x = arange(n)*2*pi/n
+ f = sin(x)+cos(2*x)*sin(x)
+ y = ihilbert(f)
+ y1 = direct_ihilbert(f)
+ assert_array_almost_equal(y,y1)
+ y2 = itilbert(f,h=10)
+ assert_array_almost_equal(y,y2)
+
+
+class TestShift(object):
+
+ def test_definition(self):
+ for n in [18,17,64,127,32,2048,256]:
+ x = arange(n)*2*pi/n
+ for a in [0.1,3]:
+ assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))
+ assert_array_almost_equal(shift(sin(x),a),sin(x+a))
+ assert_array_almost_equal(shift(cos(x),a),cos(x+a))
+ assert_array_almost_equal(shift(cos(2*x)+sin(x),a),
+ cos(2*(x+a))+sin(x+a))
+ assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))
+ assert_array_almost_equal(shift(sin(x),2*pi),sin(x))
+ assert_array_almost_equal(shift(sin(x),pi),-sin(x))
+ assert_array_almost_equal(shift(sin(x),pi/2),cos(x))
+
+
+class TestOverwrite(object):
+ """Check input overwrite behavior """
+
+ real_dtypes = (np.float32, np.float64)
+ dtypes = real_dtypes + (np.complex64, np.complex128)
+
+ def _check(self, x, routine, *args, **kwargs):
+ x2 = x.copy()
+ routine(x2, *args, **kwargs)
+ sig = routine.__name__
+ if args:
+ sig += repr(args)
+ if kwargs:
+ sig += repr(kwargs)
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+ def _check_1d(self, routine, dtype, shape, *args, **kwargs):
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ data = np.random.randn(*shape)
+ data = data.astype(dtype)
+ self._check(data, routine, *args, **kwargs)
+
+ def test_diff(self):
+ for dtype in self.dtypes:
+ self._check_1d(diff, dtype, (16,))
+
+ def test_tilbert(self):
+ for dtype in self.dtypes:
+ self._check_1d(tilbert, dtype, (16,), 1.6)
+
+ def test_itilbert(self):
+ for dtype in self.dtypes:
+ self._check_1d(itilbert, dtype, (16,), 1.6)
+
+ def test_hilbert(self):
+ for dtype in self.dtypes:
+ self._check_1d(hilbert, dtype, (16,))
+
+ def test_cs_diff(self):
+ for dtype in self.dtypes:
+ self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)
+
+ def test_sc_diff(self):
+ for dtype in self.dtypes:
+ self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)
+
+ def test_ss_diff(self):
+ for dtype in self.dtypes:
+ self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)
+
+ def test_cc_diff(self):
+ for dtype in self.dtypes:
+ self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)
+
+ def test_shift(self):
+ for dtype in self.dtypes:
+ self._check_1d(shift, dtype, (16,), 1.0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_real_transforms.py b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_real_transforms.py
new file mode 100644
index 0000000..9b6c274
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/fftpack/tests/test_real_transforms.py
@@ -0,0 +1,817 @@
+from os.path import join, dirname
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_equal
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.fftpack.realtransforms import (
+ dct, idct, dst, idst, dctn, idctn, dstn, idstn)
+
+# Matlab reference data
+MDATA = np.load(join(dirname(__file__), 'test.npz'))
+X = [MDATA['x%d' % i] for i in range(8)]
+Y = [MDATA['y%d' % i] for i in range(8)]
+
+# FFTW reference data: the data are organized as follows:
+# * SIZES is an array containing all available sizes
+# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
+# contains the output of the DCT applied to the input np.linspace(0, size-1,
+# size)
+FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
+FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
+FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
+
+
+def fftw_dct_ref(type, size, dt):
+ x = np.linspace(0, size-1, size).astype(dt)
+ dt = np.result_type(np.float32, dt)
+ if dt == np.double:
+ data = FFTWDATA_DOUBLE
+ elif dt == np.float32:
+ data = FFTWDATA_SINGLE
+ else:
+ raise ValueError()
+ y = (data['dct_%d_%d' % (type, size)]).astype(dt)
+ return x, y, dt
+
+
+def fftw_dst_ref(type, size, dt):
+ x = np.linspace(0, size-1, size).astype(dt)
+ dt = np.result_type(np.float32, dt)
+ if dt == np.double:
+ data = FFTWDATA_DOUBLE
+ elif dt == np.float32:
+ data = FFTWDATA_SINGLE
+ else:
+ raise ValueError()
+ y = (data['dst_%d_%d' % (type, size)]).astype(dt)
+ return x, y, dt
+
+
+def dct_2d_ref(x, **kwargs):
+ """Calculate reference values for testing dct2."""
+ x = np.array(x, copy=True)
+ for row in range(x.shape[0]):
+ x[row, :] = dct(x[row, :], **kwargs)
+ for col in range(x.shape[1]):
+ x[:, col] = dct(x[:, col], **kwargs)
+ return x
+
+
+def idct_2d_ref(x, **kwargs):
+ """Calculate reference values for testing idct2."""
+ x = np.array(x, copy=True)
+ for row in range(x.shape[0]):
+ x[row, :] = idct(x[row, :], **kwargs)
+ for col in range(x.shape[1]):
+ x[:, col] = idct(x[:, col], **kwargs)
+ return x
+
+
+def dst_2d_ref(x, **kwargs):
+ """Calculate reference values for testing dst2."""
+ x = np.array(x, copy=True)
+ for row in range(x.shape[0]):
+ x[row, :] = dst(x[row, :], **kwargs)
+ for col in range(x.shape[1]):
+ x[:, col] = dst(x[:, col], **kwargs)
+ return x
+
+
+def idst_2d_ref(x, **kwargs):
+ """Calculate reference values for testing idst2."""
+ x = np.array(x, copy=True)
+ for row in range(x.shape[0]):
+ x[row, :] = idst(x[row, :], **kwargs)
+ for col in range(x.shape[1]):
+ x[:, col] = idst(x[:, col], **kwargs)
+ return x
+
+
+def naive_dct1(x, norm=None):
+ """Calculate textbook definition version of DCT-I."""
+ x = np.array(x, copy=True)
+ N = len(x)
+ M = N-1
+ y = np.zeros(N)
+ m0, m = 1, 2
+ if norm == 'ortho':
+ m0 = np.sqrt(1.0/M)
+ m = np.sqrt(2.0/M)
+ for k in range(N):
+ for n in range(1, N-1):
+ y[k] += m*x[n]*np.cos(np.pi*n*k/M)
+ y[k] += m0 * x[0]
+ y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1)
+ if norm == 'ortho':
+ y[0] *= 1/np.sqrt(2)
+ y[N-1] *= 1/np.sqrt(2)
+ return y
+
+
+def naive_dst1(x, norm=None):
+ """Calculate textbook definition version of DST-I."""
+ x = np.array(x, copy=True)
+ N = len(x)
+ M = N+1
+ y = np.zeros(N)
+ for k in range(N):
+ for n in range(N):
+ y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M)
+ if norm == 'ortho':
+ y *= np.sqrt(0.5/M)
+ return y
+
+
+def naive_dct4(x, norm=None):
+ """Calculate textbook definition version of DCT-IV."""
+ x = np.array(x, copy=True)
+ N = len(x)
+ y = np.zeros(N)
+ for k in range(N):
+ for n in range(N):
+ y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N))
+ if norm == 'ortho':
+ y *= np.sqrt(2.0/N)
+ else:
+ y *= 2
+ return y
+
+
+def naive_dst4(x, norm=None):
+ """Calculate textbook definition version of DST-IV."""
+ x = np.array(x, copy=True)
+ N = len(x)
+ y = np.zeros(N)
+ for k in range(N):
+ for n in range(N):
+ y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N))
+ if norm == 'ortho':
+ y *= np.sqrt(2.0/N)
+ else:
+ y *= 2
+ return y
+
+
+class TestComplex(object):
+ def test_dct_complex64(self):
+ y = dct(1j*np.arange(5, dtype=np.complex64))
+ x = 1j*dct(np.arange(5))
+ assert_array_almost_equal(x, y)
+
+ def test_dct_complex(self):
+ y = dct(np.arange(5)*1j)
+ x = 1j*dct(np.arange(5))
+ assert_array_almost_equal(x, y)
+
+ def test_idct_complex(self):
+ y = idct(np.arange(5)*1j)
+ x = 1j*idct(np.arange(5))
+ assert_array_almost_equal(x, y)
+
+ def test_dst_complex64(self):
+ y = dst(np.arange(5, dtype=np.complex64)*1j)
+ x = 1j*dst(np.arange(5))
+ assert_array_almost_equal(x, y)
+
+ def test_dst_complex(self):
+ y = dst(np.arange(5)*1j)
+ x = 1j*dst(np.arange(5))
+ assert_array_almost_equal(x, y)
+
+ def test_idst_complex(self):
+ y = idst(np.arange(5)*1j)
+ x = 1j*idst(np.arange(5))
+ assert_array_almost_equal(x, y)
+
+
+class _TestDCTBase(object):
+ def setup_method(self):
+ self.rdt = None
+ self.dec = 14
+ self.type = None
+
+ def test_definition(self):
+ for i in FFTWDATA_SIZES:
+ x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
+ y = dct(x, type=self.type)
+ assert_equal(y.dtype, dt)
+ # XXX: we divide by np.max(y) because the tests fail otherwise. We
+ # should really use something like assert_array_approx_equal. The
+ # difference is due to fftw using a better algorithm w.r.t error
+ # propagation compared to the ones from fftpack.
+ assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
+ err_msg="Size %d failed" % i)
+
+ def test_axis(self):
+ nt = 2
+ for i in [7, 8, 9, 16, 32, 64]:
+ x = np.random.randn(nt, i)
+ y = dct(x, type=self.type)
+ for j in range(nt):
+ assert_array_almost_equal(y[j], dct(x[j], type=self.type),
+ decimal=self.dec)
+
+ x = x.T
+ y = dct(x, axis=0, type=self.type)
+ for j in range(nt):
+ assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
+ decimal=self.dec)
+
+
+class _TestDCTIBase(_TestDCTBase):
+ def test_definition_ortho(self):
+ # Test orthornomal mode.
+ for i in range(len(X)):
+ x = np.array(X[i], dtype=self.rdt)
+ dt = np.result_type(np.float32, self.rdt)
+ y = dct(x, norm='ortho', type=1)
+ y2 = naive_dct1(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+class _TestDCTIIBase(_TestDCTBase):
+ def test_definition_matlab(self):
+ # Test correspondence with MATLAB (orthornomal mode).
+ for i in range(len(X)):
+ dt = np.result_type(np.float32, self.rdt)
+ x = np.array(X[i], dtype=dt)
+
+ yr = Y[i]
+ y = dct(x, norm="ortho", type=2)
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y, yr, decimal=self.dec)
+
+
+class _TestDCTIIIBase(_TestDCTBase):
+ def test_definition_ortho(self):
+ # Test orthornomal mode.
+ for i in range(len(X)):
+ x = np.array(X[i], dtype=self.rdt)
+ dt = np.result_type(np.float32, self.rdt)
+ y = dct(x, norm='ortho', type=2)
+ xi = dct(y, norm="ortho", type=3)
+ assert_equal(xi.dtype, dt)
+ assert_array_almost_equal(xi, x, decimal=self.dec)
+
+class _TestDCTIVBase(_TestDCTBase):
+ def test_definition_ortho(self):
+ # Test orthornomal mode.
+ for i in range(len(X)):
+ x = np.array(X[i], dtype=self.rdt)
+ dt = np.result_type(np.float32, self.rdt)
+ y = dct(x, norm='ortho', type=4)
+ y2 = naive_dct4(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+
+class TestDCTIDouble(_TestDCTIBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 10
+ self.type = 1
+
+
+class TestDCTIFloat(_TestDCTIBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 4
+ self.type = 1
+
+
+class TestDCTIInt(_TestDCTIBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 1
+
+
+class TestDCTIIDouble(_TestDCTIIBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 10
+ self.type = 2
+
+
+class TestDCTIIFloat(_TestDCTIIBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 5
+ self.type = 2
+
+
+class TestDCTIIInt(_TestDCTIIBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 2
+
+
+class TestDCTIIIDouble(_TestDCTIIIBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 14
+ self.type = 3
+
+
+class TestDCTIIIFloat(_TestDCTIIIBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 5
+ self.type = 3
+
+
+class TestDCTIIIInt(_TestDCTIIIBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 3
+
+
+class TestDCTIVDouble(_TestDCTIVBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 12
+ self.type = 3
+
+
+class TestDCTIVFloat(_TestDCTIVBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 5
+ self.type = 3
+
+
+class TestDCTIVInt(_TestDCTIVBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 3
+
+
+class _TestIDCTBase(object):
+ def setup_method(self):
+ self.rdt = None
+ self.dec = 14
+ self.type = None
+
+ def test_definition(self):
+ for i in FFTWDATA_SIZES:
+ xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
+ x = idct(yr, type=self.type)
+ if self.type == 1:
+ x /= 2 * (i-1)
+ else:
+ x /= 2 * i
+ assert_equal(x.dtype, dt)
+ # XXX: we divide by np.max(y) because the tests fail otherwise. We
+ # should really use something like assert_array_approx_equal. The
+ # difference is due to fftw using a better algorithm w.r.t error
+ # propagation compared to the ones from fftpack.
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
+ err_msg="Size %d failed" % i)
+
+
+class TestIDCTIDouble(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 10
+ self.type = 1
+
+
+class TestIDCTIFloat(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 4
+ self.type = 1
+
+
+class TestIDCTIInt(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 4
+ self.type = 1
+
+
+class TestIDCTIIDouble(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 10
+ self.type = 2
+
+
+class TestIDCTIIFloat(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 5
+ self.type = 2
+
+
+class TestIDCTIIInt(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 2
+
+
+class TestIDCTIIIDouble(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 14
+ self.type = 3
+
+
+class TestIDCTIIIFloat(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 5
+ self.type = 3
+
+
+class TestIDCTIIIInt(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 3
+
+class TestIDCTIVDouble(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 12
+ self.type = 4
+
+
+class TestIDCTIVFloat(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 5
+ self.type = 4
+
+
+class TestIDCTIVInt(_TestIDCTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 4
+
+class _TestDSTBase(object):
+ def setup_method(self):
+ self.rdt = None # dtype
+ self.dec = None # number of decimals to match
+ self.type = None # dst type
+
+ def test_definition(self):
+ for i in FFTWDATA_SIZES:
+ xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
+ y = dst(xr, type=self.type)
+ assert_equal(y.dtype, dt)
+ # XXX: we divide by np.max(y) because the tests fail otherwise. We
+ # should really use something like assert_array_approx_equal. The
+ # difference is due to fftw using a better algorithm w.r.t error
+ # propagation compared to the ones from fftpack.
+ assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
+ err_msg="Size %d failed" % i)
+
+
+class _TestDSTIBase(_TestDSTBase):
+ def test_definition_ortho(self):
+ # Test orthornomal mode.
+ for i in range(len(X)):
+ x = np.array(X[i], dtype=self.rdt)
+ dt = np.result_type(np.float32, self.rdt)
+ y = dst(x, norm='ortho', type=1)
+ y2 = naive_dst1(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+class _TestDSTIVBase(_TestDSTBase):
+ def test_definition_ortho(self):
+ # Test orthornomal mode.
+ for i in range(len(X)):
+ x = np.array(X[i], dtype=self.rdt)
+ dt = np.result_type(np.float32, self.rdt)
+ y = dst(x, norm='ortho', type=4)
+ y2 = naive_dst4(x, norm='ortho')
+ assert_equal(y.dtype, dt)
+ assert_array_almost_equal(y, y2, decimal=self.dec)
+
+class TestDSTIDouble(_TestDSTIBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 12
+ self.type = 1
+
+
+class TestDSTIFloat(_TestDSTIBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 4
+ self.type = 1
+
+
+class TestDSTIInt(_TestDSTIBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 1
+
+
+class TestDSTIIDouble(_TestDSTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 14
+ self.type = 2
+
+
+class TestDSTIIFloat(_TestDSTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 6
+ self.type = 2
+
+
+class TestDSTIIInt(_TestDSTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 6
+ self.type = 2
+
+
+class TestDSTIIIDouble(_TestDSTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 14
+ self.type = 3
+
+
+class TestDSTIIIFloat(_TestDSTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 7
+ self.type = 3
+
+
+class TestDSTIIIInt(_TestDSTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 7
+ self.type = 3
+
+
+class TestDSTIVDouble(_TestDSTIVBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 12
+ self.type = 4
+
+
+class TestDSTIVFloat(_TestDSTIVBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 4
+ self.type = 4
+
+
+class TestDSTIVInt(_TestDSTIVBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 5
+ self.type = 4
+
+
+class _TestIDSTBase(object):
+ def setup_method(self):
+ self.rdt = None
+ self.dec = None
+ self.type = None
+
+ def test_definition(self):
+ for i in FFTWDATA_SIZES:
+ xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
+ x = idst(yr, type=self.type)
+ if self.type == 1:
+ x /= 2 * (i+1)
+ else:
+ x /= 2 * i
+ assert_equal(x.dtype, dt)
+ # XXX: we divide by np.max(x) because the tests fail otherwise. We
+ # should really use something like assert_array_approx_equal. The
+ # difference is due to fftw using a better algorithm w.r.t error
+ # propagation compared to the ones from fftpack.
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
+ err_msg="Size %d failed" % i)
+
+
+class TestIDSTIDouble(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 12
+ self.type = 1
+
+
+class TestIDSTIFloat(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 4
+ self.type = 1
+
+
+class TestIDSTIInt(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 4
+ self.type = 1
+
+
+class TestIDSTIIDouble(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 14
+ self.type = 2
+
+
+class TestIDSTIIFloat(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 6
+ self.type = 2
+
+
+class TestIDSTIIInt(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 6
+ self.type = 2
+
+
+class TestIDSTIIIDouble(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 14
+ self.type = 3
+
+
+class TestIDSTIIIFloat(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 6
+ self.type = 3
+
+
+class TestIDSTIIIInt(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 6
+ self.type = 3
+
+
+class TestIDSTIVDouble(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.double
+ self.dec = 12
+ self.type = 4
+
+
+class TestIDSTIVFloat(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = np.float32
+ self.dec = 6
+ self.type = 4
+
+
+class TestIDSTIVnt(_TestIDSTBase):
+ def setup_method(self):
+ self.rdt = int
+ self.dec = 6
+ self.type = 4
+
+
+class TestOverwrite(object):
+ """Check input overwrite behavior."""
+
+ real_dtypes = [np.float32, np.float64]
+
+ def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
+ x2 = x.copy()
+ routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
+
+ sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+ routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+ if not overwrite_x:
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+ def _check_1d(self, routine, dtype, shape, axis):
+ np.random.seed(1234)
+ if np.issubdtype(dtype, np.complexfloating):
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+ else:
+ data = np.random.randn(*shape)
+ data = data.astype(dtype)
+
+ for type in [1, 2, 3, 4]:
+ for overwrite_x in [True, False]:
+ for norm in [None, 'ortho']:
+ self._check(data, routine, type, None, axis, norm,
+ overwrite_x)
+
+ def test_dct(self):
+ for dtype in self.real_dtypes:
+ self._check_1d(dct, dtype, (16,), -1)
+ self._check_1d(dct, dtype, (16, 2), 0)
+ self._check_1d(dct, dtype, (2, 16), 1)
+
+ def test_idct(self):
+ for dtype in self.real_dtypes:
+ self._check_1d(idct, dtype, (16,), -1)
+ self._check_1d(idct, dtype, (16, 2), 0)
+ self._check_1d(idct, dtype, (2, 16), 1)
+
+ def test_dst(self):
+ for dtype in self.real_dtypes:
+ self._check_1d(dst, dtype, (16,), -1)
+ self._check_1d(dst, dtype, (16, 2), 0)
+ self._check_1d(dst, dtype, (2, 16), 1)
+
+ def test_idst(self):
+ for dtype in self.real_dtypes:
+ self._check_1d(idst, dtype, (16,), -1)
+ self._check_1d(idst, dtype, (16, 2), 0)
+ self._check_1d(idst, dtype, (2, 16), 1)
+
+
+class Test_DCTN_IDCTN(object):
+ dec = 14
+ dct_type = [1, 2, 3, 4]
+ norms = [None, 'ortho']
+ rstate = np.random.RandomState(1234)
+ shape = (32, 16)
+ data = rstate.randn(*shape)
+
+ @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+ (dstn, idstn)])
+ @pytest.mark.parametrize('axes', [None,
+ 1, (1,), [1],
+ 0, (0,), [0],
+ (0, 1), [0, 1],
+ (-2, -1), [-2, -1]])
+ @pytest.mark.parametrize('dct_type', dct_type)
+ @pytest.mark.parametrize('norm', ['ortho'])
+ def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
+ tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
+ tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
+ assert_array_almost_equal(self.data, tmp, decimal=12)
+
+ @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
+ (dstn, dst_2d_ref)])
+ @pytest.mark.parametrize('dct_type', dct_type)
+ @pytest.mark.parametrize('norm', norms)
+ def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
+ dct_type, norm):
+ y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
+ y2 = fforward_ref(self.data, type=dct_type, norm=norm)
+ assert_array_almost_equal(y1, y2, decimal=11)
+
+ @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
+ (idstn, idst_2d_ref)])
+ @pytest.mark.parametrize('dct_type', dct_type)
+ @pytest.mark.parametrize('norm', [None, 'ortho'])
+ def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
+ dct_type, norm):
+ fdata = dctn(self.data, type=dct_type, norm=norm)
+ y1 = finverse(fdata, type=dct_type, norm=norm)
+ y2 = finverse_ref(fdata, type=dct_type, norm=norm)
+ assert_array_almost_equal(y1, y2, decimal=11)
+
+ @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+ (dstn, idstn)])
+ def test_axes_and_shape(self, fforward, finverse):
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ fforward(self.data, shape=self.data.shape[0], axes=(0, 1))
+
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ fforward(self.data, shape=self.data.shape[0], axes=None)
+
+ with assert_raises(ValueError,
+ match="when given, axes and shape arguments"
+ " have to be of the same length"):
+ fforward(self.data, shape=self.data.shape, axes=0)
+
+ @pytest.mark.parametrize('fforward', [dctn, dstn])
+ def test_shape(self, fforward):
+ tmp = fforward(self.data, shape=(128, 128), axes=None)
+ assert_equal(tmp.shape, (128, 128))
+
+ @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+ (dstn, idstn)])
+ @pytest.mark.parametrize('axes', [1, (1,), [1],
+ 0, (0,), [0]])
+ def test_shape_is_none_with_axes(self, fforward, finverse, axes):
+ tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
+ tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
+ assert_array_almost_equal(self.data, tmp, decimal=self.dec)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/__init__.py
new file mode 100644
index 0000000..7bacf3a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/__init__.py
@@ -0,0 +1,103 @@
+"""
+=============================================
+Integration and ODEs (:mod:`scipy.integrate`)
+=============================================
+
+.. currentmodule:: scipy.integrate
+
+Integrating functions, given function object
+============================================
+
+.. autosummary::
+ :toctree: generated/
+
+ quad -- General purpose integration
+ quad_vec -- General purpose integration of vector-valued functions
+ dblquad -- General purpose double integration
+ tplquad -- General purpose triple integration
+ nquad -- General purpose N-D integration
+ fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
+ quadrature -- Integrate with given tolerance using Gaussian quadrature
+ romberg -- Integrate func using Romberg integration
+ quad_explain -- Print information for use of quad
+ newton_cotes -- Weights and error coefficient for Newton-Cotes integration
+ IntegrationWarning -- Warning on issues during integration
+ AccuracyWarning -- Warning on issues during quadrature integration
+
+Integrating functions, given fixed samples
+==========================================
+
+.. autosummary::
+ :toctree: generated/
+
+ trapezoid -- Use trapezoidal rule to compute integral.
+ cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral.
+ simpson -- Use Simpson's rule to compute integral from samples.
+ romb -- Use Romberg Integration to compute integral from
+ -- (2**k + 1) evenly-spaced samples.
+
+.. seealso::
+
+ :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
+ quadrature roots and weights for other weighting factors and regions.
+
+Solving initial value problems for ODE systems
+==============================================
+
+The solvers are implemented as individual classes, which can be used directly
+(low-level usage) or through a convenience function.
+
+.. autosummary::
+ :toctree: generated/
+
+ solve_ivp -- Convenient function for ODE integration.
+ RK23 -- Explicit Runge-Kutta solver of order 3(2).
+ RK45 -- Explicit Runge-Kutta solver of order 5(4).
+ DOP853 -- Explicit Runge-Kutta solver of order 8.
+ Radau -- Implicit Runge-Kutta solver of order 5.
+ BDF -- Implicit multi-step variable order (1 to 5) solver.
+ LSODA -- LSODA solver from ODEPACK Fortran package.
+ OdeSolver -- Base class for ODE solvers.
+ DenseOutput -- Local interpolant for computing a dense output.
+ OdeSolution -- Class which represents a continuous ODE solution.
+
+
+Old API
+-------
+
+These are the routines developed earlier for SciPy. They wrap older solvers
+implemented in Fortran (mostly ODEPACK). While the interface to them is not
+particularly convenient and certain features are missing compared to the new
+API, the solvers themselves are of good quality and work fast as compiled
+Fortran code. In some cases, it might be worth using this old API.
+
+.. autosummary::
+ :toctree: generated/
+
+ odeint -- General integration of ordinary differential equations.
+ ode -- Integrate ODE using VODE and ZVODE routines.
+ complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
+
+
+Solving boundary value problems for ODE systems
+===============================================
+
+.. autosummary::
+ :toctree: generated/
+
+ solve_bvp -- Solve a boundary value problem for a system of ODEs.
+""" # noqa: E501
+from ._quadrature import *
+from .odepack import *
+from .quadpack import *
+from ._ode import *
+from ._bvp import solve_bvp
+from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
+ OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
+from ._quad_vec import quad_vec
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_bvp.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_bvp.py
new file mode 100644
index 0000000..1633909
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_bvp.py
@@ -0,0 +1,1158 @@
+"""Boundary value problem solver."""
+from warnings import warn
+
+import numpy as np
+from numpy.linalg import pinv
+
+from scipy.sparse import coo_matrix, csc_matrix
+from scipy.sparse.linalg import splu
+from scipy.optimize import OptimizeResult
+
+
+EPS = np.finfo(float).eps
+
+
+def estimate_fun_jac(fun, x, y, p, f0=None):
+ """Estimate derivatives of an ODE system rhs with forward differences.
+
+ Returns
+ -------
+ df_dy : ndarray, shape (n, n, m)
+ Derivatives with respect to y. An element (i, j, q) corresponds to
+ d f_i(x_q, y_q) / d (y_q)_j.
+ df_dp : ndarray with shape (n, k, m) or None
+ Derivatives with respect to p. An element (i, j, q) corresponds to
+ d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
+ """
+ n, m = y.shape
+ if f0 is None:
+ f0 = fun(x, y, p)
+
+ dtype = y.dtype
+
+ df_dy = np.empty((n, n, m), dtype=dtype)
+ h = EPS**0.5 * (1 + np.abs(y))
+ for i in range(n):
+ y_new = y.copy()
+ y_new[i] += h[i]
+ hi = y_new[i] - y[i]
+ f_new = fun(x, y_new, p)
+ df_dy[:, i, :] = (f_new - f0) / hi
+
+ k = p.shape[0]
+ if k == 0:
+ df_dp = None
+ else:
+ df_dp = np.empty((n, k, m), dtype=dtype)
+ h = EPS**0.5 * (1 + np.abs(p))
+ for i in range(k):
+ p_new = p.copy()
+ p_new[i] += h[i]
+ hi = p_new[i] - p[i]
+ f_new = fun(x, y, p_new)
+ df_dp[:, i, :] = (f_new - f0) / hi
+
+ return df_dy, df_dp
+
+
+def estimate_bc_jac(bc, ya, yb, p, bc0=None):
+ """Estimate derivatives of boundary conditions with forward differences.
+
+ Returns
+ -------
+ dbc_dya : ndarray, shape (n + k, n)
+ Derivatives with respect to ya. An element (i, j) corresponds to
+ d bc_i / d ya_j.
+ dbc_dyb : ndarray, shape (n + k, n)
+ Derivatives with respect to yb. An element (i, j) corresponds to
+ d bc_i / d ya_j.
+ dbc_dp : ndarray with shape (n + k, k) or None
+ Derivatives with respect to p. An element (i, j) corresponds to
+ d bc_i / d p_j. If `p` is empty, None is returned.
+ """
+ n = ya.shape[0]
+ k = p.shape[0]
+
+ if bc0 is None:
+ bc0 = bc(ya, yb, p)
+
+ dtype = ya.dtype
+
+ dbc_dya = np.empty((n, n + k), dtype=dtype)
+ h = EPS**0.5 * (1 + np.abs(ya))
+ for i in range(n):
+ ya_new = ya.copy()
+ ya_new[i] += h[i]
+ hi = ya_new[i] - ya[i]
+ bc_new = bc(ya_new, yb, p)
+ dbc_dya[i] = (bc_new - bc0) / hi
+ dbc_dya = dbc_dya.T
+
+ h = EPS**0.5 * (1 + np.abs(yb))
+ dbc_dyb = np.empty((n, n + k), dtype=dtype)
+ for i in range(n):
+ yb_new = yb.copy()
+ yb_new[i] += h[i]
+ hi = yb_new[i] - yb[i]
+ bc_new = bc(ya, yb_new, p)
+ dbc_dyb[i] = (bc_new - bc0) / hi
+ dbc_dyb = dbc_dyb.T
+
+ if k == 0:
+ dbc_dp = None
+ else:
+ h = EPS**0.5 * (1 + np.abs(p))
+ dbc_dp = np.empty((k, n + k), dtype=dtype)
+ for i in range(k):
+ p_new = p.copy()
+ p_new[i] += h[i]
+ hi = p_new[i] - p[i]
+ bc_new = bc(ya, yb, p_new)
+ dbc_dp[i] = (bc_new - bc0) / hi
+ dbc_dp = dbc_dp.T
+
+ return dbc_dya, dbc_dyb, dbc_dp
+
+
+def compute_jac_indices(n, m, k):
+ """Compute indices for the collocation system Jacobian construction.
+
+ See `construct_global_jac` for the explanation.
+ """
+ i_col = np.repeat(np.arange((m - 1) * n), n)
+ j_col = (np.tile(np.arange(n), n * (m - 1)) +
+ np.repeat(np.arange(m - 1) * n, n**2))
+
+ i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
+ j_bc = np.tile(np.arange(n), n + k)
+
+ i_p_col = np.repeat(np.arange((m - 1) * n), k)
+ j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
+
+ i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
+ j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
+
+ i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
+ j = np.hstack((j_col, j_col + n,
+ j_bc, j_bc + (m - 1) * n,
+ j_p_col, j_p_bc))
+
+ return i, j
+
+
+def stacked_matmul(a, b):
+ """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
+
+ Empirical optimization. Use outer Python loop and BLAS for large
+ matrices, otherwise use a single einsum call.
+ """
+ if a.shape[1] > 50:
+ out = np.empty((a.shape[0], a.shape[1], b.shape[2]))
+ for i in range(a.shape[0]):
+ out[i] = np.dot(a[i], b[i])
+ return out
+ else:
+ return np.einsum('...ij,...jk->...ik', a, b)
+
+
+def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
+ df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
+ """Construct the Jacobian of the collocation system.
+
+ There are n * m + k functions: m - 1 collocations residuals, each
+ containing n components, followed by n + k boundary condition residuals.
+
+ There are n * m + k variables: m vectors of y, each containing n
+ components, followed by k values of vector p.
+
+ For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
+ the following sparsity structure:
+
+ 1 1 2 2 0 0 0 0 5
+ 1 1 2 2 0 0 0 0 5
+ 0 0 1 1 2 2 0 0 5
+ 0 0 1 1 2 2 0 0 5
+ 0 0 0 0 1 1 2 2 5
+ 0 0 0 0 1 1 2 2 5
+
+ 3 3 0 0 0 0 4 4 6
+ 3 3 0 0 0 0 4 4 6
+ 3 3 0 0 0 0 4 4 6
+
+ Zeros denote identically zero values, other values denote different kinds
+ of blocks in the matrix (see below). The blank row indicates the separation
+ of collocation residuals from boundary conditions. And the blank column
+ indicates the separation of y values from p values.
+
+ Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
+ of collocation residuals with respect to y.
+
+ Parameters
+ ----------
+ n : int
+ Number of equations in the ODE system.
+ m : int
+ Number of nodes in the mesh.
+ k : int
+ Number of the unknown parameters.
+ i_jac, j_jac : ndarray
+ Row and column indices returned by `compute_jac_indices`. They
+ represent different blocks in the Jacobian matrix in the following
+ order (see the scheme above):
+
+ * 1: m - 1 diagonal n x n blocks for the collocation residuals.
+ * 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
+ * 3 : (n + k) x n block for the dependency of the boundary
+ conditions on ya.
+ * 4: (n + k) x n block for the dependency of the boundary
+ conditions on yb.
+ * 5: (m - 1) * n x k block for the dependency of the collocation
+ residuals on p.
+ * 6: (n + k) x k block for the dependency of the boundary
+ conditions on p.
+
+ df_dy : ndarray, shape (n, n, m)
+ Jacobian of f with respect to y computed at the mesh nodes.
+ df_dy_middle : ndarray, shape (n, n, m - 1)
+ Jacobian of f with respect to y computed at the middle between the
+ mesh nodes.
+ df_dp : ndarray with shape (n, k, m) or None
+ Jacobian of f with respect to p computed at the mesh nodes.
+ df_dp_middle: ndarray with shape (n, k, m - 1) or None
+ Jacobian of f with respect to p computed at the middle between the
+ mesh nodes.
+ dbc_dya, dbc_dyb : ndarray, shape (n, n)
+ Jacobian of bc with respect to ya and yb.
+ dbc_dp: ndarray with shape (n, k) or None
+ Jacobian of bc with respect to p.
+
+ Returns
+ -------
+ J : csc_matrix, shape (n * m + k, n * m + k)
+ Jacobian of the collocation system in a sparse form.
+
+ References
+ ----------
+ .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
+ Number 3, pp. 299-316, 2001.
+ """
+ df_dy = np.transpose(df_dy, (2, 0, 1))
+ df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
+
+ h = h[:, np.newaxis, np.newaxis]
+
+ dtype = df_dy.dtype
+
+ # Computing diagonal n x n blocks.
+ dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
+ dPhi_dy_0[:] = -np.identity(n)
+ dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
+ T = stacked_matmul(df_dy_middle, df_dy[:-1])
+ dPhi_dy_0 -= h**2 / 12 * T
+
+ # Computing off-diagonal n x n blocks.
+ dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
+ dPhi_dy_1[:] = np.identity(n)
+ dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
+ T = stacked_matmul(df_dy_middle, df_dy[1:])
+ dPhi_dy_1 += h**2 / 12 * T
+
+ values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
+ dbc_dyb.ravel()))
+
+ if k > 0:
+ df_dp = np.transpose(df_dp, (2, 0, 1))
+ df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
+ T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
+ df_dp_middle += 0.125 * h * T
+ dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
+ values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
+
+ J = coo_matrix((values, (i_jac, j_jac)))
+ return csc_matrix(J)
+
+
+def collocation_fun(fun, y, p, x, h):
+ """Evaluate collocation residuals.
+
+ This function lies in the core of the method. The solution is sought
+ as a cubic C1 continuous spline with derivatives matching the ODE rhs
+ at given nodes `x`. Collocation conditions are formed from the equality
+ of the spline derivatives and rhs of the ODE system in the middle points
+ between nodes.
+
+ Such method is classified to Lobbato IIIA family in ODE literature.
+ Refer to [1]_ for the formula and some discussion.
+
+ Returns
+ -------
+ col_res : ndarray, shape (n, m - 1)
+ Collocation residuals at the middle points of the mesh intervals.
+ y_middle : ndarray, shape (n, m - 1)
+ Values of the cubic spline evaluated at the middle points of the mesh
+ intervals.
+ f : ndarray, shape (n, m)
+ RHS of the ODE system evaluated at the mesh nodes.
+ f_middle : ndarray, shape (n, m - 1)
+ RHS of the ODE system evaluated at the middle points of the mesh
+ intervals (and using `y_middle`).
+
+ References
+ ----------
+ .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
+ Number 3, pp. 299-316, 2001.
+ """
+ f = fun(x, y, p)
+ y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
+ 0.125 * h * (f[:, 1:] - f[:, :-1]))
+ f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
+ col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
+ 4 * f_middle)
+
+ return col_res, y_middle, f, f_middle
+
+
+def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
+ """Create the function and the Jacobian for the collocation system."""
+ x_middle = x[:-1] + 0.5 * h
+ i_jac, j_jac = compute_jac_indices(n, m, k)
+
+ def col_fun(y, p):
+ return collocation_fun(fun, y, p, x, h)
+
+ def sys_jac(y, p, y_middle, f, f_middle, bc0):
+ if fun_jac is None:
+ df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
+ df_dy_middle, df_dp_middle = estimate_fun_jac(
+ fun, x_middle, y_middle, p, f_middle)
+ else:
+ df_dy, df_dp = fun_jac(x, y, p)
+ df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
+
+ if bc_jac is None:
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
+ p, bc0)
+ else:
+ dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
+
+ return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
+ df_dy_middle, df_dp, df_dp_middle, dbc_dya,
+ dbc_dyb, dbc_dp)
+
+ return col_fun, sys_jac
+
+
+def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
+ """Solve the nonlinear collocation system by a Newton method.
+
+ This is a simple Newton method with a backtracking line search. As
+ advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
+ is used, where J is the Jacobian matrix at the current iteration and r is
+ the vector or collocation residuals (values of the system lhs).
+
+ The method alters between full Newton iterations and the fixed-Jacobian
+ iterations based
+
+ There are other tricks proposed in [1]_, but they are not used as they
+ don't seem to improve anything significantly, and even break the
+ convergence on some test problems I tried.
+
+ All important parameters of the algorithm are defined inside the function.
+
+ Parameters
+ ----------
+ n : int
+ Number of equations in the ODE system.
+ m : int
+ Number of nodes in the mesh.
+ h : ndarray, shape (m-1,)
+ Mesh intervals.
+ col_fun : callable
+ Function computing collocation residuals.
+ bc : callable
+ Function computing boundary condition residuals.
+ jac : callable
+ Function computing the Jacobian of the whole system (including
+ collocation and boundary condition residuals). It is supposed to
+ return csc_matrix.
+ y : ndarray, shape (n, m)
+ Initial guess for the function values at the mesh nodes.
+ p : ndarray, shape (k,)
+ Initial guess for the unknown parameters.
+ B : ndarray with shape (n, n) or None
+ Matrix to force the S y(a) = 0 condition for a problems with the
+ singular term. If None, the singular term is assumed to be absent.
+ bvp_tol : float
+ Tolerance to which we want to solve a BVP.
+ bc_tol : float
+ Tolerance to which we want to satisfy the boundary conditions.
+
+ Returns
+ -------
+ y : ndarray, shape (n, m)
+ Final iterate for the function values at the mesh nodes.
+ p : ndarray, shape (k,)
+ Final iterate for the unknown parameters.
+ singular : bool
+ True, if the LU decomposition failed because Jacobian turned out
+ to be singular.
+
+ References
+ ----------
+ .. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+ Boundary Value Problems for Ordinary Differential Equations"
+ """
+ # We know that the solution residuals at the middle points of the mesh
+ # are connected with collocation residuals r_middle = 1.5 * col_res / h.
+ # As our BVP solver tries to decrease relative residuals below a certain
+ # tolerance, it seems reasonable to terminated Newton iterations by
+ # comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
+ # which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
+ # the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
+ # should be computed as follows:
+ tol_r = 2/3 * h * 5e-2 * bvp_tol
+
+ # Maximum allowed number of Jacobian evaluation and factorization, in
+ # other words, the maximum number of full Newton iterations. A small value
+ # is recommended in the literature.
+ max_njev = 4
+
+ # Maximum number of iterations, considering that some of them can be
+ # performed with the fixed Jacobian. In theory, such iterations are cheap,
+ # but it's not that simple in Python.
+ max_iter = 8
+
+ # Minimum relative improvement of the criterion function to accept the
+ # step (Armijo constant).
+ sigma = 0.2
+
+ # Step size decrease factor for backtracking.
+ tau = 0.5
+
+ # Maximum number of backtracking steps, the minimum step is then
+ # tau ** n_trial.
+ n_trial = 4
+
+ col_res, y_middle, f, f_middle = col_fun(y, p)
+ bc_res = bc(y[:, 0], y[:, -1], p)
+ res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+ njev = 0
+ singular = False
+ recompute_jac = True
+ for iteration in range(max_iter):
+ if recompute_jac:
+ J = jac(y, p, y_middle, f, f_middle, bc_res)
+ njev += 1
+ try:
+ LU = splu(J)
+ except RuntimeError:
+ singular = True
+ break
+
+ step = LU.solve(res)
+ cost = np.dot(step, step)
+
+ y_step = step[:m * n].reshape((n, m), order='F')
+ p_step = step[m * n:]
+
+ alpha = 1
+ for trial in range(n_trial + 1):
+ y_new = y - alpha * y_step
+ if B is not None:
+ y_new[:, 0] = np.dot(B, y_new[:, 0])
+ p_new = p - alpha * p_step
+
+ col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
+ bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
+ res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+ step_new = LU.solve(res)
+ cost_new = np.dot(step_new, step_new)
+ if cost_new < (1 - 2 * alpha * sigma) * cost:
+ break
+
+ if trial < n_trial:
+ alpha *= tau
+
+ y = y_new
+ p = p_new
+
+ if njev == max_njev:
+ break
+
+ if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
+ np.all(np.abs(bc_res) < bc_tol)):
+ break
+
+ # If the full step was taken, then we are going to continue with
+ # the same Jacobian. This is the approach of BVP_SOLVER.
+ if alpha == 1:
+ step = step_new
+ cost = cost_new
+ recompute_jac = False
+ else:
+ recompute_jac = True
+
+ return y, p, singular
+
+
+def print_iteration_header():
+ print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
+ "Iteration", "Max residual", "Max BC residual", "Total nodes",
+ "Nodes added"))
+
+
+def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
+ nodes_added):
+ print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
+ iteration, residual, bc_residual, total_nodes, nodes_added))
+
+
+class BVPResult(OptimizeResult):
+ pass
+
+
+TERMINATION_MESSAGES = {
+ 0: "The algorithm converged to the desired accuracy.",
+ 1: "The maximum number of mesh nodes is exceeded.",
+ 2: "A singular Jacobian encountered when solving the collocation system.",
+ 3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
+}
+
+
+def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
+ """Estimate rms values of collocation residuals using Lobatto quadrature.
+
+ The residuals are defined as the difference between the derivatives of
+ our solution and rhs of the ODE system. We use relative residuals, i.e.,
+ normalized by 1 + np.abs(f). RMS values are computed as sqrt from the
+ normalized integrals of the squared relative residuals over each interval.
+ Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the
+ fact that residuals at the mesh nodes are identically zero.
+
+ In [2] they don't normalize integrals by interval lengths, which gives
+ a higher rate of convergence of the residuals by the factor of h**0.5.
+ I chose to do such normalization for an ease of interpretation of return
+ values as RMS estimates.
+
+ Returns
+ -------
+ rms_res : ndarray, shape (m - 1,)
+ Estimated rms values of the relative residuals over each interval.
+
+ References
+ ----------
+ .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
+ .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+ Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
+ Number 3, pp. 299-316, 2001.
+ """
+ x_middle = x[:-1] + 0.5 * h
+ s = 0.5 * h * (3/7)**0.5
+ x1 = x_middle + s
+ x2 = x_middle - s
+ y1 = sol(x1)
+ y2 = sol(x2)
+ y1_prime = sol(x1, 1)
+ y2_prime = sol(x2, 1)
+ f1 = fun(x1, y1, p)
+ f2 = fun(x2, y2, p)
+ r1 = y1_prime - f1
+ r2 = y2_prime - f2
+
+ r_middle /= 1 + np.abs(f_middle)
+ r1 /= 1 + np.abs(f1)
+ r2 /= 1 + np.abs(f2)
+
+ r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
+ r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
+ r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
+
+ return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
+
+
+def create_spline(y, yp, x, h):
+ """Create a cubic spline given values and derivatives.
+
+ Formulas for the coefficients are taken from interpolate.CubicSpline.
+
+ Returns
+ -------
+ sol : PPoly
+ Constructed spline as a PPoly instance.
+ """
+ from scipy.interpolate import PPoly
+
+ n, m = y.shape
+ c = np.empty((4, n, m - 1), dtype=y.dtype)
+ slope = (y[:, 1:] - y[:, :-1]) / h
+ t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
+ c[0] = t / h
+ c[1] = (slope - yp[:, :-1]) / h - t
+ c[2] = yp[:, :-1]
+ c[3] = y[:, :-1]
+ c = np.rollaxis(c, 1)
+
+ return PPoly(c, x, extrapolate=True, axis=1)
+
+
+def modify_mesh(x, insert_1, insert_2):
+ """Insert nodes into a mesh.
+
+ Nodes removal logic is not established, its impact on the solver is
+ presumably negligible. So, only insertion is done in this function.
+
+ Parameters
+ ----------
+ x : ndarray, shape (m,)
+ Mesh nodes.
+ insert_1 : ndarray
+ Intervals to each insert 1 new node in the middle.
+ insert_2 : ndarray
+ Intervals to each insert 2 new nodes, such that divide an interval
+ into 3 equal parts.
+
+ Returns
+ -------
+ x_new : ndarray
+ New mesh nodes.
+
+ Notes
+ -----
+ `insert_1` and `insert_2` should not have common values.
+ """
+ # Because np.insert implementation apparently varies with a version of
+ # NumPy, we use a simple and reliable approach with sorting.
+ return np.sort(np.hstack((
+ x,
+ 0.5 * (x[insert_1] + x[insert_1 + 1]),
+ (2 * x[insert_2] + x[insert_2 + 1]) / 3,
+ (x[insert_2] + 2 * x[insert_2 + 1]) / 3
+ )))
+
+
+def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
+ """Wrap functions for unified usage in the solver."""
+ if fun_jac is None:
+ fun_jac_wrapped = None
+
+ if bc_jac is None:
+ bc_jac_wrapped = None
+
+ if k == 0:
+ def fun_p(x, y, _):
+ return np.asarray(fun(x, y), dtype)
+
+ def bc_wrapped(ya, yb, _):
+ return np.asarray(bc(ya, yb), dtype)
+
+ if fun_jac is not None:
+ def fun_jac_p(x, y, _):
+ return np.asarray(fun_jac(x, y), dtype), None
+
+ if bc_jac is not None:
+ def bc_jac_wrapped(ya, yb, _):
+ dbc_dya, dbc_dyb = bc_jac(ya, yb)
+ return (np.asarray(dbc_dya, dtype),
+ np.asarray(dbc_dyb, dtype), None)
+ else:
+ def fun_p(x, y, p):
+ return np.asarray(fun(x, y, p), dtype)
+
+ def bc_wrapped(x, y, p):
+ return np.asarray(bc(x, y, p), dtype)
+
+ if fun_jac is not None:
+ def fun_jac_p(x, y, p):
+ df_dy, df_dp = fun_jac(x, y, p)
+ return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
+
+ if bc_jac is not None:
+ def bc_jac_wrapped(ya, yb, p):
+ dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
+ return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
+ np.asarray(dbc_dp, dtype))
+
+ if S is None:
+ fun_wrapped = fun_p
+ else:
+ def fun_wrapped(x, y, p):
+ f = fun_p(x, y, p)
+ if x[0] == a:
+ f[:, 0] = np.dot(D, f[:, 0])
+ f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
+ else:
+ f += np.dot(S, y) / (x - a)
+ return f
+
+ if fun_jac is not None:
+ if S is None:
+ fun_jac_wrapped = fun_jac_p
+ else:
+ Sr = S[:, :, np.newaxis]
+
+ def fun_jac_wrapped(x, y, p):
+ df_dy, df_dp = fun_jac_p(x, y, p)
+ if x[0] == a:
+ df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
+ df_dy[:, :, 1:] += Sr / (x[1:] - a)
+ else:
+ df_dy += Sr / (x - a)
+
+ return df_dy, df_dp
+
+ return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
+
+
+def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
+ tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
+ """Solve a boundary value problem for a system of ODEs.
+
+ This function numerically solves a first order system of ODEs subject to
+ two-point boundary conditions::
+
+ dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
+ bc(y(a), y(b), p) = 0
+
+ Here x is a 1-D independent variable, y(x) is an N-D
+ vector-valued function and p is a k-D vector of unknown
+ parameters which is to be found along with y(x). For the problem to be
+ determined, there must be n + k boundary conditions, i.e., bc must be an
+ (n + k)-D function.
+
+ The last singular term on the right-hand side of the system is optional.
+ It is defined by an n-by-n matrix S, such that the solution must satisfy
+ S y(a) = 0. This condition will be forced during iterations, so it must not
+ contradict boundary conditions. See [2]_ for the explanation how this term
+ is handled when solving BVPs numerically.
+
+ Problems in a complex domain can be solved as well. In this case, y and p
+ are considered to be complex, and f and bc are assumed to be complex-valued
+ functions, but x stays real. Note that f and bc must be complex
+ differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
+ should rewrite your problem for real and imaginary parts separately. To
+ solve a problem in a complex domain, pass an initial guess for y with a
+ complex data type (see below).
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(x, y)``,
+ or ``fun(x, y, p)`` if parameters are present. All arguments are
+ ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
+ ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
+ return value must be an array with shape (n, m) and with the same
+ layout as ``y``.
+ bc : callable
+ Function evaluating residuals of the boundary conditions. The calling
+ signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
+ present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
+ and ``p`` with shape (k,). The return value must be an array with
+ shape (n + k,).
+ x : array_like, shape (m,)
+ Initial mesh. Must be a strictly increasing sequence of real numbers
+ with ``x[0]=a`` and ``x[-1]=b``.
+ y : array_like, shape (n, m)
+ Initial guess for the function values at the mesh nodes, ith column
+ corresponds to ``x[i]``. For problems in a complex domain pass `y`
+ with a complex data type (even if the initial guess is purely real).
+ p : array_like with shape (k,) or None, optional
+ Initial guess for the unknown parameters. If None (default), it is
+ assumed that the problem doesn't depend on any parameters.
+ S : array_like with shape (n, n) or None
+ Matrix defining the singular term. If None (default), the problem is
+ solved without the singular term.
+ fun_jac : callable or None, optional
+ Function computing derivatives of f with respect to y and p. The
+ calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
+ parameters are present. The return must contain 1 or 2 elements in the
+ following order:
+
+ * df_dy : array_like with shape (n, n, m), where an element
+ (i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
+ * df_dp : array_like with shape (n, k, m), where an element
+ (i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
+
+ Here q numbers nodes at which x and y are defined, whereas i and j
+ number vector components. If the problem is solved without unknown
+ parameters, df_dp should not be returned.
+
+ If `fun_jac` is None (default), the derivatives will be estimated
+ by the forward finite differences.
+ bc_jac : callable or None, optional
+ Function computing derivatives of bc with respect to ya, yb, and p.
+ The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
+ if parameters are present. The return must contain 2 or 3 elements in
+ the following order:
+
+ * dbc_dya : array_like with shape (n, n), where an element (i, j)
+ equals to d bc_i(ya, yb, p) / d ya_j.
+ * dbc_dyb : array_like with shape (n, n), where an element (i, j)
+ equals to d bc_i(ya, yb, p) / d yb_j.
+ * dbc_dp : array_like with shape (n, k), where an element (i, j)
+ equals to d bc_i(ya, yb, p) / d p_j.
+
+ If the problem is solved without unknown parameters, dbc_dp should not
+ be returned.
+
+ If `bc_jac` is None (default), the derivatives will be estimated by
+ the forward finite differences.
+ tol : float, optional
+ Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
+ where y is the found solution, then the solver tries to achieve on each
+ mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is
+ estimated in a root mean squared sense (using a numerical quadrature
+ formula). Default is 1e-3.
+ max_nodes : int, optional
+ Maximum allowed number of the mesh nodes. If exceeded, the algorithm
+ terminates. Default is 1000.
+ verbose : {0, 1, 2}, optional
+ Level of algorithm's verbosity:
+
+ * 0 (default) : work silently.
+ * 1 : display a termination report.
+ * 2 : display progress during iterations.
+ bc_tol : float, optional
+ Desired absolute tolerance for the boundary condition residuals: `bc`
+ value should satisfy ``abs(bc) < bc_tol`` component-wise.
+ Equals to `tol` by default. Up to 10 iterations are allowed to achieve this
+ tolerance.
+
+ Returns
+ -------
+ Bunch object with the following fields defined:
+ sol : PPoly
+ Found solution for y as `scipy.interpolate.PPoly` instance, a C1
+ continuous cubic spline.
+ p : ndarray or None, shape (k,)
+ Found parameters. None, if the parameters were not present in the
+ problem.
+ x : ndarray, shape (m,)
+ Nodes of the final mesh.
+ y : ndarray, shape (n, m)
+ Solution values at the mesh nodes.
+ yp : ndarray, shape (n, m)
+ Solution derivatives at the mesh nodes.
+ rms_residuals : ndarray, shape (m - 1,)
+ RMS values of the relative residuals over each mesh interval (see the
+ description of `tol` parameter).
+ niter : int
+ Number of completed iterations.
+ status : int
+ Reason for algorithm termination:
+
+ * 0: The algorithm converged to the desired accuracy.
+ * 1: The maximum number of mesh nodes is exceeded.
+ * 2: A singular Jacobian encountered when solving the collocation
+ system.
+
+ message : string
+ Verbal description of the termination reason.
+ success : bool
+ True if the algorithm converged to the desired accuracy (``status=0``).
+
+ Notes
+ -----
+ This function implements a 4th order collocation algorithm with the
+ control of residuals similar to [1]_. A collocation system is solved
+ by a damped Newton method with an affine-invariant criterion function as
+ described in [3]_.
+
+ Note that in [1]_ integral residuals are defined without normalization
+ by interval lengths. So, their definition is different by a multiplier of
+ h**0.5 (h is an interval length) from the definition used here.
+
+ .. versionadded:: 0.18.0
+
+ References
+ ----------
+ .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+ Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+ Number 3, pp. 299-316, 2001.
+ .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
+ Solver".
+ .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+ Boundary Value Problems for Ordinary Differential Equations".
+ .. [4] `Cauchy-Riemann equations
+ `_ on
+ Wikipedia.
+
+ Examples
+ --------
+ In the first example, we solve Bratu's problem::
+
+ y'' + k * exp(y) = 0
+ y(0) = y(1) = 0
+
+ for k = 1.
+
+ We rewrite the equation as a first-order system and implement its
+ right-hand side evaluation::
+
+ y1' = y2
+ y2' = -exp(y1)
+
+ >>> def fun(x, y):
+ ... return np.vstack((y[1], -np.exp(y[0])))
+
+ Implement evaluation of the boundary condition residuals:
+
+ >>> def bc(ya, yb):
+ ... return np.array([ya[0], yb[0]])
+
+ Define the initial mesh with 5 nodes:
+
+ >>> x = np.linspace(0, 1, 5)
+
+ This problem is known to have two solutions. To obtain both of them, we
+ use two different initial guesses for y. We denote them by subscripts
+ a and b.
+
+ >>> y_a = np.zeros((2, x.size))
+ >>> y_b = np.zeros((2, x.size))
+ >>> y_b[0] = 3
+
+ Now we are ready to run the solver.
+
+ >>> from scipy.integrate import solve_bvp
+ >>> res_a = solve_bvp(fun, bc, x, y_a)
+ >>> res_b = solve_bvp(fun, bc, x, y_b)
+
+ Let's plot the two found solutions. We take an advantage of having the
+ solution in a spline form to produce a smooth plot.
+
+ >>> x_plot = np.linspace(0, 1, 100)
+ >>> y_plot_a = res_a.sol(x_plot)[0]
+ >>> y_plot_b = res_b.sol(x_plot)[0]
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(x_plot, y_plot_a, label='y_a')
+ >>> plt.plot(x_plot, y_plot_b, label='y_b')
+ >>> plt.legend()
+ >>> plt.xlabel("x")
+ >>> plt.ylabel("y")
+ >>> plt.show()
+
+ We see that the two solutions have similar shape, but differ in scale
+ significantly.
+
+ In the second example, we solve a simple Sturm-Liouville problem::
+
+ y'' + k**2 * y = 0
+ y(0) = y(1) = 0
+
+ It is known that a non-trivial solution y = A * sin(k * x) is possible for
+ k = pi * n, where n is an integer. To establish the normalization constant
+ A = 1 we add a boundary condition::
+
+ y'(0) = k
+
+ Again, we rewrite our equation as a first-order system and implement its
+ right-hand side evaluation::
+
+ y1' = y2
+ y2' = -k**2 * y1
+
+ >>> def fun(x, y, p):
+ ... k = p[0]
+ ... return np.vstack((y[1], -k**2 * y[0]))
+
+ Note that parameters p are passed as a vector (with one element in our
+ case).
+
+ Implement the boundary conditions:
+
+ >>> def bc(ya, yb, p):
+ ... k = p[0]
+ ... return np.array([ya[0], yb[0], ya[1] - k])
+
+ Set up the initial mesh and guess for y. We aim to find the solution for
+ k = 2 * pi, to achieve that we set values of y to approximately follow
+ sin(2 * pi * x):
+
+ >>> x = np.linspace(0, 1, 5)
+ >>> y = np.zeros((2, x.size))
+ >>> y[0, 1] = 1
+ >>> y[0, 3] = -1
+
+ Run the solver with 6 as an initial guess for k.
+
+ >>> sol = solve_bvp(fun, bc, x, y, p=[6])
+
+ We see that the found k is approximately correct:
+
+ >>> sol.p[0]
+ 6.28329460046
+
+ And, finally, plot the solution to see the anticipated sinusoid:
+
+ >>> x_plot = np.linspace(0, 1, 100)
+ >>> y_plot = sol.sol(x_plot)[0]
+ >>> plt.plot(x_plot, y_plot)
+ >>> plt.xlabel("x")
+ >>> plt.ylabel("y")
+ >>> plt.show()
+ """
+ x = np.asarray(x, dtype=float)
+ if x.ndim != 1:
+ raise ValueError("`x` must be 1 dimensional.")
+ h = np.diff(x)
+ if np.any(h <= 0):
+ raise ValueError("`x` must be strictly increasing.")
+ a = x[0]
+
+ y = np.asarray(y)
+ if np.issubdtype(y.dtype, np.complexfloating):
+ dtype = complex
+ else:
+ dtype = float
+ y = y.astype(dtype, copy=False)
+
+ if y.ndim != 2:
+ raise ValueError("`y` must be 2 dimensional.")
+ if y.shape[1] != x.shape[0]:
+ raise ValueError("`y` is expected to have {} columns, but actually "
+ "has {}.".format(x.shape[0], y.shape[1]))
+
+ if p is None:
+ p = np.array([])
+ else:
+ p = np.asarray(p, dtype=dtype)
+ if p.ndim != 1:
+ raise ValueError("`p` must be 1 dimensional.")
+
+ if tol < 100 * EPS:
+ warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
+ tol = 100 * EPS
+
+ if verbose not in [0, 1, 2]:
+ raise ValueError("`verbose` must be in [0, 1, 2].")
+
+ n = y.shape[0]
+ k = p.shape[0]
+
+ if S is not None:
+ S = np.asarray(S, dtype=dtype)
+ if S.shape != (n, n):
+ raise ValueError("`S` is expected to have shape {}, "
+ "but actually has {}".format((n, n), S.shape))
+
+ # Compute I - S^+ S to impose necessary boundary conditions.
+ B = np.identity(n) - np.dot(pinv(S), S)
+
+ y[:, 0] = np.dot(B, y[:, 0])
+
+ # Compute (I - S)^+ to correct derivatives at x=a.
+ D = pinv(np.identity(n) - S)
+ else:
+ B = None
+ D = None
+
+ if bc_tol is None:
+ bc_tol = tol
+
+ # Maximum number of iterations
+ max_iteration = 10
+
+ fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
+ fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
+
+ f = fun_wrapped(x, y, p)
+ if f.shape != y.shape:
+ raise ValueError("`fun` return is expected to have shape {}, "
+ "but actually has {}.".format(y.shape, f.shape))
+
+ bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
+ if bc_res.shape != (n + k,):
+ raise ValueError("`bc` return is expected to have shape {}, "
+ "but actually has {}.".format((n + k,), bc_res.shape))
+
+ status = 0
+ iteration = 0
+ if verbose == 2:
+ print_iteration_header()
+
+ while True:
+ m = x.shape[0]
+
+ col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
+ fun_jac_wrapped, bc_jac_wrapped, x, h)
+ y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
+ y, p, B, tol, bc_tol)
+ iteration += 1
+
+ col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
+ p, x, h)
+ bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
+ max_bc_res = np.max(abs(bc_res))
+
+ # This relation is not trivial, but can be verified.
+ r_middle = 1.5 * col_res / h
+ sol = create_spline(y, f, x, h)
+ rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
+ r_middle, f_middle)
+ max_rms_res = np.max(rms_res)
+
+ if singular:
+ status = 2
+ break
+
+ insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
+ insert_2, = np.nonzero(rms_res >= 100 * tol)
+ nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
+
+ if m + nodes_added > max_nodes:
+ status = 1
+ if verbose == 2:
+ nodes_added = "({})".format(nodes_added)
+ print_iteration_progress(iteration, max_rms_res, max_bc_res,
+ m, nodes_added)
+ break
+
+ if verbose == 2:
+ print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
+ nodes_added)
+
+ if nodes_added > 0:
+ x = modify_mesh(x, insert_1, insert_2)
+ h = np.diff(x)
+ y = sol(x)
+ elif max_bc_res <= bc_tol:
+ status = 0
+ break
+ elif iteration >= max_iteration:
+ status = 3
+ break
+
+ if verbose > 0:
+ if status == 0:
+ print("Solved in {} iterations, number of nodes {}. \n"
+ "Maximum relative residual: {:.2e} \n"
+ "Maximum boundary residual: {:.2e}"
+ .format(iteration, x.shape[0], max_rms_res, max_bc_res))
+ elif status == 1:
+ print("Number of nodes is exceeded after iteration {}. \n"
+ "Maximum relative residual: {:.2e} \n"
+ "Maximum boundary residual: {:.2e}"
+ .format(iteration, max_rms_res, max_bc_res))
+ elif status == 2:
+ print("Singular Jacobian encountered when solving the collocation "
+ "system on iteration {}. \n"
+ "Maximum relative residual: {:.2e} \n"
+ "Maximum boundary residual: {:.2e}"
+ .format(iteration, max_rms_res, max_bc_res))
+ elif status == 3:
+ print("The solver was unable to satisfy boundary conditions "
+ "tolerance on iteration {}. \n"
+ "Maximum relative residual: {:.2e} \n"
+ "Maximum boundary residual: {:.2e}"
+ .format(iteration, max_rms_res, max_bc_res))
+
+ if p.size == 0:
+ p = None
+
+ return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
+ niter=iteration, status=status,
+ message=TERMINATION_MESSAGES[status], success=status == 0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_dop.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_dop.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..f05fdb1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_dop.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/__init__.py
new file mode 100644
index 0000000..f3c8aaa
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/__init__.py
@@ -0,0 +1,8 @@
+"""Suite of ODE solvers implemented in Python."""
+from .ivp import solve_ivp
+from .rk import RK23, RK45, DOP853
+from .radau import Radau
+from .bdf import BDF
+from .lsoda import LSODA
+from .common import OdeSolution
+from .base import DenseOutput, OdeSolver
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/base.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/base.py
new file mode 100644
index 0000000..d7eedd8
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/base.py
@@ -0,0 +1,274 @@
+import numpy as np
+
+
def check_arguments(fun, y0, support_complex):
    """Helper function for checking arguments common to all solvers."""
    y0 = np.asarray(y0)
    is_complex = np.issubdtype(y0.dtype, np.complexfloating)
    if is_complex and not support_complex:
        raise ValueError("`y0` is complex, but the chosen solver does "
                         "not support integration in a complex domain.")
    dtype = complex if is_complex else float
    y0 = y0.astype(dtype, copy=False)

    if y0.ndim != 1:
        raise ValueError("`y0` must be 1-dimensional.")

    def fun_wrapped(t, y):
        # Coerce whatever the user rhs returns to the working dtype.
        return np.asarray(fun(t, y), dtype=dtype)

    return fun_wrapped, y0
+
+
class OdeSolver(object):
    """Base class for ODE solvers.

    In order to implement a new solver you need to follow the guidelines:

    1. A constructor must accept parameters presented in the base class
       (listed below) along with any other parameters specific to a solver.
    2. A constructor must accept arbitrary extraneous arguments
       ``**extraneous``, but warn that these arguments are irrelevant
       using `common.warn_extraneous` function. Do not pass these
       arguments to the base class.
    3. A solver must implement a private method `_step_impl(self)` which
       propagates a solver one step further. It must return tuple
       ``(success, message)``, where ``success`` is a boolean indicating
       whether a step was successful, and ``message`` is a string
       containing description of a failure if a step failed or None
       otherwise.
    4. A solver must implement a private method `_dense_output_impl(self)`,
       which returns a `DenseOutput` object covering the last successful
       step.
    5. A solver must have attributes listed below in Attributes section.
       Note that ``t_old`` and ``step_size`` are updated automatically.
    6. Use `fun(self, t, y)` method for the system rhs evaluation, this
       way the number of function evaluations (`nfev`) will be tracked
       automatically.
    7. For convenience, a base class provides `fun_single(self, t, y)` and
       `fun_vectorized(self, t, y)` for evaluating the rhs in
       non-vectorized and vectorized fashions respectively (regardless of
       how `fun` from the constructor is implemented). These calls don't
       increment `nfev`.
    8. If a solver uses a Jacobian matrix and LU decompositions, it should
       track the number of Jacobian evaluations (`njev`) and the number of
       LU decompositions (`nlu`).
    9. By convention, the function evaluations used to compute a finite
       difference approximation of the Jacobian should not be counted in
       `nfev`, thus use `fun_single(self, t, y)` or
       `fun_vectorized(self, t, y)` when computing a finite difference
       approximation of the Jacobian.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar and there are two options for ndarray ``y``.
        It can either have shape (n,), then ``fun`` must return array_like with
        shape (n,). Or, alternatively, it can have shape (n, n_points), then
        ``fun`` must return array_like with shape (n, n_points) (each column
        corresponds to a single column in ``y``). The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time --- the integration won't continue beyond it. It also
        determines the direction of the integration.
    vectorized : bool
        Whether `fun` is implemented in a vectorized fashion.
    support_complex : bool, optional
        Whether integration in a complex domain should be supported.
        Generally determined by a derived solver class capabilities.
        Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of the system's rhs evaluations.
    njev : int
        Number of the Jacobian evaluations.
    nlu : int
        Number of LU decompositions.
    """
    TOO_SMALL_STEP = "Required step size is less than spacing between numbers."

    def __init__(self, fun, t0, y0, t_bound, vectorized,
                 support_complex=False):
        self.t_old = None
        self.t = t0
        # check_arguments validates y0 (1-D, dtype) and wraps fun so its
        # output is coerced to the working dtype.
        self._fun, self.y = check_arguments(fun, y0, support_complex)
        self.t_bound = t_bound
        self.vectorized = vectorized

        # Provide both single-point and vectorized rhs entry points,
        # regardless of which style the user implemented.
        if vectorized:
            def fun_single(t, y):
                return self._fun(t, y[:, None]).ravel()
            fun_vectorized = self._fun
        else:
            fun_single = self._fun

            def fun_vectorized(t, y):
                f = np.empty_like(y)
                for i, yi in enumerate(y.T):
                    f[:, i] = self._fun(t, yi)
                return f

        # Counting wrapper: every call through self.fun increments nfev.
        def fun(t, y):
            self.nfev += 1
            return self.fun_single(t, y)

        self.fun = fun
        self.fun_single = fun_single
        self.fun_vectorized = fun_vectorized

        # Default to +1 when t_bound == t0 so a degenerate interval is valid.
        self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
        self.n = self.y.size
        self.status = 'running'

        self.nfev = 0
        self.njev = 0
        self.nlu = 0

    @property
    def step_size(self):
        # Size of the last successful step; None before the first step.
        if self.t_old is None:
            return None
        else:
            return np.abs(self.t - self.t_old)

    def step(self):
        """Perform one integration step.

        Returns
        -------
        message : string or None
            Report from the solver. Typically a reason for a failure if
            `self.status` is 'failed' after the step was taken or None
            otherwise.
        """
        if self.status != 'running':
            raise RuntimeError("Attempt to step on a failed or finished "
                               "solver.")

        if self.n == 0 or self.t == self.t_bound:
            # Handle corner cases of empty solver or no integration.
            self.t_old = self.t
            self.t = self.t_bound
            message = None
            self.status = 'finished'
        else:
            t = self.t
            success, message = self._step_impl()

            if not success:
                self.status = 'failed'
            else:
                self.t_old = t
                # Finished once the boundary is reached or passed in the
                # direction of integration.
                if self.direction * (self.t - self.t_bound) >= 0:
                    self.status = 'finished'

        return message

    def dense_output(self):
        """Compute a local interpolant over the last successful step.

        Returns
        -------
        sol : `DenseOutput`
            Local interpolant over the last successful step.
        """
        if self.t_old is None:
            raise RuntimeError("Dense output is available after a successful "
                               "step was made.")

        if self.n == 0 or self.t == self.t_old:
            # Handle corner cases of empty solver and no integration.
            return ConstantDenseOutput(self.t_old, self.t, self.y)
        else:
            return self._dense_output_impl()

    def _step_impl(self):
        # Must be overridden by subclasses; see class docstring, item 3.
        raise NotImplementedError

    def _dense_output_impl(self):
        # Must be overridden by subclasses; see class docstring, item 4.
        raise NotImplementedError
+
+
class DenseOutput(object):
    """Base class for local interpolant over step made by an ODE solver.

    It interpolates between `t_min` and `t_max` (see Attributes below).
    Evaluation outside this interval is not forbidden, but the accuracy is not
    guaranteed.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, t_old, t):
        self.t_old = t_old
        self.t = t
        # Normalize the endpoints into an ordered interval.
        if t_old <= t:
            self.t_min, self.t_max = t_old, t
        else:
            self.t_min, self.t_max = t, t_old

    def __call__(self, t):
        """Evaluate the interpolant.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate the solution at.

        Returns
        -------
        y : ndarray, shape (n,) or (n, n_points)
            Computed values. Shape depends on whether `t` was a scalar or a
            1-D array.
        """
        t = np.asarray(t)
        if t.ndim <= 1:
            return self._call_impl(t)
        raise ValueError("`t` must be a float or a 1-D array.")

    def _call_impl(self, t):
        # Subclasses implement the actual interpolation.
        raise NotImplementedError
+
+
class ConstantDenseOutput(DenseOutput):
    """Constant value interpolator.

    This class used for degenerate integration cases: equal integration limits
    or a system with 0 equations.
    """
    def __init__(self, t_old, t, value):
        super(ConstantDenseOutput, self).__init__(t_old, t)
        self.value = value

    def _call_impl(self, t):
        if t.ndim == 0:
            return self.value
        else:
            n_points = t.shape[0]
            # Broadcast the constant state into one column per query point.
            out = np.empty((self.value.shape[0], n_points))
            out[:] = self.value[:, None]
            return out
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/bdf.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/bdf.py
new file mode 100644
index 0000000..8db4710
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/bdf.py
@@ -0,0 +1,466 @@
+import numpy as np
+from scipy.linalg import lu_factor, lu_solve
+from scipy.sparse import issparse, csc_matrix, eye
+from scipy.sparse.linalg import splu
+from scipy.optimize._numdiff import group_columns
+from .common import (validate_max_step, validate_tol, select_initial_step,
+ norm, EPS, num_jac, validate_first_step,
+ warn_extraneous)
+from .base import OdeSolver, DenseOutput
+
+
MAX_ORDER = 5  # Highest BDF/NDF order used by this solver.
NEWTON_MAXITER = 4  # Maximum Newton iterations per attempted step.
MIN_FACTOR = 0.2  # Lower bound on the step-size change factor.
MAX_FACTOR = 10  # Upper bound on the step-size change factor.
+
+
def compute_R(order, factor):
    """Compute the matrix for changing the differences array."""
    idx = np.arange(1, order + 1)
    M = np.zeros((order + 1, order + 1))
    # Entry (i, j) for i, j >= 1 is (i - 1 - factor * j) / i; the top row
    # is all ones, then a cumulative product down columns builds R.
    M[1:, 1:] = (idx[:, None] - 1 - factor * idx) / idx[:, None]
    M[0] = 1
    return np.cumprod(M, axis=0)
+
+
def change_D(D, order, factor):
    """Change differences array in-place when step size is changed."""
    # Combined transformation: rescale to the new step, expressed through
    # the two triangular matrices R(factor) and R(1).
    RU = compute_R(order, factor).dot(compute_R(order, 1))
    D[:order + 1] = np.dot(RU.T, D[:order + 1])
+
+
def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method."""
    # `d` accumulates the total Newton correction relative to the predictor;
    # it is returned for use in the step error estimate.
    d = 0
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        if not np.all(np.isfinite(f)):
            # Non-finite rhs: abandon this Newton attempt.
            break

        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)

        # Estimate the linear convergence rate from consecutive corrections.
        if dy_norm_old is None:
            rate = None
        else:
            rate = dy_norm / dy_norm_old

        # Give up early if the iteration diverges (rate >= 1) or if the
        # geometric extrapolation of the remaining iterations cannot reach
        # the tolerance.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break

        y += dy
        d += dy

        # Converged when the correction vanished or the extrapolated
        # remaining error is below the tolerance.
        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break

        dy_norm_old = dy_norm

    return converged, k + 1, y, d
+
+
class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in [3]_.
    An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below). The
        vectorized implementation allows a faster approximation of the Jacobian
        by finite differences (required for this solver).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits). But if a component of `y`
        is approximately below `atol`, the error only needs to fall within
        the same `atol` threshold, and the number of correct digits is not
        guaranteed. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to y,
        required by this method. The Jacobian matrix has shape (n, n) and its
        element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
        elements in *each* row, providing the sparsity structure will greatly
        speed up the computations [4]_. A zero entry means that a corresponding
        element in the Jacobian is always zero. If None (default), the Jacobian
        is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
           Solution of Ordinary Differential Equations", ACM Transactions on
           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
    .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
           Nonstiff Problems", Sec. III.2.
    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super(BDF, self).__init__(fun, t0, y0, t_bound, vectorized,
                                  support_complex=True)
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
                                             self.direction, 1,
                                             self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        # Newton tolerance tied to rtol, floored by machine precision.
        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        # Pick sparse or dense LU machinery depending on the Jacobian type;
        # both paths count factorizations in self.nlu.
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc', dtype=self.y.dtype)
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n, dtype=self.y.dtype)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        # NDF modification coefficients and derived method constants; see
        # reference [2]_ in the class docstring.
        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
        self.alpha = (1 - kappa) * self.gamma
        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)

        # D holds backward differences of the solution; rows 0..order are
        # used by the current interpolating polynomial.
        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
        D[0] = self.y
        D[1] = f * self.h_abs * self.direction
        self.D = D

        self.order = 1
        self.n_equal_steps = 0
        self.LU = None

    def _validate_jac(self, jac, sparsity):
        # Normalize the user-supplied `jac` into (callable-or-None, J matrix).
        t0 = self.t
        y0 = self.y

        if jac is None:
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y):
                self.njev += 1
                f = self.fun_single(t, y)
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev += 1
            if issparse(J):
                J = csc_matrix(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=y0.dtype)
            else:
                J = np.asarray(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            # Constant Jacobian supplied as a matrix.
            if issparse(jac):
                J = csc_matrix(jac, dtype=y0.dtype)
            else:
                J = np.asarray(jac, dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        t = self.t
        D = self.D

        max_step = self.max_step
        # Smallest representable step at the current t.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            change_D(D, self.order, max_step / self.h_abs)
            self.n_equal_steps = 0
        elif self.h_abs < min_step:
            h_abs = min_step
            change_D(D, self.order, min_step / self.h_abs)
            self.n_equal_steps = 0
        else:
            h_abs = self.h_abs

        atol = self.atol
        rtol = self.rtol
        order = self.order

        alpha = self.alpha
        gamma = self.gamma
        error_const = self.error_const

        J = self.J
        LU = self.LU
        current_jac = self.jac is None

        step_accepted = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Clip the step so we never integrate past t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                change_D(D, order, np.abs(t_new - t) / h_abs)
                self.n_equal_steps = 0
                LU = None

            h = t_new - t
            h_abs = np.abs(h)

            # Predictor: sum of backward differences.
            y_predict = np.sum(D[:order + 1], axis=0)

            scale = atol + rtol * np.abs(y_predict)
            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]

            converged = False
            c = h / alpha[order]
            while not converged:
                if LU is None:
                    LU = self.lu(self.I - c * J)

                converged, n_iter, y_new, d = solve_bdf_system(
                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
                    scale, self.newton_tol)

                if not converged:
                    if current_jac:
                        break
                    # Retry once with a fresh Jacobian before giving up.
                    J = self.jac(t_new, y_predict)
                    LU = None
                    current_jac = True

            if not converged:
                # Newton failed even with a current Jacobian: halve the step.
                factor = 0.5
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                LU = None
                continue

            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            scale = atol + rtol * np.abs(y_new)
            error = error_const[order] * d
            error_norm = norm(error / scale)

            if error_norm > 1:
                factor = max(MIN_FACTOR,
                             safety * error_norm ** (-1 / (order + 1)))
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                # As we didn't have problems with convergence, we don't
                # reset LU here.
            else:
                step_accepted = True

        self.n_equal_steps += 1

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.J = J
        self.LU = LU

        # Update differences. The principal relation here is
        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
        # contained difference for previous interpolating polynomial and
        # d = D^{k + 1} y_n. Thus this elegant code follows.
        D[order + 2] = d - D[order + 1]
        D[order + 1] = d
        for i in reversed(range(order + 1)):
            D[i] += D[i + 1]

        # Only consider changing the order after `order + 1` equal steps.
        if self.n_equal_steps < order + 1:
            return True, None

        if order > 1:
            error_m = error_const[order - 1] * D[order]
            error_m_norm = norm(error_m / scale)
        else:
            error_m_norm = np.inf

        if order < MAX_ORDER:
            error_p = error_const[order + 1] * D[order + 2]
            error_p_norm = norm(error_p / scale)
        else:
            error_p_norm = np.inf

        # Choose the order (current - 1, current, current + 1) that allows
        # the largest step for the same error.
        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
        with np.errstate(divide='ignore'):
            factors = error_norms ** (-1 / np.arange(order, order + 3))

        delta_order = np.argmax(factors) - 1
        order += delta_order
        self.order = order

        factor = min(MAX_FACTOR, safety * np.max(factors))
        self.h_abs *= factor
        change_D(D, order, factor)
        self.n_equal_steps = 0
        self.LU = None

        return True, None

    def _dense_output_impl(self):
        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
                              self.order, self.D[:self.order + 1].copy())
+
+
class BdfDenseOutput(DenseOutput):
    """Interpolant over one BDF step, built from backward differences."""

    def __init__(self, t_old, t, h, order, D):
        super(BdfDenseOutput, self).__init__(t_old, t)
        self.order = order
        k = np.arange(self.order)
        # Interpolation nodes trail the step endpoint by multiples of h.
        self.t_shift = self.t - h * k
        self.denom = h * (k + 1)
        self.D = D

    def _call_impl(self, t):
        if t.ndim == 0:
            p = np.cumprod((t - self.t_shift) / self.denom)
        else:
            p = np.cumprod((t - self.t_shift[:, None]) / self.denom[:, None],
                           axis=0)

        y = np.dot(self.D[1:].T, p)
        y += self.D[0] if y.ndim == 1 else self.D[0, :, None]
        return y
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/common.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/common.py
new file mode 100644
index 0000000..4205a33
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/common.py
@@ -0,0 +1,431 @@
+from itertools import groupby
+from warnings import warn
+import numpy as np
+from scipy.sparse import find, coo_matrix
+
+
+EPS = np.finfo(float).eps
+
+
def validate_first_step(first_step, t0, t_bound):
    """Check that `first_step` is positive and fits within the integration
    interval, then return it unchanged.

    Raises
    ------
    ValueError
        If `first_step` is non-positive or larger than ``|t_bound - t0|``.
    """
    interval_length = np.abs(t_bound - t0)
    if first_step <= 0:
        raise ValueError("`first_step` must be positive.")
    if first_step > interval_length:
        raise ValueError("`first_step` exceeds bounds.")
    return first_step
+
+
def validate_max_step(max_step):
    """Assert that `max_step` is valid (strictly positive) and return it.

    Raises
    ------
    ValueError
        If `max_step` is non-positive.
    """
    # NOTE: the original docstring said "max_Step" — fixed to match the
    # actual parameter name.
    if max_step <= 0:
        raise ValueError("`max_step` must be positive.")
    return max_step
+
+
def warn_extraneous(extraneous):
    """Display a warning for extraneous keyword arguments.

    The initializer of each solver class is expected to collect keyword
    arguments that it doesn't understand and warn about them. This function
    prints a warning for each key in the supplied dictionary.

    Parameters
    ----------
    extraneous : dict
        Extraneous keyword arguments
    """
    if not extraneous:
        return
    names = ", ".join("`{}`".format(key) for key in extraneous)
    warn("The following arguments have no effect for a chosen solver: {}."
         .format(names))
+
+
def validate_tol(rtol, atol, n):
    """Validate tolerance values.

    `rtol` is clipped from below to ``100 * eps``; `atol` may be a scalar
    or an array of shape ``(n,)`` and must be non-negative.
    """
    min_rtol = 100 * np.finfo(float).eps
    if rtol < min_rtol:
        warn("`rtol` is too low, setting to {}".format(min_rtol))
        rtol = min_rtol

    atol = np.asarray(atol)
    if atol.ndim > 0 and atol.shape != (n,):
        raise ValueError("`atol` has wrong shape.")
    if np.any(atol < 0):
        raise ValueError("`atol` must be positive.")

    return rtol, atol
+
+
def norm(x):
    """Return the RMS (root-mean-square) norm of *x*."""
    return np.linalg.norm(x) / np.sqrt(x.size)
+
+
def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
    """Empirically select a good initial step.

    The algorithm is described in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t0 : float
        Initial value of the independent variable.
    y0 : ndarray, shape (n,)
        Initial value of the dependent variable.
    f0 : ndarray, shape (n,)
        Initial value of the derivative, i.e., ``fun(t0, y0)``.
    direction : float
        Integration direction.
    order : float
        Error estimator order. It means that the error controlled by the
        algorithm is proportional to ``step_size ** (order + 1)``.
    rtol : float
        Desired relative tolerance.
    atol : float
        Desired absolute tolerance.

    Returns
    -------
    h_abs : float
        Absolute value of the suggested initial step.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett and G. Wanner, "Solving Ordinary
           Differential Equations I: Nonstiff Problems", Sec. II.4.
    """
    if y0.size == 0:
        # No equations to integrate: any step size works.
        return np.inf

    scale = atol + np.abs(y0) * rtol
    d0 = norm(y0 / scale)
    d1 = norm(f0 / scale)
    # First guess: keep the explicit Euler increment small relative to y0.
    if d0 < 1e-5 or d1 < 1e-5:
        h0 = 1e-6
    else:
        h0 = 0.01 * d0 / d1

    # One explicit Euler step to estimate the second derivative magnitude.
    y1 = y0 + h0 * direction * f0
    f1 = fun(t0 + h0 * direction, y1)
    d2 = norm((f1 - f0) / scale) / h0

    # Choose h1 so that the estimated local error
    # ~ h ** (order + 1) * max(d1, d2) stays around 0.01.
    if d1 <= 1e-15 and d2 <= 1e-15:
        h1 = max(1e-6, h0 * 1e-3)
    else:
        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))

    return min(100 * h0, h1)
+
+
class OdeSolution(object):
    """Continuous ODE solution.

    It is organized as a collection of `DenseOutput` objects which represent
    local interpolants. It provides an algorithm to select a right interpolant
    for each given point.

    The interpolants cover the range between `t_min` and `t_max` (see
    Attributes below). Evaluation outside this interval is not forbidden, but
    the accuracy is not guaranteed.

    When evaluating at a breakpoint (one of the values in `ts`) a segment with
    the lower index is selected.

    Parameters
    ----------
    ts : array_like, shape (n_segments + 1,)
        Time instants between which local interpolants are defined. Must
        be strictly increasing or decreasing (zero segment with two points is
        also allowed).
    interpolants : list of DenseOutput with n_segments elements
        Local interpolants. An i-th interpolant is assumed to be defined
        between ``ts[i]`` and ``ts[i + 1]``.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, ts, interpolants):
        ts = np.asarray(ts)
        d = np.diff(ts)
        # The first case covers integration on zero segment.
        if not ((ts.size == 2 and ts[0] == ts[-1])
                or np.all(d > 0) or np.all(d < 0)):
            raise ValueError("`ts` must be strictly increasing or decreasing.")

        self.n_segments = len(interpolants)
        if ts.shape != (self.n_segments + 1,):
            raise ValueError("Numbers of time stamps and interpolants "
                             "don't match.")

        self.ts = ts
        self.interpolants = interpolants
        # Keep an ascending copy of `ts` so np.searchsorted can be used
        # regardless of the integration direction.
        if ts[-1] >= ts[0]:
            self.t_min = ts[0]
            self.t_max = ts[-1]
            self.ascending = True
            self.ts_sorted = ts
        else:
            self.t_min = ts[-1]
            self.t_max = ts[0]
            self.ascending = False
            self.ts_sorted = ts[::-1]

    def _call_single(self, t):
        # Here we preserve a certain symmetry that when t is in self.ts,
        # then we prioritize a segment with a lower index.
        if self.ascending:
            ind = np.searchsorted(self.ts_sorted, t, side='left')
        else:
            ind = np.searchsorted(self.ts_sorted, t, side='right')

        # Clip to a valid segment index; evaluation outside
        # [t_min, t_max] falls through to the nearest segment.
        segment = min(max(ind - 1, 0), self.n_segments - 1)
        # Undo the reversal performed in __init__ for descending `ts`.
        if not self.ascending:
            segment = self.n_segments - 1 - segment

        return self.interpolants[segment](t)

    def __call__(self, t):
        """Evaluate the solution.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate at.

        Returns
        -------
        y : ndarray, shape (n_states,) or (n_states, n_points)
            Computed values. Shape depends on whether `t` is a scalar or a
            1-D array.
        """
        t = np.asarray(t)

        if t.ndim == 0:
            return self._call_single(t)

        # Process points in sorted order so each interpolant is evaluated
        # once per contiguous group; `reverse` restores the caller's
        # ordering afterwards.
        order = np.argsort(t)
        reverse = np.empty_like(order)
        reverse[order] = np.arange(order.shape[0])
        t_sorted = t[order]

        # See comment in self._call_single.
        if self.ascending:
            segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')
        else:
            segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')
        segments -= 1
        segments[segments < 0] = 0
        segments[segments > self.n_segments - 1] = self.n_segments - 1
        if not self.ascending:
            segments = self.n_segments - 1 - segments

        # `segments` is monotone because t_sorted is sorted, so groupby
        # yields each segment index as one contiguous run.
        ys = []
        group_start = 0
        for segment, group in groupby(segments):
            group_end = group_start + len(list(group))
            y = self.interpolants[segment](t_sorted[group_start:group_end])
            ys.append(y)
            group_start = group_end

        ys = np.hstack(ys)
        ys = ys[:, reverse]

        return ys
+
+
# Tuning constants for the adaptive step-size logic in `num_jac`, all
# expressed relative to machine epsilon (EPS):
NUM_JAC_DIFF_REJECT = EPS ** 0.875  # difference so small it is re-computed with a bigger step
NUM_JAC_DIFF_SMALL = EPS ** 0.75  # below this, grow the step factor for the next call
NUM_JAC_DIFF_BIG = EPS ** 0.25  # above this, shrink the step factor for the next call
NUM_JAC_MIN_FACTOR = 1e3 * EPS  # lower bound on the step factor
NUM_JAC_FACTOR_INCREASE = 10  # multiplicative factor growth
NUM_JAC_FACTOR_DECREASE = 0.1  # multiplicative factor shrinkage
+
+
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite differences Jacobian approximation tailored for ODE solvers.

    This function computes finite difference approximation to the Jacobian
    matrix of `fun` with respect to `y` using forward differences.
    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
    ``d f_i / d y_j``.

    A special feature of this function is the ability to correct the step
    size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error which
    approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a
    huge error and assures that the estimated derivatives are reasonably
    close to the true values (i.e., the finite difference approximation at
    least qualitatively reflects the structure of the true Jacobian).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Threshold for `y` value used for computing the step size as
        ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
        absolute tolerance (atol) for a solver should be passed as `threshold`.
    factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian, `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = np.asarray(y)
    n = y.shape[0]
    if n == 0:
        # No equations: empty Jacobian, factor passed through unchanged.
        return np.empty((0, 0)), factor

    if factor is None:
        factor = np.full(n, EPS ** 0.5)
    else:
        # Copy so the caller's array is not mutated by the adjustments below.
        factor = factor.copy()

    # Direct the step as ODE dictates, hoping that such a step won't lead to
    # a problematic region. For complex ODEs it makes sense to use the real
    # part of f as we use steps along real axis.
    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
    y_scale = f_sign * np.maximum(threshold, np.abs(y))
    # Adding and subtracting y makes h exactly representable in floating
    # point, reducing cancellation error in the forward difference.
    h = (y + factor * y_scale) - y

    # Make sure that the step is not 0 to start with. Not likely it will be
    # executed often.
    for i in np.nonzero(h == 0)[0]:
        while h[i] == 0:
            factor[i] *= 10
            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]

    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
    else:
        structure, groups = sparsity
        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
                               structure, groups)
+
+
def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
    # Dense forward-difference Jacobian with per-column adaptive steps.
    n = y.shape[0]
    h_vecs = np.diag(h)
    # One vectorized call evaluates all n perturbed columns at once.
    f_new = fun(t, y[:, None] + h_vecs)
    diff = f_new - f[:, None]
    # For each column, the largest-magnitude difference and the scale of
    # the function values it came from.
    max_ind = np.argmax(np.abs(diff), axis=0)
    r = np.arange(n)
    max_diff = np.abs(diff[max_ind, r])
    scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

    # Columns whose difference is dominated by round-off: retry with a
    # larger step and keep whichever result is relatively larger.
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        # Same exact-representability trick as in `num_jac`.
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_vecs[ind, ind] = h_new
        f_new = fun(t, y[:, None] + h_vecs[:, ind])
        diff_new = f_new - f[:, None]
        max_ind = np.argmax(np.abs(diff_new), axis=0)
        r = np.arange(ind.shape[0])
        max_diff_new = np.abs(diff_new[max_ind, r])
        scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

        # Accept the retried column only if its difference is relatively
        # larger than the one obtained with the original step.
        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    # Divide each column by its step to turn differences into derivatives.
    diff /= h

    # Adapt step factors for the next call and clip from below.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
+
+
def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    # Sparse forward-difference Jacobian: columns are perturbed in groups
    # of structurally-independent columns, so only `n_groups` evaluations
    # of `fun` are needed instead of n.
    n = y.shape[0]
    n_groups = np.max(groups) + 1
    h_vecs = np.empty((n_groups, n))
    for group in range(n_groups):
        # Perturb simultaneously all columns belonging to this group.
        e = np.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T

    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]

    # Scatter the grouped differences back into the sparsity pattern.
    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
    r = np.arange(n)
    max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
    scale = np.maximum(np.abs(f[max_ind]),
                       np.abs(f_new[max_ind, groups[r]]))

    # Columns whose difference is dominated by round-off: retry with a
    # larger step and keep whichever result is relatively larger.
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        # Same exact-representability trick as in `num_jac`.
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = np.zeros(n)
        h_new_all[ind] = h_new

        # Only the groups containing a rejected column are re-evaluated;
        # `groups_map` maps an original group id to its retry row.
        groups_unique = np.unique(groups[ind])
        groups_map = np.empty(n_groups, dtype=int)
        h_vecs = np.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = np.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T

        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
                               (i, j)), shape=(n, ind.shape[0])).tocsc()

        max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
        r = np.arange(ind.shape[0])
        max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = np.maximum(
            np.abs(f[max_ind_new]),
            np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))

        # Accept the retried column only if its difference is relatively
        # larger than the one obtained with the original step.
        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    # Divide the non-zeros of each CSC column by that column's step.
    diff.data /= np.repeat(h, np.diff(diff.indptr))

    # Adapt step factors for the next call and clip from below.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/dop853_coefficients.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/dop853_coefficients.py
new file mode 100644
index 0000000..f39f2f3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/dop853_coefficients.py
@@ -0,0 +1,193 @@
+import numpy as np
+
# Stage counts for the DOP853 tableau defined below. A and C below hold
# N_STAGES_EXTENDED entries: the extra stages beyond N_STAGES are used
# only for the dense-output interpolant (see D).
N_STAGES = 12
N_STAGES_EXTENDED = 16
INTERPOLATOR_POWER = 7  # degree of the dense-output polynomial
+
+C = np.array([0.0,
+ 0.526001519587677318785587544488e-01,
+ 0.789002279381515978178381316732e-01,
+ 0.118350341907227396726757197510,
+ 0.281649658092772603273242802490,
+ 0.333333333333333333333333333333,
+ 0.25,
+ 0.307692307692307692307692307692,
+ 0.651282051282051282051282051282,
+ 0.6,
+ 0.857142857142857142857142857142,
+ 1.0,
+ 1.0,
+ 0.1,
+ 0.2,
+ 0.777777777777777777777777777778])
+
+A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
+A[1, 0] = 5.26001519587677318785587544488e-2
+
+A[2, 0] = 1.97250569845378994544595329183e-2
+A[2, 1] = 5.91751709536136983633785987549e-2
+
+A[3, 0] = 2.95875854768068491816892993775e-2
+A[3, 2] = 8.87627564304205475450678981324e-2
+
+A[4, 0] = 2.41365134159266685502369798665e-1
+A[4, 2] = -8.84549479328286085344864962717e-1
+A[4, 3] = 9.24834003261792003115737966543e-1
+
+A[5, 0] = 3.7037037037037037037037037037e-2
+A[5, 3] = 1.70828608729473871279604482173e-1
+A[5, 4] = 1.25467687566822425016691814123e-1
+
+A[6, 0] = 3.7109375e-2
+A[6, 3] = 1.70252211019544039314978060272e-1
+A[6, 4] = 6.02165389804559606850219397283e-2
+A[6, 5] = -1.7578125e-2
+
+A[7, 0] = 3.70920001185047927108779319836e-2
+A[7, 3] = 1.70383925712239993810214054705e-1
+A[7, 4] = 1.07262030446373284651809199168e-1
+A[7, 5] = -1.53194377486244017527936158236e-2
+A[7, 6] = 8.27378916381402288758473766002e-3
+
+A[8, 0] = 6.24110958716075717114429577812e-1
+A[8, 3] = -3.36089262944694129406857109825
+A[8, 4] = -8.68219346841726006818189891453e-1
+A[8, 5] = 2.75920996994467083049415600797e1
+A[8, 6] = 2.01540675504778934086186788979e1
+A[8, 7] = -4.34898841810699588477366255144e1
+
+A[9, 0] = 4.77662536438264365890433908527e-1
+A[9, 3] = -2.48811461997166764192642586468
+A[9, 4] = -5.90290826836842996371446475743e-1
+A[9, 5] = 2.12300514481811942347288949897e1
+A[9, 6] = 1.52792336328824235832596922938e1
+A[9, 7] = -3.32882109689848629194453265587e1
+A[9, 8] = -2.03312017085086261358222928593e-2
+
+A[10, 0] = -9.3714243008598732571704021658e-1
+A[10, 3] = 5.18637242884406370830023853209
+A[10, 4] = 1.09143734899672957818500254654
+A[10, 5] = -8.14978701074692612513997267357
+A[10, 6] = -1.85200656599969598641566180701e1
+A[10, 7] = 2.27394870993505042818970056734e1
+A[10, 8] = 2.49360555267965238987089396762
+A[10, 9] = -3.0467644718982195003823669022
+
+A[11, 0] = 2.27331014751653820792359768449
+A[11, 3] = -1.05344954667372501984066689879e1
+A[11, 4] = -2.00087205822486249909675718444
+A[11, 5] = -1.79589318631187989172765950534e1
+A[11, 6] = 2.79488845294199600508499808837e1
+A[11, 7] = -2.85899827713502369474065508674
+A[11, 8] = -8.87285693353062954433549289258
+A[11, 9] = 1.23605671757943030647266201528e1
+A[11, 10] = 6.43392746015763530355970484046e-1
+
+A[12, 0] = 5.42937341165687622380535766363e-2
+A[12, 5] = 4.45031289275240888144113950566
+A[12, 6] = 1.89151789931450038304281599044
+A[12, 7] = -5.8012039600105847814672114227
+A[12, 8] = 3.1116436695781989440891606237e-1
+A[12, 9] = -1.52160949662516078556178806805e-1
+A[12, 10] = 2.01365400804030348374776537501e-1
+A[12, 11] = 4.47106157277725905176885569043e-2
+
+A[13, 0] = 5.61675022830479523392909219681e-2
+A[13, 6] = 2.53500210216624811088794765333e-1
+A[13, 7] = -2.46239037470802489917441475441e-1
+A[13, 8] = -1.24191423263816360469010140626e-1
+A[13, 9] = 1.5329179827876569731206322685e-1
+A[13, 10] = 8.20105229563468988491666602057e-3
+A[13, 11] = 7.56789766054569976138603589584e-3
+A[13, 12] = -8.298e-3
+
+A[14, 0] = 3.18346481635021405060768473261e-2
+A[14, 5] = 2.83009096723667755288322961402e-2
+A[14, 6] = 5.35419883074385676223797384372e-2
+A[14, 7] = -5.49237485713909884646569340306e-2
+A[14, 10] = -1.08347328697249322858509316994e-4
+A[14, 11] = 3.82571090835658412954920192323e-4
+A[14, 12] = -3.40465008687404560802977114492e-4
+A[14, 13] = 1.41312443674632500278074618366e-1
+
+A[15, 0] = -4.28896301583791923408573538692e-1
+A[15, 5] = -4.69762141536116384314449447206
+A[15, 6] = 7.68342119606259904184240953878
+A[15, 7] = 4.06898981839711007970213554331
+A[15, 8] = 3.56727187455281109270669543021e-1
+A[15, 12] = -1.39902416515901462129418009734e-3
+A[15, 13] = 2.9475147891527723389556272149
+A[15, 14] = -9.15095847217987001081870187138
+
+
+B = A[N_STAGES, :N_STAGES]
+
+E3 = np.zeros(N_STAGES + 1)
+E3[:-1] = B.copy()
+E3[0] -= 0.244094488188976377952755905512
+E3[8] -= 0.733846688281611857341361741547
+E3[11] -= 0.220588235294117647058823529412e-1
+
+E5 = np.zeros(N_STAGES + 1)
+E5[0] = 0.1312004499419488073250102996e-1
+E5[5] = -0.1225156446376204440720569753e+1
+E5[6] = -0.4957589496572501915214079952
+E5[7] = 0.1664377182454986536961530415e+1
+E5[8] = -0.3503288487499736816886487290
+E5[9] = 0.3341791187130174790297318841
+E5[10] = 0.8192320648511571246570742613e-1
+E5[11] = -0.2235530786388629525884427845e-1
+
+# First 3 coefficients are computed separately.
+D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
+D[0, 0] = -0.84289382761090128651353491142e+1
+D[0, 5] = 0.56671495351937776962531783590
+D[0, 6] = -0.30689499459498916912797304727e+1
+D[0, 7] = 0.23846676565120698287728149680e+1
+D[0, 8] = 0.21170345824450282767155149946e+1
+D[0, 9] = -0.87139158377797299206789907490
+D[0, 10] = 0.22404374302607882758541771650e+1
+D[0, 11] = 0.63157877876946881815570249290
+D[0, 12] = -0.88990336451333310820698117400e-1
+D[0, 13] = 0.18148505520854727256656404962e+2
+D[0, 14] = -0.91946323924783554000451984436e+1
+D[0, 15] = -0.44360363875948939664310572000e+1
+
+D[1, 0] = 0.10427508642579134603413151009e+2
+D[1, 5] = 0.24228349177525818288430175319e+3
+D[1, 6] = 0.16520045171727028198505394887e+3
+D[1, 7] = -0.37454675472269020279518312152e+3
+D[1, 8] = -0.22113666853125306036270938578e+2
+D[1, 9] = 0.77334326684722638389603898808e+1
+D[1, 10] = -0.30674084731089398182061213626e+2
+D[1, 11] = -0.93321305264302278729567221706e+1
+D[1, 12] = 0.15697238121770843886131091075e+2
+D[1, 13] = -0.31139403219565177677282850411e+2
+D[1, 14] = -0.93529243588444783865713862664e+1
+D[1, 15] = 0.35816841486394083752465898540e+2
+
+D[2, 0] = 0.19985053242002433820987653617e+2
+D[2, 5] = -0.38703730874935176555105901742e+3
+D[2, 6] = -0.18917813819516756882830838328e+3
+D[2, 7] = 0.52780815920542364900561016686e+3
+D[2, 8] = -0.11573902539959630126141871134e+2
+D[2, 9] = 0.68812326946963000169666922661e+1
+D[2, 10] = -0.10006050966910838403183860980e+1
+D[2, 11] = 0.77771377980534432092869265740
+D[2, 12] = -0.27782057523535084065932004339e+1
+D[2, 13] = -0.60196695231264120758267380846e+2
+D[2, 14] = 0.84320405506677161018159903784e+2
+D[2, 15] = 0.11992291136182789328035130030e+2
+
+D[3, 0] = -0.25693933462703749003312586129e+2
+D[3, 5] = -0.15418974869023643374053993627e+3
+D[3, 6] = -0.23152937917604549567536039109e+3
+D[3, 7] = 0.35763911791061412378285349910e+3
+D[3, 8] = 0.93405324183624310003907691704e+2
+D[3, 9] = -0.37458323136451633156875139351e+2
+D[3, 10] = 0.10409964950896230045147246184e+3
+D[3, 11] = 0.29840293426660503123344363579e+2
+D[3, 12] = -0.43533456590011143754432175058e+2
+D[3, 13] = 0.96324553959188282948394950600e+2
+D[3, 14] = -0.39177261675615439165231486172e+2
+D[3, 15] = -0.14972683625798562581422125276e+3
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/ivp.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/ivp.py
new file mode 100644
index 0000000..06a4a3b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/ivp.py
@@ -0,0 +1,663 @@
+import inspect
+import numpy as np
+from .bdf import BDF
+from .radau import Radau
+from .rk import RK23, RK45, DOP853
+from .lsoda import LSODA
+from scipy.optimize import OptimizeResult
+from .common import EPS, OdeSolution
+from .base import OdeSolver
+
+
# Mapping from the `method` string accepted by `solve_ivp` to the
# corresponding solver class.
METHODS = {'RK23': RK23,
           'RK45': RK45,
           'DOP853': DOP853,
           'Radau': Radau,
           'BDF': BDF,
           'LSODA': LSODA}


# Human-readable messages keyed by solver termination status
# (0: reached the end of the interval, 1: terminal event).
MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
            1: "A termination event occurred."}
+
+
class OdeResult(OptimizeResult):
    """Result container returned by `solve_ivp`; a dict subclass with
    attribute access, inherited from `scipy.optimize.OptimizeResult`."""
    pass
+
+
def prepare_events(events):
    """Standardize event functions and extract is_terminal and direction.

    A single callable is wrapped into a 1-tuple. Missing ``terminal`` /
    ``direction`` attributes default to False and 0 respectively. If
    `events` is None, ``(None, None, None)`` is returned.
    """
    if callable(events):
        events = (events,)

    if events is None:
        return None, None, None

    n_events = len(events)
    is_terminal = np.empty(n_events, dtype=bool)
    direction = np.empty(n_events)
    for i, event in enumerate(events):
        is_terminal[i] = getattr(event, 'terminal', False)
        direction[i] = getattr(event, 'direction', 0)

    return events, is_terminal, direction
+
+
def solve_event_equation(event, sol, t_old, t):
    """Solve an equation corresponding to an ODE event.

    The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an
    ODE solver using some sort of interpolation. It is solved by
    `scipy.optimize.brentq` with xtol=atol=4*EPS.

    Parameters
    ----------
    event : callable
        Function ``event(t, y)``.
    sol : callable
        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
        and `t`.
    t_old, t : float
        Previous and new values of time. They will be used as a bracketing
        interval.

    Returns
    -------
    root : float
        Found solution.
    """
    from scipy.optimize import brentq
    tol = 4 * np.finfo(float).eps
    return brentq(lambda s: event(s, sol(s)), t_old, t, xtol=tol, rtol=tol)
+
+
def handle_events(sol, events, active_events, is_terminal, t_old, t):
    """Helper function to handle events.

    Parameters
    ----------
    sol : DenseOutput
        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
        and `t`.
    events : list of callables, length n_events
        Event functions with signatures ``event(t, y)``.
    active_events : ndarray
        Indices of events which occurred.
    is_terminal : ndarray, shape (n_events,)
        Which events are terminal.
    t_old, t : float
        Previous and new values of time.

    Returns
    -------
    root_indices : ndarray
        Indices of events which take zero between `t_old` and `t` and before
        a possible termination.
    roots : ndarray
        Values of t at which events occurred.
    terminate : bool
        Whether a terminal event occurred.
    """
    roots = np.asarray([solve_event_equation(events[index], sol, t_old, t)
                        for index in active_events])

    if not np.any(is_terminal[active_events]):
        return active_events, roots, False

    # Sort events chronologically along the direction of integration,
    # then truncate everything after the first terminal event.
    order = np.argsort(roots) if t > t_old else np.argsort(-roots)
    active_events = active_events[order]
    roots = roots[order]
    cutoff = np.nonzero(is_terminal[active_events])[0][0]

    return active_events[:cutoff + 1], roots[:cutoff + 1], True
+
+
def find_active_events(g, g_new, direction):
    """Find which event occurred during an integration step.

    Parameters
    ----------
    g, g_new : array_like, shape (n_events,)
        Values of event functions at a current and next points.
    direction : ndarray, shape (n_events,)
        Event "direction" according to the definition in `solve_ivp`.

    Returns
    -------
    active_events : ndarray
        Indices of events which occurred during the step.
    """
    g = np.asarray(g)
    g_new = np.asarray(g_new)
    crossed_up = (g <= 0) & (g_new >= 0)
    crossed_down = (g >= 0) & (g_new <= 0)
    crossed_any = crossed_up | crossed_down
    # An event fires when the sign change agrees with its direction;
    # direction 0 accepts crossings either way.
    mask = ((direction > 0) & crossed_up
            | (direction < 0) & crossed_down
            | (direction == 0) & crossed_any)

    return np.nonzero(mask)[0]
+
+
+def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
+ events=None, vectorized=False, args=None, **options):
+ """Solve an initial value problem for a system of ODEs.
+
+ This function numerically integrates a system of ordinary differential
+ equations given an initial value::
+
+ dy / dt = f(t, y)
+ y(t0) = y0
+
+ Here t is a 1-D independent variable (time), y(t) is an
+ N-D vector-valued function (state), and an N-D
+ vector-valued function f(t, y) determines the differential equations.
+ The goal is to find y(t) approximately satisfying the differential
+ equations, given an initial value y(t0)=y0.
+
+ Some of the solvers support integration in the complex domain, but note
+ that for stiff ODE solvers, the right-hand side must be
+ complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
+ To solve a problem in the complex domain, pass y0 with a complex data type.
+ Another option always available is to rewrite your problem for real and
+ imaginary parts separately.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(t, y)``.
+ Here `t` is a scalar, and there are two options for the ndarray `y`:
+ It can either have shape (n,); then `fun` must return array_like with
+ shape (n,). Alternatively, it can have shape (n, k); then `fun`
+ must return an array_like with shape (n, k), i.e., each column
+ corresponds to a single column in `y`. The choice between the two
+ options is determined by `vectorized` argument (see below). The
+ vectorized implementation allows a faster approximation of the Jacobian
+ by finite differences (required for stiff solvers).
+ t_span : 2-tuple of floats
+ Interval of integration (t0, tf). The solver starts with t=t0 and
+ integrates until it reaches t=tf.
+ y0 : array_like, shape (n,)
+ Initial state. For problems in the complex domain, pass `y0` with a
+ complex data type (even if the initial value is purely real).
+ method : string or `OdeSolver`, optional
+ Integration method to use:
+
+ * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
+ The error is controlled assuming accuracy of the fourth-order
+ method, but steps are taken using the fifth-order accurate
+ formula (local extrapolation is done). A quartic interpolation
+ polynomial is used for the dense output [2]_. Can be applied in
+ the complex domain.
+ * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
+ is controlled assuming accuracy of the second-order method, but
+ steps are taken using the third-order accurate formula (local
+ extrapolation is done). A cubic Hermite polynomial is used for the
+ dense output. Can be applied in the complex domain.
+ * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
+ Python implementation of the "DOP853" algorithm originally
+ written in Fortran [14]_. A 7-th order interpolation polynomial
+ accurate to 7-th order is used for the dense output.
+ Can be applied in the complex domain.
+ * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
+ order 5 [4]_. The error is controlled with a third-order accurate
+ embedded formula. A cubic polynomial which satisfies the
+ collocation conditions is used for the dense output.
+ * 'BDF': Implicit multi-step variable-order (1 to 5) method based
+ on a backward differentiation formula for the derivative
+ approximation [5]_. The implementation follows the one described
+ in [6]_. A quasi-constant step scheme is used and accuracy is
+ enhanced using the NDF modification. Can be applied in the
+ complex domain.
+ * 'LSODA': Adams/BDF method with automatic stiffness detection and
+ switching [7]_, [8]_. This is a wrapper of the Fortran solver
+ from ODEPACK.
+
+ Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
+ for non-stiff problems and implicit methods ('Radau', 'BDF') for
+ stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
+ for solving with high precision (low values of `rtol` and `atol`).
+
+ If not sure, first try to run 'RK45'. If it makes unusually many
+ iterations, diverges, or fails, your problem is likely to be stiff and
+ you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
+ choice, but it might be somewhat less convenient to work with as it
+ wraps old Fortran code.
+
+ You can also pass an arbitrary class derived from `OdeSolver` which
+ implements the solver.
+ t_eval : array_like or None, optional
+ Times at which to store the computed solution, must be sorted and lie
+ within `t_span`. If None (default), use points selected by the solver.
+ dense_output : bool, optional
+ Whether to compute a continuous solution. Default is False.
+ events : callable, or list of callables, optional
+ Events to track. If None (default), no events will be tracked.
+ Each event occurs at the zeros of a continuous function of time and
+ state. Each function must have the signature ``event(t, y)`` and return
+ a float. The solver will find an accurate value of `t` at which
+ ``event(t, y(t)) = 0`` using a root-finding algorithm. By default, all
+ zeros will be found. The solver looks for a sign change over each step,
+ so if multiple zero crossings occur within one step, events may be
+ missed. Additionally each `event` function might have the following
+ attributes:
+
+ terminal: bool, optional
+ Whether to terminate integration if this event occurs.
+ Implicitly False if not assigned.
+ direction: float, optional
+ Direction of a zero crossing. If `direction` is positive,
+ `event` will only trigger when going from negative to positive,
+ and vice versa if `direction` is negative. If 0, then either
+ direction will trigger event. Implicitly 0 if not assigned.
+
+ You can assign attributes like ``event.terminal = True`` to any
+ function in Python.
+ vectorized : bool, optional
+ Whether `fun` is implemented in a vectorized fashion. Default is False.
+ args : tuple, optional
+ Additional arguments to pass to the user-defined functions. If given,
+ the additional arguments are passed to all user-defined functions.
+ So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
+ then `jac` (if given) and any event functions must have the same
+ signature, and `args` must be a tuple of length 3.
+ options
+ Options passed to a chosen solver. All options available for already
+ implemented solvers are listed below.
+ first_step : float or None, optional
+ Initial step size. Default is `None` which means that the algorithm
+ should choose.
+ max_step : float, optional
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
+ bounded and determined solely by the solver.
+ rtol, atol : float or array_like, optional
+ Relative and absolute tolerances. The solver keeps the local error
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+ relative accuracy (number of correct digits). But if a component of `y`
+ is approximately below `atol`, the error only needs to fall within
+ the same `atol` threshold, and the number of correct digits is not
+ guaranteed. If components of y have different scales, it might be
+ beneficial to set different `atol` values for different components by
+ passing array_like with shape (n,) for `atol`. Default values are
+ 1e-3 for `rtol` and 1e-6 for `atol`.
+ jac : array_like, sparse_matrix, callable or None, optional
+ Jacobian matrix of the right-hand side of the system with respect
+ to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
+ Jacobian matrix has shape (n, n) and its element (i, j) is equal to
+ ``d f_i / d y_j``. There are three ways to define the Jacobian:
+
+ * If array_like or sparse_matrix, the Jacobian is assumed to
+ be constant. Not supported by 'LSODA'.
+ * If callable, the Jacobian is assumed to depend on both
+ t and y; it will be called as ``jac(t, y)``, as necessary.
+ For 'Radau' and 'BDF' methods, the return value might be a
+ sparse matrix.
+ * If None (default), the Jacobian will be approximated by
+ finite differences.
+
+ It is generally recommended to provide the Jacobian rather than
+ relying on a finite-difference approximation.
+ jac_sparsity : array_like, sparse matrix or None, optional
+ Defines a sparsity structure of the Jacobian matrix for a finite-
+ difference approximation. Its shape must be (n, n). This argument
+ is ignored if `jac` is not `None`. If the Jacobian has only few
+ non-zero elements in *each* row, providing the sparsity structure
+ will greatly speed up the computations [10]_. A zero entry means that
+ a corresponding element in the Jacobian is always zero. If None
+ (default), the Jacobian is assumed to be dense.
+ Not supported by 'LSODA', see `lband` and `uband` instead.
+ lband, uband : int or None, optional
+ Parameters defining the bandwidth of the Jacobian for the 'LSODA'
+ method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
+ Default is None. Setting these requires your jac routine to return the
+ Jacobian in the packed format: the returned array must have ``n``
+ columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
+ written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
+ The same format is used in `scipy.linalg.solve_banded` (check for an
+ illustration). These parameters can be also used with ``jac=None`` to
+ reduce the number of Jacobian elements estimated by finite differences.
+ min_step : float, optional
+ The minimum allowed step size for 'LSODA' method.
+ By default `min_step` is zero.
+
+ Returns
+ -------
+ Bunch object with the following fields defined:
+ t : ndarray, shape (n_points,)
+ Time points.
+ y : ndarray, shape (n, n_points)
+ Values of the solution at `t`.
+ sol : `OdeSolution` or None
+ Found solution as `OdeSolution` instance; None if `dense_output` was
+ set to False.
+ t_events : list of ndarray or None
+ Contains for each event type a list of arrays at which an event of
+ that type was detected. None if `events` was None.
+ y_events : list of ndarray or None
+ For each value of `t_events`, the corresponding value of the solution.
+ None if `events` was None.
+ nfev : int
+ Number of evaluations of the right-hand side.
+ njev : int
+ Number of evaluations of the Jacobian.
+ nlu : int
+ Number of LU decompositions.
+ status : int
+ Reason for algorithm termination:
+
+ * -1: Integration step failed.
+ * 0: The solver successfully reached the end of `tspan`.
+ * 1: A termination event occurred.
+
+ message : string
+ Human-readable description of the termination reason.
+ success : bool
+ True if the solver reached the interval end or a termination event
+ occurred (``status >= 0``).
+
+ References
+ ----------
+ .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+ formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+ No. 1, pp. 19-26, 1980.
+ .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+ of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
+ .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
+ Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
+ .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
+ Stiff and Differential-Algebraic Problems", Sec. IV.8.
+ .. [5] `Backward Differentiation Formula
+ <https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
+ on Wikipedia.
+ .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
+ COMPUT., Vol. 18, No. 1, pp. 1-22, January 1997.
+ .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
+ Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
+ pp. 55-64, 1983.
+ .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
+ nonstiff systems of ordinary differential equations", SIAM Journal
+ on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
+ 1983.
+ .. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
+ Wikipedia.
+ .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
+ and its Applications, 13, pp. 117-120, 1974.
+ .. [11] `Cauchy-Riemann equations
+ <https://en.wikipedia.org/wiki/Cauchy%E2%80%93Riemann_equations>`_ on
+ Wikipedia.
+ .. [12] `Lotka-Volterra equations
+ <https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
+ on Wikipedia.
+ .. [13] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+ Equations I: Nonstiff Problems", Sec. II.
+ .. [14] `Page with original Fortran code of DOP853
+ <http://www.unige.ch/~hairer/software.html>`_.
+
+ Examples
+ --------
+ Basic exponential decay showing automatically chosen time points.
+
+ >>> from scipy.integrate import solve_ivp
+ >>> def exponential_decay(t, y): return -0.5 * y
+ >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
+ >>> print(sol.t)
+ [ 0. 0.11487653 1.26364188 3.06061781 4.81611105 6.57445806
+ 8.33328988 10. ]
+ >>> print(sol.y)
+ [[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
+ 0.03107158 0.01350781]
+ [4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091
+ 0.06214316 0.02701561]
+ [8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181
+ 0.12428631 0.05403123]]
+
+ Specifying points where the solution is desired.
+
+ >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
+ ... t_eval=[0, 1, 2, 4, 10])
+ >>> print(sol.t)
+ [ 0 1 2 4 10]
+ >>> print(sol.y)
+ [[2. 1.21305369 0.73534021 0.27066736 0.01350938]
+ [4. 2.42610739 1.47068043 0.54133472 0.02701876]
+ [8. 4.85221478 2.94136085 1.08266944 0.05403753]]
+
+ Cannon fired upward with terminal event upon impact. The ``terminal`` and
+ ``direction`` fields of an event are applied by monkey patching a function.
+ Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
+ at position 0 with velocity +10. Note that the integration never reaches
+ t=100 because the event is terminal.
+
+ >>> def upward_cannon(t, y): return [y[1], -0.5]
+ >>> def hit_ground(t, y): return y[0]
+ >>> hit_ground.terminal = True
+ >>> hit_ground.direction = -1
+ >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
+ >>> print(sol.t_events)
+ [array([40.])]
+ >>> print(sol.t)
+ [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
+ 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
+
+ Use `dense_output` and `events` to find position, which is 100, at the apex
+ of the cannonball's trajectory. Apex is not defined as terminal, so both
+ apex and hit_ground are found. There is no information at t=20, so the sol
+ attribute is used to evaluate the solution. The sol attribute is returned
+ by setting ``dense_output=True``. Alternatively, the `y_events` attribute
+ can be used to access the solution at the time of the event.
+
+ >>> def apex(t, y): return y[1]
+ >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
+ ... events=(hit_ground, apex), dense_output=True)
+ >>> print(sol.t_events)
+ [array([40.]), array([20.])]
+ >>> print(sol.t)
+ [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
+ 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
+ >>> print(sol.sol(sol.t_events[1][0]))
+ [100. 0.]
+ >>> print(sol.y_events)
+ [array([[-5.68434189e-14, -1.00000000e+01]]), array([[1.00000000e+02, 1.77635684e-15]])]
+
+ As an example of a system with additional parameters, we'll implement
+ the Lotka-Volterra equations [12]_.
+
+ >>> def lotkavolterra(t, z, a, b, c, d):
+ ... x, y = z
+ ... return [a*x - b*x*y, -c*y + d*x*y]
+ ...
+
+ We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
+ argument.
+
+ >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
+ ... dense_output=True)
+
+ Compute a dense solution and plot it.
+
+ >>> t = np.linspace(0, 15, 300)
+ >>> z = sol.sol(t)
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(t, z.T)
+ >>> plt.xlabel('t')
+ >>> plt.legend(['x', 'y'], shadow=True)
+ >>> plt.title('Lotka-Volterra System')
+ >>> plt.show()
+
+ """
+ if method not in METHODS and not (
+ inspect.isclass(method) and issubclass(method, OdeSolver)):
+ raise ValueError("`method` must be one of {} or OdeSolver class."
+ .format(METHODS))
+
+ t0, tf = float(t_span[0]), float(t_span[1])
+
+ if args is not None:
+ # Wrap the user's fun (and jac, if given) in lambdas to hide the
+ # additional parameters. Pass in the original fun as a keyword
+ # argument to keep it in the scope of the lambda.
+ fun = lambda t, x, fun=fun: fun(t, x, *args)
+ jac = options.get('jac')
+ if callable(jac):
+ options['jac'] = lambda t, x: jac(t, x, *args)
+
+ if t_eval is not None:
+ t_eval = np.asarray(t_eval)
+ if t_eval.ndim != 1:
+ raise ValueError("`t_eval` must be 1-dimensional.")
+
+ if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
+ raise ValueError("Values in `t_eval` are not within `t_span`.")
+
+ d = np.diff(t_eval)
+ if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
+ raise ValueError("Values in `t_eval` are not properly sorted.")
+
+ if tf > t0:
+ t_eval_i = 0
+ else:
+ # Make order of t_eval decreasing to use np.searchsorted.
+ t_eval = t_eval[::-1]
+ # This will be an upper bound for slices.
+ t_eval_i = t_eval.shape[0]
+
+ if method in METHODS:
+ method = METHODS[method]
+
+ solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
+
+ if t_eval is None:
+ ts = [t0]
+ ys = [y0]
+ elif t_eval is not None and dense_output:
+ ts = []
+ ti = [t0]
+ ys = []
+ else:
+ ts = []
+ ys = []
+
+ interpolants = []
+
+ events, is_terminal, event_dir = prepare_events(events)
+
+ if events is not None:
+ if args is not None:
+ # Wrap user functions in lambdas to hide the additional parameters.
+ # The original event function is passed as a keyword argument to the
+ # lambda to keep the original function in scope (i.e., avoid the
+ # late binding closure "gotcha").
+ events = [lambda t, x, event=event: event(t, x, *args)
+ for event in events]
+ g = [event(t0, y0) for event in events]
+ t_events = [[] for _ in range(len(events))]
+ y_events = [[] for _ in range(len(events))]
+ else:
+ t_events = None
+ y_events = None
+
+ status = None
+ while status is None:
+ message = solver.step()
+
+ if solver.status == 'finished':
+ status = 0
+ elif solver.status == 'failed':
+ status = -1
+ break
+
+ t_old = solver.t_old
+ t = solver.t
+ y = solver.y
+
+ if dense_output:
+ sol = solver.dense_output()
+ interpolants.append(sol)
+ else:
+ sol = None
+
+ if events is not None:
+ g_new = [event(t, y) for event in events]
+ active_events = find_active_events(g, g_new, event_dir)
+ if active_events.size > 0:
+ if sol is None:
+ sol = solver.dense_output()
+
+ root_indices, roots, terminate = handle_events(
+ sol, events, active_events, is_terminal, t_old, t)
+
+ for e, te in zip(root_indices, roots):
+ t_events[e].append(te)
+ y_events[e].append(sol(te))
+
+ if terminate:
+ status = 1
+ t = roots[-1]
+ y = sol(t)
+
+ g = g_new
+
+ if t_eval is None:
+ ts.append(t)
+ ys.append(y)
+ else:
+ # The value in t_eval equal to t will be included.
+ if solver.direction > 0:
+ t_eval_i_new = np.searchsorted(t_eval, t, side='right')
+ t_eval_step = t_eval[t_eval_i:t_eval_i_new]
+ else:
+ t_eval_i_new = np.searchsorted(t_eval, t, side='left')
+ # It has to be done with two slice operations, because
+ # you can't slice to 0th element inclusive using backward
+ # slicing.
+ t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
+
+ if t_eval_step.size > 0:
+ if sol is None:
+ sol = solver.dense_output()
+ ts.append(t_eval_step)
+ ys.append(sol(t_eval_step))
+ t_eval_i = t_eval_i_new
+
+ if t_eval is not None and dense_output:
+ ti.append(t)
+
+ message = MESSAGES.get(status, message)
+
+ if t_events is not None:
+ t_events = [np.asarray(te) for te in t_events]
+ y_events = [np.asarray(ye) for ye in y_events]
+
+ if t_eval is None:
+ ts = np.array(ts)
+ ys = np.vstack(ys).T
+ else:
+ ts = np.hstack(ts)
+ ys = np.hstack(ys)
+
+ if dense_output:
+ if t_eval is None:
+ sol = OdeSolution(ts, interpolants)
+ else:
+ sol = OdeSolution(ti, interpolants)
+ else:
+ sol = None
+
+ return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
+ nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
+ status=status, message=message, success=status >= 0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/lsoda.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/lsoda.py
new file mode 100644
index 0000000..9b695c7
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/lsoda.py
@@ -0,0 +1,188 @@
+import numpy as np
+from scipy.integrate import ode
+from .common import validate_tol, validate_first_step, warn_extraneous
+from .base import OdeSolver, DenseOutput
+
+
+class LSODA(OdeSolver):
+ """Adams/BDF method with automatic stiffness detection and switching.
+
+ This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
+ automatically between the nonstiff Adams method and the stiff BDF method.
+ The method was originally detailed in [2]_.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(t, y)``.
+ Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+ It can either have shape (n,); then ``fun`` must return array_like with
+ shape (n,). Alternatively it can have shape (n, k); then ``fun``
+ must return an array_like with shape (n, k), i.e. each column
+ corresponds to a single column in ``y``. The choice between the two
+ options is determined by `vectorized` argument (see below). The
+ vectorized implementation allows a faster approximation of the Jacobian
+ by finite differences (required for this solver).
+ t0 : float
+ Initial time.
+ y0 : array_like, shape (n,)
+ Initial state.
+ t_bound : float
+ Boundary time - the integration won't continue beyond it. It also
+ determines the direction of the integration.
+ first_step : float or None, optional
+ Initial step size. Default is ``None`` which means that the algorithm
+ should choose.
+ min_step : float, optional
+ Minimum allowed step size. Default is 0.0, i.e., the step size is not
+ bounded and determined solely by the solver.
+ max_step : float, optional
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
+ bounded and determined solely by the solver.
+ rtol, atol : float and array_like, optional
+ Relative and absolute tolerances. The solver keeps the local error
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+ relative accuracy (number of correct digits). But if a component of `y`
+ is approximately below `atol`, the error only needs to fall within
+ the same `atol` threshold, and the number of correct digits is not
+ guaranteed. If components of y have different scales, it might be
+ beneficial to set different `atol` values for different components by
+ passing array_like with shape (n,) for `atol`. Default values are
+ 1e-3 for `rtol` and 1e-6 for `atol`.
+ jac : None or callable, optional
+ Jacobian matrix of the right-hand side of the system with respect to
+ ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
+ equal to ``d f_i / d y_j``. The function will be called as
+ ``jac(t, y)``. If None (default), the Jacobian will be
+ approximated by finite differences. It is generally recommended to
+ provide the Jacobian rather than relying on a finite-difference
+ approximation.
+ lband, uband : int or None
+ Parameters defining the bandwidth of the Jacobian,
+ i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
+ these requires your jac routine to return the Jacobian in the packed format:
+ the returned array must have ``n`` columns and ``uband + lband + 1``
+ rows in which Jacobian diagonals are written. Specifically
+ ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
+ in `scipy.linalg.solve_banded` (check for an illustration).
+ These parameters can be also used with ``jac=None`` to reduce the
+ number of Jacobian elements estimated by finite differences.
+ vectorized : bool, optional
+ Whether `fun` is implemented in a vectorized fashion. A vectorized
+ implementation offers no advantages for this solver. Default is False.
+
+ Attributes
+ ----------
+ n : int
+ Number of equations.
+ status : string
+ Current status of the solver: 'running', 'finished' or 'failed'.
+ t_bound : float
+ Boundary time.
+ direction : float
+ Integration direction: +1 or -1.
+ t : float
+ Current time.
+ y : ndarray
+ Current state.
+ t_old : float
+ Previous time. None if no steps were made yet.
+ nfev : int
+ Number of evaluations of the right-hand side.
+ njev : int
+ Number of evaluations of the Jacobian.
+
+ References
+ ----------
+ .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
+ Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
+ pp. 55-64, 1983.
+ .. [2] L. Petzold, "Automatic selection of methods for solving stiff and
+ nonstiff systems of ordinary differential equations", SIAM Journal
+ on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
+ 1983.
+ """
+ def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
+ max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
+ uband=None, vectorized=False, **extraneous):
+ warn_extraneous(extraneous)
+ super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized)
+
+ if first_step is None:
+ first_step = 0 # LSODA value for automatic selection.
+ else:
+ first_step = validate_first_step(first_step, t0, t_bound)
+
+ first_step *= self.direction
+
+ if max_step == np.inf:
+ max_step = 0 # LSODA value for infinity.
+ elif max_step <= 0:
+ raise ValueError("`max_step` must be positive.")
+
+ if min_step < 0:
+ raise ValueError("`min_step` must be nonnegative.")
+
+ rtol, atol = validate_tol(rtol, atol, self.n)
+
+ solver = ode(self.fun, jac)
+ solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
+ min_step=min_step, first_step=first_step,
+ lband=lband, uband=uband)
+ solver.set_initial_value(y0, t0)
+
+ # Inject t_bound into rwork array as needed for itask=5.
+ solver._integrator.rwork[0] = self.t_bound
+ solver._integrator.call_args[4] = solver._integrator.rwork
+
+ self._lsoda_solver = solver
+
+ def _step_impl(self):
+ solver = self._lsoda_solver
+ integrator = solver._integrator
+
+ # From lsoda.step and lsoda.integrate itask=5 means take a single
+ # step and do not go past t_bound.
+ itask = integrator.call_args[2]
+ integrator.call_args[2] = 5
+ solver._y, solver.t = integrator.run(
+ solver.f, solver.jac or (lambda: None), solver._y, solver.t,
+ self.t_bound, solver.f_params, solver.jac_params)
+ integrator.call_args[2] = itask
+
+ if solver.successful():
+ self.t = solver.t
+ self.y = solver._y
+ # From LSODA Fortran source njev is equal to nlu.
+ self.njev = integrator.iwork[12]
+ self.nlu = integrator.iwork[12]
+ return True, None
+ else:
+ return False, 'Unexpected istate in LSODA.'
+
+ def _dense_output_impl(self):
+ iwork = self._lsoda_solver._integrator.iwork
+ rwork = self._lsoda_solver._integrator.rwork
+
+ order = iwork[14]
+ h = rwork[11]
+ yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
+ (self.n, order + 1), order='F').copy()
+
+ return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
+
+
+class LsodaDenseOutput(DenseOutput):
+ def __init__(self, t_old, t, h, order, yh):
+ super(LsodaDenseOutput, self).__init__(t_old, t)
+ self.h = h
+ self.yh = yh
+ self.p = np.arange(order + 1)
+
+ def _call_impl(self, t):
+ if t.ndim == 0:
+ x = ((t - self.t) / self.h) ** self.p
+ else:
+ x = ((t - self.t) / self.h) ** self.p[:, None]
+
+ return np.dot(self.yh, x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/radau.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/radau.py
new file mode 100644
index 0000000..417c347
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/radau.py
@@ -0,0 +1,561 @@
+import numpy as np
+from scipy.linalg import lu_factor, lu_solve
+from scipy.sparse import csc_matrix, issparse, eye
+from scipy.sparse.linalg import splu
+from scipy.optimize._numdiff import group_columns
+from .common import (validate_max_step, validate_tol, select_initial_step,
+ norm, num_jac, EPS, warn_extraneous,
+ validate_first_step)
+from .base import OdeSolver, DenseOutput
+
+S6 = 6 ** 0.5
+
+# Butcher tableau. A is not used directly, see below.
+C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
+E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3
+
+# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
+# and a complex conjugate pair. They are written below.
+MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
+MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
+ - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))
+
+# These are transformation matrices.
+T = np.array([
+ [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
+ [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
+ [1, 1, 0]])
+TI = np.array([
+ [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
+ [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
+ [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
+# These linear combinations are used in the algorithm.
+TI_REAL = TI[0]
+TI_COMPLEX = TI[1] + 1j * TI[2]
+
+# Interpolator coefficients.
+P = np.array([
+ [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
+ [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
+ [1/3, -8/3, 10/3]])
+
+
+NEWTON_MAXITER = 6 # Maximum number of Newton iterations.
+MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
+MAX_FACTOR = 10 # Maximum allowed increase in a step size.
+
+
+def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
+ LU_real, LU_complex, solve_lu):
+ """Solve the collocation system.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system.
+ t : float
+ Current time.
+ y : ndarray, shape (n,)
+ Current state.
+ h : float
+ Step to try.
+ Z0 : ndarray, shape (3, n)
+ Initial guess for the solution. It determines new values of `y` at
+ ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
+ scale : float
+ Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
+ tol : float
+ Tolerance to which solve the system. This value is compared with
+ the normalized by `scale` error.
+ LU_real, LU_complex
+ LU decompositions of the system Jacobians.
+ solve_lu : callable
+ Callable which solves a linear system given a LU decomposition. The
+ signature is ``solve_lu(LU, b)``.
+
+ Returns
+ -------
+ converged : bool
+ Whether iterations converged.
+ n_iter : int
+ Number of completed iterations.
+ Z : ndarray, shape (3, n)
+ Found solution.
+ rate : float
+ The rate of convergence.
+ """
+ n = y.shape[0]
+ M_real = MU_REAL / h
+ M_complex = MU_COMPLEX / h
+
+ W = TI.dot(Z0)
+ Z = Z0
+
+ F = np.empty((3, n))
+ ch = h * C
+
+ dW_norm_old = None
+ dW = np.empty_like(W)
+ converged = False
+ rate = None
+ for k in range(NEWTON_MAXITER):
+ for i in range(3):
+ F[i] = fun(t + ch[i], y + Z[i])
+
+ if not np.all(np.isfinite(F)):
+ break
+
+ f_real = F.T.dot(TI_REAL) - M_real * W[0]
+ f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])
+
+ dW_real = solve_lu(LU_real, f_real)
+ dW_complex = solve_lu(LU_complex, f_complex)
+
+ dW[0] = dW_real
+ dW[1] = dW_complex.real
+ dW[2] = dW_complex.imag
+
+ dW_norm = norm(dW / scale)
+ if dW_norm_old is not None:
+ rate = dW_norm / dW_norm_old
+
+ if (rate is not None and (rate >= 1 or
+ rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
+ break
+
+ W += dW
+ Z = T.dot(W)
+
+ if (dW_norm == 0 or
+ rate is not None and rate / (1 - rate) * dW_norm < tol):
+ converged = True
+ break
+
+ dW_norm_old = dW_norm
+
+ return converged, k + 1, Z, rate
+
+
+def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
+ """Predict by which factor to increase/decrease the step size.
+
+ The algorithm is described in [1]_.
+
+ Parameters
+ ----------
+ h_abs, h_abs_old : float
+ Current and previous values of the step size, `h_abs_old` can be None
+ (see Notes).
+ error_norm, error_norm_old : float
+ Current and previous values of the error norm, `error_norm_old` can
+ be None (see Notes).
+
+ Returns
+ -------
+ factor : float
+ Predicted factor.
+
+ Notes
+ -----
+ If `h_abs_old` and `error_norm_old` are both not None then a two-step
+ algorithm is used, otherwise a one-step algorithm is used.
+
+ References
+ ----------
+ .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
+ Stiff and Differential-Algebraic Problems", Sec. IV.8.
+ """
+ if error_norm_old is None or h_abs_old is None or error_norm == 0:
+ multiplier = 1
+ else:
+ multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25
+
+ with np.errstate(divide='ignore'):
+ factor = min(1, multiplier) * error_norm ** -0.25
+
+ return factor
+
+
+class Radau(OdeSolver):
+ """Implicit Runge-Kutta method of Radau IIA family of order 5.
+
+ The implementation follows [1]_. The error is controlled with a
+ third-order accurate embedded formula. A cubic polynomial which satisfies
+ the collocation conditions is used for the dense output.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(t, y)``.
+ Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+ It can either have shape (n,); then ``fun`` must return array_like with
+ shape (n,). Alternatively it can have shape (n, k); then ``fun``
+ must return an array_like with shape (n, k), i.e., each column
+ corresponds to a single column in ``y``. The choice between the two
+ options is determined by `vectorized` argument (see below). The
+ vectorized implementation allows a faster approximation of the Jacobian
+ by finite differences (required for this solver).
+ t0 : float
+ Initial time.
+ y0 : array_like, shape (n,)
+ Initial state.
+ t_bound : float
+ Boundary time - the integration won't continue beyond it. It also
+ determines the direction of the integration.
+ first_step : float or None, optional
+ Initial step size. Default is ``None`` which means that the algorithm
+ should choose.
+ max_step : float, optional
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
+ bounded and determined solely by the solver.
+ rtol, atol : float and array_like, optional
+ Relative and absolute tolerances. The solver keeps the local error
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+ relative accuracy (number of correct digits). But if a component of `y`
+ is approximately below `atol`, the error only needs to fall within
+ the same `atol` threshold, and the number of correct digits is not
+ guaranteed. If components of y have different scales, it might be
+ beneficial to set different `atol` values for different components by
+ passing array_like with shape (n,) for `atol`. Default values are
+ 1e-3 for `rtol` and 1e-6 for `atol`.
+ jac : {None, array_like, sparse_matrix, callable}, optional
+ Jacobian matrix of the right-hand side of the system with respect to
+ y, required by this method. The Jacobian matrix has shape (n, n) and
+ its element (i, j) is equal to ``d f_i / d y_j``.
+ There are three ways to define the Jacobian:
+
+ * If array_like or sparse_matrix, the Jacobian is assumed to
+ be constant.
+ * If callable, the Jacobian is assumed to depend on both
+ t and y; it will be called as ``jac(t, y)`` as necessary.
+ For the 'Radau' and 'BDF' methods, the return value might be a
+ sparse matrix.
+ * If None (default), the Jacobian will be approximated by
+ finite differences.
+
+ It is generally recommended to provide the Jacobian rather than
+ relying on a finite-difference approximation.
+ jac_sparsity : {None, array_like, sparse matrix}, optional
+ Defines a sparsity structure of the Jacobian matrix for a
+ finite-difference approximation. Its shape must be (n, n). This argument
+ is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
+ elements in *each* row, providing the sparsity structure will greatly
+ speed up the computations [2]_. A zero entry means that a corresponding
+ element in the Jacobian is always zero. If None (default), the Jacobian
+ is assumed to be dense.
+ vectorized : bool, optional
+ Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+ Attributes
+ ----------
+ n : int
+ Number of equations.
+ status : string
+ Current status of the solver: 'running', 'finished' or 'failed'.
+ t_bound : float
+ Boundary time.
+ direction : float
+ Integration direction: +1 or -1.
+ t : float
+ Current time.
+ y : ndarray
+ Current state.
+ t_old : float
+ Previous time. None if no steps were made yet.
+ step_size : float
+ Size of the last successful step. None if no steps were made yet.
+ nfev : int
+ Number of evaluations of the right-hand side.
+ njev : int
+ Number of evaluations of the Jacobian.
+ nlu : int
+ Number of LU decompositions.
+
+ References
+ ----------
+ .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
+ Stiff and Differential-Algebraic Problems", Sec. IV.8.
+ .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+ sparse Jacobian matrices", Journal of the Institute of Mathematics
+ and its Applications, 13, pp. 117-120, 1974.
+ """
+ def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+ rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
+ vectorized=False, first_step=None, **extraneous):
+ warn_extraneous(extraneous)
+ super(Radau, self).__init__(fun, t0, y0, t_bound, vectorized)
+ self.y_old = None
+ self.max_step = validate_max_step(max_step)
+ self.rtol, self.atol = validate_tol(rtol, atol, self.n)
+ self.f = self.fun(self.t, self.y)
+ # Select initial step assuming the same order which is used to control
+ # the error.
+ if first_step is None:
+ self.h_abs = select_initial_step(
+ self.fun, self.t, self.y, self.f, self.direction,
+ 3, self.rtol, self.atol)
+ else:
+ self.h_abs = validate_first_step(first_step, t0, t_bound)
+ self.h_abs_old = None
+ self.error_norm_old = None
+
+ self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
+ self.sol = None
+
+ self.jac_factor = None
+ self.jac, self.J = self._validate_jac(jac, jac_sparsity)
+ if issparse(self.J):
+ def lu(A):
+ self.nlu += 1
+ return splu(A)
+
+ def solve_lu(LU, b):
+ return LU.solve(b)
+
+ I = eye(self.n, format='csc')
+ else:
+ def lu(A):
+ self.nlu += 1
+ return lu_factor(A, overwrite_a=True)
+
+ def solve_lu(LU, b):
+ return lu_solve(LU, b, overwrite_b=True)
+
+ I = np.identity(self.n)
+
+ self.lu = lu
+ self.solve_lu = solve_lu
+ self.I = I
+
+ self.current_jac = True
+ self.LU_real = None
+ self.LU_complex = None
+ self.Z = None
+
+ def _validate_jac(self, jac, sparsity):
+ t0 = self.t
+ y0 = self.y
+
+ if jac is None:
+ if sparsity is not None:
+ if issparse(sparsity):
+ sparsity = csc_matrix(sparsity)
+ groups = group_columns(sparsity)
+ sparsity = (sparsity, groups)
+
+ def jac_wrapped(t, y, f):
+ self.njev += 1
+ J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
+ self.atol, self.jac_factor,
+ sparsity)
+ return J
+ J = jac_wrapped(t0, y0, self.f)
+ elif callable(jac):
+ J = jac(t0, y0)
+ self.njev = 1
+ if issparse(J):
+ J = csc_matrix(J)
+
+ def jac_wrapped(t, y, _=None):
+ self.njev += 1
+ return csc_matrix(jac(t, y), dtype=float)
+
+ else:
+ J = np.asarray(J, dtype=float)
+
+ def jac_wrapped(t, y, _=None):
+ self.njev += 1
+ return np.asarray(jac(t, y), dtype=float)
+
+ if J.shape != (self.n, self.n):
+ raise ValueError("`jac` is expected to have shape {}, but "
+ "actually has {}."
+ .format((self.n, self.n), J.shape))
+ else:
+ if issparse(jac):
+ J = csc_matrix(jac)
+ else:
+ J = np.asarray(jac, dtype=float)
+
+ if J.shape != (self.n, self.n):
+ raise ValueError("`jac` is expected to have shape {}, but "
+ "actually has {}."
+ .format((self.n, self.n), J.shape))
+ jac_wrapped = None
+
+ return jac_wrapped, J
+
+ def _step_impl(self):
+ t = self.t
+ y = self.y
+ f = self.f
+
+ max_step = self.max_step
+ atol = self.atol
+ rtol = self.rtol
+
+ min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
+ if self.h_abs > max_step:
+ h_abs = max_step
+ h_abs_old = None
+ error_norm_old = None
+ elif self.h_abs < min_step:
+ h_abs = min_step
+ h_abs_old = None
+ error_norm_old = None
+ else:
+ h_abs = self.h_abs
+ h_abs_old = self.h_abs_old
+ error_norm_old = self.error_norm_old
+
+ J = self.J
+ LU_real = self.LU_real
+ LU_complex = self.LU_complex
+
+ current_jac = self.current_jac
+ jac = self.jac
+
+ rejected = False
+ step_accepted = False
+ message = None
+ while not step_accepted:
+ if h_abs < min_step:
+ return False, self.TOO_SMALL_STEP
+
+ h = h_abs * self.direction
+ t_new = t + h
+
+ if self.direction * (t_new - self.t_bound) > 0:
+ t_new = self.t_bound
+
+ h = t_new - t
+ h_abs = np.abs(h)
+
+ if self.sol is None:
+ Z0 = np.zeros((3, y.shape[0]))
+ else:
+ Z0 = self.sol(t + h * C).T - y
+
+ scale = atol + np.abs(y) * rtol
+
+ converged = False
+ while not converged:
+ if LU_real is None or LU_complex is None:
+ LU_real = self.lu(MU_REAL / h * self.I - J)
+ LU_complex = self.lu(MU_COMPLEX / h * self.I - J)
+
+ converged, n_iter, Z, rate = solve_collocation_system(
+ self.fun, t, y, h, Z0, scale, self.newton_tol,
+ LU_real, LU_complex, self.solve_lu)
+
+ if not converged:
+ if current_jac:
+ break
+
+ J = self.jac(t, y, f)
+ current_jac = True
+ LU_real = None
+ LU_complex = None
+
+ if not converged:
+ h_abs *= 0.5
+ LU_real = None
+ LU_complex = None
+ continue
+
+ y_new = y + Z[-1]
+ ZE = Z.T.dot(E) / h
+ error = self.solve_lu(LU_real, f + ZE)
+ scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
+ error_norm = norm(error / scale)
+ safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+ + n_iter)
+
+ if rejected and error_norm > 1:
+ error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
+ error_norm = norm(error / scale)
+
+ if error_norm > 1:
+ factor = predict_factor(h_abs, h_abs_old,
+ error_norm, error_norm_old)
+ h_abs *= max(MIN_FACTOR, safety * factor)
+
+ LU_real = None
+ LU_complex = None
+ rejected = True
+ else:
+ step_accepted = True
+
+ recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3
+
+ factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
+ factor = min(MAX_FACTOR, safety * factor)
+
+ if not recompute_jac and factor < 1.2:
+ factor = 1
+ else:
+ LU_real = None
+ LU_complex = None
+
+ f_new = self.fun(t_new, y_new)
+ if recompute_jac:
+ J = jac(t_new, y_new, f_new)
+ current_jac = True
+ elif jac is not None:
+ current_jac = False
+
+ self.h_abs_old = self.h_abs
+ self.error_norm_old = error_norm
+
+ self.h_abs = h_abs * factor
+
+ self.y_old = y
+
+ self.t = t_new
+ self.y = y_new
+ self.f = f_new
+
+ self.Z = Z
+
+ self.LU_real = LU_real
+ self.LU_complex = LU_complex
+ self.current_jac = current_jac
+ self.J = J
+
+ self.t_old = t
+ self.sol = self._compute_dense_output()
+
+ return step_accepted, message
+
+ def _compute_dense_output(self):
+ Q = np.dot(self.Z.T, P)
+ return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)
+
+ def _dense_output_impl(self):
+ return self.sol
+
+
+class RadauDenseOutput(DenseOutput):
+ def __init__(self, t_old, t, y_old, Q):
+ super(RadauDenseOutput, self).__init__(t_old, t)
+ self.h = t - t_old
+ self.Q = Q
+ self.order = Q.shape[1] - 1
+ self.y_old = y_old
+
+ def _call_impl(self, t):
+ x = (t - self.t_old) / self.h
+ if t.ndim == 0:
+ p = np.tile(x, self.order + 1)
+ p = np.cumprod(p)
+ else:
+ p = np.tile(x, (self.order + 1, 1))
+ p = np.cumprod(p, axis=0)
+ # Here we don't multiply by h, not a mistake.
+ y = np.dot(self.Q, p)
+ if y.ndim == 2:
+ y += self.y_old[:, None]
+ else:
+ y += self.y_old
+
+ return y
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/rk.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/rk.py
new file mode 100644
index 0000000..4542a9b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/rk.py
@@ -0,0 +1,576 @@
+import numpy as np
+from .base import OdeSolver, DenseOutput
+from .common import (validate_max_step, validate_tol, select_initial_step,
+ norm, warn_extraneous, validate_first_step)
+from . import dop853_coefficients
+
+# Multiply steps computed from asymptotic behaviour of errors by this.
+SAFETY = 0.9
+
+MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
+MAX_FACTOR = 10 # Maximum allowed increase in a step size.
+
+
+def rk_step(fun, t, y, f, h, A, B, C, K):
+ """Perform a single Runge-Kutta step.
+
+ This function computes a prediction of an explicit Runge-Kutta method and
+ also estimates the error of a less accurate method.
+
+ Notation for Butcher tableau is as in [1]_.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system.
+ t : float
+ Current time.
+ y : ndarray, shape (n,)
+ Current state.
+ f : ndarray, shape (n,)
+ Current value of the derivative, i.e., ``fun(x, y)``.
+ h : float
+ Step to use.
+ A : ndarray, shape (n_stages, n_stages)
+ Coefficients for combining previous RK stages to compute the next
+ stage. For explicit methods the coefficients at and above the main
+ diagonal are zeros.
+ B : ndarray, shape (n_stages,)
+ Coefficients for combining RK stages for computing the final
+ prediction.
+ C : ndarray, shape (n_stages,)
+ Coefficients for incrementing time for consecutive RK stages.
+ The value for the first stage is always zero.
+ K : ndarray, shape (n_stages + 1, n)
+ Storage array for putting RK stages here. Stages are stored in rows.
+ The last row is a linear combination of the previous rows with
+ coefficients
+
+ Returns
+ -------
+ y_new : ndarray, shape (n,)
+ Solution at t + h computed with a higher accuracy.
+ f_new : ndarray, shape (n,)
+ Derivative ``fun(t + h, y_new)``.
+
+ References
+ ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+ Equations I: Nonstiff Problems", Sec. II.4.
+ """
+ K[0] = f
+ for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):
+ dy = np.dot(K[:s].T, a[:s]) * h
+ K[s] = fun(t + c * h, y + dy)
+
+ y_new = y + h * np.dot(K[:-1].T, B)
+ f_new = fun(t + h, y_new)
+
+ K[-1] = f_new
+
+ return y_new, f_new
+
+
+class RungeKutta(OdeSolver):
+ """Base class for explicit Runge-Kutta methods."""
+ C = NotImplemented
+ A = NotImplemented
+ B = NotImplemented
+ E = NotImplemented
+ P = NotImplemented
+ order = NotImplemented
+ error_estimator_order = NotImplemented
+ n_stages = NotImplemented
+
+ def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+ rtol=1e-3, atol=1e-6, vectorized=False,
+ first_step=None, **extraneous):
+ warn_extraneous(extraneous)
+ super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
+ support_complex=True)
+ self.y_old = None
+ self.max_step = validate_max_step(max_step)
+ self.rtol, self.atol = validate_tol(rtol, atol, self.n)
+ self.f = self.fun(self.t, self.y)
+ if first_step is None:
+ self.h_abs = select_initial_step(
+ self.fun, self.t, self.y, self.f, self.direction,
+ self.error_estimator_order, self.rtol, self.atol)
+ else:
+ self.h_abs = validate_first_step(first_step, t0, t_bound)
+ self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
+ self.error_exponent = -1 / (self.error_estimator_order + 1)
+ self.h_previous = None
+
+ def _estimate_error(self, K, h):
+ return np.dot(K.T, self.E) * h
+
+ def _estimate_error_norm(self, K, h, scale):
+ return norm(self._estimate_error(K, h) / scale)
+
+ def _step_impl(self):
+ t = self.t
+ y = self.y
+
+ max_step = self.max_step
+ rtol = self.rtol
+ atol = self.atol
+
+ min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
+
+ if self.h_abs > max_step:
+ h_abs = max_step
+ elif self.h_abs < min_step:
+ h_abs = min_step
+ else:
+ h_abs = self.h_abs
+
+ step_accepted = False
+ step_rejected = False
+
+ while not step_accepted:
+ if h_abs < min_step:
+ return False, self.TOO_SMALL_STEP
+
+ h = h_abs * self.direction
+ t_new = t + h
+
+ if self.direction * (t_new - self.t_bound) > 0:
+ t_new = self.t_bound
+
+ h = t_new - t
+ h_abs = np.abs(h)
+
+ y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
+ self.B, self.C, self.K)
+ scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
+ error_norm = self._estimate_error_norm(self.K, h, scale)
+
+ if error_norm < 1:
+ if error_norm == 0:
+ factor = MAX_FACTOR
+ else:
+ factor = min(MAX_FACTOR,
+ SAFETY * error_norm ** self.error_exponent)
+
+ if step_rejected:
+ factor = min(1, factor)
+
+ h_abs *= factor
+
+ step_accepted = True
+ else:
+ h_abs *= max(MIN_FACTOR,
+ SAFETY * error_norm ** self.error_exponent)
+ step_rejected = True
+
+ self.h_previous = h
+ self.y_old = y
+
+ self.t = t_new
+ self.y = y_new
+
+ self.h_abs = h_abs
+ self.f = f_new
+
+ return True, None
+
+ def _dense_output_impl(self):
+ Q = self.K.T.dot(self.P)
+ return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
+
+
+class RK23(RungeKutta):
+ """Explicit Runge-Kutta method of order 3(2).
+
+ This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
+ assuming accuracy of the second-order method, but steps are taken using the
+ third-order accurate formula (local extrapolation is done). A cubic Hermite
+ polynomial is used for the dense output.
+
+ Can be applied in the complex domain.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(t, y)``.
+ Here ``t`` is a scalar and there are two options for ndarray ``y``.
+ It can either have shape (n,), then ``fun`` must return array_like with
+ shape (n,). Or alternatively it can have shape (n, k), then ``fun``
+ must return array_like with shape (n, k), i.e. each column
+ corresponds to a single column in ``y``. The choice between the two
+ options is determined by `vectorized` argument (see below).
+ t0 : float
+ Initial time.
+ y0 : array_like, shape (n,)
+ Initial state.
+ t_bound : float
+ Boundary time - the integration won't continue beyond it. It also
+ determines the direction of the integration.
+ first_step : float or None, optional
+ Initial step size. Default is ``None`` which means that the algorithm
+ should choose.
+ max_step : float, optional
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
+ bounded and determined solely by the solver.
+ rtol, atol : float and array_like, optional
+ Relative and absolute tolerances. The solver keeps the local error
+ estimates less than ``atol + rtol * abs(y)``. Here, `rtol` controls a
+ relative accuracy (number of correct digits). But if a component of `y`
+ is approximately below `atol`, the error only needs to fall within
+ the same `atol` threshold, and the number of correct digits is not
+ guaranteed. If components of y have different scales, it might be
+ beneficial to set different `atol` values for different components by
+ passing array_like with shape (n,) for `atol`. Default values are
+ 1e-3 for `rtol` and 1e-6 for `atol`.
+ vectorized : bool, optional
+ Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+ Attributes
+ ----------
+ n : int
+ Number of equations.
+ status : string
+ Current status of the solver: 'running', 'finished' or 'failed'.
+ t_bound : float
+ Boundary time.
+ direction : float
+ Integration direction: +1 or -1.
+ t : float
+ Current time.
+ y : ndarray
+ Current state.
+ t_old : float
+ Previous time. None if no steps were made yet.
+ step_size : float
+ Size of the last successful step. None if no steps were made yet.
+ nfev : int
+        Number of evaluations of the system's right-hand side.
+ njev : int
+ Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
+ nlu : int
+ Number of LU decompositions. Is always 0 for this solver.
+
+ References
+ ----------
+ .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
+ Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
+ """
+ order = 3
+ error_estimator_order = 2
+ n_stages = 3
+ C = np.array([0, 1/2, 3/4])
+ A = np.array([
+ [0, 0, 0],
+ [1/2, 0, 0],
+ [0, 3/4, 0]
+ ])
+ B = np.array([2/9, 1/3, 4/9])
+ E = np.array([5/72, -1/12, -1/9, 1/8])
+ P = np.array([[1, -4 / 3, 5 / 9],
+ [0, 1, -2/3],
+ [0, 4/3, -8/9],
+ [0, -1, 1]])
+
+
+class RK45(RungeKutta):
+ """Explicit Runge-Kutta method of order 5(4).
+
+ This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
+ assuming accuracy of the fourth-order method accuracy, but steps are taken
+ using the fifth-order accurate formula (local extrapolation is done).
+ A quartic interpolation polynomial is used for the dense output [2]_.
+
+ Can be applied in the complex domain.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(t, y)``.
+ Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+ It can either have shape (n,); then ``fun`` must return array_like with
+ shape (n,). Alternatively it can have shape (n, k); then ``fun``
+ must return an array_like with shape (n, k), i.e., each column
+ corresponds to a single column in ``y``. The choice between the two
+ options is determined by `vectorized` argument (see below).
+ t0 : float
+ Initial time.
+ y0 : array_like, shape (n,)
+ Initial state.
+ t_bound : float
+ Boundary time - the integration won't continue beyond it. It also
+ determines the direction of the integration.
+ first_step : float or None, optional
+ Initial step size. Default is ``None`` which means that the algorithm
+ should choose.
+ max_step : float, optional
+ Maximum allowed step size. Default is np.inf, i.e., the step size is not
+ bounded and determined solely by the solver.
+ rtol, atol : float and array_like, optional
+ Relative and absolute tolerances. The solver keeps the local error
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+ relative accuracy (number of correct digits). But if a component of `y`
+ is approximately below `atol`, the error only needs to fall within
+ the same `atol` threshold, and the number of correct digits is not
+ guaranteed. If components of y have different scales, it might be
+ beneficial to set different `atol` values for different components by
+ passing array_like with shape (n,) for `atol`. Default values are
+ 1e-3 for `rtol` and 1e-6 for `atol`.
+ vectorized : bool, optional
+ Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+ Attributes
+ ----------
+ n : int
+ Number of equations.
+ status : string
+ Current status of the solver: 'running', 'finished' or 'failed'.
+ t_bound : float
+ Boundary time.
+ direction : float
+ Integration direction: +1 or -1.
+ t : float
+ Current time.
+ y : ndarray
+ Current state.
+ t_old : float
+ Previous time. None if no steps were made yet.
+ step_size : float
+ Size of the last successful step. None if no steps were made yet.
+ nfev : int
+        Number of evaluations of the system's right-hand side.
+ njev : int
+ Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
+ nlu : int
+ Number of LU decompositions. Is always 0 for this solver.
+
+ References
+ ----------
+ .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+ formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+ No. 1, pp. 19-26, 1980.
+    .. [2] L. F. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+       of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
+ """
+ order = 5
+ error_estimator_order = 4
+ n_stages = 6
+ C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
+ A = np.array([
+ [0, 0, 0, 0, 0],
+ [1/5, 0, 0, 0, 0],
+ [3/40, 9/40, 0, 0, 0],
+ [44/45, -56/15, 32/9, 0, 0],
+ [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
+ [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
+ ])
+ B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
+ E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
+ 1/40])
+ # Corresponds to the optimum value of c_6 from [2]_.
+ P = np.array([
+ [1, -8048581381/2820520608, 8663915743/2820520608,
+ -12715105075/11282082432],
+ [0, 0, 0, 0],
+ [0, 131558114200/32700410799, -68118460800/10900136933,
+ 87487479700/32700410799],
+ [0, -1754552775/470086768, 14199869525/1410260304,
+ -10690763975/1880347072],
+ [0, 127303824393/49829197408, -318862633887/49829197408,
+ 701980252875 / 199316789632],
+ [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
+ [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
+
+
+class DOP853(RungeKutta):
+ """Explicit Runge-Kutta method of order 8.
+
+ This is a Python implementation of "DOP853" algorithm originally written
+ in Fortran [1]_, [2]_. Note that this is not a literate translation, but
+ the algorithmic core and coefficients are the same.
+
+ Can be applied in the complex domain.
+
+ Parameters
+ ----------
+ fun : callable
+ Right-hand side of the system. The calling signature is ``fun(t, y)``.
+ Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
+ It can either have shape (n,); then ``fun`` must return array_like with
+ shape (n,). Alternatively it can have shape (n, k); then ``fun``
+ must return an array_like with shape (n, k), i.e. each column
+ corresponds to a single column in ``y``. The choice between the two
+ options is determined by `vectorized` argument (see below).
+ t0 : float
+ Initial time.
+ y0 : array_like, shape (n,)
+ Initial state.
+ t_bound : float
+ Boundary time - the integration won't continue beyond it. It also
+ determines the direction of the integration.
+ first_step : float or None, optional
+ Initial step size. Default is ``None`` which means that the algorithm
+ should choose.
+ max_step : float, optional
+ Maximum allowed step size. Default is np.inf, i.e. the step size is not
+ bounded and determined solely by the solver.
+ rtol, atol : float and array_like, optional
+ Relative and absolute tolerances. The solver keeps the local error
+ estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+ relative accuracy (number of correct digits). But if a component of `y`
+ is approximately below `atol`, the error only needs to fall within
+ the same `atol` threshold, and the number of correct digits is not
+ guaranteed. If components of y have different scales, it might be
+ beneficial to set different `atol` values for different components by
+ passing array_like with shape (n,) for `atol`. Default values are
+ 1e-3 for `rtol` and 1e-6 for `atol`.
+ vectorized : bool, optional
+ Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+ Attributes
+ ----------
+ n : int
+ Number of equations.
+ status : string
+ Current status of the solver: 'running', 'finished' or 'failed'.
+ t_bound : float
+ Boundary time.
+ direction : float
+ Integration direction: +1 or -1.
+ t : float
+ Current time.
+ y : ndarray
+ Current state.
+ t_old : float
+ Previous time. None if no steps were made yet.
+ step_size : float
+ Size of the last successful step. None if no steps were made yet.
+ nfev : int
+        Number of evaluations of the system's right-hand side.
+ njev : int
+ Number of evaluations of the Jacobian. Is always 0 for this solver
+ as it does not use the Jacobian.
+ nlu : int
+ Number of LU decompositions. Is always 0 for this solver.
+
+ References
+ ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+ Equations I: Nonstiff Problems", Sec. II.
+ .. [2] `Page with original Fortran code of DOP853
+ `_.
+ """
+ n_stages = dop853_coefficients.N_STAGES
+ order = 8
+ error_estimator_order = 7
+ A = dop853_coefficients.A[:n_stages, :n_stages]
+ B = dop853_coefficients.B
+ C = dop853_coefficients.C[:n_stages]
+ E3 = dop853_coefficients.E3
+ E5 = dop853_coefficients.E5
+ D = dop853_coefficients.D
+
+ A_EXTRA = dop853_coefficients.A[n_stages + 1:]
+ C_EXTRA = dop853_coefficients.C[n_stages + 1:]
+
+ def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+ rtol=1e-3, atol=1e-6, vectorized=False,
+ first_step=None, **extraneous):
+ super(DOP853, self).__init__(fun, t0, y0, t_bound, max_step,
+ rtol, atol, vectorized, first_step,
+ **extraneous)
+ self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
+ self.n), dtype=self.y.dtype)
+ self.K = self.K_extended[:self.n_stages + 1]
+
+ def _estimate_error(self, K, h): # Left for testing purposes.
+ err5 = np.dot(K.T, self.E5)
+ err3 = np.dot(K.T, self.E3)
+ denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
+ correction_factor = np.ones_like(err5)
+ mask = denom > 0
+ correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
+ return h * err5 * correction_factor
+
+ def _estimate_error_norm(self, K, h, scale):
+ err5 = np.dot(K.T, self.E5) / scale
+ err3 = np.dot(K.T, self.E3) / scale
+ err5_norm_2 = np.linalg.norm(err5)**2
+ err3_norm_2 = np.linalg.norm(err3)**2
+ if err5_norm_2 == 0 and err3_norm_2 == 0:
+ return 0.0
+ denom = err5_norm_2 + 0.01 * err3_norm_2
+ return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))
+
+ def _dense_output_impl(self):
+ K = self.K_extended
+ h = self.h_previous
+ for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
+ start=self.n_stages + 1):
+ dy = np.dot(K[:s].T, a[:s]) * h
+ K[s] = self.fun(self.t_old + c * h, self.y_old + dy)
+
+ F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
+ dtype=self.y_old.dtype)
+
+ f_old = K[0]
+ delta_y = self.y - self.y_old
+
+ F[0] = delta_y
+ F[1] = h * f_old - delta_y
+ F[2] = 2 * delta_y - h * (self.f + f_old)
+ F[3:] = h * np.dot(self.D, K)
+
+ return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
+
+
+class RkDenseOutput(DenseOutput):
+ def __init__(self, t_old, t, y_old, Q):
+ super(RkDenseOutput, self).__init__(t_old, t)
+ self.h = t - t_old
+ self.Q = Q
+ self.order = Q.shape[1] - 1
+ self.y_old = y_old
+
+ def _call_impl(self, t):
+ x = (t - self.t_old) / self.h
+ if t.ndim == 0:
+ p = np.tile(x, self.order + 1)
+ p = np.cumprod(p)
+ else:
+ p = np.tile(x, (self.order + 1, 1))
+ p = np.cumprod(p, axis=0)
+ y = self.h * np.dot(self.Q, p)
+ if y.ndim == 2:
+ y += self.y_old[:, None]
+ else:
+ y += self.y_old
+
+ return y
+
+
+class Dop853DenseOutput(DenseOutput):
+ def __init__(self, t_old, t, y_old, F):
+ super(Dop853DenseOutput, self).__init__(t_old, t)
+ self.h = t - t_old
+ self.F = F
+ self.y_old = y_old
+
+ def _call_impl(self, t):
+ x = (t - self.t_old) / self.h
+
+ if t.ndim == 0:
+ y = np.zeros_like(self.y_old)
+ else:
+ x = x[:, None]
+ y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
+
+ for i, f in enumerate(reversed(self.F)):
+ y += f
+ if i % 2 == 0:
+ y *= x
+ else:
+ y *= 1 - x
+ y += self.y_old
+
+ return y.T
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/setup.py
new file mode 100644
index 0000000..006afc3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/setup.py
@@ -0,0 +1,12 @@
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration('_ivp', parent_package, top_path)
+ config.add_data_dir('tests')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/tests/test_ivp.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/tests/test_ivp.py
new file mode 100644
index 0000000..6513b7f
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/tests/test_ivp.py
@@ -0,0 +1,982 @@
+from itertools import product
+from numpy.testing import (assert_, assert_allclose,
+ assert_equal, assert_no_warnings, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+import numpy as np
+from scipy.optimize._numdiff import group_columns
+from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA
+from scipy.integrate import OdeSolution
+from scipy.integrate._ivp.common import num_jac
+from scipy.integrate._ivp.base import ConstantDenseOutput
+from scipy.sparse import coo_matrix, csc_matrix
+
+
+def fun_zero(t, y):
+ return np.zeros_like(y)
+
+
+def fun_linear(t, y):
+ return np.array([-y[0] - 5 * y[1], y[0] + y[1]])
+
+
+def jac_linear():
+ return np.array([[-1, -5], [1, 1]])
+
+
+def sol_linear(t):
+ return np.vstack((-5 * np.sin(2 * t),
+ 2 * np.cos(2 * t) + np.sin(2 * t)))
+
+
+def fun_rational(t, y):
+ return np.array([y[1] / t,
+ y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))])
+
+
+def fun_rational_vectorized(t, y):
+ return np.vstack((y[1] / t,
+ y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))))
+
+
+def jac_rational(t, y):
+ return np.array([
+ [0, 1 / t],
+ [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
+ (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
+ ])
+
+
+def jac_rational_sparse(t, y):
+ return csc_matrix([
+ [0, 1 / t],
+ [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
+ (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
+ ])
+
+
+def sol_rational(t):
+ return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))
+
+
+def fun_medazko(t, y):
+ n = y.shape[0] // 2
+ k = 100
+ c = 4
+
+ phi = 2 if t <= 5 else 0
+ y = np.hstack((phi, 0, y, y[-2]))
+
+ d = 1 / n
+ j = np.arange(n) + 1
+ alpha = 2 * (j * d - 1) ** 3 / c ** 2
+ beta = (j * d - 1) ** 4 / c ** 2
+
+ j_2_p1 = 2 * j + 2
+ j_2_m3 = 2 * j - 2
+ j_2_m1 = 2 * j
+ j_2 = 2 * j + 1
+
+ f = np.empty(2 * n)
+ f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
+ beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
+ k * y[j_2_m1] * y[j_2])
+ f[1::2] = -k * y[j_2] * y[j_2_m1]
+
+ return f
+
+
+def medazko_sparsity(n):
+ cols = []
+ rows = []
+
+ i = np.arange(n) * 2
+
+ cols.append(i[1:])
+ rows.append(i[1:] - 2)
+
+ cols.append(i)
+ rows.append(i)
+
+ cols.append(i)
+ rows.append(i + 1)
+
+ cols.append(i[:-1])
+ rows.append(i[:-1] + 2)
+
+ i = np.arange(n) * 2 + 1
+
+ cols.append(i)
+ rows.append(i)
+
+ cols.append(i)
+ rows.append(i - 1)
+
+ cols = np.hstack(cols)
+ rows = np.hstack(rows)
+
+ return coo_matrix((np.ones_like(cols), (cols, rows)))
+
+
+def fun_complex(t, y):
+ return -y
+
+
+def jac_complex(t, y):
+ return -np.eye(y.shape[0])
+
+
+def jac_complex_sparse(t, y):
+ return csc_matrix(jac_complex(t, y))
+
+
+def sol_complex(t):
+ y = (0.5 + 1j) * np.exp(-t)
+ return y.reshape((1, -1))
+
+
+def compute_error(y, y_true, rtol, atol):
+ e = (y - y_true) / (atol + rtol * np.abs(y_true))
+ return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0])
+
+
+def test_integration():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [1/3, 2/9]
+
+ for vectorized, method, t_span, jac in product(
+ [False, True],
+ ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
+ [[5, 9], [5, 1]],
+ [None, jac_rational, jac_rational_sparse]):
+
+ if vectorized:
+ fun = fun_rational_vectorized
+ else:
+ fun = fun_rational
+
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ "The following arguments have no effect for a chosen "
+ "solver: `jac`")
+ res = solve_ivp(fun, t_span, y0, rtol=rtol,
+ atol=atol, method=method, dense_output=True,
+ jac=jac, vectorized=vectorized)
+ assert_equal(res.t[0], t_span[0])
+ assert_(res.t_events is None)
+ assert_(res.y_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ if method == 'DOP853':
+            # DOP853 spends more function evaluations because it doesn't
+            # have enough time to develop a big enough step size.
+ assert_(res.nfev < 50)
+ else:
+ assert_(res.nfev < 40)
+
+ if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:
+ assert_equal(res.njev, 0)
+ assert_equal(res.nlu, 0)
+ else:
+ assert_(0 < res.njev < 3)
+ assert_(0 < res.nlu < 10)
+
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ tc = np.linspace(*t_span)
+ yc_true = sol_rational(tc)
+ yc = res.sol(tc)
+
+ e = compute_error(yc, yc_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ tc = (t_span[0] + t_span[-1]) / 2
+ yc_true = sol_rational(tc)
+ yc = res.sol(tc)
+
+ e = compute_error(yc, yc_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+        # LSODA for some reason doesn't pass the polynomial through the
+        # previous points exactly after the order change. It might be some
+        # bug in the LSODA implementation or maybe we are missing something.
+ if method != 'LSODA':
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+
+def test_integration_complex():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [0.5 + 1j]
+ t_span = [0, 1]
+ tc = np.linspace(t_span[0], t_span[1])
+ for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],
+ [None, jac_complex, jac_complex_sparse]):
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ "The following arguments have no effect for a chosen "
+ "solver: `jac`")
+ res = solve_ivp(fun_complex, t_span, y0, method=method,
+ dense_output=True, rtol=rtol, atol=atol, jac=jac)
+
+ assert_equal(res.t[0], t_span[0])
+ assert_(res.t_events is None)
+ assert_(res.y_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ if method == 'DOP853':
+ assert res.nfev < 35
+ else:
+ assert res.nfev < 25
+
+ if method == 'BDF':
+ assert_equal(res.njev, 1)
+ assert res.nlu < 6
+ else:
+ assert res.njev == 0
+ assert res.nlu == 0
+
+ y_true = sol_complex(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert np.all(e < 5)
+
+ yc_true = sol_complex(tc)
+ yc = res.sol(tc)
+ e = compute_error(yc, yc_true, rtol, atol)
+
+ assert np.all(e < 5)
+
+
+def test_integration_sparse_difference():
+ n = 200
+ t_span = [0, 20]
+ y0 = np.zeros(2 * n)
+ y0[1::2] = 1
+ sparsity = medazko_sparsity(n)
+
+ for method in ['BDF', 'Radau']:
+ res = solve_ivp(fun_medazko, t_span, y0, method=method,
+ jac_sparsity=sparsity)
+
+ assert_equal(res.t[0], t_span[0])
+ assert_(res.t_events is None)
+ assert_(res.y_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
+ assert_allclose(res.y[79, -1], 0, atol=1e-3)
+ assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
+ assert_allclose(res.y[149, -1], 0, atol=1e-3)
+ assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
+ assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
+ assert_allclose(res.y[238, -1], 0, atol=1e-3)
+ assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
+
+
+def test_integration_const_jac():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [0, 2]
+ t_span = [0, 2]
+ J = jac_linear()
+ J_sparse = csc_matrix(J)
+
+ for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
+ res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
+ method=method, dense_output=True, jac=jac)
+ assert_equal(res.t[0], t_span[0])
+ assert_(res.t_events is None)
+ assert_(res.y_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ assert_(res.nfev < 100)
+ assert_equal(res.njev, 0)
+ assert_(0 < res.nlu < 15)
+
+ y_true = sol_linear(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 10))
+
+ tc = np.linspace(*t_span)
+ yc_true = sol_linear(tc)
+ yc = res.sol(tc)
+
+ e = compute_error(yc, yc_true, rtol, atol)
+ assert_(np.all(e < 15))
+
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA'])
+def test_integration_stiff(method):
+ rtol = 1e-6
+ atol = 1e-6
+ y0 = [1e4, 0, 0]
+ tspan = [0, 1e8]
+
+ def fun_robertson(t, state):
+ x, y, z = state
+ return [
+ -0.04 * x + 1e4 * y * z,
+ 0.04 * x - 1e4 * y * z - 3e7 * y * y,
+ 3e7 * y * y,
+ ]
+
+ res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,
+ atol=atol, method=method)
+
+ # If the stiff mode is not activated correctly, these numbers will be much bigger
+ assert res.nfev < 5000
+ assert res.njev < 200
+
+
+def test_events():
+ def event_rational_1(t, y):
+ return y[0] - y[1] ** 0.7
+
+ def event_rational_2(t, y):
+ return y[1] ** 0.6 - y[0]
+
+ def event_rational_3(t, y):
+ return t - 7.4
+
+ event_rational_3.terminal = True
+
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+ res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
+ events=(event_rational_1, event_rational_2))
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 1)
+ assert_equal(res.t_events[1].size, 1)
+ assert_(5.3 < res.t_events[0][0] < 5.7)
+ assert_(7.3 < res.t_events[1][0] < 7.7)
+
+ assert_equal(res.y_events[0].shape, (1, 2))
+ assert_equal(res.y_events[1].shape, (1, 2))
+ assert np.isclose(
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+ assert np.isclose(
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+ event_rational_1.direction = 1
+ event_rational_2.direction = 1
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+ events=(event_rational_1, event_rational_2))
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 1)
+ assert_equal(res.t_events[1].size, 0)
+ assert_(5.3 < res.t_events[0][0] < 5.7)
+ assert_equal(res.y_events[0].shape, (1, 2))
+ assert_equal(res.y_events[1].shape, (0,))
+ assert np.isclose(
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+
+ event_rational_1.direction = -1
+ event_rational_2.direction = -1
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+ events=(event_rational_1, event_rational_2))
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 0)
+ assert_equal(res.t_events[1].size, 1)
+ assert_(7.3 < res.t_events[1][0] < 7.7)
+ assert_equal(res.y_events[0].shape, (0,))
+ assert_equal(res.y_events[1].shape, (1, 2))
+ assert np.isclose(
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+ event_rational_1.direction = 0
+ event_rational_2.direction = 0
+
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+ events=(event_rational_1, event_rational_2,
+ event_rational_3), dense_output=True)
+ assert_equal(res.status, 1)
+ assert_equal(res.t_events[0].size, 1)
+ assert_equal(res.t_events[1].size, 0)
+ assert_equal(res.t_events[2].size, 1)
+ assert_(5.3 < res.t_events[0][0] < 5.7)
+ assert_(7.3 < res.t_events[2][0] < 7.5)
+ assert_equal(res.y_events[0].shape, (1, 2))
+ assert_equal(res.y_events[1].shape, (0,))
+ assert_equal(res.y_events[2].shape, (1, 2))
+ assert np.isclose(
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+ assert np.isclose(
+ event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
+
+ res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+ events=event_rational_1, dense_output=True)
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 1)
+ assert_(5.3 < res.t_events[0][0] < 5.7)
+
+ assert_equal(res.y_events[0].shape, (1, 2))
+ assert np.isclose(
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+
+ # Also test that termination by event doesn't break interpolants.
+ tc = np.linspace(res.t[0], res.t[-1])
+ yc_true = sol_rational(tc)
+ yc = res.sol(tc)
+ e = compute_error(yc, yc_true, 1e-3, 1e-6)
+ assert_(np.all(e < 5))
+
+ # Test that the y_event matches solution
+ assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0], rtol=1e-3, atol=1e-6)
+
+ # Test in backward direction.
+ event_rational_1.direction = 0
+ event_rational_2.direction = 0
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+ events=(event_rational_1, event_rational_2))
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 1)
+ assert_equal(res.t_events[1].size, 1)
+ assert_(5.3 < res.t_events[0][0] < 5.7)
+ assert_(7.3 < res.t_events[1][0] < 7.7)
+
+ assert_equal(res.y_events[0].shape, (1, 2))
+ assert_equal(res.y_events[1].shape, (1, 2))
+ assert np.isclose(
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+ assert np.isclose(
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+ event_rational_1.direction = -1
+ event_rational_2.direction = -1
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+ events=(event_rational_1, event_rational_2))
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 1)
+ assert_equal(res.t_events[1].size, 0)
+ assert_(5.3 < res.t_events[0][0] < 5.7)
+
+ assert_equal(res.y_events[0].shape, (1, 2))
+ assert_equal(res.y_events[1].shape, (0,))
+ assert np.isclose(
+ event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+
+ event_rational_1.direction = 1
+ event_rational_2.direction = 1
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+ events=(event_rational_1, event_rational_2))
+ assert_equal(res.status, 0)
+ assert_equal(res.t_events[0].size, 0)
+ assert_equal(res.t_events[1].size, 1)
+ assert_(7.3 < res.t_events[1][0] < 7.7)
+
+ assert_equal(res.y_events[0].shape, (0,))
+ assert_equal(res.y_events[1].shape, (1, 2))
+ assert np.isclose(
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+ event_rational_1.direction = 0
+ event_rational_2.direction = 0
+
+ res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+ events=(event_rational_1, event_rational_2,
+ event_rational_3), dense_output=True)
+ assert_equal(res.status, 1)
+ assert_equal(res.t_events[0].size, 0)
+ assert_equal(res.t_events[1].size, 1)
+ assert_equal(res.t_events[2].size, 1)
+ assert_(7.3 < res.t_events[1][0] < 7.7)
+ assert_(7.3 < res.t_events[2][0] < 7.5)
+
+ assert_equal(res.y_events[0].shape, (0,))
+ assert_equal(res.y_events[1].shape, (1, 2))
+ assert_equal(res.y_events[2].shape, (1, 2))
+ assert np.isclose(
+ event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+ assert np.isclose(
+ event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
+
+ # Also test that termination by event doesn't break interpolants.
+ tc = np.linspace(res.t[-1], res.t[0])
+ yc_true = sol_rational(tc)
+ yc = res.sol(tc)
+ e = compute_error(yc, yc_true, 1e-3, 1e-6)
+ assert_(np.all(e < 5))
+
+ assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0], rtol=1e-3, atol=1e-6)
+ assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0], rtol=1e-3, atol=1e-6)
+
+
+def test_max_step():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [1/3, 2/9]
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+ for t_span in ([5, 9], [5, 1]):
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
+ max_step=0.5, atol=atol, method=method,
+ dense_output=True)
+ assert_equal(res.t[0], t_span[0])
+ assert_equal(res.t[-1], t_span[-1])
+ assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ tc = np.linspace(*t_span)
+ yc_true = sol_rational(tc)
+ yc = res.sol(tc)
+
+ e = compute_error(yc, yc_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ # See comment in test_integration.
+ if method is not LSODA:
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
+ t_span[1], max_step=-1)
+
+ if method is not LSODA:
+ solver = method(fun_rational, t_span[0], y0, t_span[1],
+ rtol=rtol, atol=atol, max_step=1e-20)
+ message = solver.step()
+
+ assert_equal(solver.status, 'failed')
+ assert_("step size is less" in message)
+ assert_raises(RuntimeError, solver.step)
+
+
+def test_first_step():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [1/3, 2/9]
+ first_step = 0.1
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+ for t_span in ([5, 9], [5, 1]):
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
+ max_step=0.5, atol=atol, method=method,
+ dense_output=True, first_step=first_step)
+
+ assert_equal(res.t[0], t_span[0])
+ assert_equal(res.t[-1], t_span[-1])
+ assert_allclose(first_step, np.abs(res.t[1] - 5))
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ tc = np.linspace(*t_span)
+ yc_true = sol_rational(tc)
+ yc = res.sol(tc)
+
+ e = compute_error(yc, yc_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ # See comment in test_integration.
+ if method is not LSODA:
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
+ t_span[1], first_step=-1)
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
+ t_span[1], first_step=5)
+
+
+def test_t_eval():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [1/3, 2/9]
+ for t_span in ([5, 9], [5, 1]):
+ t_eval = np.linspace(t_span[0], t_span[1], 10)
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+ t_eval=t_eval)
+ assert_equal(res.t, t_eval)
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ t_eval = [5, 5.01, 7, 8, 8.01, 9]
+ res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
+ t_eval=t_eval)
+ assert_equal(res.t, t_eval)
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
+ res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
+ t_eval=t_eval)
+ assert_equal(res.t, t_eval)
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ t_eval = [5.01, 7, 8, 8.01]
+ res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
+ t_eval=t_eval)
+ assert_equal(res.t, t_eval)
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+ t_eval = [4.99, 3, 1.5, 1.1, 1.01]
+ res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
+ t_eval=t_eval)
+ assert_equal(res.t, t_eval)
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ t_eval = [4, 6]
+ assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
+ rtol=rtol, atol=atol, t_eval=t_eval)
+
+
+def test_t_eval_dense_output():
+ rtol = 1e-3
+ atol = 1e-6
+ y0 = [1/3, 2/9]
+ t_span = [5, 9]
+ t_eval = np.linspace(t_span[0], t_span[1], 10)
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+ t_eval=t_eval)
+ res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+ t_eval=t_eval, dense_output=True)
+ assert_equal(res.t, t_eval)
+ assert_(res.t_events is None)
+ assert_(res.success)
+ assert_equal(res.status, 0)
+
+ assert_equal(res.t, res_d.t)
+ assert_equal(res.y, res_d.y)
+ assert_(res_d.t_events is None)
+ assert_(res_d.success)
+ assert_equal(res_d.status, 0)
+
+ # if t and y are equal only test values for one case
+ y_true = sol_rational(res.t)
+ e = compute_error(res.y, y_true, rtol, atol)
+ assert_(np.all(e < 5))
+
+
+def test_no_integration():
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+ sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],
+ method=method, dense_output=True)
+ assert_equal(sol.sol(4), [2, 3])
+ assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
+
+
+def test_no_integration_class():
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+ solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
+ solver.step()
+ assert_equal(solver.status, 'finished')
+ sol = solver.dense_output()
+ assert_equal(sol(0.0), [10.0, 0.0])
+ assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])
+
+ solver = method(lambda t, y: -y, 0.0, [], np.inf)
+ solver.step()
+ assert_equal(solver.status, 'finished')
+ sol = solver.dense_output()
+ assert_equal(sol(100.0), [])
+ assert_equal(sol([0, 1, 2]), np.empty((0, 3)))
+
+
+def test_empty():
+ def fun(t, y):
+ return np.zeros((0,))
+
+ y0 = np.zeros((0,))
+
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+ sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,
+ method=method, dense_output=True)
+ assert_equal(sol.sol(10), np.zeros((0,)))
+ assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
+
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+ sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,
+ method=method, dense_output=True)
+ assert_equal(sol.sol(10), np.zeros((0,)))
+ assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
+
+
+def test_ConstantDenseOutput():
+ sol = ConstantDenseOutput(0, 1, np.array([1, 2]))
+ assert_allclose(sol(1.5), [1, 2])
+ assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])
+
+ sol = ConstantDenseOutput(0, 1, np.array([]))
+ assert_allclose(sol(1.5), np.empty(0))
+ assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))
+
+
+def test_classes():
+ y0 = [1 / 3, 2 / 9]
+ for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+ solver = cls(fun_rational, 5, y0, np.inf)
+ assert_equal(solver.n, 2)
+ assert_equal(solver.status, 'running')
+ assert_equal(solver.t_bound, np.inf)
+ assert_equal(solver.direction, 1)
+ assert_equal(solver.t, 5)
+ assert_equal(solver.y, y0)
+ assert_(solver.step_size is None)
+ if cls is not LSODA:
+ assert_(solver.nfev > 0)
+ assert_(solver.njev >= 0)
+ assert_equal(solver.nlu, 0)
+ else:
+ assert_equal(solver.nfev, 0)
+ assert_equal(solver.njev, 0)
+ assert_equal(solver.nlu, 0)
+
+ assert_raises(RuntimeError, solver.dense_output)
+
+ message = solver.step()
+ assert_equal(solver.status, 'running')
+ assert_equal(message, None)
+ assert_equal(solver.n, 2)
+ assert_equal(solver.t_bound, np.inf)
+ assert_equal(solver.direction, 1)
+ assert_(solver.t > 5)
+ assert_(not np.all(np.equal(solver.y, y0)))
+ assert_(solver.step_size > 0)
+ assert_(solver.nfev > 0)
+ assert_(solver.njev >= 0)
+ assert_(solver.nlu >= 0)
+ sol = solver.dense_output()
+ assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
+
+
+def test_OdeSolution():
+ ts = np.array([0, 2, 5], dtype=float)
+ s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
+ s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
+
+ sol = OdeSolution(ts, [s1, s2])
+
+ assert_equal(sol(-1), [-1])
+ assert_equal(sol(1), [-1])
+ assert_equal(sol(2), [-1])
+ assert_equal(sol(3), [1])
+ assert_equal(sol(5), [1])
+ assert_equal(sol(6), [1])
+
+ assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
+ np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
+
+ ts = np.array([10, 4, -3])
+ s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
+ s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
+
+ sol = OdeSolution(ts, [s1, s2])
+ assert_equal(sol(11), [-1])
+ assert_equal(sol(10), [-1])
+ assert_equal(sol(5), [-1])
+ assert_equal(sol(4), [-1])
+ assert_equal(sol(0), [1])
+ assert_equal(sol(-3), [1])
+ assert_equal(sol(-4), [1])
+
+ assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
+ np.array([[-1, 1, -1, 1, -1, 1, -1]]))
+
+ ts = np.array([1, 1])
+ s = ConstantDenseOutput(1, 1, np.array([10]))
+ sol = OdeSolution(ts, [s])
+ assert_equal(sol(0), [10])
+ assert_equal(sol(1), [10])
+ assert_equal(sol(2), [10])
+
+ assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
+
+
+def test_num_jac():
+ def fun(t, y):
+ return np.vstack([
+ -0.04 * y[0] + 1e4 * y[1] * y[2],
+ 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
+ 3e7 * y[1] ** 2
+ ])
+
+ def jac(t, y):
+ return np.array([
+ [-0.04, 1e4 * y[2], 1e4 * y[1]],
+ [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
+ [0, 6e7 * y[1], 0]
+ ])
+
+ t = 1
+ y = np.array([1, 0, 0])
+ J_true = jac(t, y)
+ threshold = 1e-5
+ f = fun(t, y).ravel()
+
+ J_num, factor = num_jac(fun, t, y, f, threshold, None)
+ assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
+
+ J_num, factor = num_jac(fun, t, y, f, threshold, factor)
+ assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
+
+
+def test_num_jac_sparse():
+ def fun(t, y):
+ e = y[1:]**3 - y[:-1]**2
+ z = np.zeros(y.shape[1])
+ return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))
+
+ def structure(n):
+ A = np.zeros((n, n), dtype=int)
+ A[0, 0] = 1
+ A[0, 1] = 1
+ for i in range(1, n - 1):
+ A[i, i - 1: i + 2] = 1
+ A[-1, -1] = 1
+ A[-1, -2] = 1
+
+ return A
+
+ np.random.seed(0)
+ n = 20
+ y = np.random.randn(n)
+ A = structure(n)
+ groups = group_columns(A)
+
+ f = fun(0, y[:, None]).ravel()
+
+ # Compare dense and sparse results, assuming that dense implementation
+ # is correct (as it is straightforward).
+ J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,
+ sparsity=(A, groups))
+ J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)
+ assert_allclose(J_num_dense, J_num_sparse.toarray(),
+ rtol=1e-12, atol=1e-14)
+ assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
+
+ # Take small factors to trigger their recomputing inside.
+ factor = np.random.uniform(0, 1e-12, size=n)
+ J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
+ sparsity=(A, groups))
+ J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
+
+ assert_allclose(J_num_dense, J_num_sparse.toarray(),
+ rtol=1e-12, atol=1e-14)
+ assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
+
+
+def test_args():
+
+ # sys3 is actually two decoupled systems. (x, y) form a
+ # linear oscillator, while z is a nonlinear first order
+ # system with equilibria at z=0 and z=1. If k > 0, z=1
+ # is stable and z=0 is unstable.
+
+ def sys3(t, w, omega, k, zfinal):
+ x, y, z = w
+ return [-omega*y, omega*x, k*z*(1 - z)]
+
+ def sys3_jac(t, w, omega, k, zfinal):
+ x, y, z = w
+ J = np.array([[0, -omega, 0],
+ [omega, 0, 0],
+ [0, 0, k*(1 - 2*z)]])
+ return J
+
+ def sys3_x0decreasing(t, w, omega, k, zfinal):
+ x, y, z = w
+ return x
+
+ def sys3_y0increasing(t, w, omega, k, zfinal):
+ x, y, z = w
+ return y
+
+ def sys3_zfinal(t, w, omega, k, zfinal):
+ x, y, z = w
+ return z - zfinal
+
+ # Set the event flags for the event functions.
+ sys3_x0decreasing.direction = -1
+ sys3_y0increasing.direction = 1
+ sys3_zfinal.terminal = True
+
+ omega = 2
+ k = 4
+
+ tfinal = 5
+ zfinal = 0.99
+ # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.
+ # The condition z(tfinal) = zfinal is the terminal event.
+ z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))
+
+ w0 = [0, -1, z0]
+
+ # Provide the jac argument and use the Radau method to ensure that the use
+ # of the Jacobian function is exercised.
+ # If event handling is working, the solution will stop at tfinal, not tend.
+ tend = 2*tfinal
+ sol = solve_ivp(sys3, [0, tend], w0,
+ events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],
+ dense_output=True, args=(omega, k, zfinal),
+ method='Radau', jac=sys3_jac,
+ rtol=1e-10, atol=1e-13)
+
+ # Check that we got the expected events at the expected times.
+ x0events_t = sol.t_events[0]
+ y0events_t = sol.t_events[1]
+ zfinalevents_t = sol.t_events[2]
+ assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])
+ assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])
+ assert_allclose(zfinalevents_t, [tfinal])
+
+ # Check that the solution agrees with the known exact solution.
+ t = np.linspace(0, zfinalevents_t[0], 250)
+ w = sol.sol(t)
+ assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)
+ assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)
+ assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),
+ rtol=1e-9, atol=1e-12)
+
+ # Check that the state variables have the expected values at the events.
+ x0events = sol.sol(x0events_t)
+ y0events = sol.sol(y0events_t)
+ zfinalevents = sol.sol(zfinalevents_t)
+ assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)
+ assert_allclose(x0events[1], np.ones_like(x0events[1]))
+ assert_allclose(y0events[0], np.ones_like(y0events[0]))
+ assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)
+ assert_allclose(zfinalevents[2], [zfinal])
+
+
+@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])
+def test_integration_zero_rhs(method):
+ result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method)
+ assert_(result.success)
+ assert_equal(result.status, 0)
+ assert_allclose(result.y, 1.0, rtol=1e-15)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/tests/test_rk.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/tests/test_rk.py
new file mode 100644
index 0000000..33cb27d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ivp/tests/test_rk.py
@@ -0,0 +1,37 @@
+import pytest
+from numpy.testing import assert_allclose, assert_
+import numpy as np
+from scipy.integrate import RK23, RK45, DOP853
+from scipy.integrate._ivp import dop853_coefficients
+
+
+@pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
+def test_coefficient_properties(solver):
+ assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
+ assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-14)
+
+
+def test_coefficient_properties_dop853():
+ assert_allclose(np.sum(dop853_coefficients.B), 1, rtol=1e-15)
+ assert_allclose(np.sum(dop853_coefficients.A, axis=1),
+ dop853_coefficients.C,
+ rtol=1e-14)
+
+
+@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
+def test_error_estimation(solver_class):
+ step = 0.2
+ solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step)
+ solver.step()
+ error_estimate = solver._estimate_error(solver.K, step)
+ error = solver.y - np.exp([step])
+ assert_(np.abs(error) < np.abs(error_estimate))
+
+
+@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
+def test_error_estimation_complex(solver_class):
+ h = 0.2
+ solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h)
+ solver.step()
+ err_norm = solver._estimate_error_norm(solver.K, h, scale=[1])
+ assert np.isrealobj(err_norm)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ode.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ode.py
new file mode 100644
index 0000000..d97370e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_ode.py
@@ -0,0 +1,1376 @@
+# Authors: Pearu Peterson, Pauli Virtanen, John Travers
+"""
+First-order ODE integrators.
+
+User-friendly interface to various numerical integrators for solving a
+system of first order ODEs with prescribed initial conditions::
+
+ d y(t)[i]
+ --------- = f(t,y(t))[i],
+ d t
+
+ y(t=0)[i] = y0[i],
+
+where::
+
+ i = 0, ..., len(y0) - 1
+
+class ode
+---------
+
+A generic interface class to numeric integrators. It has the following
+methods::
+
+ integrator = ode(f, jac=None)
+ integrator = integrator.set_integrator(name, **params)
+ integrator = integrator.set_initial_value(y0, t0=0.0)
+ integrator = integrator.set_f_params(*args)
+ integrator = integrator.set_jac_params(*args)
+ y1 = integrator.integrate(t1, step=False, relax=False)
+ flag = integrator.successful()
+
+class complex_ode
+-----------------
+
+This class has the same generic interface as ode, except it can handle complex
+f, y and Jacobians by transparently translating them into the equivalent
+real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
+an alternative to ode with the zvode solver, sometimes performing better.
+"""
+# XXX: Integrators must have:
+# ===========================
+# cvode - C version of vode and vodpk with many improvements.
+# Get it from http://www.netlib.org/ode/cvode.tar.gz.
+# To wrap cvode to Python, one must write the extension module by
+# hand. Its interface is too much 'advanced C' that using f2py
+# would be too complicated (or impossible).
+#
+# How to define a new integrator:
+# ===============================
+#
+# class myodeint(IntegratorBase):
+#
+# runner = or None
+#
+# def __init__(self,...): # required
+#
+#
+# def reset(self,n,has_jac): # optional
+# # n - the size of the problem (number of equations)
+# # has_jac - whether user has supplied its own routine for Jacobian
+#
+#
+# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
+# # this method is called to integrate from t=t0 to t=t1
+# # with initial condition y0. f and jac are user-supplied functions
+# # that define the problem. f_params,jac_params are additional
+# # arguments
+# # to these functions.
+#
+# if :
+# self.success = 0
+# return t1,y1
+#
+# # In addition, one can define step() and run_relax() methods (they
+# # take the same arguments as run()) if the integrator can support
+# # these features (see IntegratorBase doc strings).
+#
+# if myodeint.runner:
+# IntegratorBase.integrator_classes.append(myodeint)
+
+__all__ = ['ode', 'complex_ode']
+__version__ = "$Id$"
+__docformat__ = "restructuredtext en"
+
+import re
+import warnings
+
+from numpy import asarray, array, zeros, isscalar, real, imag, vstack
+
+from . import vode as _vode
+from . import _dop
+from . import lsoda as _lsoda
+
+
+_dop_int_dtype = _dop.types.intvar.dtype
+_vode_int_dtype = _vode.types.intvar.dtype
+_lsoda_int_dtype = _lsoda.types.intvar.dtype
+
+
+# ------------------------------------------------------------------------------
+# User interface
+# ------------------------------------------------------------------------------
+
+
+class ode(object):
+ """
+ A generic interface class to numeric integrators.
+
+ Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
+
+ *Note*: The first two arguments of ``f(t, y, ...)`` are in the
+ opposite order of the arguments in the system definition function used
+ by `scipy.integrate.odeint`.
+
+ Parameters
+ ----------
+ f : callable ``f(t, y, *f_args)``
+ Right-hand side of the differential equation. t is a scalar,
+ ``y.shape == (n,)``.
+ ``f_args`` is set by calling ``set_f_params(*args)``.
+ `f` should return a scalar, array or list (not a tuple).
+ jac : callable ``jac(t, y, *jac_args)``, optional
+ Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
+ ``jac_args`` is set by calling ``set_jac_params(*args)``.
+
+ Attributes
+ ----------
+ t : float
+ Current time.
+ y : ndarray
+ Current variable values.
+
+ See also
+ --------
+ odeint : an integrator with a simpler interface based on lsoda from ODEPACK
+ quad : for finding the area under a curve
+
+ Notes
+ -----
+ Available integrators are listed below. They can be selected using
+ the `set_integrator` method.
+
+ "vode"
+
+ Real-valued Variable-coefficient Ordinary Differential Equation
+ solver, with fixed-leading-coefficient implementation. It provides
+ implicit Adams method (for non-stiff problems) and a method based on
+ backward differentiation formulas (BDF) (for stiff problems).
+
+ Source: http://www.netlib.org/ode/vode.f
+
+ .. warning::
+
+ This integrator is not re-entrant. You cannot have two `ode`
+ instances using the "vode" integrator at the same time.
+
+ This integrator accepts the following parameters in `set_integrator`
+ method of the `ode` class:
+
+ - atol : float or sequence
+ absolute tolerance for solution
+ - rtol : float or sequence
+ relative tolerance for solution
+ - lband : None or int
+ - uband : None or int
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
+ Setting these requires your jac routine to return the jacobian
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
+ dimension of the matrix must be (lband+uband+1, len(y)).
+ - method: 'adams' or 'bdf'
+ Which solver to use, Adams (non-stiff) or BDF (stiff)
+ - with_jacobian : bool
+ This option is only considered when the user has not supplied a
+ Jacobian function and has not indicated (by setting either band)
+ that the Jacobian is banded. In this case, `with_jacobian` specifies
+ whether the iteration method of the ODE solver's correction step is
+ chord iteration with an internally generated full Jacobian or
+ functional iteration with no Jacobian.
+ - nsteps : int
+ Maximum number of (internally defined) steps allowed during one
+ call to the solver.
+ - first_step : float
+ - min_step : float
+ - max_step : float
+ Limits for the step sizes used by the integrator.
+ - order : int
+ Maximum order used by the integrator,
+ order <= 12 for Adams, <= 5 for BDF.
+
+ "zvode"
+
+ Complex-valued Variable-coefficient Ordinary Differential Equation
+ solver, with fixed-leading-coefficient implementation. It provides
+ implicit Adams method (for non-stiff problems) and a method based on
+ backward differentiation formulas (BDF) (for stiff problems).
+
+ Source: http://www.netlib.org/ode/zvode.f
+
+ .. warning::
+
+ This integrator is not re-entrant. You cannot have two `ode`
+ instances using the "zvode" integrator at the same time.
+
+ This integrator accepts the same parameters in `set_integrator`
+ as the "vode" solver.
+
+ .. note::
+
+ When using ZVODE for a stiff system, it should only be used for
+ the case in which the function f is analytic, that is, when each f(i)
+ is an analytic function of each y(j). Analyticity means that the
+ partial derivative df(i)/dy(j) is a unique complex number, and this
+ fact is critical in the way ZVODE solves the dense or banded linear
+ systems that arise in the stiff case. For a complex stiff ODE system
+ in which f is not analytic, ZVODE is likely to have convergence
+ failures, and for this problem one should instead use DVODE on the
+ equivalent real system (in the real and imaginary parts of y).
+
+ "lsoda"
+
+ Real-valued Variable-coefficient Ordinary Differential Equation
+ solver, with fixed-leading-coefficient implementation. It provides
+ automatic method switching between implicit Adams method (for non-stiff
+ problems) and a method based on backward differentiation formulas (BDF)
+ (for stiff problems).
+
+ Source: http://www.netlib.org/odepack
+
+ .. warning::
+
+ This integrator is not re-entrant. You cannot have two `ode`
+ instances using the "lsoda" integrator at the same time.
+
+ This integrator accepts the following parameters in `set_integrator`
+ method of the `ode` class:
+
+ - atol : float or sequence
+ absolute tolerance for solution
+ - rtol : float or sequence
+ relative tolerance for solution
+ - lband : None or int
+ - uband : None or int
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
+ Setting these requires your jac routine to return the jacobian
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j].
+ - with_jacobian : bool
+ *Not used.*
+ - nsteps : int
+ Maximum number of (internally defined) steps allowed during one
+ call to the solver.
+ - first_step : float
+ - min_step : float
+ - max_step : float
+ Limits for the step sizes used by the integrator.
+ - max_order_ns : int
+ Maximum order used in the nonstiff case (default 12).
+ - max_order_s : int
+ Maximum order used in the stiff case (default 5).
+ - max_hnil : int
+ Maximum number of messages reporting too small step size (t + h = t)
+ (default 0)
+ - ixpr : int
+ Whether to generate extra printing at method switches (default False).
+
+ "dopri5"
+
+ This is an explicit runge-kutta method of order (4)5 due to Dormand &
+ Prince (with stepsize control and dense output).
+
+ Authors:
+
+ E. Hairer and G. Wanner
+ Universite de Geneve, Dept. de Mathematiques
+ CH-1211 Geneve 24, Switzerland
+ e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
+
+ This code is described in [HNW93]_.
+
+ This integrator accepts the following parameters in set_integrator()
+ method of the ode class:
+
+ - atol : float or sequence
+ absolute tolerance for solution
+ - rtol : float or sequence
+ relative tolerance for solution
+ - nsteps : int
+ Maximum number of (internally defined) steps allowed during one
+ call to the solver.
+ - first_step : float
+ - max_step : float
+ - safety : float
+ Safety factor on new step selection (default 0.9)
+ - ifactor : float
+ - dfactor : float
+ Maximum factor to increase/decrease step size by in one step
+ - beta : float
+ Beta parameter for stabilised step size control.
+ - verbosity : int
+ Switch for printing messages (< 0 for no messages).
+
+ "dop853"
+
+ This is an explicit runge-kutta method of order 8(5,3) due to Dormand
+ & Prince (with stepsize control and dense output).
+
+ Options and references the same as "dopri5".
+
+ Examples
+ --------
+
+ A problem to integrate and the corresponding jacobian:
+
+ >>> from scipy.integrate import ode
+ >>>
+ >>> y0, t0 = [1.0j, 2.0], 0
+ >>>
+ >>> def f(t, y, arg1):
+ ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
+ >>> def jac(t, y, arg1):
+ ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
+
+ The integration:
+
+ >>> r = ode(f, jac).set_integrator('zvode', method='bdf')
+ >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
+ >>> t1 = 10
+ >>> dt = 1
+ >>> while r.successful() and r.t < t1:
+ ... print(r.t+dt, r.integrate(r.t+dt))
+ 1 [-0.71038232+0.23749653j 0.40000271+0.j ]
+ 2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
+ 3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
+ 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
+ 5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
+ 6.0 [0.58643071+0.339819j 0.08000018+0.j ]
+ 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
+ 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
+ 9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
+ 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
+
+ References
+ ----------
+ .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
+ Differential Equations i. Nonstiff Problems. 2nd edition.
+ Springer Series in Computational Mathematics,
+ Springer-Verlag (1993)
+
+ """
+
+ def __init__(self, f, jac=None):
+ self.stiff = 0
+ self.f = f
+ self.jac = jac
+ self.f_params = ()
+ self.jac_params = ()
+ self._y = []
+
    @property
    def y(self):
        # Current solution vector as stored by the backend.  complex_ode
        # overrides this property to reassemble complex values from the
        # interleaved real array.
        return self._y
+
    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y.

        Returns ``self`` so calls can be chained.
        """
        # Promote a scalar initial value to a length-1 vector.
        if isscalar(y):
            y = [y]
        n_prev = len(self._y)
        if not n_prev:
            self.set_integrator('')  # find first available integrator
        # Cast to the backend's scalar type (float; complex for zvode).
        self._y = asarray(y, self._integrator.scalar)
        self.t = t
        # (Re)allocate the backend workspaces for the current problem size.
        self._integrator.reset(len(self._y), self.jac is not None)
        return self
+
    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.

        Parameters
        ----------
        name : str
            Name of the integrator.  Treated as a case-insensitive pattern
            matched against the registered integrator class names.
        integrator_params
            Additional parameters for the integrator.

        Returns
        -------
        self : ode
            This instance, to allow call chaining.
        """
        integrator = find_integrator(name)
        if integrator is None:
            # FIXME: this really should be raise an exception. Will that break
            # any code?
            warnings.warn('No integrator name match with %r or is not '
                          'available.' % name)
        else:
            self._integrator = integrator(**integrator_params)
            if not len(self._y):
                # No initial condition given yet: start from a one-element
                # zero state at t = 0 so reset() can size the workspaces.
                self.t = 0.0
                self._y = array([0.0], self._integrator.scalar)
            self._integrator.reset(len(self._y), self.jac is not None)
        return self
+
    def integrate(self, t, step=False, relax=False):
        """Find y=y(t), set y as an initial condition, and return y.

        Parameters
        ----------
        t : float
            The endpoint of the integration step.
        step : bool
            If True, and if the integrator supports the step method,
            then perform a single integration step and return.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.
        relax : bool
            If True and if the integrator supports the run_relax method,
            then integrate until t_1 >= t and return. ``relax`` is not
            referenced if ``step=True``.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.

        Returns
        -------
        y : float
            The integrated value at t
        """
        # Pick the most specific driver the backend advertises.
        if step and self._integrator.supports_step:
            mth = self._integrator.step
        elif relax and self._integrator.supports_run_relax:
            mth = self._integrator.run_relax
        else:
            mth = self._integrator.run

        try:
            # Fall back to a do-nothing callable when no Jacobian was given.
            self._y, self.t = mth(self.f, self.jac or (lambda: None),
                                  self._y, self.t, t,
                                  self.f_params, self.jac_params)
        except SystemError as e:
            # f2py issue with tuple returns, see ticket 1187.
            raise ValueError(
                'Function to integrate must not return a tuple.'
            ) from e

        return self._y
+
+ def successful(self):
+ """Check if integration was successful."""
+ try:
+ self._integrator
+ except AttributeError:
+ self.set_integrator('')
+ return self._integrator.success == 1
+
    def get_return_code(self):
        """Extracts the return code for the integration to enable better control
        if the integration fails.

        In general, a return code > 0 implies success, while a return code < 0
        implies failure.

        Notes
        -----
        This section describes possible return codes and their meaning, for available
        integrators that can be selected by `set_integrator` method.

        "vode"

        =========== =======
        Return Code Message
        =========== =======
        2           Integration successful.
        -1          Excess work done on this call. (Perhaps wrong MF.)
        -2          Excess accuracy requested. (Tolerances too small.)
        -3          Illegal input detected. (See printed message.)
        -4          Repeated error test failures. (Check all input.)
        -5          Repeated convergence failures. (Perhaps bad Jacobian
                    supplied or wrong choice of MF or tolerances.)
        -6          Error weight became zero during problem. (Solution
                    component i vanished, and ATOL or ATOL(i) = 0.)
        =========== =======

        "zvode"

        =========== =======
        Return Code Message
        =========== =======
        2           Integration successful.
        -1          Excess work done on this call. (Perhaps wrong MF.)
        -2          Excess accuracy requested. (Tolerances too small.)
        -3          Illegal input detected. (See printed message.)
        -4          Repeated error test failures. (Check all input.)
        -5          Repeated convergence failures. (Perhaps bad Jacobian
                    supplied or wrong choice of MF or tolerances.)
        -6          Error weight became zero during problem. (Solution
                    component i vanished, and ATOL or ATOL(i) = 0.)
        =========== =======

        "dopri5"

        =========== =======
        Return Code Message
        =========== =======
        1           Integration successful.
        2           Integration successful (interrupted by solout).
        -1          Input is not consistent.
        -2          Larger nsteps is needed.
        -3          Step size becomes too small.
        -4          Problem is probably stiff (interrupted).
        =========== =======

        "dop853"

        =========== =======
        Return Code Message
        =========== =======
        1           Integration successful.
        2           Integration successful (interrupted by solout).
        -1          Input is not consistent.
        -2          Larger nsteps is needed.
        -3          Step size becomes too small.
        -4          Problem is probably stiff (interrupted).
        =========== =======

        "lsoda"

        =========== =======
        Return Code Message
        =========== =======
        2           Integration successful.
        -1          Excess work done on this call (perhaps wrong Dfun type).
        -2          Excess accuracy requested (tolerances too small).
        -3          Illegal input detected (internal error).
        -4          Repeated error test failures (internal error).
        -5          Repeated convergence failures (perhaps bad Jacobian or tolerances).
        -6          Error weight became zero during problem.
        -7          Internal workspace insufficient to finish (internal error).
        =========== =======
        """
        # Lazily select a default integrator so _integrator always exists.
        try:
            self._integrator
        except AttributeError:
            self.set_integrator('')
        # istate is the raw status code set by the backend's last run.
        return self._integrator.istate
+
+ def set_f_params(self, *args):
+ """Set extra parameters for user-supplied function f."""
+ self.f_params = args
+ return self
+
+ def set_jac_params(self, *args):
+ """Set extra parameters for user-supplied function jac."""
+ self.jac_params = args
+ return self
+
    def set_solout(self, solout):
        """
        Set callable to be called at every successful integration step.

        Parameters
        ----------
        solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
            solout should return -1 to stop integration
            otherwise it should return None or 0

        Raises
        ------
        ValueError
            If the currently selected integrator does not support solout.
        """
        if self._integrator.supports_solout:
            self._integrator.set_solout(solout)
            # Re-initialize the backend with the current problem size.
            if self._y is not None:
                self._integrator.reset(len(self._y), self.jac is not None)
        else:
            raise ValueError("selected integrator does not support solout,"
                             " choose another one")
+
+
+def _transform_banded_jac(bjac):
+ """
+ Convert a real matrix of the form (for example)
+
+ [0 0 A B] [0 0 0 B]
+ [0 0 C D] [0 0 A D]
+ [E F G H] to [0 F C H]
+ [I J K L] [E J G L]
+ [I 0 K 0]
+
+ That is, every other column is shifted up one.
+ """
+ # Shift every other column.
+ newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
+ newjac[1:, ::2] = bjac[:, ::2]
+ newjac[:-1, 1::2] = bjac[:, 1::2]
+ return newjac
+
+
class complex_ode(ode):
    """
    A wrapper of ode for complex systems.

    This functions similarly as `ode`, but re-maps a complex-valued
    equation system to a real-valued one before using the integrators.

    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
        ``f_args`` is set by calling ``set_f_params(*args)``.
    jac : callable ``jac(t, y, *jac_args)``
        Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_jac_params(*args)``.

    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.

    Examples
    --------
    For usage examples, see `ode`.

    """

    def __init__(self, f, jac=None):
        # Keep the user's complex callables; the base class sees only the
        # real-valued wrappers below.
        self.cf = f
        self.cjac = jac
        if jac is None:
            ode.__init__(self, self._wrap, None)
        else:
            ode.__init__(self, self._wrap, self._wrap_jac)

    def _wrap(self, t, y, *f_args):
        # Rebuild the complex state from the interleaved real array
        # y = [re0, im0, re1, im1, ...] and evaluate the user's rhs.
        f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
        # self.tmp is a real-valued array containing the interleaved
        # real and imaginary parts of f.
        self.tmp[::2] = real(f)
        self.tmp[1::2] = imag(f)
        return self.tmp

    def _wrap_jac(self, t, y, *jac_args):
        # jac is the complex Jacobian computed by the user-defined function.
        jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))

        # jac_tmp is the real version of the complex Jacobian.  Each complex
        # entry in jac, say 2+3j, becomes a 2x2 block of the form
        #     [2 -3]
        #     [3  2]
        jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
        jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
        jac_tmp[1::2, ::2] = imag(jac)
        jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]

        ml = getattr(self._integrator, 'ml', None)
        mu = getattr(self._integrator, 'mu', None)
        if ml is not None or mu is not None:
            # Jacobian is banded.  The user's Jacobian function has computed
            # the complex Jacobian in packed format.  The corresponding
            # real-valued version has every other column shifted up.
            jac_tmp = _transform_banded_jac(jac_tmp)

        return jac_tmp

    @property
    def y(self):
        # Reassemble the complex state from the interleaved real storage.
        return self._y[::2] + 1j * self._y[1::2]

    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.

        Parameters
        ----------
        name : str
            Name of the integrator
        integrator_params
            Additional parameters for the integrator.

        Raises
        ------
        ValueError
            If ``name`` is 'zvode': zvode handles complex systems natively
            and must be used through `ode`, not this wrapper.
        """
        if name == 'zvode':
            raise ValueError("zvode must be used with ode, not complex_ode")

        lband = integrator_params.get('lband')
        uband = integrator_params.get('uband')
        if lband is not None or uband is not None:
            # The Jacobian is banded.  Override the user-supplied bandwidths
            # (which are for the complex Jacobian) with the bandwidths of
            # the corresponding real-valued Jacobian wrapper of the complex
            # Jacobian.
            integrator_params['lband'] = 2 * (lband or 0) + 1
            integrator_params['uband'] = 2 * (uband or 0) + 1

        return ode.set_integrator(self, name, **integrator_params)

    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        y = asarray(y)
        # Interleave real and imaginary parts into a real work array.
        self.tmp = zeros(y.size * 2, 'float')
        self.tmp[::2] = real(y)
        self.tmp[1::2] = imag(y)
        return ode.set_initial_value(self, self.tmp, t)

    def integrate(self, t, step=False, relax=False):
        """Find y=y(t), set y as an initial condition, and return y.

        Parameters
        ----------
        t : float
            The endpoint of the integration step.
        step : bool
            If True, and if the integrator supports the step method,
            then perform a single integration step and return.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.
        relax : bool
            If True and if the integrator supports the run_relax method,
            then integrate until t_1 >= t and return. ``relax`` is not
            referenced if ``step=True``.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.

        Returns
        -------
        y : float
            The integrated value at t
        """
        y = ode.integrate(self, t, step, relax)
        # Convert the interleaved real result back to complex values.
        return y[::2] + 1j * y[1::2]

    def set_solout(self, solout):
        """
        Set callable to be called at every successful integration step.

        Parameters
        ----------
        solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
            solout should return -1 to stop integration
            otherwise it should return None or 0

        """
        if self._integrator.supports_solout:
            self._integrator.set_solout(solout, complex=True)
        else:
            # BUG FIX: message previously read "solouta," with the two
            # fragments concatenated without a space.
            raise TypeError("selected integrator does not support solout, "
                            "choose another one")
+
+
+# ------------------------------------------------------------------------------
+# ODE integrators
+# ------------------------------------------------------------------------------
+
def find_integrator(name):
    """Return the first registered integrator class whose name matches.

    ``name`` is used as a case-insensitive regular expression matched
    against the start of each class name; ``None`` is returned when no
    registered class matches.
    """
    pattern = re.compile(name, re.I)
    for candidate in IntegratorBase.integrator_classes:
        if pattern.match(candidate.__name__):
            return candidate
    return None
+
+
class IntegratorConcurrencyError(RuntimeError):
    """Raised when a single-problem integrator is driven re-entrantly.

    Some integrator backends keep internal global state and can therefore
    serve only one problem at a time.
    """

    def __init__(self, name):
        message = ("Integrator `%s` can be used to solve only a single problem "
                   "at a time. If you want to integrate multiple problems, "
                   "consider using a different integrator "
                   "(see `ode.set_integrator`)") % name
        super().__init__(message)
+
+
class IntegratorBase(object):
    """Base class for the concrete integrator backend wrappers."""

    runner = None  # runner is None => integrator is not available
    success = None  # success==1 if integrator was called successfully
    istate = None  # istate > 0 means success, istate < 0 means failure
    supports_run_relax = None
    supports_step = None
    supports_solout = False
    integrator_classes = []
    scalar = float

    def acquire_new_handle(self):
        # Some of the integrators have internal state (ancient
        # Fortran...), and so only one instance can use them at a time.
        # We keep track of this, and fail when concurrent usage is tried.
        cls = self.__class__
        cls.active_global_handle += 1
        self.handle = cls.active_global_handle

    def check_handle(self):
        # Identity comparison against the class-wide counter detects a
        # second instance having taken over the backend since we ran last.
        if self.handle is not self.__class__.active_global_handle:
            raise IntegratorConcurrencyError(self.__class__.__name__)

    def reset(self, n, has_jac):
        """Prepare integrator for call: allocate memory, set flags, etc.
        n - number of equations.
        has_jac - if user has supplied function for evaluating Jacobian.
        """

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t=t1 using y0 as an initial condition.
        Return 2-tuple (y1,t1) where y1 is the result and t=t1
        defines the stoppage coordinate of the result.
        """
        raise NotImplementedError('all integrators must define '
                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')

    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Make one integration step and return (y1,t1)."""
        raise NotImplementedError('%s does not support step() method' %
                                  self.__class__.__name__)

    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
        raise NotImplementedError('%s does not support run_relax() method' %
                                  self.__class__.__name__)

    # XXX: __str__ method for getting visual state of the integrator
+
+
+def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
+ """
+ Wrap a banded Jacobian function with a function that pads
+ the Jacobian with `ml` rows of zeros.
+ """
+
+ def jac_wrapper(t, y):
+ jac = asarray(jacfunc(t, y, *jac_params))
+ padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
+ return padded_jac
+
+ return jac_wrapper
+
+
class vode(IntegratorBase):
    # Wrapper for the Fortran DVODE integrator (real-valued systems).
    # `runner` is the f2py-wrapped routine; None when the extension is absent.
    runner = getattr(_vode, 'dvode', None)

    # Human-readable messages for the negative ISTATE codes DVODE can return.
    messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
                -2: 'Excess accuracy requested. (Tolerances too small.)',
                -3: 'Illegal input detected. (See printed message.)',
                -4: 'Repeated error test failures. (Check all input.)',
                -5: 'Repeated convergence failures. (Perhaps bad'
                ' Jacobian supplied or wrong choice of MF or tolerances.)',
                -6: 'Error weight became zero during problem. (Solution'
                ' component i vanished, and ATOL or ATOL(i) = 0.)'
                }
    supports_run_relax = 1
    supports_step = 1
    active_global_handle = 0

    def __init__(self,
                 method='adams',
                 with_jacobian=False,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 order=12,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ):

        # NOTE(review): re.match(pattern, string) is called with `method`
        # as the pattern, so e.g. method='a' also selects 'adams'.  Kept
        # as-is for backward compatibility.
        if re.match(method, r'adams', re.I):
            self.meth = 1
        elif re.match(method, r'bdf', re.I):
            self.meth = 2
        else:
            raise ValueError('Unknown integration method %s' % method)
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        # Upper/lower Jacobian band widths; None means full Jacobian.
        self.mu = uband
        self.ml = lband

        self.order = order
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.success = 1

        self.initialized = False

    def _determine_mf_and_set_bands(self, has_jac):
        """
        Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.

        In the Fortran code, the legal values of `MF` are:
            10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
            -11, -12, -14, -15, -21, -22, -24, -25
        but this Python wrapper does not use negative values.

        Returns

          mf  = 10*self.meth + miter

        self.meth is the linear multistep method:
            self.meth == 1:  method="adams"
            self.meth == 2:  method="bdf"

        miter is the correction iteration method:
            miter == 0:  Functional iteraton; no Jacobian involved.
            miter == 1:  Chord iteration with user-supplied full Jacobian.
            miter == 2:  Chord iteration with internally computed full Jacobian.
            miter == 3:  Chord iteration with internally computed diagonal Jacobian.
            miter == 4:  Chord iteration with user-supplied banded Jacobian.
            miter == 5:  Chord iteration with internally computed banded Jacobian.

        Side effects: If either self.mu or self.ml is not None and the other is None,
        then the one that is None is set to 0.
        """

        jac_is_banded = self.mu is not None or self.ml is not None
        if jac_is_banded:
            if self.mu is None:
                self.mu = 0
            if self.ml is None:
                self.ml = 0

        # has_jac is True if the user provided a Jacobian function.
        if has_jac:
            if jac_is_banded:
                miter = 4
            else:
                miter = 1
        else:
            if jac_is_banded:
                if self.ml == self.mu == 0:
                    miter = 3  # Chord iteration with internal diagonal Jacobian.
                else:
                    miter = 5  # Chord iteration with internal banded Jacobian.
            else:
                # self.with_jacobian is set by the user in the call to ode.set_integrator.
                if self.with_jacobian:
                    miter = 2  # Chord iteration with internal full Jacobian.
                else:
                    miter = 0  # Functional iteraton; no Jacobian involved.

        mf = 10 * self.meth + miter
        return mf

    def reset(self, n, has_jac):
        # Allocate the DVODE work arrays for an n-equation problem.
        mf = self._determine_mf_and_set_bands(has_jac)

        # Real workspace length LRW required by dvode for each method flag.
        if mf == 10:
            lrw = 20 + 16 * n
        elif mf in [11, 12]:
            lrw = 22 + 16 * n + 2 * n * n
        elif mf == 13:
            lrw = 22 + 17 * n
        elif mf in [14, 15]:
            lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf == 20:
            lrw = 20 + 9 * n
        elif mf in [21, 22]:
            lrw = 22 + 9 * n + 2 * n * n
        elif mf == 23:
            lrw = 22 + 10 * n
        elif mf in [24, 25]:
            lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
        else:
            raise ValueError('Unexpected mf=%s' % mf)

        # Integer workspace: miter 0 and 3 need no Jacobian pivot storage.
        if mf % 10 in [0, 3]:
            liw = 30
        else:
            liw = 30 + n

        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork

        iwork = zeros((liw,), _vode_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork

        # Positional arguments passed to the Fortran runner after (f, jac,
        # y0, t0, t1); index 2 is ITASK and index 3 is ISTATE.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # Guard the single-problem Fortran backend against concurrent use.
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()

        if self.ml is not None and self.ml > 0:
            # Banded Jacobian.  Wrap the user-provided function with one
            # that pads the Jacobian array with the extra `self.ml` rows
            # required by the f2py-generated wrapper.
            jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)

        args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
                (f_params, jac_params))
        y1, t, istate = self.runner(*args)
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
            self.istate = 2
        return y1, t

    def step(self, *args):
        # Temporarily set ITASK=2 (single step) for this one call.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self, *args):
        # Temporarily set ITASK=3 (stop at first point past t1) for this call.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
+
+
# Register vode only when the compiled Fortran backend is available.
if vode.runner is not None:
    IntegratorBase.integrator_classes.append(vode)
+
+
class zvode(vode):
    """Wrapper for the Fortran ZVODE integrator (complex-valued systems).

    Inherits option handling, ``run``/``step``/``run_relax`` and the
    method-flag computation from `vode`; only the workspace allocation
    differs (an additional complex work array ``zwork``).
    """

    runner = getattr(_vode, 'zvode', None)

    supports_run_relax = 1
    supports_step = 1
    scalar = complex
    active_global_handle = 0

    def reset(self, n, has_jac):
        """Allocate the ZVODE work arrays for an n-equation problem.

        Parameters
        ----------
        n : int
            Number of equations.
        has_jac : bool
            Whether the user supplied a Jacobian function.
        """
        mf = self._determine_mf_and_set_bands(has_jac)

        # Complex workspace length LZW required by zvode for each method
        # flag (negative flags listed for completeness; the wrapper only
        # produces positive ones).
        if mf in (10,):
            lzw = 15 * n
        elif mf in (11, 12):
            lzw = 15 * n + 2 * n ** 2
        elif mf in (-11, -12):
            lzw = 15 * n + n ** 2
        elif mf in (13,):
            lzw = 16 * n
        elif mf in (14, 15):
            lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-14, -15):
            lzw = 16 * n + (2 * self.ml + self.mu) * n
        elif mf in (20,):
            lzw = 8 * n
        elif mf in (21, 22):
            lzw = 8 * n + 2 * n ** 2
        elif mf in (-21, -22):
            lzw = 8 * n + n ** 2
        elif mf in (23,):
            lzw = 9 * n
        elif mf in (24, 25):
            lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-24, -25):
            lzw = 9 * n + (2 * self.ml + self.mu) * n
        else:
            # BUG FIX: an unexpected mf previously fell through all branches
            # and raised UnboundLocalError on `lzw`; fail explicitly, the
            # same way vode.reset does.
            raise ValueError('Unexpected mf=%s' % mf)

        lrw = 20 + n

        # Integer workspace: miter 0 and 3 need no Jacobian pivot storage.
        if mf % 10 in (0, 3):
            liw = 30
        else:
            liw = 30 + n

        zwork = zeros((lzw,), complex)
        self.zwork = zwork

        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork

        iwork = zeros((liw,), _vode_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork

        # Index 2 is ITASK, index 3 is ISTATE (upgraded to 2 after a
        # successful run by the inherited vode.run).
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.zwork, self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False
+
+
# Register zvode only when the compiled Fortran backend is available.
if zvode.runner is not None:
    IntegratorBase.integrator_classes.append(zvode)
+
+
class dopri5(IntegratorBase):
    # Wrapper for the Fortran DOPRI5 explicit Runge-Kutta integrator.
    runner = getattr(_dop, 'dopri5', None)
    name = 'dopri5'
    supports_solout = True

    # Human-readable messages for the IDID status codes.
    messages = {1: 'computation successful',
                2: 'computation successful (interrupted by solout)',
                -1: 'input is not consistent',
                -2: 'larger nsteps is needed',
                -3: 'step size becomes too small',
                -4: 'problem is probably stiff (interrupted)',
                }

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 beta=0.0,
                 method=None,
                 verbosity=-1,  # no messages if negative
                 ):
        # `method` is accepted for signature compatibility but unused here.
        self.rtol = rtol
        self.atol = atol
        self.nsteps = nsteps
        self.max_step = max_step
        self.first_step = first_step
        self.safety = safety
        self.ifactor = ifactor
        self.dfactor = dfactor
        self.beta = beta
        self.verbosity = verbosity
        self.success = 1
        self.set_solout(None)

    def set_solout(self, solout, complex=False):
        # Register (or clear, with None) the per-step callback; `complex`
        # means y is an interleaved real view of a complex state that
        # _solout reassembles before calling back.
        self.solout = solout
        self.solout_cmplx = complex
        if solout is None:
            self.iout = 0
        else:
            self.iout = 1

    def reset(self, n, has_jac):
        # Real work array; slots 1-6 carry the tuning parameters.
        work = zeros((8 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), _dop_int_dtype)
        iwork[0] = self.nsteps
        iwork[2] = self.verbosity
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.iout, self.work, self.iwork]
        self.success = 1

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # jac and jac_params are ignored: this is an explicit RK method.
        x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
                                          tuple(self.call_args) + (f_params,)))
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)))
            self.success = 0
        return y, x

    def _solout(self, nr, xold, x, y, nd, icomp, con):
        # Adapter between the Fortran callback signature and the
        # user-facing solout(t, y) callable.
        if self.solout is not None:
            if self.solout_cmplx:
                y = y[::2] + 1j * y[1::2]
            return self.solout(x, y)
        else:
            # No callback registered; presumably 1 tells the Fortran
            # driver to continue -- confirm against dop.f if touched.
            return 1
+
+
# Register dopri5 only when the compiled Fortran backend is available.
if dopri5.runner is not None:
    IntegratorBase.integrator_classes.append(dopri5)
+
+
class dop853(dopri5):
    """Wrapper for the Fortran DOP853 integrator (8(5,3) Runge-Kutta).

    Shares options, ``run`` and the solout machinery with `dopri5`; only
    the default step-size factors and the work-array size differ.
    """

    runner = getattr(_dop, 'dop853', None)
    name = 'dop853'

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=6.0,
                 dfactor=0.3,
                 beta=0.0,
                 method=None,
                 verbosity=-1,  # no messages if negative
                 ):
        # BUG FIX: was `super(self.__class__, self).__init__(...)`, which
        # recurses infinitely if dop853 is ever subclassed; use the normal
        # zero-argument super() instead.
        super().__init__(rtol, atol, nsteps, max_step,
                         first_step, safety, ifactor,
                         dfactor, beta, method,
                         verbosity)

    def reset(self, n, has_jac):
        # Same layout as dopri5.reset, but DOP853 needs 11*n + 21 reals.
        work = zeros((11 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), _dop_int_dtype)
        iwork[0] = self.nsteps
        iwork[2] = self.verbosity
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.iout, self.work, self.iwork]
        self.success = 1
+
+
# Register dop853 only when the compiled Fortran backend is available.
if dop853.runner is not None:
    IntegratorBase.integrator_classes.append(dop853)
+
+
class lsoda(IntegratorBase):
    # Wrapper for the Fortran LSODA integrator (automatic stiff/nonstiff
    # method switching).  Not re-entrant: one problem at a time.
    runner = getattr(_lsoda, 'lsoda', None)
    active_global_handle = 0

    # Human-readable messages for the ISTATE codes LSODA can return.
    messages = {
        2: "Integration successful.",
        -1: "Excess work done on this call (perhaps wrong Dfun type).",
        -2: "Excess accuracy requested (tolerances too small).",
        -3: "Illegal input detected (internal error).",
        -4: "Repeated error test failures (internal error).",
        -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
        -6: "Error weight became zero during problem.",
        -7: "Internal workspace insufficient to finish (internal error)."
    }

    def __init__(self,
                 with_jacobian=False,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ixpr=0,
                 max_hnil=0,
                 max_order_ns=12,
                 max_order_s=5,
                 method=None
                 ):
        # `method` is accepted for signature compatibility but unused:
        # LSODA switches methods automatically.
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        # Upper/lower Jacobian band widths; None means full Jacobian.
        self.mu = uband
        self.ml = lband

        self.max_order_ns = max_order_ns
        self.max_order_s = max_order_s
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.ixpr = ixpr
        self.max_hnil = max_hnil
        self.success = 1

        self.initialized = False

    def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine lsoda:
        # jt is the Jacobian type indicator (user/internal, full/banded).
        if has_jac:
            if self.mu is None and self.ml is None:
                jt = 1
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 4
        else:
            if self.mu is None and self.ml is None:
                jt = 2
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 5
        # Real workspace must cover both the nonstiff (lrn) and stiff (lrs)
        # method requirements, since LSODA may switch between them.
        lrn = 20 + (self.max_order_ns + 4) * n
        if jt in [1, 2]:
            lrs = 22 + (self.max_order_s + 4) * n + n * n
        elif jt in [4, 5]:
            lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
        else:
            raise ValueError('Unexpected jt=%s' % jt)
        lrw = max(lrn, lrs)
        liw = 20 + n
        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), _lsoda_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.ixpr
        iwork[5] = self.nsteps
        iwork[6] = self.max_hnil
        iwork[7] = self.max_order_ns
        iwork[8] = self.max_order_s
        self.iwork = iwork
        # Index 2 is ITASK, index 3 is ISTATE (upgraded after first success).
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, jt]
        self.success = 1
        self.initialized = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # Guard the single-problem Fortran backend against concurrent use.
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()
        args = [f, y0, t0, t1] + self.call_args[:-1] + \
               [jac, self.call_args[-1], f_params, 0, jac_params]
        y1, t, istate = self.runner(*args)
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
            self.istate = 2
        return y1, t

    def step(self, *args):
        # Temporarily set ITASK=2 (single step) for this one call.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self, *args):
        # Temporarily set ITASK=3 (stop at first point past t1) for this call.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
+
+
# Register lsoda only when the compiled Fortran backend is available.
# Consistency fix: use the same explicit `is not None` check as the
# other backend registrations (vode, zvode, dopri5, dop853).
if lsoda.runner is not None:
    IntegratorBase.integrator_classes.append(lsoda)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_odepack.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_odepack.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..bbd945a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_odepack.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quad_vec.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quad_vec.py
new file mode 100644
index 0000000..9f838e9
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quad_vec.py
@@ -0,0 +1,638 @@
+import sys
+import copy
+import heapq
+import collections
+import functools
+
+import numpy as np
+
+from scipy._lib._util import MapWrapper
+
+
class LRUDict(collections.OrderedDict):
    """Size-bounded dict: inserting beyond ``max_size`` evicts the
    least-recently-set entry; re-setting a key refreshes its recency."""

    def __init__(self, max_size):
        self.__max_size = max_size

    def __setitem__(self, key, value):
        had_key = key in self
        super().__setitem__(key, value)
        if had_key:
            # Refresh recency of an existing key.
            self.move_to_end(key)
        elif len(self) > self.__max_size:
            # Evict the oldest entry.
            self.popitem(last=False)

    def update(self, other):
        # Bulk update would bypass the eviction logic; it is never needed.
        raise NotImplementedError()
+
+
class SemiInfiniteFunc(object):
    """
    Argument transform from (start, +-oo) to (0, 1)
    """

    def __init__(self, func, start, infty):
        self._func = func
        self._start = start
        if infty < 0:
            self._sgn = -1
        else:
            self._sgn = 1
        # Overflow threshold for the 1/t**2 factor
        self._tmin = sys.float_info.min ** 0.5

    def get_t(self, x):
        """Map a point x on the original axis to t in (0, 1)."""
        z = 1 + self._sgn * (x - self._start)
        if z == 0:
            # Can happen only if point not in range
            return np.inf
        return 1 / z

    def __call__(self, t):
        """Evaluate the transformed integrand at t."""
        if t < self._tmin:
            return 0.0
        x = self._start + self._sgn * (1 - t) / t
        return self._sgn * (self._func(x) / t) / t
+
+
class DoubleInfiniteFunc(object):
    """
    Argument transform from (-oo, oo) to (-1, 1)
    """

    def __init__(self, func):
        self._func = func
        # Overflow threshold for the 1/t**2 factor
        self._tmin = sys.float_info.min ** 0.5

    def get_t(self, x):
        """Map a point x on the real line to t in (-1, 1), sign-preserving."""
        if x < 0:
            return -1 / (abs(x) + 1)
        return 1 / (abs(x) + 1)

    def __call__(self, t):
        """Evaluate the transformed integrand at t."""
        if abs(t) < self._tmin:
            return 0.0
        x = (1 - abs(t)) / t
        return (self._func(x) / t) / t
+
+
def _max_norm(x):
    """Max (Chebyshev) norm of x."""
    return np.max(np.absolute(x))
+
+
def _get_sizeof(obj):
    """Best-effort byte size of obj; sys.getsizeof raises TypeError on PyPy."""
    try:
        return sys.getsizeof(obj)
    except TypeError:
        pass
    # PyPy fallback: use the object's own __sizeof__ if present,
    # otherwise a fixed guess.
    if hasattr(obj, '__sizeof__'):
        return int(obj.__sizeof__())
    return 64
+
+
class _Bunch(object):
    """Attribute container built from keyword arguments (used for the
    ``info`` object returned by quad_vec)."""

    def __init__(self, **kwargs):
        self.__keys = kwargs.keys()
        self.__dict__.update(**kwargs)

    def __repr__(self):
        items = ("{}={}".format(key, repr(self.__dict__[key]))
                 for key in self.__keys)
        return "_Bunch({})".format(", ".join(items))
+
+
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
             workers=1, points=None, quadrature=None, full_output=False):
    r"""Adaptive integration of a vector-valued function.

    Parameters
    ----------
    f : callable
        Vector-valued function f(x) to integrate.
    a : float
        Initial point.
    b : float
        Final point.
    epsabs : float, optional
        Absolute tolerance.
    epsrel : float, optional
        Relative tolerance.
    norm : {'max', '2'}, optional
        Vector norm to use for error estimation.
    cache_size : int, optional
        Number of bytes to use for memoization.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    workers : int or map-like callable, optional
        If `workers` is an integer, part of the computation is done in
        parallel subdivided to this many tasks (using
        :class:`python:multiprocessing.pool.Pool`).
        Supply `-1` to use all cores available to the Process.
        Alternatively, supply a map-like callable, such as
        :meth:`python:multiprocessing.pool.Pool.map` for evaluating the
        population in parallel.
        This evaluation is carried out as ``workers(func, iterable)``.
    points : list, optional
        List of additional breakpoints.
    quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
        Quadrature rule to use on subintervals.
        Options: 'gk21' (Gauss-Kronrod 21-point rule),
        'gk15' (Gauss-Kronrod 15-point rule),
        'trapezoid' (composite trapezoid rule).
        Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite
    full_output : bool, optional
        Return an additional ``info`` dictionary.

    Returns
    -------
    res : {float, array-like}
        Estimate for the result
    err : float
        Error estimate for the result in the given norm
    info : dict
        Returned only when ``full_output=True``.
        Info dictionary. Is an object with the attributes:

            success : bool
                Whether integration reached target precision.
            status : int
                Indicator for convergence, success (0),
                failure (1), and failure due to rounding error (2).
            neval : int
                Number of function evaluations.
            intervals : ndarray, shape (num_intervals, 2)
                Start and end points of subdivision intervals.
            integrals : ndarray, shape (num_intervals, ...)
                Integral for each interval.
                Note that at most ``cache_size`` values are recorded,
                and the array may contains *nan* for missing items.
            errors : ndarray, shape (num_intervals,)
                Estimated integration error for each interval.

    Notes
    -----
    The algorithm mainly follows the implementation of QUADPACK's
    DQAG* algorithms, implementing global error control and adaptive
    subdivision.

    The algorithm here has some differences to the QUADPACK approach:

    Instead of subdividing one interval at a time, the algorithm
    subdivides N intervals with largest errors at once. This enables
    (partial) parallelization of the integration.

    The logic of subdividing "next largest" intervals first is then
    not implemented, and we rely on the above extension to avoid
    concentrating on "small" intervals only.

    The Wynn epsilon table extrapolation is not used (QUADPACK uses it
    for infinite intervals). This is because the algorithm here is
    supposed to work on vector-valued functions, in an user-specified
    norm, and the extension of the epsilon algorithm to this case does
    not appear to be widely agreed. For max-norm, using elementwise
    Wynn epsilon could be possible, but we do not do this here with
    the hope that the epsilon extrapolation is mainly useful in
    special cases.

    References
    ----------
    [1] R. Piessens, E. de Doncker, QUADPACK (1983).

    Examples
    --------
    We can compute integrations of a vector-valued function:

    >>> from scipy.integrate import quad_vec
    >>> import matplotlib.pyplot as plt
    >>> alpha = np.linspace(0.0, 2.0, num=30)
    >>> f = lambda x: x**alpha
    >>> x0, x1 = 0, 2
    >>> y, err = quad_vec(f, x0, x1)
    >>> plt.plot(alpha, y)
    >>> plt.xlabel(r"$\alpha$")
    >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
    >>> plt.show()

    """
    a = float(a)
    b = float(b)

    # Use simple transformations to deal with integrals over infinite
    # intervals.
    kwargs = dict(epsabs=epsabs,
                  epsrel=epsrel,
                  norm=norm,
                  cache_size=cache_size,
                  limit=limit,
                  workers=workers,
                  points=points,
                  quadrature='gk15' if quadrature is None else quadrature,
                  full_output=full_output)
    if np.isfinite(a) and np.isinf(b):
        f2 = SemiInfiniteFunc(f, start=a, infty=b)
        if points is not None:
            # Transform user breakpoints to the (0, 1) coordinate.
            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
        return quad_vec(f2, 0, 1, **kwargs)
    elif np.isfinite(b) and np.isinf(a):
        f2 = SemiInfiniteFunc(f, start=b, infty=a)
        if points is not None:
            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
        res = quad_vec(f2, 0, 1, **kwargs)
        # Sign flip: the transform reversed the orientation.
        return (-res[0],) + res[1:]
    elif np.isinf(a) and np.isinf(b):
        sgn = -1 if b < a else 1

        # NB. explicitly split integral at t=0, which separates
        # the positive and negative sides
        f2 = DoubleInfiniteFunc(f)
        if points is not None:
            kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
        else:
            kwargs['points'] = (0,)

        if a != b:
            res = quad_vec(f2, -1, 1, **kwargs)
        else:
            # Degenerate case a == b: zero-width interval.
            res = quad_vec(f2, 1, 1, **kwargs)

        return (res[0]*sgn,) + res[1:]
    elif not (np.isfinite(a) and np.isfinite(b)):
        raise ValueError("invalid integration bounds a={}, b={}".format(a, b))

    norm_funcs = {
        None: _max_norm,
        'max': _max_norm,
        '2': np.linalg.norm
    }
    if callable(norm):
        norm_func = norm
    else:
        norm_func = norm_funcs[norm]

    # Max number of intervals subdivided per round; limits parallel batch.
    parallel_count = 128
    min_intervals = 2

    try:
        _quadrature = {None: _quadrature_gk21,
                       'gk21': _quadrature_gk21,
                       'gk15': _quadrature_gk15,
                       'trapz': _quadrature_trapezoid,  # alias for backcompat
                       'trapezoid': _quadrature_trapezoid}[quadrature]
    except KeyError as e:
        raise ValueError("unknown quadrature {!r}".format(quadrature)) from e

    # Initial interval set
    if points is None:
        initial_intervals = [(a, b)]
    else:
        # Split at user breakpoints strictly inside (a, b), skipping dups.
        prev = a
        initial_intervals = []
        for p in sorted(points):
            p = float(p)
            if not (a < p < b) or p == prev:
                continue
            initial_intervals.append((prev, p))
            prev = p
        initial_intervals.append((prev, b))

    global_integral = None
    global_error = None
    rounding_error = None
    interval_cache = None
    intervals = []
    neval = 0

    for x1, x2 in initial_intervals:
        ig, err, rnd = _quadrature(x1, x2, f, norm_func)
        neval += _quadrature.num_eval

        if global_integral is None:
            if isinstance(ig, (float, complex)):
                # Specialize for scalars
                if norm_func in (_max_norm, np.linalg.norm):
                    norm_func = abs

            global_integral = ig
            global_error = float(err)
            rounding_error = float(rnd)

            # Cache capacity is sized from the first result's footprint.
            cache_count = cache_size // _get_sizeof(ig)
            interval_cache = LRUDict(cache_count)
        else:
            global_integral += ig
            global_error += err
            rounding_error += rnd

        interval_cache[(x1, x2)] = copy.copy(ig)
        intervals.append((-err, x1, x2))

    # Max-heap on error via negated keys.
    heapq.heapify(intervals)

    CONVERGED = 0
    NOT_CONVERGED = 1
    ROUNDING_ERROR = 2
    NOT_A_NUMBER = 3

    status_msg = {
        CONVERGED: "Target precision reached.",
        NOT_CONVERGED: "Target precision not reached.",
        ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
        NOT_A_NUMBER: "Non-finite values encountered."
    }

    # Process intervals
    with MapWrapper(workers) as mapwrapper:
        ier = NOT_CONVERGED

        while intervals and len(intervals) < limit:
            # Select intervals with largest errors for subdivision
            tol = max(epsabs, epsrel*norm_func(global_integral))

            to_process = []
            err_sum = 0

            for j in range(parallel_count):
                if not intervals:
                    break

                if j > 0 and err_sum > global_error - tol/8:
                    # avoid unnecessary parallel splitting
                    break

                interval = heapq.heappop(intervals)

                # NOTE: a, b are rebound here to subinterval endpoints;
                # the original bounds are no longer needed at this point.
                neg_old_err, a, b = interval
                old_int = interval_cache.pop((a, b), None)
                to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
                err_sum += -neg_old_err

            # Subdivide intervals
            for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
                neval += dneval
                global_integral += dint
                global_error += derr
                rounding_error += dround_err
                for x in subint:
                    x1, x2, ig, err = x
                    interval_cache[(x1, x2)] = ig
                    heapq.heappush(intervals, (-err, x1, x2))

            # Termination check
            if len(intervals) >= min_intervals:
                tol = max(epsabs, epsrel*norm_func(global_integral))
                if global_error < tol/8:
                    ier = CONVERGED
                    break
                if global_error < rounding_error:
                    ier = ROUNDING_ERROR
                    break

            if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
                ier = NOT_A_NUMBER
                break

    res = global_integral
    err = global_error + rounding_error

    if full_output:
        res_arr = np.asarray(res)
        dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
        integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
                              for z in intervals], dtype=res_arr.dtype)
        errors = np.array([-z[0] for z in intervals])
        intervals = np.array([[z[1], z[2]] for z in intervals])

        info = _Bunch(neval=neval,
                      success=(ier == CONVERGED),
                      status=ier,
                      message=status_msg[ier],
                      intervals=intervals,
                      integrals=integrals,
                      errors=errors)
        return (res, err, info)
    else:
        return (res, err)
+
+
def _subdivide_interval(args):
    """Bisect one interval and re-quadrature both halves.

    Parameters
    ----------
    args : tuple
        ``(interval, f, norm_func, _quadrature)`` where ``interval`` is
        ``(old_err, a, b, old_int)``; ``old_int`` may be None when the
        cached integral for (a, b) was evicted from the LRU cache.

    Returns
    -------
    dint, derr, dround_err
        Deltas to apply to the global integral, error, and rounding error.
    subintervals : tuple
        ``(x1, x2, integral, error)`` for each of the two halves.
    dneval : int
        Number of new function evaluations performed.
    """
    interval, f, norm_func, _quadrature = args
    old_err, a, b, old_int = interval

    c = 0.5 * (a + b)

    # Left-hand side
    if getattr(_quadrature, 'cache_size', 0) > 0:
        # Memoize f so shared nodes across the three quadrature calls
        # are evaluated only once; misses counted below.
        f = functools.lru_cache(_quadrature.cache_size)(f)

    s1, err1, round1 = _quadrature(a, c, f, norm_func)
    dneval = _quadrature.num_eval
    s2, err2, round2 = _quadrature(c, b, f, norm_func)
    dneval += _quadrature.num_eval
    if old_int is None:
        # Cached value was evicted; recompute the parent integral.
        old_int, _, _ = _quadrature(a, b, f, norm_func)
        dneval += _quadrature.num_eval

    if getattr(_quadrature, 'cache_size', 0) > 0:
        # With memoization, only distinct evaluations count.
        dneval = f.cache_info().misses

    dint = s1 + s2 - old_int
    derr = err1 + err2 - old_err
    dround_err = round1 + round2

    subintervals = ((a, c, s1, err1), (c, b, s2, err2))
    return dint, derr, dround_err, subintervals, dneval
+
+
def _quadrature_trapezoid(x1, x2, f, norm_func):
    """
    Composite trapezoid quadrature
    """
    # Midpoint and the three samples used by the rule.
    mid = 0.5*(x1 + x2)
    y1 = f(x1)
    y2 = f(x2)
    ym = f(mid)

    # Two-interval composite trapezoid estimate.
    s2 = 0.25 * (x2 - x1) * (y1 + 2*ym + y2)

    # Rounding-error estimate proportional to the sample norms.
    round_err = 0.25 * abs(x2 - x1) * (float(norm_func(y1))
                                       + 2*float(norm_func(ym))
                                       + float(norm_func(y2))) * 2e-16

    # One-interval estimate; Richardson-style difference gives the error.
    s1 = 0.5 * (x2 - x1) * (y1 + y2)
    err = 1/3 * float(norm_func(s1 - s2))
    return s2, err, round_err


# Three nodes per call, up to three calls per subdivision -> cache 9 values.
_quadrature_trapezoid.cache_size = 3 * 3
_quadrature_trapezoid.num_eval = 3
+
+
def _quadrature_gk(a, b, f, norm_func, x, w, v):
    """
    Generic Gauss-Kronrod quadrature
    """
    fv = [0.0]*len(x)

    c = 0.5 * (a + b)
    h = 0.5 * (b - a)

    # Kronrod estimate and the quadrature of |f| (for rounding error).
    s_k = 0.0
    s_k_abs = 0.0
    for i, (xi, vi) in enumerate(zip(x, v)):
        ff = f(c + h*xi)
        fv[i] = ff

        # \int f(x)
        s_k += vi * ff
        # \int |f(x)|
        s_k_abs += vi * abs(ff)

    # Embedded Gauss estimate from the odd-indexed Kronrod samples.
    s_g = 0.0
    for i, wi in enumerate(w):
        s_g += wi * fv[2*i + 1]

    # Quadrature of abs-deviation from average
    s_k_dabs = 0.0
    y0 = s_k / 2.0
    for vi, fi in zip(v, fv):
        # \int |f(x) - y0|
        s_k_dabs += vi * abs(fi - y0)

    # Use similar error estimation as quadpack
    err = float(norm_func((s_k - s_g) * h))
    dabs = float(norm_func(s_k_dabs * h))
    if dabs != 0 and err != 0:
        err = dabs * min(1.0, (200 * err / dabs)**1.5)

    eps = sys.float_info.epsilon
    round_err = float(norm_func(50 * eps * h * s_k_abs))

    if round_err > sys.float_info.min:
        err = max(err, round_err)

    return h * s_k, err, round_err
+
+
def _quadrature_gk21(a, b, f, norm_func):
    """
    Gauss-Kronrod 21 quadrature with error estimate
    """
    # Gauss-Kronrod points
    # (ordered so that the odd-indexed entries are the 10 Gauss nodes)
    x = (0.995657163025808080735527280689003,
         0.973906528517171720077964012084452,
         0.930157491355708226001207180059508,
         0.865063366688984510732096688423493,
         0.780817726586416897063717578345042,
         0.679409568299024406234327365114874,
         0.562757134668604683339000099272694,
         0.433395394129247190799265943165784,
         0.294392862701460198131126603103866,
         0.148874338981631210884826001129720,
         0,
         -0.148874338981631210884826001129720,
         -0.294392862701460198131126603103866,
         -0.433395394129247190799265943165784,
         -0.562757134668604683339000099272694,
         -0.679409568299024406234327365114874,
         -0.780817726586416897063717578345042,
         -0.865063366688984510732096688423493,
         -0.930157491355708226001207180059508,
         -0.973906528517171720077964012084452,
         -0.995657163025808080735527280689003)

    # 10-point weights
    w = (0.066671344308688137593568809893332,
         0.149451349150580593145776339657697,
         0.219086362515982043995534934228163,
         0.269266719309996355091226921569469,
         0.295524224714752870173892994651338,
         0.295524224714752870173892994651338,
         0.269266719309996355091226921569469,
         0.219086362515982043995534934228163,
         0.149451349150580593145776339657697,
         0.066671344308688137593568809893332)

    # 21-point weights
    v = (0.011694638867371874278064396062192,
         0.032558162307964727478818972459390,
         0.054755896574351996031381300244580,
         0.075039674810919952767043140916190,
         0.093125454583697605535065465083366,
         0.109387158802297641899210590325805,
         0.123491976262065851077958109831074,
         0.134709217311473325928054001771707,
         0.142775938577060080797094273138717,
         0.147739104901338491374841515972068,
         0.149445554002916905664936468389821,
         0.147739104901338491374841515972068,
         0.142775938577060080797094273138717,
         0.134709217311473325928054001771707,
         0.123491976262065851077958109831074,
         0.109387158802297641899210590325805,
         0.093125454583697605535065465083366,
         0.075039674810919952767043140916190,
         0.054755896574351996031381300244580,
         0.032558162307964727478818972459390,
         0.011694638867371874278064396062192)

    return _quadrature_gk(a, b, f, norm_func, x, w, v)


_quadrature_gk21.num_eval = 21
+
+
def _quadrature_gk15(a, b, f, norm_func):
    """
    Gauss-Kronrod 15 quadrature with error estimate
    """
    # Gauss-Kronrod points
    # (ordered so that the odd-indexed entries are the 7 Gauss nodes)
    x = (0.991455371120812639206854697526329,
         0.949107912342758524526189684047851,
         0.864864423359769072789712788640926,
         0.741531185599394439863864773280788,
         0.586087235467691130294144838258730,
         0.405845151377397166906606412076961,
         0.207784955007898467600689403773245,
         0.000000000000000000000000000000000,
         -0.207784955007898467600689403773245,
         -0.405845151377397166906606412076961,
         -0.586087235467691130294144838258730,
         -0.741531185599394439863864773280788,
         -0.864864423359769072789712788640926,
         -0.949107912342758524526189684047851,
         -0.991455371120812639206854697526329)

    # 7-point weights
    w = (0.129484966168869693270611432679082,
         0.279705391489276667901467771423780,
         0.381830050505118944950369775488975,
         0.417959183673469387755102040816327,
         0.381830050505118944950369775488975,
         0.279705391489276667901467771423780,
         0.129484966168869693270611432679082)

    # 15-point weights
    v = (0.022935322010529224963732008058970,
         0.063092092629978553290700663189204,
         0.104790010322250183839876322541518,
         0.140653259715525918745189590510238,
         0.169004726639267902826583426598550,
         0.190350578064785409913256402421014,
         0.204432940075298892414161999234649,
         0.209482141084727828012999174891714,
         0.204432940075298892414161999234649,
         0.190350578064785409913256402421014,
         0.169004726639267902826583426598550,
         0.140653259715525918745189590510238,
         0.104790010322250183839876322541518,
         0.063092092629978553290700663189204,
         0.022935322010529224963732008058970)

    return _quadrature_gk(a, b, f, norm_func, x, w, v)


_quadrature_gk15.num_eval = 15
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quadpack.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quadpack.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..6dfe66b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quadpack.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quadrature.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quadrature.py
new file mode 100644
index 0000000..f87e204
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_quadrature.py
@@ -0,0 +1,1007 @@
+import functools
+import numpy as np
+import math
+import types
+import warnings
+
+# trapezoid is a public function for scipy.integrate,
+# even though it's actually a NumPy function.
+from numpy import trapz as trapezoid
+from scipy.special import roots_legendre
+from scipy.special import gammaln
+
+__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
+ 'trapezoid', 'trapz', 'simps', 'simpson',
+ 'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
+ 'AccuracyWarning']
+
+
+# Make See Also linking for our local copy work properly
def _copy_func(f):
    """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
    # Rebuild a function object sharing f's code, globals, defaults and
    # closure, then copy over metadata (__doc__, __name__, ...) so the
    # copy's docstring can be edited without touching the original.
    clone = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
                               argdefs=f.__defaults__, closure=f.__closure__)
    clone = functools.update_wrapper(clone, f)
    clone.__kwdefaults__ = f.__kwdefaults__
    return clone
+
+
# Make a module-local copy of numpy's trapezoid so the docstring edit
# below does not mutate the numpy function itself.
trapezoid = _copy_func(trapezoid)
if trapezoid.__doc__:
    trapezoid.__doc__ = trapezoid.__doc__.replace(
        'sum, cumsum', 'numpy.cumsum')
+
+
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
def trapz(y, x=None, dx=1.0, axis=-1):
    """An alias of `trapezoid`.

    `trapz` is kept for backwards compatibility. For new code, prefer
    `trapezoid` instead.
    """
    return trapezoid(y, x=x, dx=dx, axis=axis)
+
+
class AccuracyWarning(Warning):
    # Emitted when an adaptive routine stops before reaching the
    # requested tolerance (see `quadrature` below).
    pass
+
+
def _cached_roots_legendre(n):
    """
    Cache roots_legendre results to speed up calls of the fixed_quad
    function.
    """
    try:
        return _cached_roots_legendre.cache[n]
    except KeyError:
        pass
    # First request for this order: compute and memoize.
    _cached_roots_legendre.cache[n] = roots_legendre(n)
    return _cached_roots_legendre.cache[n]


# Unbounded cache keyed on quadrature order.
_cached_roots_legendre.cache = dict()
+
+
def fixed_quad(func, a, b, args=(), n=5):
    """
    Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature of
    order `n`.

    Parameters
    ----------
    func : callable
        A Python function or method to integrate (must accept vector inputs).
        If integrating a vector-valued function, the returned array must have
        shape ``(..., len(x))``.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function, if any.
    n : int, optional
        Order of quadrature integration. Default is 5.

    Returns
    -------
    val : float
        Gaussian quadrature approximation to the integral
    none : None
        Statically returned value of None

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    dblquad : double integrals
    tplquad : triple integrals
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    romb : integrators for sampled data
    simpson : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data
    ode : ODE integrator
    odeint : ODE integrator

    Examples
    --------
    >>> from scipy import integrate
    >>> f = lambda x: x**8
    >>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
    (0.1110884353741496, None)
    >>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
    (0.11111111111111102, None)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111

    >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
    (0.9999999771971152, None)
    >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
    (1.000000000039565, None)
    >>> np.sin(np.pi/2)-np.sin(0)  # analytical result
    1.0

    """
    # Legendre nodes/weights on [-1, 1]; cached per order.
    x, w = _cached_roots_legendre(n)
    x = np.real(x)
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    # Affine map of nodes from [-1, 1] to [a, b].
    y = (b - a) * (x + 1) / 2.0 + a
    val = (b - a) / 2.0 * np.sum(w * func(y, *args), axis=-1)
    return val, None
+
+
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    This is an internal utility function used by `romberg` and
    `quadrature` to create a vectorized version of a function.

    If `vec_func` is True, the function `func` is assumed to take vector
    arguments.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if the function func takes vector arguments.

    Returns
    -------
    vfunc : callable
        A function that will take a vector argument and return the
        result.

    """
    if vec_func:
        # func already accepts array arguments; just bind the extras.
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Call with the first point to discover the output dtype,
        # then fill an array of that dtype elementwise.
        y0 = func(x[0], *args)
        n = len(x)
        dtype = getattr(y0, 'dtype', type(y0))
        output = np.empty((n,), dtype=dtype)
        output[0] = y0
        for i in range(1, n):
            output[i] = func(x[i], *args)
        return output
    return vfunc
+
+
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature
    with absolute tolerance `tol`.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    See also
    --------
    romberg: adaptive Romberg quadrature
    fixed_quad: fixed-order Gaussian quadrature
    quad: adaptive quadrature using QUADPACK
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrator for sampled data
    simpson: integrator for sampled data
    cumulative_trapezoid: cumulative integration for sampled data
    ode: ODE integrator
    odeint: ODE integrator

    Examples
    --------
    >>> from scipy import integrate
    >>> f = lambda x: x**8
    >>> integrate.quadrature(f, 0.0, 1.0)
    (0.11111111111111106, 4.163336342344337e-17)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111

    >>> integrate.quadrature(np.cos, 0.0, np.pi/2)
    (0.9999999999999536, 3.9611425250996035e-11)
    >>> np.sin(np.pi/2)-np.sin(0)  # analytical result
    1.0

    """
    if not isinstance(args, tuple):
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = np.inf
    err = np.inf
    maxiter = max(miniter+1, maxiter)
    # Increase the quadrature order until successive estimates agree.
    for n in range(miniter, maxiter+1):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval-val)
        val = newval

        if err < tol or err < rtol*abs(val):
            break
    else:
        # for-else: loop exhausted without meeting tolerance.
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning)
    return val, err
+
+
def tupleset(t, i, value):
    """Return a copy of tuple `t` with element `i` replaced by `value`."""
    items = list(t)
    items[i] = value
    return tuple(items)
+
+
# Note: alias kept for backwards compatibility. Rename was done
# because cumtrapz is a slur in colloquial English (see gh-12924).
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
    """An alias of `cumulative_trapezoid`.

    `cumtrapz` is kept for backwards compatibility. For new code, prefer
    `cumulative_trapezoid` instead.
    """
    return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
+
+
def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : float, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, insert this value at the beginning of the returned result.
        Typically this value should be 0. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum, numpy.cumprod
    quad: adaptive quadrature using QUADPACK
    romberg: adaptive Romberg quadrature
    quadrature: adaptive Gaussian quadrature
    fixed_quad: fixed-order Gaussian quadrature
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrators for sampled data
    ode: ODE integrators
    odeint: ODE integrators

    Examples
    --------
    >>> from scipy import integrate
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x
    >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
    >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
    >>> plt.show()

    """
    y = np.asarray(y)
    if x is None:
        d = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            d = np.diff(x)
            # reshape to correct shape so spacings broadcast against y
            shape = [1] * y.ndim
            shape[axis] = -1
            d = d.reshape(shape)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        else:
            d = np.diff(x, axis=axis)

        if d.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    nd = len(y.shape)
    # Pairwise trapezoid areas, accumulated along the integration axis.
    slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
    slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
    res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)

    if initial is not None:
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")

        # Prepend the initial value so the result matches y's shape.
        shape = list(res.shape)
        shape[axis] = 1
        res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
                             axis=axis)

    return res
+
+
def _basic_simpson(y, start, stop, x, dx, axis):
    """Core composite Simpson's rule over sample triples
    ``y[start::2], y[start+1::2], y[start+2::2]`` along `axis`;
    `x` (if given) supplies possibly non-uniform spacings, else `dx`."""
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    slice_all = (slice(None),)*nd
    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even-spaced Simpson's rule.
        result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
                        axis=axis)
    else:
        # Account for possibly different spacings.
        # Simpson's rule changes a bit.
        h = np.diff(x, axis=axis)
        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0]
        h1 = h[sl1]
        hsum = h0 + h1
        hprod = h0 * h1
        h0divh1 = h0 / h1
        # Non-uniform Simpson weights derived from the two interval widths.
        tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
                          y[slice1]*hsum*hsum/hprod +
                          y[slice2]*(2-h0divh1))
        result = np.sum(tmp, axis=axis)
    return result
+
+
# Note: alias kept for backwards compatibility. simps was renamed to simpson
# because the former is a slur in colloquial English (see gh-12924).
def simps(y, x=None, dx=1, axis=-1, even='avg'):
    """An alias of `simpson`.

    `simps` is kept for backwards compatibility. For new code, prefer
    `simpson` instead.
    """
    return simpson(y, x=x, dx=dx, axis=axis, even=even)
+
+
def simpson(y, x=None, dx=1, axis=-1, even='avg'):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals. The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : int, optional
        Spacing of integration points along axis of `x`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results:1) use the first N-2 intervals with
            a trapezoidal rule on the last interval and 2) use the last
            N-2 intervals with a trapezoidal rule on the first interval.

        'first' : Use Simpson's rule for the first N-2 intervals with
            a trapezoidal rule on the last interval.

        'last' : Use Simpson's rule for the last N-2 intervals with a
            trapezoidal rule on the first interval.

    See Also
    --------
    quad: adaptive quadrature using QUADPACK
    romberg: adaptive Romberg quadrature
    quadrature: adaptive Gaussian quadrature
    fixed_quad: fixed-order Gaussian quadrature
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrators for sampled data
    cumulative_trapezoid: cumulative integration for sampled data
    ode: ODE integrators
    odeint: ODE integrators

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    Examples
    --------
    >>> from scipy import integrate
    >>> x = np.arange(0, 10)
    >>> y = np.arange(0, 10)

    >>> integrate.simpson(y, x)
    40.5

    >>> y = np.power(x, 3)
    >>> integrate.simpson(y, x)
    1642.5
    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
    1640.25

    >>> integrate.simpson(y, x, even='first')
    1644.5

    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Broadcast 1-D x against y; restore its shape before returning.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    if N % 2 == 0:
        # Even sample count: odd interval count; handle one end with the
        # trapezoid rule per the `even` parameter.
        val = 0.0
        result = 0.0
        slice1 = (slice(None),)*nd
        slice2 = (slice(None),)*nd
        if even not in ['avg', 'last', 'first']:
            raise ValueError("Parameter 'even' must be "
                             "'avg', 'last', or 'first'.")
        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice1, axis, -1)
            slice2 = tupleset(slice2, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simpson(y, 0, N-3, x, dx, axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice1, axis, 0)
            slice2 = tupleset(slice2, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simpson(y, 1, N-2, x, dx, axis)
        if even == 'avg':
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        result = _basic_simpson(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
+
+
+def romb(y, dx=1.0, axis=-1, show=False):
+    """
+    Romberg integration using samples of a function.
+
+    Parameters
+    ----------
+    y : array_like
+        A vector of ``2**k + 1`` equally-spaced samples of a function.
+    dx : float, optional
+        The sample spacing. Default is 1.
+    axis : int, optional
+        The axis along which to integrate. Default is -1 (last axis).
+    show : bool, optional
+        When `y` is a single 1-D array, then if this argument is True
+        print the table showing Richardson extrapolation from the
+        samples. Default is False.
+
+    Returns
+    -------
+    romb : ndarray
+        The integrated result for `axis`.
+
+    See also
+    --------
+    quad : adaptive quadrature using QUADPACK
+    romberg : adaptive Romberg quadrature
+    quadrature : adaptive Gaussian quadrature
+    fixed_quad : fixed-order Gaussian quadrature
+    dblquad : double integrals
+    tplquad : triple integrals
+    simpson : integrators for sampled data
+    cumulative_trapezoid : cumulative integration for sampled data
+    ode : ODE integrators
+    odeint : ODE integrators
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> x = np.arange(10, 14.25, 0.25)
+    >>> y = np.arange(3, 12)
+
+    >>> integrate.romb(y)
+    56.0
+
+    >>> y = np.sin(np.power(x, 2.5))
+    >>> integrate.romb(y)
+    -0.742561336672229
+
+    >>> integrate.romb(y, show=True)
+    Richardson Extrapolation Table for Romberg Integration
+    ====================================================================
+    -0.81576
+     4.63862  6.45674
+    -1.10581 -3.02062 -3.65245
+    -2.57379 -3.06311 -3.06595 -3.05664
+    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
+    ====================================================================
+    -0.742561336672229
+    """
+    y = np.asarray(y)
+    nd = len(y.shape)
+    Nsamps = y.shape[axis]
+    Ninterv = Nsamps-1
+    n = 1
+    k = 0
+    # Find k such that Ninterv == 2**k; romb requires exactly 2**k + 1 samples.
+    while n < Ninterv:
+        n <<= 1
+        k += 1
+    if n != Ninterv:
+        raise ValueError("Number of samples must be one plus a "
+                         "non-negative power of 2.")
+
+    # R[(i, j)] is the Richardson extrapolation table: column 0 holds the
+    # trapezoidal estimates at successively halved step sizes.
+    R = {}
+    slice_all = (slice(None),) * nd
+    slice0 = tupleset(slice_all, axis, 0)
+    slicem1 = tupleset(slice_all, axis, -1)
+    h = Ninterv * np.asarray(dx, dtype=float)
+    # Single-interval trapezoidal estimate over the whole range.
+    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
+    slice_R = slice_all
+    start = stop = step = Ninterv
+    for i in range(1, k+1):
+        start >>= 1
+        # Select only the sample points that are NEW at this refinement level,
+        # so each ordinate is summed exactly once across all levels.
+        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
+        step >>= 1
+        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
+        for j in range(1, i+1):
+            prev = R[(i, j-1)]
+            # Richardson extrapolation; the divisor (1 << 2*j) - 1 == 4**j - 1.
+            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
+        h /= 2.0
+
+    if show:
+        if not np.isscalar(R[(0, 0)]):
+            print("*** Printing table only supported for integrals" +
+                  " of a single data set.")
+        else:
+            try:
+                # `show` may be a (precision, width) sequence; a plain True
+                # (not subscriptable) falls back to the defaults below.
+                precis = show[0]
+            except (TypeError, IndexError):
+                precis = 5
+            try:
+                width = show[1]
+            except (TypeError, IndexError):
+                width = 8
+            formstr = "%%%d.%df" % (width, precis)
+
+            title = "Richardson Extrapolation Table for Romberg Integration"
+            print("", title.center(68), "=" * 68, sep="\n", end="\n")
+            for i in range(k+1):
+                for j in range(i+1):
+                    print(formstr % R[(i, j)], end=" ")
+                print()
+            print("=" * 68)
+            print()
+
+    # The most-extrapolated (highest-order) estimate.
+    return R[(k, k)]
+
+# Romberg quadratures for numeric integration.
+#
+# Written by Scott M. Ransom
+# last revision: 14 Nov 98
+#
+# Cosmetic changes by Konrad Hinsen
+# last revision: 1999-7-21
+#
+# Adapted to SciPy by Travis Oliphant
+# last revision: Dec 2001
+
+
+def _difftrap(function, interval, numtraps):
+    """
+    Perform part of the trapezoidal rule to integrate a function.
+    Assume that we had called difftrap with all lower powers-of-2
+    starting with 1. Calling difftrap only returns the summation
+    of the new ordinates. It does _not_ multiply by the width
+    of the trapezoids. This must be performed by the caller.
+    'function' is the function to evaluate (must accept vector arguments).
+    'interval' is a sequence with lower and upper limits
+    of integration.
+    'numtraps' is the number of trapezoids to use (must be a
+    power-of-2).
+    """
+    if numtraps <= 0:
+        raise ValueError("numtraps must be > 0 in difftrap().")
+    elif numtraps == 1:
+        # Base case: endpoint average for a single trapezoid.
+        return 0.5*(function(interval[0])+function(interval[1]))
+    else:
+        # Only the midpoints that are new at this refinement level are
+        # evaluated; the caller accumulates them with earlier levels' sums.
+        # NOTE(review): true division gives a float numtosum; np.arange
+        # accepts it, but an integer (numtraps // 2) would be cleaner.
+        numtosum = numtraps/2
+        h = float(interval[1]-interval[0])/numtosum
+        lox = interval[0] + 0.5 * h
+        points = lox + h * np.arange(numtosum)
+        s = np.sum(function(points), axis=0)
+        return s
+
+
+def _romberg_diff(b, c, k):
+    """
+    Compute the differences for the Romberg quadrature corrections.
+    See Forman Acton's "Real Computing Made Real," p 143.
+    """
+    # Richardson extrapolation of order k: combine the coarse estimate `b`
+    # and the fine estimate `c` to cancel the leading error term.
+    tmp = 4.0**k
+    return (tmp * c - b)/(tmp - 1.0)
+
+
+def _printresmat(function, interval, resmat):
+    # Print the Romberg result matrix.
+    # `resmat` is a lower-triangular list of lists: row i has i+1 entries,
+    # column 0 being the trapezoidal estimate with 2**i intervals.
+    i = j = 0
+    print('Romberg integration of', repr(function), end=' ')
+    print('from', interval)
+    print('')
+    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
+    for i in range(len(resmat)):
+        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
+        for j in range(i+1):
+            print('%9f' % (resmat[i][j]), end=' ')
+        print('')
+    print('')
+    # i, j retain their final loop values: the bottom-right (best) estimate.
+    print('The final result is', resmat[i][j], end=' ')
+    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
+
+
+def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
+            divmax=10, vec_func=False):
+    """
+    Romberg integration of a callable function or method.
+
+    Returns the integral of `function` (a function of one variable)
+    over the interval (`a`, `b`).
+
+    If `show` is 1, the triangular array of the intermediate results
+    will be printed. If `vec_func` is True (default is False), then
+    `function` is assumed to support vector arguments.
+
+    Parameters
+    ----------
+    function : callable
+        Function to be integrated.
+    a : float
+        Lower limit of integration.
+    b : float
+        Upper limit of integration.
+
+    Returns
+    -------
+    results : float
+        Result of the integration.
+
+    Other Parameters
+    ----------------
+    args : tuple, optional
+        Extra arguments to pass to function. Each element of `args` will
+        be passed as a single argument to `func`. Default is to pass no
+        extra arguments.
+    tol, rtol : float, optional
+        The desired absolute and relative tolerances. Defaults are 1.48e-8.
+    show : bool, optional
+        Whether to print the results. Default is False.
+    divmax : int, optional
+        Maximum order of extrapolation. Default is 10.
+    vec_func : bool, optional
+        Whether `func` handles arrays as arguments (i.e., whether it is a
+        "vector" function). Default is False.
+
+    See Also
+    --------
+    fixed_quad : Fixed-order Gaussian quadrature.
+    quad : Adaptive quadrature using QUADPACK.
+    dblquad : Double integrals.
+    tplquad : Triple integrals.
+    romb : Integrators for sampled data.
+    simpson : Integrators for sampled data.
+    cumulative_trapezoid : Cumulative integration for sampled data.
+    ode : ODE integrator.
+    odeint : ODE integrator.
+
+    References
+    ----------
+    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
+
+    Examples
+    --------
+    Integrate a gaussian from 0 to 1 and compare to the error function.
+
+    >>> from scipy import integrate
+    >>> from scipy.special import erf
+    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
+    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
+    Romberg integration of <scipy.integrate.quadrature.vectorize1 object at 0x...> from [0, 1]
+
+    ::
+
+       Steps  StepSize  Results
+           1  1.000000  0.385872
+           2  0.500000  0.412631  0.421551
+           4  0.250000  0.419184  0.421368  0.421356
+           8  0.125000  0.420810  0.421352  0.421350  0.421350
+          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
+          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350
+
+    The final result is 0.421350396475 after 33 function evaluations.
+
+    >>> print("%g %g" % (2*result, erf(1)))
+    0.842701 0.842701
+
+    """
+    if np.isinf(a) or np.isinf(b):
+        raise ValueError("Romberg integration only available "
+                         "for finite limits.")
+    vfunc = vectorize1(function, args, vec_func=vec_func)
+    n = 1
+    interval = [a, b]
+    intrange = b - a
+    # `ordsum` accumulates the new-ordinate sums returned by _difftrap across
+    # refinement levels; multiplying by intrange/n gives the trapezoid value.
+    ordsum = _difftrap(vfunc, interval, n)
+    result = intrange * ordsum
+    resmat = [[result]]
+    err = np.inf
+    last_row = resmat[0]
+    for i in range(1, divmax+1):
+        n *= 2
+        ordsum += _difftrap(vfunc, interval, n)
+        # Column 0: trapezoidal estimate at the current (halved) step size.
+        row = [intrange * ordsum / n]
+        # Remaining columns: Richardson extrapolation against the prior row.
+        for k in range(i):
+            row.append(_romberg_diff(last_row[k], row[k], k+1))
+        result = row[i]
+        lastresult = last_row[i-1]
+        if show:
+            # The table is only kept when it will be printed.
+            resmat.append(row)
+        err = abs(result - lastresult)
+        # Converged if either the absolute or the relative tolerance is met.
+        if err < tol or err < rtol * abs(result):
+            break
+        last_row = row
+    else:
+        # Loop exhausted divmax levels without meeting the tolerances.
+        warnings.warn(
+            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
+            AccuracyWarning)
+
+    if show:
+        _printresmat(vfunc, interval, resmat)
+    return result
+
+
+# Coefficients for Newton-Cotes quadrature
+#
+# These are the points being used
+# to construct the local interpolating polynomial
+# a are the weights for Newton-Cotes integration
+# B is the error coefficient.
+# error in these coefficients grows as N gets larger.
+# or as samples are closer and closer together
+
+# You can use maxima to find these rational coefficients
+# for equally spaced data using the commands
+# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
+# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
+# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
+# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
+#
+# pre-computed for equally-spaced weights
+#
+# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
+#
+# a = num_a*array(int_a)/den_a
+# B = num_B*1.0 / den_B
+#
+# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
+# where k = N // 2
+#
+_builtincoeffs = {
+ 1: (1,2,[1,1],-1,12),
+ 2: (1,3,[1,4,1],-1,90),
+ 3: (3,8,[1,3,3,1],-3,80),
+ 4: (2,45,[7,32,12,32,7],-8,945),
+ 5: (5,288,[19,75,50,50,75,19],-275,12096),
+ 6: (1,140,[41,216,27,272,27,216,41],-9,1400),
+ 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
+ 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
+ -2368,467775),
+ 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
+ 15741,2857], -4671, 394240),
+ 10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
+ -260550,272400,-48525,106300,16067],
+ -673175, 163459296),
+ 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
+ 15493566,15493566,-9595542,25226685,-3237113,
+ 13486539,2171465], -2224234463, 237758976000),
+ 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
+ 87516288,-87797136,87516288,-51491295,35725120,
+ -7587864,9903168,1364651], -3012, 875875),
+ 13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
+ 156074417954,-151659573325,206683437987,
+ -43111992612,-43111992612,206683437987,
+ -151659573325,156074417954,-31268252574,
+ 56280729661,8181904909], -2639651053,
+ 344881152000),
+ 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
+ -6625093363,12630121616,-16802270373,19534438464,
+ -16802270373,12630121616,-6625093363,3501442784,
+ -770720657,710986864,90241897], -3740727473,
+ 1275983280000)
+ }
+
+
+def newton_cotes(rn, equal=0):
+    r"""
+    Return weights and error coefficient for Newton-Cotes integration.
+
+    Suppose we have (N+1) samples of f at the positions
+    x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
+    integral between x_0 and x_N is:
+
+    :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+    + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
+
+    where :math:`\xi \in [x_0,x_N]`
+    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
+
+    If the samples are equally-spaced and N is even, then the error
+    term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
+
+    Parameters
+    ----------
+    rn : int
+        The integer order for equally-spaced data or the relative positions of
+        the samples with the first sample at 0 and the last at N, where N+1 is
+        the length of `rn`. N is the order of the Newton-Cotes integration.
+    equal : int, optional
+        Set to 1 to enforce equally spaced data.
+
+    Returns
+    -------
+    an : ndarray
+        1-D array of weights to apply to the function at the provided sample
+        positions.
+    B : float
+        Error coefficient.
+
+    Examples
+    --------
+    Compute the integral of sin(x) in [0, :math:`\pi`]:
+
+    >>> from scipy.integrate import newton_cotes
+    >>> def f(x):
+    ...     return np.sin(x)
+    >>> a = 0
+    >>> b = np.pi
+    >>> exact = 2
+    >>> for N in [2, 4, 6, 8, 10]:
+    ...     x = np.linspace(a, b, N + 1)
+    ...     an, B = newton_cotes(N, 1)
+    ...     dx = (b - a) / N
+    ...     quad = dx * np.sum(an * f(x))
+    ...     error = abs(quad - exact)
+    ...     print('{:2d}  {:10.9f}  {:.5e}'.format(N, quad, error))
+    ...
+     2  2.094395102  9.43951e-02
+     4  1.998570732  1.42927e-03
+     6  2.000017814  1.78136e-05
+     8  1.999999835  1.64725e-07
+    10  2.000000001  1.14677e-09
+
+    Notes
+    -----
+    Normally, the Newton-Cotes rules are used on smaller integration
+    regions and a composite rule is used to return the total integral.
+
+    """
+    try:
+        N = len(rn)-1
+        if equal:
+            rn = np.arange(N+1)
+        elif np.all(np.diff(rn) == 1):
+            # Positions 0, 1, ..., N with unit steps are equally spaced.
+            equal = 1
+    except Exception:
+        # rn has no len(): treat it as the integer order N itself.
+        N = rn
+        rn = np.arange(N+1)
+        equal = 1
+
+    if equal and N in _builtincoeffs:
+        # Use the exact pre-computed rational coefficients.
+        na, da, vi, nb, db = _builtincoeffs[N]
+        an = na * np.array(vi, dtype=float) / da
+        return an, float(nb)/db
+
+    if (rn[0] != 0) or (rn[-1] != N):
+        raise ValueError("The sample positions must start at 0"
+                         " and end at N")
+    yi = rn / float(N)
+    # Map the sample positions onto [-1, 1].
+    ti = 2 * yi - 1
+    nvec = np.arange(N+1)
+    # Vandermonde-style matrix of powers of ti (row m is ti**m).
+    C = ti ** nvec[:, np.newaxis]
+    Cinv = np.linalg.inv(C)
+    # improve precision of result
+    # (two Newton-Schulz iterations refine the numerical inverse)
+    for i in range(2):
+        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
+    # Exact integrals of t**(2m) over [-1, 1]; odd powers vanish by symmetry.
+    vec = 2.0 / (nvec[::2]+1)
+    ai = Cinv[:, ::2].dot(vec) * (N / 2.)
+
+    if (N % 2 == 0) and equal:
+        # Even-order equally-spaced rule gains one extra order of accuracy.
+        BN = N/(N+3.)
+        power = N+2
+    else:
+        BN = N/(N+2.)
+        power = N+1
+
+    BN = BN - np.dot(yi**power, ai)
+    p1 = power+1
+    # fac = N**power / (power+1)!  computed in log space to avoid overflow.
+    fac = power*math.log(N) - gammaln(p1)
+    fac = math.exp(fac)
+    return ai, BN*fac
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_test_multivariate.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_test_multivariate.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..5ea831c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_test_multivariate.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_test_odeint_banded.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_test_odeint_banded.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..c9139bd
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/_test_odeint_banded.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/lsoda.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/lsoda.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..4738c87
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/lsoda.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/odepack.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/odepack.py
new file mode 100644
index 0000000..8119d2a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/odepack.py
@@ -0,0 +1,259 @@
+# Author: Travis Oliphant
+
+__all__ = ['odeint']
+
+import numpy as np
+from . import _odepack
+from copy import copy
+import warnings
+
+
+class ODEintWarning(Warning):
+    """Warning raised during the execution of `odeint`."""
+    pass
+
+
+# Mapping of lsoda's integer status (ISTATE) return codes to human-readable
+# messages; negative codes are failures and trigger an ODEintWarning.
+_msgs = {2: "Integration successful.",
+         1: "Nothing was done; the integration time was 0.",
+         -1: "Excess work done on this call (perhaps wrong Dfun type).",
+         -2: "Excess accuracy requested (tolerances too small).",
+         -3: "Illegal input detected (internal error).",
+         -4: "Repeated error test failures (internal error).",
+         -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
+         -6: "Error weight became zero during problem.",
+         -7: "Internal workspace insufficient to finish (internal error).",
+         -8: "Run terminated (internal error)."
+         }
+
+
+def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
+           ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
+           hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
+           mxords=5, printmessg=0, tfirst=False):
+    """
+    Integrate a system of ordinary differential equations.
+
+    .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
+              differential equation.
+
+    Solve a system of ordinary differential equations using lsoda from the
+    FORTRAN library odepack.
+
+    Solves the initial value problem for stiff or non-stiff systems
+    of first order ode-s::
+
+        dy/dt = func(y, t, ...)  [or func(t, y, ...)]
+
+    where y can be a vector.
+
+    .. note:: By default, the required order of the first two arguments of
+              `func` are in the opposite order of the arguments in the system
+              definition function used by the `scipy.integrate.ode` class and
+              the function `scipy.integrate.solve_ivp`. To use a function with
+              the signature ``func(t, y, ...)``, the argument `tfirst` must be
+              set to ``True``.
+
+    Parameters
+    ----------
+    func : callable(y, t, ...) or callable(t, y, ...)
+        Computes the derivative of y at t.
+        If the signature is ``callable(t, y, ...)``, then the argument
+        `tfirst` must be set ``True``.
+    y0 : array
+        Initial condition on y (can be a vector).
+    t : array
+        A sequence of time points for which to solve for y. The initial
+        value point should be the first element of this sequence.
+        This sequence must be monotonically increasing or monotonically
+        decreasing; repeated values are allowed.
+    args : tuple, optional
+        Extra arguments to pass to function.
+    Dfun : callable(y, t, ...) or callable(t, y, ...)
+        Gradient (Jacobian) of `func`.
+        If the signature is ``callable(t, y, ...)``, then the argument
+        `tfirst` must be set ``True``.
+    col_deriv : bool, optional
+        True if `Dfun` defines derivatives down columns (faster),
+        otherwise `Dfun` should define derivatives across rows.
+    full_output : bool, optional
+        True if to return a dictionary of optional outputs as the second output
+    printmessg : bool, optional
+        Whether to print the convergence message
+    tfirst: bool, optional
+        If True, the first two arguments of `func` (and `Dfun`, if given)
+        must ``t, y`` instead of the default ``y, t``.
+
+        .. versionadded:: 1.1.0
+
+    Returns
+    -------
+    y : array, shape (len(t), len(y0))
+        Array containing the value of y for each desired time in t,
+        with the initial value `y0` in the first row.
+    infodict : dict, only returned if full_output == True
+        Dictionary containing additional output information
+
+        ======= ============================================================
+        key     meaning
+        ======= ============================================================
+        'hu'    vector of step sizes successfully used for each time step
+        'tcur'  vector with the value of t reached for each time step
+                (will always be at least as large as the input times)
+        'tolsf' vector of tolerance scale factors, greater than 1.0,
+                computed when a request for too much accuracy was detected
+        'tsw'   value of t at the time of the last method switch
+                (given for each time step)
+        'nst'   cumulative number of time steps
+        'nfe'   cumulative number of function evaluations for each time step
+        'nje'   cumulative number of jacobian evaluations for each time step
+        'nqu'   a vector of method orders for each successful step
+        'imxer' index of the component of largest magnitude in the
+                weighted local error vector (e / ewt) on an error return, -1
+                otherwise
+        'lenrw' the length of the double work array required
+        'leniw' the length of integer work array required
+        'mused' a vector of method indicators for each successful time step:
+                1: adams (nonstiff), 2: bdf (stiff)
+        ======= ============================================================
+
+    Other Parameters
+    ----------------
+    ml, mu : int, optional
+        If either of these are not None or non-negative, then the
+        Jacobian is assumed to be banded. These give the number of
+        lower and upper non-zero diagonals in this banded matrix.
+        For the banded case, `Dfun` should return a matrix whose
+        rows contain the non-zero bands (starting with the lowest diagonal).
+        Thus, the return matrix `jac` from `Dfun` should have shape
+        ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
+        The data in `jac` must be stored such that ``jac[i - j + mu, j]``
+        holds the derivative of the `i`th equation with respect to the `j`th
+        state variable. If `col_deriv` is True, the transpose of this
+        `jac` must be returned.
+    rtol, atol : float, optional
+        The input parameters `rtol` and `atol` determine the error
+        control performed by the solver. The solver will control the
+        vector, e, of estimated local errors in y, according to an
+        inequality of the form ``max-norm of (e / ewt) <= 1``,
+        where ewt is a vector of positive error weights computed as
+        ``ewt = rtol * abs(y) + atol``.
+        rtol and atol can be either vectors the same length as y or scalars.
+        Defaults to 1.49012e-8.
+    tcrit : ndarray, optional
+        Vector of critical points (e.g., singularities) where integration
+        care should be taken.
+    h0 : float, (0: solver-determined), optional
+        The step size to be attempted on the first step.
+    hmax : float, (0: solver-determined), optional
+        The maximum absolute step size allowed.
+    hmin : float, (0: solver-determined), optional
+        The minimum absolute step size allowed.
+    ixpr : bool, optional
+        Whether to generate extra printing at method switches.
+    mxstep : int, (0: solver-determined), optional
+        Maximum number of (internally defined) steps allowed for each
+        integration point in t.
+    mxhnil : int, (0: solver-determined), optional
+        Maximum number of messages printed.
+    mxordn : int, (0: solver-determined), optional
+        Maximum order to be allowed for the non-stiff (Adams) method.
+    mxords : int, (0: solver-determined), optional
+        Maximum order to be allowed for the stiff (BDF) method.
+
+    See Also
+    --------
+    solve_ivp : solve an initial value problem for a system of ODEs
+    ode : a more object-oriented integrator based on VODE
+    quad : for finding the area under a curve
+
+    Examples
+    --------
+    The second order differential equation for the angle `theta` of a
+    pendulum acted on by gravity with friction can be written::
+
+        theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
+
+    where `b` and `c` are positive constants, and a prime (') denotes a
+    derivative. To solve this equation with `odeint`, we must first convert
+    it to a system of first order equations. By defining the angular
+    velocity ``omega(t) = theta'(t)``, we obtain the system::
+
+        theta'(t) = omega(t)
+        omega'(t) = -b*omega(t) - c*sin(theta(t))
+
+    Let `y` be the vector [`theta`, `omega`]. We implement this system
+    in Python as:
+
+    >>> def pend(y, t, b, c):
+    ...     theta, omega = y
+    ...     dydt = [omega, -b*omega - c*np.sin(theta)]
+    ...     return dydt
+    ...
+
+    We assume the constants are `b` = 0.25 and `c` = 5.0:
+
+    >>> b = 0.25
+    >>> c = 5.0
+
+    For initial conditions, we assume the pendulum is nearly vertical
+    with `theta(0)` = `pi` - 0.1, and is initially at rest, so
+    `omega(0)` = 0. Then the vector of initial conditions is
+
+    >>> y0 = [np.pi - 0.1, 0.0]
+
+    We will generate a solution at 101 evenly spaced samples in the interval
+    0 <= `t` <= 10. So our array of times is:
+
+    >>> t = np.linspace(0, 10, 101)
+
+    Call `odeint` to generate the solution. To pass the parameters
+    `b` and `c` to `pend`, we give them to `odeint` using the `args`
+    argument.
+
+    >>> from scipy.integrate import odeint
+    >>> sol = odeint(pend, y0, t, args=(b, c))
+
+    The solution is an array with shape (101, 2). The first column
+    is `theta(t)`, and the second is `omega(t)`. The following code
+    plots both components.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
+    >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
+    >>> plt.legend(loc='best')
+    >>> plt.xlabel('t')
+    >>> plt.grid()
+    >>> plt.show()
+    """
+
+    # -1 is the sentinel the wrapper uses for "not banded".
+    if ml is None:
+        ml = -1  # changed to zero inside function call
+    if mu is None:
+        mu = -1  # changed to zero inside function call
+
+    # t must be monotonic in either direction; equal consecutive values pass.
+    dt = np.diff(t)
+    if not((dt >= 0).all() or (dt <= 0).all()):
+        raise ValueError("The values in t must be monotonically increasing "
+                         "or monotonically decreasing; repeated values are "
+                         "allowed.")
+
+    # Copy so the Fortran wrapper cannot mutate the caller's arrays in place.
+    t = copy(t)
+    y0 = copy(y0)
+    output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
+                             full_output, rtol, atol, tcrit, h0, hmax, hmin,
+                             ixpr, mxstep, mxhnil, mxordn, mxords,
+                             int(bool(tfirst)))
+    # output[-1] is the lsoda ISTATE status code; see _msgs for meanings.
+    if output[-1] < 0:
+        warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
+        warnings.warn(warning_msg, ODEintWarning)
+    elif printmessg:
+        warning_msg = _msgs[output[-1]]
+        warnings.warn(warning_msg, ODEintWarning)
+
+    if full_output:
+        output[1]['message'] = _msgs[output[-1]]
+
+    # Drop the status code; return just y, or (y, infodict) if requested.
+    output = output[:-1]
+    if len(output) == 1:
+        return output[0]
+    else:
+        return output
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/quadpack.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/quadpack.py
new file mode 100644
index 0000000..4fe01aa
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/quadpack.py
@@ -0,0 +1,898 @@
+# Author: Travis Oliphant 2001
+# Author: Nathan Woods 2013 (nquad &c)
+import sys
+import warnings
+from functools import partial
+
+from . import _quadpack
+import numpy
+from numpy import Inf
+
+__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
+ 'IntegrationWarning']
+
+
+error = _quadpack.error
+
class IntegrationWarning(UserWarning):
    """Warning category emitted when a quadrature routine reports a problem."""
+
+
def quad_explain(output=None):
    """
    Print extra information about integrate.quad() parameters and returns.

    Parameters
    ----------
    output : instance with "write" method, optional
        Information about `quad` is passed to ``output.write()``.
        Default is ``sys.stdout``.

    Returns
    -------
    None

    Examples
    --------
    We can show detailed information of the `integrate.quad` function in stdout:

    >>> from scipy.integrate import quad_explain
    >>> quad_explain()

    """
    # Resolve the default at call time.  Binding ``sys.stdout`` in the
    # signature would capture the stream active at import time and ignore
    # later redirection (e.g. contextlib.redirect_stdout or test capture).
    if output is None:
        output = sys.stdout
    output.write(quad.__doc__)
+
+
+def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
+ limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
+ limlst=50):
+ """
+ Compute a definite integral.
+
+ Integrate func from `a` to `b` (possibly infinite interval) using a
+ technique from the Fortran library QUADPACK.
+
+ Parameters
+ ----------
+ func : {function, scipy.LowLevelCallable}
+ A Python function or method to integrate. If `func` takes many
+ arguments, it is integrated along the axis corresponding to the
+ first argument.
+
+ If the user desires improved integration performance, then `f` may
+ be a `scipy.LowLevelCallable` with one of the signatures::
+
+ double func(double x)
+ double func(double x, void *user_data)
+ double func(int n, double *xx)
+ double func(int n, double *xx, void *user_data)
+
+ The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
+ In the call forms with ``xx``, ``n`` is the length of the ``xx``
+ array which contains ``xx[0] == x`` and the rest of the items are
+ numbers contained in the ``args`` argument of quad.
+
+ In addition, certain ctypes call signatures are supported for
+ backward compatibility, but those should not be used in new code.
+ a : float
+ Lower limit of integration (use -numpy.inf for -infinity).
+ b : float
+ Upper limit of integration (use numpy.inf for +infinity).
+ args : tuple, optional
+ Extra arguments to pass to `func`.
+ full_output : int, optional
+ Non-zero to return a dictionary of integration information.
+ If non-zero, warning messages are also suppressed and the
+ message is appended to the output tuple.
+
+ Returns
+ -------
+ y : float
+ The integral of func from `a` to `b`.
+ abserr : float
+ An estimate of the absolute error in the result.
+ infodict : dict
+ A dictionary containing additional information.
+ Run scipy.integrate.quad_explain() for more information.
+ message
+ A convergence message.
+ explain
+ Appended only with 'cos' or 'sin' weighting and infinite
+ integration limits, it contains an explanation of the codes in
+ infodict['ierlst']
+
+ Other Parameters
+ ----------------
+ epsabs : float or int, optional
+ Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
+ an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
+ where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
+ numerical approximation. See `epsrel` below.
+ epsrel : float or int, optional
+ Relative error tolerance. Default is 1.49e-8.
+ If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
+ and ``50 * (machine epsilon)``. See `epsabs` above.
+ limit : float or int, optional
+ An upper bound on the number of subintervals used in the adaptive
+ algorithm.
+ points : (sequence of floats,ints), optional
+ A sequence of break points in the bounded integration interval
+ where local difficulties of the integrand may occur (e.g.,
+ singularities, discontinuities). The sequence does not have
+ to be sorted. Note that this option cannot be used in conjunction
+ with ``weight``.
+ weight : float or int, optional
+ String indicating weighting function. Full explanation for this
+ and the remaining arguments can be found below.
+ wvar : optional
+ Variables for use with weighting functions.
+ wopts : optional
+ Optional input for reusing Chebyshev moments.
+ maxp1 : float or int, optional
+ An upper bound on the number of Chebyshev moments.
+ limlst : int, optional
+ Upper bound on the number of cycles (>=3) for use with a sinusoidal
+ weighting and an infinite end-point.
+
+ See Also
+ --------
+ dblquad : double integral
+ tplquad : triple integral
+ nquad : n-dimensional integrals (uses `quad` recursively)
+ fixed_quad : fixed-order Gaussian quadrature
+ quadrature : adaptive Gaussian quadrature
+ odeint : ODE integrator
+ ode : ODE integrator
+ simpson : integrator for sampled data
+ romb : integrator for sampled data
+ scipy.special : for coefficients and roots of orthogonal polynomials
+
+ Notes
+ -----
+
+ **Extra information for quad() inputs and outputs**
+
+ If full_output is non-zero, then the third output argument
+ (infodict) is a dictionary with entries as tabulated below. For
+ infinite limits, the range is transformed to (0,1) and the
+ optional outputs are given with respect to this transformed range.
+ Let M be the input argument limit and let K be infodict['last'].
+ The entries are:
+
+ 'neval'
+ The number of function evaluations.
+ 'last'
+ The number, K, of subintervals produced in the subdivision process.
+ 'alist'
+ A rank-1 array of length M, the first K elements of which are the
+ left end points of the subintervals in the partition of the
+ integration range.
+ 'blist'
+ A rank-1 array of length M, the first K elements of which are the
+ right end points of the subintervals.
+ 'rlist'
+ A rank-1 array of length M, the first K elements of which are the
+ integral approximations on the subintervals.
+ 'elist'
+ A rank-1 array of length M, the first K elements of which are the
+ moduli of the absolute error estimates on the subintervals.
+ 'iord'
+ A rank-1 integer array of length M, the first L elements of
+ which are pointers to the error estimates over the subintervals
+ with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
+ sequence ``infodict['iord']`` and let E be the sequence
+ ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
+ decreasing sequence.
+
+ If the input argument points is provided (i.e., it is not None),
+ the following additional outputs are placed in the output
+ dictionary. Assume the points sequence is of length P.
+
+ 'pts'
+ A rank-1 array of length P+2 containing the integration limits
+ and the break points of the intervals in ascending order.
+ This is an array giving the subintervals over which integration
+ will occur.
+ 'level'
+ A rank-1 integer array of length M (=limit), containing the
+ subdivision levels of the subintervals, i.e., if (aa,bb) is a
+ subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
+ are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
+ if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
+ 'ndin'
+ A rank-1 integer array of length P+2. After the first integration
+ over the intervals (pts[1], pts[2]), the error estimates over some
+ of the intervals may have been increased artificially in order to
+ put their subdivision forward. This array has ones in slots
+ corresponding to the subintervals for which this happens.
+
+ **Weighting the integrand**
+
+ The input variables, *weight* and *wvar*, are used to weight the
+ integrand by a select list of functions. Different integration
+ methods are used to compute the integral with these weighting
+ functions, and these do not support specifying break points. The
+ possible values of weight and the corresponding weighting functions are.
+
+ ========== =================================== =====================
+ ``weight`` Weight function used ``wvar``
+ ========== =================================== =====================
+ 'cos' cos(w*x) wvar = w
+ 'sin' sin(w*x) wvar = w
+ 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
+ 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
+ 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
+ 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
+ 'cauchy' 1/(x-c) wvar = c
+ ========== =================================== =====================
+
+ wvar holds the parameter w, (alpha, beta), or c depending on the weight
+ selected. In these expressions, a and b are the integration limits.
+
+ For the 'cos' and 'sin' weighting, additional inputs and outputs are
+ available.
+
+ For finite integration limits, the integration is performed using a
+ Clenshaw-Curtis method which uses Chebyshev moments. For repeated
+ calculations, these moments are saved in the output dictionary:
+
+ 'momcom'
+ The maximum level of Chebyshev moments that have been computed,
+ i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
+ computed for intervals of length ``|b-a| * 2**(-l)``,
+ ``l=0,1,...,M_c``.
+ 'nnlog'
+ A rank-1 integer array of length M(=limit), containing the
+ subdivision levels of the subintervals, i.e., an element of this
+ array is equal to l if the corresponding subinterval is
+ ``|b-a|* 2**(-l)``.
+ 'chebmo'
+ A rank-2 array of shape (25, maxp1) containing the computed
+ Chebyshev moments. These can be passed on to an integration
+ over the same interval by passing this array as the second
+ element of the sequence wopts and passing infodict['momcom'] as
+ the first element.
+
+ If one of the integration limits is infinite, then a Fourier integral is
+ computed (assuming w neq 0). If full_output is 1 and a numerical error
+ is encountered, besides the error message attached to the output tuple,
+ a dictionary is also appended to the output tuple which translates the
+ error codes in the array ``info['ierlst']`` to English messages. The
+ output information dictionary contains the following entries instead of
+ 'last', 'alist', 'blist', 'rlist', and 'elist':
+
+ 'lst'
+ The number of subintervals needed for the integration (call it ``K_f``).
+ 'rslst'
+ A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
+ contain the integral contribution over the interval
+ ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
+ and ``k=1,2,...,K_f``.
+ 'erlst'
+ A rank-1 array of length ``M_f`` containing the error estimate
+ corresponding to the interval in the same position in
+ ``infodict['rslist']``.
+ 'ierlst'
+ A rank-1 integer array of length ``M_f`` containing an error flag
+ corresponding to the interval in the same position in
+ ``infodict['rslist']``. See the explanation dictionary (last entry
+ in the output tuple) for the meaning of the codes.
+
+ Examples
+ --------
+ Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
+
+ >>> from scipy import integrate
+ >>> x2 = lambda x: x**2
+ >>> integrate.quad(x2, 0, 4)
+ (21.333333333333332, 2.3684757858670003e-13)
+ >>> print(4**3 / 3.) # analytical result
+ 21.3333333333
+
+ Calculate :math:`\\int^\\infty_0 e^{-x} dx`
+
+ >>> invexp = lambda x: np.exp(-x)
+ >>> integrate.quad(invexp, 0, np.inf)
+ (1.0, 5.842605999138044e-11)
+
+ >>> f = lambda x,a : a*x
+ >>> y, err = integrate.quad(f, 0, 1, args=(1,))
+ >>> y
+ 0.5
+ >>> y, err = integrate.quad(f, 0, 1, args=(3,))
+ >>> y
+ 1.5
+
+ Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
+ y parameter as 1::
+
+ testlib.c =>
+ double func(int n, double args[n]){
+ return args[0]*args[0] + args[1]*args[1];}
+ compile to library testlib.*
+
+ ::
+
+ from scipy import integrate
+ import ctypes
+ lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
+ lib.func.restype = ctypes.c_double
+ lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
+ integrate.quad(lib.func,0,1,(1))
+ #(1.3333333333333333, 1.4802973661668752e-14)
+ print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
+ # 1.3333333333333333
+
+ Be aware that pulse shapes and other sharp features as compared to the
+ size of the integration interval may not be integrated correctly using
+ this method. A simplified example of this limitation is integrating a
+ y-axis reflected step function with many zero values within the integrals
+ bounds.
+
+ >>> y = lambda x: 1 if x<=0 else 0
+ >>> integrate.quad(y, -1, 1)
+ (1.0, 1.1102230246251565e-14)
+ >>> integrate.quad(y, -1, 100)
+ (1.0000000002199108, 1.0189464580163188e-08)
+ >>> integrate.quad(y, -1, 10000)
+ (0.0, 0.0)
+
+ """
+ if not isinstance(args, tuple):
+ args = (args,)
+
+ # check the limits of integration: \int_a^b, expect a < b
+ flip, a, b = b < a, min(a, b), max(a, b)
+
+ if weight is None:
+ retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
+ points)
+ else:
+ if points is not None:
+ msg = ("Break points cannot be specified when using weighted integrand.\n"
+ "Continuing, ignoring specified points.")
+ warnings.warn(msg, IntegrationWarning, stacklevel=2)
+ retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
+ limlst, limit, maxp1, weight, wvar, wopts)
+
+ if flip:
+ retval = (-retval[0],) + retval[1:]
+
+ ier = retval[-1]
+ if ier == 0:
+ return retval[:-1]
+
+ msgs = {80: "A Python error occurred possibly while calling the function.",
+ 1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
+ 2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
+ 3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
+ 4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
+ 5: "The integral is probably divergent, or slowly convergent.",
+ 6: "The input is invalid.",
+ 7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
+ 'unknown': "Unknown error."}
+
+ if weight in ['cos','sin'] and (b == Inf or a == -Inf):
+ msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
+ msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
+ msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
+ explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
+ 2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
+ 3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
+ 4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
+ 5: "The integral over this cycle is probably divergent or slowly convergent."}
+
+ try:
+ msg = msgs[ier]
+ except KeyError:
+ msg = msgs['unknown']
+
+ if ier in [1,2,3,4,5,7]:
+ if full_output:
+ if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
+ return retval[:-1] + (msg, explain)
+ else:
+ return retval[:-1] + (msg,)
+ else:
+ warnings.warn(msg, IntegrationWarning, stacklevel=2)
+ return retval[:-1]
+
+ elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
+ if epsabs <= 0: # Small error tolerance - applies to all methods
+ if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
+ msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
+ " 5e-29 and 50*(machine epsilon).")
+ elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
+ msg = ("Sine or cosine weighted intergals with infinite domain"
+ " must have 'epsabs'>0.")
+
+ elif weight is None:
+ if points is None: # QAGSE/QAGIE
+ msg = ("Invalid 'limit' argument. There must be"
+ " at least one subinterval")
+ else: # QAGPE
+ if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
+ msg = ("All break points in 'points' must lie within the"
+ " integration limits.")
+ elif len(points) >= limit:
+ msg = ("Number of break points ({:d})"
+ " must be less than subinterval"
+ " limit ({:d})").format(len(points), limit)
+
+ else:
+ if maxp1 < 1:
+ msg = "Chebyshev moment limit maxp1 must be >=1."
+
+ elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
+ msg = "Cycle limit limlst must be >=3."
+
+ elif weight.startswith('alg'): # QAWSE
+ if min(wvar) < -1:
+ msg = "wvar parameters (alpha, beta) must both be >= -1."
+ if b < a:
+ msg = "Integration limits a, b must satistfy a>> from scipy import integrate
+ >>> f = lambda y, x: x*y**2
+ >>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)
+ (0.6666666666666667, 7.401486830834377e-15)
+
+ """
+
+ def temp_ranges(*args):
+ return [gfun(args[0]) if callable(gfun) else gfun,
+ hfun(args[0]) if callable(hfun) else hfun]
+
+ return nquad(func, [temp_ranges, [a, b]], args=args,
+ opts={"epsabs": epsabs, "epsrel": epsrel})
+
+
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` over ``x = a..b``,
    ``y = gfun(x)..hfun(x)`` and ``z = qfun(x, y)..rfun(x, y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`.
    gfun, hfun : function or float
        The lower/upper boundary curves in y: either a callable taking a
        single float (x) and returning a float, or a constant.
    qfun, rfun : function or float
        The lower/upper boundary surfaces in z: either a callable taking
        two floats in the order (x, y) and returning a float, or a
        constant.
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is
        1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : Adaptive quadrature using QUADPACK
    dblquad : Double integrals
    nquad : N-dimensional integrals

    Examples
    --------
    Compute the triple integral of ``x * y * z``, over ``x`` ranging
    from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.

    >>> from scipy import integrate
    >>> f = lambda z, y, x: x*y*z
    >>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3,
    ...                   lambda x, y: 0, lambda x, y: 1)
    (1.8750000000000002, 3.324644794257407e-14)


    """
    # nquad integrates innermost-first and hands each range function the
    # outer variables: the z-range receives (y, x, *args) and the y-range
    # receives (x, *args), whereas qfun/rfun expect (x, y) order.
    def _bound(limit, *coords):
        # A boundary may be a callable of the outer coordinates or a constant.
        return limit(*coords) if callable(limit) else limit

    def z_range(*vals):
        y, x = vals[0], vals[1]
        return [_bound(qfun, x, y), _bound(rfun, x, y)]

    def y_range(*vals):
        x = vals[0]
        return [_bound(gfun, x), _bound(hfun, x)]

    return nquad(func, [z_range, y_range, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
+
+
def nquad(func, ranges, args=None, opts=None, full_output=False):
    """
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables: integration
    over ``x0`` (``ranges[0]``) is the innermost integral and ``xn`` the
    outermost.

    Parameters
    ----------
    func : {callable, scipy.LowLevelCallable}
        Function of ``x0, ..., xn, t0, ..., tm`` where the ``x`` variables
        are integrated over (as floats) and the ``t`` values are extra
        arguments supplied through `args`.  A `scipy.LowLevelCallable`
        with signature ``double func(int n, double *xx)`` or
        ``double func(int n, double *xx, void *user_data)`` may be used
        for better performance.
    ranges : iterable object
        Each element is either a sequence of 2 numbers or a callable
        returning such a sequence.  A callable is invoked with all outer
        integration variables plus any parametric arguments, e.g. for
        ``func = f(x0, x1, x2, t0, t1)``, ``ranges[0]`` may be
        ``(a, b)`` or ``(a, b) = range0(x1, x2, t0, t1)``.
    args : iterable object, optional
        Additional arguments ``t0, ..., tm`` required by `func`, `ranges`
        and `opts`.
    opts : iterable object or dict, optional
        Options passed to `quad` (``epsabs``, ``epsrel``, ``limit``,
        ``points``, ``weight``, ``wvar``, ``wopts``).  A single dict
        applies to every integration level; a sequence provides one entry
        per level (dicts or callables following the `ranges` convention).
    full_output : bool, optional
        When True, also return a dict holding the total number of
        integrand evaluations ``neval``.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.
    out_dict : dict, optional
        A dict containing additional information on the integration.

    See Also
    --------
    quad : 1-D numerical integration
    dblquad, tplquad : double and triple integrals

    Examples
    --------
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...                                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}], full_output=True)
    (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})

    """
    n_levels = len(ranges)

    # Normalize every range to a callable; constant sequences are wrapped.
    range_fns = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]

    call_args = () if args is None else args

    if opts is None:
        opts = [{}] * n_levels
    if isinstance(opts, dict):
        # A single dict is shared by every integration level.
        opt_fns = [_OptFunc(opts)] * n_levels
    else:
        opt_fns = [opt if callable(opt) else _OptFunc(opt) for opt in opts]

    return _NQuad(func, range_fns, opt_fns, full_output).integrate(*call_args)
+
+
class _RangeFunc(object):
    """Wrap a constant integration range so it is callable like a range
    function."""

    def __init__(self, range_):
        self.range_ = range_

    def __call__(self, *args):
        """Return the stored range.

        Any positional arguments are accepted and ignored so constant
        ranges can be invoked exactly like callable ranges.
        """
        return self.range_
+
+
class _OptFunc(object):
    """Wrap a constant options dict so it is callable like an options
    function."""

    def __init__(self, opt):
        self.opt = opt

    def __call__(self, *args):
        """Return the stored options dict, ignoring any arguments."""
        return self.opt
+
+
class _NQuad(object):
    """Stateful driver for `nquad`: recursively applies `quad`, outermost
    integral first, accumulating the worst error estimate seen."""

    def __init__(self, func, ranges, opts, full_output):
        self.abserr = 0          # running maximum of per-level error estimates
        self.func = func
        self.ranges = ranges
        self.opts = opts
        self.maxdepth = len(ranges)
        self.full_output = full_output
        if self.full_output:
            self.out_dict = {'neval': 0}

    def integrate(self, *args, **kwargs):
        """Integrate one level; recursion depth is threaded via the
        private ``depth`` keyword (0 at the outermost call)."""
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')

        # `ranges`/`opts` are ordered innermost-first while recursion runs
        # outermost-first, so index from the end of the lists.
        level = -(depth + 1)
        low, high = self.ranges[level](*args)
        opt = dict(self.opts[level](*args))

        # Break points outside the current interval are invalid for quad;
        # drop them silently.
        if 'points' in opt:
            opt['points'] = [pt for pt in opt['points'] if low <= pt <= high]

        innermost = (depth + 1 == self.maxdepth)
        if innermost:
            integrand = self.func
        else:
            integrand = partial(self.integrate, depth=depth + 1)

        result = quad(integrand, low, high, args=args,
                      full_output=self.full_output, **opt)
        value, abserr = result[0], result[1]

        if self.full_output and innermost:
            # Only the innermost loop actually evaluates the user's
            # integrand, so only its counts contribute to 'neval'.
            self.out_dict['neval'] += result[2]['neval']

        self.abserr = max(self.abserr, abserr)

        if depth > 0:
            return value
        # Final result of N-D integration with error
        if self.full_output:
            return value, self.abserr, self.out_dict
        return value, self.abserr
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/setup.py
new file mode 100644
index 0000000..11ce3d1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/setup.py
@@ -0,0 +1,113 @@
+import os
+from os.path import join
+
+from scipy._build_utils import numpy_nodepr_api
+
+
def configuration(parent_package='',top_path=None):
    """numpy.distutils build configuration for the `scipy.integrate` subpackage.

    Declares the Fortran helper libraries (mach, quadpack, lsoda, vode, dop)
    and the C / f2py extension modules built on top of them, plus two
    test-only extensions.

    Parameters
    ----------
    parent_package : str, optional
        Name of the parent package, forwarded to `Configuration`.
    top_path : str or None, optional
        Top of the source tree, forwarded to `Configuration`.

    Returns
    -------
    config : numpy.distutils.misc_util.Configuration
        The populated configuration object.
    """
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils.system_info import get_info
    from scipy._build_utils import (uses_blas64, blas_ilp64_pre_build_hook,
                                    combine_dict, get_f2py_int64_options)

    config = Configuration('integrate', parent_package, top_path)

    # Select 64-bit-integer (ILP64) LAPACK or the standard build, together
    # with the matching pre-build hook and f2py options.
    if uses_blas64():
        lapack_opt = get_info('lapack_ilp64_opt', 2)
        pre_build_hook = blas_ilp64_pre_build_hook(lapack_opt)
        f2py_options = get_f2py_int64_options()
    else:
        lapack_opt = get_info('lapack_opt')
        pre_build_hook = None
        f2py_options = None

    # Fortran source lists for the static helper libraries.
    mach_src = [join('mach','*.f')]
    quadpack_src = [join('quadpack', '*.f')]
    lsoda_src = [join('odepack', fn) for fn in [
        'blkdta000.f', 'bnorm.f', 'cfode.f',
        'ewset.f', 'fnorm.f', 'intdy.f',
        'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f',
        'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f',
        'xsetun.f']]
    vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')]
    dop_src = [join('dop','*.f')]
    quadpack_test_src = [join('tests','_test_multivariate.c')]
    odeint_banded_test_src = [join('tests', 'banded5x5.f')]

    # 'mach' is compiled without Fortran optimization (the config_fc flag).
    config.add_library('mach', sources=mach_src, config_fc={'noopt': (__file__, 1)},
                       _pre_build_hook=pre_build_hook)
    config.add_library('quadpack', sources=quadpack_src, _pre_build_hook=pre_build_hook)
    config.add_library('lsoda', sources=lsoda_src, _pre_build_hook=pre_build_hook)
    config.add_library('vode', sources=vode_src, _pre_build_hook=pre_build_hook)
    config.add_library('dop', sources=dop_src, _pre_build_hook=pre_build_hook)

    # Extensions
    # quadpack:
    include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')]
    cfg = combine_dict(lapack_opt,
                       include_dirs=include_dirs,
                       libraries=['quadpack', 'mach'])
    config.add_extension('_quadpack',
                         sources=['_quadpackmodule.c'],
                         depends=(['__quadpack.h']
                                  + quadpack_src + mach_src),
                         **cfg)

    # odepack/lsoda-odeint
    cfg = combine_dict(lapack_opt, numpy_nodepr_api,
                       libraries=['lsoda', 'mach'])
    config.add_extension('_odepack',
                         sources=['_odepackmodule.c'],
                         depends=(lsoda_src + mach_src),
                         **cfg)

    # vode
    cfg = combine_dict(lapack_opt,
                       libraries=['vode'])
    ext = config.add_extension('vode',
                               sources=['vode.pyf'],
                               depends=vode_src,
                               f2py_options=f2py_options,
                               **cfg)
    ext._pre_build_hook = pre_build_hook

    # lsoda
    cfg = combine_dict(lapack_opt,
                       libraries=['lsoda', 'mach'])
    ext = config.add_extension('lsoda',
                               sources=['lsoda.pyf'],
                               depends=(lsoda_src + mach_src),
                               f2py_options=f2py_options,
                               **cfg)
    ext._pre_build_hook = pre_build_hook

    # dop
    ext = config.add_extension('_dop',
                               sources=['dop.pyf'],
                               libraries=['dop'],
                               depends=dop_src,
                               f2py_options=f2py_options)
    ext._pre_build_hook = pre_build_hook

    config.add_extension('_test_multivariate',
                         sources=quadpack_test_src)

    # Fortran+f2py extension module for testing odeint.
    cfg = combine_dict(lapack_opt,
                       libraries=['lsoda', 'mach'])
    ext = config.add_extension('_test_odeint_banded',
                               sources=odeint_banded_test_src,
                               depends=(lsoda_src + mach_src),
                               f2py_options=f2py_options,
                               **cfg)
    ext._pre_build_hook = pre_build_hook

    config.add_subpackage('_ivp')

    config.add_data_dir('tests')
    return config
+
+
if __name__ == '__main__':
    # Allow building the `integrate` subpackage standalone via
    # numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/_test_multivariate.c b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/_test_multivariate.c
new file mode 100644
index 0000000..c19b406
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/_test_multivariate.c
@@ -0,0 +1,124 @@
+#include <Python.h>
+
+#include "math.h"
+
+const double PI = 3.141592653589793238462643383279502884;
+
+static double
+_multivariate_typical(int n, double *args)
+{
+ return cos(args[1] * args[0] - args[2] * sin(args[0])) / PI;
+}
+
+static double
+_multivariate_indefinite(int n, double *args)
+{
+ return -exp(-args[0]) * log(args[0]);
+}
+
+static double
+_multivariate_sin(int n, double *args)
+{
+ return sin(args[0]);
+}
+
+static double
+_sin_0(double x, void *user_data)
+{
+ return sin(x);
+}
+
+static double
+_sin_1(int ndim, double *x, void *user_data)
+{
+ return sin(x[0]);
+}
+
+static double
+_sin_2(double x)
+{
+ return sin(x);
+}
+
+static double
+_sin_3(int ndim, double *x)
+{
+ return sin(x[0]);
+}
+
+
+typedef struct {
+ char *name;
+ void *ptr;
+} routine_t;
+
+
+static const routine_t routines[] = {
+ {"_multivariate_typical", &_multivariate_typical},
+ {"_multivariate_indefinite", &_multivariate_indefinite},
+ {"_multivariate_sin", &_multivariate_sin},
+ {"_sin_0", &_sin_0},
+ {"_sin_1", &_sin_1},
+ {"_sin_2", &_sin_2},
+ {"_sin_3", &_sin_3}
+};
+
+
+static int create_pointers(PyObject *module)
+{
+ PyObject *d, *obj = NULL;
+ int i;
+
+ d = PyModule_GetDict(module);
+ if (d == NULL) {
+ goto fail;
+ }
+
+ for (i = 0; i < sizeof(routines) / sizeof(routine_t); ++i) {
+ obj = PyLong_FromVoidPtr(routines[i].ptr);
+ if (obj == NULL) {
+ goto fail;
+ }
+
+ if (PyDict_SetItemString(d, routines[i].name, obj)) {
+ goto fail;
+ }
+
+ Py_DECREF(obj);
+ obj = NULL;
+ }
+
+ Py_XDECREF(obj);
+ return 0;
+
+fail:
+ Py_XDECREF(obj);
+ return -1;
+}
+
+
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_test_multivariate",
+ NULL,
+ -1,
+ NULL, /* Empty methods section */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyObject *PyInit__test_multivariate(void)
+{
+ PyObject *m;
+ m = PyModule_Create(&moduledef);
+ if (m == NULL) {
+ return NULL;
+ }
+ if (create_pointers(m)) {
+ Py_DECREF(m);
+ return NULL;
+ }
+ return m;
+}
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/banded5x5.f b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/banded5x5.f
new file mode 100644
index 0000000..8a56593
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/banded5x5.f
@@ -0,0 +1,240 @@
+c banded5x5.f
+c
+c This Fortran library contains implementations of the
+c differential equation
+c dy/dt = A*y
+c where A is a 5x5 banded matrix (see below for the actual
+c values). These functions will be used to test
+c scipy.integrate.odeint.
+c
+c The idea is to solve the system two ways: pure Fortran, and
+c using odeint. The "pure Fortran" solver is implemented in
+c the subroutine banded5x5_solve below. It calls LSODA to
+c solve the system.
+c
+c To solve the same system using odeint, the functions in this
+c file are given a python wrapper using f2py. Then the code
+c in test_odeint_jac.py uses the wrapper to implement the
+c equation and Jacobian functions required by odeint. Because
+c those functions ultimately call the Fortran routines defined
+c in this file, the two methods (pure Fortran and odeint) should
+c produce exactly the same results. (That's assuming floating
+c point calculations are deterministic, which can be an
+c incorrect assumption.) If we simply re-implemented the
+c equation and Jacobian functions using just python and numpy,
+c the floating point calculations would not be performed in
+c the same sequence as in the Fortran code, and we would obtain
+c different answers. The answer for either method would be
+c numerically "correct", but the errors would be different,
+c and the counts of function and Jacobian evaluations would
+c likely be different.
+c
+ block data jacobian
+ implicit none
+
+ double precision bands
+ dimension bands(4,5)
+ common /jac/ bands
+
+c The data for a banded Jacobian stored in packed banded
+c format. The full Jacobian is
+c
+c -1, 0.25, 0, 0, 0
+c 0.25, -5, 0.25, 0, 0
+c 0.10, 0.25, -25, 0.25, 0
+c 0, 0.10, 0.25, -125, 0.25
+c 0, 0, 0.10, 0.25, -625
+c
+c The columns in the following layout of numbers are
+c the upper diagonal, main diagonal and two lower diagonals
+c (i.e. each row in the layout is a column of the packed
+c banded Jacobian). The values 0.00D0 are in the "don't
+c care" positions.
+
+ data bands/
+ + 0.00D0, -1.0D0, 0.25D0, 0.10D0,
+ + 0.25D0, -5.0D0, 0.25D0, 0.10D0,
+ + 0.25D0, -25.0D0, 0.25D0, 0.10D0,
+ + 0.25D0, -125.0D0, 0.25D0, 0.00D0,
+ + 0.25D0, -625.0D0, 0.00D0, 0.00D0
+ + /
+
+ end
+
+ subroutine getbands(jac)
+ double precision jac
+ dimension jac(4, 5)
+cf2py intent(out) jac
+
+ double precision bands
+ dimension bands(4,5)
+ common /jac/ bands
+
+ integer i, j
+ do 5 i = 1, 4
+ do 5 j = 1, 5
+ jac(i, j) = bands(i, j)
+ 5 continue
+
+ return
+ end
+
+c
+c Differential equations, right-hand-side
+c
+ subroutine banded5x5(n, t, y, f)
+ implicit none
+ integer n
+ double precision t, y, f
+ dimension y(n), f(n)
+
+ double precision bands
+ dimension bands(4,5)
+ common /jac/ bands
+
+ f(1) = bands(2,1)*y(1) + bands(1,2)*y(2)
+ f(2) = bands(3,1)*y(1) + bands(2,2)*y(2) + bands(1,3)*y(3)
+ f(3) = bands(4,1)*y(1) + bands(3,2)*y(2) + bands(2,3)*y(3)
+ + + bands(1,4)*y(4)
+ f(4) = bands(4,2)*y(2) + bands(3,3)*y(3) + bands(2,4)*y(4)
+ + + bands(1,5)*y(5)
+ f(5) = bands(4,3)*y(3) + bands(3,4)*y(4) + bands(2,5)*y(5)
+
+ return
+ end
+
+c
+c Jacobian
+c
+c The subroutine assumes that the full Jacobian is to be computed.
+c ml and mu are ignored, and nrowpd is assumed to be n.
+c
+ subroutine banded5x5_jac(n, t, y, ml, mu, jac, nrowpd)
+ implicit none
+ integer n, ml, mu, nrowpd
+ double precision t, y, jac
+ dimension y(n), jac(nrowpd, n)
+
+ integer i, j
+
+ double precision bands
+ dimension bands(4,5)
+ common /jac/ bands
+
+ do 15 i = 1, 4
+ do 15 j = 1, 5
+ if ((i - j) .gt. 0) then
+ jac(i - j, j) = bands(i, j)
+ end if
+15 continue
+
+ return
+ end
+
+c
+c Banded Jacobian
+c
+c ml = 2, mu = 1
+c
+ subroutine banded5x5_bjac(n, t, y, ml, mu, bjac, nrowpd)
+ implicit none
+ integer n, ml, mu, nrowpd
+ double precision t, y, bjac
+ dimension y(5), bjac(nrowpd, n)
+
+ integer i, j
+
+ double precision bands
+ dimension bands(4,5)
+ common /jac/ bands
+
+ do 20 i = 1, 4
+ do 20 j = 1, 5
+ bjac(i, j) = bands(i, j)
+ 20 continue
+
+ return
+ end
+
+
+ subroutine banded5x5_solve(y, nsteps, dt, jt, nst, nfe, nje)
+
+c jt is the Jacobian type:
+c jt = 1 Use the full Jacobian.
+c jt = 4 Use the banded Jacobian.
+c nst, nfe and nje are outputs:
+c nst: Total number of internal steps
+c nfe: Total number of function (i.e. right-hand-side)
+c evaluations
+c nje: Total number of Jacobian evaluations
+
+ implicit none
+
+ external banded5x5
+ external banded5x5_jac
+ external banded5x5_bjac
+ external LSODA
+
+c Arguments...
+ double precision y, dt
+ integer nsteps, jt, nst, nfe, nje
+cf2py intent(inout) y
+cf2py intent(in) nsteps, dt, jt
+cf2py intent(out) nst, nfe, nje
+
+c Local variables...
+ double precision atol, rtol, t, tout, rwork
+ integer iwork
+ dimension y(5), rwork(500), iwork(500)
+ integer neq, i
+ integer itol, iopt, itask, istate, lrw, liw
+
+c Common block...
+ double precision jacband
+ dimension jacband(4,5)
+ common /jac/ jacband
+
+c --- t range ---
+ t = 0.0D0
+
+c --- Solver tolerances ---
+ rtol = 1.0D-11
+ atol = 1.0D-13
+ itol = 1
+
+c --- Other LSODA parameters ---
+ neq = 5
+ itask = 1
+ istate = 1
+ iopt = 0
+ iwork(1) = 2
+ iwork(2) = 1
+ lrw = 500
+ liw = 500
+
+c --- Call LSODA in a loop to compute the solution ---
+ do 40 i = 1, nsteps
+ tout = i*dt
+ if (jt .eq. 1) then
+ call LSODA(banded5x5, neq, y, t, tout,
+ & itol, rtol, atol, itask, istate, iopt,
+ & rwork, lrw, iwork, liw,
+ & banded5x5_jac, jt)
+ else
+ call LSODA(banded5x5, neq, y, t, tout,
+ & itol, rtol, atol, itask, istate, iopt,
+ & rwork, lrw, iwork, liw,
+ & banded5x5_bjac, jt)
+ end if
+ 40 if (istate .lt. 0) goto 80
+
+ nst = iwork(11)
+ nfe = iwork(12)
+ nje = iwork(13)
+
+ return
+
+ 80 write (6,89) istate
+ 89 format(1X,"Error: istate=",I3)
+ return
+ end
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test__quad_vec.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test__quad_vec.py
new file mode 100644
index 0000000..2e53611
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test__quad_vec.py
@@ -0,0 +1,176 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose
+
+from scipy.integrate import quad_vec
+
+quadrature_params = pytest.mark.parametrize(
+ 'quadrature', [None, "gk15", "gk21", "trapezoid"])
+
+
+@quadrature_params
+def test_quad_vec_simple(quadrature):
+ n = np.arange(10)
+ f = lambda x: x**n
+ for epsabs in [0.1, 1e-3, 1e-6]:
+ if quadrature == 'trapezoid' and epsabs < 1e-4:
+ # slow: skip
+ continue
+
+ kwargs = dict(epsabs=epsabs, quadrature=quadrature)
+
+ exact = 2**(n+1)/(n + 1)
+
+ res, err = quad_vec(f, 0, 2, norm='max', **kwargs)
+ assert_allclose(res, exact, rtol=0, atol=epsabs)
+
+ res, err = quad_vec(f, 0, 2, norm='2', **kwargs)
+ assert np.linalg.norm(res - exact) < epsabs
+
+ res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs)
+ assert_allclose(res, exact, rtol=0, atol=epsabs)
+
+ res, err, *rest = quad_vec(f, 0, 2, norm='max',
+ epsrel=1e-8,
+ full_output=True,
+ limit=10000,
+ **kwargs)
+ assert_allclose(res, exact, rtol=0, atol=epsabs)
+
+
+@quadrature_params
+def test_quad_vec_simple_inf(quadrature):
+ f = lambda x: 1 / (1 + np.float64(x)**2)
+
+ for epsabs in [0.1, 1e-3, 1e-6]:
+ if quadrature == 'trapezoid' and epsabs < 1e-4:
+ # slow: skip
+ continue
+
+ kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
+
+ res, err = quad_vec(f, 0, np.inf, **kwargs)
+ assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, 0, -np.inf, **kwargs)
+ assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, -np.inf, 0, **kwargs)
+ assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, np.inf, 0, **kwargs)
+ assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, -np.inf, np.inf, **kwargs)
+ assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, np.inf, -np.inf, **kwargs)
+ assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, np.inf, np.inf, **kwargs)
+ assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, -np.inf, -np.inf, **kwargs)
+ assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
+
+ res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
+ assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
+
+ f = lambda x: np.sin(x + 2) / (1 + x**2)
+ exact = np.pi / np.e * np.sin(2)
+ epsabs = 1e-5
+
+ res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs,
+ quadrature=quadrature, full_output=True)
+ assert info.status == 1
+ assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err))
+
+
+def _lorenzian(x):
+ return 1 / (1 + x**2)
+
+
+def test_quad_vec_pool():
+ from multiprocessing.dummy import Pool
+
+ f = _lorenzian
+ res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4)
+ assert_allclose(res, np.pi, rtol=0, atol=1e-4)
+
+ with Pool(10) as pool:
+ f = lambda x: 1 / (1 + x**2)
+ res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map)
+ assert_allclose(res, np.pi, rtol=0, atol=1e-4)
+
+
+@quadrature_params
+def test_num_eval(quadrature):
+ def f(x):
+ count[0] += 1
+ return x**5
+
+ count = [0]
+ res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature)
+ assert res[2].neval == count[0]
+
+
+def test_info():
+ def f(x):
+ return np.ones((3, 2, 1))
+
+ res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True)
+
+ assert info.success == True
+ assert info.status == 0
+ assert info.message == 'Target precision reached.'
+ assert info.neval > 0
+ assert info.intervals.shape[1] == 2
+ assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1)
+ assert info.errors.shape == (info.intervals.shape[0],)
+
+
+def test_nan_inf():
+ def f_nan(x):
+ return np.nan
+
+ def f_inf(x):
+ return np.inf if x < 0.1 else 1/x
+
+ res, err, info = quad_vec(f_nan, 0, 1, full_output=True)
+ assert info.status == 3
+
+ res, err, info = quad_vec(f_inf, 0, 1, full_output=True)
+ assert info.status == 3
+
+
+@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0),
+ (-np.inf, np.inf), (np.inf, -np.inf)])
+def test_points(a, b):
+ # Check that initial interval splitting is done according to
+ # `points`, by checking that consecutive sets of 15 point (for
+ # gk15) function evaluations lie between `points`
+
+ points = (0, 0.25, 0.5, 0.75, 1.0)
+ points += tuple(-x for x in points)
+
+ quadrature_points = 15
+ interval_sets = []
+ count = 0
+
+ def f(x):
+ nonlocal count
+
+ if count % quadrature_points == 0:
+ interval_sets.append(set())
+
+ count += 1
+ interval_sets[-1].add(float(x))
+ return 0.0
+
+ quad_vec(f, a, b, points=points, quadrature='gk15', limit=0)
+
+ # Check that all point sets lie in a single `points` interval
+ for p in interval_sets:
+ j = np.searchsorted(sorted(points), tuple(p))
+ assert np.all(j == j[0])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_banded_ode_solvers.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_banded_ode_solvers.py
new file mode 100644
index 0000000..f34d45d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_banded_ode_solvers.py
@@ -0,0 +1,218 @@
+import itertools
+import numpy as np
+from numpy.testing import assert_allclose
+from scipy.integrate import ode
+
+
+def _band_count(a):
+ """Returns ml and mu, the lower and upper band sizes of a."""
+ nrows, ncols = a.shape
+ ml = 0
+ for k in range(-nrows+1, 0):
+ if np.diag(a, k).any():
+ ml = -k
+ break
+ mu = 0
+ for k in range(nrows-1, 0, -1):
+ if np.diag(a, k).any():
+ mu = k
+ break
+ return ml, mu
+
+
+def _linear_func(t, y, a):
+ """Linear system dy/dt = a * y"""
+ return a.dot(y)
+
+
+def _linear_jac(t, y, a):
+ """Jacobian of a * y is a."""
+ return a
+
+
+def _linear_banded_jac(t, y, a):
+ """Banded Jacobian."""
+ ml, mu = _band_count(a)
+ bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)]
+ bjac.append(np.diag(a))
+ for k in range(-1, -ml-1, -1):
+ bjac.append(np.r_[np.diag(a, k), [0] * (-k)])
+ return bjac
+
+
+def _solve_linear_sys(a, y0, tend=1, dt=0.1,
+ solver=None, method='bdf', use_jac=True,
+ with_jacobian=False, banded=False):
+ """Use scipy.integrate.ode to solve a linear system of ODEs.
+
+ a : square ndarray
+ Matrix of the linear system to be solved.
+ y0 : ndarray
+ Initial condition
+ tend : float
+ Stop time.
+ dt : float
+ Step size of the output.
+ solver : str
+ If not None, this must be "vode", "lsoda" or "zvode".
+ method : str
+ Either "bdf" or "adams".
+ use_jac : bool
+ Determines if the jacobian function is passed to ode().
+ with_jacobian : bool
+ Passed to ode.set_integrator().
+ banded : bool
+ Determines whether a banded or full jacobian is used.
+ If `banded` is True, `lband` and `uband` are determined by the
+ values in `a`.
+ """
+ if banded:
+ lband, uband = _band_count(a)
+ else:
+ lband = None
+ uband = None
+
+ if use_jac:
+ if banded:
+ r = ode(_linear_func, _linear_banded_jac)
+ else:
+ r = ode(_linear_func, _linear_jac)
+ else:
+ r = ode(_linear_func)
+
+ if solver is None:
+ if np.iscomplexobj(a):
+ solver = "zvode"
+ else:
+ solver = "vode"
+
+ r.set_integrator(solver,
+ with_jacobian=with_jacobian,
+ method=method,
+ lband=lband, uband=uband,
+ rtol=1e-9, atol=1e-10,
+ )
+ t0 = 0
+ r.set_initial_value(y0, t0)
+ r.set_f_params(a)
+ r.set_jac_params(a)
+
+ t = [t0]
+ y = [y0]
+ while r.successful() and r.t < tend:
+ r.integrate(r.t + dt)
+ t.append(r.t)
+ y.append(r.y)
+
+ t = np.array(t)
+ y = np.array(y)
+ return t, y
+
+
+def _analytical_solution(a, y0, t):
+ """
+ Analytical solution to the linear differential equations dy/dt = a*y.
+
+ The solution is only valid if `a` is diagonalizable.
+
+ Returns a 2-D array with shape (len(t), len(y0)).
+ """
+ lam, v = np.linalg.eig(a)
+ c = np.linalg.solve(v, y0)
+ e = c * np.exp(lam * t.reshape(-1, 1))
+ sol = e.dot(v.T)
+ return sol
+
+
+def test_banded_ode_solvers():
+ # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
+ # with a system that has a banded Jacobian matrix.
+
+ t_exact = np.linspace(0, 1.0, 5)
+
+ # --- Real arrays for testing the "lsoda" and "vode" solvers ---
+
+ # lband = 2, uband = 1:
+ a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
+ [0.2, -0.5, 0.9, 0.0, 0.0],
+ [0.1, 0.1, -0.4, 0.1, 0.0],
+ [0.0, 0.3, -0.1, -0.9, -0.3],
+ [0.0, 0.0, 0.1, 0.1, -0.7]])
+
+ # lband = 0, uband = 1:
+ a_real_upper = np.triu(a_real)
+
+ # lband = 2, uband = 0:
+ a_real_lower = np.tril(a_real)
+
+ # lband = 0, uband = 0:
+ a_real_diag = np.triu(a_real_lower)
+
+ real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
+ real_solutions = []
+
+ for a in real_matrices:
+ y0 = np.arange(1, a.shape[0] + 1)
+ y_exact = _analytical_solution(a, y0, t_exact)
+ real_solutions.append((y0, t_exact, y_exact))
+
+ def check_real(idx, solver, meth, use_jac, with_jac, banded):
+ a = real_matrices[idx]
+ y0, t_exact, y_exact = real_solutions[idx]
+ t, y = _solve_linear_sys(a, y0,
+ tend=t_exact[-1],
+ dt=t_exact[1] - t_exact[0],
+ solver=solver,
+ method=meth,
+ use_jac=use_jac,
+ with_jacobian=with_jac,
+ banded=banded)
+ assert_allclose(t, t_exact)
+ assert_allclose(y, y_exact)
+
+ for idx in range(len(real_matrices)):
+ p = [['vode', 'lsoda'], # solver
+ ['bdf', 'adams'], # method
+ [False, True], # use_jac
+ [False, True], # with_jacobian
+ [False, True]] # banded
+ for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
+ check_real(idx, solver, meth, use_jac, with_jac, banded)
+
+ # --- Complex arrays for testing the "zvode" solver ---
+
+ # complex, lband = 2, uband = 1:
+ a_complex = a_real - 0.5j * a_real
+
+ # complex, lband = 0, uband = 0:
+ a_complex_diag = np.diag(np.diag(a_complex))
+
+ complex_matrices = [a_complex, a_complex_diag]
+ complex_solutions = []
+
+ for a in complex_matrices:
+ y0 = np.arange(1, a.shape[0] + 1) + 1j
+ y_exact = _analytical_solution(a, y0, t_exact)
+ complex_solutions.append((y0, t_exact, y_exact))
+
+ def check_complex(idx, solver, meth, use_jac, with_jac, banded):
+ a = complex_matrices[idx]
+ y0, t_exact, y_exact = complex_solutions[idx]
+ t, y = _solve_linear_sys(a, y0,
+ tend=t_exact[-1],
+ dt=t_exact[1] - t_exact[0],
+ solver=solver,
+ method=meth,
+ use_jac=use_jac,
+ with_jacobian=with_jac,
+ banded=banded)
+ assert_allclose(t, t_exact)
+ assert_allclose(y, y_exact)
+
+ for idx in range(len(complex_matrices)):
+ p = [['bdf', 'adams'], # method
+ [False, True], # use_jac
+ [False, True], # with_jacobian
+ [False, True]] # banded
+ for meth, use_jac, with_jac, banded in itertools.product(*p):
+ check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_bvp.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_bvp.py
new file mode 100644
index 0000000..5eee766
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_bvp.py
@@ -0,0 +1,709 @@
+import sys
+
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_equal, assert_allclose,
+ assert_equal)
+from pytest import raises as assert_raises
+
+from scipy.sparse import coo_matrix
+from scipy.special import erf
+from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
+ estimate_bc_jac, compute_jac_indices,
+ construct_global_jac, solve_bvp)
+
+
+def exp_fun(x, y):
+ return np.vstack((y[1], y[0]))
+
+
+def exp_fun_jac(x, y):
+ df_dy = np.empty((2, 2, x.shape[0]))
+ df_dy[0, 0] = 0
+ df_dy[0, 1] = 1
+ df_dy[1, 0] = 1
+ df_dy[1, 1] = 0
+ return df_dy
+
+
+def exp_bc(ya, yb):
+ return np.hstack((ya[0] - 1, yb[0]))
+
+
+def exp_bc_complex(ya, yb):
+ return np.hstack((ya[0] - 1 - 1j, yb[0]))
+
+
+def exp_bc_jac(ya, yb):
+ dbc_dya = np.array([
+ [1, 0],
+ [0, 0]
+ ])
+ dbc_dyb = np.array([
+ [0, 0],
+ [1, 0]
+ ])
+ return dbc_dya, dbc_dyb
+
+
+def exp_sol(x):
+ return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2))
+
+
+def sl_fun(x, y, p):
+ return np.vstack((y[1], -p[0]**2 * y[0]))
+
+
+def sl_fun_jac(x, y, p):
+ n, m = y.shape
+ df_dy = np.empty((n, 2, m))
+ df_dy[0, 0] = 0
+ df_dy[0, 1] = 1
+ df_dy[1, 0] = -p[0]**2
+ df_dy[1, 1] = 0
+
+ df_dp = np.empty((n, 1, m))
+ df_dp[0, 0] = 0
+ df_dp[1, 0] = -2 * p[0] * y[0]
+
+ return df_dy, df_dp
+
+
+def sl_bc(ya, yb, p):
+ return np.hstack((ya[0], yb[0], ya[1] - p[0]))
+
+
+def sl_bc_jac(ya, yb, p):
+ dbc_dya = np.zeros((3, 2))
+ dbc_dya[0, 0] = 1
+ dbc_dya[2, 1] = 1
+
+ dbc_dyb = np.zeros((3, 2))
+ dbc_dyb[1, 0] = 1
+
+ dbc_dp = np.zeros((3, 1))
+ dbc_dp[2, 0] = -1
+
+ return dbc_dya, dbc_dyb, dbc_dp
+
+
+def sl_sol(x, p):
+ return np.sin(p[0] * x)
+
+
+def emden_fun(x, y):
+ return np.vstack((y[1], -y[0]**5))
+
+
+def emden_fun_jac(x, y):
+ df_dy = np.empty((2, 2, x.shape[0]))
+ df_dy[0, 0] = 0
+ df_dy[0, 1] = 1
+ df_dy[1, 0] = -5 * y[0]**4
+ df_dy[1, 1] = 0
+ return df_dy
+
+
+def emden_bc(ya, yb):
+ return np.array([ya[1], yb[0] - (3/4)**0.5])
+
+
+def emden_bc_jac(ya, yb):
+ dbc_dya = np.array([
+ [0, 1],
+ [0, 0]
+ ])
+ dbc_dyb = np.array([
+ [0, 0],
+ [1, 0]
+ ])
+ return dbc_dya, dbc_dyb
+
+
+def emden_sol(x):
+ return (1 + x**2/3)**-0.5
+
+
+def undefined_fun(x, y):
+ return np.zeros_like(y)
+
+
+def undefined_bc(ya, yb):
+ return np.array([ya[0], yb[0] - 1])
+
+
+def big_fun(x, y):
+ f = np.zeros_like(y)
+ f[::2] = y[1::2]
+ return f
+
+
+def big_bc(ya, yb):
+ return np.hstack((ya[::2], yb[::2] - 1))
+
+
+def big_sol(x, n):
+ y = np.ones((2 * n, x.size))
+ y[::2] = x
+ return x
+
+
+def big_fun_with_parameters(x, y, p):
+ """ Big version of sl_fun, with two parameters.
+
+ The two differential equations represented by sl_fun are broadcast to the
+ number of rows of y, rotating between the parameters p[0] and p[1].
+ Here are the differential equations:
+
+ dy[0]/dt = y[1]
+ dy[1]/dt = -p[0]**2 * y[0]
+ dy[2]/dt = y[3]
+ dy[3]/dt = -p[1]**2 * y[2]
+ dy[4]/dt = y[5]
+ dy[5]/dt = -p[0]**2 * y[4]
+ dy[6]/dt = y[7]
+ dy[7]/dt = -p[1]**2 * y[6]
+ .
+ .
+ .
+
+ """
+ f = np.zeros_like(y)
+ f[::2] = y[1::2]
+ f[1::4] = -p[0]**2 * y[::4]
+ f[3::4] = -p[1]**2 * y[2::4]
+ return f
+
+
+def big_fun_with_parameters_jac(x, y, p):
+ # big version of sl_fun_jac, with two parameters
+ n, m = y.shape
+ df_dy = np.zeros((n, n, m))
+ df_dy[range(0, n, 2), range(1, n, 2)] = 1
+ df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2
+ df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2
+
+ df_dp = np.zeros((n, 2, m))
+ df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)]
+ df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)]
+
+ return df_dy, df_dp
+
+
+def big_bc_with_parameters(ya, yb, p):
+ # big version of sl_bc, with two parameters
+ return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1]))
+
+
+def big_bc_with_parameters_jac(ya, yb, p):
+ # big version of sl_bc_jac, with two parameters
+ n = ya.shape[0]
+ dbc_dya = np.zeros((n + 2, n))
+ dbc_dyb = np.zeros((n + 2, n))
+
+ dbc_dya[range(n // 2), range(0, n, 2)] = 1
+ dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1
+
+ dbc_dp = np.zeros((n + 2, 2))
+ dbc_dp[n, 0] = -1
+ dbc_dya[n, 1] = 1
+ dbc_dp[n + 1, 1] = -1
+ dbc_dya[n + 1, 3] = 1
+
+ return dbc_dya, dbc_dyb, dbc_dp
+
+
+def big_sol_with_parameters(x, p):
+ # big version of sl_sol, with two parameters
+ return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x)))
+
+
+def shock_fun(x, y):
+ eps = 1e-3
+ return np.vstack((
+ y[1],
+ -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
+ np.pi * x * np.sin(np.pi * x)) / eps
+ ))
+
+
+def shock_bc(ya, yb):
+ return np.array([ya[0] + 2, yb[0]])
+
+
+def shock_sol(x):
+ eps = 1e-3
+ k = np.sqrt(2 * eps)
+ return np.cos(np.pi * x) + erf(x / k) / erf(1 / k)
+
+
+def nonlin_bc_fun(x, y):
+ # laplace eq.
+ return np.stack([y[1], np.zeros_like(x)])
+
+
+def nonlin_bc_bc(ya, yb):
+ phiA, phipA = ya
+ phiC, phipC = yb
+
+ kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9
+
+ # Butler-Volmer Kinetics at Anode
+ hA = 0.0-phiA-0.0
+ iA = ioA * (np.exp(f*hA) - np.exp(-f*hA))
+ res0 = iA + kappa * phipA
+
+ # Butler-Volmer Kinetics at Cathode
+ hC = V - phiC - 1.0
+ iC = ioC * (np.exp(f*hC) - np.exp(-f*hC))
+ res1 = iC - kappa*phipC
+
+ return np.array([res0, res1])
+
+
+def nonlin_bc_sol(x):
+ return -0.13426436116763119 - 1.1308709 * x
+
+
+def test_modify_mesh():
+ x = np.array([0, 1, 3, 9], dtype=float)
+ x_new = modify_mesh(x, np.array([0]), np.array([2]))
+ assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))
+
+ x = np.array([-6, -3, 0, 3, 6], dtype=float)
+ x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
+ assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
+
+
+def test_compute_fun_jac():
+ x = np.linspace(0, 1, 5)
+ y = np.empty((2, x.shape[0]))
+ y[0] = 0.01
+ y[1] = 0.02
+ p = np.array([])
+ df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
+ df_dy_an = exp_fun_jac(x, y)
+ assert_allclose(df_dy, df_dy_an)
+ assert_(df_dp is None)
+
+ x = np.linspace(0, np.pi, 5)
+ y = np.empty((2, x.shape[0]))
+ y[0] = np.sin(x)
+ y[1] = np.cos(x)
+ p = np.array([1.0])
+ df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
+ df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
+ assert_allclose(df_dy, df_dy_an)
+ assert_allclose(df_dp, df_dp_an)
+
+ x = np.linspace(0, 1, 10)
+ y = np.empty((2, x.shape[0]))
+ y[0] = (3/4)**0.5
+ y[1] = 1e-4
+ p = np.array([])
+ df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
+ df_dy_an = emden_fun_jac(x, y)
+ assert_allclose(df_dy, df_dy_an)
+ assert_(df_dp is None)
+
+
+def test_compute_bc_jac():
+ ya = np.array([-1.0, 2])
+ yb = np.array([0.5, 3])
+ p = np.array([])
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
+ lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
+ dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
+ assert_allclose(dbc_dya, dbc_dya_an)
+ assert_allclose(dbc_dyb, dbc_dyb_an)
+ assert_(dbc_dp is None)
+
+ ya = np.array([0.0, 1])
+ yb = np.array([0.0, -1])
+ p = np.array([0.5])
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
+ dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
+ assert_allclose(dbc_dya, dbc_dya_an)
+ assert_allclose(dbc_dyb, dbc_dyb_an)
+ assert_allclose(dbc_dp, dbc_dp_an)
+
+ ya = np.array([0.5, 100])
+ yb = np.array([-1000, 10.5])
+ p = np.array([])
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
+ lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
+ dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
+ assert_allclose(dbc_dya, dbc_dya_an)
+ assert_allclose(dbc_dyb, dbc_dyb_an)
+ assert_(dbc_dp is None)
+
+
+def test_compute_jac_indices():
+ n = 2
+ m = 4
+ k = 2
+ i, j = compute_jac_indices(n, m, k)
+ s = coo_matrix((np.ones_like(i), (i, j))).toarray()
+ s_true = np.array([
+ [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
+ [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
+ [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
+ [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+ ])
+ assert_array_equal(s, s_true)
+
+
+def test_compute_global_jac():
+ n = 2
+ m = 5
+ k = 1
+ i_jac, j_jac = compute_jac_indices(2, 5, 1)
+ x = np.linspace(0, 1, 5)
+ h = np.diff(x)
+ y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
+ p = np.array([3.0])
+
+ f = sl_fun(x, y, p)
+
+ x_middle = x[:-1] + 0.5 * h
+ y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
+
+ df_dy, df_dp = sl_fun_jac(x, y, p)
+ df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
+ dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
+
+ J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
+ df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
+ J = J.toarray()
+
+ def J_block(h, p):
+ return np.array([
+ [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
+ [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
+ ])
+
+ J_true = np.zeros((m * n + k, m * n + k))
+ for i in range(m - 1):
+ J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0])
+
+ J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
+ J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
+ h**2/6 * (y[1, :-1] - y[1, 1:]))
+
+ J_true[8, 0] = 1
+ J_true[9, 8] = 1
+ J_true[10, 1] = 1
+ J_true[10, 10] = -1
+
+ assert_allclose(J, J_true, rtol=1e-10)
+
+ df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
+ df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
+ dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
+ J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
+ df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
+ J = J.toarray()
+ assert_allclose(J, J_true, rtol=1e-8, atol=1e-9)
+
+
+def test_parameter_validation():
+ x = [0, 1, 0.5]
+ y = np.zeros((2, 3))
+ assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
+
+ x = np.linspace(0, 1, 5)
+ y = np.zeros((2, 4))
+ assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
+
+ fun = lambda x, y, p: exp_fun(x, y)
+ bc = lambda ya, yb, p: exp_bc(ya, yb)
+
+ y = np.zeros((2, x.shape[0]))
+ assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
+
+ def wrong_shape_fun(x, y):
+ return np.zeros(3)
+
+ assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
+
+ S = np.array([[0, 0]])
+ assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
+
+
+def test_no_params():
+ x = np.linspace(0, 1, 5)
+ x_test = np.linspace(0, 1, 100)
+ y = np.zeros((2, x.shape[0]))
+ for fun_jac in [None, exp_fun_jac]:
+ for bc_jac in [None, exp_bc_jac]:
+ sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
+ bc_jac=bc_jac)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ assert_equal(sol.x.size, 5)
+
+ sol_test = sol.sol(x_test)
+
+ assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
+
+ f_test = exp_fun(x_test, sol_test)
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(rel_res**2, axis=0)**0.5
+ assert_(np.all(norm_res < 1e-3))
+
+ assert_(np.all(sol.rms_residuals < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_with_params():
+ x = np.linspace(0, np.pi, 5)
+ x_test = np.linspace(0, np.pi, 100)
+ y = np.ones((2, x.shape[0]))
+
+ for fun_jac in [None, sl_fun_jac]:
+ for bc_jac in [None, sl_bc_jac]:
+ sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
+ bc_jac=bc_jac)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ assert_(sol.x.size < 10)
+
+ assert_allclose(sol.p, [1], rtol=1e-4)
+
+ sol_test = sol.sol(x_test)
+
+ assert_allclose(sol_test[0], sl_sol(x_test, [1]),
+ rtol=1e-4, atol=1e-4)
+
+ f_test = sl_fun(x_test, sol_test, [1])
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+ assert_(np.all(norm_res < 1e-3))
+
+ assert_(np.all(sol.rms_residuals < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_singular_term():
+ x = np.linspace(0, 1, 10)
+ x_test = np.linspace(0.05, 1, 100)
+ y = np.empty((2, 10))
+ y[0] = (3/4)**0.5
+ y[1] = 1e-4
+ S = np.array([[0, 0], [0, -2]])
+
+ for fun_jac in [None, emden_fun_jac]:
+ for bc_jac in [None, emden_bc_jac]:
+ sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
+ bc_jac=bc_jac)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ assert_equal(sol.x.size, 10)
+
+ sol_test = sol.sol(x_test)
+ assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)
+
+ f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+
+ assert_(np.all(norm_res < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_complex():
+ # The test is essentially the same as test_no_params, but boundary
+ # conditions are turned into complex.
+ x = np.linspace(0, 1, 5)
+ x_test = np.linspace(0, 1, 100)
+ y = np.zeros((2, x.shape[0]), dtype=complex)
+ for fun_jac in [None, exp_fun_jac]:
+ for bc_jac in [None, exp_bc_jac]:
+ sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
+ bc_jac=bc_jac)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ sol_test = sol.sol(x_test)
+
+ assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
+ assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)
+
+ f_test = exp_fun(x_test, sol_test)
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
+ axis=0) ** 0.5
+ assert_(np.all(norm_res < 1e-3))
+
+ assert_(np.all(sol.rms_residuals < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_failures():
+ x = np.linspace(0, 1, 2)
+ y = np.zeros((2, x.size))
+ res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
+ assert_equal(res.status, 1)
+ assert_(not res.success)
+
+ x = np.linspace(0, 1, 5)
+ y = np.zeros((2, x.size))
+ res = solve_bvp(undefined_fun, undefined_bc, x, y)
+ assert_equal(res.status, 2)
+ assert_(not res.success)
+
+
+def test_big_problem():
+ n = 30
+ x = np.linspace(0, 1, 5)
+ y = np.zeros((2 * n, x.size))
+ sol = solve_bvp(big_fun, big_bc, x, y)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ sol_test = sol.sol(x)
+
+ assert_allclose(sol_test[0], big_sol(x, n))
+
+ f_test = big_fun(x, sol_test)
+ r = sol.sol(x, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
+ assert_(np.all(norm_res < 1e-3))
+
+ assert_(np.all(sol.rms_residuals < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_big_problem_with_parameters():
+ n = 30
+ x = np.linspace(0, np.pi, 5)
+ x_test = np.linspace(0, np.pi, 100)
+ y = np.ones((2 * n, x.size))
+
+ for fun_jac in [None, big_fun_with_parameters_jac]:
+ for bc_jac in [None, big_bc_with_parameters_jac]:
+ sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x,
+ y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ assert_allclose(sol.p, [1, 1], rtol=1e-4)
+
+ sol_test = sol.sol(x_test)
+
+ for isol in range(0, n, 4):
+ assert_allclose(sol_test[isol],
+ big_sol_with_parameters(x_test, [1, 1])[0],
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(sol_test[isol + 2],
+ big_sol_with_parameters(x_test, [1, 1])[1],
+ rtol=1e-4, atol=1e-4)
+
+ f_test = big_fun_with_parameters(x_test, sol_test, [1, 1])
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+ assert_(np.all(norm_res < 1e-3))
+
+ assert_(np.all(sol.rms_residuals < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_shock_layer():
+ x = np.linspace(-1, 1, 5)
+ x_test = np.linspace(-1, 1, 100)
+ y = np.zeros((2, x.size))
+ sol = solve_bvp(shock_fun, shock_bc, x, y)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ assert_(sol.x.size < 110)
+
+ sol_test = sol.sol(x_test)
+ assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)
+
+ f_test = shock_fun(x_test, sol_test)
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+
+ assert_(np.all(norm_res < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_nonlin_bc():
+ x = np.linspace(0, 0.1, 5)
+ x_test = x
+ y = np.zeros([2, x.size])
+ sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y)
+
+ assert_equal(sol.status, 0)
+ assert_(sol.success)
+
+ assert_(sol.x.size < 8)
+
+ sol_test = sol.sol(x_test)
+ assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5)
+
+ f_test = nonlin_bc_fun(x_test, sol_test)
+ r = sol.sol(x_test, 1) - f_test
+ rel_res = r / (1 + np.abs(f_test))
+ norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+
+ assert_(np.all(norm_res < 1e-3))
+ assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+ assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_verbose():
+ # Smoke test that checks the printing does something and does not crash
+ x = np.linspace(0, 1, 5)
+ y = np.zeros((2, x.shape[0]))
+ for verbose in [0, 1, 2]:
+ old_stdout = sys.stdout
+ sys.stdout = StringIO()
+ try:
+ sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose)
+ text = sys.stdout.getvalue()
+ finally:
+ sys.stdout = old_stdout
+
+ assert_(sol.success)
+ if verbose == 0:
+ assert_(not text, text)
+ if verbose >= 1:
+ assert_("Solved in" in text, text)
+ if verbose >= 2:
+ assert_("Max residual" in text, text)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_integrate.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_integrate.py
new file mode 100644
index 0000000..1cf4175
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_integrate.py
@@ -0,0 +1,830 @@
+# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
+"""
+Tests for numerical integration.
+"""
+import numpy as np
+from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
+ allclose)
+
+from numpy.testing import (
+ assert_, assert_array_almost_equal,
+ assert_allclose, assert_array_equal, assert_equal, assert_warns)
+from pytest import raises as assert_raises
+from scipy.integrate import odeint, ode, complex_ode
+
+#------------------------------------------------------------------------------
+# Test ODE integrators
+#------------------------------------------------------------------------------
+
+
+class TestOdeint(object):
+ # Check integrate.odeint
+
+ def _do_problem(self, problem):
+ t = arange(0.0, problem.stop_t, 0.05)
+
+ # Basic case
+ z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
+ assert_(problem.verify(z, t))
+
+ # Use tfirst=True
+ z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
+ full_output=True, tfirst=True)
+ assert_(problem.verify(z, t))
+
+ if hasattr(problem, 'jac'):
+ # Use Dfun
+ z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
+ full_output=True)
+ assert_(problem.verify(z, t))
+
+ # Use Dfun and tfirst=True
+ z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
+ Dfun=lambda t, y: problem.jac(y, t),
+ full_output=True, tfirst=True)
+ assert_(problem.verify(z, t))
+
+ def test_odeint(self):
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.cmplx:
+ continue
+ self._do_problem(problem)
+
+
+class TestODEClass(object):
+
+ ode_class = None # Set in subclass.
+
+ def _do_problem(self, problem, integrator, method='adams'):
+
+ # ode has callback arguments in different order than odeint
+ f = lambda t, z: problem.f(z, t)
+ jac = None
+ if hasattr(problem, 'jac'):
+ jac = lambda t, z: problem.jac(z, t)
+
+ integrator_params = {}
+ if problem.lband is not None or problem.uband is not None:
+ integrator_params['uband'] = problem.uband
+ integrator_params['lband'] = problem.lband
+
+ ig = self.ode_class(f, jac)
+ ig.set_integrator(integrator,
+ atol=problem.atol/10,
+ rtol=problem.rtol/10,
+ method=method,
+ **integrator_params)
+
+ ig.set_initial_value(problem.z0, t=0.0)
+ z = ig.integrate(problem.stop_t)
+
+ assert_array_equal(z, ig.y)
+ assert_(ig.successful(), (problem, method))
+ assert_(ig.get_return_code() > 0, (problem, method))
+ assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
+
+
+class TestOde(TestODEClass):
+
+ ode_class = ode
+
+ def test_vode(self):
+ # Check the vode solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.cmplx:
+ continue
+ if not problem.stiff:
+ self._do_problem(problem, 'vode', 'adams')
+ self._do_problem(problem, 'vode', 'bdf')
+
+ def test_zvode(self):
+ # Check the zvode solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if not problem.stiff:
+ self._do_problem(problem, 'zvode', 'adams')
+ self._do_problem(problem, 'zvode', 'bdf')
+
+ def test_lsoda(self):
+ # Check the lsoda solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.cmplx:
+ continue
+ self._do_problem(problem, 'lsoda')
+
+ def test_dopri5(self):
+ # Check the dopri5 solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.cmplx:
+ continue
+ if problem.stiff:
+ continue
+ if hasattr(problem, 'jac'):
+ continue
+ self._do_problem(problem, 'dopri5')
+
+ def test_dop853(self):
+ # Check the dop853 solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.cmplx:
+ continue
+ if problem.stiff:
+ continue
+ if hasattr(problem, 'jac'):
+ continue
+ self._do_problem(problem, 'dop853')
+
+ def test_concurrent_fail(self):
+ for sol in ('vode', 'zvode', 'lsoda'):
+ f = lambda t, y: 1.0
+
+ r = ode(f).set_integrator(sol)
+ r.set_initial_value(0, 0)
+
+ r2 = ode(f).set_integrator(sol)
+ r2.set_initial_value(0, 0)
+
+ r.integrate(r.t + 0.1)
+ r2.integrate(r2.t + 0.1)
+
+ assert_raises(RuntimeError, r.integrate, r.t + 0.1)
+
+ def test_concurrent_ok(self):
+ f = lambda t, y: 1.0
+
+ for k in range(3):
+ for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
+ r = ode(f).set_integrator(sol)
+ r.set_initial_value(0, 0)
+
+ r2 = ode(f).set_integrator(sol)
+ r2.set_initial_value(0, 0)
+
+ r.integrate(r.t + 0.1)
+ r2.integrate(r2.t + 0.1)
+ r2.integrate(r2.t + 0.1)
+
+ assert_allclose(r.y, 0.1)
+ assert_allclose(r2.y, 0.2)
+
+ for sol in ('dopri5', 'dop853'):
+ r = ode(f).set_integrator(sol)
+ r.set_initial_value(0, 0)
+
+ r2 = ode(f).set_integrator(sol)
+ r2.set_initial_value(0, 0)
+
+ r.integrate(r.t + 0.1)
+ r.integrate(r.t + 0.1)
+ r2.integrate(r2.t + 0.1)
+ r.integrate(r.t + 0.1)
+ r2.integrate(r2.t + 0.1)
+
+ assert_allclose(r.y, 0.3)
+ assert_allclose(r2.y, 0.2)
+
+
+class TestComplexOde(TestODEClass):
+
+ ode_class = complex_ode
+
+ def test_vode(self):
+ # Check the vode solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if not problem.stiff:
+ self._do_problem(problem, 'vode', 'adams')
+ else:
+ self._do_problem(problem, 'vode', 'bdf')
+
+ def test_lsoda(self):
+ # Check the lsoda solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ self._do_problem(problem, 'lsoda')
+
+ def test_dopri5(self):
+ # Check the dopri5 solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.stiff:
+ continue
+ if hasattr(problem, 'jac'):
+ continue
+ self._do_problem(problem, 'dopri5')
+
+ def test_dop853(self):
+ # Check the dop853 solver
+ for problem_cls in PROBLEMS:
+ problem = problem_cls()
+ if problem.stiff:
+ continue
+ if hasattr(problem, 'jac'):
+ continue
+ self._do_problem(problem, 'dop853')
+
+
+class TestSolout(object):
+ # Check integrate.ode correctly handles solout for dopri5 and dop853
+ def _run_solout_test(self, integrator):
+ # Check correct usage of solout
+ ts = []
+ ys = []
+ t0 = 0.0
+ tend = 10.0
+ y0 = [1.0, 2.0]
+
+ def solout(t, y):
+ ts.append(t)
+ ys.append(y.copy())
+
+ def rhs(t, y):
+ return [y[0] + y[1], -y[1]**2]
+
+ ig = ode(rhs).set_integrator(integrator)
+ ig.set_solout(solout)
+ ig.set_initial_value(y0, t0)
+ ret = ig.integrate(tend)
+ assert_array_equal(ys[0], y0)
+ assert_array_equal(ys[-1], ret)
+ assert_equal(ts[0], t0)
+ assert_equal(ts[-1], tend)
+
+ def test_solout(self):
+ for integrator in ('dopri5', 'dop853'):
+ self._run_solout_test(integrator)
+
+ def _run_solout_after_initial_test(self, integrator):
+ # Check if solout works even if it is set after the initial value.
+ ts = []
+ ys = []
+ t0 = 0.0
+ tend = 10.0
+ y0 = [1.0, 2.0]
+
+ def solout(t, y):
+ ts.append(t)
+ ys.append(y.copy())
+
+ def rhs(t, y):
+ return [y[0] + y[1], -y[1]**2]
+
+ ig = ode(rhs).set_integrator(integrator)
+ ig.set_initial_value(y0, t0)
+ ig.set_solout(solout)
+ ret = ig.integrate(tend)
+ assert_array_equal(ys[0], y0)
+ assert_array_equal(ys[-1], ret)
+ assert_equal(ts[0], t0)
+ assert_equal(ts[-1], tend)
+
+ def test_solout_after_initial(self):
+ for integrator in ('dopri5', 'dop853'):
+ self._run_solout_after_initial_test(integrator)
+
+ def _run_solout_break_test(self, integrator):
+ # Check correct usage of stopping via solout
+ ts = []
+ ys = []
+ t0 = 0.0
+ tend = 10.0
+ y0 = [1.0, 2.0]
+
+ def solout(t, y):
+ ts.append(t)
+ ys.append(y.copy())
+ if t > tend/2.0:
+ return -1
+
+ def rhs(t, y):
+ return [y[0] + y[1], -y[1]**2]
+
+ ig = ode(rhs).set_integrator(integrator)
+ ig.set_solout(solout)
+ ig.set_initial_value(y0, t0)
+ ret = ig.integrate(tend)
+ assert_array_equal(ys[0], y0)
+ assert_array_equal(ys[-1], ret)
+ assert_equal(ts[0], t0)
+ assert_(ts[-1] > tend/2.0)
+ assert_(ts[-1] < tend)
+
+ def test_solout_break(self):
+ for integrator in ('dopri5', 'dop853'):
+ self._run_solout_break_test(integrator)
+
+
+class TestComplexSolout(object):
+ # Check integrate.ode correctly handles solout for dopri5 and dop853
+ def _run_solout_test(self, integrator):
+ # Check correct usage of solout
+ ts = []
+ ys = []
+ t0 = 0.0
+ tend = 20.0
+ y0 = [0.0]
+
+ def solout(t, y):
+ ts.append(t)
+ ys.append(y.copy())
+
+ def rhs(t, y):
+ return [1.0/(t - 10.0 - 1j)]
+
+ ig = complex_ode(rhs).set_integrator(integrator)
+ ig.set_solout(solout)
+ ig.set_initial_value(y0, t0)
+ ret = ig.integrate(tend)
+ assert_array_equal(ys[0], y0)
+ assert_array_equal(ys[-1], ret)
+ assert_equal(ts[0], t0)
+ assert_equal(ts[-1], tend)
+
+ def test_solout(self):
+ for integrator in ('dopri5', 'dop853'):
+ self._run_solout_test(integrator)
+
+ def _run_solout_break_test(self, integrator):
+ # Check correct usage of stopping via solout
+ ts = []
+ ys = []
+ t0 = 0.0
+ tend = 20.0
+ y0 = [0.0]
+
+ def solout(t, y):
+ ts.append(t)
+ ys.append(y.copy())
+ if t > tend/2.0:
+ return -1
+
+ def rhs(t, y):
+ return [1.0/(t - 10.0 - 1j)]
+
+ ig = complex_ode(rhs).set_integrator(integrator)
+ ig.set_solout(solout)
+ ig.set_initial_value(y0, t0)
+ ret = ig.integrate(tend)
+ assert_array_equal(ys[0], y0)
+ assert_array_equal(ys[-1], ret)
+ assert_equal(ts[0], t0)
+ assert_(ts[-1] > tend/2.0)
+ assert_(ts[-1] < tend)
+
+ def test_solout_break(self):
+ for integrator in ('dopri5', 'dop853'):
+ self._run_solout_break_test(integrator)
+
+
+#------------------------------------------------------------------------------
+# Test problems
+#------------------------------------------------------------------------------
+
+
+class ODE:
+ """
+ ODE problem
+ """
+ stiff = False
+ cmplx = False
+ stop_t = 1
+ z0 = []
+
+ lband = None
+ uband = None
+
+ atol = 1e-6
+ rtol = 1e-5
+
+
+class SimpleOscillator(ODE):
+ r"""
+ Free vibration of a simple oscillator::
+        m \ddot{u} + k u = 0, u(0) = u_0, \dot{u}(0) = \dot{u}_0
+ Solution::
+ u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
+ """
+ stop_t = 1 + 0.09
+ z0 = array([1.0, 0.1], float)
+
+ k = 4.0
+ m = 1.0
+
+ def f(self, z, t):
+ tmp = zeros((2, 2), float)
+ tmp[0, 1] = 1.0
+ tmp[1, 0] = -self.k / self.m
+ return dot(tmp, z)
+
+ def verify(self, zs, t):
+ omega = sqrt(self.k / self.m)
+ u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
+ return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
+
+
+class ComplexExp(ODE):
+    r"""The equation :math:`\dot u = i u`"""
+ stop_t = 1.23*pi
+ z0 = exp([1j, 2j, 3j, 4j, 5j])
+ cmplx = True
+
+ def f(self, z, t):
+ return 1j*z
+
+ def jac(self, z, t):
+ return 1j*eye(5)
+
+ def verify(self, zs, t):
+ u = self.z0 * exp(1j*t)
+ return allclose(u, zs, atol=self.atol, rtol=self.rtol)
+
+
+class Pi(ODE):
+ r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
+ stop_t = 20
+ z0 = [0]
+ cmplx = True
+
+ def f(self, z, t):
+ return array([1./(t - 10 + 1j)])
+
+ def verify(self, zs, t):
+ u = -2j * np.arctan(10)
+ return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
+
+
+class CoupledDecay(ODE):
+ r"""
+ 3 coupled decays suited for banded treatment
+    (banded mode becomes necessary when N >> 3)
+ """
+
+ stiff = True
+ stop_t = 0.5
+ z0 = [5.0, 7.0, 13.0]
+ lband = 1
+ uband = 0
+
+ lmbd = [0.17, 0.23, 0.29] # fictitious decay constants
+
+ def f(self, z, t):
+ lmbd = self.lmbd
+ return np.array([-lmbd[0]*z[0],
+ -lmbd[1]*z[1] + lmbd[0]*z[0],
+ -lmbd[2]*z[2] + lmbd[1]*z[1]])
+
+ def jac(self, z, t):
+ # The full Jacobian is
+ #
+ # [-lmbd[0] 0 0 ]
+ # [ lmbd[0] -lmbd[1] 0 ]
+ # [ 0 lmbd[1] -lmbd[2]]
+ #
+ # The lower and upper bandwidths are lband=1 and uband=0, resp.
+ # The representation of this array in packed format is
+ #
+ # [-lmbd[0] -lmbd[1] -lmbd[2]]
+ # [ lmbd[0] lmbd[1] 0 ]
+
+ lmbd = self.lmbd
+ j = np.zeros((self.lband + self.uband + 1, 3), order='F')
+
+ def set_j(ri, ci, val):
+ j[self.uband + ri - ci, ci] = val
+ set_j(0, 0, -lmbd[0])
+ set_j(1, 0, lmbd[0])
+ set_j(1, 1, -lmbd[1])
+ set_j(2, 1, lmbd[1])
+ set_j(2, 2, -lmbd[2])
+ return j
+
+ def verify(self, zs, t):
+ # Formulae derived by hand
+ lmbd = np.array(self.lmbd)
+ d10 = lmbd[1] - lmbd[0]
+ d21 = lmbd[2] - lmbd[1]
+ d20 = lmbd[2] - lmbd[0]
+ e0 = np.exp(-lmbd[0] * t)
+ e1 = np.exp(-lmbd[1] * t)
+ e2 = np.exp(-lmbd[2] * t)
+ u = np.vstack((
+ self.z0[0] * e0,
+ self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
+ self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
+ lmbd[1] * lmbd[0] * self.z0[0] / d10 *
+ (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
+ return allclose(u, zs, atol=self.atol, rtol=self.rtol)
+
+
+PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
+
+#------------------------------------------------------------------------------
+
+
+def f(t, x):
+ dxdt = [x[1], -x[0]]
+ return dxdt
+
+
+def jac(t, x):
+ j = array([[0.0, 1.0],
+ [-1.0, 0.0]])
+ return j
+
+
+def f1(t, x, omega):
+ dxdt = [omega*x[1], -omega*x[0]]
+ return dxdt
+
+
+def jac1(t, x, omega):
+ j = array([[0.0, omega],
+ [-omega, 0.0]])
+ return j
+
+
+def f2(t, x, omega1, omega2):
+ dxdt = [omega1*x[1], -omega2*x[0]]
+ return dxdt
+
+
+def jac2(t, x, omega1, omega2):
+ j = array([[0.0, omega1],
+ [-omega2, 0.0]])
+ return j
+
+
+def fv(t, x, omega):
+ dxdt = [omega[0]*x[1], -omega[1]*x[0]]
+ return dxdt
+
+
+def jacv(t, x, omega):
+ j = array([[0.0, omega[0]],
+ [-omega[1], 0.0]])
+ return j
+
+
+class ODECheckParameterUse(object):
+ """Call an ode-class solver with several cases of parameter use."""
+
+ # solver_name must be set before tests can be run with this class.
+
+ # Set these in subclasses.
+ solver_name = ''
+ solver_uses_jac = False
+
+ def _get_solver(self, f, jac):
+ solver = ode(f, jac)
+ if self.solver_uses_jac:
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
+ with_jacobian=self.solver_uses_jac)
+ else:
+ # XXX Shouldn't set_integrator *always* accept the keyword arg
+ # 'with_jacobian', and perhaps raise an exception if it is set
+ # to True if the solver can't actually use it?
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
+ return solver
+
+ def _check_solver(self, solver):
+ ic = [1.0, 0.0]
+ solver.set_initial_value(ic, 0.0)
+ solver.integrate(pi)
+ assert_array_almost_equal(solver.y, [-1.0, 0.0])
+
+ def test_no_params(self):
+ solver = self._get_solver(f, jac)
+ self._check_solver(solver)
+
+ def test_one_scalar_param(self):
+ solver = self._get_solver(f1, jac1)
+ omega = 1.0
+ solver.set_f_params(omega)
+ if self.solver_uses_jac:
+ solver.set_jac_params(omega)
+ self._check_solver(solver)
+
+ def test_two_scalar_params(self):
+ solver = self._get_solver(f2, jac2)
+ omega1 = 1.0
+ omega2 = 1.0
+ solver.set_f_params(omega1, omega2)
+ if self.solver_uses_jac:
+ solver.set_jac_params(omega1, omega2)
+ self._check_solver(solver)
+
+ def test_vector_param(self):
+ solver = self._get_solver(fv, jacv)
+ omega = [1.0, 1.0]
+ solver.set_f_params(omega)
+ if self.solver_uses_jac:
+ solver.set_jac_params(omega)
+ self._check_solver(solver)
+
+ def test_warns_on_failure(self):
+ # Set nsteps small to ensure failure
+ solver = self._get_solver(f, jac)
+ solver.set_integrator(self.solver_name, nsteps=1)
+ ic = [1.0, 0.0]
+ solver.set_initial_value(ic, 0.0)
+ assert_warns(UserWarning, solver.integrate, pi)
+
+
+class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
+ solver_name = 'dopri5'
+ solver_uses_jac = False
+
+
+class TestDOP853CheckParameterUse(ODECheckParameterUse):
+ solver_name = 'dop853'
+ solver_uses_jac = False
+
+
+class TestVODECheckParameterUse(ODECheckParameterUse):
+ solver_name = 'vode'
+ solver_uses_jac = True
+
+
+class TestZVODECheckParameterUse(ODECheckParameterUse):
+ solver_name = 'zvode'
+ solver_uses_jac = True
+
+
+class TestLSODACheckParameterUse(ODECheckParameterUse):
+ solver_name = 'lsoda'
+ solver_uses_jac = True
+
+
+def test_odeint_trivial_time():
+ # Test that odeint succeeds when given a single time point
+ # and full_output=True. This is a regression test for gh-4282.
+ y0 = 1
+ t = [0]
+ y, info = odeint(lambda y, t: -y, y0, t, full_output=True)
+ assert_array_equal(y, np.array([[y0]]))
+
+
+def test_odeint_banded_jacobian():
+ # Test the use of the `Dfun`, `ml` and `mu` options of odeint.
+
+ def func(y, t, c):
+ return c.dot(y)
+
+ def jac(y, t, c):
+ return c
+
+ def jac_transpose(y, t, c):
+ return c.T.copy(order='C')
+
+ def bjac_rows(y, t, c):
+ jac = np.row_stack((np.r_[0, np.diag(c, 1)],
+ np.diag(c),
+ np.r_[np.diag(c, -1), 0],
+ np.r_[np.diag(c, -2), 0, 0]))
+ return jac
+
+ def bjac_cols(y, t, c):
+ return bjac_rows(y, t, c).T.copy(order='C')
+
+ c = array([[-205, 0.01, 0.00, 0.0],
+ [0.1, -2.50, 0.02, 0.0],
+ [1e-3, 0.01, -2.0, 0.01],
+ [0.00, 0.00, 0.1, -1.0]])
+
+ y0 = np.ones(4)
+ t = np.array([0, 5, 10, 100])
+
+ # Use the full Jacobian.
+ sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
+ atol=1e-13, rtol=1e-11, mxstep=10000,
+ Dfun=jac)
+
+ # Use the transposed full Jacobian, with col_deriv=True.
+ sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
+ atol=1e-13, rtol=1e-11, mxstep=10000,
+ Dfun=jac_transpose, col_deriv=True)
+
+ # Use the banded Jacobian.
+ sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
+ atol=1e-13, rtol=1e-11, mxstep=10000,
+ Dfun=bjac_rows, ml=2, mu=1)
+
+ # Use the transposed banded Jacobian, with col_deriv=True.
+ sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
+ atol=1e-13, rtol=1e-11, mxstep=10000,
+ Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)
+
+ assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
+ assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
+ assert_allclose(sol3, sol4, err_msg="sol3 != sol4")
+
+ # Verify that the number of jacobian evaluations was the same for the
+ # calls of odeint with a full jacobian and with a banded jacobian. This is
+ # a regression test--there was a bug in the handling of banded jacobians
+ # that resulted in an incorrect jacobian matrix being passed to the LSODA
+ # code. That would cause errors or excessive jacobian evaluations.
+ assert_array_equal(info1['nje'], info2['nje'])
+ assert_array_equal(info3['nje'], info4['nje'])
+
+ # Test the use of tfirst
+ sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
+ full_output=True, atol=1e-13, rtol=1e-11,
+ mxstep=10000,
+ Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
+ # The code should execute the exact same sequence of floating point
+ # calculations, so these should be exactly equal. We'll be safe and use
+ # a small tolerance.
+ assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
+
+
+def test_odeint_errors():
+ def sys1d(x, t):
+ return -100*x
+
+ def bad1(x, t):
+ return 1.0/0
+
+ def bad2(x, t):
+ return "foo"
+
+ def bad_jac1(x, t):
+ return 1.0/0
+
+ def bad_jac2(x, t):
+ return [["foo"]]
+
+ def sys2d(x, t):
+ return [-100*x[0], -0.1*x[1]]
+
+ def sys2d_bad_jac(x, t):
+ return [[1.0/0, 0], [0, -0.1]]
+
+ assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
+ assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])
+
+ assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
+ assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)
+
+ assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
+ Dfun=sys2d_bad_jac)
+
+
+def test_odeint_bad_shapes():
+ # Tests of some errors that can occur with odeint.
+
+ def badrhs(x, t):
+ return [1, -1]
+
+ def sys1(x, t):
+ return -100*x
+
+ def badjac(x, t):
+ return [[0, 0, 0]]
+
+ # y0 must be at most 1-d.
+ bad_y0 = [[0, 0], [0, 0]]
+ assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])
+
+ # t must be at most 1-d.
+ bad_t = [[0, 1], [2, 3]]
+ assert_raises(ValueError, odeint, sys1, [10.0], bad_t)
+
+ # y0 is 10, but badrhs(x, t) returns [1, -1].
+ assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])
+
+ # shape of array returned by badjac(x, t) is not correct.
+ assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)
+
+
+def test_repeated_t_values():
+ """Regression test for gh-8217."""
+
+ def func(x, t):
+ return -0.25*x
+
+ t = np.zeros(10)
+ sol = odeint(func, [1.], t)
+ assert_array_equal(sol, np.ones((len(t), 1)))
+
+ tau = 4*np.log(2)
+ t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]
+ sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12)
+ expected_sol = np.array([[1.0, 2.0]]*9 +
+ [[0.5, 1.0],
+ [0.25, 0.5],
+ [0.25, 0.5],
+ [0.125, 0.25]])
+ assert_allclose(sol, expected_sol)
+
+ # Edge case: empty t sequence.
+ sol = odeint(func, [1.], [])
+ assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))
+
+ # t values are not monotonic.
+ assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0])
+ assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_odeint_jac.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_odeint_jac.py
new file mode 100644
index 0000000..ef14890
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_odeint_jac.py
@@ -0,0 +1,75 @@
+
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+from scipy.integrate import odeint
+import scipy.integrate._test_odeint_banded as banded5x5
+
+
+def rhs(y, t):
+ dydt = np.zeros_like(y)
+ banded5x5.banded5x5(t, y, dydt)
+ return dydt
+
+
+def jac(y, t):
+ n = len(y)
+ jac = np.zeros((n, n), order='F')
+ banded5x5.banded5x5_jac(t, y, 1, 1, jac)
+ return jac
+
+
+def bjac(y, t):
+ n = len(y)
+ bjac = np.zeros((4, n), order='F')
+ banded5x5.banded5x5_bjac(t, y, 1, 1, bjac)
+ return bjac
+
+
+JACTYPE_FULL = 1
+JACTYPE_BANDED = 4
+
+
+def check_odeint(jactype):
+ if jactype == JACTYPE_FULL:
+ ml = None
+ mu = None
+ jacobian = jac
+ elif jactype == JACTYPE_BANDED:
+ ml = 2
+ mu = 1
+ jacobian = bjac
+ else:
+ raise ValueError("invalid jactype: %r" % (jactype,))
+
+ y0 = np.arange(1.0, 6.0)
+ # These tolerances must match the tolerances used in banded5x5.f.
+ rtol = 1e-11
+ atol = 1e-13
+ dt = 0.125
+ nsteps = 64
+ t = dt * np.arange(nsteps+1)
+
+ sol, info = odeint(rhs, y0, t,
+ Dfun=jacobian, ml=ml, mu=mu,
+ atol=atol, rtol=rtol, full_output=True)
+ yfinal = sol[-1]
+ odeint_nst = info['nst'][-1]
+ odeint_nfe = info['nfe'][-1]
+ odeint_nje = info['nje'][-1]
+
+ y1 = y0.copy()
+ # Pure Fortran solution. y1 is modified in-place.
+ nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)
+
+ # It is likely that yfinal and y1 are *exactly* the same, but
+ # we'll be cautious and use assert_allclose.
+ assert_allclose(yfinal, y1, rtol=1e-12)
+ assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
+
+
+def test_odeint_full_jac():
+ check_odeint(JACTYPE_FULL)
+
+
+def test_odeint_banded_jac():
+ check_odeint(JACTYPE_BANDED)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_quadpack.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_quadpack.py
new file mode 100644
index 0000000..6f27b27
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_quadpack.py
@@ -0,0 +1,411 @@
+import sys
+import math
+import numpy as np
+from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
+from numpy.testing import (assert_,
+ assert_allclose, assert_array_less, assert_almost_equal)
+import pytest
+
+from scipy.integrate import quad, dblquad, tplquad, nquad
+from scipy._lib._ccallback import LowLevelCallable
+
+import ctypes
+import ctypes.util
+from scipy._lib._ccallback_c import sine_ctypes
+
+import scipy.integrate._test_multivariate as clib_test
+
+
+def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
+ value, err = value_and_err
+ assert_allclose(value, tabled_value, atol=err, rtol=0)
+ if errTol is not None:
+ assert_array_less(err, errTol)
+
+
+def get_clib_test_routine(name, restype, *argtypes):
+ ptr = getattr(clib_test, name)
+ return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes))
+
+
+class TestCtypesQuad(object):
+ def setup_method(self):
+ if sys.platform == 'win32':
+ files = ['api-ms-win-crt-math-l1-1-0.dll']
+ elif sys.platform == 'darwin':
+ files = ['libm.dylib']
+ else:
+ files = ['libm.so', 'libm.so.6']
+
+ for file in files:
+ try:
+ self.lib = ctypes.CDLL(file)
+ break
+ except OSError:
+ pass
+ else:
+ # This test doesn't work on some Linux platforms (Fedora for
+ # example) that put an ld script in libm.so - see gh-5370
+ pytest.skip("Ctypes can't import libm.so")
+
+ restype = ctypes.c_double
+ argtypes = (ctypes.c_double,)
+ for name in ['sin', 'cos', 'tan']:
+ func = getattr(self.lib, name)
+ func.restype = restype
+ func.argtypes = argtypes
+
+ def test_typical(self):
+ assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
+ assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
+ assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
+
+ def test_ctypes_sine(self):
+ quad(LowLevelCallable(sine_ctypes), 0, 1)
+
+ def test_ctypes_variants(self):
+ sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
+ ctypes.c_double, ctypes.c_void_p)
+
+ sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
+ ctypes.c_int, ctypes.POINTER(ctypes.c_double),
+ ctypes.c_void_p)
+
+ sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
+ ctypes.c_double)
+
+ sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
+ ctypes.c_int, ctypes.POINTER(ctypes.c_double))
+
+ sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
+ ctypes.c_int, ctypes.c_double)
+
+ all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
+ legacy_sigs = [sin_2, sin_4]
+ legacy_only_sigs = [sin_4]
+
+ # LowLevelCallables work for new signatures
+ for j, func in enumerate(all_sigs):
+ callback = LowLevelCallable(func)
+ if func in legacy_only_sigs:
+ pytest.raises(ValueError, quad, callback, 0, pi)
+ else:
+ assert_allclose(quad(callback, 0, pi)[0], 2.0)
+
+ # Plain ctypes items work only for legacy signatures
+ for j, func in enumerate(legacy_sigs):
+ if func in legacy_sigs:
+ assert_allclose(quad(func, 0, pi)[0], 2.0)
+ else:
+ pytest.raises(ValueError, quad, func, 0, pi)
+
+
+class TestMultivariateCtypesQuad(object):
+ def setup_method(self):
+ restype = ctypes.c_double
+ argtypes = (ctypes.c_int, ctypes.c_double)
+ for name in ['_multivariate_typical', '_multivariate_indefinite',
+ '_multivariate_sin']:
+ func = get_clib_test_routine(name, restype, *argtypes)
+ setattr(self, name, func)
+
+ def test_typical(self):
+ # 1) Typical function with two extra arguments:
+ assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
+ 0.30614353532540296487)
+
+ def test_indefinite(self):
+ # 2) Infinite integration limits --- Euler's constant
+ assert_quad(quad(self._multivariate_indefinite, 0, Inf),
+ 0.577215664901532860606512)
+
+ def test_threadsafety(self):
+ # Ensure multivariate ctypes are threadsafe
+ def threadsafety(y):
+ return y + quad(self._multivariate_sin, 0, 1)[0]
+ assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
+
+
+class TestQuad(object):
+ def test_typical(self):
+ # 1) Typical function with two extra arguments:
+ def myfunc(x, n, z): # Bessel function integrand
+ return cos(n*x-z*sin(x))/pi
+ assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
+
+ def test_indefinite(self):
+ # 2) Infinite integration limits --- Euler's constant
+ def myfunc(x): # Euler's constant integrand
+ return -exp(-x)*log(x)
+ assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
+
+ def test_singular(self):
+ # 3) Singular points in region of integration.
+ def myfunc(x):
+ if 0 < x < 2.5:
+ return sin(x)
+ elif 2.5 <= x <= 5.0:
+ return exp(-x)
+ else:
+ return 0.0
+
+ assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
+ 1 - cos(2.5) + exp(-2.5) - exp(-5.0))
+
+ def test_sine_weighted_finite(self):
+ # 4) Sine weighted integral (finite limits)
+ def myfunc(x, a):
+ return exp(a*(x-1))
+
+ ome = 2.0**3.4
+ assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
+ (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
+
+ def test_sine_weighted_infinite(self):
+ # 5) Sine weighted integral (infinite limits)
+ def myfunc(x, a):
+ return exp(-x*a)
+
+ a = 4.0
+ ome = 3.0
+ assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
+ ome/(a**2 + ome**2))
+
+ def test_cosine_weighted_infinite(self):
+ # 6) Cosine weighted integral (negative infinite limits)
+ def myfunc(x, a):
+ return exp(x*a)
+
+ a = 2.5
+ ome = 2.3
+ assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
+ a/(a**2 + ome**2))
+
+ def test_algebraic_log_weight(self):
+ # 6) Algebraic-logarithmic weight.
+ def myfunc(x, a):
+ return 1/(1+x+2**(-a))
+
+ a = 1.5
+ assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
+ wvar=(-0.5, -0.5)),
+ pi/sqrt((1+2**(-a))**2 - 1))
+
+ def test_cauchypv_weight(self):
+ # 7) Cauchy principal value weighting w(x) = 1/(x-c)
+ def myfunc(x, a):
+ return 2.0**(-a)/((x-1)**2+4.0**(-a))
+
+ a = 0.4
+ tabledValue = ((2.0**(-0.4)*log(1.5) -
+ 2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
+ arctan(2.0**(a+2)) -
+ arctan(2.0**a)) /
+ (4.0**(-a) + 1))
+ assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
+ tabledValue, errTol=1.9e-8)
+
+ def test_b_less_than_a(self):
+ def f(x, p, q):
+ return p * np.exp(-q*x)
+
+ val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
+ val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
+ assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+ def test_b_less_than_a_2(self):
+ def f(x, s):
+ return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
+
+ val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
+ val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
+ assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+ def test_b_less_than_a_3(self):
+ def f(x):
+ return 1.0
+
+ val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
+ val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
+ assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+ def test_b_less_than_a_full_output(self):
+ def f(x):
+ return 1.0
+
+ res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
+ res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
+ err = max(res_1[1], res_2[1])
+ assert_allclose(res_1[0], -res_2[0], atol=err)
+
+ def test_double_integral(self):
+ # 8) Double Integral test
+ def simpfunc(y, x): # Note order of arguments.
+ return x+y
+
+ a, b = 1.0, 2.0
+ assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
+ 5/6.0 * (b**3.0-a**3.0))
+
+ def test_double_integral2(self):
+ def func(x0, x1, t0, t1):
+ return x0 + x1 + t0 + t1
+ g = lambda x: x
+ h = lambda x: 2 * x
+ args = 1, 2
+ assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
+
+ def test_double_integral3(self):
+ def func(x0, x1):
+ return x0 + x1 + 1 + 2
+ assert_quad(dblquad(func, 1, 2, 1, 2),6.)
+
+ def test_triple_integral(self):
+ # 9) Triple Integral test
+ def simpfunc(z, y, x, t): # Note order of arguments.
+ return (x+y+z)*t
+
+ a, b = 1.0, 2.0
+ assert_quad(tplquad(simpfunc, a, b,
+ lambda x: x, lambda x: 2*x,
+ lambda x, y: x - y, lambda x, y: x + y,
+ (2.,)),
+ 2*8/3.0 * (b**4.0 - a**4.0))
+
+
+class TestNQuad(object):
+ def test_fixed_limits(self):
+ def func1(x0, x1, x2, x3):
+ val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
+ (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
+ return val
+
+ def opts_basic(*args):
+ return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
+
+ res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
+ opts=[opts_basic, {}, {}, {}], full_output=True)
+ assert_quad(res[:-1], 1.5267454070738635)
+ assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
+
+ def test_variable_limits(self):
+ scale = .1
+
+ def func2(x0, x1, x2, x3, t0, t1):
+ val = (x0*x1*x3**2 + np.sin(x2) + 1 +
+ (1 if x0 + t1*x1 - t0 > 0 else 0))
+ return val
+
+ def lim0(x1, x2, x3, t0, t1):
+ return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
+ scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
+
+ def lim1(x2, x3, t0, t1):
+ return [scale * (t0*x2 + t1*x3) - 1,
+ scale * (t0*x2 + t1*x3) + 1]
+
+ def lim2(x3, t0, t1):
+ return [scale * (x3 + t0**2*t1**3) - 1,
+ scale * (x3 + t0**2*t1**3) + 1]
+
+ def lim3(t0, t1):
+ return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
+
+ def opts0(x1, x2, x3, t0, t1):
+ return {'points': [t0 - t1*x1]}
+
+ def opts1(x2, x3, t0, t1):
+ return {}
+
+ def opts2(x3, t0, t1):
+ return {}
+
+ def opts3(t0, t1):
+ return {}
+
+ res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
+ opts=[opts0, opts1, opts2, opts3])
+ assert_quad(res, 25.066666666666663)
+
+ def test_square_separate_ranges_and_opts(self):
+ def f(y, x):
+ return 1.0
+
+ assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
+
+ def test_square_aliased_ranges_and_opts(self):
+ def f(y, x):
+ return 1.0
+
+ r = [-1, 1]
+ opt = {}
+ assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
+
+ def test_square_separate_fn_ranges_and_opts(self):
+ def f(y, x):
+ return 1.0
+
+ def fn_range0(*args):
+ return (-1, 1)
+
+ def fn_range1(*args):
+ return (-1, 1)
+
+ def fn_opt0(*args):
+ return {}
+
+ def fn_opt1(*args):
+ return {}
+
+ ranges = [fn_range0, fn_range1]
+ opts = [fn_opt0, fn_opt1]
+ assert_quad(nquad(f, ranges, opts=opts), 4.0)
+
+ def test_square_aliased_fn_ranges_and_opts(self):
+ def f(y, x):
+ return 1.0
+
+ def fn_range(*args):
+ return (-1, 1)
+
+ def fn_opt(*args):
+ return {}
+
+ ranges = [fn_range, fn_range]
+ opts = [fn_opt, fn_opt]
+ assert_quad(nquad(f, ranges, opts=opts), 4.0)
+
+ def test_matching_quad(self):
+ def func(x):
+ return x**2 + 1
+
+ res, reserr = quad(func, 0, 4)
+ res2, reserr2 = nquad(func, ranges=[[0, 4]])
+ assert_almost_equal(res, res2)
+ assert_almost_equal(reserr, reserr2)
+
+ def test_matching_dblquad(self):
+ def func2d(x0, x1):
+ return x0**2 + x1**3 - x0 * x1 + 1
+
+ res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
+ res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
+ assert_almost_equal(res, res2)
+ assert_almost_equal(reserr, reserr2)
+
+ def test_matching_tplquad(self):
+ def func3d(x0, x1, x2, c0, c1):
+ return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
+
+ res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
+ lambda x, y: -np.pi, lambda x, y: np.pi,
+ args=(2, 3))
+ res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
+ assert_almost_equal(res, res2)
+
+ def test_dict_as_opts(self):
+ try:
+ nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
+ except TypeError:
+ assert False
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_quadrature.py b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_quadrature.py
new file mode 100644
index 0000000..7002805
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/tests/test_quadrature.py
@@ -0,0 +1,267 @@
+import numpy as np
+from numpy import cos, sin, pi
+from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
+ assert_, suppress_warnings)
+
+from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
+ cumulative_trapezoid, cumtrapz, trapz, trapezoid,
+ quad, simpson, simps, fixed_quad, AccuracyWarning)
+
+
+class TestFixedQuad(object):
+ def test_scalar(self):
+ n = 4
+ func = lambda x: x**(2*n - 1)
+ expected = 1/(2*n)
+ got, _ = fixed_quad(func, 0, 1, n=n)
+ # quadrature exact for this input
+ assert_allclose(got, expected, rtol=1e-12)
+
+ def test_vector(self):
+ n = 4
+ p = np.arange(1, 2*n)
+ func = lambda x: x**p[:,None]
+ expected = 1/(p + 1)
+ got, _ = fixed_quad(func, 0, 1, n=n)
+ assert_allclose(got, expected, rtol=1e-12)
+
+
+class TestQuadrature(object):
+ def quad(self, x, a, b, args):
+ raise NotImplementedError
+
+ def test_quadrature(self):
+ # Typical function with two extra arguments:
+ def myfunc(x, n, z): # Bessel function integrand
+ return cos(n*x-z*sin(x))/pi
+ val, err = quadrature(myfunc, 0, pi, (2, 1.8))
+ table_val = 0.30614353532540296487
+ assert_almost_equal(val, table_val, decimal=7)
+
+ def test_quadrature_rtol(self):
+ def myfunc(x, n, z): # Bessel function integrand
+ return 1e90 * cos(n*x-z*sin(x))/pi
+ val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
+ table_val = 1e90 * 0.30614353532540296487
+ assert_allclose(val, table_val, rtol=1e-10)
+
+ def test_quadrature_miniter(self):
+ # Typical function with two extra arguments:
+ def myfunc(x, n, z): # Bessel function integrand
+ return cos(n*x-z*sin(x))/pi
+ table_val = 0.30614353532540296487
+ for miniter in [5, 52]:
+ val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
+ assert_almost_equal(val, table_val, decimal=7)
+ assert_(err < 1.0)
+
+ def test_quadrature_single_args(self):
+ def myfunc(x, n):
+ return 1e90 * cos(n*x-1.8*sin(x))/pi
+ val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
+ table_val = 1e90 * 0.30614353532540296487
+ assert_allclose(val, table_val, rtol=1e-10)
+
+ def test_romberg(self):
+ # Typical function with two extra arguments:
+ def myfunc(x, n, z): # Bessel function integrand
+ return cos(n*x-z*sin(x))/pi
+ val = romberg(myfunc, 0, pi, args=(2, 1.8))
+ table_val = 0.30614353532540296487
+ assert_almost_equal(val, table_val, decimal=7)
+
+ def test_romberg_rtol(self):
+ # Typical function with two extra arguments:
+ def myfunc(x, n, z): # Bessel function integrand
+ return 1e19*cos(n*x-z*sin(x))/pi
+ val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
+ table_val = 1e19*0.30614353532540296487
+ assert_allclose(val, table_val, rtol=1e-10)
+
+ def test_romb(self):
+ assert_equal(romb(np.arange(17)), 128)
+
+ def test_romb_gh_3731(self):
+ # Check that romb makes maximal use of data points
+ x = np.arange(2**4+1)
+ y = np.cos(0.2*x)
+ val = romb(y)
+ val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
+ assert_allclose(val, val2, rtol=1e-8, atol=0)
+
+ # should be equal to romb with 2**k+1 samples
+ with suppress_warnings() as sup:
+ sup.filter(AccuracyWarning, "divmax .4. exceeded")
+ val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
+ assert_allclose(val, val3, rtol=1e-12, atol=0)
+
+ def test_non_dtype(self):
+ # Check that we work fine with functions returning float
+ import math
+ valmath = romberg(math.sin, 0, 1)
+ expected_val = 0.45969769413185085
+ assert_almost_equal(valmath, expected_val, decimal=7)
+
+ def test_newton_cotes(self):
+ """Test the first few degrees, for evenly spaced points."""
+ n = 1
+ wts, errcoff = newton_cotes(n, 1)
+ assert_equal(wts, n*np.array([0.5, 0.5]))
+ assert_almost_equal(errcoff, -n**3/12.0)
+
+ n = 2
+ wts, errcoff = newton_cotes(n, 1)
+ assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
+ assert_almost_equal(errcoff, -n**5/2880.0)
+
+ n = 3
+ wts, errcoff = newton_cotes(n, 1)
+ assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
+ assert_almost_equal(errcoff, -n**5/6480.0)
+
+ n = 4
+ wts, errcoff = newton_cotes(n, 1)
+ assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
+ assert_almost_equal(errcoff, -n**7/1935360.0)
+
+ def test_newton_cotes2(self):
+ """Test newton_cotes with points that are not evenly spaced."""
+
+ x = np.array([0.0, 1.5, 2.0])
+ y = x**2
+ wts, errcoff = newton_cotes(x)
+ exact_integral = 8.0/3
+ numeric_integral = np.dot(wts, y)
+ assert_almost_equal(numeric_integral, exact_integral)
+
+ x = np.array([0.0, 1.4, 2.1, 3.0])
+ y = x**2
+ wts, errcoff = newton_cotes(x)
+ exact_integral = 9.0
+ numeric_integral = np.dot(wts, y)
+ assert_almost_equal(numeric_integral, exact_integral)
+
+ def test_simpson(self):
+ y = np.arange(17)
+ assert_equal(simpson(y), 128)
+ assert_equal(simpson(y, dx=0.5), 64)
+ assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32)
+
+ y = np.arange(4)
+ x = 2**y
+ assert_equal(simpson(y, x=x, even='avg'), 13.875)
+ assert_equal(simpson(y, x=x, even='first'), 13.75)
+ assert_equal(simpson(y, x=x, even='last'), 14)
+
+ def test_simps(self):
+ # Basic coverage test for the alias
+ y = np.arange(4)
+ x = 2**y
+ assert_equal(simpson(y, x=x, dx=0.5, even='first'),
+ simps(y, x=x, dx=0.5, even='first'))
+
+
+class TestCumulative_trapezoid(object):
+ def test_1d(self):
+ x = np.linspace(-2, 2, num=5)
+ y = x
+ y_int = cumulative_trapezoid(y, x, initial=0)
+ y_expected = [0., -1.5, -2., -1.5, 0.]
+ assert_allclose(y_int, y_expected)
+
+ y_int = cumulative_trapezoid(y, x, initial=None)
+ assert_allclose(y_int, y_expected[1:])
+
+ def test_y_nd_x_nd(self):
+ x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
+ y = x
+ y_int = cumulative_trapezoid(y, x, initial=0)
+ y_expected = np.array([[[0., 0.5, 2., 4.5],
+ [0., 4.5, 10., 16.5]],
+ [[0., 8.5, 18., 28.5],
+ [0., 12.5, 26., 40.5]],
+ [[0., 16.5, 34., 52.5],
+ [0., 20.5, 42., 64.5]]])
+
+ assert_allclose(y_int, y_expected)
+
+ # Try with all axes
+ shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
+ for axis, shape in zip([0, 1, 2], shapes):
+ y_int = cumulative_trapezoid(y, x, initial=3.45, axis=axis)
+ assert_equal(y_int.shape, (3, 2, 4))
+ y_int = cumulative_trapezoid(y, x, initial=None, axis=axis)
+ assert_equal(y_int.shape, shape)
+
+ def test_y_nd_x_1d(self):
+ y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
+ x = np.arange(4)**2
+ # Try with all axes
+ ys_expected = (
+ np.array([[[4., 5., 6., 7.],
+ [8., 9., 10., 11.]],
+ [[40., 44., 48., 52.],
+ [56., 60., 64., 68.]]]),
+ np.array([[[2., 3., 4., 5.]],
+ [[10., 11., 12., 13.]],
+ [[18., 19., 20., 21.]]]),
+ np.array([[[0.5, 5., 17.5],
+ [4.5, 21., 53.5]],
+ [[8.5, 37., 89.5],
+ [12.5, 53., 125.5]],
+ [[16.5, 69., 161.5],
+ [20.5, 85., 197.5]]]))
+
+ for axis, y_expected in zip([0, 1, 2], ys_expected):
+ y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis,
+ initial=None)
+ assert_allclose(y_int, y_expected)
+
+ def test_x_none(self):
+ y = np.linspace(-2, 2, num=5)
+
+ y_int = cumulative_trapezoid(y)
+ y_expected = [-1.5, -2., -1.5, 0.]
+ assert_allclose(y_int, y_expected)
+
+ y_int = cumulative_trapezoid(y, initial=1.23)
+ y_expected = [1.23, -1.5, -2., -1.5, 0.]
+ assert_allclose(y_int, y_expected)
+
+ y_int = cumulative_trapezoid(y, dx=3)
+ y_expected = [-4.5, -6., -4.5, 0.]
+ assert_allclose(y_int, y_expected)
+
+ y_int = cumulative_trapezoid(y, dx=3, initial=1.23)
+ y_expected = [1.23, -4.5, -6., -4.5, 0.]
+ assert_allclose(y_int, y_expected)
+
+ def test_cumtrapz(self):
+ # Basic coverage test for the alias
+ x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
+ y = x
+ assert_allclose(cumulative_trapezoid(y, x, dx=0.5, axis=0, initial=0),
+ cumtrapz(y, x, dx=0.5, axis=0, initial=0),
+ rtol=1e-14)
+
+
+class TestTrapezoid():
+ """This function is tested in NumPy more extensive, just do some
+ basic due diligence here."""
+ def test_trapezoid(self):
+ y = np.arange(17)
+ assert_equal(trapezoid(y), 128)
+ assert_equal(trapezoid(y, dx=0.5), 64)
+ assert_equal(trapezoid(y, x=np.linspace(0, 4, 17)), 32)
+
+ y = np.arange(4)
+ x = 2**y
+ assert_equal(trapezoid(y, x=x, dx=0.1), 13.5)
+
+ def test_trapz(self):
+ # Basic coverage test for the alias
+ y = np.arange(4)
+ x = 2**y
+ assert_equal(trapezoid(y, x=x, dx=0.5, axis=0),
+ trapz(y, x=x, dx=0.5, axis=0))
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/integrate/vode.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/vode.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..8ca7ee2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/integrate/vode.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/__init__.py
new file mode 100644
index 0000000..cd06266
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/__init__.py
@@ -0,0 +1,190 @@
+"""
+========================================
+Interpolation (:mod:`scipy.interpolate`)
+========================================
+
+.. currentmodule:: scipy.interpolate
+
+Sub-package for objects used in interpolation.
+
+As listed below, this sub-package contains spline functions and classes,
+1-D and multidimensional (univariate and multivariate)
+interpolation classes, Lagrange and Taylor polynomial interpolators, and
+wrappers for `FITPACK `__
+and DFITPACK functions.
+
+Univariate interpolation
+========================
+
+.. autosummary::
+ :toctree: generated/
+
+ interp1d
+ BarycentricInterpolator
+ KroghInterpolator
+ barycentric_interpolate
+ krogh_interpolate
+ pchip_interpolate
+ CubicHermiteSpline
+ PchipInterpolator
+ Akima1DInterpolator
+ CubicSpline
+ PPoly
+ BPoly
+
+
+Multivariate interpolation
+==========================
+
+Unstructured data:
+
+.. autosummary::
+ :toctree: generated/
+
+ griddata
+ LinearNDInterpolator
+ NearestNDInterpolator
+ CloughTocher2DInterpolator
+ Rbf
+ interp2d
+
+For data on a grid:
+
+.. autosummary::
+ :toctree: generated/
+
+ interpn
+ RegularGridInterpolator
+ RectBivariateSpline
+
+.. seealso::
+
+ `scipy.ndimage.map_coordinates`
+
+Tensor product polynomials:
+
+.. autosummary::
+ :toctree: generated/
+
+ NdPPoly
+
+
+1-D Splines
+===========
+
+.. autosummary::
+ :toctree: generated/
+
+ BSpline
+ make_interp_spline
+ make_lsq_spline
+
+Functional interface to FITPACK routines:
+
+.. autosummary::
+ :toctree: generated/
+
+ splrep
+ splprep
+ splev
+ splint
+ sproot
+ spalde
+ splder
+ splantider
+ insert
+
+Object-oriented FITPACK interface:
+
+.. autosummary::
+ :toctree: generated/
+
+ UnivariateSpline
+ InterpolatedUnivariateSpline
+ LSQUnivariateSpline
+
+
+
+2-D Splines
+===========
+
+For data on a grid:
+
+.. autosummary::
+ :toctree: generated/
+
+ RectBivariateSpline
+ RectSphereBivariateSpline
+
+For unstructured data:
+
+.. autosummary::
+ :toctree: generated/
+
+ BivariateSpline
+ SmoothBivariateSpline
+ SmoothSphereBivariateSpline
+ LSQBivariateSpline
+ LSQSphereBivariateSpline
+
+Low-level interface to FITPACK functions:
+
+.. autosummary::
+ :toctree: generated/
+
+ bisplrep
+ bisplev
+
+Additional tools
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ lagrange
+ approximate_taylor_polynomial
+ pade
+
+.. seealso::
+
+ `scipy.ndimage.map_coordinates`,
+ `scipy.ndimage.spline_filter`,
+ `scipy.signal.resample`,
+ `scipy.signal.bspline`,
+ `scipy.signal.gauss_spline`,
+ `scipy.signal.qspline1d`,
+ `scipy.signal.cspline1d`,
+ `scipy.signal.qspline1d_eval`,
+ `scipy.signal.cspline1d_eval`,
+ `scipy.signal.qspline2d`,
+ `scipy.signal.cspline2d`.
+
+``pchip`` is an alias of `PchipInterpolator` for backward compatibility
+(should not be used in new code).
+"""
+from .interpolate import *
+from .fitpack import *
+
+# New interface to fitpack library:
+from .fitpack2 import *
+
+from .rbf import Rbf
+
+from .polyint import *
+
+from ._cubic import *
+
+from .ndgriddata import *
+
+from ._bsplines import *
+
+from ._pade import *
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
+# Backward compatibility
+pchip = PchipInterpolator
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_bspl.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_bspl.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..6d4885f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_bspl.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_bsplines.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_bsplines.py
new file mode 100644
index 0000000..96563d8
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_bsplines.py
@@ -0,0 +1,1011 @@
+import operator
+
+import numpy as np
+from numpy.core.multiarray import normalize_axis_index
+from scipy.linalg import (get_lapack_funcs, LinAlgError,
+ cholesky_banded, cho_solve_banded)
+from . import _bspl
+from . import _fitpack_impl
+from . import _fitpack as _dierckx
+from scipy._lib._util import prod
+
+__all__ = ["BSpline", "make_interp_spline", "make_lsq_spline"]
+
+
+def _get_dtype(dtype):
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
+ if np.issubdtype(dtype, np.complexfloating):
+ return np.complex_
+ else:
+ return np.float_
+
+
+def _as_float_array(x, check_finite=False):
+ """Convert the input into a C contiguous float array.
+
+ NB: Upcasts half- and single-precision floats to double precision.
+ """
+ x = np.ascontiguousarray(x)
+ dtyp = _get_dtype(x.dtype)
+ x = x.astype(dtyp, copy=False)
+ if check_finite and not np.isfinite(x).all():
+ raise ValueError("Array must not contain infs or nans.")
+ return x
+
+
+class BSpline(object):
+ r"""Univariate spline in the B-spline basis.
+
+ .. math::
+
+ S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
+
+ where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
+ and knots `t`.
+
+ Parameters
+ ----------
+ t : ndarray, shape (n+k+1,)
+ knots
+ c : ndarray, shape (>=n, ...)
+ spline coefficients
+ k : int
+ B-spline degree
+ extrapolate : bool or 'periodic', optional
+ whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
+ or to return nans.
+ If True, extrapolates the first and last polynomial pieces of b-spline
+ functions active on the base interval.
+ If 'periodic', periodic extrapolation is used.
+ Default is True.
+ axis : int, optional
+ Interpolation axis. Default is zero.
+
+ Attributes
+ ----------
+ t : ndarray
+ knot vector
+ c : ndarray
+ spline coefficients
+ k : int
+ spline degree
+ extrapolate : bool
+ If True, extrapolates the first and last polynomial pieces of b-spline
+ functions active on the base interval.
+ axis : int
+ Interpolation axis.
+ tck : tuple
+ A read-only equivalent of ``(self.t, self.c, self.k)``
+
+ Methods
+ -------
+ __call__
+ basis_element
+ derivative
+ antiderivative
+ integrate
+ construct_fast
+
+ Notes
+ -----
+ B-spline basis elements are defined via
+
+ .. math::
+
+ B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
+
+ B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
+
+ **Implementation details**
+
+ - At least ``k+1`` coefficients are required for a spline of degree `k`,
+ so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
+ ``j > n``, are ignored.
+
+ - B-spline basis elements of degree `k` form a partition of unity on the
+ *base interval*, ``t[k] <= x <= t[n]``.
+
+
+ Examples
+ --------
+
+ Translating the recursive definition of B-splines into Python code, we have:
+
+ >>> def B(x, k, i, t):
+ ... if k == 0:
+ ... return 1.0 if t[i] <= x < t[i+1] else 0.0
+ ... if t[i+k] == t[i]:
+ ... c1 = 0.0
+ ... else:
+ ... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
+ ... if t[i+k+1] == t[i+1]:
+ ... c2 = 0.0
+ ... else:
+ ... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
+ ... return c1 + c2
+
+ >>> def bspline(x, t, c, k):
+ ... n = len(t) - k - 1
+ ... assert (n >= k+1) and (len(c) >= n)
+ ... return sum(c[i] * B(x, k, i, t) for i in range(n))
+
+ Note that this is an inefficient (if straightforward) way to
+ evaluate B-splines --- this spline class does it in an equivalent,
+ but much more efficient way.
+
+ Here we construct a quadratic spline function on the base interval
+ ``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
+
+ >>> from scipy.interpolate import BSpline
+ >>> k = 2
+ >>> t = [0, 1, 2, 3, 4, 5, 6]
+ >>> c = [-1, 2, 0, -1]
+ >>> spl = BSpline(t, c, k)
+ >>> spl(2.5)
+ array(1.375)
+ >>> bspline(2.5, t, c, k)
+ 1.375
+
+ Note that outside of the base interval results differ. This is because
+ `BSpline` extrapolates the first and last polynomial pieces of B-spline
+ functions active on the base interval.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> xx = np.linspace(1.5, 4.5, 50)
+ >>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')
+ >>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
+ >>> ax.grid(True)
+ >>> ax.legend(loc='best')
+ >>> plt.show()
+
+
+ References
+ ----------
+ .. [1] Tom Lyche and Knut Morken, Spline methods,
+ http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
+ .. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
+
+ """
+ def __init__(self, t, c, k, extrapolate=True, axis=0):
+ super(BSpline, self).__init__()
+
+ self.k = operator.index(k)
+ self.c = np.asarray(c)
+ self.t = np.ascontiguousarray(t, dtype=np.float64)
+
+ if extrapolate == 'periodic':
+ self.extrapolate = extrapolate
+ else:
+ self.extrapolate = bool(extrapolate)
+
+ n = self.t.shape[0] - self.k - 1
+
+ axis = normalize_axis_index(axis, self.c.ndim)
+
+ # Note that the normalized axis is stored in the object.
+ self.axis = axis
+ if axis != 0:
+ # roll the interpolation axis to be the first one in self.c
+ # More specifically, the target shape for self.c is (n, ...),
+ # and axis !=0 means that we have c.shape (..., n, ...)
+ # ^
+ # axis
+ self.c = np.rollaxis(self.c, axis)
+
+ if k < 0:
+ raise ValueError("Spline order cannot be negative.")
+ if self.t.ndim != 1:
+ raise ValueError("Knot vector must be one-dimensional.")
+ if n < self.k + 1:
+ raise ValueError("Need at least %d knots for degree %d" %
+ (2*k + 2, k))
+ if (np.diff(self.t) < 0).any():
+ raise ValueError("Knots must be in a non-decreasing order.")
+ if len(np.unique(self.t[k:n+1])) < 2:
+ raise ValueError("Need at least two internal knots.")
+ if not np.isfinite(self.t).all():
+ raise ValueError("Knots should not have nans or infs.")
+ if self.c.ndim < 1:
+ raise ValueError("Coefficients must be at least 1-dimensional.")
+ if self.c.shape[0] < n:
+ raise ValueError("Knots, coefficients and degree are inconsistent.")
+
+ dt = _get_dtype(self.c.dtype)
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
+
+ @classmethod
+ def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
+ """Construct a spline without making checks.
+
+ Accepts same parameters as the regular constructor. Input arrays
+ `t` and `c` must of correct shape and dtype.
+ """
+ self = object.__new__(cls)
+ self.t, self.c, self.k = t, c, k
+ self.extrapolate = extrapolate
+ self.axis = axis
+ return self
+
+ @property
+ def tck(self):
+ """Equivalent to ``(self.t, self.c, self.k)`` (read-only).
+ """
+ return self.t, self.c, self.k
+
    @classmethod
    def basis_element(cls, t, extrapolate=True):
        """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.

        Parameters
        ----------
        t : ndarray, shape (k+1,)
            internal knots
        extrapolate : bool or 'periodic', optional
            whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
            or to return nans.
            If 'periodic', periodic extrapolation is used.
            Default is True.

        Returns
        -------
        basis_element : callable
            A callable representing a B-spline basis element for the knot
            vector `t`.

        Notes
        -----
        The degree of the B-spline, `k`, is inferred from the length of `t` as
        ``len(t)-2``. The knot vector is constructed by appending and prepending
        ``k+1`` elements to internal knots `t`.

        Examples
        --------

        Construct a cubic B-spline:

        >>> from scipy.interpolate import BSpline
        >>> b = BSpline.basis_element([0, 1, 2, 3, 4])
        >>> k = b.k
        >>> b.t[k:-k]
        array([ 0.,  1.,  2.,  3.,  4.])
        >>> k
        3

        Construct a quadratic B-spline on ``[0, 1, 1, 2]``, and compare
        to its explicit form:

        >>> t = [-1, 0, 1, 1, 2]
        >>> b = BSpline.basis_element(t[1:])
        >>> def f(x):
        ...     return np.where(x < 1, x*x, (2. - x)**2)

        >>> import matplotlib.pyplot as plt
        >>> fig, ax = plt.subplots()
        >>> x = np.linspace(0, 2, 51)
        >>> ax.plot(x, b(x), 'g', lw=3)
        >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
        >>> ax.grid(True)
        >>> plt.show()

        """
        # Degree is implied by the knot count: len(t) == k + 2.
        k = len(t) - 2
        t = _as_float_array(t)
        # Pad with k dummy knots on each side, strictly outside the base
        # interval, so the knot vector has the full length a degree-k
        # spline needs.
        t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
        # A single unit coefficient at index k selects exactly one basis
        # element; all other basis functions get zero weight.
        c = np.zeros_like(t)
        c[k] = 1.
        return cls.construct_fast(t, c, k, extrapolate)
+
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate a spline function.

        Parameters
        ----------
        x : array_like
            points to evaluate the spline at.
        nu: int, optional
            derivative to evaluate (default is 0).
        extrapolate : bool or 'periodic', optional
            whether to extrapolate based on the first and last intervals
            or return nans. If 'periodic', periodic extrapolation is used.
            Default is `self.extrapolate`.

        Returns
        -------
        y : array_like
            Shape is determined by replacing the interpolation axis
            in the coefficient array with the shape of `x`.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        # Remember the input shape; the Cython kernel works on a flat 1-D array.
        # NOTE(review): np.float_ is the legacy alias of np.float64 (removed in
        # NumPy 2.0).
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)

        # With periodic extrapolation we map x to the segment
        # [self.t[k], self.t[n]].
        if extrapolate == 'periodic':
            n = self.t.size - self.k - 1
            x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
                                                         self.t[self.k])
            extrapolate = False

        # Output buffer: one row per evaluation point, trailing coefficient
        # dimensions flattened; filled in place by _evaluate.
        out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[1:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
+
    def _evaluate(self, xp, nu, extrapolate, out):
        # Thin wrapper over the Cython kernel: fills `out` in place with the
        # nu-th derivative values at `xp`, coefficients flattened to 2-D.
        _bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
                              self.k, xp, nu, extrapolate, out)
+
+ def _ensure_c_contiguous(self):
+ """
+ c and t may be modified by the user. The Cython code expects
+ that they are C contiguous.
+
+ """
+ if not self.t.flags.c_contiguous:
+ self.t = self.t.copy()
+ if not self.c.flags.c_contiguous:
+ self.c = self.c.copy()
+
    def derivative(self, nu=1):
        """Return a B-spline representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Derivative order.
            Default is 1.

        Returns
        -------
        b : BSpline object
            A new instance representing the derivative.

        See Also
        --------
        splder, splantider

        """
        c = self.c
        # pad the c array if needed: FITPACK's tck convention expects
        # len(c) == len(t); the trailing coefficients are ignored by
        # evaluation, so zero-padding is safe.
        ct = len(self.t) - len(c)
        if ct > 0:
            c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
        tck = _fitpack_impl.splder((self.t, c, self.k), nu)
        return self.construct_fast(*tck, extrapolate=self.extrapolate,
                                   axis=self.axis)
+
    def antiderivative(self, nu=1):
        """Return a B-spline representing the antiderivative.

        Parameters
        ----------
        nu : int, optional
            Antiderivative order. Default is 1.

        Returns
        -------
        b : BSpline object
            A new instance representing the antiderivative.

        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.

        See Also
        --------
        splder, splantider

        """
        c = self.c
        # pad the c array if needed (FITPACK expects len(c) == len(t);
        # the extra zero coefficients are ignored by evaluation).
        ct = len(self.t) - len(c)
        if ct > 0:
            c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
        tck = _fitpack_impl.splantider((self.t, c, self.k), 1)

        # The antiderivative of a periodic spline is not periodic, so
        # drop periodic extrapolation on the result (see Notes).
        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        return self.construct_fast(*tck, extrapolate=extrapolate,
                                   axis=self.axis)
+
    def integrate(self, a, b, extrapolate=None):
        """Compute a definite integral of the spline.

        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.
        extrapolate : bool or 'periodic', optional
            whether to extrapolate beyond the base interval,
            ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
            base interval. If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        I : array_like
            Definite integral of the spline over the interval ``[a, b]``.

        Examples
        --------
        Construct the linear spline ``x if x < 1 else 2 - x`` on the base
        interval :math:`[0, 2]`, and integrate it

        >>> from scipy.interpolate import BSpline
        >>> b = BSpline.basis_element([0, 1, 2])
        >>> b.integrate(0, 1)
        array(0.5)

        If the integration limits are outside of the base interval, the result
        is controlled by the `extrapolate` parameter

        >>> b.integrate(-1, 1)
        array(0.0)
        >>> b.integrate(-1, 1, extrapolate=False)
        array(0.5)

        >>> import matplotlib.pyplot as plt
        >>> fig, ax = plt.subplots()
        >>> ax.grid(True)
        >>> ax.axvline(0, c='r', lw=5, alpha=0.5)  # base interval
        >>> ax.axvline(2, c='r', lw=5, alpha=0.5)
        >>> xx = [-1, 1, 2]
        >>> ax.plot(xx, b(xx))
        >>> plt.show()

        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Prepare self.t and self.c.
        self._ensure_c_contiguous()

        # Swap integration bounds if needed; remember the sign so that
        # integrate(b, a) == -integrate(a, b).
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1
        n = self.t.size - self.k - 1

        if extrapolate != "periodic" and not extrapolate:
            # Shrink the integration interval, if needed: outside the base
            # interval the spline is taken to be zero.
            a = max(a, self.t[self.k])
            b = min(b, self.t[n])

        if self.c.ndim == 1:
            # Fast path: use FITPACK's routine
            # (cf _fitpack_impl.splint).
            t, c, k = self.tck
            integral, wrk = _dierckx._splint(t, c, k, a, b)
            return integral * sign

        out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype)

        # Compute the antiderivative F; the definite integral is then
        # evaluated as F(b) - F(a).
        c = self.c
        # pad c to len(t), as FITPACK's tck convention requires
        ct = len(self.t) - len(c)
        if ct > 0:
            c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
        ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1)

        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            ts, te = self.t[self.k], self.t[n]
            period = te - ts
            interval = b - a
            n_periods, left = divmod(interval, period)

            if n_periods > 0:
                # Evaluate the difference of antiderivatives over one full
                # period and scale by the number of whole periods.
                x = np.asarray([ts, te], dtype=np.float_)
                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                      ka, x, 0, False, out)
                integral = out[1] - out[0]
                integral *= n_periods
            else:
                integral = np.zeros((1, prod(self.c.shape[1:])),
                                    dtype=self.c.dtype)

            # Map a to [ts, te], b is always a + left.
            a = ts + (a - ts) % period
            b = a + left

            # If b <= te then we need to integrate over [a, b], otherwise
            # over [a, te] and from xs to what is remained.
            if b <= te:
                x = np.asarray([a, b], dtype=np.float_)
                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                      ka, x, 0, False, out)
                integral += out[1] - out[0]
            else:
                x = np.asarray([a, te], dtype=np.float_)
                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                      ka, x, 0, False, out)
                integral += out[1] - out[0]

                # Wrap around: the remainder b - te maps to the start of
                # the base interval.
                x = np.asarray([ts, ts + b - te], dtype=np.float_)
                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                      ka, x, 0, False, out)
                integral += out[1] - out[0]
        else:
            # Evaluate the difference of antiderivatives.
            x = np.asarray([a, b], dtype=np.float_)
            _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                  ka, x, 0, extrapolate, out)
            integral = out[1] - out[0]

        integral *= sign
        return integral.reshape(ca.shape[1:])
+
+
+#################################
+# Interpolating spline helpers #
+#################################
+
+def _not_a_knot(x, k):
+ """Given data x, construct the knot vector w/ not-a-knot BC.
+ cf de Boor, XIII(12)."""
+ x = np.asarray(x)
+ if k % 2 != 1:
+ raise ValueError("Odd degree for now only. Got %s." % k)
+
+ m = (k - 1) // 2
+ t = x[m+1:-m-1]
+ t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
+ return t
+
+
+def _augknt(x, k):
+ """Construct a knot vector appropriate for the order-k interpolation."""
+ return np.r_[(x[0],)*k, x, (x[-1],)*k]
+
+
+def _convert_string_aliases(deriv, target_shape):
+ if isinstance(deriv, str):
+ if deriv == "clamped":
+ deriv = [(1, np.zeros(target_shape))]
+ elif deriv == "natural":
+ deriv = [(2, np.zeros(target_shape))]
+ else:
+ raise ValueError("Unknown boundary condition : %s" % deriv)
+ return deriv
+
+
+def _process_deriv_spec(deriv):
+ if deriv is not None:
+ try:
+ ords, vals = zip(*deriv)
+ except TypeError as e:
+ msg = ("Derivatives, `bc_type`, should be specified as a pair of "
+ "iterables of pairs of (order, value).")
+ raise ValueError(msg) from e
+ else:
+ ords, vals = [], []
+ return np.atleast_1d(ords, vals)
+
+
def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
                       check_finite=True):
    """Compute the (coefficients of) interpolating B-spline.

    Parameters
    ----------
    x : array_like, shape (n,)
        Abscissas.
    y : array_like, shape (n, ...)
        Ordinates.
    k : int, optional
        B-spline degree. Default is cubic, k=3.
    t : array_like, shape (nt + k + 1,), optional.
        Knots.
        The number of knots needs to agree with the number of datapoints and
        the number of derivatives at the edges. Specifically, ``nt - n`` must
        equal ``len(deriv_l) + len(deriv_r)``.
    bc_type : 2-tuple or None
        Boundary conditions.
        Default is None, which means choosing the boundary conditions
        automatically. Otherwise, it must be a length-two tuple where the first
        element sets the boundary conditions at ``x[0]`` and the second
        element sets the boundary conditions at ``x[-1]``. Each of these must
        be an iterable of pairs ``(order, value)`` which gives the values of
        derivatives of specified orders at the given edge of the interpolation
        interval.
        Alternatively, the following string aliases are recognized:

        * ``"clamped"``: The first derivatives at the ends are zero. This is
          equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``.
        * ``"natural"``: The second derivatives at ends are zero. This is
          equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``.
        * ``"not-a-knot"`` (default): The first and second segments are the same
          polynomial. This is equivalent to having ``bc_type=None``.

    axis : int, optional
        Interpolation axis. Default is 0.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default is True.

    Returns
    -------
    b : a BSpline object of the degree ``k`` and with knots ``t``.

    Examples
    --------

    Use cubic interpolation on Chebyshev nodes:

    >>> def cheb_nodes(N):
    ...     jj = 2.*np.arange(N) + 1
    ...     x = np.cos(np.pi * jj / 2 / N)[::-1]
    ...     return x

    >>> x = cheb_nodes(20)
    >>> y = np.sqrt(1 - x**2)

    >>> from scipy.interpolate import BSpline, make_interp_spline
    >>> b = make_interp_spline(x, y)
    >>> np.allclose(b(x), y)
    True

    Note that the default is a cubic spline with a not-a-knot boundary condition

    >>> b.k
    3

    Here we use a 'natural' spline, with zero 2nd derivatives at edges:

    >>> l, r = [(2, 0.0)], [(2, 0.0)]
    >>> b_n = make_interp_spline(x, y, bc_type=(l, r))  # or, bc_type="natural"
    >>> np.allclose(b_n(x), y)
    True
    >>> x0, x1 = x[0], x[-1]
    >>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
    True

    Interpolation of parametric curves is also supported. As an example, we
    compute a discretization of a snail curve in polar coordinates

    >>> phi = np.linspace(0, 2.*np.pi, 40)
    >>> r = 0.3 + np.cos(phi)
    >>> x, y = r*np.cos(phi), r*np.sin(phi)  # convert to Cartesian coordinates

    Build an interpolating curve, parameterizing it by the angle

    >>> from scipy.interpolate import make_interp_spline
    >>> spl = make_interp_spline(phi, np.c_[x, y])

    Evaluate the interpolant on a finer grid (note that we transpose the result
    to unpack it into a pair of x- and y-arrays)

    >>> phi_new = np.linspace(0, 2.*np.pi, 100)
    >>> x_new, y_new = spl(phi_new).T

    Plot the result

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x, y, 'o')
    >>> plt.plot(x_new, y_new, '-')
    >>> plt.show()

    See Also
    --------
    BSpline : base class representing the B-spline objects
    CubicSpline : a cubic spline in the polynomial basis
    make_lsq_spline : a similar factory function for spline fitting
    UnivariateSpline : a wrapper over FITPACK spline fitting routines
    splrep : a wrapper over FITPACK spline fitting routines

    """
    # convert string aliases for the boundary conditions
    if bc_type is None or bc_type == 'not-a-knot':
        deriv_l, deriv_r = None, None
    elif isinstance(bc_type, str):
        deriv_l, deriv_r = bc_type, bc_type
    else:
        try:
            deriv_l, deriv_r = bc_type
        except TypeError as e:
            raise ValueError("Unknown boundary condition: %s" % bc_type) from e

    y = np.asarray(y)

    axis = normalize_axis_index(axis, y.ndim)

    # special-case k=0 right away: a zero-degree "interpolant" is just the
    # data itself with a nearest-knot step parameterization
    if k == 0:
        if any(_ is not None for _ in (t, deriv_l, deriv_r)):
            raise ValueError("Too much info for k=0: t and bc_type can only "
                             "be None.")
        x = _as_float_array(x, check_finite)
        t = np.r_[x, x[-1]]
        c = np.asarray(y)
        c = np.rollaxis(c, axis)
        c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
        return BSpline.construct_fast(t, c, k, axis=axis)

    # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))
    if k == 1 and t is None:
        if not (deriv_l is None and deriv_r is None):
            raise ValueError("Too much info for k=1: bc_type can only be None.")
        x = _as_float_array(x, check_finite)
        t = np.r_[x[0], x, x[-1]]
        c = np.asarray(y)
        c = np.rollaxis(c, axis)
        c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
        return BSpline.construct_fast(t, c, k, axis=axis)

    x = _as_float_array(x, check_finite)
    y = _as_float_array(y, check_finite)
    k = operator.index(k)

    # come up with a sensible knot vector, if needed
    if t is None:
        if deriv_l is None and deriv_r is None:
            if k == 2:
                # OK, it's a bit ad hoc: Greville sites + omit
                # 2nd and 2nd-to-last points, a la not-a-knot
                t = (x[1:] + x[:-1]) / 2.
                t = np.r_[(x[0],)*(k+1),
                          t[1:-1],
                          (x[-1],)*(k+1)]
            else:
                t = _not_a_knot(x, k)
        else:
            t = _augknt(x, k)

    t = _as_float_array(t, check_finite)

    y = np.rollaxis(y, axis)    # now internally interp axis is zero

    if x.ndim != 1 or np.any(x[1:] < x[:-1]):
        raise ValueError("Expect x to be a 1-D sorted array_like.")
    if np.any(x[1:] == x[:-1]):
        raise ValueError("Expect x to not have duplicates")
    if k < 0:
        raise ValueError("Expect non-negative k.")
    if t.ndim != 1 or np.any(t[1:] < t[:-1]):
        raise ValueError("Expect t to be a 1-D sorted array_like.")
    if x.size != y.shape[0]:
        raise ValueError('Shapes of x {} and y {} are incompatible'
                         .format(x.shape, y.shape))
    if t.size < x.size + k + 1:
        raise ValueError('Got %d knots, need at least %d.' %
                         (t.size, x.size + k + 1))
    if (x[0] < t[k]) or (x[-1] > t[-k]):
        raise ValueError('Out of bounds w/ x = %s.' % x)

    # Here : deriv_l, r = [(nu, value), ...]
    deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])
    deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l)
    nleft = deriv_l_ords.shape[0]

    deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])
    deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r)
    nright = deriv_r_ords.shape[0]

    # have `n` conditions for `nt` coefficients; need nt-n derivatives
    n = x.size
    nt = t.size - k - 1

    if nt - n != nleft + nright:
        raise ValueError("The number of derivatives at boundaries does not "
                         "match: expected %s, got %s+%s" % (nt-n, nleft, nright))

    # set up the LHS: the collocation matrix + derivatives at boundaries,
    # stored in LAPACK banded form (2*kl + ku + 1 rows for gbsv)
    kl = ku = k
    ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float_, order='F')
    _bspl._colloc(x, t, k, ab, offset=nleft)
    if nleft > 0:
        _bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords)
    if nright > 0:
        _bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords,
                                      offset=nt-nright)

    # set up the RHS: values to interpolate (+ derivative values, if any)
    extradim = prod(y.shape[1:])
    rhs = np.empty((nt, extradim), dtype=y.dtype)
    if nleft > 0:
        rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
    rhs[nleft:nt - nright] = y.reshape(-1, extradim)
    if nright > 0:
        rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)

    # solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
    if check_finite:
        ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
    gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
    lu, piv, c, info = gbsv(kl, ku, ab, rhs,
                            overwrite_ab=True, overwrite_b=True)

    if info > 0:
        # BUGFIX: error message previously misspelled "matrix" as "matix".
        raise LinAlgError("Collocation matrix is singular.")
    elif info < 0:
        raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)

    c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
    return BSpline.construct_fast(t, c, k, axis=axis)
+
+
def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):
    r"""Compute the (coefficients of) an LSQ B-spline.

    The result is a linear combination

    .. math::

            S(x) = \sum_j c_j B_j(x; t)

    of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes

    .. math::

        \sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2

    Parameters
    ----------
    x : array_like, shape (m,)
        Abscissas.
    y : array_like, shape (m, ...)
        Ordinates.
    t : array_like, shape (n + k + 1,).
        Knots.
        Knots and data points must satisfy Schoenberg-Whitney conditions.
    k : int, optional
        B-spline degree. Default is cubic, k=3.
    w : array_like, shape (n,), optional
        Weights for spline fitting. Must be positive. If ``None``,
        then weights are all equal.
        Default is ``None``.
    axis : int, optional
        Interpolation axis. Default is zero.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default is True.

    Returns
    -------
    b : a BSpline object of the degree `k` with knots `t`.

    Notes
    -----

    The number of data points must be larger than the spline degree `k`.

    Knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    Examples
    --------
    Generate some noisy data:

    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)

    Now fit a smoothing cubic spline with a pre-defined internal knots.
    Here we make the knot vector (k+1)-regular by adding boundary knots:

    >>> from scipy.interpolate import make_lsq_spline, BSpline
    >>> t = [-1, 0, 1]
    >>> k = 3
    >>> t = np.r_[(x[0],)*(k+1),
    ...           t,
    ...           (x[-1],)*(k+1)]
    >>> spl = make_lsq_spline(x, y, t, k)

    For comparison, we also construct an interpolating spline for the same
    set of data:

    >>> from scipy.interpolate import make_interp_spline
    >>> spl_i = make_interp_spline(x, y)

    Plot both:

    >>> import matplotlib.pyplot as plt
    >>> xs = np.linspace(-3, 3, 100)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
    >>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
    >>> plt.legend(loc='best')
    >>> plt.show()

    **NaN handling**: If the input arrays contain ``nan`` values, the result is
    not useful since the underlying spline fitting routines cannot deal with
    ``nan``. A workaround is to use zero weights for not-a-number data points:

    >>> y[8] = np.nan
    >>> w = np.isnan(y)
    >>> y[w] = 0.
    >>> tck = make_lsq_spline(x, y, t, w=~w)

    Notice the need to replace a ``nan`` by a numerical value (precise value
    does not matter as long as the corresponding weight is zero.)

    See Also
    --------
    BSpline : base class representing the B-spline objects
    make_interp_spline : a similar factory function for interpolating splines
    LSQUnivariateSpline : a FITPACK-based spline fitting routine
    splrep : a FITPACK-based fitting routine

    """
    x = _as_float_array(x, check_finite)
    y = _as_float_array(y, check_finite)
    t = _as_float_array(t, check_finite)
    if w is not None:
        w = _as_float_array(w, check_finite)
    else:
        # Unweighted fit: treat all data points equally.
        w = np.ones_like(x)
    k = operator.index(k)

    axis = normalize_axis_index(axis, y.ndim)

    y = np.rollaxis(y, axis)    # now internally interp axis is zero

    if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):
        raise ValueError("Expect x to be a 1-D sorted array_like.")
    if x.shape[0] < k+1:
        raise ValueError("Need more x points.")
    if k < 0:
        raise ValueError("Expect non-negative k.")
    if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
        raise ValueError("Expect t to be a 1-D sorted array_like.")
    if x.size != y.shape[0]:
        raise ValueError('Shapes of x {} and y {} are incompatible'
                         .format(x.shape, y.shape))
    if k > 0 and np.any((x < t[k]) | (x > t[-k])):
        raise ValueError('Out of bounds w/ x = %s.' % x)
    if x.size != w.size:
        raise ValueError('Shapes of x {} and w {} are incompatible'
                         .format(x.shape, w.shape))

    # number of coefficients
    n = t.size - k - 1

    # construct A.T @ A and rhs with A the collocation matrix, and
    # rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``
    # A.T @ A is symmetric banded (bandwidth k+1), stored in lower form.
    lower = True
    extradim = prod(y.shape[1:])
    ab = np.zeros((k+1, n), dtype=np.float_, order='F')
    rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')
    _bspl._norm_eq_lsq(x, t, k,
                       y.reshape(-1, extradim),
                       w,
                       ab, rhs)
    rhs = rhs.reshape((n,) + y.shape[1:])

    # have observation matrix & rhs, can solve the LSQ problem:
    # the normal-equations matrix is SPD, so a banded Cholesky solve applies.
    cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
                                 check_finite=check_finite)
    c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,
                         check_finite=check_finite)

    c = np.ascontiguousarray(c)
    return BSpline.construct_fast(t, c, k, axis=axis)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_cubic.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_cubic.py
new file mode 100644
index 0000000..449c5d9
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_cubic.py
@@ -0,0 +1,855 @@
+"""Interpolation algorithms using piecewise cubic polynomials."""
+
+import numpy as np
+
+from . import PPoly
+from .polyint import _isscalar
+from scipy.linalg import solve_banded, solve
+
+
+__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
+ "Akima1DInterpolator", "CubicSpline"]
+
+
def prepare_input(x, y, axis, dydx=None):
    """Validate and canonicalize inputs for the cubic spline interpolators.

    Converts all inputs to numpy arrays, checks them for correctness, and
    moves the interpolation axis of `y` (and `dydx`, if given) to position 0.
    `axis` is normalized into ``[0, y.ndim)``.

    Returns the tuple ``(x, dx, y, axis, dydx)`` where ``dx = np.diff(x)``.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if np.issubdtype(x.dtype, np.complexfloating):
        raise ValueError("`x` must contain real values.")
    x = x.astype(float)

    # Work in float unless any of y/dydx is complex.
    dtype = complex if np.issubdtype(y.dtype, np.complexfloating) else float

    if dydx is not None:
        dydx = np.asarray(dydx)
        if y.shape != dydx.shape:
            raise ValueError("The shapes of `y` and `dydx` must be identical.")
        if np.issubdtype(dydx.dtype, np.complexfloating):
            dtype = complex
        dydx = dydx.astype(dtype, copy=False)

    y = y.astype(dtype, copy=False)
    axis = axis % y.ndim

    if y.ndim == 0 or x.ndim != 1:
        # (y.ndim == 0 cannot reach here: `axis % 0` raises above)
        raise ValueError("`x` must be 1-dimensional.")
    if x.ndim != 1:
        raise ValueError("`x` must be 1-dimensional.")
    if x.shape[0] < 2:
        raise ValueError("`x` must contain at least 2 elements.")
    if x.shape[0] != y.shape[axis]:
        raise ValueError("The length of `y` along `axis`={0} doesn't "
                         "match the length of `x`".format(axis))

    if not np.all(np.isfinite(x)):
        raise ValueError("`x` must contain only finite values.")
    if not np.all(np.isfinite(y)):
        raise ValueError("`y` must contain only finite values.")

    if dydx is not None and not np.all(np.isfinite(dydx)):
        raise ValueError("`dydx` must contain only finite values.")

    dx = np.diff(x)
    if np.any(dx <= 0):
        raise ValueError("`x` must be strictly increasing sequence.")

    # np.moveaxis(a, axis, 0) is the documented equivalent of
    # np.rollaxis(a, axis) used historically.
    y = np.moveaxis(y, axis, 0)
    if dydx is not None:
        dydx = np.moveaxis(dydx, axis, 0)

    return x, dx, y, axis, dydx
+
+
class CubicHermiteSpline(PPoly):
    """Piecewise-cubic interpolator matching values and first derivatives.

    The result is represented as a `PPoly` instance.

    Parameters
    ----------
    x : array_like, shape (n,)
        1-D array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    dydx : array_like
        Array containing derivatives of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), it is set to True.

    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots

    See Also
    --------
    Akima1DInterpolator : Akima 1D interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    If you want to create a higher-order spline matching higher-order
    derivatives, use `BPoly.from_derivatives`.

    References
    ----------
    .. [1] `Cubic Hermite spline
           <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
           on Wikipedia.
    """
    def __init__(self, x, y, dydx, axis=0, extrapolate=None):
        if extrapolate is None:
            extrapolate = True

        x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)

        # Reshape dx so it broadcasts against y's trailing dimensions.
        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        # Secant slope of each interval.
        slope = np.diff(y, axis=0) / dxr
        # Mismatch between prescribed end derivatives and the secant slope;
        # this drives the cubic and quadratic terms.
        t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr

        # Power-basis coefficients on each interval, highest power first:
        # p(s) = c[0]*s**3 + c[1]*s**2 + c[2]*s + c[3], with s = x - x[i].
        c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
        c[0] = t / dxr
        c[1] = (slope - dydx[:-1]) / dxr - t
        c[2] = dydx[:-1]
        c[3] = y[:-1]

        super(CubicHermiteSpline, self).__init__(c, x, extrapolate=extrapolate)
        self.axis = axis
+
+
class PchipInterpolator(CubicHermiteSpline):
    r"""PCHIP 1-D monotonic cubic interpolation.

    ``x`` and ``y`` are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).

    Parameters
    ----------
    x : ndarray
        A 1-D array of monotonically increasing real values. ``x`` cannot
        include duplicate values (otherwise f is overspecified)
    y : ndarray
        A 1-D array of real values. ``y``'s length along the interpolation
        axis must be equal to the length of ``x``. If N-D array, use ``axis``
        parameter to select correct axis.
    axis : int, optional
        Axis in the y array corresponding to the x-coordinate values.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Methods
    -------
    __call__
    derivative
    antiderivative
    roots

    See Also
    --------
    CubicHermiteSpline : Piecewise-cubic interpolator.
    Akima1DInterpolator : Akima 1D interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.

    Notes
    -----
    The interpolator preserves monotonicity in the interpolation data and does
    not overshoot if the data is not smooth.

    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at :math:`x_k`.

    Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
    by using PCHIP algorithm [1]_.

    Let :math:`h_k = x_{k+1} - x_k`, and  :math:`d_k = (y_{k+1} - y_k) / h_k`
    are the slopes at internal points :math:`x_k`.
    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
    weighted harmonic mean

    .. math::

        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}

    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.

    The end slopes are set using a one-sided scheme [2]_.


    References
    ----------
    .. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
           SIAM J. Numer. Anal., 17(2), 238 (1980).
           :doi:`10.1137/0717021`.
    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
           :doi:`10.1137/1.9780898717952`


    """
    def __init__(self, x, y, axis=0, extrapolate=None):
        x, _, y, axis, _ = prepare_input(x, y, axis)
        # Broadcastable view of x against y's trailing dimensions.
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        dk = self._find_derivatives(xp, y)
        # y was already rolled so the interpolation axis is 0; record the
        # original axis on the instance afterwards.
        super(PchipInterpolator, self).__init__(x, y, dk, axis=0,
                                                extrapolate=extrapolate)
        self.axis = axis

    @staticmethod
    def _edge_case(h0, h1, m0, m1):
        # one-sided three-point estimate for the derivative
        d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)

        # try to preserve shape:
        # - if the estimate disagrees in sign with the nearest secant slope,
        #   clamp the end derivative to zero;
        # - if the two secant slopes disagree in sign and the estimate is
        #   too large, limit it to three times the nearest slope.
        mask = np.sign(d) != np.sign(m0)
        mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
        mmm = (~mask) & mask2

        d[mask] = 0.
        d[mmm] = 3.*m0[mmm]

        return d

    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using
        #  PCHIP algorithm is:
        # We choose the derivatives at the point x_k by
        # Let m_k be the slope of the kth segment (between k and k+1)
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
        # else use weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
        #   where h_k is the spacing between x_k and x_{k+1}
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:, None]
            y = y[:, None]

        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk

        if y.shape[0] == 2:
            # edge case: only have two points, use linear interpolation
            dk = np.zeros_like(y)
            dk[0] = mk
            dk[1] = mk
            return dk.reshape(y_shape)

        smk = np.sign(mk)
        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)

        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]

        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore'):
            whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)

        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0 / whmean[~condition]

        # special case endpoints, as suggested in
        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])

        return dk.reshape(y_shape)
+
+
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience wrapper for PCHIP interpolation.

    Builds a monotonic cubic (PCHIP) interpolant through ``(xi, yi)`` and
    evaluates it, or its derivatives, at ``x``.

    See `scipy.interpolate.PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.
    x : scalar or array_like
        Of length M.
    der : int or list, optional
        Derivatives to extract. The 0th derivative can be included to
        return the function value.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    Returns
    -------
    y : scalar or array_like
        The interpolated values; a list of arrays when `der` is a list.

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.

    Examples
    --------
    >>> from scipy.interpolate import pchip_interpolate
    >>> x_observed = np.linspace(0.0, 10.0, 11)
    >>> y_observed = np.sin(x_observed)
    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
    >>> y = pchip_interpolate(x_observed, y_observed, x)

    """
    interpolant = PchipInterpolator(xi, yi, axis=axis)

    if der == 0:
        return interpolant(x)
    if _isscalar(der):
        return interpolant.derivative(der)(x)
    return [interpolant.derivative(order)(x) for order in der]
+
+
class Akima1DInterpolator(CubicHermiteSpline):
    """
    Akima interpolator

    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural.

    Parameters
    ----------
    x : ndarray, shape (m, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (m, ...)
        N-D array of real values. The length of ``y`` along the first axis
        must be equal to the length of ``x``.
    axis : int, optional
        Specifies the axis of ``y`` along which to interpolate. Interpolation
        defaults to the first axis of ``y``.

    Methods
    -------
    __call__
    derivative
    antiderivative
    roots

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    .. versionadded:: 0.14

    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points for purposes of plotting.

    References
    ----------
    [1] A new method of interpolation and smooth curve fitting based
        on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
        589-602.

    """

    def __init__(self, x, y, axis=0):
        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
        # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        # determine slopes between breakpoints; two phantom slopes are
        # added on each side, hence the "+ 3" in the first dimension
        m = np.empty((x.size + 3, ) + y.shape[1:])
        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
        m[2:-2] = np.diff(y, axis=0) / dx

        # add two additional points on the left ...
        m[1] = 2. * m[2] - m[3]
        m[0] = 2. * m[1] - m[2]
        # ... and on the right
        m[-2] = 2. * m[-3] - m[-4]
        m[-1] = 2. * m[-2] - m[-3]

        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
        # This is the fill value:
        t = .5 * (m[3:] + m[:-3])
        # get the denominator of the slope t
        dm = np.abs(np.diff(m, axis=0))
        f1 = dm[2:]
        f2 = dm[:-2]
        f12 = f1 + f2
        # These are the mask of where the slope at breakpoint is defined:
        ind = np.nonzero(f12 > 1e-9 * np.max(f12))
        x_ind, y_ind = ind[0], ind[1:]
        # Set the slope at breakpoint
        t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
                  f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]

        super(Akima1DInterpolator, self).__init__(x, y, t, axis=0,
                                                  extrapolate=False)
        self.axis = axis

    def extend(self, c, x, right=True):
        # Extending would break the Akima construction, so it is disallowed.
        raise NotImplementedError("Extending a 1-D Akima interpolator is not "
                                  "yet implemented")

    # These are inherited from PPoly, but they do not produce an Akima
    # interpolator. Hence stub them out.
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")

    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
+
+
class CubicSpline(CubicHermiteSpline):
    """Cubic spline data interpolator.

    Interpolate data with a piecewise cubic polynomial which is twice
    continuously differentiable [1]_. The result is represented as a `PPoly`
    instance with breakpoints matching the given data.

    Parameters
    ----------
    x : array_like, shape (n,)
        1-D array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    bc_type : string or 2-tuple, optional
        Boundary condition type. Two additional equations, given by the
        boundary conditions, are required to determine all coefficients of
        polynomials on each segment [2]_.

        If `bc_type` is a string, then the specified condition will be applied
        at both ends of a spline. Available conditions are:

        * 'not-a-knot' (default): The first and second segment at a curve end
          are the same polynomial. It is a good default when there is no
          information on boundary conditions.
        * 'periodic': The interpolated functions is assumed to be periodic
          of period ``x[-1] - x[0]``. The first and last value of `y` must be
          identical: ``y[0] == y[-1]``. This boundary condition will result in
          ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
        * 'clamped': The first derivative at curves ends are zero. Assuming
          a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
        * 'natural': The second derivative at curve ends are zero. Assuming
          a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.

        If `bc_type` is a 2-tuple, the first and the second value will be
        applied at the curve start and end respectively. The tuple values can
        be one of the previously mentioned strings (except 'periodic') or a
        tuple `(order, deriv_values)` allowing to specify arbitrary
        derivatives at curve ends:

        * `order`: the derivative order, 1 or 2.
        * `deriv_value`: array_like containing derivative values, shape must
          be the same as `y`, excluding ``axis`` dimension. For example, if
          `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
          the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
          and have the shape (n0, n1).
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), ``extrapolate`` is
        set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.

    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots

    See Also
    --------
    Akima1DInterpolator : Akima 1D interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.

    Notes
    -----
    Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
    former controls only construction of a spline, and the latter only
    evaluation.

    When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
    a condition that the first derivative is equal to the linear interpolant
    slope. When both boundary conditions are 'not-a-knot' and n = 3, the
    solution is sought as a parabola passing through given points.

    When 'not-a-knot' boundary conditions is applied to both ends, the
    resulting spline will be the same as returned by `splrep` (with ``s=0``)
    and `InterpolatedUnivariateSpline`, but these two methods use a
    representation in B-spline basis.

    .. versionadded:: 0.18.0

    Examples
    --------
    In this example the cubic spline is used to interpolate a sampled sinusoid.
    You can see that the spline continuity property holds for the first and
    second derivatives and violates only for the third derivative.

    >>> import numpy as np
    >>> from scipy.interpolate import CubicSpline
    >>> import matplotlib.pyplot as plt
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> cs = CubicSpline(x, y)
    >>> xs = np.arange(-0.5, 9.6, 0.1)
    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
    >>> ax.plot(x, y, 'o', label='data')
    >>> ax.plot(xs, np.sin(xs), label='true')
    >>> ax.plot(xs, cs(xs), label="S")
    >>> ax.plot(xs, cs(xs, 1), label="S'")
    >>> ax.plot(xs, cs(xs, 2), label="S''")
    >>> ax.plot(xs, cs(xs, 3), label="S'''")
    >>> ax.set_xlim(-0.5, 9.5)
    >>> ax.legend(loc='lower left', ncol=2)
    >>> plt.show()

    In the second example, the unit circle is interpolated with a spline. A
    periodic boundary condition is used. You can see that the first derivative
    values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
    computed. Note that a circle cannot be exactly represented by a cubic
    spline. To increase precision, more breakpoints would be required.

    >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
    >>> y = np.c_[np.cos(theta), np.sin(theta)]
    >>> cs = CubicSpline(theta, y, bc_type='periodic')
    >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
    ds/dx=0.0 ds/dy=1.0
    >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
    >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
    >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
    >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
    >>> ax.axes.set_aspect('equal')
    >>> ax.legend(loc='center')
    >>> plt.show()

    The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x<= 1. A cubic spline can represent this function exactly.
    To achieve that we need to specify values and first derivatives at
    endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
    y'(1) = 3.

    >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
    >>> x = np.linspace(0, 1)
    >>> np.allclose(x**3, cs(x))
    True

    References
    ----------
    .. [1] `Cubic Spline Interpolation
            <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
            on Wikiversity.
    .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
    """
    def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        n = len(x)

        bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)

        if extrapolate is None:
            if bc[0] == 'periodic':
                extrapolate = 'periodic'
            else:
                extrapolate = True

        # dxr broadcasts dx against trailing dimensions of y
        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        slope = np.diff(y, axis=0) / dxr

        # If bc is 'not-a-knot' this change is just a convention.
        # If bc is 'periodic' then we already checked that y[0] == y[-1],
        # and the spline is just a constant, we handle this case in the same
        # way by setting the first derivatives to slope, which is 0.
        if n == 2:
            if bc[0] in ['not-a-knot', 'periodic']:
                bc[0] = (1, slope[0])
            if bc[1] in ['not-a-knot', 'periodic']:
                bc[1] = (1, slope[0])

        # This is a very special case, when both conditions are 'not-a-knot'
        # and n == 3. In this case 'not-a-knot' can't be handled regularly
        # as the both conditions are identical. We handle this case by
        # constructing a parabola passing through given points.
        if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
            A = np.zeros((3, 3))  # This is a standard matrix.
            b = np.empty((3,) + y.shape[1:], dtype=y.dtype)

            A[0, 0] = 1
            A[0, 1] = 1
            A[1, 0] = dx[1]
            A[1, 1] = 2 * (dx[0] + dx[1])
            A[1, 2] = dx[0]
            A[2, 1] = 1
            A[2, 2] = 1

            b[0] = 2 * slope[0]
            b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
            b[2] = 2 * slope[1]

            s = solve(A, b, overwrite_a=True, overwrite_b=True,
                      check_finite=False)
        elif n == 3 and bc[0] == 'periodic':
            # In case when number of points is 3 we should count derivatives
            # manually
            s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
            t = (slope / dxr).sum() / (1. / dxr).sum()
            s.fill(t)
        else:
            # Find derivative values at each x[i] by solving a tridiagonal
            # system.
            A = np.zeros((3, n))  # This is a banded matrix representation.
            b = np.empty((n,) + y.shape[1:], dtype=y.dtype)

            # Filling the system for i=1..n-2
            #                         (x[i-1] - x[i]) * s[i-1] +\
            # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i]   +\
            #                         (x[i] - x[i-1]) * s[i+1] =\
            #       3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
            #           (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))

            A[1, 1:-1] = 2 * (dx[:-1] + dx[1:])  # The diagonal
            A[0, 2:] = dx[:-1]                   # The upper diagonal
            A[-1, :-2] = dx[1:]                  # The lower diagonal

            b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])

            bc_start, bc_end = bc

            if bc_start == 'periodic':
                # Due to the periodicity, and because y[-1] = y[0], the linear
                # system has (n-1) unknowns/equations instead of n:
                A = A[:, 0:-1]
                A[1, 0] = 2 * (dx[-1] + dx[0])
                A[0, 1] = dx[-1]

                b = b[:-1]

                # Also, due to the periodicity, the system is not tri-diagonal.
                # We need to compute a "condensed" matrix of shape (n-2, n-2).
                # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
                # for more explanations.
                # The condensed matrix is obtained by removing the last column
                # and last row of the (n-1, n-1) system matrix. The removed
                # values are saved in scalar variables with the (n-1, n-1)
                # system matrix indices forming their names:
                a_m1_0 = dx[-2]  # lower left corner value: A[-1, 0]
                a_m1_m2 = dx[-1]
                a_m1_m1 = 2 * (dx[-1] + dx[-2])
                a_m2_m1 = dx[-3]
                a_0_m1 = dx[0]

                b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
                b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])

                Ac = A[:, :-1]
                b1 = b[:-1]
                b2 = np.zeros_like(b1)
                b2[0] = -a_0_m1
                b2[-1] = -a_m2_m1

                # s1 and s2 are the solutions of (n-2, n-2) system
                s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
                                  overwrite_b=False, check_finite=False)

                s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
                                  overwrite_b=False, check_finite=False)

                # computing the s[n-2] solution:
                s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
                        (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))

                # s is the solution of the (n, n) system:
                s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
                s[:-2] = s1 + s_m1 * s2
                s[-2] = s_m1
                s[-1] = s[0]
            else:
                if bc_start == 'not-a-knot':
                    A[1, 0] = dx[1]
                    A[0, 1] = x[2] - x[0]
                    d = x[2] - x[0]
                    b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
                            dxr[0]**2 * slope[1]) / d
                elif bc_start[0] == 1:
                    A[1, 0] = 1
                    A[0, 1] = 0
                    b[0] = bc_start[1]
                elif bc_start[0] == 2:
                    A[1, 0] = 2 * dx[0]
                    A[0, 1] = dx[0]
                    b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])

                if bc_end == 'not-a-knot':
                    A[1, -1] = dx[-2]
                    A[-1, -2] = x[-1] - x[-3]
                    d = x[-1] - x[-3]
                    b[-1] = ((dxr[-1]**2*slope[-2] +
                             (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
                elif bc_end[0] == 1:
                    A[1, -1] = 1
                    A[-1, -2] = 0
                    b[-1] = bc_end[1]
                elif bc_end[0] == 2:
                    A[1, -1] = 2 * dx[-1]
                    A[-1, -2] = dx[-1]
                    b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])

                s = solve_banded((1, 1), A, b, overwrite_ab=True,
                                 overwrite_b=True, check_finite=False)

        super(CubicSpline, self).__init__(x, y, s, axis=0,
                                          extrapolate=extrapolate)
        self.axis = axis

    @staticmethod
    def _validate_bc(bc_type, y, expected_deriv_shape, axis):
        """Validate and prepare boundary conditions.

        Returns
        -------
        validated_bc : 2-tuple
            Boundary conditions for a curve start and end.
        y : ndarray
            y casted to complex dtype if one of the boundary conditions has
            complex dtype.
        """
        if isinstance(bc_type, str):
            if bc_type == 'periodic':
                if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
                    raise ValueError(
                        "The first and last `y` point along axis {} must "
                        "be identical (within machine precision) when "
                        "bc_type='periodic'.".format(axis))

            bc_type = (bc_type, bc_type)

        else:
            if len(bc_type) != 2:
                raise ValueError("`bc_type` must contain 2 elements to "
                                 "specify start and end conditions.")

            if 'periodic' in bc_type:
                raise ValueError("'periodic' `bc_type` is defined for both "
                                 "curve ends and cannot be used with other "
                                 "boundary conditions.")

        validated_bc = []
        for bc in bc_type:
            if isinstance(bc, str):
                if bc == 'clamped':
                    validated_bc.append((1, np.zeros(expected_deriv_shape)))
                elif bc == 'natural':
                    validated_bc.append((2, np.zeros(expected_deriv_shape)))
                elif bc in ['not-a-knot', 'periodic']:
                    validated_bc.append(bc)
                else:
                    raise ValueError("bc_type={} is not allowed.".format(bc))
            else:
                try:
                    deriv_order, deriv_value = bc
                except Exception as e:
                    raise ValueError(
                        "A specified derivative value must be "
                        "given in the form (order, value)."
                    ) from e

                if deriv_order not in [1, 2]:
                    raise ValueError("The specified derivative order must "
                                     "be 1 or 2.")

                deriv_value = np.asarray(deriv_value)
                if deriv_value.shape != expected_deriv_shape:
                    raise ValueError(
                        "`deriv_value` shape {} is not the expected one {}."
                        .format(deriv_value.shape, expected_deriv_shape))

                if np.issubdtype(deriv_value.dtype, np.complexfloating):
                    # A complex boundary derivative forces a complex spline.
                    y = y.astype(complex, copy=False)

                validated_bc.append((deriv_order, deriv_value))

        return validated_bc, y
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_fitpack.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_fitpack.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..faf7d8e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_fitpack.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_fitpack_impl.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_fitpack_impl.py
new file mode 100644
index 0000000..1b38a94
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_fitpack_impl.py
@@ -0,0 +1,1316 @@
+"""
+fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
+ FITPACK is a collection of FORTRAN programs for curve and surface
+ fitting with splines and tensor product splines.
+
+See
+ https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
+or
+ http://www.netlib.org/dierckx/
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+TODO: Make interfaces to the following fitpack functions:
+ For univariate splines: cocosp, concon, fourco, insert
+ For bivariate splines: profil, regrid, parsur, surev
+"""
+
# Public API of this module.
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
           'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+import warnings
+import numpy as np
+from . import _fitpack
+from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
+ empty, iinfo, asarray)
+
+# Try to replace _fitpack interface with
+# f2py-generated version
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
def _int_overflow(x, msg=None):
    """Cast the value to a ``dfitpack_int`` and raise an OverflowError if
    the value cannot fit.
    """
    if x > iinfo(dfitpack_int).max:
        if msg is None:
            msg = '%r cannot fit into an %r' % (x, dfitpack_int)
        raise OverflowError(msg)
    return dfitpack_int.type(x)
+
+
# Map of FITPACK curve-fitting exit codes (ier) to (message, exception)
# pairs.  An exception of None marks a success/informational status;
# otherwise callers raise the given exception type with the message.
_iermess = {
    0: ["The spline has a residual sum of squares fp such that "
        "abs(fp-s)/s<=0.001", None],
    -1: ["The spline is an interpolating spline (fp=0)", None],
    -2: ["The spline is weighted least-squares polynomial of degree k.\n"
         "fp gives the upper bound fp0 for the smoothing factor s", None],
    1: ["The required storage space exceeds the available storage space.\n"
        "Probable causes: data (x,y) size is too small or smoothing parameter"
        "\ns is too small (fp>s).", ValueError],
    2: ["A theoretically impossible result when finding a smoothing spline\n"
        "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
        ValueError],
    3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
        "spline with fp=s has been reached. Probable cause: s too small.\n"
        "(abs(fp-s)/s>0.001)", ValueError],
    10: ["Error on input data", ValueError],
    'unknown': ["An error occurred", TypeError]
}

# Same idea as _iermess, but for the bivariate (surface) fitting routines,
# which have additional exit codes.
_iermess2 = {
    0: ["The spline has a residual sum of squares fp such that "
        "abs(fp-s)/s<=0.001", None],
    -1: ["The spline is an interpolating spline (fp=0)", None],
    -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
         "\nfp gives the upper bound fp0 for the smoothing factor s", None],
    -3: ["Warning. The coefficients of the spline have been computed as the\n"
         "minimal norm least-squares solution of a rank deficient system.",
         None],
    1: ["The required storage space exceeds the available storage space.\n"
        "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
        ValueError],
    2: ["A theoretically impossible result when finding a smoothing spline\n"
        "with fp = s. Probable causes: s too small or badly chosen eps.\n"
        "(abs(fp-s)/s>0.001)", ValueError],
    3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
        "spline with fp=s has been reached. Probable cause: s too small.\n"
        "(abs(fp-s)/s>0.001)", ValueError],
    4: ["No more knots can be added because the number of B-spline\n"
        "coefficients already exceeds the number of data points m.\n"
        "Probable causes: either s or m too small. (fp>s)", ValueError],
    5: ["No more knots can be added because the additional knot would\n"
        "coincide with an old one. Probable cause: s too small or too large\n"
        "a weight to an inaccurate data point. (fp>s)", ValueError],
    10: ["Error on input data", ValueError],
    11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
         "the minimal least-squares solution of a rank deficient system of\n"
         "linear equations.", ValueError],
    'unknown': ["An error occurred", TypeError]
}

# Module-level cache of knots/workspace shared between successive splprep
# calls (task=1 continues a previous fit).
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
                 'iwrk': array([], dfitpack_int), 'u': array([], float),
                 'ub': 0, 'ue': 1}
+
+
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
            full_output=0, nest=None, per=0, quiet=1):
    """
    Find the B-spline representation of an N-D curve.

    Given a list of N rank-1 arrays, `x`, which represent a curve in
    N-dimensional space parametrized by `u`, find a smooth approximating
    spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.

    Parameters
    ----------
    x : array_like
        A list of sample vector arrays representing the curve.
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as `x[0]`.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the `x` values have standard-deviation given by
        the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
    u : array_like, optional
        An array of parameter values. If not given, these values are
        calculated automatically as ``M = len(x[0])``, where

            v[0] = 0

            v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)

            u[i] = v[i] / v[M-1]

    ub, ue : int, optional
        The end-points of the parameters interval. Defaults to
        u[0] and u[-1].
    k : int, optional
        Degree of the spline. Cubic splines are recommended.
        Even values of `k` should be avoided especially with a small s-value.
        ``1 <= k <= 5``, default is 3.
    task : int, optional
        If task==0 (default), find t and c for a given smoothing factor, s.
        If task==1, find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1
        for the same set of data.
        If task=-1 find the weighted least square spline for a given set of
        knots, t.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
        where g(x) is the smoothed interpolation of (x,y). The user can
        use `s` to control the trade-off between closeness and smoothness
        of fit. Larger `s` means more smoothing while smaller values of `s`
        indicate less smoothing. Recommended values of `s` depend on the
        weights, w. If the weights represent the inverse of the
        standard-deviation of y, then a good `s` value should be found in
        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
        data points in x, y, and w.
    t : int, optional
        The knots needed for task=-1.
    full_output : int, optional
        If non-zero, then return optional outputs.
    nest : int, optional
        An over-estimate of the total number of knots of the spline to
        help in determining the storage space. By default nest=m/2.
        Always large enough is nest=m+k+1.
    per : int, optional
        If non-zero, data points are considered periodic with period
        ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
        returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
    quiet : int, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    u : array
        An array of the values of the parameter.
    fp : float
        The weighted sum of squared residuals of the spline approximation.
    ier : int
        An integer flag about splrep success. Success is indicated
        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
        Otherwise an error is raised.
    msg : str
        A message corresponding to the integer flag, ier.

    See Also
    --------
    splrep, splev, sproot, spalde, splint,
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives.
    The number of dimensions N must be smaller than 11.

    References
    ----------
    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines, Computer Graphics and Image Processing",
        20 (1982) 171-184.
    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines", report tw55, Dept. Computer Science,
        K.U.Leuven, 1981.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
        Numerical Analysis, Oxford University Press, 1993.

    """
    # The workspace cache is module-level state: task=1 ("refit with a new
    # smoothing factor") must reuse the knots/workspace stored by a previous
    # call.  Without this ``global`` declaration the assignment below would
    # bind a function-local name instead, so task=1 would fail with
    # UnboundLocalError and the cache would never be shared between calls.
    global _parcur_cache
    if task <= 0:
        # Fresh fit: reset the cache to empty workspace arrays.
        _parcur_cache = {'t': array([], float), 'wrk': array([], float),
                         'iwrk': array([], dfitpack_int),
                         'u': array([], float), 'ub': 0, 'ue': 1}
    x = atleast_1d(x)
    idim, m = x.shape
    if per:
        # Periodic fit requires closed curves; force last point == first.
        for i in range(idim):
            if x[i][0] != x[i][-1]:
                if quiet < 2:
                    # The last point has index m - 1 (the original message
                    # referenced the out-of-range index m).
                    warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
                                                 (i, m - 1, i)))
                x[i][-1] = x[i][0]
    if not 0 < idim < 11:
        raise TypeError('0 < idim < 11 must hold')
    if w is None:
        w = ones(m, float)
    else:
        w = atleast_1d(w)
    # ipar tells FITPACK whether the parameter values u were user-supplied.
    ipar = (u is not None)
    if ipar:
        _parcur_cache['u'] = u
        if ub is None:
            _parcur_cache['ub'] = u[0]
        else:
            _parcur_cache['ub'] = ub
        if ue is None:
            _parcur_cache['ue'] = u[-1]
        else:
            _parcur_cache['ue'] = ue
    else:
        _parcur_cache['u'] = zeros(m, float)
    if not (1 <= k <= 5):
        raise TypeError('1 <= k= %d <=5 must hold' % k)
    if not (-1 <= task <= 1):
        raise TypeError('task must be -1, 0 or 1')
    if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
        raise TypeError('Mismatch of input dimensions')
    if s is None:
        s = m - sqrt(2*m)
    if t is None and task == -1:
        raise TypeError('Knots must be given for task=-1')
    if t is not None:
        _parcur_cache['t'] = atleast_1d(t)
    n = len(_parcur_cache['t'])
    if task == -1 and n < 2*k + 2:
        raise TypeError('There must be at least 2*k+2 knots for task=-1')
    if m <= k:
        raise TypeError('m > k must hold')
    if nest is None:
        nest = m + 2*k

    if (task >= 0 and s == 0) or (nest < 0):
        # Interpolating spline: the exact number of knots is known a priori.
        if per:
            nest = m + 2*k
        else:
            nest = m + k + 1
    nest = max(nest, 2*k + 3)
    u = _parcur_cache['u']
    ub = _parcur_cache['ub']
    ue = _parcur_cache['ue']
    t = _parcur_cache['t']
    wrk = _parcur_cache['wrk']
    iwrk = _parcur_cache['iwrk']
    t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
                               task, ipar, s, t, nest, wrk, iwrk, per)
    # Save the results so a subsequent task=1 call can continue this fit.
    _parcur_cache['u'] = o['u']
    _parcur_cache['ub'] = o['ub']
    _parcur_cache['ue'] = o['ue']
    _parcur_cache['t'] = t
    _parcur_cache['wrk'] = o['wrk']
    _parcur_cache['iwrk'] = o['iwrk']
    ier = o['ier']
    fp = o['fp']
    n = len(t)
    u = o['u']
    # One coefficient row per dimension of the curve.
    c.shape = idim, n - k - 1
    tcku = [t, list(c), k], u
    if ier <= 0 and not quiet:
        warnings.warn(RuntimeWarning(_iermess[ier][0] +
                                     "\tk=%d n=%d m=%d fp=%f s=%f" %
                                     (k, len(t), m, fp, s)))
    if ier > 0 and not full_output:
        if ier in [1, 2, 3]:
            warnings.warn(RuntimeWarning(_iermess[ier][0]))
        else:
            try:
                raise _iermess[ier][1](_iermess[ier][0])
            except KeyError as e:
                raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
    if full_output:
        try:
            return tcku, fp, ier, _iermess[ier][0]
        except KeyError:
            return tcku, fp, ier, _iermess['unknown'][0]
    else:
        return tcku
+
+
# Module-level cache of knots/workspace shared between successive splrep
# calls (task=1 continues a previous fit).
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
                 'iwrk': array([], dfitpack_int)}
+
+
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
           full_output=0, per=0, quiet=1):
    """
    Find the B-spline representation of 1-D curve.

    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
    approximation of degree k on the interval ``xb <= x <= xe``.

    Parameters
    ----------
    x, y : array_like
        The data points defining a curve y = f(x).
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as x and y.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the y values have standard-deviation given by the
        vector d, then w should be 1/d. Default is ones(len(x)).
    xb, xe : float, optional
        The interval to fit. If None, these default to x[0] and x[-1]
        respectively.
    k : int, optional
        The order of the spline fit. It is recommended to use cubic splines.
        Even order splines should be avoided especially with small s values.
        1 <= k <= 5
    task : {1, 0, -1}, optional
        If task==0 find t and c for a given smoothing factor, s.

        If task==1 find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored an used internally)

        If task=-1 find the weighted least square spline for a given set of
        knots, t. These should be interior knots as knots on the ends will be
        added automatically.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s, where g(x)
        is the smoothed interpolation of (x,y). The user can use s to control
        the tradeoff between closeness and smoothness of fit. Larger s means
        more smoothing while smaller values of s indicate less smoothing.
        Recommended values of s depend on the weights, w. If the weights
        represent the inverse of the standard-deviation of y, then a good s
        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
        weights are supplied. s = 0.0 (interpolating) if no weights are
        supplied.
    t : array_like, optional
        The knots needed for task=-1. If given then task is automatically set
        to -1.
    full_output : bool, optional
        If non-zero, then return optional outputs.
    per : bool, optional
        If non-zero, data points are considered periodic with period x[m-1] -
        x[0] and a smooth periodic spline approximation is returned. Values of
        y[m-1] and w[m-1] are not used.
    quiet : bool, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : tuple
        (t,c,k) a tuple containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    fp : array, optional
        The weighted sum of squared residuals of the spline approximation.
    ier : int, optional
        An integer flag about splrep success. Success is indicated if ier<=0.
        If ier in [1,2,3] an error occurred but was not raised. Otherwise an
        error is raised.
    msg : str, optional
        A message corresponding to the integer flag, ier.

    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev

    Notes
    -----
    See splev for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine curfit from FITPACK.

    The user is responsible for assuring that the values of *x* are unique.
    Otherwise, *splrep* will not return sensible results.

    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    References
    ----------
    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:

    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
       integration of experimental data using spline functions",
       J.Comp.Appl.Maths 1 (1975) 165-184.
    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
       1286-1304.
    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
       functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
       Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------

    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import splev, splrep
    >>> x = np.linspace(0, 10, 10)
    >>> y = np.sin(x)
    >>> tck = splrep(x, y)
    >>> x2 = np.linspace(0, 10, 200)
    >>> y2 = splev(x2, tck)
    >>> plt.plot(x, y, 'o', x2, y2)
    >>> plt.show()

    """
    # BUGFIX: task=1 must re-use the knots/work arrays stored by a previous
    # task<=0 call.  Without this declaration the assignment below created a
    # *function-local* dict, so the task=1 path below raised
    # UnboundLocalError instead of finding the cache (or the documented
    # TypeError), and continuation fits never worked.
    global _curfit_cache
    if task <= 0:
        _curfit_cache = {}
    x, y = map(atleast_1d, [x, y])
    m = len(x)
    if w is None:
        w = ones(m, float)
        if s is None:
            s = 0.0
    else:
        w = atleast_1d(w)
        if s is None:
            s = m - sqrt(2*m)
        if not len(w) == m:
            raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
    if (m != len(y)) or (m != len(w)):
        raise TypeError('Lengths of the first three arguments (x,y,w) must '
                        'be equal')
    if not (1 <= k <= 5):
        raise TypeError('Given degree of the spline (k=%d) is not supported. '
                        '(1<=k<=5)' % k)
    if m <= k:
        raise TypeError('m > k must hold')
    if xb is None:
        xb = x[0]
    if xe is None:
        xe = x[-1]
    if not (-1 <= task <= 1):
        raise TypeError('task must be -1, 0 or 1')
    if t is not None:
        # Supplying explicit knots implies a least-squares fit on them.
        task = -1
    if task == -1:
        if t is None:
            raise TypeError('Knots must be given for task=-1')
        numknots = len(t)
        # Interior knots are given; reserve room for the k+1 boundary knots
        # that FITPACK adds at each end.
        _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
        _curfit_cache['t'][k+1:-k-1] = t
        nest = len(_curfit_cache['t'])
    elif task == 0:
        if per:
            nest = max(m + 2*k, 2*k + 3)
        else:
            nest = max(m + k + 1, 2*k + 3)
        t = empty((nest,), float)
        _curfit_cache['t'] = t
    if task <= 0:
        # Work-array sizes per the curfit/percur documentation.
        if per:
            _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
        else:
            _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
        _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
    try:
        t = _curfit_cache['t']
        wrk = _curfit_cache['wrk']
        iwrk = _curfit_cache['iwrk']
    except KeyError as e:
        raise TypeError("must call with task=1 only after"
                        " call with task=0,-1") from e
    if not per:
        n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
                                        xb, xe, k, s)
    else:
        n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
    tck = (t[:n], c[:n], k)
    if ier <= 0 and not quiet:
        _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
                 (k, len(t), m, fp, s))
        warnings.warn(RuntimeWarning(_mess))
    if ier > 0 and not full_output:
        if ier in [1, 2, 3]:
            warnings.warn(RuntimeWarning(_iermess[ier][0]))
        else:
            try:
                raise _iermess[ier][1](_iermess[ier][0])
            except KeyError as e:
                raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
    if full_output:
        try:
            return tck, fp, ier, _iermess[ier][0]
        except KeyError:
            return tck, fp, ier, _iermess['unknown'][0]
    else:
        return tck
+
+
def splev(x, tck, der=0, ext=0):
    """
    Evaluate a B-spline or its derivatives.

    Given the knots and coefficients of a B-spline representation, evaluate
    the value of the smoothing polynomial and its derivatives. This is a
    wrapper around the FORTRAN routines splev and splder of FITPACK.

    Parameters
    ----------
    x : array_like
        An array of points at which to return the value of the smoothed
        spline or its derivatives. If `tck` was returned from `splprep`,
        then the parameter values, u should be given.
    tck : tuple
        A sequence of length 3 returned by `splrep` or `splprep` containing
        the knots, coefficients, and degree of the spline.
    der : int, optional
        The order of derivative of the spline to compute (must be less than
        or equal to k).
    ext : int, optional
        Controls the value returned for elements of ``x`` not in the
        interval defined by the knot sequence.

        * if ext=0, return the extrapolated value.
        * if ext=1, return 0
        * if ext=2, raise a ValueError
        * if ext=3, return the boundary value.

        The default value is 0.

    Returns
    -------
    y : ndarray or list of ndarrays
        An array of values representing the spline function evaluated at
        the points in ``x``. If `tck` was returned from `splprep`, then this
        is a list of arrays representing the curve in N-D space.

    See Also
    --------
    splprep, splrep, sproot, spalde, splint
    bisplrep, bisplev

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
        Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    """
    t, c, k = tck
    # A doubly-indexable coefficient array marks a parametric (splprep-style)
    # tck: evaluate each coordinate's spline independently.
    try:
        c[0][0]
    except Exception:
        is_parametric = False
    else:
        is_parametric = True

    if is_parametric:
        return [splev(x, [t, c_dim, k], der, ext) for c_dim in c]

    if not (0 <= der <= k):
        raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
    if ext not in (0, 1, 2, 3):
        raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)

    x = asarray(x)
    input_shape = x.shape
    flat_x = atleast_1d(x).ravel()
    y, ier = _fitpack._spl_(flat_x, der, t, c, k, ext)

    if ier == 10:
        raise ValueError("Invalid input data")
    if ier == 1:
        raise ValueError("Found x value not in the domain")
    if ier:
        raise TypeError("An error occurred")

    # Restore the caller's original array shape.
    return y.reshape(input_shape)
+
+
def splint(a, b, tck, full_output=0):
    """
    Evaluate the definite integral of a B-spline.

    Given the knots and coefficients of a B-spline, evaluate the definite
    integral of the smoothing polynomial between two given points.

    Parameters
    ----------
    a, b : float
        The end-points of the integration interval.
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline (see `splev`).
    full_output : int, optional
        Non-zero to return optional output.

    Returns
    -------
    integral : float
        The resulting integral.
    wrk : ndarray
        An array containing the integrals of the normalized B-splines
        defined on the set of knots.

    Notes
    -----
    splint silently assumes that the spline function is zero outside the data
    interval (a, b).

    See Also
    --------
    splprep, splrep, sproot, spalde, splev
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline

    References
    ----------
    .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
        J. Inst. Maths Applics, 17, p.37-41, 1976.
    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    """
    t, c, k = tck
    # Nested coefficients mean a parametric curve: integrate per dimension.
    try:
        c[0][0]
    except Exception:
        is_parametric = False
    else:
        is_parametric = True

    if is_parametric:
        return [splint(a, b, [t, c_dim, k]) for c_dim in c]

    integral, wrk = _fitpack._splint(t, c, k, a, b)
    if full_output:
        return integral, wrk
    return integral
+
+
def sproot(tck, mest=10):
    """
    Find the roots of a cubic B-spline.

    Given the knots (>=8) and coefficients of a cubic B-spline return the
    roots of the spline.

    Parameters
    ----------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots,
        the B-spline coefficients, and the degree of the spline.
        The number of knots must be >= 8, and the degree must be 3.
        The knots must be a montonically increasing sequence.
    mest : int, optional
        An estimate of the number of zeros (Default is 10).

    Returns
    -------
    zeros : ndarray
        An array giving the roots of the spline.

    See also
    --------
    splprep, splrep, splint, spalde, splev
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline


    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
        Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    """
    t, c, k = tck
    if k != 3:
        raise ValueError("sproot works only for cubic (k=3) splines")
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        # Parametric curve: find roots of each coordinate spline.
        return list(map(lambda c, t=t, k=k, mest=mest:
                        sproot([t, c, k], mest), c))
    else:
        if len(t) < 8:
            raise TypeError("The number of knots %d>=8" % len(t))
        z, ier = _fitpack._sproot(t, c, k, mest)
        # NOTE(review): the error-handling tail of this function was garbled
        # in the extracted source; reconstructed from the visible fragment
        # ("t1<=..<=t4") and the FITPACK sproot contract -- confirm against
        # the upstream implementation.
        if ier == 10:
            raise TypeError("Invalid input data. "
                            "t1<=..<=t4<x1<=...<=xm<=tn-3<=..<=tn must hold.")
        if ier == 0:
            return z
        if ier == 1:
            warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
            return z
        raise TypeError("Unknown error")


def spalde(x, tck):
    """
    Evaluate all derivatives of a B-spline.

    Given the knots and coefficients of a cubic B-spline compute all
    derivatives up to order k at a point (or set of points).

    NOTE(review): the header and docstring of this function were lost in the
    extracted source (only its tail survived, fused into `sproot`); the
    signature and the single-point branch below are reconstructed -- verify
    against the upstream implementation.

    Parameters
    ----------
    x : array_like
        A point or a set of points at which to evaluate the derivatives.
        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
    tck : tuple
        A tuple (t,c,k) containing the vector of knots,
        the B-spline coefficients, and the degree of the spline.

    Returns
    -------
    results : {ndarray, list of ndarrays}
        An array (or a list of arrays) containing all derivatives
        up to order k inclusive for each point `x`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev
    bisplrep, bisplev
    """
    t, c, k = tck
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        return list(map(lambda c, x=x, t=t, k=k:
                        spalde(x, [t, c, k]), c))
    else:
        x = atleast_1d(x)
        if len(x) > 1:
            # Several evaluation points: recurse per point.
            return list(map(lambda x, tck=tck: spalde(x, tck), x))
        d, ier = _fitpack._spalde(t, c, k, x[0])
        if ier == 0:
            return d
        if ier == 10:
            raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
        raise TypeError("Unknown error")
+
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
#             full_output=0,nest=None,per=0,quiet=1):


# Module-level cache reused by bisplrep() across calls: task=1 continuation
# fits re-use the knot vectors and FITPACK work arrays from the previous
# surface fit.
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
                 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
+
+
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
             kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
             full_output=0, nxest=None, nyest=None, quiet=1):
    """
    Find a bivariate B-spline representation of a surface.

    Given a set of data points (x[i], y[i], z[i]) representing a surface
    z=f(x,y), compute a B-spline representation of the surface. Based on
    the routine SURFIT from FITPACK.

    Parameters
    ----------
    x, y, z : ndarray
        Rank-1 arrays of data points.
    w : ndarray, optional
        Rank-1 array of weights. By default ``w=np.ones(len(x))``.
    xb, xe : float, optional
        End points of approximation interval in `x`.
        By default ``xb = x.min(), xe=x.max()``.
    yb, ye : float, optional
        End points of approximation interval in `y`.
        By default ``yb=y.min(), ye = y.max()``.
    kx, ky : int, optional
        The degrees of the spline (1 <= kx, ky <= 5).
        Third order (kx=ky=3) is recommended.
    task : int, optional
        If task=0, find knots in x and y and coefficients for a given
        smoothing factor, s.
        If task=1, find knots and coefficients for another value of the
        smoothing factor, s. bisplrep must have been previously called
        with task=0 or task=1.
        If task=-1, find coefficients for a given set of knots tx, ty.
    s : float, optional
        A non-negative smoothing factor. If weights correspond
        to the inverse of the standard-deviation of the errors in z,
        then a good s-value should be found in the range
        ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
    eps : float, optional
        A threshold for determining the effective rank of an
        over-determined linear system of equations (0 < eps < 1).
        `eps` is not likely to need changing.
    tx, ty : ndarray, optional
        Rank-1 arrays of the knots of the spline for task=-1
    full_output : int, optional
        Non-zero to return optional outputs.
    nxest, nyest : int, optional
        Over-estimates of the total number of knots. If None then
        ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
        ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
    quiet : int, optional
        Non-zero to suppress printing of messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : array_like
        A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
        coefficients (c) of the bivariate B-spline representation of the
        surface along with the degree of the spline.
    fp : ndarray
        The weighted sum of squared residuals of the spline approximation.
    ier : int
        An integer flag about splrep success. Success is indicated if
        ier<=0. If ier in [1,2,3] an error occurred but was not raised.
        Otherwise an error is raised.
    msg : str
        A message corresponding to the integer flag, ier.

    See Also
    --------
    splprep, splrep, splint, sproot, splev
    UnivariateSpline, BivariateSpline

    Notes
    -----
    See `bisplev` to evaluate the value of the B-spline given its tck
    representation.

    References
    ----------
    .. [1] Dierckx P.:An algorithm for surface fitting with spline functions
       Ima J. Numer. Anal. 1 (1981) 267-283.
    .. [2] Dierckx P.:An algorithm for surface fitting with spline functions
       report tw50, Dept. Computer Science,K.U.Leuven, 1980.
    .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
       Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial `.

    """
    x, y, z = map(ravel, [x, y, z])  # ensure 1-d arrays.
    m = len(x)
    if not (m == len(y) == len(z)):
        raise TypeError('len(x)==len(y)==len(z) must hold.')
    if w is None:
        w = ones(m, float)
    else:
        w = atleast_1d(w)
    if not len(w) == m:
        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
    if xb is None:
        xb = x.min()
    if xe is None:
        xe = x.max()
    if yb is None:
        yb = y.min()
    if ye is None:
        ye = y.max()
    if not (-1 <= task <= 1):
        raise TypeError('task must be -1, 0 or 1')
    if s is None:
        s = m - sqrt(2*m)
    if tx is None and task == -1:
        raise TypeError('Knots_x must be given for task=-1')
    if tx is not None:
        _surfit_cache['tx'] = atleast_1d(tx)
        nx = len(_surfit_cache['tx'])
    if ty is None and task == -1:
        raise TypeError('Knots_y must be given for task=-1')
    if ty is not None:
        _surfit_cache['ty'] = atleast_1d(ty)
        ny = len(_surfit_cache['ty'])
    if task == -1 and nx < 2*kx+2:
        raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
    if task == -1 and ny < 2*ky+2:
        # BUGFIX: message said "knots_x" although this check is on the
        # y-direction knot count.
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
    if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
        raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
                        'supported. (1<=k<=5)' % (kx, ky))
    if m < (kx + 1)*(ky + 1):
        raise TypeError('m >= (kx+1)(ky+1) must hold')
    if nxest is None:
        nxest = int(kx + sqrt(m/2))
    if nyest is None:
        nyest = int(ky + sqrt(m/2))
    nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
    if task >= 0 and s == 0:
        # Interpolation (s=0) may need many more knots.
        nxest = int(kx + sqrt(3*m))
        nyest = int(ky + sqrt(3*m))
    if task == -1:
        _surfit_cache['tx'] = atleast_1d(tx)
        _surfit_cache['ty'] = atleast_1d(ty)
    tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
    wrk = _surfit_cache['wrk']
    u = nxest - kx - 1
    v = nyest - ky - 1
    km = max(kx, ky) + 1
    ne = max(nxest, nyest)
    bx, by = kx*v + ky + 1, ky*u + kx + 1
    b1, b2 = bx, bx + v - ky
    if bx > by:
        b1, b2 = by, by + u - kx
    msg = "Too many data points to interpolate"
    # Work-array sizes per the SURFIT documentation; guard against int
    # overflow for very large problems.
    lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
                          2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
                          msg=msg)
    lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, msg=msg)
    tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
                                    task, s, eps, tx, ty, nxest, nyest,
                                    wrk, lwrk1, lwrk2)
    # BUGFIX: results were stored into _curfit_cache (the 1-D curve cache)
    # although this function *reads* its continuation state from
    # _surfit_cache above, so task=1 continuation re-used a stale empty
    # work array and polluted splrep's cache.
    _surfit_cache['tx'] = tx
    _surfit_cache['ty'] = ty
    _surfit_cache['wrk'] = o['wrk']
    ier, fp = o['ier'], o['fp']
    tck = [tx, ty, c, kx, ky]

    ierm = min(11, max(-3, ier))
    if ierm <= 0 and not quiet:
        _mess = (_iermess2[ierm][0] +
                 "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
                 (kx, ky, len(tx), len(ty), m, fp, s))
        warnings.warn(RuntimeWarning(_mess))
    if ierm > 0 and not full_output:
        if ier in [1, 2, 3, 4, 5]:
            _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
                     (kx, ky, len(tx), len(ty), m, fp, s))
            warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
        else:
            try:
                raise _iermess2[ierm][1](_iermess2[ierm][0])
            except KeyError as e:
                raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
    if full_output:
        try:
            return tck, fp, ier, _iermess2[ierm][0]
        except KeyError:
            return tck, fp, ier, _iermess2['unknown'][0]
    else:
        return tck
+
+
def bisplev(x, y, tck, dx=0, dy=0):
    """
    Evaluate a bivariate B-spline and its derivatives.

    Return a rank-2 array of spline function values (or spline derivative
    values) at points given by the cross-product of the rank-1 arrays `x` and
    `y`. In special cases, return an array or just a float if either `x` or
    `y` or both are floats. Based on BISPEV from FITPACK.

    Parameters
    ----------
    x, y : ndarray
        Rank-1 arrays specifying the domain over which to evaluate the
        spline or its derivative.
    tck : tuple
        A sequence of length 5 returned by `bisplrep` containing the knot
        locations, the coefficients, and the degree of the spline:
        [tx, ty, c, kx, ky].
    dx, dy : int, optional
        The orders of the partial derivatives in `x` and `y` respectively.

    Returns
    -------
    vals : ndarray
        The B-spline or its derivative evaluated over the set formed by
        the cross-product of `x` and `y`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev
    UnivariateSpline, BivariateSpline

    Notes
    -----
    See `bisplrep` to generate the `tck` representation.

    References
    ----------
    .. [1] Dierckx P. : An algorithm for surface fitting
       with spline functions
       Ima J. Numer. Anal. 1 (1981) 267-283.
    .. [2] Dierckx P. : An algorithm for surface fitting
       with spline functions
       report tw50, Dept. Computer Science,K.U.Leuven, 1980.
    .. [3] Dierckx P. : Curve and surface fitting with splines,
       Monographs on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial `.

    """
    tx, ty, c, kx, ky = tck
    if not (0 <= dx < kx):
        raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
    if not (0 <= dy < ky):
        raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
    x, y = map(atleast_1d, [x, y])
    if x.ndim != 1 or y.ndim != 1:
        raise ValueError("First two entries should be rank-1 arrays.")
    z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
    if ier == 10:
        raise ValueError("Invalid input data")
    if ier:
        raise TypeError("An error occurred")
    z.shape = len(x), len(y)
    # Collapse singleton axes so scalar inputs produce scalar-like output.
    if len(z) == 1:
        if len(z[0]) == 1:
            return z[0][0]
        return z[0]
    return z
+
+
def dblint(xa, xb, ya, yb, tck):
    """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].

    Parameters
    ----------
    xa, xb : float
        The end-points of the x integration interval.
    ya, yb : float
        The end-points of the y integration interval.
    tck : list [tx, ty, c, kx, ky]
        A sequence of length 5 returned by bisplrep containing the knot
        locations tx, ty, the coefficients c, and the degrees kx, ky
        of the spline.

    Returns
    -------
    integ : float
        The value of the resulting integral.
    """
    # Delegate the actual quadrature to the FITPACK dblint routine.
    knots_x, knots_y, coeffs, deg_x, deg_y = tck
    return dfitpack.dblint(knots_x, knots_y, coeffs, deg_x, deg_y,
                           xa, xb, ya, yb)
+
+
def insert(x, tck, m=1, per=0):
    """
    Insert knots into a B-spline.

    Given the knots and coefficients of a B-spline representation, create a
    new B-spline with a knot inserted `m` times at point `x`.
    This is a wrapper around the FORTRAN routine insert of FITPACK.

    Parameters
    ----------
    x (u) : array_like
        A 1-D point at which to insert a new knot(s). If `tck` was returned
        from ``splprep``, then the parameter values, u should be given.
    tck : tuple
        A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
        the vector of knots, the B-spline coefficients,
        and the degree of the spline.
    m : int, optional
        The number of times to insert the given knot (its multiplicity).
        Default is 1.
    per : int, optional
        If non-zero, the input spline is considered periodic.

    Returns
    -------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the new spline.
        ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
        In case of a periodic spline (``per != 0``) there must be
        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.

    Notes
    -----
    NOTE(review): the body of this function and the header of `splder` below
    were lost in the extracted source (the two docstrings were fused
    together); both are reconstructed here -- verify against the upstream
    implementation.
    """
    t, c, k = tck
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        # Parametric curve: insert the knot into each coordinate spline.
        cc = []
        for c_vals in c:
            tt, cc_val, kk = insert(x, [t, c_vals, k], m)
            cc.append(cc_val)
        return (tt, cc, kk)
    else:
        tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
        if ier == 10:
            raise ValueError("Invalid input data")
        if ier:
            raise TypeError("An error occurred")
        return (tt, cc, k)


def splder(tck, n=1):
    """
    Compute the spline representation of the derivative of a given spline.

    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose derivative to compute
    n : int, optional
        Order of derivative to evaluate. Default: 1

    Returns
    -------
    tck_der : tuple of (t2, c2, k2)
        Spline of order k2=k-n representing the derivative
        of the input spline.

    See Also
    --------
    splantider, splev, spalde

    Examples
    --------
    This can be used for finding maxima of a curve:

    >>> from scipy.interpolate import splrep, splder, sproot
    >>> x = np.linspace(0, 10, 70)
    >>> y = np.sin(x)
    >>> spl = splrep(x, y, k=4)

    Now, differentiate the spline and find the zeros of the
    derivative. (NB: `sproot` only works for order 3 splines, so we
    fit an order 4 spline):

    >>> dspl = splder(spl)
    >>> sproot(dspl) / np.pi
    array([ 0.50000001,  1.5       ,  2.49999998])

    This agrees well with roots :math:`\\pi/2 + n\\pi` of
    :math:`\\cos(x) = \\sin'(x)`.

    """
    if n < 0:
        # Negative derivative order means antidifferentiation.
        return splantider(tck, -n)

    t, c, k = tck

    if n > k:
        raise ValueError(("Order of derivative (n = %r) must be <= "
                          "order of spline (k = %r)") % (n, tck[2]))

    # Extra axes for the trailing dims of the `c` array:
    sh = (slice(None),) + ((None,)*len(c.shape[1:]))

    with np.errstate(invalid='raise', divide='raise'):
        try:
            for j in range(n):
                # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5

                # Compute the denominator in the differentiation formula.
                # (and append traling dims, if necessary)
                dt = t[k+1:-1] - t[1:-k-1]
                dt = dt[sh]
                # Compute the new coefficients
                c = (c[1:-1-k] - c[:-2-k]) * k / dt
                # Pad coefficient array to same size as knots (FITPACK
                # convention)
                c = np.r_[c, np.zeros((k,) + c.shape[1:])]
                # Adjust knots
                t = t[1:-1]
                k -= 1
        except FloatingPointError as e:
            # A zero knot span (repeated interior knot) makes the divided
            # difference blow up -- the spline is not differentiable there.
            raise ValueError(("The spline has internal repeated knots "
                              "and is not differentiable %d times") % n) from e

    return t, c, k
+
+
def splantider(tck, n=1):
    """
    Compute the spline for the antiderivative (integral) of a given spline.

    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose antiderivative to compute
    n : int, optional
        Order of antiderivative to evaluate. Default: 1

    Returns
    -------
    tck_ader : tuple of (t2, c2, k2)
        Spline of order k2=k+n representing the antiderivative of the input
        spline.

    See Also
    --------
    splder, splev, spalde

    Notes
    -----
    The `splder` function is the inverse operation of this function.
    Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
    rounding error.

    .. versionadded:: 0.13.0

    Examples
    --------
    >>> from scipy.interpolate import splrep, splder, splantider, splev
    >>> x = np.linspace(0, np.pi/2, 70)
    >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
    >>> spl = splrep(x, y)

    The derivative is the inverse operation of the antiderivative,
    although some floating point error accumulates:

    >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
    (array(2.1565429877197317), array(2.1565429877201865))

    Antiderivative can be used to evaluate definite integrals:

    >>> ispl = splantider(spl)
    >>> splev(np.pi/2, ispl) - splev(0, ispl)
    2.2572053588768486

    This is indeed an approximation to the complete elliptic integral
    :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:

    >>> from scipy.special import ellipk
    >>> ellipk(0.8)
    2.2572053268208538

    """
    if n < 0:
        # A negative antiderivative order is just differentiation.
        return splder(tck, -n)

    t, c, k = tck

    # Index tuple that broadcasts a 1-D knot-difference vector over any
    # trailing axes of the coefficient array.
    trailing = (slice(None),) + (None,) * len(c.shape[1:])

    for _ in range(n):
        # Inverse of the operations performed by `splder`.
        dt = (t[k + 1:] - t[:-k - 1])[trailing]
        # Integrated coefficients (Schumaker's antiderivative formula).
        c = np.cumsum(c[:-k - 1] * dt, axis=0) / (k + 1)
        # Re-pad to FITPACK's len(c) == len(t) convention: a leading zero
        # (integration constant) and a repeated last value.
        c = np.r_[np.zeros((1,) + c.shape[1:]),
                  c,
                  [c[-1]] * (k + 2)]
        # Duplicate the boundary knots once more for the raised degree.
        t = np.r_[t[0], t, t[-1]]
        k += 1

    return t, c, k
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_pade.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_pade.py
new file mode 100644
index 0000000..3ded1c9
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_pade.py
@@ -0,0 +1,66 @@
+from numpy import zeros, asarray, eye, poly1d, hstack, r_
+from scipy import linalg
+
+__all__ = ["pade"]
+
def pade(an, m, n=None):
    """
    Return Pade approximation to a polynomial as the ratio of two polynomials.

    Parameters
    ----------
    an : (N,) array_like
        Taylor series coefficients.
    m : int
        The order of the returned approximating polynomial `q`.
    n : int, optional
        The order of the returned approximating polynomial `p`. By default,
        the order is ``len(an)-m``.

    Returns
    -------
    p, q : Polynomial class
        The Pade approximation of the polynomial defined by `an` is
        ``p(x)/q(x)``.

    Raises
    ------
    ValueError
        If the requested orders are inconsistent with ``len(an)``
        or if `n` is negative.

    Examples
    --------
    >>> from scipy.interpolate import pade
    >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
    >>> p, q = pade(e_exp, 2)

    >>> e_exp.reverse()
    >>> e_poly = np.poly1d(e_exp)

    Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``

    >>> e_poly(1)
    2.7166666666666668

    >>> p(1)/q(1)
    2.7179487179487181

    """
    an = asarray(an)
    if n is None:
        # Default: spend all remaining Taylor coefficients on p.
        n = len(an) - 1 - m
        # BUGFIX: this check only makes sense for the *derived* n, so it
        # belongs inside the `n is None` branch.  Flattened to top level it
        # shadowed the user-supplied-n check below, which was dead code and
        # produced a misleading message for an explicit negative `n`.
        if n < 0:
            raise ValueError("Order of q must be smaller than len(an)-1.")
    if n < 0:
        raise ValueError("Order of p must be greater than 0.")
    N = m + n
    if N > len(an)-1:
        raise ValueError("Order of q+p must be smaller than len(an).")
    an = an[:N+1]
    # Build the linear system [I | B] pq = an whose solution stacks the
    # p coefficients (first n+1 entries) and the q coefficients (last m,
    # with q0 normalized to 1).
    Akj = eye(N+1, n+1, dtype=an.dtype)
    Bkj = zeros((N+1, m), dtype=an.dtype)
    for row in range(1, m+1):
        Bkj[row, :row] = -(an[:row])[::-1]
    for row in range(m+1, N+1):
        Bkj[row, :] = -(an[row-m:row])[::-1]
    C = hstack((Akj, Bkj))
    pq = linalg.solve(C, an)
    p = pq[:n+1]
    q = r_[1.0, pq[n+1:]]
    # poly1d expects highest-degree coefficient first.
    return poly1d(p[::-1]), poly1d(q[::-1])
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_ppoly.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_ppoly.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..a40d7d3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/_ppoly.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/dfitpack.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/dfitpack.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..bbaefc9
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/dfitpack.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/fitpack.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/fitpack.py
new file mode 100644
index 0000000..e7c4cc7
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/fitpack.py
@@ -0,0 +1,763 @@
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+import warnings
+
+import numpy as np
+
+# These are in the API for fitpack even if not used in fitpack.py itself.
+from ._fitpack_impl import bisplrep, bisplev, dblint
+from . import _fitpack_impl as _impl
+from ._bsplines import BSpline
+
+
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
            full_output=0, nest=None, per=0, quiet=1):
    """
    Find the B-spline representation of an N-D curve.

    Given a list of N rank-1 arrays `x` describing a curve in N-D space
    parametrized by `u`, compute a smooth approximating spline curve
    g(`u`) using the FITPACK routine ``parcur``.

    Parameters
    ----------
    x : array_like
        A list of sample vector arrays representing the curve.
    w : array_like, optional
        Strictly positive rank-1 array of weights, the same length as
        ``x[0]``.  Default is ``ones(len(x[0]))``.
    u : array_like, optional
        Parameter values.  When not given they are computed from the
        normalized cumulative chord lengths of the points.
    ub, ue : int, optional
        End-points of the parameter interval; default to ``u[0]`` and
        ``u[-1]``.
    k : int, optional
        Spline degree, ``1 <= k <= 5`` (default 3; cubic recommended,
        avoid even degrees with small `s`).
    task : int, optional
        0: fit for smoothing factor `s` (default); 1: refit with a new
        `s` (requires a previous call with task 0 or 1 on the same
        data); -1: weighted least-squares spline for the given knots `t`.
    s : float, optional
        Smoothing condition ``sum((w * (y - g))**2, axis=0) <= s``.
        Larger `s` means more smoothing.
    t : int, optional
        Knots, required when ``task == -1``.
    full_output : int, optional
        Non-zero to also return ``fp``, ``ier`` and ``msg``.
    nest : int, optional
        Over-estimate of the total number of knots (storage hint).
        By default ``nest = m/2``; ``nest = m+k+1`` is always enough.
    per : int, optional
        Non-zero for a periodic approximation with period
        ``x[m-1] - x[0]``; then ``y[m-1]`` and ``w[m-1]`` are unused.
    quiet : int, optional
        Non-zero to suppress messages.  Deprecated; use standard Python
        warning filters instead.

    Returns
    -------
    tck : tuple
        ``(t, c, k)``: knots, B-spline coefficients and spline degree.
    u : array
        Values of the parameter.
    fp : float
        Weighted sum of squared residuals (only if `full_output`).
    ier : int
        Success flag; success when ``ier <= 0``, ``ier in [1, 2, 3]``
        means an error occurred but was not raised (only if
        `full_output`).
    msg : str
        Message corresponding to `ier` (only if `full_output`).

    See Also
    --------
    splrep, splev, sproot, spalde, splint, bisplrep, bisplev
    UnivariateSpline, BivariateSpline, BSpline, make_interp_spline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives.  The
    number of dimensions N must be smaller than 11.  Unlike `splrep`,
    the coefficient array ``c`` has ``k+1`` fewer entries than the knot
    vector and is not zero-padded.

    References
    ----------
    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
           parametric splines, Computer Graphics and Image Processing",
           20 (1982) 171-184.
    .. [2] P. Dierckx, "Curve and surface fitting with splines",
           Monographs on Numerical Analysis, Oxford University Press,
           1993.

    Examples
    --------
    Generate a discretization of a limacon curve in polar coordinates:

    >>> phi = np.linspace(0, 2.*np.pi, 40)
    >>> r = 0.5 + np.cos(phi)          # polar coords
    >>> x, y = r * np.cos(phi), r * np.sin(phi)    # convert to cartesian

    And interpolate (``s=0`` forces interpolation; ``u`` is generated
    automatically):

    >>> from scipy.interpolate import splprep, splev
    >>> tck, u = splprep([x, y], s=0)
    >>> new_points = splev(u, tck)
    """
    # Thin wrapper: all of the actual fitting is done by the
    # implementation module; only the public entry point lives here.
    return _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output,
                         nest, per, quiet)
+
+
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
           full_output=0, per=0, quiet=1):
    """
    Find the B-spline representation of a 1-D curve.

    Given the data points ``(x[i], y[i])``, determine a smooth spline
    approximation of degree `k` on the interval ``xb <= x <= xe`` using
    the FITPACK routine ``curfit``.

    Parameters
    ----------
    x, y : array_like
        The data points defining a curve ``y = f(x)``.
    w : array_like, optional
        Strictly positive rank-1 array of weights, the same length as
        `x` and `y`.  Default is ``ones(len(x))``.
    xb, xe : float, optional
        The interval to fit.  If None, default to ``x[0]`` and
        ``x[-1]`` respectively.
    k : int, optional
        Spline degree, ``1 <= k <= 5`` (cubic recommended; avoid even
        degrees with small `s`).
    task : {1, 0, -1}, optional
        0: fit for smoothing factor `s`; 1: refit with a new `s`
        (requires a previous call with task 0 or 1 on the same data);
        -1: weighted least-squares spline for the given interior knots
        `t` (end knots are added automatically).
    s : float, optional
        Smoothing condition ``sum((w * (y - g))**2, axis=0) <= s``.
        Default: ``m - sqrt(2*m)`` when weights are supplied, else 0.0
        (interpolating).
    t : array_like, optional
        Interior knots for ``task = -1``; supplying `t` sets task to -1.
    full_output : bool, optional
        Non-zero to also return ``fp``, ``ier`` and ``msg``.
    per : bool, optional
        Non-zero for a periodic approximation with period
        ``x[m-1] - x[0]``; then ``y[m-1]`` and ``w[m-1]`` are unused.
    quiet : bool, optional
        Non-zero to suppress messages.  Deprecated; use standard Python
        warning filters instead.

    Returns
    -------
    tck : tuple
        ``(t, c, k)``: knots, B-spline coefficients, spline degree.
    fp : array, optional
        Weighted sum of squared residuals (only if `full_output`).
    ier : int, optional
        Success flag; success when ``ier <= 0`` (only if `full_output`).
    msg : str, optional
        Message corresponding to `ier` (only if `full_output`).

    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint, bisplrep, bisplev
    BSpline, make_interp_spline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives.  The
    values of `x` must be unique, otherwise the results are not
    sensible.  If provided, knots `t` must satisfy the
    Schoenberg-Whitney conditions.  The coefficient array ``c`` is
    zero-padded to the length of the knot vector ``t`` (the trailing
    ``k + 1`` coefficients are ignored by `splev` and `BSpline`),
    unlike `splprep`.

    References
    ----------
    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
           integration of experimental data using spline functions",
           J.Comp.Appl.Maths 1 (1975) 165-184.
    .. [2] P. Dierckx, "Curve and surface fitting with splines",
           Monographs on Numerical Analysis, Oxford University Press,
           1993.

    Examples
    --------
    >>> from scipy.interpolate import splev, splrep
    >>> x = np.linspace(0, 10, 10)
    >>> y = np.sin(x)
    >>> spl = splrep(x, y)
    >>> x2 = np.linspace(0, 10, 200)
    >>> y2 = splev(x2, spl)
    """
    # Thin wrapper: the actual fit is implemented in _fitpack_impl.
    return _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per,
                        quiet)
+
+
def splev(x, tck, der=0, ext=0):
    """
    Evaluate a B-spline or its derivatives.

    Given the knots and coefficients of a B-spline representation,
    evaluate the value of the smoothing polynomial and its derivatives.
    This wraps the FORTRAN routines splev and splder of FITPACK.

    Parameters
    ----------
    x : array_like
        Points at which to evaluate the spline.  If `tck` came from
        `splprep`, pass the parameter values ``u``.
    tck : 3-tuple or a BSpline object
        A sequence of length 3 returned by `splrep` or `splprep`
        (knots, coefficients, degree), or a `BSpline` instance.
    der : int, optional
        Order of derivative to compute (must be ``<= k``).
    ext : int, optional
        Behavior for elements of `x` outside the knot interval:
        0 return the extrapolated value (default), 1 return 0,
        2 raise a ValueError, 3 return the boundary value.

    Returns
    -------
    y : ndarray or list of ndarrays
        Spline values at the points in `x`.  For `splprep` output this
        is a list of arrays representing the curve in N-D space.

    Notes
    -----
    Manipulating tck-tuples directly is not recommended; prefer
    `BSpline` objects in new code.

    See Also
    --------
    splprep, splrep, sproot, spalde, splint, bisplrep, bisplev, BSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
           Theory, 6, p.50-62, 1972.
    .. [2] P. Dierckx, "Curve and surface fitting with splines",
           Monographs on Numerical Analysis, Oxford University Press,
           1993.
    """
    if not isinstance(tck, BSpline):
        return _impl.splev(x, tck, der, ext)

    if tck.c.ndim > 1:
        mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
                "not recommended. Use BSpline.__call__(x) instead.")
        warnings.warn(mesg, DeprecationWarning)

    # BSpline supports only the extrapolating mode; map it and reject
    # every other out-of-bounds behavior.
    try:
        extrapolate = {0: True, }[ext]
    except KeyError as e:
        raise ValueError("Extrapolation mode %s is not supported "
                         "by BSpline." % ext) from e

    return tck(x, der, extrapolate=extrapolate)
+
+
def splint(a, b, tck, full_output=0):
    """
    Evaluate the definite integral of a B-spline between two given points.

    Parameters
    ----------
    a, b : float
        The end-points of the integration interval.
    tck : tuple or a BSpline instance
        If a tuple, then it should be a sequence of length 3, containing
        the vector of knots, the B-spline coefficients, and the degree of
        the spline (see `splev`).
    full_output : int, optional
        Non-zero to return optional output (tuple input only; ignored,
        with a warning, for `BSpline` input).

    Returns
    -------
    integral : float
        The resulting integral.
    wrk : ndarray
        An array containing the integrals of the normalized B-splines
        defined on the set of knots.
        (Only returned if `full_output` is non-zero and `tck` is a
        tuple.)

    Notes
    -----
    `splint` silently assumes that the spline function is zero outside
    the data interval (`a`, `b`).

    Manipulating the tck-tuples directly is not recommended. In new
    code, prefer using the `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splev
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] P.W. Gaffney, The calculation of indefinite integrals of
           b-splines", J. Inst. Maths Applics, 17, p.37-41, 1976.
    .. [2] P. Dierckx, "Curve and surface fitting with splines",
           Monographs on Numerical Analysis, Oxford University Press,
           1993.
    """
    if isinstance(tck, BSpline):
        if tck.c.ndim > 1:
            mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
                    "not recommended. Use BSpline.integrate() instead.")
            warnings.warn(mesg, DeprecationWarning)

        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)
            # BUG FIX: this message used to be constructed but never
            # issued, so the ignored argument was dropped silently.
            warnings.warn(mesg)

        return tck.integrate(a, b, extrapolate=False)
    else:
        return _impl.splint(a, b, tck, full_output)
+
+
def sproot(tck, mest=10):
    """
    Find the roots of a cubic B-spline.

    Given the knots (>=8) and coefficients of a cubic B-spline, return
    the roots of the spline.

    Parameters
    ----------
    tck : tuple or a BSpline object
        If a tuple, a sequence of length 3 (knots, B-spline
        coefficients, spline degree).  The number of knots must be
        >= 8, the degree must be 3, and the knots must be monotonically
        increasing.
    mest : int, optional
        An estimate of the number of zeros (Default is 10).

    Returns
    -------
    zeros : ndarray
        An array giving the roots of the spline.

    Notes
    -----
    Manipulating tck-tuples directly is not recommended; prefer
    `BSpline` objects in new code.

    See also
    --------
    splprep, splrep, splint, spalde, splev, bisplrep, bisplev, BSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
           Theory, 6, p.50-62, 1972.
    .. [2] P. Dierckx, "Curve and surface fitting with splines",
           Monographs on Numerical Analysis, Oxford University Press,
           1993.
    """
    if not isinstance(tck, BSpline):
        return _impl.sproot(tck, mest)

    if tck.c.ndim > 1:
        mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
                "not recommended.")
        warnings.warn(mesg, DeprecationWarning)

    t, c, k = tck.tck

    # _impl.sproot expects the interpolation axis last, so roll it
    # there; for 1-D coefficients this transpose is a no-op.
    axes = tuple(range(c.ndim))
    c = c.transpose(axes[1:] + (0,))
    return _impl.sproot((t, c, k), mest)
+
+
def spalde(x, tck):
    """
    Evaluate all derivatives of a B-spline.

    Given the knots and coefficients of a B-spline, compute all
    derivatives up to order k at a point (or set of points).

    Parameters
    ----------
    x : array_like
        A point or a set of points at which to evaluate the
        derivatives.  Note that ``t(k) <= x <= t(n-k+1)`` must hold for
        each `x`.
    tck : tuple
        A tuple ``(t, c, k)``: knots, B-spline coefficients and spline
        degree (see `splev`).  `BSpline` instances are rejected.

    Returns
    -------
    results : {ndarray, list of ndarrays}
        An array (or a list of arrays) containing all derivatives up to
        order k inclusive for each point `x`.

    Raises
    ------
    TypeError
        If `tck` is a `BSpline` instance.

    See Also
    --------
    splprep, splrep, splint, sproot, splev, bisplrep, bisplev, BSpline

    References
    ----------
    .. [1] C. de Boor: On calculating with b-splines, J. Approximation
           Theory 6 (1972) 50-62.
    .. [2] P. Dierckx : Curve and surface fitting with splines,
           Monographs on Numerical Analysis, Oxford University Press,
           1993.
    """
    if isinstance(tck, BSpline):
        raise TypeError("spalde does not accept BSpline instances.")
    return _impl.spalde(x, tck)
+
+
def insert(x, tck, m=1, per=0):
    """
    Insert knots into a B-spline.

    Given the knots and coefficients of a B-spline representation,
    create a new B-spline with a knot inserted `m` times at point `x`.
    This wraps the FORTRAN routine insert of FITPACK.

    Parameters
    ----------
    x (u) : array_like
        A 1-D point at which to insert a new knot(s).  If `tck` was
        returned from ``splprep``, then the parameter values ``u``
        should be given.
    tck : a `BSpline` instance or a tuple
        If tuple, a tuple ``(t, c, k)``: knots, B-spline coefficients
        and spline degree.
    m : int, optional
        The number of times to insert the given knot (its multiplicity).
        Default is 1.
    per : int, optional
        If non-zero, the input spline is considered periodic.

    Returns
    -------
    BSpline instance or a tuple
        A new B-spline with knots t, coefficients c, and degree k.
        A `BSpline` is returned iff the input was a `BSpline`.

    Notes
    -----
    Based on the FITPACK algorithms described by Boehm and Dierckx.

    Examples
    --------
    >>> from scipy.interpolate import splrep, insert
    >>> x = np.linspace(0, 10, 5)
    >>> y = np.sin(x)
    >>> tck = splrep(x, y)
    >>> tck[0]
    array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
    >>> insert(3, tck)[0]
    array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
    """
    if not isinstance(tck, BSpline):
        return _impl.insert(x, tck, m, per)

    t, c, k = tck.tck

    # FITPACK wants the interpolation axis last; for a 1-D coefficient
    # array these transposes are no-ops.
    axes = tuple(range(c.ndim))
    c = c.transpose(axes[1:] + (0,))
    t_new, c_new, k_new = _impl.insert(x, (t, c, k), m, per)

    # Roll the interpolation axis back to the front.
    c_new = np.asarray(c_new)
    c_new = c_new.transpose((axes[-1],) + axes[:-1])
    return BSpline(t_new, c_new, k_new)
+
+
def splder(tck, n=1):
    """
    Compute the spline representation of the derivative of a given spline.

    Parameters
    ----------
    tck : BSpline instance or a tuple of (t, c, k)
        Spline whose derivative to compute.
    n : int, optional
        Order of derivative to evaluate. Default: 1.

    Returns
    -------
    `BSpline` instance or tuple
        Spline of order ``k2 = k - n`` representing the derivative of
        the input spline.  A tuple is returned iff the input was a
        tuple.

    Notes
    -----
    .. versionadded:: 0.13.0

    See Also
    --------
    splantider, splev, spalde
    BSpline

    Examples
    --------
    Differentiate a spline and find the zeros of the derivative (NB:
    `sproot` only works for order 3 splines, so fit an order 4 spline):

    >>> from scipy.interpolate import splrep, splder, sproot
    >>> x = np.linspace(0, 10, 70)
    >>> spl = splrep(x, np.sin(x), k=4)
    >>> sproot(splder(spl)) / np.pi
    array([ 0.50000001,  1.5       ,  2.49999998])
    """
    if isinstance(tck, BSpline):
        return tck.derivative(n)
    return _impl.splder(tck, n)
+
+
def splantider(tck, n=1):
    """
    Compute the spline for the antiderivative (integral) of a given spline.

    Parameters
    ----------
    tck : BSpline instance or a tuple of (t, c, k)
        Spline whose antiderivative to compute.
    n : int, optional
        Order of antiderivative to evaluate. Default: 1.

    Returns
    -------
    BSpline instance or a tuple of (t2, c2, k2)
        Spline of order ``k2 = k + n`` representing the antiderivative
        of the input spline.  A tuple is returned iff the input was a
        tuple.

    See Also
    --------
    splder, splev, spalde
    BSpline

    Notes
    -----
    The `splder` function is the inverse operation of this function:
    ``splder(splantider(tck))`` is identical to `tck`, modulo rounding
    error.

    .. versionadded:: 0.13.0

    Examples
    --------
    An antiderivative can be used to evaluate definite integrals:

    >>> from scipy.interpolate import splrep, splantider, splev
    >>> x = np.linspace(0, np.pi/2, 70)
    >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
    >>> ispl = splantider(splrep(x, y))
    >>> splev(np.pi/2, ispl) - splev(0, ispl)
    2.2572053588768486
    """
    if isinstance(tck, BSpline):
        return tck.antiderivative(n)
    return _impl.splantider(tck, n)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/fitpack2.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/fitpack2.py
new file mode 100644
index 0000000..937ee4d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/fitpack2.py
@@ -0,0 +1,1979 @@
+"""
+fitpack --- curve and surface fitting with splines
+
+fitpack is based on a collection of Fortran routines DIERCKX
+by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
+to double routines by Pearu Peterson.
+"""
+# Created by Pearu Peterson, June,August 2003
+__all__ = [
+ 'UnivariateSpline',
+ 'InterpolatedUnivariateSpline',
+ 'LSQUnivariateSpline',
+ 'BivariateSpline',
+ 'LSQBivariateSpline',
+ 'SmoothBivariateSpline',
+ 'LSQSphereBivariateSpline',
+ 'SmoothSphereBivariateSpline',
+ 'RectBivariateSpline',
+ 'RectSphereBivariateSpline']
+
+
+import warnings
+
+from numpy import zeros, concatenate, ravel, diff, array, ones
+import numpy as np
+
+from . import fitpack
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
+# ############### Univariate spline ####################
+
+_curfit_messages = {1: """
+The required storage space exceeds the available storage space, as
+specified by the parameter nest: nest too small. If nest is already
+large (say nest > m/2), it may also indicate that s is too small.
+The approximation returned is the weighted least-squares spline
+according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
+gives the corresponding weighted sum of squared residuals (fp>s).
+""",
+ 2: """
+A theoretically impossible result was found during the iteration
+process for finding a smoothing spline with fp = s: s too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+ 3: """
+The maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+ 10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[0]0, i=0..m-1
+if iopt=-1:
+ xb>> from scipy.interpolate import UnivariateSpline
+ >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
+ >>> w = np.isnan(y)
+ >>> y[w] = 0.
+ >>> spl = UnivariateSpline(x, y, w=~w)
+
+ Notice the need to replace a ``nan`` by a numerical value (precise value
+ does not matter as long as the corresponding weight is zero.)
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import UnivariateSpline
+ >>> x = np.linspace(-3, 3, 50)
+ >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
+ >>> plt.plot(x, y, 'ro', ms=5)
+
+ Use the default value for the smoothing parameter:
+
+ >>> spl = UnivariateSpline(x, y)
+ >>> xs = np.linspace(-3, 3, 1000)
+ >>> plt.plot(xs, spl(xs), 'g', lw=3)
+
+ Manually change the amount of smoothing:
+
+ >>> spl.set_smoothing_factor(0.5)
+ >>> plt.plot(xs, spl(xs), 'b', lw=3)
+ >>> plt.show()
+
+ """
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
             ext=0, check_finite=False):
    """Fit a smoothing spline to the data (x, y)."""
    # Sanitize the arguments (raises on bad shapes/values) and
    # normalize the extrapolation mode.
    x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, s,
                                                  ext, check_finite)

    # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
    data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0], xe=bbox[1], s=s)
    if data[-1] == 1:
        # ier == 1: nest was too small -- retry with the maximum bound.
        data = self._reset_nest(data)
    self._data = data
    self._reset_class()
+
@staticmethod
def validate_input(x, y, w, bbox, k, s, ext, check_finite):
    """Validate and normalize the constructor arguments.

    Returns the arrays converted to ndarrays plus the numeric
    extrapolation mode; raises ValueError on any inconsistency.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    bbox = np.asarray(bbox)
    if w is not None:
        w = np.asarray(w)

    if check_finite:
        w_finite = True if w is None else np.isfinite(w).all()
        if not (np.isfinite(x).all() and np.isfinite(y).all()
                and w_finite):
            raise ValueError("x and y array must not contain "
                             "NaNs or infs.")

    # Interpolation (s == 0) needs strictly increasing x; smoothing
    # only needs non-decreasing x.
    if s is None or s > 0:
        if not np.all(np.diff(x) >= 0.0):
            raise ValueError("x must be increasing if s > 0")
    elif not np.all(np.diff(x) > 0.0):
        raise ValueError("x must be strictly increasing if s = 0")

    if x.size != y.size:
        raise ValueError("x and y should have a same length")
    if w is not None and not x.size == y.size == w.size:
        raise ValueError("x, y, and w should have a same length")
    if bbox.shape != (2,):
        raise ValueError("bbox shape should be (2,)")
    if not (1 <= k <= 5):
        raise ValueError("k should be 1 <= k <= 5")
    if s is not None and not s >= 0.0:
        raise ValueError("s should be s >= 0.0")

    try:
        ext = _extrap_modes[ext]
    except KeyError as e:
        raise ValueError("Unknown extrapolation mode %s." % ext) from e

    return x, y, w, bbox, ext
+
@classmethod
def _from_tck(cls, tck, ext=0):
    """Construct a spline object directly from a (t, c, k) tuple."""
    knots, coeffs, degree = tck
    # Bypass __init__ -- there is no fitting to do.
    self = cls.__new__(cls)
    self._eval_args = tck
    # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier; only the
    # slots needed for evaluation are filled in.
    self._data = (None, None, None, None, None, degree, None,
                  len(knots), knots, coeffs, None, None, None, None)
    self.ext = ext
    return self
+
def _reset_class(self):
    """Re-point self.__class__ according to the FITPACK return flag."""
    data = self._data
    n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
    self._eval_args = t[:n], c[:n], k
    if ier == 0:
        # abs(fp-s)/s <= tol (tol = 0.001 inside the program): the
        # requested smoothing spline was found, nothing to adjust.
        return
    if ier == -1:
        # The returned spline interpolates the data.
        self._set_class(InterpolatedUnivariateSpline)
    elif ier == -2:
        # Weighted least-squares polynomial of degree k; in this
        # extreme case fp gives the upper bound fp0 for s.
        self._set_class(LSQUnivariateSpline)
    else:
        # An error occurred; ier == 1 still yields an LSQ spline.
        if ier == 1:
            self._set_class(LSQUnivariateSpline)
        message = _curfit_messages.get(ier, 'ier=%s' % (ier))
        warnings.warn(message)
+
def _set_class(self, cls):
    """Record the spline class and switch to it when safe."""
    self._spline_class = cls
    known = (UnivariateSpline, InterpolatedUnivariateSpline,
             LSQUnivariateSpline)
    if self.__class__ in known:
        self.__class__ = cls
    # Unknown subclass: leave the class alone (cf. gh-731).
+
def _reset_nest(self, data, nest=None):
    """Re-run the fit with a larger knot storage estimate `nest`."""
    n = data[10]
    if nest is None:
        k, m = data[5], len(data[0])
        nest = m + k + 1  # the maximum bound for nest
    elif not n <= nest:
        raise ValueError("`nest` can only be increased")
    # Grow the work arrays (t, c, fpint, nrdata) to the new size.
    t, c, fpint, nrdata = (np.resize(data[j], nest)
                           for j in (8, 9, 11, 12))

    args = data[:8] + (t, c, n, fpint, nrdata, data[13])
    return dfitpack.fpcurf1(*args)
+
def set_smoothing_factor(self, s):
    """ Continue spline computation with the given smoothing
    factor s and with the knots found at the last call.

    This routine modifies the spline in place.

    """
    data = self._data
    if data[6] == -1:
        # LSQ splines (task == -1) have user-fixed knots; `s` is
        # ignored.  BUG FIX: the two adjacent string literals used to
        # concatenate to "...unchanged forLSQ spline..." (missing
        # space between 'for' and 'LSQ').
        warnings.warn('smoothing factor unchanged for '
                      'LSQ spline with fixed knots')
        return
    args = data[:6] + (s,) + data[7:]
    data = dfitpack.fpcurf1(*args)
    if data[-1] == 1:
        # nest too small, setting to maximum bound
        data = self._reset_nest(data)
    self._data = data
    self._reset_class()
+
def __call__(self, x, nu=0, ext=None):
    """
    Evaluate spline (or its nu-th derivative) at positions x.

    Parameters
    ----------
    x : array_like
        A 1-D array of points at which to evaluate the spline or its
        derivatives.  `x` can be unordered, but evaluation is more
        efficient if it is (partially) ordered.
    nu : int
        The order of derivative of the spline to compute.
    ext : int
        Behavior for elements of `x` outside the knot interval:
        0/'extrapolate' extrapolate, 1/'zeros' return 0,
        2/'raise' raise a ValueError, 3/'const' return the boundary
        value.  Defaults to the mode given at construction time.
    """
    x = np.asarray(x)
    if x.size == 0:
        # Empty input yields empty output.
        return np.array([])
    if ext is None:
        ext = self.ext
    else:
        try:
            ext = _extrap_modes[ext]
        except KeyError as e:
            raise ValueError("Unknown extrapolation mode %s." % ext) from e
    return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
+
+ def get_knots(self):
+ """ Return positions of interior knots of the spline.
+
+ Internally, the knot vector contains ``2*k`` additional boundary knots.
+ """
+ data = self._data
+ k, n = data[5], data[7]
+ return data[8][k:n-k]
+
+ def get_coeffs(self):
+ """Return spline coefficients."""
+ data = self._data
+ k, n = data[5], data[7]
+ return data[9][:n-k-1]
+
+ def get_residual(self):
+ """Return weighted sum of squared residuals of the spline approximation.
+
+ This is equivalent to::
+
+ sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
+
+ """
+ return self._data[10]
+
    def integral(self, a, b):
        """ Return definite integral of the spline between two given points.

        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.

        Returns
        -------
        integral : float
            The value of the definite integral of the spline between limits.

        Examples
        --------
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 3, 11)
        >>> y = x**2
        >>> spl = UnivariateSpline(x, y)
        >>> spl.integral(0, 3)
        9.0

        which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
        of 0 and 3.

        A caveat is that this routine assumes the spline to be zero outside of
        the data limits:

        >>> spl.integral(-1, 4)
        9.0
        >>> spl.integral(-1, 0)
        0.0

        """
        # splint treats the spline as zero outside the knot interval,
        # hence the caveat in the docstring above.
        return dfitpack.splint(*(self._eval_args+(a, b)))
+
+ def derivatives(self, x):
+ """ Return all derivatives of the spline at the point x.
+
+ Parameters
+ ----------
+ x : float
+ The point to evaluate the derivatives at.
+
+ Returns
+ -------
+ der : ndarray, shape(k+1,)
+ Derivatives of the orders 0 to k.
+
+ Examples
+ --------
+ >>> from scipy.interpolate import UnivariateSpline
+ >>> x = np.linspace(0, 3, 11)
+ >>> y = x**2
+ >>> spl = UnivariateSpline(x, y)
+ >>> spl.derivatives(1.5)
+ array([2.25, 3.0, 2.0, 0])
+
+ """
+ d, ier = dfitpack.spalde(*(self._eval_args+(x,)))
+ if not ier == 0:
+ raise ValueError("Error code returned by spalde: %s" % ier)
+ return d
+
+ def roots(self):
+ """ Return the zeros of the spline.
+
+ Restriction: only cubic splines are supported by fitpack.
+ """
+ k = self._data[5]
+ if k == 3:
+ z, m, ier = dfitpack.sproot(*self._eval_args[:2])
+ if not ier == 0:
+ raise ValueError("Error code returned by spalde: %s" % ier)
+ return z[:m]
+ raise NotImplementedError('finding roots unsupported for '
+ 'non-cubic splines')
+
    def derivative(self, n=1):
        """
        Construct a new spline representing the derivative of this spline.

        Parameters
        ----------
        n : int, optional
            Order of derivative to evaluate. Default: 1

        Returns
        -------
        spline : UnivariateSpline
            Spline of order k2=k-n representing the derivative of this
            spline.

        See Also
        --------
        splder, antiderivative

        Notes
        -----

        .. versionadded:: 0.13.0

        Examples
        --------
        This can be used for finding maxima of a curve:

        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 10, 70)
        >>> y = np.sin(x)
        >>> spl = UnivariateSpline(x, y, k=4, s=0)

        Now, differentiate the spline and find the zeros of the
        derivative. (NB: `sproot` only works for order 3 splines, so we
        fit an order 4 spline):

        >>> spl.derivative().roots() / np.pi
        array([ 0.50000001,  1.5       ,  2.49999998])

        This agrees well with roots :math:`\\pi/2 + n\\pi` of
        :math:`\\cos(x) = \\sin'(x)`.

        """
        # splder lowers the spline order by n and returns a plain tck tuple
        tck = fitpack.splder(self._eval_args, n)
        # if self.ext is 'const', derivative.ext will be 'zeros'
        ext = 1 if self.ext == 3 else self.ext
        return UnivariateSpline._from_tck(tck, ext=ext)
+
    def antiderivative(self, n=1):
        """
        Construct a new spline representing the antiderivative of this spline.

        Parameters
        ----------
        n : int, optional
            Order of antiderivative to evaluate. Default: 1

        Returns
        -------
        spline : UnivariateSpline
            Spline of order k2=k+n representing the antiderivative of this
            spline.

        Notes
        -----

        .. versionadded:: 0.13.0

        See Also
        --------
        splantider, derivative

        Examples
        --------
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, np.pi/2, 70)
        >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
        >>> spl = UnivariateSpline(x, y, s=0)

        The derivative is the inverse operation of the antiderivative,
        although some floating point error accumulates:

        >>> spl(1.7), spl.antiderivative().derivative()(1.7)
        (array(2.1565429877197317), array(2.1565429877201865))

        Antiderivative can be used to evaluate definite integrals:

        >>> ispl = spl.antiderivative()
        >>> ispl(np.pi/2) - ispl(0)
        2.2572053588768486

        This is indeed an approximation to the complete elliptic integral
        :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:

        >>> from scipy.special import ellipk
        >>> ellipk(0.8)
        2.2572053268208538

        """
        # splantider raises the spline order by n; the extrapolation mode
        # is carried over unchanged from this spline
        tck = fitpack.splantider(self._eval_args, n)
        return UnivariateSpline._from_tck(tck, self.ext)
+
+
class InterpolatedUnivariateSpline(UnivariateSpline):
    """
    1-D interpolating spline for a given set of data points.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
    Spline function passes through all provided points. Equivalent to
    `UnivariateSpline` with s=0.

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be strictly increasing
    y : (N,) array_like
        input dimension of data points
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.

    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    See Also
    --------
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    LSQUnivariateSpline :
        a spline for which knots are user-selected
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    splrep :
        a function to find the B-spline representation of a 1-D curve
    splev :
        a function to evaluate a B-spline or its derivatives
    sproot :
        a function to find the roots of a cubic B-spline
    splint :
        a function to evaluate the definite integral of a B-spline between two
        given points
    spalde :
        a function to evaluate all derivatives of a B-spline

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import InterpolatedUnivariateSpline
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
    >>> spl = InterpolatedUnivariateSpline(x, y)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
    >>> plt.show()

    Notice that the ``spl(x)`` interpolates `y`:

    >>> spl.get_residual()
    0.0

    """
    def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):

        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
                                                      ext, check_finite)
        # interpolation (s=0) requires strictly increasing abscissae
        if not np.all(diff(x) > 0.0):
            raise ValueError('x must be strictly increasing')

        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
                                      xe=bbox[1], s=0)
        self._reset_class()
+
# Explanatory text attached to the ValueError raised when dfitpack.fpchec
# rejects the (x, t, k) input in LSQUnivariateSpline.__init__.
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:

1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
   t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
   for at least one subset of data points, i.e., there must be a
   subset of data points y(j) such that
       t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
+
+
class LSQUnivariateSpline(UnivariateSpline):
    """
    1-D spline with explicit internal knots.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
    specifies the internal knots of the spline

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        Input dimension of data points
    t : (M,) array_like
        interior knots of the spline.  Must be in ascending order and::

            bbox[0] < t[0] < ... < t[-1] < bbox[-1]

    w : (N,) array_like, optional
        weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox = [x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline.  Must be 1 <= `k` <= 5.
        Default is `k` = 3, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.

    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    Raises
    ------
    ValueError
        If the interior knots do not satisfy the Schoenberg-Whitney conditions

    See Also
    --------
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    InterpolatedUnivariateSpline :
        a interpolating univariate spline for a given set of data points.
    splrep :
        a function to find the B-spline representation of a 1-D curve
    splev :
        a function to evaluate a B-spline or its derivatives
    sproot :
        a function to find the roots of a cubic B-spline
    splint :
        a function to evaluate the definite integral of a B-spline between two
        given points
    spalde :
        a function to evaluate all derivatives of a B-spline

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    Examples
    --------
    >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)

    Fit a smoothing spline with a pre-defined internal knots:

    >>> t = [-1, 0, 1]
    >>> spl = LSQUnivariateSpline(x, y, t)

    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> plt.plot(xs, spl(xs), 'g-', lw=3)
    >>> plt.show()

    Check the knot vector:

    >>> spl.get_knots()
    array([-3., -1., 0., 1., 3.])

    Constructing lsq spline using the knots from another spline:

    >>> x = np.arange(10)
    >>> s = UnivariateSpline(x, x, s=0)
    >>> s.get_knots()
    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
    >>> knt = s.get_knots()
    >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1])    # Chop 1st and last knot
    >>> s1.get_knots()
    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])

    """

    def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):

        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
                                                      ext, check_finite)
        # NOTE(review): this check allows repeated x values (non-strict),
        # unlike the strictly-increasing requirement in
        # InterpolatedUnivariateSpline -- confirm this is intentional.
        if not np.all(diff(x) >= 0.0):
            raise ValueError('x must be increasing')

        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        xb = bbox[0]
        xe = bbox[1]
        if xb is None:
            xb = x[0]
        if xe is None:
            xe = x[-1]
        # pad the interior knots with k+1 copies of each boundary value,
        # as required by the FITPACK knot-vector convention
        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
        n = len(t)
        if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
            raise ValueError('Interior knots t must satisfy '
                             'Schoenberg-Whitney conditions')
        if not dfitpack.fpchec(x, t, k) == 0:
            raise ValueError(_fpchec_error_string)
        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
        # fpint and nrdata are not meaningful for a fixed-knot LSQ fit
        self._data = data[:-3] + (None, None, data[-1])
        self._reset_class()
+
+
+# ############### Bivariate spline ####################
+
class _BivariateSplineBase(object):
    """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
    [xb,xe] x [yb, ye] calculated from a given set of data points
    (x,y,z).

    See Also
    --------
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives
    BivariateSpline :
        a base class for bivariate splines.
    SphereBivariateSpline :
        a bivariate spline on a spherical grid
    """

    def get_residual(self):
        """ Return weighted sum of squared residuals of the spline
        approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
        """
        return self.fp

    def get_knots(self):
        """ Return a tuple (tx,ty) where tx,ty contain knots positions
        of the spline with respect to x-, y-variable, respectively.
        The position of interior and additional knots are given as
        t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
        """
        return self.tck[:2]

    def get_coeffs(self):
        """ Return spline coefficients."""
        return self.tck[2]

    def __call__(self, x, y, dx=0, dy=0, grid=True):
        """
        Evaluate the spline or its derivatives at given positions.

        Parameters
        ----------
        x, y : array_like
            Input coordinates.

            If `grid` is False, evaluate the spline at points ``(x[i],
            y[i]), i=0, ..., len(x)-1``.  Standard Numpy broadcasting
            is obeyed.

            If `grid` is True: evaluate spline at the grid points
            defined by the coordinate arrays x, y. The arrays must be
            sorted to increasing order.

            Note that the axis ordering is inverted relative to
            the output of meshgrid.
        dx : int
            Order of x-derivative

            .. versionadded:: 0.14.0
        dy : int
            Order of y-derivative

            .. versionadded:: 0.14.0
        grid : bool
            Whether to evaluate the results on a grid spanned by the
            input arrays, or at points specified by the input arrays.

            .. versionadded:: 0.14.0

        """
        x = np.asarray(x)
        y = np.asarray(y)

        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        if grid:
            # empty grid input yields an empty (x.size, y.size) output
            if x.size == 0 or y.size == 0:
                return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)

            # parder handles derivative evaluation; bispev the plain case
            if dx or dy:
                z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by parder: %s" % ier)
            else:
                z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by bispev: %s" % ier)
        else:
            # standard Numpy broadcasting
            if x.shape != y.shape:
                x, y = np.broadcast_arrays(x, y)

            # flatten for the pointwise Fortran routines, remember the shape
            shape = x.shape
            x = x.ravel()
            y = y.ravel()

            if x.size == 0 or y.size == 0:
                return np.zeros(shape, dtype=self.tck[2].dtype)

            if dx or dy:
                z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by pardeu: %s" % ier)
            else:
                z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by bispeu: %s" % ier)

            # restore the broadcast shape of the pointwise result
            z = z.reshape(shape)
        return z
+
+
+_surfit_messages = {1: """
+The required storage space exceeds the available storage space: nxest
+or nyest too small, or s too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+ 2: """
+A theoretically impossible result was found during the iteration
+process for finding a smoothing spline with fp = s: s too small or
+badly chosen eps.
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+ 3: """
+the maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached:
+s too small.
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+ 4: """
+No more knots can be added because the number of b-spline coefficients
+(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
+either s or m too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+ 5: """
+No more knots can be added because the additional knot would (quasi)
+coincide with an old one: s too small or too large a weight to an
+inaccurate data point.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+ 10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
+If iopt==-1, then
+ xb= 0.0):
+ raise ValueError('w should be positive')
+ if (eps is not None) and (not 0.0 < eps < 1.0):
+ raise ValueError('eps should be between (0, 1)')
+ if not x.size >= (kx + 1) * (ky + 1):
+ raise ValueError('The length of x, y and z should be at least'
+ ' (kx+1) * (ky+1)')
+ return x, y, z, w
+
+
class SmoothBivariateSpline(BivariateSpline):
    """
    Smooth bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    w : array_like, optional
        Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x), max(x), min(y), max(y)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``z[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value within the open
        interval ``(0, 1)``, the default is 1e-16.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    RectSphereBivariateSpline :
        a bivariate spline over a rectangular mesh on a sphere
    SmoothSphereBivariateSpline :
        a smoothing bivariate spline in spherical coordinates
    LSQSphereBivariateSpline :
        a bivariate spline in spherical coordinates using weighted
        least-squares fitting
    RectBivariateSpline :
        a bivariate spline over a rectangular mesh
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.

    """

    def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
                 eps=1e-16):

        x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
        bbox = ravel(bbox)
        if not bbox.shape == (4,):
            raise ValueError('bbox shape should be (4,)')
        if s is not None and not s >= 0.0:
            raise ValueError("s should be s >= 0.0")

        xb, xe, yb, ye = bbox
        # first pass with a minimal second workspace; surfit reports the
        # required size through ier when it is insufficient
        nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
                                                                xb, xe, yb,
                                                                ye, kx, ky,
                                                                s=s, eps=eps,
                                                                lwrk2=1)
        if ier > 10:          # lwrk2 was too small, re-run
            nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
                                                                    xb, xe, yb,
                                                                    ye, kx, ky,
                                                                    s=s,
                                                                    eps=eps,
                                                                    lwrk2=ier)
        if ier in [0, -1, -2]:  # normal return
            pass
        else:
            message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)

        self.fp = fp
        # trim the oversized work arrays down to the fitted sizes
        self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
        self.degrees = kx, ky
+
+
class LSQBivariateSpline(BivariateSpline):
    """
    Weighted least-squares bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    tx, ty : array_like
        Strictly ordered 1-D sequences of knots coordinates.
    w : array_like, optional
        Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
    bbox : (4,) array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value within the open
        interval ``(0, 1)``, the default is 1e-16.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    RectSphereBivariateSpline :
        a bivariate spline over a rectangular mesh on a sphere
    SmoothSphereBivariateSpline :
        a smoothing bivariate spline in spherical coordinates
    LSQSphereBivariateSpline :
        a bivariate spline in spherical coordinates using weighted
        least-squares fitting
    RectBivariateSpline :
        a bivariate spline over a rectangular mesh.
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.

    """

    def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
                 eps=None):
        # NOTE(review): the docstring says the eps default is 1e-16 but the
        # signature default is None (dfitpack then supplies its own default)
        # -- confirm which is authoritative.

        x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
        bbox = ravel(bbox)
        if not bbox.shape == (4,):
            raise ValueError('bbox shape should be (4,)')

        # full knot vectors: kx+1 (resp. ky+1) boundary knots on each side
        # of the user-supplied interior knots; boundary values are filled
        # in by surfit_lsq, so they start as zeros here
        nx = 2*kx+2+len(tx)
        ny = 2*ky+2+len(ty)
        tx1 = zeros((nx,), float)
        ty1 = zeros((ny,), float)
        tx1[kx+1:nx-kx-1] = tx
        ty1[ky+1:ny-ky-1] = ty

        xb, xe, yb, ye = bbox
        tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
                                                   xb, xe, yb, ye,
                                                   kx, ky, eps, lwrk2=1)
        if ier > 10:
            # lwrk2 was too small; ier carries the required size
            tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
                                                       xb, xe, yb, ye,
                                                       kx, ky, eps, lwrk2=ier)
        if ier in [0, -1, -2]:  # normal return
            pass
        else:
            if ier < -2:
                # rank-deficient system; report the deficiency magnitude
                deficiency = (nx-kx-1)*(ny-ky-1)+ier
                message = _surfit_messages.get(-3) % (deficiency)
            else:
                message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        self.tck = tx1, ty1, c
        self.degrees = kx, ky
+
+
class RectBivariateSpline(BivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh.

    Can be used for both smoothing and interpolating data.

    Parameters
    ----------
    x,y : array_like
        1-D arrays of coordinates in strictly ascending order.
    z : array_like
        2-D array of data with shape (x.size,y.size).
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x), max(x), min(y), max(y)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((z[i]-f(x[i], y[i]))**2, axis=0) <= s`` where f is a spline
        function. Default is ``s=0``, which is for interpolation.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    RectSphereBivariateSpline :
        a bivariate spline over a rectangular mesh on a sphere
    SmoothSphereBivariateSpline :
        a smoothing bivariate spline in spherical coordinates
    LSQSphereBivariateSpline :
        a bivariate spline in spherical coordinates using weighted
        least-squares fitting
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives

    """

    def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
        x, y, bbox = ravel(x), ravel(y), ravel(bbox)
        z = np.asarray(z)
        if not np.all(diff(x) > 0.0):
            raise ValueError('x must be strictly increasing')
        if not np.all(diff(y) > 0.0):
            raise ValueError('y must be strictly increasing')
        if not x.size == z.shape[0]:
            raise ValueError('x dimension of z must have same number of '
                             'elements as x')
        if not y.size == z.shape[1]:
            raise ValueError('y dimension of z must have same number of '
                             'elements as y')
        if not bbox.shape == (4,):
            raise ValueError('bbox shape should be (4,)')
        if s is not None and not s >= 0.0:
            raise ValueError("s should be s >= 0.0")

        # regrid_smth expects the gridded data flattened to 1-D
        z = ravel(z)
        xb, xe, yb, ye = bbox
        nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
                                                          ye, kx, ky, s)

        if ier not in [0, -1, -2]:
            msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)

        self.fp = fp
        # trim oversized work arrays down to the fitted knot/coef counts
        self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
        self.degrees = kx, ky
+
+
+_spherefit_messages = _surfit_messages.copy()
+_spherefit_messages[10] = """
+ERROR. On entry, the input data are controlled on validity. The following
+ restrictions must be satisfied:
+ -1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 00, i=1,...,m
+ lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
+ kwrk >= m+(ntest-7)*(npest-7)
+ if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
+ 0=0: s>=0
+ if one of these conditions is found to be violated,control
+ is immediately repassed to the calling program. in that
+ case there is no approximation returned."""
+_spherefit_messages[-3] = """
+WARNING. The coefficients of the spline returned have been computed as the
+ minimal norm least-squares solution of a (numerically) rank
+ deficient system (deficiency=%i, rank=%i). Especially if the rank
+ deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
+ the results may be inaccurate. They could also seriously depend on
+ the value of eps."""
+
+
class SphereBivariateSpline(_BivariateSplineBase):
    """
    Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
    given set of data points (theta,phi,r).

    .. versionadded:: 0.11.0

    See Also
    --------
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    SmoothUnivariateSpline :
        a smooth univariate spline through the given points
    LSQUnivariateSpline :
        a univariate spline using weighted least-squares fitting
    """

    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
        """
        Evaluate the spline or its derivatives at given positions.

        Parameters
        ----------
        theta, phi : array_like
            Input coordinates.

            If `grid` is False, evaluate the spline at points
            ``(theta[i], phi[i]), i=0, ..., len(x)-1``.  Standard
            Numpy broadcasting is obeyed.

            If `grid` is True: evaluate spline at the grid points
            defined by the coordinate arrays theta, phi. The arrays
            must be sorted to increasing order.
        dtheta : int, optional
            Order of theta-derivative

            .. versionadded:: 0.14.0
        dphi : int
            Order of phi-derivative

            .. versionadded:: 0.14.0
        grid : bool
            Whether to evaluate the results on a grid spanned by the
            input arrays, or at points specified by the input arrays.

            .. versionadded:: 0.14.0

        """
        theta = np.asarray(theta)
        phi = np.asarray(phi)

        # range validation is skipped for empty inputs (min/max would fail);
        # the base-class __call__ handles empty arrays itself
        if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
            raise ValueError("requested theta out of bounds.")
        if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
            raise ValueError("requested phi out of bounds.")

        # delegate to the rectangular-domain evaluator with theta->x, phi->y
        return _BivariateSplineBase.__call__(self, theta, phi,
                                             dx=dtheta, dy=dphi, grid=grid)

    def ev(self, theta, phi, dtheta=0, dphi=0):
        """
        Evaluate the spline at points

        Returns the interpolated value at ``(theta[i], phi[i]),
        i=0,...,len(theta)-1``.

        Parameters
        ----------
        theta, phi : array_like
            Input coordinates. Standard Numpy broadcasting is obeyed.
        dtheta : int, optional
            Order of theta-derivative

            .. versionadded:: 0.14.0
        dphi : int, optional
            Order of phi-derivative

            .. versionadded:: 0.14.0
        """
        return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
+
+
class SmoothSphereBivariateSpline(SphereBivariateSpline):
    """
    Smooth bivariate spline approximation in spherical coordinates.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval
        ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
    w : array_like, optional
        Positive 1-D sequence of weights.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``r[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value within the open
        interval ``(0, 1)``, the default is 1e-16.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    RectSphereBivariateSpline :
        a bivariate spline over a rectangular mesh on a sphere
    LSQSphereBivariateSpline :
        a bivariate spline in spherical coordinates using weighted
        least-squares fitting
    RectBivariateSpline :
        a bivariate spline over a rectangular mesh.
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> from scipy.interpolate import SmoothSphereBivariateSpline
    >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
    ...                                   data.T.ravel(), s=3.5)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2 * np.pi, 90)

    >>> data_smth = lut(fine_lats, fine_lons)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_smth, interpolation='nearest')
    >>> plt.show()

    """

    def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):

        theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)

        # input validation
        if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
            raise ValueError('theta should be between [0, pi]')
        if not ((0.0 <= phi).all() and (phi <= 2.0 * np.pi).all()):
            raise ValueError('phi should be between [0, 2pi]')
        if w is not None:
            w = np.asarray(w)
            if not (w >= 0.0).all():
                raise ValueError('w should be positive')
        if not s >= 0.0:
            raise ValueError('s should be positive')
        if not 0.0 < eps < 1.0:
            raise ValueError('eps should be between (0, 1)')

        # Broadcast a scalar weight to one weight per data point.
        # BUG fix: the previous test, ``np.issubclass_(w, float)``, compared
        # the *instance* (by then already an ndarray) against a class and was
        # therefore always False, so scalar weights were never broadcast.
        if w is not None and np.ndim(w) == 0:
            w = ones(len(theta)) * float(w)
        nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
                                                                r, w=w, s=s,
                                                                eps=eps)
        if ier not in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)

        self.fp = fp
        # trim the oversized work arrays; sphere splines are always bicubic,
        # hence the fixed (nt-4)*(np-4) coefficient count and degrees (3, 3)
        self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
        self.degrees = (3, 3)
+
+
class LSQSphereBivariateSpline(SphereBivariateSpline):
    """
    Weighted least-squares bivariate spline approximation in spherical
    coordinates.

    Determines a smoothing bicubic spline according to a given
    set of knots in the `theta` and `phi` directions.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval
        ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
    tt, tp : array_like
        Strictly ordered 1-D sequences of knots coordinates.
        Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
    w : array_like, optional
        Positive 1-D sequence of weights, of the same length as `theta`, `phi`
        and `r`.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value within the
        open interval ``(0, 1)``, the default is 1e-16.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    RectSphereBivariateSpline :
        a bivariate spline over a rectangular mesh on a sphere
    SmoothSphereBivariateSpline :
        a smoothing bivariate spline in spherical coordinates
    RectBivariateSpline :
        a bivariate spline over a rectangular mesh.
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> from scipy.interpolate import LSQSphereBivariateSpline
    >>> import matplotlib.pyplot as plt

    >>> theta = np.linspace(0, np.pi, num=7)
    >>> phi = np.linspace(0, 2*np.pi, num=9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object. Here, we must also specify the
    coordinates of the knots to use.

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> knotst, knotsp = theta.copy(), phi.copy()
    >>> knotst[0] += .0001
    >>> knotst[-1] -= .0001
    >>> knotsp[0] += .0001
    >>> knotsp[-1] -= .0001
    >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
    ...                                data.T.ravel(), knotst, knotsp)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2*np.pi, 90)
    >>> data_lsq = lut(fine_lats, fine_lons)

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_lsq, interpolation='nearest')
    >>> plt.show()

    """

    def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
        """Fit the least-squares spline; see the class docstring for the
        meaning and valid ranges of all parameters.

        Raises
        ------
        ValueError
            If an input lies outside its documented range, or if the
            underlying FITPACK routine reports an error.
        """
        theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
        tt, tp = np.asarray(tt), np.asarray(tp)

        # input validation
        if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
            raise ValueError('theta should be between [0, pi]')
        if not ((0.0 <= phi).all() and (phi <= 2*np.pi).all()):
            raise ValueError('phi should be between [0, 2pi]')
        if not ((0.0 < tt).all() and (tt < np.pi).all()):
            raise ValueError('tt should be between (0, pi)')
        if not ((0.0 < tp).all() and (tp < 2*np.pi).all()):
            raise ValueError('tp should be between (0, 2pi)')
        if w is not None:
            w = np.asarray(w)
            if w.ndim == 0:
                # A scalar weight applies uniformly to every data point.
                # BUGFIX: replaces `np.issubclass_(w, float)`, which was
                # evaluated *after* the asarray conversion (so it could
                # never be true) and uses an API removed in NumPy >= 1.24.
                w = np.full(len(theta), float(w))
            if not (w >= 0.0).all():
                raise ValueError('w should be positive')
        if not 0.0 < eps < 1.0:
            raise ValueError('eps should be between (0, 1)')

        # Build the full knot vectors: four boundary knots at each end
        # (bicubic spline) surrounding the user-supplied interior knots.
        nt_, np_ = 8 + len(tt), 8 + len(tp)
        tt_, tp_ = np.zeros((nt_,), float), np.zeros((np_,), float)
        tt_[4:-4], tp_[4:-4] = tt, tp
        tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
        tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
                                                     w=w, eps=eps)
        # Positive ier codes are hard failures; 0 and negative codes are
        # success (possibly with warnings).
        if ier > 0:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)

        self.fp = fp
        self.tck = tt_, tp_, c
        self.degrees = (3, 3)
+
+
+_spfit_messages = _surfit_messages.copy()
+_spfit_messages[10] = """
+ERROR: on entry, the input data are controlled on validity
+ the following restrictions must be satisfied.
+ -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
+ -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
+ -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
+ mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
+ kwrk>=5+mu+mv+nuest+nvest,
+ lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
+ 0< u(i-1)=0: s>=0
+ if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
+ if one of these conditions is found to be violated,control is
+ immediately repassed to the calling program. in that case there is no
+ approximation returned."""
+
+
class RectSphereBivariateSpline(SphereBivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh on a sphere.

    Can be used for smoothing data.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    u : array_like
        1-D array of colatitude coordinates in strictly ascending order.
        Coordinates must be given in radians and lie within the interval
        ``[0, pi]``.
    v : array_like
        1-D array of longitude coordinates in strictly ascending order.
        Coordinates must be given in radians. First element (``v[0]``) must lie
        within the interval ``[-pi, pi)``. Last element (``v[-1]``) must satisfy
        ``v[-1] <= v[0] + 2*pi``.
    r : array_like
        2-D array of data with shape ``(u.size, v.size)``.
    s : float, optional
        Positive smoothing factor defined for estimation condition
        (``s=0`` is for interpolation).
    pole_continuity : bool or (bool, bool), optional
        Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
        ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
        will be 1 or 0 when this is True or False, respectively.
        Defaults to False.
    pole_values : float or (float, float), optional
        Data values at the poles ``u=0`` and ``u=pi``. Either the whole
        parameter or each individual element can be None. Defaults to None.
    pole_exact : bool or (bool, bool), optional
        Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
        value is considered to be the right function value, and it will be
        fitted exactly. If False, the value will be considered to be a data
        value just like the other data values. Defaults to False.
    pole_flat : bool or (bool, bool), optional
        For the poles at ``u=0`` and ``u=pi``, specify whether or not the
        approximation has vanishing derivatives. Defaults to False.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    SmoothSphereBivariateSpline :
        a smoothing bivariate spline in spherical coordinates
    LSQSphereBivariateSpline :
        a bivariate spline in spherical coordinates using weighted
        least-squares fitting
    RectBivariateSpline :
        a bivariate spline over a rectangular mesh.
    bisplrep :
        a function to find a bivariate B-spline representation of a surface
    bisplev :
        a function to evaluate a bivariate B-spline and its derivatives

    Notes
    -----
    Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
    ``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
    least-squares spline approximation is not implemented yet.

    When actually performing the interpolation, the requested `v` values must
    lie within the same length 2pi interval that the original `v` values were
    chosen from.

    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f

    Examples
    --------
    Suppose we have global data on a coarse grid

    >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
    >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
    >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
    ...               np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T

    We want to interpolate it to a global one-degree grid

    >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
    >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
    >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)

    We need to set up the interpolator object

    >>> from scipy.interpolate import RectSphereBivariateSpline
    >>> lut = RectSphereBivariateSpline(lats, lons, data)

    Finally we interpolate the data. The `RectSphereBivariateSpline` object
    only takes 1-D arrays as input, therefore we need to do some reshaping.

    >>> data_interp = lut.ev(new_lats.ravel(),
    ...                      new_lons.ravel()).reshape((360, 180)).T

    Looking at the original and the interpolated data, one can see that the
    interpolant reproduces the original data very well:

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(211)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(212)
    >>> ax2.imshow(data_interp, interpolation='nearest')
    >>> plt.show()

    Choosing the optimal value of ``s`` can be a delicate task. Recommended
    values for ``s`` depend on the accuracy of the data values. If the user
    has an idea of the statistical errors on the data, she can also find a
    proper estimate for ``s``. By assuming that, if she specifies the
    right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
    reproduces the function underlying the data, she can evaluate
    ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
    For example, if she knows that the statistical errors on her
    ``r(i,j)``-values are not greater than 0.1, she may expect that a good
    ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.

    If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
    be determined by trial and error. The best is then to start with a very
    large value of ``s`` (to determine the least-squares polynomial and the
    corresponding upper bound ``fp0`` for ``s``) and then to progressively
    decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
    ``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
    shows more detail) to obtain closer fits.

    The interpolation results for different values of ``s`` give some insight
    into this process:

    >>> fig2 = plt.figure()
    >>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in range(len(s)):
    ...     lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
    ...     data_interp = lut.ev(new_lats.ravel(),
    ...                          new_lons.ravel()).reshape((360, 180)).T
    ...     ax = fig2.add_subplot(2, 2, ii+1)
    ...     ax.imshow(data_interp, interpolation='nearest')
    ...     ax.set_title("s = %g" % s[ii])
    >>> plt.show()

    """

    def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
                 pole_exact=False, pole_flat=False):
        # iopt drives dfitpack.regrid_smth_spher: iopt[0] = 0 selects the
        # smoothing-spline mode; iopt[1:] request continuity at the poles.
        iopt = np.array([0, 0, 0], dtype=dfitpack_int)
        # ider describes the pole data: ider[0]/ider[2] flag the value at
        # u=0 / u=pi (-1: no value supplied, 0: ordinary data value,
        # 1: exact value), while ider[1]/ider[3] request vanishing
        # derivatives there.
        ider = np.array([-1, 0, -1, 0], dtype=dfitpack_int)
        # Normalize scalar pole options into (u=0, u=pi) pairs.
        if pole_values is None:
            pole_values = (None, None)
        elif isinstance(pole_values, (float, np.float32, np.float64)):
            pole_values = (pole_values, pole_values)
        if isinstance(pole_continuity, bool):
            pole_continuity = (pole_continuity, pole_continuity)
        if isinstance(pole_exact, bool):
            pole_exact = (pole_exact, pole_exact)
        if isinstance(pole_flat, bool):
            pole_flat = (pole_flat, pole_flat)

        r0, r1 = pole_values
        iopt[1:] = pole_continuity
        # Booleans coerce to 0/1 here, matching the FITPACK flag encoding.
        if r0 is None:
            ider[0] = -1
        else:
            ider[0] = pole_exact[0]

        if r1 is None:
            ider[2] = -1
        else:
            ider[2] = pole_exact[1]

        ider[1], ider[3] = pole_flat

        u, v = np.ravel(u), np.ravel(v)
        r = np.asarray(r)

        # Input validation; the valid ranges are documented on the class.
        if not ((0.0 <= u).all() and (u <= np.pi).all()):
            raise ValueError('u should be between [0, pi]')
        if not -np.pi <= v[0] < np.pi:
            raise ValueError('v[0] should be between [-pi, pi)')
        if not v[-1] <= v[0] + 2*np.pi:
            raise ValueError('v[-1] should be v[0] + 2pi or less ')

        if not np.all(np.diff(u) > 0.0):
            raise ValueError('u must be strictly increasing')
        if not np.all(np.diff(v) > 0.0):
            raise ValueError('v must be strictly increasing')

        if not u.size == r.shape[0]:
            raise ValueError('u dimension of r must have same number of '
                             'elements as u')
        if not v.size == r.shape[1]:
            raise ValueError('v dimension of r must have same number of '
                             'elements as v')

        # Flat poles (vanishing derivative) only make sense when continuity
        # is requested at that pole.
        if pole_continuity[1] is False and pole_flat[1] is True:
            raise ValueError('if pole_continuity is False, so must be '
                             'pole_flat')
        if pole_continuity[0] is False and pole_flat[0] is True:
            raise ValueError('if pole_continuity is False, so must be '
                             'pole_flat')

        if not s >= 0.0:
            raise ValueError('s should be positive')

        r = np.ravel(r)
        nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
                                                                u.copy(),
                                                                v.copy(),
                                                                r.copy(),
                                                                r0, r1, s)

        # ier in (0, -1, -2) signals success (possibly with a warning).
        if ier not in [0, -1, -2]:
            msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)

        self.fp = fp
        # Trim FITPACK's over-allocated outputs; a bicubic spline has
        # (nu - 4) * (nv - 4) coefficients.
        self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
        self.degrees = (3, 3)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpnd.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpnd.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..e50dd07
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpnd.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpnd_info.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpnd_info.py
new file mode 100644
index 0000000..a96def4
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpnd_info.py
@@ -0,0 +1,37 @@
+"""
+Here we perform some symbolic computations required for the N-D
+interpolation routines in `interpnd.pyx`.
+
+"""
+from sympy import symbols, binomial, Matrix # type: ignore[import]
+
+
def _estimate_gradients_2d_global():
    """Symbolically derive the 2x2 system used by the global gradient
    estimator in ``interpnd.pyx`` and print its pieces.

    A cubic Bezier segment ``w(x)`` on ``[0, 1]`` is built with endpoint
    values ``f1``, ``f2``; the control points are chosen so that
    ``w'(0) = df1`` and ``w'(1) = -df2``. The squared second derivative
    (a curvature penalty) is integrated over the segment, and the
    quadratic form in ``(df1, df2)`` is decomposed into a matrix ``A``
    and right-hand side ``B``; ``A.inv() * B`` is the printed solution.
    """
    # Symbolic endpoint values/derivatives and the curve parameter x.
    f1, f2, df1, df2, x = symbols(['f1', 'f2', 'df1', 'df2', 'x'])
    # Bezier control values: w(0)=f1, w(1)=f2, with the derivative
    # parameters folded into the two interior control points.
    c = [f1, (df1 + 3*f1)/3, (df2 + 3*f2)/3, f2]

    # Bernstein-basis expansion of the cubic segment.
    w = 0
    for k in range(4):
        w += binomial(3, k) * c[k] * x**k*(1-x)**(3-k)

    # Curvature penalty: integral of w''(x)**2 over [0, 1].
    wpp = w.diff(x, 2).expand()
    intwpp2 = (wpp**2).integrate((x, 0, 1)).expand()

    # Quadratic-form matrix in (df1, df2), read off the coefficients.
    A = Matrix([[intwpp2.coeff(df1**2), intwpp2.coeff(df1*df2)/2],
                [intwpp2.coeff(df1*df2)/2, intwpp2.coeff(df2**2)]])

    # Linear part (halved so the minimizer solves A x = -B form below).
    B = Matrix([[intwpp2.coeff(df1).subs(df2, 0)],
                [intwpp2.coeff(df2).subs(df1, 0)]]) / 2

    print("A")
    print(A)
    print("B")
    print(B)
    print("solution")
    print(A.inv() * B)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpolate.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpolate.py
new file mode 100644
index 0000000..3fad185
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/interpolate.py
@@ -0,0 +1,2767 @@
# Names exported by ``from scipy.interpolate.interpolate import *``; every
# other module-level name is a private implementation detail.
__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
           'RegularGridInterpolator', 'interpn']
+
+import itertools
+import warnings
+
+import numpy as np
+from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
+ ravel, poly1d, asarray, intp)
+
+import scipy.special as spec
+from scipy.special import comb
+from scipy._lib._util import prod
+
+from . import fitpack
+from . import dfitpack
+from . import _fitpack
+from .polyint import _Interpolator1D
+from . import _ppoly
+from .fitpack2 import RectBivariateSpline
+from .interpnd import _ndim_coords_from_arrays
+from ._bsplines import make_interp_spline, BSpline
+
+
def lagrange(x, w):
    r"""
    Return a Lagrange interpolating polynomial.

    Constructs the unique polynomial of degree ``len(x) - 1`` passing
    through the points ``(x[j], w[j])``, via the classical Lagrange basis.

    Warning: this construction is numerically unstable; beyond roughly 20
    points the result is unreliable even for well-chosen nodes.

    Parameters
    ----------
    x : array_like
        x-coordinates of the data points.
    w : array_like
        y-coordinates of the data points, i.e., ``f(x)``.

    Returns
    -------
    lagrange : `numpy.poly1d` instance
        The Lagrange interpolating polynomial.

    Examples
    --------
    >>> from scipy.interpolate import lagrange
    >>> poly = lagrange([0, 1, 2], [0, 1, 8])   # samples of x**3
    >>> from numpy.polynomial.polynomial import Polynomial
    >>> Polynomial(poly).coef
    array([ 3., -2., 0.])

    """
    node_count = len(x)
    result = poly1d(0.0)
    # Sum the scaled basis polynomials: each term is w[j] * L_j(x), where
    # L_j is 1 at x[j] and 0 at every other node.
    for j in range(node_count):
        term = poly1d(w[j])
        xj = x[j]
        for k in range(node_count):
            if k != j:
                term *= poly1d([1.0, -x[k]]) / (xj - x[k])
        result += term
    return result
+
+
+# !! Need to find argument for keeping initialize. If it isn't
+# !! found, get rid of it!
+
+
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=None)

    Interpolate over a 2-D grid.

    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.

    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.

    Note that calling `interp2d` with NaNs present in input values results in
    undefined behaviour.

    Methods
    -------
    __call__

    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.

        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::

          >>> x = [0,1,2];  y = [0,3]; z = [[1,2,3], [4,5,6]]

        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::

          >>> x = [0,1,2,0,1,2];  y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]

        If `x` and `y` are multidimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multidimensional array, it is flattened before use. The
        length of a flattened `z` array is either
        len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
        or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated via nearest-neighbor extrapolation.

    See Also
    --------
    RectBivariateSpline :
        Much faster 2-D interpolation if your input data is on a grid
    bisplrep, bisplev :
        Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : 1-D version of this function

    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.

    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.

    Examples
    --------
    Construct a 2-D grid and interpolate on it:

    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')

    Now use the obtained interpolation function and plot the result:

    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()
    """

    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)

        # Grid mode: one z value per (x, y) pair of the cartesian product;
        # otherwise x, y, z are treated as parallel 1-D point lists.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
            # FITPACK requires ascending coordinates; sort x and y and
            # permute the matching axis of z to stay consistent.
            if not np.all(x[1:] >= x[:-1]):
                j = np.argsort(x)
                x = x[j]
                z = z[:, j]
            if not np.all(y[1:] >= y[:-1]):
                j = np.argsort(y)
                y = y[j]
                z = z[j, :]
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")

        interpolation_types = {'linear': 1, 'cubic': 3, 'quintic': 5}
        try:
            # Same spline degree along both axes.
            kx = ky = interpolation_types[kind]
        except KeyError as e:
            raise ValueError(
                f"Unsupported interpolation type {repr(kind)}, must be "
                f"either of {', '.join(map(repr, interpolation_types))}."
            ) from e

        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            # Gridded data: use the dedicated gridded fitting routine and
            # trim its over-allocated knot/coefficient buffers to size.
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)

        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]

        # Cached data extents, used for bounds checking in __call__.
        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)

    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.

        Parameters
        ----------
        x : 1-D array
            x-coordinates of the mesh on which to interpolate.
        y : 1-D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.

        Returns
        -------
        z : 2-D array with shape (len(y), len(x))
            The interpolated values.
        """
        x = atleast_1d(x)
        y = atleast_1d(y)

        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")

        if not assume_sorted:
            x = np.sort(x, kind="mergesort")
            y = np.sort(y, kind="mergesort")

        # Out-of-range bookkeeping is only needed when we either raise on
        # it or patch the output with fill_value afterwards.
        if self.bounds_error or self.fill_value is not None:
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)

            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)

        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))

        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # bisplev produces (len(x), len(y)); callers expect (len(y), len(x)).
        z = transpose(z)

        if self.fill_value is not None:
            # Overwrite the out-of-range columns/rows with the fill value.
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value

        # Collapse a single-row result to 1-D, matching historical output.
        if len(z) == 1:
            z = z[0]
        return array(z)
+
+
+def _check_broadcast_up_to(arr_from, shape_to, name):
+ """Helper to check that arr_from broadcasts up to shape_to"""
+ shape_from = arr_from.shape
+ if len(shape_to) >= len(shape_from):
+ for t, f in zip(shape_to[::-1], shape_from[::-1]):
+ if f != 1 and f != t:
+ break
+ else: # all checks pass, do the upcasting that we need later
+ if arr_from.size != 1 and arr_from.shape != shape_to:
+ arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
+ return arr_from.ravel()
+ # at least one check failed
+ raise ValueError('%s argument must be able to broadcast up '
+ 'to shape %s but had shape %s'
+ % (name, shape_to, shape_from))
+
+
+def _do_extrapolate(fill_value):
+ """Helper to check if fill_value == "extrapolate" without warnings"""
+ return (isinstance(fill_value, str) and
+ fill_value == 'extrapolate')
+
+
+class interp1d(_Interpolator1D):
+ """
+ Interpolate a 1-D function.
+
+ `x` and `y` are arrays of values used to approximate some function f:
+ ``y = f(x)``. This class returns a function whose call method uses
+ interpolation to find the value of new points.
+
+ Parameters
+ ----------
+ x : (N,) array_like
+ A 1-D array of real values.
+ y : (...,N,...) array_like
+ A N-D array of real values. The length of `y` along the interpolation
+ axis must be equal to the length of `x`.
+ kind : str or int, optional
+ Specifies the kind of interpolation as a string or as an integer
+ specifying the order of the spline interpolator to use.
+ The string has to be one of 'linear', 'nearest', 'nearest-up', 'zero',
+ 'slinear', 'quadratic', 'cubic', 'previous', or 'next'. 'zero',
+ 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
+ zeroth, first, second or third order; 'previous' and 'next' simply
+ return the previous or next value of the point; 'nearest-up' and
+ 'nearest' differ when interpolating half-integers (e.g. 0.5, 1.5)
+ in that 'nearest-up' rounds up and 'nearest' rounds down. Default
+ is 'linear'.
+ axis : int, optional
+ Specifies the axis of `y` along which to interpolate.
+ Interpolation defaults to the last axis of `y`.
+ copy : bool, optional
+ If True, the class makes internal copies of x and y.
+ If False, references to `x` and `y` are used. The default is to copy.
+ bounds_error : bool, optional
+ If True, a ValueError is raised any time interpolation is attempted on
+ a value outside of the range of x (where extrapolation is
+ necessary). If False, out of bounds values are assigned `fill_value`.
+ By default, an error is raised unless ``fill_value="extrapolate"``.
+ fill_value : array-like or (array-like, array_like) or "extrapolate", optional
+ - if a ndarray (or float), this value will be used to fill in for
+ requested points outside of the data range. If not provided, then
+ the default is NaN. The array-like must broadcast properly to the
+ dimensions of the non-interpolation axes.
+ - If a two-element tuple, then the first element is used as a
+ fill value for ``x_new < x[0]`` and the second element is used for
+ ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
+ list or ndarray, regardless of shape) is taken to be a single
+ array-like argument meant to be used for both bounds as
+ ``below, above = fill_value, fill_value``.
+
+ .. versionadded:: 0.17.0
+ - If "extrapolate", then points outside the data range will be
+ extrapolated.
+
+ .. versionadded:: 0.17.0
+ assume_sorted : bool, optional
+ If False, values of `x` can be in any order and they are sorted first.
+ If True, `x` has to be an array of monotonically increasing values.
+
+ Attributes
+ ----------
+ fill_value
+
+ Methods
+ -------
+ __call__
+
+ See Also
+ --------
+ splrep, splev
+ Spline interpolation/smoothing based on FITPACK.
+ UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
+ interp2d : 2-D interpolation
+
+ Notes
+ -----
+ Calling `interp1d` with NaNs present in input values results in
+ undefined behaviour.
+
+ Input values `x` and `y` must be convertible to `float` values like
+ `int` or `float`.
+
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy import interpolate
+ >>> x = np.arange(0, 10)
+ >>> y = np.exp(-x/3.0)
+ >>> f = interpolate.interp1d(x, y)
+
+ >>> xnew = np.arange(0, 9, 0.1)
+ >>> ynew = f(xnew) # use interpolation function returned by `interp1d`
+ >>> plt.plot(x, y, 'o', xnew, ynew, '-')
+ >>> plt.show()
+ """
+
    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=None, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1-D linear interpolation class.

        Validates and stores the data, then selects and stores the
        *unbound* evaluation method matching `kind` (see the comment
        below about garbage collection).
        """
        _Interpolator1D.__init__(self, x, y, axis=axis)

        self.bounds_error = bounds_error  # used by fill_value setter
        self.copy = copy

        # Normalize `kind`: named spline kinds and integer orders collapse
        # onto a generic 'spline' kind with an explicit `order`.
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'zero': 0, 'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest', 'nearest-up', 'previous',
                          'next'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)
        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)

        if not assume_sorted:
            # Stable sort so equal x values keep their original y order.
            ind = np.argsort(x, kind="mergesort")
            x = x[ind]
            y = np.take(y, ind, axis=axis)

        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")

        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)

        # Backward compatibility
        self.axis = axis % y.ndim

        # Interpolation goes internally along the first axis
        self.y = y
        self._y = self._reshape_yi(self.y)
        self.x = x
        del y, x  # clean up namespace to prevent misuse; use attributes
        self._kind = kind
        self.fill_value = fill_value  # calls the setter, can modify bounds_err

        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection. See: https://docs.python.org/reference/datamodel.html
        if kind in ('linear', 'nearest', 'nearest-up', 'previous', 'next'):
            # Make a "view" of the y array that is rotated to the interpolation
            # axis.
            minval = 2
            if kind == 'nearest':
                # Do division before addition to prevent possible integer
                # overflow
                self._side = 'left'
                self.x_bds = self.x / 2.0
                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]

                self._call = self.__class__._call_nearest
            elif kind == 'nearest-up':
                # Do division before addition to prevent possible integer
                # overflow
                self._side = 'right'
                self.x_bds = self.x / 2.0
                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]

                self._call = self.__class__._call_nearest
            elif kind == 'previous':
                # Side for np.searchsorted and index for clipping
                self._side = 'left'
                self._ind = 0
                # Move x by one floating point value to the left
                self._x_shift = np.nextafter(self.x, -np.inf)
                self._call = self.__class__._call_previousnext
            elif kind == 'next':
                self._side = 'right'
                self._ind = 1
                # Move x by one floating point value to the right
                self._x_shift = np.nextafter(self.x, np.inf)
                self._call = self.__class__._call_previousnext
            else:
                # Check if we can delegate to numpy.interp (2x-10x faster).
                cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
                cond = cond and self.y.ndim == 1
                cond = cond and not _do_extrapolate(fill_value)

                if cond:
                    self._call = self.__class__._call_linear_np
                else:
                    self._call = self.__class__._call_linear
        else:
            minval = order + 1

            rewrite_nan = False
            xx, yy = self.x, self._y
            if order > 1:
                # Quadratic or cubic spline. If input contains even a single
                # nan, then the output is all nans. We cannot just feed data
                # with nans to make_interp_spline because it calls LAPACK.
                # So, we make up a bogus x and y with no nans and use it
                # to get the correct shape of the output, which we then fill
                # with nans.
                # For slinear or zero order spline, we just pass nans through.
                mask = np.isnan(self.x)
                if mask.any():
                    sx = self.x[~mask]
                    if sx.size == 0:
                        raise ValueError("`x` array is all-nan")
                    xx = np.linspace(np.nanmin(self.x),
                                     np.nanmax(self.x),
                                     len(self.x))
                    rewrite_nan = True
                if np.isnan(self._y).any():
                    yy = np.ones_like(self._y)
                    rewrite_nan = True

            self._spline = make_interp_spline(xx, yy, k=order,
                                              check_finite=False)
            if rewrite_nan:
                self._call = self.__class__._call_nan_spline
            else:
                self._call = self.__class__._call_spline

        if len(self.x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)
+
+ @property
+ def fill_value(self):
+ """The fill value."""
+ # backwards compat: mimic a public attribute
+ return self._fill_value_orig
+
    @fill_value.setter
    def fill_value(self, fill_value):
        # extrapolation only works for nearest neighbor and linear methods
        if _do_extrapolate(fill_value):
            if self.bounds_error:
                raise ValueError("Cannot extrapolate and raise "
                                 "at the same time.")
            self.bounds_error = False
            self._extrapolate = True
        else:
            # Shape of y with the interpolation axis removed; the fill
            # value(s) must broadcast against this shape.
            broadcast_shape = (self.y.shape[:self.axis] +
                               self.y.shape[self.axis + 1:])
            if len(broadcast_shape) == 0:
                broadcast_shape = (1,)
            # it's either a pair (_below_range, _above_range) or a single value
            # for both above and below range
            if isinstance(fill_value, tuple) and len(fill_value) == 2:
                below_above = [np.asarray(fill_value[0]),
                               np.asarray(fill_value[1])]
                names = ('fill_value (below)', 'fill_value (above)')
                for ii in range(2):
                    below_above[ii] = _check_broadcast_up_to(
                        below_above[ii], broadcast_shape, names[ii])
            else:
                fill_value = np.asarray(fill_value)
                below_above = [_check_broadcast_up_to(
                    fill_value, broadcast_shape, 'fill_value')] * 2
            self._fill_value_below, self._fill_value_above = below_above
            self._extrapolate = False
            if self.bounds_error is None:
                self.bounds_error = True
        # backwards compat: fill_value was a public attr; make it writeable
        self._fill_value_orig = fill_value
+
+ def _call_linear_np(self, x_new):
+ # Note that out-of-bounds values are taken care of in self._evaluate
+ return np.interp(x_new, self.x, self.y)
+
+ def _call_linear(self, x_new):
+ # 2. Find where in the original data, the values to interpolate
+ # would be inserted.
+ # Note: If x_new[n] == x[m], then m is returned by searchsorted.
+ x_new_indices = searchsorted(self.x, x_new)
+
+ # 3. Clip x_new_indices so that they are within the range of
+ # self.x indices and at least 1. Removes mis-interpolation
+ # of x_new[n] = x[0]
+ x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
+
+ # 4. Calculate the slope of regions that each x_new value falls in.
+ lo = x_new_indices - 1
+ hi = x_new_indices
+
+ x_lo = self.x[lo]
+ x_hi = self.x[hi]
+ y_lo = self._y[lo]
+ y_hi = self._y[hi]
+
+ # Note that the following two expressions rely on the specifics of the
+ # broadcasting semantics.
+ slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
+
+ # 5. Calculate the actual value for each entry in x_new.
+ y_new = slope*(x_new - x_lo)[:, None] + y_lo
+
+ return y_new
+
+ def _call_nearest(self, x_new):
+ """ Find nearest neighbor interpolated y_new = f(x_new)."""
+
+ # 2. Find where in the averaged data the values to interpolate
+ # would be inserted.
+ # Note: use side='left' (right) to searchsorted() to define the
+ # halfway point to be nearest to the left (right) neighbor
+ x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)
+
+ # 3. Clip x_new_indices so that they are within the range of x indices.
+ x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
+
+ # 4. Calculate the actual value for each entry in x_new.
+ y_new = self._y[x_new_indices]
+
+ return y_new
+
+ def _call_previousnext(self, x_new):
+ """Use previous/next neighbor of x_new, y_new = f(x_new)."""
+
+ # 1. Get index of left/right value
+ x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
+
+ # 2. Clip x_new_indices so that they are within the range of x indices.
+ x_new_indices = x_new_indices.clip(1-self._ind,
+ len(self.x)-self._ind).astype(intp)
+
+ # 3. Calculate the actual value for each entry in x_new.
+ y_new = self._y[x_new_indices+self._ind-1]
+
+ return y_new
+
+ def _call_spline(self, x_new):
+ return self._spline(x_new)
+
+ def _call_nan_spline(self, x_new):
+ out = self._spline(x_new)
+ out[...] = np.nan
+ return out
+
+ def _evaluate(self, x_new):
+ # 1. Handle values in x_new that are outside of x. Throw error,
+ # or return a list of mask array indicating the outofbounds values.
+ # The behavior is set by the bounds_error variable.
+ x_new = asarray(x_new)
+ y_new = self._call(self, x_new)
+ if not self._extrapolate:
+ below_bounds, above_bounds = self._check_bounds(x_new)
+ if len(y_new) > 0:
+ # Note fill_value must be broadcast up to the proper size
+ # and flattened to work here
+ y_new[below_bounds] = self._fill_value_below
+ y_new[above_bounds] = self._fill_value_above
+ return y_new
+
    def _check_bounds(self, x_new):
        """Check the inputs for being in the bounds of the interpolated data.

        Parameters
        ----------
        x_new : array

        Returns
        -------
        below_bounds : bool array
            Mask on `x_new` of values below the interpolation range.
        above_bounds : bool array
            Mask on `x_new` of values above the interpolation range.

        Raises
        ------
        ValueError
            If ``self.bounds_error`` is True and any value is out of range.
        """

        # If self.bounds_error is True, we raise an error if any x_new values
        # fall outside the range of x. Otherwise, we return an array indicating
        # which values are outside the boundary region.
        below_bounds = x_new < self.x[0]
        above_bounds = x_new > self.x[-1]

        # !! Could provide more information about which values are out of bounds
        if self.bounds_error and below_bounds.any():
            raise ValueError("A value in x_new is below the interpolation "
                             "range.")
        if self.bounds_error and above_bounds.any():
            raise ValueError("A value in x_new is above the interpolation "
                             "range.")

        # !! Should we emit a warning if some values are out of bounds?
        # !! matlab does not.
        return below_bounds, above_bounds
+
+
class _PPolyBase(object):
    """Base class for piecewise polynomials.

    Handles storage, validation and axis bookkeeping; subclasses supply
    ``_evaluate`` for a concrete polynomial basis.
    """
    __slots__ = ('c', 'x', 'extrapolate', 'axis')

    def __init__(self, c, x, extrapolate=None, axis=0):
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)

        # 'periodic' is kept as the string; anything else truthy/falsy
        # collapses to a plain bool.
        if extrapolate is None:
            extrapolate = True
        elif extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        self.extrapolate = extrapolate

        if self.c.ndim < 2:
            raise ValueError("Coefficients array must be at least "
                             "2-dimensional.")

        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("axis=%s must be between 0 and %s" %
                             (axis, self.c.ndim-1))

        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis !=0 means that we have c.shape (..., k, m, ...)
            #                                                ^
            #                                               axis
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis+1)
            self.c = np.rollaxis(self.c, axis+1)

        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size-1:
            raise ValueError("number of coefficients != len(x)-1")
        dx = np.diff(self.x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` must be strictly increasing or decreasing.")

        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    def _get_dtype(self, dtype):
        # Coefficients are stored as either complex or float, never as
        # integer types, so the Cython kernels have a fixed dtype to target.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        ``c`` and ``x`` must be arrays of the correct shape and type. The
        ``c`` array can only be of dtypes float and complex, and ``x``
        array must have dtype float.
        """
        # Bypasses __init__ entirely: no validation, no axis rolling.
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user. The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()

    def extend(self, c, x, right=None):
        """
        Add additional breakpoints and coefficients to the polynomial.

        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals. Note that
            the first additional interval will be formed using one of the
            ``self.x`` end points.
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted in the same order as
            ``self.x`` and either to the right or to the left of the current
            breakpoints.
        right
            Deprecated argument. Has no effect.

            .. deprecated:: 0.19
        """
        if right is not None:
            warnings.warn("`right` is deprecated and will be removed.")

        c = np.asarray(c)
        x = np.asarray(x)

        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("Shapes of x {} and c {} are incompatible"
                             .format(x.shape, c.shape))
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("Shapes of c {} and self.c {} are incompatible"
                             .format(c.shape, self.c.shape))

        if c.size == 0:
            return

        dx = np.diff(x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` is not sorted.")

        # The two branches mirror each other for increasing vs decreasing
        # breakpoint order: decide whether the new data goes before or
        # after the existing breakpoints.
        if self.x[-1] >= self.x[0]:
            if not x[-1] >= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")

            if x[0] >= self.x[-1]:
                action = 'append'
            elif x[-1] <= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        else:
            if not x[-1] <= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")

            if x[0] <= self.x[-1]:
                action = 'append'
            elif x[-1] >= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")

        dtype = self._get_dtype(c.dtype)

        # Pad the lower-order coefficient block with leading zeros so both
        # pieces share the same (maximal) polynomial order k2.
        k2 = max(c.shape[0], self.c.shape[0])
        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
                      dtype=dtype)

        if action == 'append':
            c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
            c2[k2-c.shape[0]:, self.c.shape[1]:] = c
            self.x = np.r_[self.x, x]
        elif action == 'prepend':
            c2[k2-self.c.shape[0]:, :c.shape[1]] = c
            c2[k2-c.shape[0]:, c.shape[1]:] = self.c
            self.x = np.r_[x, self.x]

        self.c = c2

    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative.

        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)

        # With periodic extrapolation we map x to the segment
        # [self.x[0], self.x[-1]].
        if extrapolate == 'periodic':
            x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
            extrapolate = False

        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
+
+
class PPoly(_PPolyBase):
    """
    Piecewise polynomial in terms of coefficients and breakpoints

    The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
    local power basis::

        S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))

    where ``k`` is the degree of the polynomial.

    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals.
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. Must be sorted in either increasing or
        decreasing order.
    extrapolate : bool or 'periodic', optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. Default is True.
    axis : int, optional
        Interpolation axis. Default is zero.

    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-D array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    solve
    roots
    extend
    from_spline
    from_bernstein_basis
    construct_fast

    See also
    --------
    BPoly : piecewise polynomials in the Bernstein basis

    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable. Precision problems can start to appear for orders
    larger than 20-30.
    """
    def _evaluate(self, x, nu, extrapolate, out):
        # Trailing dimensions of c are flattened for the Cython kernel.
        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                        self.x, x, nu, bool(extrapolate), out)

    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e., compute the
            first derivative. If negative, the antiderivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k - nu representing the
            derivative of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if nu < 0:
            return self.antiderivative(-nu)

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            c2 = self.c[:-nu, :].copy()

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)

    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e., compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + nu representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order nu-1, up to floating point
        rounding error.

        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)

        # Raise the order by nu: prepend nu zero rows of coefficients.
        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
                     dtype=self.c.dtype)
        c[:-nu] = self.c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)

        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        # construct a compatible polynomial
        return self.construct_fast(c, self.x, extrapolate, self.axis)

    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Swap integration bounds if needed
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1

        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        # Compute the integral.
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)

            if n_periods > 0:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xe, False, out=range_int)
                range_int *= n_periods
            else:
                range_int.fill(0)

            # Map a to [xs, xe], b is always a + left.
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            remainder_int = np.empty_like(range_int)
            if b <= xe:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, b, False, out=remainder_int)
                range_int += remainder_int
            else:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, xe, False, out=remainder_int)
                range_int += remainder_int

                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xs + left + a - xe, False, out=remainder_int)
                range_int += remainder_int
        else:
            _ppoly.integrate(
                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                self.x, a, b, bool(extrapolate), out=range_int)

        # Return
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])

    def solve(self, y=0., discontinuity=True, extrapolate=None):
        """
        Find real solutions of the equation ``pp(x) == y``.

        Parameters
        ----------
        y : float, optional
            Right-hand side. Default is zero.
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        Notes
        -----
        This routine works only on real-valued polynomials.

        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.

        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.

        Examples
        --------

        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:

        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.solve()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        self._ensure_c_contiguous()

        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")

        y = float(y)
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                              self.x, y, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            return r[0]
        else:
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in NumPy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root

            return r2.reshape(self.c.shape[2:])

    def roots(self, discontinuity=True, extrapolate=None):
        """
        Find real roots of the piecewise polynomial.

        Parameters
        ----------
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        See Also
        --------
        PPoly.solve
        """
        return self.solve(0, discontinuity, extrapolate)

    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        """
        Construct a piecewise polynomial from a spline

        Parameters
        ----------
        tck
            A spline, as returned by `splrep` or a BSpline object.
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        if isinstance(tck, BSpline):
            t, c, k = tck.tck
            if extrapolate is None:
                extrapolate = tck.extrapolate
        else:
            t, c, k = tck

        # Row k - m of cvals holds the m-th derivative at the left knot of
        # each interval, divided by m! (Taylor coefficients).
        cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
        for m in range(k, -1, -1):
            y = fitpack.splev(t[:-1], tck, der=m)
            cvals[k - m, :] = y/spec.gamma(m+1)

        return cls.construct_fast(cvals, t, extrapolate)

    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.

        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        if not isinstance(bp, BPoly):
            raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
                            "Got %s instead." % type(bp))

        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1  # polynomial order

        rest = (None,)*(bp.c.ndim-2)

        # Expand each Bernstein basis function into the power basis and
        # accumulate the contributions.
        c = np.zeros_like(bp.c)
        for a in range(k+1):
            factor = (-1)**a * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s

        if extrapolate is None:
            extrapolate = bp.extrapolate

        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
+
+
+class BPoly(_PPolyBase):
+ """Piecewise polynomial in terms of coefficients and breakpoints.
+
+ The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
+ Bernstein polynomial basis::
+
+ S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
+
+ where ``k`` is the degree of the polynomial, and::
+
+ b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
+
+ with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
+ coefficient.
+
+ Parameters
+ ----------
+ c : ndarray, shape (k, m, ...)
+ Polynomial coefficients, order `k` and `m` intervals
+ x : ndarray, shape (m+1,)
+ Polynomial breakpoints. Must be sorted in either increasing or
+ decreasing order.
+ extrapolate : bool or 'periodic', optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. Default is True.
+ axis : int, optional
+ Interpolation axis. Default is zero.
+
+ Attributes
+ ----------
+ x : ndarray
+ Breakpoints.
+ c : ndarray
+ Coefficients of the polynomials. They are reshaped
+ to a 3-D array with the last dimension representing
+ the trailing dimensions of the original coefficient array.
+ axis : int
+ Interpolation axis.
+
+ Methods
+ -------
+ __call__
+ extend
+ derivative
+ antiderivative
+ integrate
+ construct_fast
+ from_power_basis
+ from_derivatives
+
+ See also
+ --------
+ PPoly : piecewise polynomials in the power basis
+
+ Notes
+ -----
+ Properties of Bernstein polynomials are well documented in the literature,
+ see for example [1]_ [2]_ [3]_.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
+
+ .. [2] Kenneth I. Joy, Bernstein polynomials,
+ http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
+
+ .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
+ vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
+
+ Examples
+ --------
+ >>> from scipy.interpolate import BPoly
+ >>> x = [0, 1]
+ >>> c = [[1], [2], [3]]
+ >>> bp = BPoly(c, x)
+
+ This creates a 2nd order polynomial
+
+ .. math::
+
+ B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
+ = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
+
+ """
+
+ def _evaluate(self, x, nu, extrapolate, out):
+ _ppoly.evaluate_bernstein(
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+ self.x, x, nu, bool(extrapolate), out)
+
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e., compute the
            first derivative. If negative, the antiderivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k - nu representing the derivative of
            this polynomial.

        """
        if nu < 0:
            return self.antiderivative(-nu)

        # Higher derivatives are computed one order at a time.
        if nu > 1:
            bp = self
            for k in range(nu):
                bp = bp.derivative()
            return bp

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # For a polynomial
            #    B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
            # we use the fact that
            #   b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
            # which leads to
            #   B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
            #
            # finally, for an interval [y, y + dy] with dy != 1,
            # we need to correct for an extra power of dy

            rest = (None,)*(self.c.ndim-2)

            k = self.c.shape[0] - 1
            dx = np.diff(self.x)[(None, slice(None))+rest]
            c2 = k * np.diff(self.c, axis=0) / dx

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
+
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e., compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k + nu representing the
            antiderivative of this polynomial.

        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)

        # Higher antiderivatives are computed one order at a time.
        if nu > 1:
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp

        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)

        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]

        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the
        # breakpoint). Finally, use the fact that BPs form a partition of unity.
        c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]

        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        return self.construct_fast(c2, x, extrapolate, axis=self.axis)
+
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs. If 'periodic', periodic
            extrapolation is used. If None (default), use `self.extrapolate`.

        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]

        """
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        ib = self.antiderivative()
        if extrapolate is None:
            extrapolate = self.extrapolate

        # ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in antiderivative() call.
        if extrapolate != 'periodic':
            ib.extrapolate = extrapolate

        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            # For simplicity and clarity convert to a <= b case.
            if a <= b:
                sign = 1
            else:
                a, b = b, a
                sign = -1

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)
            res = n_periods * (ib(xe) - ib(xs))

            # Map a and b to [xs, xe].
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            if b <= xe:
                res += ib(b) - ib(a)
            else:
                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)

            return sign * res
        else:
            return ib(b) - ib(a)
+
+ def extend(self, c, x, right=None):
+ k = max(self.c.shape[0], c.shape[0])
+ self.c = self._raise_degree(self.c, k - self.c.shape[0])
+ c = self._raise_degree(c, k - c.shape[0])
+ return _PPolyBase.extend(self, c, x, right)
+ extend.__doc__ = _PPolyBase.extend.__doc__
+
+ @classmethod
+ def from_power_basis(cls, pp, extrapolate=None):
+ """
+ Construct a piecewise polynomial in Bernstein basis
+ from a power basis polynomial.
+
+ Parameters
+ ----------
+ pp : PPoly
+ A piecewise polynomial in the power basis
+ extrapolate : bool or 'periodic', optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs.
+ If 'periodic', periodic extrapolation is used. Default is True.
+ """
+ if not isinstance(pp, PPoly):
+ raise TypeError(".from_power_basis only accepts PPoly instances. "
+ "Got %s instead." % type(pp))
+
+ dx = np.diff(pp.x)
+ k = pp.c.shape[0] - 1 # polynomial order
+
+ rest = (None,)*(pp.c.ndim-2)
+
+ c = np.zeros_like(pp.c)
+ for a in range(k+1):
+ factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
+ for j in range(k-a, k+1):
+ c[j] += factor * comb(j, k-a)
+
+ if extrapolate is None:
+ extrapolate = pp.extrapolate
+
+ return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
+
+ @classmethod
+ def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
+ """Construct a piecewise polynomial in the Bernstein basis,
+ compatible with the specified values and derivatives at breakpoints.
+
+ Parameters
+ ----------
+ xi : array_like
+ sorted 1-D array of x-coordinates
+ yi : array_like or list of array_likes
+ ``yi[i][j]`` is the ``j``th derivative known at ``xi[i]``
+ orders : None or int or array_like of ints. Default: None.
+ Specifies the degree of local polynomials. If not None, some
+ derivatives are ignored.
+ extrapolate : bool or 'periodic', optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs.
+ If 'periodic', periodic extrapolation is used. Default is True.
+
+ Notes
+ -----
+ If ``k`` derivatives are specified at a breakpoint ``x``, the
+ constructed polynomial is exactly ``k`` times continuously
+ differentiable at ``x``, unless the ``order`` is provided explicitly.
+ In the latter case, the smoothness of the polynomial at
+ the breakpoint is controlled by the ``order``.
+
+ Deduces the number of derivatives to match at each end
+ from ``order`` and the number of derivatives available. If
+ possible it uses the same number of derivatives from
+ each end; if the number is odd it tries to take the
+ extra one from y2. In any case if not enough derivatives
+ are available at one end or another it draws enough to
+ make up the total from the other end.
+
+ If the order is too high and not enough derivatives are available,
+ an exception is raised.
+
+ Examples
+ --------
+
+ >>> from scipy.interpolate import BPoly
+ >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
+
+ Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
+ such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
+
+ >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
+
+ Creates a piecewise polynomial `f(x)`, such that
+ `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
+ Based on the number of derivatives provided, the order of the
+ local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
+ Notice that no restriction is imposed on the derivatives at
+ ``x = 1`` and ``x = 2``.
+
+ Indeed, the explicit form of the polynomial is::
+
+ f(x) = | x * (1 - x), 0 <= x < 1
+ | 2 * (x - 1), 1 <= x <= 2
+
+ So that f'(1-0) = -1 and f'(1+0) = 2
+
+ """
+ xi = np.asarray(xi)
+ if len(xi) != len(yi):
+ raise ValueError("xi and yi need to have the same length")
+ if np.any(xi[1:] - xi[:1] <= 0):
+ raise ValueError("x coordinates are not in increasing order")
+
+ # number of intervals
+ m = len(xi) - 1
+
+ # global poly order is k-1, local orders are <=k and can vary
+ try:
+ k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
+ except TypeError as e:
+ raise ValueError(
+ "Using a 1-D array for y? Please .reshape(-1, 1)."
+ ) from e
+
+ if orders is None:
+ orders = [None] * m
+ else:
+ if isinstance(orders, (int, np.integer)):
+ orders = [orders] * m
+ k = max(k, max(orders))
+
+ if any(o <= 0 for o in orders):
+ raise ValueError("Orders must be positive.")
+
+ c = []
+ for i in range(m):
+ y1, y2 = yi[i], yi[i+1]
+ if orders[i] is None:
+ n1, n2 = len(y1), len(y2)
+ else:
+ n = orders[i]+1
+ n1 = min(n//2, len(y1))
+ n2 = min(n - n1, len(y2))
+ n1 = min(n - n2, len(y2))
+ if n1+n2 != n:
+ mesg = ("Point %g has %d derivatives, point %g"
+ " has %d derivatives, but order %d requested" % (
+ xi[i], len(y1), xi[i+1], len(y2), orders[i]))
+ raise ValueError(mesg)
+
+ if not (n1 <= len(y1) and n2 <= len(y2)):
+ raise ValueError("`order` input incompatible with"
+ " length y1 or y2.")
+
+ b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
+ y1[:n1], y2[:n2])
+ if len(b) < k:
+ b = BPoly._raise_degree(b, k - len(b))
+ c.append(b)
+
+ c = np.asarray(c)
+ return cls(c.swapaxes(0, 1), xi, extrapolate)
+
+ @staticmethod
+ def _construct_from_derivatives(xa, xb, ya, yb):
+ r"""Compute the coefficients of a polynomial in the Bernstein basis
+ given the values and derivatives at the edges.
+
+ Return the coefficients of a polynomial in the Bernstein basis
+ defined on ``[xa, xb]`` and having the values and derivatives at the
+ endpoints `xa` and `xb` as specified by `ya`` and `yb`.
+ The polynomial constructed is of the minimal possible degree, i.e.,
+ if the lengths of `ya` and `yb` are `na` and `nb`, the degree
+ of the polynomial is ``na + nb - 1``.
+
+ Parameters
+ ----------
+ xa : float
+ Left-hand end point of the interval
+ xb : float
+ Right-hand end point of the interval
+ ya : array_like
+ Derivatives at `xa`. `ya[0]` is the value of the function, and
+ `ya[i]` for ``i > 0`` is the value of the ``i``th derivative.
+ yb : array_like
+ Derivatives at `xb`.
+
+ Returns
+ -------
+ array
+ coefficient array of a polynomial having specified derivatives
+
+ Notes
+ -----
+ This uses several facts from life of Bernstein basis functions.
+ First of all,
+
+ .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
+
+ If B(x) is a linear combination of the form
+
+ .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
+
+ then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
+ Iterating the latter one, one finds for the q-th derivative
+
+ .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
+
+ with
+
+ .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
+
+ This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
+ `c_q` are found one by one by iterating `q = 0, ..., na`.
+
+ At ``x = xb`` it's the same with ``a = n - q``.
+
+ """
+ ya, yb = np.asarray(ya), np.asarray(yb)
+ if ya.shape[1:] != yb.shape[1:]:
+ raise ValueError('Shapes of ya {} and yb {} are incompatible'
+ .format(ya.shape, yb.shape))
+
+ dta, dtb = ya.dtype, yb.dtype
+ if (np.issubdtype(dta, np.complexfloating) or
+ np.issubdtype(dtb, np.complexfloating)):
+ dt = np.complex_
+ else:
+ dt = np.float_
+
+ na, nb = len(ya), len(yb)
+ n = na + nb
+
+ c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
+
+ # compute coefficients of a polynomial degree na+nb-1
+ # walk left-to-right
+ for q in range(0, na):
+ c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
+ for j in range(0, q):
+ c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
+
+ # now walk right-to-left
+ for q in range(0, nb):
+ c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
+ for j in range(0, q):
+ c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
+
+ return c
+
+ @staticmethod
+ def _raise_degree(c, d):
+ r"""Raise a degree of a polynomial in the Bernstein basis.
+
+ Given the coefficients of a polynomial degree `k`, return (the
+ coefficients of) the equivalent polynomial of degree `k+d`.
+
+ Parameters
+ ----------
+ c : array_like
+ coefficient array, 1-D
+ d : integer
+
+ Returns
+ -------
+ array
+ coefficient array, 1-D array of length `c.shape[0] + d`
+
+ Notes
+ -----
+ This uses the fact that a Bernstein polynomial `b_{a, k}` can be
+ identically represented as a linear combination of polynomials of
+ a higher degree `k+d`:
+
+ .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
+ comb(d, j) / comb(k+d, a+j)
+
+ """
+ if d == 0:
+ return c
+
+ k = c.shape[0] - 1
+ out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
+
+ for a in range(c.shape[0]):
+ f = c[a] * comb(k, a)
+ for j in range(d+1):
+ out[a+j] += f * comb(d, j) / comb(k+d, a+j)
+ return out
+
+
class NdPPoly(object):
    """
    Piecewise tensor product polynomial

    The value at point ``xp = (x', y', z', ...)`` is evaluated by first
    computing the interval indices `i` such that::

        x[0][i[0]] <= x' < x[0][i[0]+1]
        x[1][i[1]] <= y' < x[1][i[1]+1]
        ...

    and then computing::

        S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
                * (xp[0] - x[0][i[0]])**m0
                * ...
                * (xp[n] - x[n][i[n]])**mn
                for m0 in range(k[0]+1)
                ...
                for mn in range(k[n]+1))

    where ``k[j]`` is the degree of the polynomial in dimension j. This
    representation is the piecewise multivariate power basis.

    Parameters
    ----------
    c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
        Polynomial coefficients, with polynomial order `kj` and
        `mj+1` intervals for each dimension `j`.
    x : ndim-tuple of ndarrays, shapes (mj+1,)
        Polynomial breakpoints for each dimension. These must be
        sorted in increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.

    Attributes
    ----------
    x : tuple of ndarrays
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    integrate_1d
    construct_fast

    See also
    --------
    PPoly : piecewise polynomials in 1D

    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable.

    """

    def __init__(self, c, x, extrapolate=None):
        self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
        self.c = np.asarray(c)
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = bool(extrapolate)

        ndim = len(self.x)
        if any(v.ndim != 1 for v in self.x):
            raise ValueError("x arrays must all be 1-dimensional")
        if any(v.size < 2 for v in self.x):
            raise ValueError("x arrays must all contain at least 2 points")
        # Validate against the converted array self.c, not the raw input:
        # `c` may be a nested list, which has no .ndim/.shape attributes.
        if self.c.ndim < 2*ndim:
            raise ValueError("c must have at least 2*len(x) dimensions")
        if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
            raise ValueError("x-coordinates are not in increasing order")
        if any(a != b.size - 1 for a, b in
               zip(self.c.shape[ndim:2*ndim], self.x)):
            raise ValueError("x and c do not agree on the number of intervals")

        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        ``c`` and ``x`` must be arrays of the correct shape and type. The
        ``c`` array can only be of dtypes float and complex, and ``x``
        array must have dtype float.

        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _get_dtype(self, dtype):
        # Promote to complex if either the requested dtype or the stored
        # coefficients are complex. np.float64/np.complex128 are used
        # because the np.float_/np.complex_ aliases were removed in
        # NumPy 2.0 (they named the same dtypes).
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex128
        else:
            return np.float64

    def _ensure_c_contiguous(self):
        # The Cython evaluators require C-contiguous coefficients and a
        # tuple of breakpoint arrays.
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
        if not isinstance(self.x, tuple):
            self.x = tuple(self.x)

    def __call__(self, x, nu=None, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative

        Parameters
        ----------
        x : array-like
            Points to evaluate the interpolant at.
        nu : tuple, optional
            Orders of derivatives to evaluate. Each must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        y : array-like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)

        x = _ndim_coords_from_arrays(x)
        x_shape = x.shape
        x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float64)

        if nu is None:
            nu = np.zeros((ndim,), dtype=np.intc)
        else:
            nu = np.asarray(nu, dtype=np.intc)
            if nu.ndim != 1 or nu.shape[0] != ndim:
                raise ValueError("invalid number of derivative orders nu")

        # Flatten the coefficient array to the 3-D layout the evaluator
        # expects: (orders, intervals, trailing values).
        dim1 = prod(self.c.shape[:ndim])
        dim2 = prod(self.c.shape[ndim:2*ndim])
        dim3 = prod(self.c.shape[2*ndim:])
        ks = np.array(self.c.shape[:ndim], dtype=np.intc)

        out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
                           self.x,
                           ks,
                           x,
                           nu,
                           bool(extrapolate),
                           out)

        return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])

    def _derivative_inplace(self, nu, axis):
        """
        Compute 1-D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
        """
        if nu < 0:
            return self._antiderivative_inplace(-nu, axis)

        ndim = len(self.x)
        axis = axis % ndim

        # reduce order
        if nu == 0:
            # noop
            return
        else:
            sl = [slice(None)]*ndim
            sl[axis] = slice(None, -nu, None)
            c2 = self.c[tuple(sl)]

        if c2.shape[axis] == 0:
            # derivative of order 0 is zero
            shp = list(c2.shape)
            shp[axis] = 1
            c2 = np.zeros(shp, dtype=c2.dtype)

        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
        sl = [None]*c2.ndim
        sl[axis] = slice(None)
        c2 *= factor[tuple(sl)]

        self.c = c2

    def _antiderivative_inplace(self, nu, axis):
        """
        Compute 1-D antiderivative along a selected dimension
        May result in a non-contiguous c array.
        """
        if nu <= 0:
            return self._derivative_inplace(-nu, axis)

        ndim = len(self.x)
        axis = axis % ndim

        # Bring the target order-axis to the front for the fix-up pass.
        perm = list(range(ndim))
        perm[0], perm[axis] = perm[axis], perm[0]
        perm = perm + list(range(ndim, self.c.ndim))

        c = self.c.transpose(perm)

        c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
                      dtype=c.dtype)
        c2[:-nu] = c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
        c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        perm2 = list(range(c2.ndim))
        perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]

        c2 = c2.transpose(perm2)
        c2 = c2.copy()
        _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
                              self.x[axis], nu-1)

        # Undo both permutations to restore the original axis layout.
        c2 = c2.transpose(perm2)
        c2 = c2.transpose(perm)

        # Done
        self.c = c2

    def derivative(self, nu):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the antiderivative is returned.

        Returns
        -------
        pp : NdPPoly
            Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
            representing the derivative of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals in each dimension are
        considered half-open, ``[a, b)``, except for the last interval
        which is closed ``[a, b]``.

        """
        # Work on a copy so this polynomial is left untouched.
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._derivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def antiderivative(self, nu):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.

        """
        # Work on a copy so this polynomial is left untouched.
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._antiderivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def integrate_1d(self, a, b, axis, extrapolate=None):
        r"""
        Compute NdPPoly representation for one dimensional definite integral

        The result is a piecewise polynomial representing the integral:

        .. math::

           p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)

        where the dimension integrated over is specified with the
        `axis` parameter.

        Parameters
        ----------
        a, b : float
            Lower and upper bound for integration.
        axis : int
            Dimension over which to compute the 1-D integrals
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : NdPPoly or array-like
            Definite integral of the piecewise polynomial over [a, b].
            If the polynomial was 1D, an array is returned,
            otherwise, an NdPPoly object.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)
        axis = int(axis) % ndim

        # reuse 1-D integration routines: move the order-axis and the
        # interval-axis of the chosen dimension to the front.
        c = self.c
        swap = list(range(c.ndim))
        swap.insert(0, swap[axis])
        del swap[axis + 1]
        swap.insert(1, swap[ndim + axis])
        del swap[ndim + axis + 1]

        c = c.transpose(swap)
        p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
                                 self.x[axis],
                                 extrapolate=extrapolate)
        out = p.integrate(a, b, extrapolate=extrapolate)

        # Construct result
        if ndim == 1:
            return out.reshape(c.shape[2:])
        else:
            c = out.reshape(c.shape[2:])
            x = self.x[:axis] + self.x[axis+1:]
            return self.construct_fast(c, x, extrapolate=extrapolate)

    def integrate(self, ranges, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        ranges : ndim-tuple of 2-tuples float
            Sequence of lower and upper bounds for each dimension,
            ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over
            [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]

        """

        ndim = len(self.x)

        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        if not hasattr(ranges, '__len__') or len(ranges) != ndim:
            raise ValueError("Range not a sequence of correct length")

        self._ensure_c_contiguous()

        # Reuse 1D integration routine, integrating out one dimension
        # per iteration.
        c = self.c
        for n, (a, b) in enumerate(ranges):
            swap = list(range(c.ndim))
            swap.insert(1, swap[ndim - n])
            del swap[ndim - n + 1]

            c = c.transpose(swap)

            p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
            out = p.integrate(a, b, extrapolate=extrapolate)
            c = out.reshape(c.shape[2:])

        return c
+
+
class RegularGridInterpolator(object):
    """
    Interpolation on a regular grid in arbitrary dimensions

    The data must be defined on a regular grid; the grid spacing however may be
    uneven. Linear and nearest-neighbor interpolation are supported. After
    setting up the interpolator object, the interpolation method (*linear* or
    *nearest*) may be chosen at each evaluation.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.

    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.

    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest". This parameter will become the default for the object's
        ``__call__`` method. Default is "linear".

    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.

    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.

    Methods
    -------
    __call__

    Notes
    -----
    Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
    avoids expensive triangulation of the input data by taking advantage of the
    regular grid structure.

    If any of `points` have a dimension of size 1, linear interpolation will
    return an array of `nan` values. Nearest-neighbor interpolation will work
    as usual in this case.

    .. versionadded:: 0.14

    Examples
    --------
    Evaluate a simple example function on the points of a 3-D grid:

    >>> from scipy.interpolate import RegularGridInterpolator
    >>> def f(x, y, z):
    ...     return 2 * x**3 + 3 * y**2 - z
    >>> x = np.linspace(1, 4, 11)
    >>> y = np.linspace(4, 7, 22)
    >>> z = np.linspace(7, 9, 33)
    >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))

    ``data`` is now a 3-D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
    Next, define an interpolating function from this data:

    >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)

    Evaluate the interpolating function at the two points
    ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:

    >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
    >>> my_interpolating_function(pts)
    array([ 125.80469388,  146.30069388])

    which is indeed a close approximation to
    ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.

    See also
    --------
    NearestNDInterpolator : Nearest neighbor interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    References
    ----------
    .. [1] Python package *regulargrid* by Johannes Buchner, see
           https://pypi.python.org/pypi/regulargrid/
    .. [2] Wikipedia, "Trilinear interpolation",
           https://en.wikipedia.org/wiki/Trilinear_interpolation
    .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
           and multilinear table interpolation in many dimensions." MATH.
           COMPUT. 50.181 (1988): 189-196.
           https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf

    """
    # this class is based on code originally programmed by Johannes Buchner,
    # see https://github.com/JohannesBuchner/regulargrid

    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error

        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)

        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))

        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            # integer (or other exact) data would make linear interpolation
            # silently truncate; promote to float up front.
            if not np.issubdtype(values.dtype, np.inexact):
                values = values.astype(float)

        self.fill_value = fill_value
        if fill_value is not None:
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype') and not
                    np.can_cast(fill_value_dtype, values.dtype,
                                casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")

        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values

    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".

        """
        method = self.method if method is None else method
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)

        ndim = len(self.grid)
        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
        if xi.shape[-1] != len(self.grid):
            # Report the dimension actually compared (the last axis of xi);
            # the previous code reported xi.shape[1], which is wrong for
            # higher-dimensional xi and can itself raise for 1-D xi.
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[-1], ndim))

        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])

        if self.bounds_error:
            for i, p in enumerate(xi.T):
                if not np.logical_and(np.all(self.grid[i][0] <= p),
                                      np.all(p <= self.grid[i][-1])):
                    raise ValueError("One of the requested xi is out of bounds "
                                     "in dimension %d" % i)

        indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
        if method == "linear":
            result = self._evaluate_linear(indices,
                                           norm_distances,
                                           out_of_bounds)
        elif method == "nearest":
            result = self._evaluate_nearest(indices,
                                            norm_distances,
                                            out_of_bounds)
        if not self.bounds_error and self.fill_value is not None:
            result[out_of_bounds] = self.fill_value

        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])

    def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
        # slice for broadcasting over trailing dimensions in self.values
        vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))

        # find relevant values
        # each i and i+1 represents an edge
        edges = itertools.product(*[[i, i + 1] for i in indices])
        values = 0.
        for edge_indices in edges:
            # Weight of each hypercube corner is the product of 1-D hat
            # weights: (1 - yi) at the lower edge, yi at the upper edge.
            weight = 1.
            for ei, i, yi in zip(edge_indices, indices, norm_distances):
                weight *= np.where(ei == i, 1 - yi, yi)
            values += np.asarray(self.values[edge_indices]) * weight[vslice]
        return values

    def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
        # Pick the lower grid point when the normalized distance is <= 0.5,
        # the upper one otherwise.
        idx_res = [np.where(yi <= .5, i, i + 1)
                   for i, yi in zip(indices, norm_distances)]
        return self.values[tuple(idx_res)]

    def _find_indices(self, xi):
        # find relevant edges between which xi are situated
        indices = []
        # compute distance to lower edge in unity units
        norm_distances = []
        # check for out of bounds xi
        out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
        # iterate through dimensions
        for x, grid in zip(xi, self.grid):
            i = np.searchsorted(grid, x) - 1
            # clip to valid interval indices; out-of-range points map onto
            # the first/last interval (norm_distance then falls outside [0,1])
            i[i < 0] = 0
            i[i > grid.size - 2] = grid.size - 2
            indices.append(i)
            norm_distances.append((x - grid[i]) /
                                  (grid[i + 1] - grid[i]))
            if not self.bounds_error:
                out_of_bounds += x < grid[0]
                out_of_bounds += x > grid[-1]
        return indices, norm_distances, out_of_bounds
+
+
def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.

    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.

    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at

    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.

    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.

    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated. Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Notes
    -----

    .. versionadded:: 0.14

    Examples
    --------
    Evaluate a simple example function on the points of a regular 3-D grid:

    >>> from scipy.interpolate import interpn
    >>> def value_func_3d(x, y, z):
    ...     return 2 * x + 3 * y - z
    >>> x = np.linspace(0, 5)
    >>> y = np.linspace(0, 5)
    >>> z = np.linspace(0, 5)
    >>> points = (x, y, z)
    >>> values = value_func_3d(*np.meshgrid(*points))

    Evaluate the interpolating function at a point

    >>> point = np.array([2.21, 3.12, 1.15])
    >>> print(interpn(points, values, point))
    [11.72]

    See also
    --------
    NearestNDInterpolator : Nearest neighbor interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions

    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh

    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)

    if not hasattr(values, 'ndim'):
        values = np.asarray(values)

    ndim = values.ndim
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method splinef2d can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method splinef2d does not support extrapolation.")

    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method splinef2d can only be used for "
                         "scalar data with one point per coordinate")

    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])

    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        # Report the dimension actually compared (the last axis of xi); the
        # previous code reported xi.shape[1], which is wrong for
        # higher-dimensional xi and can itself raise for 1-D xi.
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))

    if bounds_error:
        for i, p in enumerate(xi.T):
            if not np.logical_and(np.all(grid[i][0] <= p),
                                  np.all(p <= grid[i][-1])):
                raise ValueError("One of the requested xi is out of bounds "
                                 "in dimension %d" % i)

    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])

        # RectBivariateSpline doesn't support fill_value; we need to wrap here
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        # NOTE(review): result inherits xi's dtype, not values' — complex
        # data with splinef2d would be truncated; confirm acceptable.
        result = np.empty_like(xi[:, 0])

        # make a copy of values for RectBivariateSpline
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value

        return result.reshape(xi_shape[:-1])
+
+
+# backward compatibility wrapper
# backward compatibility wrapper
class _ppform(PPoly):
    """
    Deprecated piecewise polynomial class.

    New code should use the `PPoly` class instead.

    """

    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        # Warn on every construction: this class is kept only for backward
        # compatibility with old code.
        warnings.warn("_ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)

        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)

        PPoly.__init__(self, coeffs, breaks)

        # Legacy attribute aliases kept for backward compatibility.
        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]  # number of coefficient rows
        self.fill = fill               # value returned outside [a, b]
        self.a = self.breaks[0]
        self.b = self.breaks[-1]

    def __call__(self, x):
        # Legacy behavior: never extrapolate; out-of-range points receive
        # `fill` via _evaluate below.
        return PPoly.__call__(self, x, 0, False)

    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)
        # Overwrite whatever PPoly produced outside [a, b] with the fill value.
        out[~((x >= self.a) & (x <= self.b))] = self.fill
        return out

    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        # Note: this spline representation is incompatible with FITPACK
        N = len(xk)-1
        sivals = np.empty((order+1, N), dtype=float)
        for m in range(order, -1, -1):
            # Divide the m-th derivative values by m! to obtain power-basis
            # coefficients on each interval.
            fact = spec.gamma(m+1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order-m, :] = res
        return cls(sivals, xk, fill=fill)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/ndgriddata.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/ndgriddata.py
new file mode 100644
index 0000000..a5addd0
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/ndgriddata.py
@@ -0,0 +1,269 @@
+"""
+Convenience interface to N-D interpolation
+
+.. versionadded:: 0.9
+
+"""
+import numpy as np
+from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
+ CloughTocher2DInterpolator, _ndim_coords_from_arrays
+from scipy.spatial import cKDTree
+
+__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
+ 'CloughTocher2DInterpolator']
+
+#------------------------------------------------------------------------------
+# Nearest-neighbor interpolation
+#------------------------------------------------------------------------------
+
+
+class NearestNDInterpolator(NDInterpolatorBase):
+    """
+    NearestNDInterpolator(x, y)
+
+    Nearest-neighbor interpolation in N dimensions.
+
+    .. versionadded:: 0.9
+
+    Methods
+    -------
+    __call__
+
+    Parameters
+    ----------
+    x : (Npoints, Ndims) ndarray of floats
+        Data point coordinates.
+    y : (Npoints,) ndarray of float or complex
+        Data values.
+    rescale : boolean, optional
+        Rescale points to unit cube before performing interpolation.
+        This is useful if some of the input dimensions have
+        incommensurable units and differ by many orders of magnitude.
+
+        .. versionadded:: 0.14.0
+    tree_options : dict, optional
+        Options passed to the underlying ``cKDTree``.
+
+        .. versionadded:: 0.17.0
+
+
+    Notes
+    -----
+    Uses ``scipy.spatial.cKDTree``
+
+    Examples
+    --------
+    We can interpolate values on a 2D plane:
+
+    >>> from scipy.interpolate import NearestNDInterpolator
+    >>> import matplotlib.pyplot as plt
+    >>> np.random.seed(0)
+    >>> x = np.random.random(10) - 0.5
+    >>> y = np.random.random(10) - 0.5
+    >>> z = np.hypot(x, y)
+    >>> X = np.linspace(min(x), max(x))
+    >>> Y = np.linspace(min(y), max(y))
+    >>> X, Y = np.meshgrid(X, Y)  # 2D grid for interpolation
+    >>> interp = NearestNDInterpolator(list(zip(x, y)), z)
+    >>> Z = interp(X, Y)
+    >>> plt.pcolormesh(X, Y, Z, shading='auto')
+    >>> plt.plot(x, y, "ok", label="input point")
+    >>> plt.legend()
+    >>> plt.colorbar()
+    >>> plt.axis("equal")
+    >>> plt.show()
+
+    See also
+    --------
+    griddata :
+        Interpolate unstructured D-D data.
+    LinearNDInterpolator :
+        Piecewise linear interpolant in N dimensions.
+    CloughTocher2DInterpolator :
+        Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
+
+    """
+
+    def __init__(self, x, y, rescale=False, tree_options=None):
+        # need_values=False: y is stored as-is below rather than validated
+        # by the base class, so complex values pass through untouched.
+        NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
+                                    need_contiguous=False,
+                                    need_values=False)
+        if tree_options is None:
+            tree_options = dict()
+        # Build the k-d tree once at construction; queries reuse it.
+        self.tree = cKDTree(self.points, **tree_options)
+        self.values = np.asarray(y)
+
+    def __call__(self, *args):
+        """
+        Evaluate interpolator at given points.
+
+        Parameters
+        ----------
+        x1, x2, ... xn: array-like of float
+            Points where to interpolate data at.
+            x1, x2, ... xn can be array-like of float with broadcastable shape.
+            or x1 can be array-like of float with shape ``(..., ndim)``
+
+        """
+        xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
+        xi = self._check_call_shape(xi)
+        # Apply the same rescaling used for the stored points (no-op unless
+        # rescale=True was given at construction).
+        xi = self._scale_x(xi)
+        # Nearest-neighbor lookup; distances are discarded, only indices used.
+        dist, i = self.tree.query(xi)
+        return self.values[i]
+
+
+#------------------------------------------------------------------------------
+# Convenience interface function
+#------------------------------------------------------------------------------
+
+def griddata(points, values, xi, method='linear', fill_value=np.nan,
+             rescale=False):
+    """
+    Interpolate unstructured D-D data.
+
+    Parameters
+    ----------
+    points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
+        Data point coordinates.
+    values : ndarray of float or complex, shape (n,)
+        Data values.
+    xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
+        Points at which to interpolate data.
+    method : {'linear', 'nearest', 'cubic'}, optional
+        Method of interpolation. One of
+
+        ``nearest``
+          return the value at the data point closest to
+          the point of interpolation. See `NearestNDInterpolator` for
+          more details.
+
+        ``linear``
+          tessellate the input point set to N-D
+          simplices, and interpolate linearly on each simplex. See
+          `LinearNDInterpolator` for more details.
+
+        ``cubic`` (1-D)
+          return the value determined from a cubic
+          spline.
+
+        ``cubic`` (2-D)
+          return the value determined from a
+          piecewise cubic, continuously differentiable (C1), and
+          approximately curvature-minimizing polynomial surface. See
+          `CloughTocher2DInterpolator` for more details.
+    fill_value : float, optional
+        Value used to fill in for requested points outside of the
+        convex hull of the input points. If not provided, then the
+        default is ``nan``. This option has no effect for the
+        'nearest' method.
+    rescale : bool, optional
+        Rescale points to unit cube before performing interpolation.
+        This is useful if some of the input dimensions have
+        incommensurable units and differ by many orders of magnitude.
+
+        .. versionadded:: 0.14.0
+
+    Returns
+    -------
+    ndarray
+        Array of interpolated values.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.9
+
+    Examples
+    --------
+
+    Suppose we want to interpolate the 2-D function
+
+    >>> def func(x, y):
+    ...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
+
+    on a grid in [0, 1]x[0, 1]
+
+    >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
+
+    but we only know its values at 1000 data points:
+
+    >>> points = np.random.rand(1000, 2)
+    >>> values = func(points[:,0], points[:,1])
+
+    This can be done with `griddata` -- below we try out all of the
+    interpolation methods:
+
+    >>> from scipy.interpolate import griddata
+    >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
+    >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
+    >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
+
+    One can see that the exact result is reproduced by all of the
+    methods to some degree, but for this smooth function the piecewise
+    cubic interpolant gives the best results:
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.subplot(221)
+    >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
+    >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
+    >>> plt.title('Original')
+    >>> plt.subplot(222)
+    >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
+    >>> plt.title('Nearest')
+    >>> plt.subplot(223)
+    >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
+    >>> plt.title('Linear')
+    >>> plt.subplot(224)
+    >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
+    >>> plt.title('Cubic')
+    >>> plt.gcf().set_size_inches(6, 6)
+    >>> plt.show()
+
+    See also
+    --------
+    LinearNDInterpolator :
+        Piecewise linear interpolant in N dimensions.
+    NearestNDInterpolator :
+        Nearest-neighbor interpolation in N dimensions.
+    CloughTocher2DInterpolator :
+        Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
+
+    """
+
+    points = _ndim_coords_from_arrays(points)
+
+    # A 1-D `points` array means 1-D data; otherwise the last axis is the
+    # spatial dimension.
+    if points.ndim < 2:
+        ndim = points.ndim
+    else:
+        ndim = points.shape[-1]
+
+    if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
+        # Special-case 1-D: delegate to interp1d, which handles all three
+        # methods (local import avoids a circular import at module load).
+        from .interpolate import interp1d
+        points = points.ravel()
+        if isinstance(xi, tuple):
+            if len(xi) != 1:
+                raise ValueError("invalid number of dimensions in xi")
+            xi, = xi
+        # Sort points/values together, necessary as input for interp1d
+        idx = np.argsort(points)
+        points = points[idx]
+        values = values[idx]
+        if method == 'nearest':
+            # 'nearest' ignores fill_value (documented above): extrapolate
+            # so out-of-range queries get the nearest endpoint value.
+            fill_value = 'extrapolate'
+        ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
+                      fill_value=fill_value)
+        return ip(xi)
+    elif method == 'nearest':
+        ip = NearestNDInterpolator(points, values, rescale=rescale)
+        return ip(xi)
+    elif method == 'linear':
+        ip = LinearNDInterpolator(points, values, fill_value=fill_value,
+                                  rescale=rescale)
+        return ip(xi)
+    elif method == 'cubic' and ndim == 2:
+        # Cubic is only available in 2-D for scattered data.
+        ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
+                                        rescale=rescale)
+        return ip(xi)
+    else:
+        raise ValueError("Unknown interpolation method %r for "
+                         "%d dimensional data" % (method, ndim))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/polyint.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/polyint.py
new file mode 100644
index 0000000..3f13051
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/polyint.py
@@ -0,0 +1,714 @@
+import numpy as np
+from scipy.special import factorial
+from scipy._lib._util import _asarray_validated, float_factorial
+
+
+__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
+ "barycentric_interpolate", "approximate_taylor_polynomial"]
+
+
+def _isscalar(x):
+    """Check whether `x` is a scalar type or a 0-dim array."""
+    return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
+
+
+class _Interpolator1D(object):
+    """
+    Common features in univariate interpolation
+
+    Deal with input data type and interpolation axis rolling. The
+    actual interpolator can assume the y-data is of shape (n, r) where
+    `n` is the number of x-points, and `r` the number of variables,
+    and use self.dtype as the y-data type.
+
+    Attributes
+    ----------
+    _y_axis
+        Axis along which the interpolation goes in the original array
+    _y_extra_shape
+        Additional trailing shape of the input arrays, excluding
+        the interpolation axis.
+    dtype
+        Dtype of the y-data arrays. Can be set via _set_dtype, which
+        forces it to be float or complex.
+
+    Methods
+    -------
+    __call__
+    _prepare_x
+    _finish_y
+    _reshape_yi
+    _set_yi
+    _set_dtype
+    _evaluate
+
+    """
+
+    __slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
+
+    def __init__(self, xi=None, yi=None, axis=None):
+        self._y_axis = axis
+        self._y_extra_shape = None
+        self.dtype = None
+        # yi may be supplied later (e.g. BarycentricInterpolator.set_yi).
+        if yi is not None:
+            self._set_yi(yi, xi=xi, axis=axis)
+
+    def __call__(self, x):
+        """
+        Evaluate the interpolant
+
+        Parameters
+        ----------
+        x : array_like
+            Points to evaluate the interpolant at.
+
+        Returns
+        -------
+        y : array_like
+            Interpolated values. Shape is determined by replacing
+            the interpolation axis in the original array with the shape of x.
+
+        Notes
+        -----
+        Input values `x` must be convertible to `float` values like `int`
+        or `float`.
+
+        """
+        # Flatten x, evaluate, then restore the caller's shape/axis layout.
+        x, x_shape = self._prepare_x(x)
+        y = self._evaluate(x)
+        return self._finish_y(y, x_shape)
+
+    def _evaluate(self, x):
+        """
+        Actually evaluate the value of the interpolator.
+        """
+        raise NotImplementedError()
+
+    def _prepare_x(self, x):
+        """Reshape input x array to 1-D"""
+        x = _asarray_validated(x, check_finite=False, as_inexact=True)
+        x_shape = x.shape
+        return x.ravel(), x_shape
+
+    def _finish_y(self, y, x_shape):
+        """Reshape interpolated y back to an N-D array similar to initial y"""
+        y = y.reshape(x_shape + self._y_extra_shape)
+        if self._y_axis != 0 and x_shape != ():
+            # Move the x-shaped axes back into the position of the original
+            # interpolation axis (undoes the rollaxis done in _reshape_yi).
+            nx = len(x_shape)
+            ny = len(self._y_extra_shape)
+            s = (list(range(nx, nx + self._y_axis))
+                 + list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
+            y = y.transpose(s)
+        return y
+
+    def _reshape_yi(self, yi, check=False):
+        # Bring the interpolation axis to the front, then collapse all other
+        # axes into a single trailing "r" axis -> shape (n, r).
+        yi = np.rollaxis(np.asarray(yi), self._y_axis)
+        if check and yi.shape[1:] != self._y_extra_shape:
+            ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
+                                           self._y_extra_shape[:-self._y_axis])
+            raise ValueError("Data must be of shape %s" % ok_shape)
+        return yi.reshape((yi.shape[0], -1))
+
+    def _set_yi(self, yi, xi=None, axis=None):
+        if axis is None:
+            axis = self._y_axis
+        if axis is None:
+            raise ValueError("no interpolation axis specified")
+
+        yi = np.asarray(yi)
+
+        shape = yi.shape
+        if shape == ():
+            # Treat a scalar yi as a length-1 array.
+            shape = (1,)
+        if xi is not None and shape[axis] != len(xi):
+            raise ValueError("x and y arrays must be equal in length along "
+                             "interpolation axis.")
+
+        # Normalize negative axis values and record the non-interpolation
+        # part of yi's shape for use when reshaping results.
+        self._y_axis = (axis % yi.ndim)
+        self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
+        self.dtype = None
+        self._set_dtype(yi.dtype)
+
+    def _set_dtype(self, dtype, union=False):
+        # Coerce the working dtype to complex or float; with union=True the
+        # dtype can only widen (float -> complex), never narrow back.
+        # NOTE(review): np.complex_/np.float_ were removed in NumPy 2.0; this
+        # would need np.complex128/np.float64 there -- confirm the NumPy
+        # version this vendored copy is pinned against.
+        if np.issubdtype(dtype, np.complexfloating) \
+                or np.issubdtype(self.dtype, np.complexfloating):
+            self.dtype = np.complex_
+        else:
+            if not union or self.dtype != np.complex_:
+                self.dtype = np.float_
+
+
+class _Interpolator1DWithDerivatives(_Interpolator1D):
+    # Mixin adding derivative evaluation on top of _Interpolator1D;
+    # subclasses must implement _evaluate_derivatives(x, der).
+    def derivatives(self, x, der=None):
+        """
+        Evaluate many derivatives of the polynomial at the point x
+
+        Produce an array of all derivative values at the point x.
+
+        Parameters
+        ----------
+        x : array_like
+            Point or points at which to evaluate the derivatives
+        der : int or None, optional
+            How many derivatives to extract; None for all potentially
+            nonzero derivatives (that is a number equal to the number
+            of points). This number includes the function value as 0th
+            derivative.
+
+        Returns
+        -------
+        d : ndarray
+            Array with derivatives; d[j] contains the jth derivative.
+            Shape of d[j] is determined by replacing the interpolation
+            axis in the original array with the shape of x.
+
+        Examples
+        --------
+        >>> from scipy.interpolate import KroghInterpolator
+        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
+        array([1.0,2.0,3.0])
+        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
+        array([[1.0,1.0],
+               [2.0,2.0],
+               [3.0,3.0]])
+
+        """
+        x, x_shape = self._prepare_x(x)
+        y = self._evaluate_derivatives(x, der)
+
+        # y has shape (der, n_x, r); restore x's shape and move the
+        # interpolation axis back to its original position, keeping the
+        # derivative axis first.
+        y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
+        if self._y_axis != 0 and x_shape != ():
+            nx = len(x_shape)
+            ny = len(self._y_extra_shape)
+            s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+                 + list(range(1, nx+1)) +
+                 list(range(nx+1+self._y_axis, nx+ny+1)))
+            y = y.transpose(s)
+        return y
+
+    def derivative(self, x, der=1):
+        """
+        Evaluate one derivative of the polynomial at the point x
+
+        Parameters
+        ----------
+        x : array_like
+            Point or points at which to evaluate the derivatives
+
+        der : integer, optional
+            Which derivative to extract. This number includes the
+            function value as 0th derivative.
+
+        Returns
+        -------
+        d : ndarray
+            Derivative interpolated at the x-points. Shape of d is
+            determined by replacing the interpolation axis in the
+            original array with the shape of x.
+
+        Notes
+        -----
+        This is computed by evaluating all derivatives up to the desired
+        one (using self.derivatives()) and then discarding the rest.
+
+        """
+        x, x_shape = self._prepare_x(x)
+        # Compute derivatives 0..der and keep only the last one.
+        y = self._evaluate_derivatives(x, der+1)
+        return self._finish_y(y[der], x_shape)
+
+
+class KroghInterpolator(_Interpolator1DWithDerivatives):
+    """
+    Interpolating polynomial for a set of points.
+
+    The polynomial passes through all the pairs (xi,yi). One may
+    additionally specify a number of derivatives at each point xi;
+    this is done by repeating the value xi and specifying the
+    derivatives as successive yi values.
+
+    Allows evaluation of the polynomial and all its derivatives.
+    For reasons of numerical stability, this function does not compute
+    the coefficients of the polynomial, although they can be obtained
+    by evaluating all the derivatives.
+
+    Parameters
+    ----------
+    xi : array_like, length N
+        Known x-coordinates. Must be sorted in increasing order.
+    yi : array_like
+        Known y-coordinates. When an xi occurs two or more times in
+        a row, the corresponding yi's represent derivative values.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Notes
+    -----
+    Be aware that the algorithms implemented here are not necessarily
+    the most numerically stable known. Moreover, even in a world of
+    exact computation, unless the x coordinates are chosen very
+    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+    polynomial interpolation itself is a very ill-conditioned process
+    due to the Runge phenomenon. In general, even with well-chosen
+    x values, degrees higher than about thirty cause problems with
+    numerical instability in this code.
+
+    Based on [1]_.
+
+    References
+    ----------
+    .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
+        and Numerical Differentiation", 1970.
+
+    Examples
+    --------
+    To produce a polynomial that is zero at 0 and 1 and has
+    derivative 2 at 0, call
+
+    >>> from scipy.interpolate import KroghInterpolator
+    >>> KroghInterpolator([0,0,1],[0,2,0])
+
+    This constructs the quadratic 2*X**2-2*X. The derivative condition
+    is indicated by the repeated zero in the xi array; the corresponding
+    yi values are 0, the function value, and 2, the derivative value.
+
+    For another example, given xi, yi, and a derivative ypi for each
+    point, appropriate arrays can be constructed as:
+
+    >>> xi = np.linspace(0, 1, 5)
+    >>> yi, ypi = np.random.rand(2, 5)
+    >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
+    >>> KroghInterpolator(xi_k, yi_k)
+
+    To produce a vector-valued polynomial, supply a higher-dimensional
+    array for yi:
+
+    >>> KroghInterpolator([0,1],[[2,3],[4,5]])
+
+    This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
+
+    """
+
+    def __init__(self, xi, yi, axis=0):
+        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
+
+        self.xi = np.asarray(xi)
+        self.yi = self._reshape_yi(yi)
+        self.n, self.r = self.yi.shape
+
+        # Compute generalized divided differences (Newton-form coefficients)
+        # following Krogh's algorithm; repeated abscissas are treated as
+        # derivative data (Hermite interpolation).
+        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
+        c[0] = self.yi[0]
+        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
+        for k in range(1, self.n):
+            # s = multiplicity of xi[k] among the immediately preceding
+            # abscissas; yi[k] is then the s-th derivative at that point.
+            s = 0
+            while s <= k and xi[k-s] == xi[k]:
+                s += 1
+            s -= 1
+            Vk[0] = self.yi[k]/float_factorial(s)
+            for i in range(k-s):
+                # Non-adjacent duplicates are invalid: derivative data must
+                # be given as consecutive repeats of the same abscissa.
+                # NOTE(review): message has a typo -- should read
+                # "Elements of `xi` can't be equal." (string left unchanged
+                # here to match the vendored upstream source).
+                if xi[i] == xi[k]:
+                    raise ValueError("Elements if `xi` can't be equal.")
+                if s == 0:
+                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
+                else:
+                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
+            c[k] = Vk[k-s]
+        self.c = c
+
+    def _evaluate(self, x):
+        # Horner-like accumulation of the Newton form:
+        # p(x) = c[0] + c[1](x-x0) + c[2](x-x0)(x-x1) + ...
+        pi = 1
+        p = np.zeros((len(x), self.r), dtype=self.dtype)
+        p += self.c[0,np.newaxis,:]
+        for k in range(1, self.n):
+            w = x - self.xi[k-1]
+            pi = w*pi
+            p += pi[:,np.newaxis] * self.c[k]
+        return p
+
+    def _evaluate_derivatives(self, x, der=None):
+        n = self.n
+        r = self.r
+
+        if der is None:
+            # All potentially nonzero derivatives (degree < n).
+            der = self.n
+        # pi[k] accumulates the Newton basis products, w[k] the (x - xi[k])
+        # factors; reused below when propagating to higher derivatives.
+        pi = np.zeros((n, len(x)))
+        w = np.zeros((n, len(x)))
+        pi[0] = 1
+        p = np.zeros((len(x), self.r), dtype=self.dtype)
+        p += self.c[0, np.newaxis, :]
+
+        for k in range(1, n):
+            w[k-1] = x - self.xi[k-1]
+            pi[k] = w[k-1] * pi[k-1]
+            p += pi[k, :, np.newaxis] * self.c[k]
+
+        # cn[k] becomes the k-th derivative; seeded with the Newton
+        # coefficients and updated in place, then scaled by k!.
+        cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
+        cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
+        cn[0] = p
+        for k in range(1, n):
+            for i in range(1, n-k+1):
+                pi[i] = w[k+i-1]*pi[i-1] + pi[i]
+                cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
+            cn[k] *= float_factorial(k)
+
+        # Derivatives of order >= n of a degree-(n-1) polynomial vanish.
+        cn[n, :, :] = 0
+        return cn[:der]
+
+
+def krogh_interpolate(xi, yi, x, der=0, axis=0):
+    """
+    Convenience function for polynomial interpolation.
+
+    See `KroghInterpolator` for more details.
+
+    Parameters
+    ----------
+    xi : array_like
+        Known x-coordinates.
+    yi : array_like
+        Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
+        vectors of length R, or scalars if R=1.
+    x : array_like
+        Point or points at which to evaluate the derivatives.
+    der : int or list, optional
+        How many derivatives to extract; None for all potentially
+        nonzero derivatives (that is a number equal to the number
+        of points), or a list of derivatives to extract. This number
+        includes the function value as 0th derivative.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Returns
+    -------
+    d : ndarray
+        If the interpolator's values are R-D then the
+        returned array will be the number of derivatives by N by R.
+        If `x` is a scalar, the middle dimension will be dropped; if
+        the `yi` are scalars then the last dimension will be dropped.
+
+    See Also
+    --------
+    KroghInterpolator : Krogh interpolator
+
+    Notes
+    -----
+    Construction of the interpolating polynomial is a relatively expensive
+    process. If you want to evaluate it repeatedly consider using the class
+    KroghInterpolator (which is what this function uses).
+
+    Examples
+    --------
+    We can interpolate 2D observed data using krogh interpolation:
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import krogh_interpolate
+    >>> x_observed = np.linspace(0.0, 10.0, 11)
+    >>> y_observed = np.sin(x_observed)
+    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+    >>> y = krogh_interpolate(x_observed, y_observed, x)
+    >>> plt.plot(x_observed, y_observed, "o", label="observation")
+    >>> plt.plot(x, y, label="krogh interpolation")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    P = KroghInterpolator(xi, yi, axis=axis)
+    # Dispatch on `der`: plain evaluation, a single derivative, or a list of
+    # derivative orders (computed up to the max order, then indexed).
+    if der == 0:
+        return P(x)
+    elif _isscalar(der):
+        return P.derivative(x,der=der)
+    else:
+        return P.derivatives(x,der=np.amax(der)+1)[der]
+
+
+def approximate_taylor_polynomial(f,x,degree,scale,order=None):
+    """
+    Estimate the Taylor polynomial of f at x by polynomial fitting.
+
+    Parameters
+    ----------
+    f : callable
+        The function whose Taylor polynomial is sought. Should accept
+        a vector of `x` values.
+    x : scalar
+        The point at which the polynomial is to be evaluated.
+    degree : int
+        The degree of the Taylor polynomial
+    scale : scalar
+        The width of the interval to use to evaluate the Taylor polynomial.
+        Function values spread over a range this wide are used to fit the
+        polynomial. Must be chosen carefully.
+    order : int or None, optional
+        The order of the polynomial to be used in the fitting; `f` will be
+        evaluated ``order+1`` times. If None, use `degree`.
+
+    Returns
+    -------
+    p : poly1d instance
+        The Taylor polynomial (translated to the origin, so that
+        for example p(0)=f(x)).
+
+    Notes
+    -----
+    The appropriate choice of "scale" is a trade-off; too large and the
+    function differs from its Taylor polynomial too much to get a good
+    answer, too small and round-off errors overwhelm the higher-order terms.
+    The algorithm used becomes numerically unstable around order 30 even
+    under ideal circumstances.
+
+    Choosing order somewhat larger than degree may improve the higher-order
+    terms.
+
+    Examples
+    --------
+    We can calculate Taylor approximation polynomials of sin function with
+    various degrees:
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import approximate_taylor_polynomial
+    >>> x = np.linspace(-10.0, 10.0, num=100)
+    >>> plt.plot(x, np.sin(x), label="sin curve")
+    >>> for degree in np.arange(1, 15, step=2):
+    ...     sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
+    ...                                                order=degree + 2)
+    ...     plt.plot(x, sin_taylor(x), label=f"degree={degree}")
+    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
+    ...            borderaxespad=0.0, shadow=True)
+    >>> plt.tight_layout()
+    >>> plt.axis([-10, 10, -10, 10])
+    >>> plt.show()
+
+    """
+    if order is None:
+        order = degree
+
+    n = order+1
+    # Choose n points that cluster near the endpoints of the interval in
+    # a way that avoids the Runge phenomenon. Ensure, by including the
+    # endpoint or not as appropriate, that one point always falls at x
+    # exactly.
+    # NOTE(review): ``n % 1`` is always 0 for integer n, so endpoint is
+    # always False here; the comment above suggests ``n % 2`` may have been
+    # intended -- verify against upstream scipy before changing.
+    xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
+
+    # Fit an interpolating polynomial through the sample points and read off
+    # its derivatives at x; dividing by k! converts them to Taylor
+    # coefficients (low-to-high after the [::-1] reversal for poly1d).
+    P = KroghInterpolator(xs, f(xs))
+    d = P.derivatives(x,der=degree+1)
+
+    return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
+
+
+class BarycentricInterpolator(_Interpolator1D):
+    """The interpolating polynomial for a set of points
+
+    Constructs a polynomial that passes through a given set of points.
+    Allows evaluation of the polynomial, efficient changing of the y
+    values to be interpolated, and updating by adding more x values.
+    For reasons of numerical stability, this function does not compute
+    the coefficients of the polynomial.
+
+    The values yi need to be provided before the function is
+    evaluated, but none of the preprocessing depends on them, so rapid
+    updates are possible.
+
+    Parameters
+    ----------
+    xi : array_like
+        1-D array of x coordinates of the points the polynomial
+        should pass through
+    yi : array_like, optional
+        The y coordinates of the points the polynomial should pass through.
+        If None, the y values will be supplied later via the `set_y` method.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Notes
+    -----
+    This class uses a "barycentric interpolation" method that treats
+    the problem as a special case of rational function interpolation.
+    This algorithm is quite stable, numerically, but even in a world of
+    exact computation, unless the x coordinates are chosen very
+    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+    polynomial interpolation itself is a very ill-conditioned process
+    due to the Runge phenomenon.
+
+    Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
+
+    """
+    def __init__(self, xi, yi=None, axis=0):
+        _Interpolator1D.__init__(self, xi, yi, axis)
+
+        self.xi = np.asfarray(xi)
+        self.set_yi(yi)
+        self.n = len(self.xi)
+
+        # Barycentric weights: wi[j] = 1 / prod_{k != j} (xi[j] - xi[k]),
+        # built incrementally one point at a time and inverted at the end.
+        self.wi = np.zeros(self.n)
+        self.wi[0] = 1
+        for j in range(1, self.n):
+            self.wi[:j] *= (self.xi[j]-self.xi[:j])
+            self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
+        self.wi **= -1
+
+    def set_yi(self, yi, axis=None):
+        """
+        Update the y values to be interpolated
+
+        The barycentric interpolation algorithm requires the calculation
+        of weights, but these depend only on the xi. The yi can be changed
+        at any time.
+
+        Parameters
+        ----------
+        yi : array_like
+            The y coordinates of the points the polynomial should pass through.
+            If None, the y values will be supplied later.
+        axis : int, optional
+            Axis in the yi array corresponding to the x-coordinate values.
+
+        """
+        if yi is None:
+            self.yi = None
+            return
+        self._set_yi(yi, xi=self.xi, axis=axis)
+        self.yi = self._reshape_yi(yi)
+        self.n, self.r = self.yi.shape
+
+    def add_xi(self, xi, yi=None):
+        """
+        Add more x values to the set to be interpolated
+
+        The barycentric interpolation algorithm allows easy updating by
+        adding more points for the polynomial to pass through.
+
+        Parameters
+        ----------
+        xi : array_like
+            The x coordinates of the points that the polynomial should pass
+            through.
+        yi : array_like, optional
+            The y coordinates of the points the polynomial should pass through.
+            Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
+            vector-valued.
+            If `yi` is not given, the y values will be supplied later. `yi` should
+            be given if and only if the interpolator has y values specified.
+
+        """
+        if yi is not None:
+            if self.yi is None:
+                raise ValueError("No previous yi value to update!")
+            yi = self._reshape_yi(yi, check=True)
+            self.yi = np.vstack((self.yi,yi))
+        else:
+            if self.yi is not None:
+                raise ValueError("No update to yi provided!")
+        old_n = self.n
+        self.xi = np.concatenate((self.xi,xi))
+        self.n = len(self.xi)
+        # Un-invert the existing weights so the incremental product update
+        # from __init__ can be continued for the new points, then re-invert.
+        self.wi **= -1
+        old_wi = self.wi
+        self.wi = np.zeros(self.n)
+        self.wi[:old_n] = old_wi
+        for j in range(old_n, self.n):
+            self.wi[:j] *= (self.xi[j]-self.xi[:j])
+            self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
+        self.wi **= -1
+
+    def __call__(self, x):
+        """Evaluate the interpolating polynomial at the points x
+
+        Parameters
+        ----------
+        x : array_like
+            Points to evaluate the interpolant at.
+
+        Returns
+        -------
+        y : array_like
+            Interpolated values. Shape is determined by replacing
+            the interpolation axis in the original array with the shape of x.
+
+        Notes
+        -----
+        Currently the code computes an outer product between x and the
+        weights, that is, it constructs an intermediate array of size
+        N by len(x), where N is the degree of the polynomial.
+        """
+        return _Interpolator1D.__call__(self, x)
+
+    def _evaluate(self, x):
+        if x.size == 0:
+            p = np.zeros((0, self.r), dtype=self.dtype)
+        else:
+            # Second (true) barycentric formula:
+            # p(x) = sum_j (w_j / (x - x_j)) y_j / sum_j (w_j / (x - x_j)).
+            c = x[...,np.newaxis]-self.xi
+            z = c == 0
+            # Avoid division by zero at exact nodes; those entries are
+            # patched with the exact yi values below.
+            c[z] = 1
+            c = self.wi/c
+            p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis]
+            # Now fix where x==some xi
+            r = np.nonzero(z)
+            if len(r) == 1:  # evaluation at a scalar
+                if len(r[0]) > 0:  # equals one of the points
+                    p = self.yi[r[0][0]]
+            else:
+                p[r[:-1]] = self.yi[r[-1]]
+        return p
+
+
+def barycentric_interpolate(xi, yi, x, axis=0):
+    """
+    Convenience function for polynomial interpolation.
+
+    Constructs a polynomial that passes through a given set of points,
+    then evaluates the polynomial. For reasons of numerical stability,
+    this function does not compute the coefficients of the polynomial.
+
+    This function uses a "barycentric interpolation" method that treats
+    the problem as a special case of rational function interpolation.
+    This algorithm is quite stable, numerically, but even in a world of
+    exact computation, unless the `x` coordinates are chosen very
+    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+    polynomial interpolation itself is a very ill-conditioned process
+    due to the Runge phenomenon.
+
+    Parameters
+    ----------
+    xi : array_like
+        1-D array of x coordinates of the points the polynomial should
+        pass through
+    yi : array_like
+        The y coordinates of the points the polynomial should pass through.
+    x : scalar or array_like
+        Points to evaluate the interpolator at.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Returns
+    -------
+    y : scalar or array_like
+        Interpolated values. Shape is determined by replacing
+        the interpolation axis in the original array with the shape of x.
+
+    See Also
+    --------
+    BarycentricInterpolator : Barycentric interpolator
+
+    Notes
+    -----
+    Construction of the interpolation weights is a relatively slow process.
+    If you want to call this many times with the same xi (but possibly
+    varying yi or x) you should use the class `BarycentricInterpolator`.
+    This is what this function uses internally.
+
+    Examples
+    --------
+    We can interpolate 2D observed data using barycentric interpolation:
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import barycentric_interpolate
+    >>> x_observed = np.linspace(0.0, 10.0, 11)
+    >>> y_observed = np.sin(x_observed)
+    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+    >>> y = barycentric_interpolate(x_observed, y_observed, x)
+    >>> plt.plot(x_observed, y_observed, "o", label="observation")
+    >>> plt.plot(x, y, label="barycentric interpolation")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    # Build the interpolator (O(n^2) weight computation) and evaluate once.
+    return BarycentricInterpolator(xi, yi, axis=axis)(x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/rbf.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/rbf.py
new file mode 100644
index 0000000..7c05572
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/rbf.py
@@ -0,0 +1,279 @@
+"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
+
+Written by John Travers , February 2007
+Based closely on Matlab code by Alex Chirokov
+Additional, large, improvements by Robert Hetland
+Some additional alterations by Travis Oliphant
+Interpolation with multi-dimensional target domain by Josua Sassen
+
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+Copyright (c) 2006-2007, Robert Hetland
+Copyright (c) 2007, John Travers
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of Robert Hetland nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import numpy as np
+
+from scipy import linalg
+from scipy.special import xlogy
+from scipy.spatial.distance import cdist, pdist, squareform
+
+__all__ = ['Rbf']
+
+
class Rbf(object):
    """
    Rbf(*args)

    A class for radial basis function interpolation of functions from
    N-D scattered data to an M-D domain.

    Parameters
    ----------
    *args : arrays
        x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
        and d is the array of values at the nodes
    function : str or callable, optional
        The radial basis function, based on the radius, r, given by the norm
        (default is Euclidean distance); the default is 'multiquadric'::

            'multiquadric': sqrt((r/self.epsilon)**2 + 1)
            'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
            'gaussian': exp(-(r/self.epsilon)**2)
            'linear': r
            'cubic': r**3
            'quintic': r**5
            'thin_plate': r**2 * log(r)

        If callable, then it must take 2 arguments (self, r). The epsilon
        parameter will be available as self.epsilon. Other keyword
        arguments passed in will be available as well.

    epsilon : float, optional
        Adjustable constant for gaussian or multiquadrics functions
        - defaults to approximate average distance between nodes (which is
        a good start).
    smooth : float, optional
        Values greater than zero increase the smoothness of the
        approximation. 0 is for interpolation (default), the function will
        always go through the nodal points in this case.
    norm : str, callable, optional
        A function that returns the 'distance' between two points, with
        inputs as arrays of positions (x, y, z, ...), and an output as an
        array of distance. E.g., the default: 'euclidean', such that the result
        is a matrix of the distances from each point in ``x1`` to each point in
        ``x2``. For more options, see documentation of
        `scipy.spatial.distances.cdist`.
    mode : str, optional
        Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
        '1-D' the data `d` will be considered as 1-D and flattened
        internally. When it is 'N-D' the data `d` is assumed to be an array of
        shape (n_samples, m), where m is the dimension of the target domain.


    Attributes
    ----------
    N : int
        The number of data points (as determined by the input arrays).
    di : ndarray
        The 1-D array of data values at each of the data coordinates `xi`.
    xi : ndarray
        The 2-D array of data coordinates.
    function : str or callable
        The radial basis function. See description under Parameters.
    epsilon : float
        Parameter used by gaussian or multiquadrics functions. See Parameters.
    smooth : float
        Smoothing parameter. See description under Parameters.
    norm : str or callable
        The distance function. See description under Parameters.
    mode : str
        Mode of the interpolation. See description under Parameters.
    nodes : ndarray
        A 1-D array of node values for the interpolation.
    A : internal property, do not use

    Examples
    --------
    >>> from scipy.interpolate import Rbf
    >>> x, y, z, d = np.random.rand(4, 50)
    >>> rbfi = Rbf(x, y, z, d)  # radial basis function interpolator instance
    >>> xi = yi = zi = np.linspace(0, 1, 20)
    >>> di = rbfi(xi, yi, zi)  # interpolated values
    >>> di.shape
    (20,)

    """
    # Available radial basis functions that can be selected as strings;
    # they all start with _h_ (self._init_function relies on that)
    def _h_multiquadric(self, r):
        return np.sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_inverse_multiquadric(self, r):
        return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_gaussian(self, r):
        return np.exp(-(1.0/self.epsilon*r)**2)

    def _h_linear(self, r):
        return r

    def _h_cubic(self, r):
        return r**3

    def _h_quintic(self, r):
        return r**5

    def _h_thin_plate(self, r):
        # xlogy evaluates r**2 * log(r) while returning 0 at r == 0
        # instead of a nan from 0*log(0)
        return xlogy(r**2, r)

    # Setup self._function and do smoke test on initial r
    def _init_function(self, r):
        if isinstance(self.function, str):
            self.function = self.function.lower()
            _mapped = {'inverse': 'inverse_multiquadric',
                       'inverse multiquadric': 'inverse_multiquadric',
                       'thin-plate': 'thin_plate'}
            if self.function in _mapped:
                self.function = _mapped[self.function]

            func_name = "_h_" + self.function
            if hasattr(self, func_name):
                self._function = getattr(self, func_name)
            else:
                functionlist = [x[3:] for x in dir(self)
                                if x.startswith('_h_')]
                raise ValueError("function must be a callable or one of " +
                                 ", ".join(functionlist))
            # NOTE: the original code re-assigned self._function here a
            # second time via getattr(self, "_h_"+self.function); that
            # statement was redundant (the if-branch above already did it,
            # and the else-branch raises) and has been removed.
        elif callable(self.function):
            allow_one = False
            if hasattr(self.function, 'func_code') or \
                    hasattr(self.function, '__code__'):
                val = self.function
                allow_one = True
            elif hasattr(self.function, "__call__"):
                val = self.function.__call__.__func__
            else:
                raise ValueError("Cannot determine number of arguments to "
                                 "function")

            argcount = val.__code__.co_argcount
            if allow_one and argcount == 1:
                self._function = self.function
            elif argcount == 2:
                # bind the 2-argument callable as a method of this instance
                # so it can reach self.epsilon and any extra kwargs
                self._function = self.function.__get__(self, Rbf)
            else:
                raise ValueError("Function argument must take 1 or 2 "
                                 "arguments.")

        # smoke test: the function must map an array to an array of the
        # same shape
        a0 = self._function(r)
        if a0.shape != r.shape:
            raise ValueError("Callable must take array and return array of "
                             "the same shape")
        return a0

    def __init__(self, *args, **kwargs):
        # `args` can be a variable number of arrays; we flatten them and store
        # them as a single 2-D array `xi` of shape (n_args-1, array_size),
        # plus a 1-D array `di` for the values.
        # All arrays must have the same number of elements.
        # np.float64 is used instead of the np.float_ alias, which was
        # removed in NumPy 2.0 (they are the same dtype).
        self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
                              for a in args[:-1]])
        self.N = self.xi.shape[-1]

        self.mode = kwargs.pop('mode', '1-D')

        if self.mode == '1-D':
            self.di = np.asarray(args[-1]).flatten()
            self._target_dim = 1
        elif self.mode == 'N-D':
            self.di = np.asarray(args[-1])
            self._target_dim = self.di.shape[-1]
        else:
            raise ValueError("Mode has to be 1-D or N-D.")

        if not all([x.size == self.di.shape[0] for x in self.xi]):
            raise ValueError("All arrays must be equal length.")

        self.norm = kwargs.pop('norm', 'euclidean')
        self.epsilon = kwargs.pop('epsilon', None)
        if self.epsilon is None:
            # default epsilon is the "the average distance between nodes" based
            # on a bounding hypercube
            ximax = np.amax(self.xi, axis=1)
            ximin = np.amin(self.xi, axis=1)
            edges = ximax - ximin
            edges = edges[np.nonzero(edges)]
            self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)

        self.smooth = kwargs.pop('smooth', 0.0)
        self.function = kwargs.pop('function', 'multiquadric')

        # attach anything left in kwargs to self for use by any user-callable
        # function or to save on the object returned.
        for item, value in kwargs.items():
            setattr(self, item, value)

        # Compute weights
        if self._target_dim > 1:
            # If we have more than one target dimension,
            # we first factorize the matrix and then solve once per dimension
            self.nodes = np.zeros((self.N, self._target_dim),
                                  dtype=self.di.dtype)
            lu, piv = linalg.lu_factor(self.A)
            for i in range(self._target_dim):
                self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
        else:
            self.nodes = linalg.solve(self.A, self.di)

    @property
    def A(self):
        # this only exists for backwards compatibility: self.A was available
        # and, at least technically, public.
        r = squareform(pdist(self.xi.T, self.norm))  # Pairwise norm
        return self._init_function(r) - np.eye(self.N)*self.smooth

    def _call_norm(self, x1, x2):
        # Distances between every point in x1 and every point in x2;
        # points are stored column-wise, hence the transposes.
        return cdist(x1.T, x2.T, self.norm)

    def __call__(self, *args):
        args = [np.asarray(x) for x in args]
        if not all([x.shape == y.shape for x in args for y in args]):
            raise ValueError("Array lengths must be equal")
        if self._target_dim > 1:
            shp = args[0].shape + (self._target_dim,)
        else:
            shp = args[0].shape
        xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
        r = self._call_norm(xa, self.xi)
        return np.dot(self._function(r), self.nodes).reshape(shp)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/setup.py
new file mode 100644
index 0000000..d0165e6
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/setup.py
@@ -0,0 +1,63 @@
+from os.path import join
+
+
def configuration(parent_package='',top_path=None):
    # numpy.distutils build configuration for the scipy.interpolate
    # subpackage: compiles the Fortran FITPACK library and the C extension
    # modules that wrap it.
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils import (get_f2py_int64_options,
                                    ilp64_pre_build_hook,
                                    uses_blas64)

    if uses_blas64():
        # TODO: Note that fitpack does not use BLAS/LAPACK.
        # The reason why we use 64-bit ints only in this case
        # is because scipy._build_utils knows the 64-bit int
        # flags for too few Fortran compilers, so we cannot turn
        # this on by default.
        pre_build_hook = ilp64_pre_build_hook
        f2py_options = get_f2py_int64_options()
        define_macros = [("HAVE_ILP64", None)]
    else:
        pre_build_hook = None
        f2py_options = None
        define_macros = []

    config = Configuration('interpolate', parent_package, top_path)

    # Fortran FITPACK sources, built as a static library linked into the
    # _fitpack and dfitpack extensions below.
    fitpack_src = [join('fitpack', '*.f')]
    config.add_library('fitpack', sources=fitpack_src,
                       _pre_build_hook=pre_build_hook)

    # Cython-generated C extensions (sources are pre-cythonized .c files).
    config.add_extension('interpnd',
                         sources=['interpnd.c'])

    config.add_extension('_ppoly',
                         sources=['_ppoly.c'])

    config.add_extension('_bspl',
                         sources=['_bspl.c'],
                         depends=['src/__fitpack.h'])

    # Hand-written C wrapper around the FITPACK library.
    config.add_extension('_fitpack',
                         sources=['src/_fitpackmodule.c'],
                         libraries=['fitpack'],
                         define_macros=define_macros,
                         depends=(['src/__fitpack.h']
                                  + fitpack_src)
                         )

    # f2py-generated wrapper (from the .pyf signature file).
    config.add_extension('dfitpack',
                         sources=['src/fitpack.pyf'],
                         libraries=['fitpack'],
                         define_macros=define_macros,
                         depends=fitpack_src,
                         f2py_options=f2py_options
                         )

    config.add_data_dir('tests')

    return config
+
+
if __name__ == '__main__':
    # Allow building this subpackage standalone: python setup.py build
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/data/bug-1310.npz b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/data/bug-1310.npz
new file mode 100644
index 0000000..8dc93c7
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/data/bug-1310.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/data/estimate_gradients_hang.npy b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/data/estimate_gradients_hang.npy
new file mode 100644
index 0000000..79e1b09
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/data/estimate_gradients_hang.npy differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_bsplines.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_bsplines.py
new file mode 100644
index 0000000..c0def42
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_bsplines.py
@@ -0,0 +1,1257 @@
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_,
+ suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,
+ make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider,
+ sproot, splint, insert)
+import scipy.linalg as sl
+from scipy._lib import _pep440
+
+from scipy.interpolate._bsplines import _not_a_knot, _augknt
+import scipy.interpolate._fitpack_impl as _impl
+from scipy.interpolate._fitpack import _splint
+
+
class TestBSpline(object):
    """Tests for the scipy.interpolate.BSpline class: construction,
    evaluation (incl. naive reference implementations and FITPACK's splev),
    derivatives/antiderivatives, integration, and axis handling."""

    def test_ctor(self):
        # knots should be an ordered 1-D array of finite real numbers
        assert_raises((TypeError, ValueError), BSpline,
                **dict(t=[1, 1.j], c=[1.], k=0))
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
        assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
        assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
        assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))

        # for n+k+1 knots and degree k need at least n coefficients
        assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
        assert_raises(ValueError, BSpline,
                **dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))

        # non-integer orders
        assert_raises(TypeError, BSpline,
                **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
        assert_raises(TypeError, BSpline,
                **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))

        # basic interval cannot have measure zero (here: [1..1])
        assert_raises(ValueError, BSpline,
                **dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))

        # tck vs self.tck
        n, k = 11, 3
        t = np.arange(n+k+1)
        c = np.random.random(n)
        b = BSpline(t, c, k)

        assert_allclose(t, b.t)
        assert_allclose(c, b.c)
        assert_equal(k, b.k)

    def test_tck(self):
        b = _make_random_spline()
        tck = b.tck

        assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
        assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
        assert_equal(b.k, tck[2])

        # b.tck is read-only
        with pytest.raises(AttributeError):
            b.tck = 'foo'

    def test_degree_0(self):
        xx = np.linspace(0, 1, 10)

        # degree-0 spline is piecewise constant
        b = BSpline(t=[0, 1], c=[3.], k=0)
        assert_allclose(b(xx), 3)

        b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
        assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))

    def test_degree_1(self):
        t = [0, 1, 2, 3, 4]
        c = [1, 2, 3]
        k = 1
        b = BSpline(t, c, k)

        # compare against the hand-written hat function B_012 and FITPACK
        x = np.linspace(1, 3, 50)
        assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
                        b(x), atol=1e-14)
        assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)

    def test_bernstein(self):
        # a special knot vector: Bernstein polynomials
        k = 3
        t = np.asarray([0]*(k+1) + [1]*(k+1))
        c = np.asarray([1., 2., 3., 4.])
        bp = BPoly(c.reshape(-1, 1), [0, 1])
        bspl = BSpline(t, c, k)

        xx = np.linspace(-1., 2., 10)
        assert_allclose(bp(xx, extrapolate=True),
                        bspl(xx, extrapolate=True), atol=1e-14)
        assert_allclose(splev(xx, (t, c, k)),
                        bspl(xx), atol=1e-14)

    def test_rndm_naive_eval(self):
        # test random coefficient spline *on the base interval*,
        # t[k] <= x < t[-k-1]
        b = _make_random_spline()
        t, c, k = b.tck
        xx = np.linspace(t[k], t[-k-1], 50)
        y_b = b(xx)

        y_n = [_naive_eval(x, t, c, k) for x in xx]
        assert_allclose(y_b, y_n, atol=1e-14)

        y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
        assert_allclose(y_b, y_n2, atol=1e-14)

    def test_rndm_splev(self):
        b = _make_random_spline()
        t, c, k = b.tck
        xx = np.linspace(t[k], t[-k-1], 50)
        assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)

    def test_rndm_splrep(self):
        np.random.seed(1234)
        x = np.sort(np.random.random(20))
        y = np.random.random(20)

        tck = splrep(x, y)
        b = BSpline(*tck)

        t, k = b.t, b.k
        xx = np.linspace(t[k], t[-k-1], 80)
        assert_allclose(b(xx), splev(xx, tck), atol=1e-14)

    def test_rndm_unity(self):
        # with all-ones coefficients a B-spline sums the basis: partition
        # of unity on the base interval
        b = _make_random_spline()
        b.c = np.ones_like(b.c)
        xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
        assert_allclose(b(xx), 1.)

    def test_vectorization(self):
        n, k = 22, 3
        t = np.sort(np.random.random(n))
        c = np.random.random(size=(n, 6, 7))
        b = BSpline(t, c, k)
        tm, tp = t[k], t[-k-1]
        xx = tm + (tp - tm) * np.random.random((3, 4, 5))
        assert_equal(b(xx).shape, (3, 4, 5, 6, 7))

    def test_len_c(self):
        # for n+k+1 knots, only first n coefs are used.
        # and BTW this is consistent with FITPACK
        n, k = 33, 3
        t = np.sort(np.random.random(n+k+1))
        c = np.random.random(n)

        # pad coefficients with random garbage
        c_pad = np.r_[c, np.random.random(k+1)]

        b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)

        dt = t[-1] - t[0]
        xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
        assert_allclose(b(xx), b_pad(xx), atol=1e-14)
        assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
        assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)

    def test_endpoints(self):
        # base interval is closed
        b = _make_random_spline()
        t, _, k = b.tck
        tm, tp = t[k], t[-k-1]
        for extrap in (True, False):
            assert_allclose(b([tm, tp], extrap),
                            b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)

    def test_continuity(self):
        # assert continuity at internal knots
        b = _make_random_spline()
        t, _, k = b.tck
        assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
                        atol=1e-9)

    def test_extrap(self):
        b = _make_random_spline()
        t, c, k = b.tck
        dt = t[-1] - t[0]
        xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
        mask = (t[k] < xx) & (xx < t[-k-1])

        # extrap has no effect within the base interval
        assert_allclose(b(xx[mask], extrapolate=True),
                        b(xx[mask], extrapolate=False))

        # extrapolated values agree with FITPACK
        assert_allclose(b(xx, extrapolate=True),
                        splev(xx, (t, c, k), ext=0))

    def test_default_extrap(self):
        # BSpline defaults to extrapolate=True
        b = _make_random_spline()
        t, _, k = b.tck
        xx = [t[0] - 1, t[-1] + 1]
        yy = b(xx)
        assert_(not np.all(np.isnan(yy)))

    def test_periodic_extrap(self):
        np.random.seed(1234)
        t = np.sort(np.random.random(8))
        c = np.random.random(4)
        k = 3
        b = BSpline(t, c, k, extrapolate='periodic')
        n = t.size - (k + 1)

        # periodic extrapolation folds x back into the base interval
        dt = t[-1] - t[0]
        xx = np.linspace(t[k] - dt, t[n] + dt, 50)
        xy = t[k] + (xx - t[k]) % (t[n] - t[k])
        assert_allclose(b(xx), splev(xy, (t, c, k)))

        # Direct check
        xx = [-1, 0, 0.5, 1]
        xy = t[k] + (xx - t[k]) % (t[n] - t[k])
        assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True))

    def test_ppoly(self):
        b = _make_random_spline()
        t, c, k = b.tck
        pp = PPoly.from_spline((t, c, k))

        xx = np.linspace(t[k], t[-k], 100)
        assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)

    def test_derivative_rndm(self):
        b = _make_random_spline()
        t, c, k = b.tck
        xx = np.linspace(t[0], t[-1], 50)
        xx = np.r_[xx, t]

        for der in range(1, k+1):
            yd = splev(xx, (t, c, k), der=der)
            assert_allclose(yd, b(xx, nu=der), atol=1e-14)

        # higher derivatives all vanish
        assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)

    def test_derivative_jumps(self):
        # example from de Boor, Chap IX, example (24)
        # NB: knots augmented & corresp coefs are zeroed out
        # in agreement with the convention (29)
        k = 2
        t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
        np.random.seed(1234)
        c = np.r_[0, 0, np.random.random(5), 0, 0]
        b = BSpline(t, c, k)

        # b is continuous at x != 6 (triple knot)
        x = np.asarray([1, 3, 4, 6])
        assert_allclose(b(x[x != 6] - 1e-10),
                        b(x[x != 6] + 1e-10))
        assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))

        # 1st derivative jumps at double knots, 1 & 6:
        x0 = np.asarray([3, 4])
        assert_allclose(b(x0 - 1e-10, nu=1),
                        b(x0 + 1e-10, nu=1))
        x1 = np.asarray([1, 6])
        assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
                                       b(x1 + 1e-10, nu=1))))

        # 2nd derivative is not guaranteed to be continuous either
        assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
                                       b(x + 1e-10, nu=2))))

    def test_basis_element_quadratic(self):
        xx = np.linspace(-1, 4, 20)
        b = BSpline.basis_element(t=[0, 1, 2, 3])
        assert_allclose(b(xx),
                        splev(xx, (b.t, b.c, b.k)), atol=1e-14)
        assert_allclose(b(xx),
                        B_0123(xx), atol=1e-14)

        b = BSpline.basis_element(t=[0, 1, 1, 2])
        xx = np.linspace(0, 2, 10)
        assert_allclose(b(xx),
                        np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)

    def test_basis_element_rndm(self):
        b = _make_random_spline()
        t, c, k = b.tck
        xx = np.linspace(t[k], t[-k-1], 20)
        assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)

    def test_cmplx(self):
        # complex coefficients evaluate as (real part) + 1j*(imag part)
        b = _make_random_spline()
        t, c, k = b.tck
        cc = c * (1. + 3.j)

        b = BSpline(t, cc, k)
        b_re = BSpline(t, b.c.real, k)
        b_im = BSpline(t, b.c.imag, k)

        xx = np.linspace(t[k], t[-k-1], 20)
        assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
        assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)

    def test_nan(self):
        # nan in, nan out.
        b = BSpline.basis_element([0, 1, 1, 2])
        assert_(np.isnan(b(np.nan)))

    def test_derivative_method(self):
        b = _make_random_spline(k=5)
        t, c, k = b.tck
        b0 = BSpline(t, c, k)
        xx = np.linspace(t[k], t[-k-1], 20)
        for j in range(1, k):
            b = b.derivative()
            assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)

    def test_antiderivative_method(self):
        b = _make_random_spline()
        t, c, k = b.tck
        xx = np.linspace(t[k], t[-k-1], 20)
        assert_allclose(b.antiderivative().derivative()(xx),
                        b(xx), atol=1e-14, rtol=1e-14)

        # repeat with N-D array for c
        c = np.c_[c, c, c]
        c = np.dstack((c, c))
        b = BSpline(t, c, k)
        assert_allclose(b.antiderivative().derivative()(xx),
                        b(xx), atol=1e-14, rtol=1e-14)

    def test_integral(self):
        b = BSpline.basis_element([0, 1, 2])    # x for x < 1 else 2 - x
        assert_allclose(b.integrate(0, 1), 0.5)
        assert_allclose(b.integrate(1, 0), -1 * 0.5)
        assert_allclose(b.integrate(1, 0), -0.5)

        # extrapolate or zeros outside of [0, 2]; default is yes
        assert_allclose(b.integrate(-1, 1), 0)
        assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
        assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
        assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)

        # Test ``_fitpack._splint()``
        t, c, k = b.tck
        assert_allclose(b.integrate(1, -1, extrapolate=False),
                        _splint(t, c, k, 1, -1)[0])

        # Test ``extrapolate='periodic'``.
        b.extrapolate = 'periodic'
        i = b.antiderivative()
        period_int = i(2) - i(0)

        assert_allclose(b.integrate(0, 2), period_int)
        assert_allclose(b.integrate(2, 0), -1 * period_int)
        assert_allclose(b.integrate(-9, -7), period_int)
        assert_allclose(b.integrate(-8, -4), 2 * period_int)

        assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))
        assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))
        assert_allclose(b.integrate(1.5 + 12, 3 + 12),
                        i(1) - i(0) + i(2) - i(1.5))
        assert_allclose(b.integrate(1.5, 3 + 12),
                        i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)

        assert_allclose(b.integrate(0, -1), i(0) - i(1))
        assert_allclose(b.integrate(-9, -10), i(0) - i(1))
        assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)

    def test_integrate_ppoly(self):
        # test .integrate method to be consistent with PPoly.integrate
        x = [0, 1, 2, 3, 4]
        b = make_interp_spline(x, x)
        b.extrapolate = 'periodic'
        p = PPoly.from_spline(b)

        for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:
            assert_allclose(b.integrate(x0, x1),
                            p.integrate(x0, x1))

    def test_subclassing(self):
        # classmethods should not decay to the base class
        class B(BSpline):
            pass

        b = B.basis_element([0, 1, 2, 2])
        assert_equal(b.__class__, B)
        assert_equal(b.derivative().__class__, B)
        assert_equal(b.antiderivative().__class__, B)

    @pytest.mark.parametrize('axis', range(-4, 4))
    def test_axis(self, axis):
        n, k = 22, 3
        t = np.linspace(0, 1, n + k + 1)
        sh = [6, 7, 8]
        # We need the positive axis for some of the indexing and slices used
        # in this test.
        pos_axis = axis % 4
        sh.insert(pos_axis, n)   # [22, 6, 7, 8] etc
        c = np.random.random(size=sh)
        b = BSpline(t, c, k, axis=axis)
        assert_equal(b.c.shape,
                     [sh[pos_axis],] + sh[:pos_axis] + sh[pos_axis+1:])

        xp = np.random.random((3, 4, 5))
        assert_equal(b(xp).shape,
                     sh[:pos_axis] + list(xp.shape) + sh[pos_axis+1:])

        # -c.ndim <= axis < c.ndim
        for ax in [-c.ndim - 1, c.ndim]:
            assert_raises(np.AxisError, BSpline,
                          **dict(t=t, c=c, k=k, axis=ax))

        # derivative, antiderivative keeps the axis
        for b1 in [BSpline(t, c, k, axis=axis).derivative(),
                   BSpline(t, c, k, axis=axis).derivative(2),
                   BSpline(t, c, k, axis=axis).antiderivative(),
                   BSpline(t, c, k, axis=axis).antiderivative(2)]:
            assert_equal(b1.axis, b.axis)

    def test_neg_axis(self):
        k = 2
        t = [0, 1, 2, 3, 4, 5, 6]
        c = np.array([[-1, 2, 0, -1], [2, 0, -3, 1]])

        spl = BSpline(t, c, k, axis=-1)
        spl0 = BSpline(t, c[0], k)
        spl1 = BSpline(t, c[1], k)
        assert_equal(spl(2.5), [spl0(2.5), spl1(2.5)])
+
+
def test_knots_multiplicity():
    # Take a spline w/ random coefficients, throw in knots of varying
    # multiplicity.

    def check_splev(spline, idx, der=0, atol=1e-14, rtol=1e-14):
        # compare evaluations (including the extrapolated points just
        # outside the knot span) against FITPACK's splev
        t, c, k = spline.tck
        uniq = np.unique(t)
        pts = np.r_[t[0] - 0.1, 0.5*(uniq[1:] + uniq[:1]), t[-1] + 0.1]
        assert_allclose(splev(pts, (t, c, k), der), spline(pts, der),
                        atol=atol, rtol=rtol,
                        err_msg='der = %s k = %s' % (der, spline.k))

    # [the index `idx` is only there to make a failure traceback readable]
    for deg in [1, 2, 3, 4, 5]:
        base = _make_random_spline(k=deg)
        for idx, modified in enumerate(_make_multiples(base)):
            check_splev(modified, idx)
            for der in range(1, deg + 1):
                check_splev(modified, idx, der, 1e-12, 1e-12)
+
+
+### stolen from @pv, verbatim
+def _naive_B(x, k, i, t):
+ """
+ Naive way to compute B-spline basis functions. Useful only for testing!
+ computes B(x; t[i],..., t[i+k+1])
+ """
+ if k == 0:
+ return 1.0 if t[i] <= x < t[i+1] else 0.0
+ if t[i+k] == t[i]:
+ c1 = 0.0
+ else:
+ c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
+ if t[i+k+1] == t[i+1]:
+ c2 = 0.0
+ else:
+ c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
+ return (c1 + c2)
+
+
+### stolen from @pv, verbatim
def _naive_eval(x, t, c, k):
    """
    Naive B-spline evaluation. Useful only for testing!

    Sums only the k+1 basis functions whose support contains x.
    """
    # locate the knot interval containing x; the left edge of the base
    # interval is special-cased because intervals are half-open
    if x == t[k]:
        interval = k
    else:
        interval = np.searchsorted(t, x) - 1
    assert t[interval] <= x <= t[interval+1]
    assert k <= interval < len(t) - k
    total = 0.0
    for offset in range(k + 1):
        total += c[interval - offset] * _naive_B(x, k, interval - offset, t)
    return total
+
+
def _naive_eval_2(x, t, c, k):
    """Naive B-spline evaluation, another way: sum over the full basis."""
    n = len(t) - (k+1)
    assert n >= k+1
    assert len(c) >= n
    assert t[k] <= x <= t[n]
    total = 0.0
    for idx in range(n):
        total += c[idx] * _naive_B(x, k, idx, t)
    return total
+
+
+def _sum_basis_elements(x, t, c, k):
+ n = len(t) - (k+1)
+ assert n >= k+1
+ assert len(c) >= n
+ s = 0.
+ for i in range(n):
+ b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
+ s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements
+ return s
+
+
def B_012(x):
    """A linear B-spline (hat function) B(x | 0, 1, 2)."""
    x = np.atleast_1d(x)
    conds = [(x >= 0) & (x < 1),      # ascending limb
             (x >= 1) & (x <= 2),     # descending limb
             (x < 0) | (x > 2)]       # outside the support
    funcs = [lambda u: u, lambda u: 2. - u, lambda u: 0.]
    return np.piecewise(x, conds, funcs)
+
+
def B_0123(x, der=0):
    """A quadratic B-spline function B(x | 0, 1, 2, 3), or a derivative.

    Parameters
    ----------
    x : array_like
        Evaluation points.
    der : int, optional
        Derivative order: 0 (the value, default), 1 or 2. Other values
        raise ValueError. The ``der == 1`` case was previously missing
        and has been added (each branch is the derivative of the
        corresponding ``der == 0`` branch).

    Notes
    -----
    The conditions are open at the knots, so np.piecewise falls through
    to 0 exactly at x == 1 and x == 2; the ``x < 1`` branch also covers
    x < 0, matching extrapolation of the leftmost polynomial piece.
    Both quirks are intentional (preserved from the original) — callers
    compare against extrapolated BSpline.basis_element values.
    """
    x = np.atleast_1d(x)
    conds = [x < 1, (x > 1) & (x < 2), x > 2]
    if der == 0:
        funcs = [lambda x: x*x/2.,
                 lambda x: 3./4 - (x-3./2)**2,
                 lambda x: (3.-x)**2 / 2]
    elif der == 1:
        # d/dx of the branches above
        funcs = [lambda x: x,
                 lambda x: 3. - 2.*x,
                 lambda x: x - 3.]
    elif der == 2:
        funcs = [lambda x: 1.,
                 lambda x: -2.,
                 lambda x: 1.]
    else:
        raise ValueError('never be here: der=%s' % der)
    pieces = np.piecewise(x, conds, funcs)
    return pieces
+
+
+def _make_random_spline(n=35, k=3):
+ np.random.seed(123)
+ t = np.sort(np.random.random(n+k+1))
+ c = np.random.random(n)
+ return BSpline.construct_fast(t, c, k)
+
+
+def _make_multiples(b):
+ """Increase knot multiplicity."""
+ c, k = b.c, b.k
+
+ t1 = b.t.copy()
+ t1[17:19] = t1[17]
+ t1[22] = t1[21]
+ yield BSpline(t1, c, k)
+
+ t1 = b.t.copy()
+ t1[:k+1] = t1[0]
+ yield BSpline(t1, c, k)
+
+ t1 = b.t.copy()
+ t1[-k-1:] = t1[-1]
+ yield BSpline(t1, c, k)
+
+
+class TestInterop(object):
+ #
+ # Test that FITPACK-based spl* functions can deal with BSpline objects
+ #
+ def setup_method(self):
+ xx = np.linspace(0, 4.*np.pi, 41)
+ yy = np.cos(xx)
+ b = make_interp_spline(xx, yy)
+ self.tck = (b.t, b.c, b.k)
+ self.xx, self.yy, self.b = xx, yy, b
+
+ self.xnew = np.linspace(0, 4.*np.pi, 21)
+
+ c2 = np.c_[b.c, b.c, b.c]
+ self.c2 = np.dstack((c2, c2))
+ self.b2 = BSpline(b.t, self.c2, b.k)
+
+ def test_splev(self):
+ xnew, b, b2 = self.xnew, self.b, self.b2
+
+ # check that splev works with 1-D array of coefficients
+ # for array and scalar `x`
+ assert_allclose(splev(xnew, b),
+ b(xnew), atol=1e-15, rtol=1e-15)
+ assert_allclose(splev(xnew, b.tck),
+ b(xnew), atol=1e-15, rtol=1e-15)
+ assert_allclose([splev(x, b) for x in xnew],
+ b(xnew), atol=1e-15, rtol=1e-15)
+
+ # With N-D coefficients, there's a quirck:
+ # splev(x, BSpline) is equivalent to BSpline(x)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning,
+ "Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.")
+ assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15)
+
+ # However, splev(x, BSpline.tck) needs some transposes. This is because
+ # BSpline interpolates along the first axis, while the legacy FITPACK
+ # wrapper does list(map(...)) which effectively interpolates along the
+ # last axis. Like so:
+ sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0)
+ cc = b2.c.transpose(sh)
+ tck = (b2.t, cc, b2.k)
+ assert_allclose(splev(xnew, tck),
+ b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
+
+ def test_splrep(self):
+ x, y = self.xx, self.yy
+ # test that "new" splrep is equivalent to _impl.splrep
+ tck = splrep(x, y)
+ t, c, k = _impl.splrep(x, y)
+ assert_allclose(tck[0], t, atol=1e-15)
+ assert_allclose(tck[1], c, atol=1e-15)
+ assert_equal(tck[2], k)
+
+ # also cover the `full_output=True` branch
+ tck_f, _, _, _ = splrep(x, y, full_output=True)
+ assert_allclose(tck_f[0], t, atol=1e-15)
+ assert_allclose(tck_f[1], c, atol=1e-15)
+ assert_equal(tck_f[2], k)
+
+ # test that the result of splrep roundtrips with splev:
+ # evaluate the spline on the original `x` points
+ yy = splev(x, tck)
+ assert_allclose(y, yy, atol=1e-15)
+
+ # ... and also it roundtrips if wrapped in a BSpline
+ b = BSpline(*tck)
+ assert_allclose(y, b(x), atol=1e-15)
+
+ @pytest.mark.xfail(_pep440.parse(np.__version__) < _pep440.Version('1.14.0'),
+ reason='requires NumPy >= 1.14.0')
+ def test_splrep_errors(self):
+ # test that both "old" and "new" splrep raise for an N-D ``y`` array
+ # with n > 1
+ x, y = self.xx, self.yy
+ y2 = np.c_[y, y]
+ with assert_raises(ValueError):
+ splrep(x, y2)
+ with assert_raises(ValueError):
+ _impl.splrep(x, y2)
+
+ # input below minimum size
+ with assert_raises(TypeError, match="m > k must hold"):
+ splrep(x[:3], y[:3])
+ with assert_raises(TypeError, match="m > k must hold"):
+ _impl.splrep(x[:3], y[:3])
+
+ def test_splprep(self):
+ x = np.arange(15).reshape((3, 5))
+ b, u = splprep(x)
+ tck, u1 = _impl.splprep(x)
+
+ # test the roundtrip with splev for both "old" and "new" output
+ assert_allclose(u, u1, atol=1e-15)
+ assert_allclose(splev(u, b), x, atol=1e-15)
+ assert_allclose(splev(u, tck), x, atol=1e-15)
+
+ # cover the ``full_output=True`` branch
+ (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
+ assert_allclose(u, u_f, atol=1e-15)
+ assert_allclose(splev(u_f, b_f), x, atol=1e-15)
+
+ def test_splprep_errors(self):
+ # test that both "old" and "new" code paths raise for x.ndim > 2
+ x = np.arange(3*4*5).reshape((3, 4, 5))
+ with assert_raises(ValueError, match="too many values to unpack"):
+ splprep(x)
+ with assert_raises(ValueError, match="too many values to unpack"):
+ _impl.splprep(x)
+
+ # input below minimum size
+ x = np.linspace(0, 40, num=3)
+ with assert_raises(TypeError, match="m > k must hold"):
+ splprep([x])
+ with assert_raises(TypeError, match="m > k must hold"):
+ _impl.splprep([x])
+
+ # automatically calculated parameters are non-increasing
+ # see gh-7589
+ x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]
+ with assert_raises(ValueError, match="Invalid inputs"):
+ splprep([x])
+ with assert_raises(ValueError, match="Invalid inputs"):
+ _impl.splprep([x])
+
+ # given non-increasing parameter values u
+ x = [1, 3, 2, 4]
+ u = [0, 0.3, 0.2, 1]
+ with assert_raises(ValueError, match="Invalid inputs"):
+ splprep(*[[x], None, u])
+
+ def test_sproot(self):
+ b, b2 = self.b, self.b2
+ roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
+ # sproot accepts a BSpline obj w/ 1-D coef array
+ assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)
+ assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)
+
+ # ... and deals with trailing dimensions if coef array is N-D
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning,
+ "Calling sproot.. with BSpline objects with c.ndim > 1 is not recommended.")
+ r = sproot(b2, mest=50)
+ r = np.asarray(r)
+
+ assert_equal(r.shape, (3, 2, 4))
+ assert_allclose(r - roots, 0, atol=1e-12)
+
+ # and legacy behavior is preserved for a tck tuple w/ N-D coef
+ c2r = b2.c.transpose(1, 2, 0)
+ rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
+ assert_equal(rr.shape, (3, 2, 4))
+ assert_allclose(rr - roots, 0, atol=1e-12)
+
+ def test_splint(self):
+ # test that splint accepts BSpline objects
+ b, b2 = self.b, self.b2
+ assert_allclose(splint(0, 1, b),
+ splint(0, 1, b.tck), atol=1e-14)
+ assert_allclose(splint(0, 1, b),
+ b.integrate(0, 1), atol=1e-14)
+
+ # ... and deals with N-D arrays of coefficients
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning,
+ "Calling splint.. with BSpline objects with c.ndim > 1 is not recommended.")
+ assert_allclose(splint(0, 1, b2), b2.integrate(0, 1), atol=1e-14)
+
+ # and the legacy behavior is preserved for a tck tuple w/ N-D coef
+ c2r = b2.c.transpose(1, 2, 0)
+ integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
+ assert_equal(integr.shape, (3, 2))
+ assert_allclose(integr,
+ splint(0, 1, b), atol=1e-14)
+
+ def test_splder(self):
+ for b in [self.b, self.b2]:
+ # pad the c array (FITPACK convention)
+ ct = len(b.t) - len(b.c)
+ if ct > 0:
+ b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
+
+ for n in [1, 2, 3]:
+ bd = splder(b)
+ tck_d = _impl.splder((b.t, b.c, b.k))
+ assert_allclose(bd.t, tck_d[0], atol=1e-15)
+ assert_allclose(bd.c, tck_d[1], atol=1e-15)
+ assert_equal(bd.k, tck_d[2])
+ assert_(isinstance(bd, BSpline))
+ assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
+
+ def test_splantider(self):
+ for b in [self.b, self.b2]:
+ # pad the c array (FITPACK convention)
+ ct = len(b.t) - len(b.c)
+ if ct > 0:
+ b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
+
+ for n in [1, 2, 3]:
+ bd = splantider(b)
+ tck_d = _impl.splantider((b.t, b.c, b.k))
+ assert_allclose(bd.t, tck_d[0], atol=1e-15)
+ assert_allclose(bd.c, tck_d[1], atol=1e-15)
+ assert_equal(bd.k, tck_d[2])
+ assert_(isinstance(bd, BSpline))
+ assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
+
+ def test_insert(self):
+ b, b2, xx = self.b, self.b2, self.xx
+
+ j = b.t.size // 2
+ tn = 0.5*(b.t[j] + b.t[j+1])
+
+ bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
+ assert_allclose(splev(xx, bn),
+ splev(xx, tck_n), atol=1e-15)
+ assert_(isinstance(bn, BSpline))
+ assert_(isinstance(tck_n, tuple)) # back-compat: tck in, tck out
+
+ # for N-D array of coefficients, BSpline.c needs to be transposed
+ # after that, the results are equivalent.
+ sh = tuple(range(b2.c.ndim))
+ c_ = b2.c.transpose(sh[1:] + (0,))
+ tck_n2 = insert(tn, (b2.t, c_, b2.k))
+
+ bn2 = insert(tn, b2)
+
+ # need a transpose for comparing the results, cf test_splev
+ assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
+ bn2(xx), atol=1e-15)
+ assert_(isinstance(bn2, BSpline))
+ assert_(isinstance(tck_n2, tuple)) # back-compat: tck in, tck out
+
+
+class TestInterp(object):
+ #
+ # Test basic ways of constructing interpolating splines.
+ #
+ xx = np.linspace(0., 2.*np.pi)
+ yy = np.sin(xx)
+
+ def test_non_int_order(self):
+ with assert_raises(TypeError):
+ make_interp_spline(self.xx, self.yy, k=2.5)
+
+ def test_order_0(self):
+ b = make_interp_spline(self.xx, self.yy, k=0)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ b = make_interp_spline(self.xx, self.yy, k=0, axis=-1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_linear(self):
+ b = make_interp_spline(self.xx, self.yy, k=1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ b = make_interp_spline(self.xx, self.yy, k=1, axis=-1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_not_a_knot(self):
+ for k in [3, 5]:
+ b = make_interp_spline(self.xx, self.yy, k)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_quadratic_deriv(self):
+ der = [(1, 8.)] # order, value: f'(x) = 8.
+
+ # derivative at right-hand edge
+ b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)
+
+ # derivative at left-hand edge
+ b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)
+
+ def test_cubic_deriv(self):
+ k = 3
+
+ # first derivatives at left & right edges:
+ der_l, der_r = [(1, 3.)], [(1, 4.)]
+ b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],
+ [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
+
+ # 'natural' cubic spline, zero out 2nd derivatives at the boundaries
+ der_l, der_r = [(2, 0)], [(2, 0)]
+ b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_quintic_derivs(self):
+ k, n = 5, 7
+ x = np.arange(n).astype(np.float_)
+ y = np.sin(x)
+ der_l = [(1, -12.), (2, 1)]
+ der_r = [(1, 8.), (2, 3.)]
+ b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
+ assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(x[0], 1), b(x[0], 2)],
+ [val for (nu, val) in der_l])
+ assert_allclose([b(x[-1], 1), b(x[-1], 2)],
+ [val for (nu, val) in der_r])
+
+ @pytest.mark.xfail(reason='unstable')
+ def test_cubic_deriv_unstable(self):
+ # 1st and 2nd derivative at x[0], no derivative information at x[-1]
+ # The problem is not that it fails [who would use this anyway],
+ # the problem is that it fails *silently*, and I've no idea
+ # how to detect this sort of instability.
+ # In this particular case: it's OK for len(t) < 20, goes haywire
+ # at larger `len(t)`.
+ k = 3
+ t = _augknt(self.xx, k)
+
+ der_l = [(1, 3.), (2, 4.)]
+ b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_knots_not_data_sites(self):
+ # Knots need not coincide with the data sites.
+ # use a quadratic spline, knots are at data averages,
+ # two additional constraints are zero 2nd derivatives at edges
+ k = 2
+ t = np.r_[(self.xx[0],)*(k+1),
+ (self.xx[1:] + self.xx[:-1]) / 2.,
+ (self.xx[-1],)*(k+1)]
+ b = make_interp_spline(self.xx, self.yy, k, t,
+ bc_type=([(2, 0)], [(2, 0)]))
+
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],
+ atol=1e-14)
+
+ def test_minimum_points_and_deriv(self):
+ # interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * xx**2 and
+ # f'(0) = 0, f'(1) = 3.
+ k = 3
+ x = [0., 1.]
+ y = [0., 1.]
+ b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))
+
+ xx = np.linspace(0., 1.)
+ yy = xx**3
+ assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+
+ def test_deriv_spec(self):
+ # If one of the derivatives is omitted, the spline definition is
+ # incomplete.
+ x = y = [1.0, 2, 3, 4, 5, 6]
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=([(1, 0.)], None))
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=(1, 0.))
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=[(1, 0.)])
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=42)
+
+        # CubicSpline expects `bc_type=(left_pair, right_pair)`, while
+ # here we expect `bc_type=(iterable, iterable)`.
+ l, r = (1, 0.0), (1, 0.0)
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=(l, r))
+
+ def test_complex(self):
+ k = 3
+ xx = self.xx
+ yy = self.yy + 1.j*self.yy
+
+ # first derivatives at left & right edges:
+ der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
+ b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
+ assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(xx[0], 1), b(xx[-1], 1)],
+ [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
+
+ # also test zero and first order
+ for k in (0, 1):
+ b = make_interp_spline(xx, yy, k=k)
+ assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+
+ def test_int_xy(self):
+ x = np.arange(10).astype(np.int_)
+ y = np.arange(10).astype(np.int_)
+
+ # Cython chokes on "buffer type mismatch" (construction) or
+ # "no matching signature found" (evaluation)
+ for k in (0, 1, 2, 3):
+ b = make_interp_spline(x, y, k=k)
+ b(x)
+
+ def test_sliced_input(self):
+ # Cython code chokes on non C contiguous arrays
+ xx = np.linspace(-1, 1, 100)
+
+ x = xx[::5]
+ y = xx[::5]
+
+ for k in (0, 1, 2, 3):
+ make_interp_spline(x, y, k=k)
+
+ def test_check_finite(self):
+ # check_finite defaults to True; nans and such trigger a ValueError
+ x = np.arange(10).astype(float)
+ y = x**2
+
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, make_interp_spline, x, y)
+
+ @pytest.mark.parametrize('k', [1, 2, 3, 5])
+ def test_list_input(self, k):
+ # regression test for gh-8714: TypeError for x, y being lists and k=2
+ x = list(range(10))
+ y = [a**2 for a in x]
+ make_interp_spline(x, y, k=k)
+
+ def test_multiple_rhs(self):
+ yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+ der_l = [(1, [1., 2.])]
+ der_r = [(1, [3., 4.])]
+
+ b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+ assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)
+
+ def test_shapes(self):
+ np.random.seed(1234)
+ k, n = 3, 22
+ x = np.sort(np.random.random(size=n))
+ y = np.random.random(size=(n, 5, 6, 7))
+
+ b = make_interp_spline(x, y, k)
+ assert_equal(b.c.shape, (n, 5, 6, 7))
+
+ # now throw in some derivatives
+ d_l = [(1, np.random.random((5, 6, 7)))]
+ d_r = [(1, np.random.random((5, 6, 7)))]
+ b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+ assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))
+
+ def test_string_aliases(self):
+ yy = np.sin(self.xx)
+
+ # a single string is duplicated
+ b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')
+ b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # two strings are handled
+ b1 = make_interp_spline(self.xx, yy, k=3,
+ bc_type=('natural', 'clamped'))
+ b2 = make_interp_spline(self.xx, yy, k=3,
+ bc_type=([(2, 0)], [(1, 0)]))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # one-sided BCs are OK
+ b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))
+ b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # 'not-a-knot' is equivalent to None
+ b1 = make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')
+ b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # unknown strings do not pass
+ with assert_raises(ValueError):
+ make_interp_spline(self.xx, yy, k=3, bc_type='typo')
+
+ # string aliases are handled for 2D values
+ yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+ der_l = [(1, [0., 0.])]
+ der_r = [(2, [0., 0.])]
+ b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+ b1 = make_interp_spline(self.xx, yy, k=3,
+ bc_type=('clamped', 'natural'))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # ... and for N-D values:
+ np.random.seed(1234)
+ k, n = 3, 22
+ x = np.sort(np.random.random(size=n))
+ y = np.random.random(size=(n, 5, 6, 7))
+
+ # now throw in some derivatives
+ d_l = [(1, np.zeros((5, 6, 7)))]
+ d_r = [(1, np.zeros((5, 6, 7)))]
+ b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+ b2 = make_interp_spline(x, y, k, bc_type='clamped')
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ def test_full_matrix(self):
+ np.random.seed(1234)
+ k, n = 3, 7
+ x = np.sort(np.random.random(size=n))
+ y = np.random.random(size=n)
+ t = _not_a_knot(x, k)
+
+ b = make_interp_spline(x, y, k, t)
+ cf = make_interp_full_matr(x, y, t, k)
+ assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)
+
+
+def make_interp_full_matr(x, y, t, k):
+    """Assemble a spline of order k with knots t to interpolate
+ y(x) using full matrices.
+ Not-a-knot BC only.
+
+ This routine is here for testing only (even though it's functional).
+ """
+ assert x.size == y.size
+ assert t.size == x.size + k + 1
+ n = x.size
+
+ A = np.zeros((n, n), dtype=np.float_)
+
+ for j in range(n):
+ xval = x[j]
+ if xval == t[k]:
+ left = k
+ else:
+ left = np.searchsorted(t, xval) - 1
+
+ # fill a row
+ bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+ A[j, left-k:left+1] = bb
+
+ c = sl.solve(A, y)
+ return c
+
+
+### XXX: 'periodic' interp spline using full matrices
+def make_interp_per_full_matr(x, y, t, k):
+ x, y, t = map(np.asarray, (x, y, t))
+
+ n = x.size
+ nt = t.size - k - 1
+
+ # have `n` conditions for `nt` coefficients; need nt-n derivatives
+ assert nt - n == k - 1
+
+ # LHS: the collocation matrix + derivatives at edges
+ A = np.zeros((nt, nt), dtype=np.float_)
+
+ # derivatives at x[0]:
+ offset = 0
+
+ if x[0] == t[k]:
+ left = k
+ else:
+ left = np.searchsorted(t, x[0]) - 1
+
+ if x[-1] == t[k]:
+ left2 = k
+ else:
+ left2 = np.searchsorted(t, x[-1]) - 1
+
+ for i in range(k-1):
+ bb = _bspl.evaluate_all_bspl(t, k, x[0], left, nu=i+1)
+ A[i, left-k:left+1] = bb
+ bb = _bspl.evaluate_all_bspl(t, k, x[-1], left2, nu=i+1)
+ A[i, left2-k:left2+1] = -bb
+ offset += 1
+
+ # RHS
+ y = np.r_[[0]*(k-1), y]
+
+ # collocation matrix
+ for j in range(n):
+ xval = x[j]
+ # find interval
+ if xval == t[k]:
+ left = k
+ else:
+ left = np.searchsorted(t, xval) - 1
+
+ # fill a row
+ bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+ A[j + offset, left-k:left+1] = bb
+
+ c = sl.solve(A, y)
+ return c
+
+
+def make_lsq_full_matrix(x, y, t, k=3):
+ """Make the least-square spline, full matrices."""
+ x, y, t = map(np.asarray, (x, y, t))
+ m = x.size
+ n = t.size - k - 1
+
+ A = np.zeros((m, n), dtype=np.float_)
+
+ for j in range(m):
+ xval = x[j]
+ # find interval
+ if xval == t[k]:
+ left = k
+ else:
+ left = np.searchsorted(t, xval) - 1
+
+ # fill a row
+ bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+ A[j, left-k:left+1] = bb
+
+ # have observation matrix, can solve the LSQ problem
+ B = np.dot(A.T, A)
+ Y = np.dot(A.T, y)
+ c = sl.solve(B, Y)
+
+ return c, (A, Y)
+
+
+class TestLSQ(object):
+ #
+ # Test make_lsq_spline
+ #
+ np.random.seed(1234)
+ n, k = 13, 3
+ x = np.sort(np.random.random(n))
+ y = np.random.random(n)
+ t = _augknt(np.linspace(x[0], x[-1], 7), k)
+
+ def test_lstsq(self):
+ # check LSQ construction vs a full matrix version
+ x, y, t, k = self.x, self.y, self.t, self.k
+
+ c0, AY = make_lsq_full_matrix(x, y, t, k)
+ b = make_lsq_spline(x, y, t, k)
+
+ assert_allclose(b.c, c0)
+ assert_equal(b.c.shape, (t.size - k - 1,))
+
+ # also check against numpy.lstsq
+ aa, yy = AY
+ c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
+ assert_allclose(b.c, c1)
+
+ def test_weights(self):
+ # weights = 1 is same as None
+ x, y, t, k = self.x, self.y, self.t, self.k
+ w = np.ones_like(x)
+
+ b = make_lsq_spline(x, y, t, k)
+ b_w = make_lsq_spline(x, y, t, k, w=w)
+
+ assert_allclose(b.t, b_w.t, atol=1e-14)
+ assert_allclose(b.c, b_w.c, atol=1e-14)
+ assert_equal(b.k, b_w.k)
+
+ def test_multiple_rhs(self):
+ x, t, k, n = self.x, self.t, self.k, self.n
+ y = np.random.random(size=(n, 5, 6, 7))
+
+ b = make_lsq_spline(x, y, t, k)
+ assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))
+
+ def test_complex(self):
+        # complex-valued `y`
+ x, t, k = self.x, self.t, self.k
+ yc = self.y * (1. + 2.j)
+
+ b = make_lsq_spline(x, yc, t, k)
+ b_re = make_lsq_spline(x, yc.real, t, k)
+ b_im = make_lsq_spline(x, yc.imag, t, k)
+
+ assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
+
+ def test_int_xy(self):
+ x = np.arange(10).astype(np.int_)
+ y = np.arange(10).astype(np.int_)
+ t = _augknt(x, k=1)
+ # Cython chokes on "buffer type mismatch"
+ make_lsq_spline(x, y, t, k=1)
+
+ def test_sliced_input(self):
+ # Cython code chokes on non C contiguous arrays
+ xx = np.linspace(-1, 1, 100)
+
+ x = xx[::3]
+ y = xx[::3]
+ t = _augknt(x, 1)
+ make_lsq_spline(x, y, t, k=1)
+
+ def test_checkfinite(self):
+ # check_finite defaults to True; nans and such trigger a ValueError
+ x = np.arange(12).astype(float)
+ y = x**2
+ t = _augknt(x, 3)
+
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, make_lsq_spline, x, y, t)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_fitpack.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_fitpack.py
new file mode 100644
index 0000000..c2179a4
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_fitpack.py
@@ -0,0 +1,491 @@
+import itertools
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_,
+ assert_almost_equal, assert_array_almost_equal)
+from pytest import raises as assert_raises
+import pytest
+from scipy._lib._testutils import check_free_memory
+
+from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round
+from scipy import interpolate
+from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev,
+ sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
+from scipy.interpolate.dfitpack import regrid_smth
+from scipy.interpolate.fitpack2 import dfitpack_int
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+def norm2(x):
+ return sqrt(dot(x.T,x))
+
+
+def f1(x,d=0):
+ if d is None:
+ return "sin"
+ if x is None:
+ return "sin(x)"
+ if d % 4 == 0:
+ return sin(x)
+ if d % 4 == 1:
+ return cos(x)
+ if d % 4 == 2:
+ return -sin(x)
+ if d % 4 == 3:
+ return -cos(x)
+
+
+def f2(x,y=0,dx=0,dy=0):
+ if x is None:
+ return "sin(x+y)"
+ d = dx+dy
+ if d % 4 == 0:
+ return sin(x+y)
+ if d % 4 == 1:
+ return cos(x+y)
+ if d % 4 == 2:
+ return -sin(x+y)
+ if d % 4 == 3:
+ return -cos(x+y)
+
+
+def makepairs(x, y):
+ """Helper function to create an array of pairs of x and y."""
+ xy = array(list(itertools.product(asarray(x), asarray(y))))
+ return xy.T
+
+
+def put(*a):
+ """Produce some output if file run directly"""
+ import sys
+ if hasattr(sys.modules['__main__'], '__put_prints'):
+ sys.stderr.write("".join(map(str, a)) + "\n")
+
+
+class TestSmokeTests(object):
+ """
+ Smoke tests (with a few asserts) for fitpack routines -- mostly
+ check that they are runnable
+ """
+
+ def check_1(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None):
+ if xb is None:
+ xb = a
+ if xe is None:
+ xe = b
+ x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
+ x1 = a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
+ v = f(x)
+ nk = []
+
+ def err_est(k, d):
+ # Assume f has all derivatives < 1
+ h = 1.0/float(N)
+ tol = 5 * h**(.75*(k-d))
+ if s > 0:
+ tol += 1e5*s
+ return tol
+
+ for k in range(1,6):
+ tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
+ if at:
+ t = tck[0][k:-k]
+ else:
+ t = x1
+ nd = []
+ for d in range(k+1):
+ tol = err_est(k, d)
+ err = norm2(f(t,d)-splev(t,tck,d)) / norm2(f(t,d))
+ assert_(err < tol, (k, d, err, tol))
+ nd.append((err, tol))
+ nk.append(nd)
+ put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
+ repr(round(xb,3)),repr(round(xe,3)),
+ repr(round(a,3)),repr(round(b,3))))
+ if at:
+ str = "at knots"
+ else:
+ str = "at the middle of nodes"
+ put(" per=%d s=%s Evaluation %s" % (per,repr(s),str))
+ put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''")
+ k = 1
+ for l in nk:
+ put(' %d : ' % k)
+ for r in l:
+ put(' %.1e %.1e' % r)
+ put('\n')
+ k = k+1
+
+ def check_2(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
+ ia=0,ib=2*pi,dx=0.2*pi):
+ if xb is None:
+ xb = a
+ if xe is None:
+ xe = b
+ x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
+ v = f(x)
+
+ def err_est(k, d):
+ # Assume f has all derivatives < 1
+ h = 1.0/float(N)
+ tol = 5 * h**(.75*(k-d))
+ if s > 0:
+ tol += 1e5*s
+ return tol
+
+ nk = []
+ for k in range(1,6):
+ tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
+ nk.append([splint(ia,ib,tck),spalde(dx,tck)])
+ put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
+ repr(round(xb,3)),repr(round(xe,3)),
+ repr(round(a,3)),repr(round(b,3))))
+ put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3))))
+ put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k")
+ k = 1
+ for r in nk:
+ if r[0] < 0:
+ sr = '-'
+ else:
+ sr = ' '
+ put(" %d %s%.8f %.1e " % (k,sr,abs(r[0]),
+ abs(r[0]-(f(ib,-1)-f(ia,-1)))))
+ d = 0
+ for dr in r[1]:
+ err = abs(1-dr/f(dx,d))
+ tol = err_est(k, d)
+ assert_(err < tol, (k, d))
+ put(" %.1e %.1e" % (err, tol))
+ d = d+1
+ put("\n")
+ k = k+1
+
+ def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
+ ia=0,ib=2*pi,dx=0.2*pi):
+ if xb is None:
+ xb = a
+ if xe is None:
+ xe = b
+ x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
+ v = f(x)
+ put(" k : Roots of s(x) approx %s x in [%s,%s]:" %
+ (f(None),repr(round(a,3)),repr(round(b,3))))
+ for k in range(1,6):
+ tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
+ if k == 3:
+ roots = sproot(tck)
+ assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
+ assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3)
+ put(' %d : %s' % (k, repr(roots.tolist())))
+ else:
+ assert_raises(ValueError, sproot, tck)
+
+ def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
+ ia=0,ib=2*pi,dx=0.2*pi):
+ if xb is None:
+ xb = a
+ if xe is None:
+ xe = b
+ x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
+ x1 = a + (b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
+ v, _ = f(x),f(x1)
+ put(" u = %s N = %d" % (repr(round(dx,3)),N))
+ put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0,None)))
+ for k in range(1,6):
+ tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
+ tck = splrep(x,v,s=s,per=per,k=k)
+ uv = splev(dx,tckp)
+ err1 = abs(uv[1]-f(uv[0]))
+ err2 = abs(splev(uv[0],tck)-f(uv[0]))
+ assert_(err1 < 1e-2)
+ assert_(err2 < 1e-2)
+ put(" %d : %s %.1e %.1e" %
+ (k,repr([round(z,3) for z in uv]),
+ err1,
+ err2))
+ put("Derivatives of parametric cubic spline at u (first function):")
+ k = 3
+ tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
+ for d in range(1,k+1):
+ uv = splev(dx,tckp,d)
+ put(" %s " % (repr(uv[0])))
+
+ def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0):
+ x = xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx)
+ y = yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny)
+ xy = makepairs(x,y)
+ tck = bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky)
+ tt = [tck[0][kx:-kx],tck[1][ky:-ky]]
+ t2 = makepairs(tt[0],tt[1])
+ v1 = bisplev(tt[0],tt[1],tck)
+ v2 = f2(t2[0],t2[1])
+ v2.shape = len(tt[0]),len(tt[1])
+ err = norm2(ravel(v1-v2))
+ assert_(err < 1e-2, err)
+ put(err)
+
+ def test_smoke_splrep_splev(self):
+ put("***************** splrep/splev")
+ self.check_1(s=1e-6)
+ self.check_1()
+ self.check_1(at=1)
+ self.check_1(per=1)
+ self.check_1(per=1,at=1)
+ self.check_1(b=1.5*pi)
+ self.check_1(b=1.5*pi,xe=2*pi,per=1,s=1e-1)
+
+ def test_smoke_splint_spalde(self):
+ put("***************** splint/spalde")
+ self.check_2()
+ self.check_2(per=1)
+ self.check_2(ia=0.2*pi,ib=pi)
+ self.check_2(ia=0.2*pi,ib=pi,N=50)
+
+ def test_smoke_sproot(self):
+ put("***************** sproot")
+ self.check_3(a=0.1,b=15)
+
+ def test_smoke_splprep_splrep_splev(self):
+ put("***************** splprep/splrep/splev")
+ self.check_4()
+ self.check_4(N=50)
+
+ def test_smoke_bisplrep_bisplev(self):
+ put("***************** bisplev")
+ self.check_5()
+
+
+class TestSplev(object):
+ def test_1d_shape(self):
+ x = [1,2,3,4,5]
+ y = [4,5,6,7,8]
+ tck = splrep(x, y)
+ z = splev([1], tck)
+ assert_equal(z.shape, (1,))
+ z = splev(1, tck)
+ assert_equal(z.shape, ())
+
+ def test_2d_shape(self):
+ x = [1, 2, 3, 4, 5]
+ y = [4, 5, 6, 7, 8]
+ tck = splrep(x, y)
+ t = np.array([[1.0, 1.5, 2.0, 2.5],
+ [3.0, 3.5, 4.0, 4.5]])
+ z = splev(t, tck)
+ z0 = splev(t[0], tck)
+ z1 = splev(t[1], tck)
+ assert_equal(z, np.row_stack((z0, z1)))
+
+ def test_extrapolation_modes(self):
+ # test extrapolation modes
+ # * if ext=0, return the extrapolated value.
+ # * if ext=1, return 0
+ # * if ext=2, raise a ValueError
+ # * if ext=3, return the boundary value.
+ x = [1,2,3]
+ y = [0,2,4]
+ tck = splrep(x, y, k=1)
+
+ rstl = [[-2, 6], [0, 0], None, [0, 4]]
+ for ext in (0, 1, 3):
+ assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
+
+ assert_raises(ValueError, splev, [0, 4], tck, ext=2)
+
+
+class TestSplder(object):
+ def setup_method(self):
+ # non-uniform grid, just to make it sure
+ x = np.linspace(0, 1, 100)**3
+ y = np.sin(20 * x)
+ self.spl = splrep(x, y)
+
+ # double check that knots are non-uniform
+ assert_(np.diff(self.spl[0]).ptp() > 0)
+
+ def test_inverse(self):
+ # Check that antiderivative + derivative is identity.
+ for n in range(5):
+ spl2 = splantider(self.spl, n)
+ spl3 = splder(spl2, n)
+ assert_allclose(self.spl[0], spl3[0])
+ assert_allclose(self.spl[1], spl3[1])
+ assert_equal(self.spl[2], spl3[2])
+
+ def test_splder_vs_splev(self):
+ # Check derivative vs. FITPACK
+
+ for n in range(3+1):
+ # Also extrapolation!
+ xx = np.linspace(-1, 2, 2000)
+ if n == 3:
+ # ... except that FITPACK extrapolates strangely for
+ # order 0, so let's not check that.
+ xx = xx[(xx >= 0) & (xx <= 1)]
+
+ dy = splev(xx, self.spl, n)
+ spl2 = splder(self.spl, n)
+ dy2 = splev(xx, spl2)
+ if n == 1:
+ assert_allclose(dy, dy2, rtol=2e-6)
+ else:
+ assert_allclose(dy, dy2)
+
+ def test_splantider_vs_splint(self):
+ # Check antiderivative vs. FITPACK
+ spl2 = splantider(self.spl)
+
+ # no extrapolation, splint assumes function is zero outside
+ # range
+ xx = np.linspace(0, 1, 20)
+
+ for x1 in xx:
+ for x2 in xx:
+ y1 = splint(x1, x2, self.spl)
+ y2 = splev(x2, spl2) - splev(x1, spl2)
+ assert_allclose(y1, y2)
+
+ def test_order0_diff(self):
+ assert_raises(ValueError, splder, self.spl, 4)
+
+ def test_kink(self):
+ # Should refuse to differentiate splines with kinks
+
+ spl2 = insert(0.5, self.spl, m=2)
+ splder(spl2, 2) # Should work
+ assert_raises(ValueError, splder, spl2, 3)
+
+ spl2 = insert(0.5, self.spl, m=3)
+ splder(spl2, 1) # Should work
+ assert_raises(ValueError, splder, spl2, 2)
+
+ spl2 = insert(0.5, self.spl, m=4)
+ assert_raises(ValueError, splder, spl2, 1)
+
+ def test_multidim(self):
+ # c can have trailing dims
+ for n in range(3):
+ t, c, k = self.spl
+ c2 = np.c_[c, c, c]
+ c2 = np.dstack((c2, c2))
+
+ spl2 = splantider((t, c2, k), n)
+ spl3 = splder(spl2, n)
+
+ assert_allclose(t, spl3[0])
+ assert_allclose(c2, spl3[1])
+ assert_equal(k, spl3[2])
+
+
+class TestBisplrep(object):
+ def test_overflow(self):
+ from numpy.lib.stride_tricks import as_strided
+ if dfitpack_int.itemsize == 8:
+ size = 1500000**2
+ else:
+ size = 400**2
+ # Don't allocate a real array, as it's very big, but rely
+ # on that it's not referenced
+ x = as_strided(np.zeros(()), shape=(size,))
+ assert_raises(OverflowError, bisplrep, x, x, x, w=x,
+ xb=0, xe=1, yb=0, ye=1, s=0)
+
+ def test_regression_1310(self):
+ # Regression test for gh-1310
+ data = np.load(data_file('bug-1310.npz'))['data']
+
+ # Shouldn't crash -- the input data triggers work array sizes
+ # that caused previously some data to not be aligned on
+ # sizeof(double) boundaries in memory, which made the Fortran
+ # code to crash when compiled with -O3
+ bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
+ full_output=True)
+
+ @pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
+ def test_ilp64_bisplrep(self):
+ check_free_memory(28000) # VM size, doesn't actually use the pages
+ x = np.linspace(0, 1, 400)
+ y = np.linspace(0, 1, 400)
+ x, y = np.meshgrid(x, y)
+ z = np.zeros_like(x)
+ tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
+ assert_allclose(bisplev(0.5, 0.5, tck), 0.0)
+
+
+def test_dblint():
+ # Basic test to see it runs and gives the correct result on a trivial
+ # problem. Note that `dblint` is not exposed in the interpolate namespace.
+ x = np.linspace(0, 1)
+ y = np.linspace(0, 1)
+ xx, yy = np.meshgrid(x, y)
+ rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy)
+ tck = list(rect.tck)
+ tck.extend(rect.degrees)
+
+ assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
+ assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
+ assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
+ assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
+
+
+def test_splev_der_k():
+ # regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
+ # for x outside of knot range
+
+ # test case from gh-2188
+ tck = (np.array([0., 0., 2.5, 2.5]),
+ np.array([-1.56679978, 2.43995873, 0., 0.]),
+ 1)
+ t, c, k = tck
+ x = np.array([-3, 0, 2.5, 3])
+
+ # an explicit form of the linear spline
+ assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
+ assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
+
+ # now check a random spline vs splder
+ np.random.seed(1234)
+ x = np.sort(np.random.random(30))
+ y = np.random.random(30)
+ t, c, k = splrep(x, y)
+
+ x = [t[0] - 1., t[-1] + 1.]
+ tck2 = splder((t, c, k), k)
+ assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
+
+
+def test_splprep_segfault():
+ # regression test for gh-3847: splprep segfaults if knots are specified
+ # for task=-1
+ t = np.arange(0, 1.1, 0.1)
+ x = np.sin(2*np.pi*t)
+ y = np.cos(2*np.pi*t)
+ tck, u = interpolate.splprep([x, y], s=0)
+ unew = np.arange(0, 1.01, 0.01)
+
+ uknots = tck[0] # using the knots from the previous fitting
+ tck, u = interpolate.splprep([x, y], task=-1, t=uknots) # here is the crash
+
+
+def test_bisplev_integer_overflow():
+ np.random.seed(1)
+
+ x = np.linspace(0, 1, 11)
+ y = x
+ z = np.random.randn(11, 11).ravel()
+ kx = 1
+ ky = 1
+
+ nx, tx, ny, ty, c, fp, ier = regrid_smth(
+ x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
+ tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
+
+ xp = np.zeros([2621440])
+ yp = np.zeros([2621440])
+
+ assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_fitpack2.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_fitpack2.py
new file mode 100644
index 0000000..263bb13
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_fitpack2.py
@@ -0,0 +1,1056 @@
+# Created by Pearu Peterson, June 2003
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+
+from numpy import array, diff, linspace, meshgrid, ones, pi, shape
+from scipy.interpolate.fitpack import bisplrep, bisplev
+from scipy.interpolate.fitpack2 import (UnivariateSpline,
+ LSQUnivariateSpline, InterpolatedUnivariateSpline,
+ LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
+ LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
+ RectSphereBivariateSpline)
+
+
+class TestUnivariateSpline(object):
+ def test_linear_constant(self):
+ x = [1,2,3]
+ y = [3,3,3]
+ lut = UnivariateSpline(x,y,k=1)
+ assert_array_almost_equal(lut.get_knots(),[1,3])
+ assert_array_almost_equal(lut.get_coeffs(),[3,3])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
+
+ def test_preserve_shape(self):
+ x = [1, 2, 3]
+ y = [0, 2, 4]
+ lut = UnivariateSpline(x, y, k=1)
+ arg = 2
+ assert_equal(shape(arg), shape(lut(arg)))
+ assert_equal(shape(arg), shape(lut(arg, nu=1)))
+ arg = [1.5, 2, 2.5]
+ assert_equal(shape(arg), shape(lut(arg)))
+ assert_equal(shape(arg), shape(lut(arg, nu=1)))
+
+ def test_linear_1d(self):
+ x = [1,2,3]
+ y = [0,2,4]
+ lut = UnivariateSpline(x,y,k=1)
+ assert_array_almost_equal(lut.get_knots(),[1,3])
+ assert_array_almost_equal(lut.get_coeffs(),[0,4])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
+
+ def test_subclassing(self):
+ # See #731
+
+ class ZeroSpline(UnivariateSpline):
+ def __call__(self, x):
+ return 0*array(x)
+
+ sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
+ assert_array_equal(sp([1.5, 2.5]), [0., 0.])
+
+ def test_empty_input(self):
+ # Test whether empty input returns an empty output. Ticket 1014
+ x = [1,3,5,7,9]
+ y = [0,4,9,12,21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_array_equal(spl([]), array([]))
+
+ def test_resize_regression(self):
+ """Regression test for #1375."""
+ x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
+ -0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
+ 0.65016502, 1.]
+ y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
+ 0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
+ 0.62928599, 1.]
+ w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
+ 6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
+ 6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
+ 1.00000000e+12]
+ spl = UnivariateSpline(x=x, y=y, w=w, s=None)
+ desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
+ assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
+
+ def test_out_of_range_regression(self):
+ # Test different extrapolation modes. See ticket 3557
+ x = np.arange(5, dtype=float)
+ y = x**3
+
+ xp = linspace(-8, 13, 100)
+ xp_zeros = xp.copy()
+ xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
+ xp_clip = xp.copy()
+ xp_clip[xp_clip < x[0]] = x[0]
+ xp_clip[xp_clip > x[-1]] = x[-1]
+
+ for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
+ spl = cls(x=x, y=y)
+ for ext in [0, 'extrapolate']:
+ assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
+ for ext in [1, 'zeros']:
+ assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
+ for ext in [2, 'raise']:
+ assert_raises(ValueError, spl, xp, **dict(ext=ext))
+ for ext in [3, 'const']:
+ assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
+
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ spl = LSQUnivariateSpline(x, y, t)
+ assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
+ assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
+ assert_raises(ValueError, spl, xp, **dict(ext=2))
+ assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
+
+ # also make sure that unknown values for `ext` are caught early
+ for ext in [-1, 'unknown']:
+ spl = UnivariateSpline(x, y)
+ assert_raises(ValueError, spl, xp, **dict(ext=ext))
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, ext=ext))
+
+ def test_lsq_fpchec(self):
+ xs = np.arange(100) * 1.
+ ys = np.arange(100) * 1.
+ knots = np.linspace(0, 99, 10)
+ bbox = (-1, 101)
+ assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
+ bbox=bbox)
+
+ def test_derivative_and_antiderivative(self):
+ # Thin wrappers to splder/splantider, so light smoke test only.
+ x = np.linspace(0, 1, 70)**3
+ y = np.cos(x)
+
+ spl = UnivariateSpline(x, y, s=0)
+ spl2 = spl.antiderivative(2).derivative(2)
+ assert_allclose(spl(0.3), spl2(0.3))
+
+ spl2 = spl.antiderivative(1)
+ assert_allclose(spl2(0.6) - spl2(0.2),
+ spl.integral(0.2, 0.6))
+
+ def test_derivative_extrapolation(self):
+ # Regression test for gh-10195: for a const-extrapolation spline
+ # its derivative evaluates to zero for extrapolation
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 5]
+ f = UnivariateSpline(x_values, y_values, ext='const', k=3)
+
+ x = [-1, 0, -0.5, 9, 9.5, 10]
+ assert_allclose(f.derivative()(x), 0, atol=1e-15)
+
+ def test_integral_out_of_bounds(self):
+ # Regression test for gh-7906: .integral(a, b) is wrong if both
+ # a and b are out-of-bounds
+ x = np.linspace(0., 1., 7)
+ for ext in range(4):
+ f = UnivariateSpline(x, x, s=0, ext=ext)
+ for (a, b) in [(1, 1), (1, 5), (2, 5),
+ (0, 0), (-2, 0), (-2, -1)]:
+ assert_allclose(f.integral(a, b), 0, atol=1e-15)
+
+ def test_nan(self):
+ # bail out early if the input data contains nans
+ x = np.arange(10, dtype=float)
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(x, y, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ y_end = y[-1]
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, check_finite=True))
+ y[-1] = y_end # check valid y but invalid w
+ w[-1] = z
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, w=w, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, w=w, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+ def test_strictly_increasing_x(self):
+ # Test the x is required to be strictly increasing for
+ # UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
+ # but merely increasing for UnivariateSpline if s>0
+ # and for LSQUnivariateSpline; see gh-8535
+ xx = np.arange(10, dtype=float)
+ yy = xx**3
+ x = np.arange(10, dtype=float)
+ x[1] = x[0]
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(xx, yy, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
+ LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, s=0, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+
+ def test_increasing_x(self):
+ # Test that x is required to be increasing, see gh-8535
+ xx = np.arange(10, dtype=float)
+ yy = xx**3
+ x = np.arange(10, dtype=float)
+ x[1] = x[0] - 1.0
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(xx, yy, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+ def test_invalid_input_for_univariate_spline(self):
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ UnivariateSpline(x_values, y_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [-1.0, 1.0, 1.0, 1.0]
+ UnivariateSpline(x_values, y_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ UnivariateSpline(x_values, y_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ UnivariateSpline(x_values, y_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ UnivariateSpline(x_values, y_values, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ def test_invalid_input_for_interpolated_univariate_spline(self):
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ InterpolatedUnivariateSpline(x_values, y_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [-1.0, 1.0, 1.0, 1.0]
+ InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ InterpolatedUnivariateSpline(x_values, y_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ def test_invalid_input_for_lsq_univariate_spline(self):
+
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ spl = UnivariateSpline(x_values, y_values, check_finite=True)
+ t_values = spl.get_knots()[3:4] # interior knots w/ default k=3
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ LSQUnivariateSpline(x_values, y_values, t_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [1.0, 1.0, 1.0, 1.0]
+ LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (100, -100)
+ LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+ assert "Interior knots t must satisfy Schoenberg-Whitney conditions" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ LSQUnivariateSpline(x_values, y_values, t_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ def test_array_like_input(self):
+ x_values = np.array([1, 2, 4, 6, 8.5])
+ y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
+ w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
+ bbox = np.array([-100, 100])
+ # np.array input
+ spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
+ bbox=bbox)
+ # list input
+ spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
+ w=w_values.tolist(), bbox=bbox.tolist())
+
+ assert_allclose(spl1([0.1, 0.5, 0.9, 0.99]),
+ spl2([0.1, 0.5, 0.9, 0.99]))
+
+
+class TestLSQBivariateSpline(object):
+ # NOTE: The systems in this test class are rank-deficient
+ def test_linear_constant(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+ assert_equal(len(r), 1)
+
+ assert_almost_equal(lut(2,2), 3.)
+
+ def test_bilinearity(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [0,7,8,3,4,7,1,3,4]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ # This seems to fail (ier=1, see ticket 1642).
+ sup.filter(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+
+ tx, ty = lut.get_knots()
+ for xa, xb in zip(tx[:-1], tx[1:]):
+ for ya, yb in zip(ty[:-1], ty[1:]):
+ for t in [0.1, 0.5, 0.9]:
+ for s in [0.3, 0.4, 0.7]:
+ xp = xa*(1-t) + xb*t
+ yp = ya*(1-s) + yb*s
+ zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ + lut(xb, ya)*t*(1-s)
+ + lut(xa, yb)*(1-t)*s
+ + lut(xb, yb)*t*s)
+ assert_almost_equal(lut(xp,yp), zp)
+
+ def test_integral(self):
+ x = [1,1,1,2,2,2,8,8,8]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = array([0,7,8,3,4,7,1,3,4])
+
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+ assert_equal(len(r), 1)
+ tx, ty = lut.get_knots()
+ tz = lut(tx, ty)
+ trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+
+ assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
+ trpz)
+
+ def test_empty_input(self):
+        # Test whether empty inputs return an empty output. Ticket 1014
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+ assert_equal(len(r), 1)
+
+ assert_array_equal(lut([], []), np.zeros((0,0)))
+ assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ s = 0.1
+ tx = [1 + s, 3 - s]
+ ty = [1 + s, 3 - s]
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0, num=10)
+ LSQBivariateSpline(x, y, z, tx, ty)
+ assert "x, y, and z should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0, num=20)
+ LSQBivariateSpline(x, y, z, tx, ty, w=w)
+ assert "x, y, z, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ w = np.linspace(-1.0, 10.0)
+ LSQBivariateSpline(x, y, z, tx, ty, w=w)
+ assert "w should be positive" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-100, 100, -100)
+ LSQBivariateSpline(x, y, z, tx, ty, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ LSQBivariateSpline(x, y, z, tx, ty, kx=10, ky=10)
+ assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in \
+ str(info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQBivariateSpline(x, y, z, tx, ty, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQBivariateSpline(x, y, z, tx, ty, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ s = 0.1
+ tx = np.array([1 + s, 3 - s])
+ ty = np.array([1 + s, 3 - s])
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0)
+ bbox = np.array([1.0, 10.0, 1.0, 10.0])
+
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ # np.array input
+ spl1 = LSQBivariateSpline(x, y, z, tx, ty, w=w, bbox=bbox)
+ # list input
+ spl2 = LSQBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ tx.tolist(), ty.tolist(), w=w.tolist(),
+ bbox=bbox)
+ assert_allclose(spl1(2.0, 2.0), spl2(2.0, 2.0))
+ assert_equal(len(r), 2)
+
+
+class TestSmoothBivariateSpline(object):
+ def test_linear_constant(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+ assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+ assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
+
+ def test_linear_1d(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [0,0,0,2,2,2,4,4,4]
+ lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+ assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+ assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
+
+ def test_integral(self):
+ x = [1,1,1,2,2,2,4,4,4]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = array([0,7,8,3,4,7,1,3,4])
+
+ with suppress_warnings() as sup:
+ # This seems to fail (ier=1, see ticket 1642).
+ sup.filter(UserWarning, "\nThe required storage space")
+ lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
+
+ tx = [1,2,4]
+ ty = [1,2,3]
+
+ tz = lut(tx, ty)
+ trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+ assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
+
+ lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
+ assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
+ decimal=0) # the quadratures give 23.75 and 23.85
+
+ tz = lut(tx[:-1], ty[:-1])
+ trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+ assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
+
+ def test_rerun_lwrk2_too_small(self):
+ # in this setting, lwrk2 is too small in the default run. Here we
+ # check for equality with the bisplrep/bisplev output because there,
+ # an automatic re-run of the spline representation is done if ier>10.
+ x = np.linspace(-2, 2, 80)
+ y = np.linspace(-2, 2, 80)
+ z = x + y
+ xi = np.linspace(-1, 1, 100)
+ yi = np.linspace(-2, 2, 100)
+ tck = bisplrep(x, y, z)
+ res1 = bisplev(xi, yi, tck)
+ interp_ = SmoothBivariateSpline(x, y, z)
+ res2 = interp_(xi, yi)
+ assert_almost_equal(res1, res2)
+
+ def test_invalid_input(self):
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0, num=10)
+ SmoothBivariateSpline(x, y, z)
+ assert "x, y, and z should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0, num=20)
+ SmoothBivariateSpline(x, y, z, w=w)
+ assert "x, y, z, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ w = np.linspace(-1.0, 10.0)
+ SmoothBivariateSpline(x, y, z, w=w)
+ assert "w should be positive" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-100, 100, -100)
+ SmoothBivariateSpline(x, y, z, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ SmoothBivariateSpline(x, y, z, kx=10, ky=10)
+ assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
+ str(info.value)
+
+ with assert_raises(ValueError) as info:
+ SmoothBivariateSpline(x, y, z, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothBivariateSpline(x, y, z, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothBivariateSpline(x, y, z, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
+ y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+ bbox = np.array([1.0, 3.0, 1.0, 3.0])
+ # np.array input
+ spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
+ # list input
+ spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ bbox=bbox.tolist(), w=w.tolist(),
+ kx=1, ky=1)
+ assert_allclose(spl1(0.1, 0.5), spl2(0.1, 0.5))
+
+
+class TestLSQSphereBivariateSpline(object):
+ def setup_method(self):
+ # define the input data and coordinates
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
+ phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+ knotdata = data[::5, ::5]
+ # calculate spline coefficients
+ lats, lons = meshgrid(theta, phi)
+ lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ self.lut_lsq = lut_lsq
+ self.data = knotdata
+ self.new_lons, self.new_lats = knotsp, knotst
+
+ def test_linear_constant(self):
+ assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
+ assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
+ self.data)
+
+ def test_empty_input(self):
+ assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
+ assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+ ntheta) * pi
+ phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1), nphi) * 2. * pi
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = linspace(-0.1, 1.0, num=ntheta) * pi
+ invalid_lats, lons = meshgrid(invalid_theta, phi)
+ LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = linspace(0.1, 1.1, num=ntheta) * pi
+ invalid_lats, lons = meshgrid(invalid_theta, phi)
+ LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = linspace(-0.1, 1.0, num=ntheta) * 2.0 * pi
+ lats, invalid_lons = meshgrid(theta, invalid_phi)
+ LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = linspace(0.0, 1.1, num=ntheta) * 2.0 * pi
+ lats, invalid_lons = meshgrid(theta, invalid_phi)
+ LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ lats, lons = meshgrid(theta, phi)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotst = np.copy(knotst)
+ invalid_knotst[0] = -0.1
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), invalid_knotst, knotsp)
+ assert "tt should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotst = np.copy(knotst)
+ invalid_knotst[0] = pi
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), invalid_knotst, knotsp)
+ assert "tt should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotsp = np.copy(knotsp)
+ invalid_knotsp[0] = -0.1
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, invalid_knotsp)
+ assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotsp = np.copy(knotsp)
+ invalid_knotsp[0] = 2 * pi
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, invalid_knotsp)
+ assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, w=invalid_w)
+ assert "w should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+ ntheta) * pi
+ phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1),
+ nphi) * 2. * pi
+ lats, lons = meshgrid(theta, phi)
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+ w = ones((lats.ravel().shape[0]))
+
+ # np.array input
+ spl1 = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp, w=w)
+ # list input
+ spl2 = LSQSphereBivariateSpline(lats.ravel().tolist(),
+ lons.ravel().tolist(),
+ data.T.ravel().tolist(),
+ knotst.tolist(),
+ knotsp.tolist(), w=w.tolist())
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestSmoothSphereBivariateSpline(object):
+ def setup_method(self):
+ theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
+ .75*pi, .75*pi])
+ phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+ 1.5 * pi])
+ r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
+
+ def test_linear_constant(self):
+ assert_almost_equal(self.lut.get_residual(), 0.)
+ assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
+ [[3, 3], [3, 3], [3, 3]])
+
+ def test_empty_input(self):
+ assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
+ assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi, .5 * pi,
+ .75 * pi, .75 * pi, .75 * pi])
+ phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+ 1.5 * pi])
+ r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = array([-0.1 * pi, .25 * pi, .25 * pi, .5 * pi,
+ .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+ .75 * pi])
+ SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi,
+ .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+ 1.1 * pi])
+ SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = array([-.1 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+ .5 * pi, pi, 1.5 * pi])
+ SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = array([1.0 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+ .5 * pi, pi, 2.1 * pi])
+ SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+ SmoothSphereBivariateSpline(theta, phi, r, w=invalid_w, s=1E10)
+ assert "w should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, s=-1.0)
+ assert "s should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, eps=-1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ theta = np.array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi,
+ .5 * pi, .75 * pi, .75 * pi, .75 * pi])
+ phi = np.array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi,
+ pi, 1.5 * pi])
+ r = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ w = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+
+ # np.array input
+ spl1 = SmoothSphereBivariateSpline(theta, phi, r, w=w, s=1E10)
+
+ # list input
+ spl2 = SmoothSphereBivariateSpline(theta.tolist(), phi.tolist(),
+ r.tolist(), w=w.tolist(), s=1E10)
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestRectBivariateSpline(object):
+ def test_defaults(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y),z)
+
+ def test_evaluate(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+
+ xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
+ yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
+ zi = lut.ev(xi, yi)
+ zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+
+ assert_almost_equal(zi, zi2)
+
+ def test_derivatives_grid(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
+ [0,0,-11,0,0],[0,0,4,0,0]])/6.
+ dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
+ [2,.25,0,-.25,-2],[4,-1,0,1,-4]])
+ dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
+ [-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y,dx=1),dx)
+ assert_array_almost_equal(lut(x,y,dy=1),dy)
+ assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
+
+ def test_derivatives(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ dx = array([0,0,2./3,0,0])
+ dy = array([4,-1,0,-.25,-4])
+ dxdy = array([160,65,0,55,32])/24.
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
+ assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
+ assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
+
+ def test_broadcast(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+ assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
+
+ def test_invalid_input(self):
+
+ with assert_raises(ValueError) as info:
+ x = array([6, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "x must be strictly increasing" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([2, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "y must be strictly increasing" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "x dimension of z must have same number of elements as x"\
+ in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
+ [1, 2, 2, 2], [1, 2, 1, 2]])
+ RectBivariateSpline(x, y, z)
+ assert "y dimension of z must have same number of elements as y"\
+ in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ bbox = (-100, 100, -100)
+ RectBivariateSpline(x, y, z, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ RectBivariateSpline(x, y, z, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ def test_array_like_input(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ bbox = array([1, 5, 1, 5])
+
+ spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
+ spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ bbox=bbox.tolist())
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestRectSphereBivariateSpline(object):
+ def test_defaults(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+ lut = RectSphereBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y),z)
+
+ def test_evaluate(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+ lut = RectSphereBivariateSpline(x,y,z)
+ yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
+ xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
+ zi = lut.ev(xi, yi)
+ zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+ assert_almost_equal(zi, zi2)
+
+ def test_derivatives_grid(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+
+ lut = RectSphereBivariateSpline(x,y,z)
+
+ y = linspace(0.02, 2*pi-0.02, 7)
+ x = linspace(0.02, pi-0.02, 7)
+
+ assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
+ rtol=1e-3, atol=1e-3)
+
+ def test_derivatives(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+
+ lut = RectSphereBivariateSpline(x,y,z)
+
+ y = linspace(0.02, 2*pi-0.02, 7)
+ x = linspace(0.02, pi-0.02, 7)
+
+ assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
+ assert_allclose(lut(x, y, dtheta=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dphi=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
+ rtol=1e-3, atol=1e-3)
+
+ def test_invalid_input(self):
+ data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+ np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(-1, 170, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 181, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-181, 10, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-10, 360, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(10, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data, s=-1)
+ assert "s should be positive" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ y = linspace(0.01, 2 * pi - 0.01, 7)
+ x = linspace(0.01, pi - 0.01, 7)
+ z = array([[1, 2, 1, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1, 2, 1],
+ [1, 2, 2, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+ [1, 2, 2, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1, 2, 1]])
+ # np.array input
+ spl1 = RectSphereBivariateSpline(x, y, z)
+ # list input
+ spl2 = RectSphereBivariateSpline(x.tolist(), y.tolist(), z.tolist())
+ assert_array_almost_equal(spl1(x, y), spl2(x, y))
+
+
+def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
+ if dx == 0 and dy == 0:
+ return func(x, y)
+ elif dx == 1 and dy == 0:
+ return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
+ elif dx == 0 and dy == 1:
+ return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
+ elif dx == 1 and dy == 1:
+ return (func(x + eps, y + eps) - func(x - eps, y + eps)
+ - func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
+ else:
+ raise ValueError("invalid derivative order")
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_gil.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_gil.py
new file mode 100644
index 0000000..f049751
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_gil.py
@@ -0,0 +1,65 @@
+import itertools
+import threading
+import time
+
+import numpy as np
+from numpy.testing import assert_equal
+import pytest
+import scipy.interpolate
+
+
+class TestGIL(object):
+ """Check if the GIL is properly released by scipy.interpolate functions."""
+
+ def setup_method(self):
+ self.messages = []
+
+ def log(self, message):
+ self.messages.append(message)
+
+ def make_worker_thread(self, target, args):
+ log = self.log
+
+ class WorkerThread(threading.Thread):
+ def run(self):
+ log('interpolation started')
+ target(*args)
+ log('interpolation complete')
+
+ return WorkerThread()
+
+ @pytest.mark.slow
+ @pytest.mark.xfail(reason='race conditions, may depend on system load')
+ def test_rectbivariatespline(self):
+ def generate_params(n_points):
+ x = y = np.linspace(0, 1000, n_points)
+ x_grid, y_grid = np.meshgrid(x, y)
+ z = x_grid * y_grid
+ return x, y, z
+
+ def calibrate_delay(requested_time):
+ for n_points in itertools.count(5000, 1000):
+ args = generate_params(n_points)
+ time_started = time.time()
+ interpolate(*args)
+ if time.time() - time_started > requested_time:
+ return args
+
+ def interpolate(x, y, z):
+ scipy.interpolate.RectBivariateSpline(x, y, z)
+
+ args = calibrate_delay(requested_time=3)
+ worker_thread = self.make_worker_thread(interpolate, args)
+ worker_thread.start()
+ for i in range(3):
+ time.sleep(0.5)
+ self.log('working')
+ worker_thread.join()
+ assert_equal(self.messages, [
+ 'interpolation started',
+ 'working',
+ 'working',
+ 'working',
+ 'interpolation complete',
+ ])
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_interpnd.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_interpnd.py
new file mode 100644
index 0000000..af0a6f1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_interpnd.py
@@ -0,0 +1,386 @@
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
+ suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+import scipy.interpolate.interpnd as interpnd
+import scipy.spatial.qhull as qhull
+
+import pickle
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+class TestLinearNDInterpolation(object):
+ def test_smoketest(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+
+ yi = interpnd.LinearNDInterpolator(x, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_smoketest_alternate(self):
+ # Test at single points, alternate calling convention
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+
+ yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
+ assert_almost_equal(y, yi)
+
+ def test_complex_smoketest(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ yi = interpnd.LinearNDInterpolator(x, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_tri_input(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.LinearNDInterpolator(tri, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_square(self):
+ # Test barycentric interpolation on a square against a manual
+ # implementation
+
+ points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+ values = np.array([1., 2., -3., 5.], dtype=np.double)
+
+ # NB: assume triangles (0, 1, 3) and (1, 2, 3)
+ #
+ # 1----2
+ # | \ |
+ # | \ |
+ # 0----3
+
+ def ip(x, y):
+ t1 = (x + y <= 1)
+ t2 = ~t1
+
+ x1 = x[t1]
+ y1 = y[t1]
+
+ x2 = x[t2]
+ y2 = y[t2]
+
+ z = 0*x
+
+ z[t1] = (values[0]*(1 - x1 - y1)
+ + values[1]*y1
+ + values[3]*x1)
+
+ z[t2] = (values[2]*(x2 + y2 - 1)
+ + values[1]*(1 - x2)
+ + values[3]*(1 - y2))
+ return z
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
+ np.linspace(0, 1, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+
+ xi = np.array([xx, yy]).T.copy()
+ zi = interpnd.LinearNDInterpolator(points, values)(xi)
+
+ assert_almost_equal(zi, ip(xx, yy))
+
+ def test_smoketest_rescale(self):
+ # Test at single points
+ x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+
+ yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
+ assert_almost_equal(y, yi)
+
+ def test_square_rescale(self):
+ # Test barycentric interpolation on a rectangle with rescaling
+ # agaings the same implementation without rescaling
+
+ points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.double)
+ values = np.array([1., 2., -3., 5.], dtype=np.double)
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+ np.linspace(0, 100, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+ xi = np.array([xx, yy]).T.copy()
+ zi = interpnd.LinearNDInterpolator(points, values)(xi)
+ zi_rescaled = interpnd.LinearNDInterpolator(points, values,
+ rescale=True)(xi)
+
+ assert_almost_equal(zi, zi_rescaled)
+
+ def test_tripoints_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
+ yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
+ rescale=True)(x)
+ assert_almost_equal(yi, yi_rescale)
+
+ def test_tri_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ match = ("Rescaling is not supported when passing a "
+ "Delaunay triangulation as ``points``.")
+ with pytest.raises(ValueError, match=match):
+ interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
+
+ def test_pickle(self):
+ # Test at single points
+ np.random.seed(1234)
+ x = np.random.rand(30, 2)
+ y = np.random.rand(30) + 1j*np.random.rand(30)
+
+ ip = interpnd.LinearNDInterpolator(x, y)
+ ip2 = pickle.loads(pickle.dumps(ip))
+
+ assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+
+class TestEstimateGradients2DGlobal(object):
+ def test_smoketest(self):
+ x = np.array([(0, 0), (0, 2),
+ (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
+ tri = qhull.Delaunay(x)
+
+ # Should be exact for linear functions, independent of triangulation
+
+ funcs = [
+ (lambda x, y: 0*x + 1, (0, 0)),
+ (lambda x, y: 0 + x, (1, 0)),
+ (lambda x, y: -2 + y, (0, 1)),
+ (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
+ ]
+
+ for j, (func, grad) in enumerate(funcs):
+ z = func(x[:,0], x[:,1])
+ dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
+
+ assert_equal(dz.shape, (6, 2))
+ assert_allclose(dz, np.array(grad)[None,:] + 0*dz,
+ rtol=1e-5, atol=1e-5, err_msg="item %d" % j)
+
+ def test_regression_2359(self):
+ # Check regression --- for certain point sets, gradient
+ # estimation could end up in an infinite loop
+ points = np.load(data_file('estimate_gradients_hang.npy'))
+ values = np.random.rand(points.shape[0])
+ tri = qhull.Delaunay(points)
+
+ # This should not hang
+ with suppress_warnings() as sup:
+ sup.filter(interpnd.GradientEstimationWarning,
+ "Gradient estimation did not converge")
+ interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
+
+
+class TestCloughTocher2DInterpolator(object):
+
+ def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, rescale=False, **kw):
+ np.random.seed(1234)
+ if x is None:
+ x = np.array([(0, 0), (0, 1),
+ (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
+ (0.5, 0.2)],
+ dtype=float)
+
+ if not alternate:
+ ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
+ tol=1e-6, rescale=rescale)
+ else:
+ ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
+ func(x[:,0], x[:,1]),
+ tol=1e-6, rescale=rescale)
+
+ p = np.random.rand(50, 2)
+
+ if not alternate:
+ a = ip(p)
+ else:
+ a = ip(p[:,0], p[:,1])
+ b = func(p[:,0], p[:,1])
+
+ try:
+ assert_allclose(a, b, **kw)
+ except AssertionError:
+ print("_check_accuracy: abs(a-b):", abs(a - b))
+ print("ip.grad:", ip.grad)
+ raise
+
+ def test_linear_smoketest(self):
+ # Should be exact for linear functions, independent of triangulation
+ funcs = [
+ lambda x, y: 0*x + 1,
+ lambda x, y: 0 + x,
+ lambda x, y: -2 + y,
+ lambda x, y: 3 + 3*x + 14.15*y,
+ ]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ alternate=True,
+ err_msg="Function (alternate) %d" % j)
+ # check rescaling
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ err_msg="Function (rescaled) %d" % j, rescale=True)
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ alternate=True, rescale=True,
+ err_msg="Function (alternate, rescaled) %d" % j)
+
+ def test_quadratic_smoketest(self):
+ # Should be reasonably accurate for quadratic functions
+ funcs = [
+ lambda x, y: x**2,
+ lambda x, y: y**2,
+ lambda x, y: x**2 - y**2,
+ lambda x, y: x*y,
+ ]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+ err_msg="Function %d" % j, rescale=True)
+
+ def test_tri_input(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_tri_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ match = ("Rescaling is not supported when passing a "
+ "Delaunay triangulation as ``points``.")
+ with pytest.raises(ValueError, match=match):
+ interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
+
+ def test_tripoints_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
+ yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
+ assert_almost_equal(yi, yi_rescale)
+
+ def test_dense(self):
+ # Should be more accurate for dense meshes
+ funcs = [
+ lambda x, y: x**2,
+ lambda x, y: y**2,
+ lambda x, y: x**2 - y**2,
+ lambda x, y: x*y,
+ lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
+ ]
+
+ np.random.seed(4321) # use a different seed than the check!
+ grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
+ np.random.rand(30*30, 2)]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+ err_msg="Function %d" % j, rescale=True)
+
+ def test_wrong_ndim(self):
+ x = np.random.randn(30, 3)
+ y = np.random.randn(30)
+ assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
+
+ def test_pickle(self):
+ # Test at single points
+ np.random.seed(1234)
+ x = np.random.rand(30, 2)
+ y = np.random.rand(30) + 1j*np.random.rand(30)
+
+ ip = interpnd.CloughTocher2DInterpolator(x, y)
+ ip2 = pickle.loads(pickle.dumps(ip))
+
+ assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+ def test_boundary_tri_symmetry(self):
+ # Interpolation at neighbourless triangles should retain
+ # symmetry with mirroring the triangle.
+
+ # Equilateral triangle
+ points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
+ values = np.array([1, 0, 0])
+
+ ip = interpnd.CloughTocher2DInterpolator(points, values)
+
+ # Set gradient to zero at vertices
+ ip.grad[...] = 0
+
+ # Interpolation should be symmetric vs. bisector
+ alpha = 0.3
+ p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
+ p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])
+
+ v1 = ip(p1)
+ v2 = ip(p2)
+ assert_allclose(v1, v2)
+
+ # ... and affine invariant
+ np.random.seed(1)
+ A = np.random.randn(2, 2)
+ b = np.random.randn(2)
+
+ points = A.dot(points.T).T + b[None,:]
+ p1 = A.dot(p1) + b
+ p2 = A.dot(p2) + b
+
+ ip = interpnd.CloughTocher2DInterpolator(points, values)
+ ip.grad[...] = 0
+
+ w1 = ip(p1)
+ w2 = ip(p2)
+ assert_allclose(w1, v1)
+ assert_allclose(w2, v2)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_interpolate.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_interpolate.py
new file mode 100644
index 0000000..bd5e439
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_interpolate.py
@@ -0,0 +1,2885 @@
+import itertools
+
+from numpy.testing import (assert_, assert_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_array_equal,
+ assert_allclose, assert_warns)
+from pytest import raises as assert_raises
+import pytest
+
+from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
+import numpy as np
+
+from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
+ splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
+ RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
+ RectBivariateSpline, interpn, NdPPoly, BSpline,
+ CloughTocher2DInterpolator)
+
+from scipy.special import poch, gamma
+
+from scipy.interpolate import _ppoly
+
+from scipy._lib._gcutils import assert_deallocated, IS_PYPY
+
+from scipy.integrate import nquad
+
+from scipy.special import binom
+
+from scipy.sparse.sputils import matrix
+
+
+class TestInterp2D(object):
+ def test_interp2d(self):
+ y, x = mgrid[0:2:20j, 0:pi:21j]
+ z = sin(x+0.5*y)
+ I = interp2d(x, y, z)
+ assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
+
+ v,u = ogrid[0:2:24j, 0:pi:25j]
+ assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
+
+ def test_interp2d_meshgrid_input(self):
+ # Ticket #703
+ x = linspace(0, 2, 16)
+ y = linspace(0, pi, 21)
+ z = sin(x[None,:] + y[:,None]/2.)
+ I = interp2d(x, y, z)
+ assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
+
+ def test_interp2d_meshgrid_input_unsorted(self):
+ np.random.seed(1234)
+ x = linspace(0, 2, 16)
+ y = linspace(0, pi, 21)
+
+ z = sin(x[None,:] + y[:,None]/2.)
+ ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+ np.random.shuffle(x)
+ z = sin(x[None,:] + y[:,None]/2.)
+ ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+ np.random.shuffle(x)
+ np.random.shuffle(y)
+ z = sin(x[None,:] + y[:,None]/2.)
+ ip3 = interp2d(x, y, z, kind='cubic')
+
+ x = linspace(0, 2, 31)
+ y = linspace(0, pi, 30)
+
+ assert_equal(ip1(x, y), ip2(x, y))
+ assert_equal(ip1(x, y), ip3(x, y))
+
+ def test_interp2d_eval_unsorted(self):
+ y, x = mgrid[0:2:20j, 0:pi:21j]
+ z = sin(x + 0.5*y)
+ func = interp2d(x, y, z)
+
+ xe = np.array([3, 4, 5])
+ ye = np.array([5.3, 7.1])
+ assert_allclose(func(xe, ye), func(xe, ye[::-1]))
+
+ assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
+
+ def test_interp2d_linear(self):
+ # Ticket #898
+ a = np.zeros([5, 5])
+ a[2, 2] = 1.0
+ x = y = np.arange(5)
+ b = interp2d(x, y, a, 'linear')
+ assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
+ assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
+
+ def test_interp2d_bounds(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 2, 7)
+ z = x[None, :]**2 + y[:, None]
+
+ ix = np.linspace(-1, 3, 31)
+ iy = np.linspace(-1, 3, 33)
+
+ b = interp2d(x, y, z, bounds_error=True)
+ assert_raises(ValueError, b, ix, iy)
+
+ b = interp2d(x, y, z, fill_value=np.nan)
+ iz = b(ix, iy)
+ mx = (ix < 0) | (ix > 1)
+ my = (iy < 0) | (iy > 2)
+ assert_(np.isnan(iz[my,:]).all())
+ assert_(np.isnan(iz[:,mx]).all())
+ assert_(np.isfinite(iz[~my,:][:,~mx]).all())
+
+
+class TestInterp1D(object):
+
+ def setup_method(self):
+ self.x5 = np.arange(5.)
+ self.x10 = np.arange(10.)
+ self.y10 = np.arange(10.)
+ self.x25 = self.x10.reshape((2,5))
+ self.x2 = np.arange(2.)
+ self.y2 = np.arange(2.)
+ self.x1 = np.array([0.])
+ self.y1 = np.array([0.])
+
+ self.y210 = np.arange(20.).reshape((2, 10))
+ self.y102 = np.arange(20.).reshape((10, 2))
+ self.y225 = np.arange(20.).reshape((2, 2, 5))
+ self.y25 = np.arange(10.).reshape((2, 5))
+ self.y235 = np.arange(30.).reshape((2, 3, 5))
+ self.y325 = np.arange(30.).reshape((3, 2, 5))
+
+ self.fill_value = -100.0
+
+ def test_validation(self):
+ # Make sure that appropriate exceptions are raised when invalid values
+ # are given to the constructor.
+
+ # These should all work.
+ for kind in ('nearest', 'nearest-up', 'zero', 'linear', 'slinear',
+ 'quadratic', 'cubic', 'previous', 'next'):
+ interp1d(self.x10, self.y10, kind=kind)
+ interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
+ interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=np.array([-1]))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=(-1,))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=-1)
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=(-1, -1))
+ interp1d(self.x10, self.y10, kind=0)
+ interp1d(self.x10, self.y10, kind=1)
+ interp1d(self.x10, self.y10, kind=2)
+ interp1d(self.x10, self.y10, kind=3)
+ interp1d(self.x10, self.y210, kind='linear', axis=-1,
+ fill_value=(-1, -1))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=np.ones(10))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=(np.ones(10), np.ones(10)))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=(np.ones(10), -1))
+
+ # x array must be 1D.
+ assert_raises(ValueError, interp1d, self.x25, self.y10)
+
+ # y array cannot be a scalar.
+ assert_raises(ValueError, interp1d, self.x10, np.array(0))
+
+ # Check for x and y arrays having the same length.
+ assert_raises(ValueError, interp1d, self.x10, self.y2)
+ assert_raises(ValueError, interp1d, self.x2, self.y10)
+ assert_raises(ValueError, interp1d, self.x10, self.y102)
+ interp1d(self.x10, self.y210)
+ interp1d(self.x10, self.y102, axis=0)
+
+ # Check for x and y having at least 1 element.
+ assert_raises(ValueError, interp1d, self.x1, self.y10)
+ assert_raises(ValueError, interp1d, self.x10, self.y1)
+ assert_raises(ValueError, interp1d, self.x1, self.y1)
+
+ # Bad fill values
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=(-1, -1, -1)) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[-1, -1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=np.array((-1, -1, -1))) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[[-1]]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[-1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=np.array([])) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=()) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+ axis=0, fill_value=[-1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+ axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
+
+ def test_init(self):
+ # Check that the attributes are initialized appropriately by the
+ # constructor.
+ assert_(interp1d(self.x10, self.y10).copy)
+ assert_(not interp1d(self.x10, self.y10, copy=False).copy)
+ assert_(interp1d(self.x10, self.y10).bounds_error)
+ assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
+ assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
+ assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
+ 3.0)
+ assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
+ (1.0, 2.0))
+ assert_equal(interp1d(self.x10, self.y10).axis, 0)
+ assert_equal(interp1d(self.x10, self.y210).axis, 1)
+ assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
+ assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
+ assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
+ assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
+
+ def test_assume_sorted(self):
+ # Check for unsorted arrays
+ interp10 = interp1d(self.x10, self.y10)
+ interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
+
+ assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
+ assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
+ interp10([2.4, 5.6, 6.0]))
+
+ # Check assume_sorted keyword (defaults to False)
+ interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
+ assume_sorted=False)
+ assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
+
+ interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
+ assume_sorted=True)
+ # Should raise an error for unsorted input if assume_sorted=True
+ assert_raises(ValueError, interp10_assume_kw2, self.x10)
+
+ # Check that if y is a 2-D array, things are still consistent
+ interp10_y_2d = interp1d(self.x10, self.y210)
+ interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
+ assert_array_almost_equal(interp10_y_2d(self.x10),
+ interp10_y_2d_unsorted(self.x10))
+
+ def test_linear(self):
+ for kind in ['linear', 'slinear']:
+ self._check_linear(kind)
+
+ def _check_linear(self, kind):
+ # Check the actual implementation of linear interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind=kind)
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2.4, 5.6, 6.0]))
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind=kind,
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [-1, 0, 9, 11], rtol=1e-14)
+
+ opts = dict(kind=kind,
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_linear_dtypes(self):
+ # regression test for gh-5898, where 1D linear interpolation has been
+ # delegated to numpy.interp for all float dtypes, and the latter was
+ # not handling e.g. np.float128.
+ for dtyp in np.sctypes["float"]:
+ x = np.arange(8, dtype=dtyp)
+ y = x
+ yp = interp1d(x, y, kind='linear')(x)
+ assert_equal(yp.dtype, dtyp)
+ assert_allclose(yp, y, atol=1e-15)
+
+ def test_slinear_dtypes(self):
+ # regression test for gh-7273: 1D slinear interpolation fails with
+ # float32 inputs
+ dt_r = [np.float16, np.float32, np.float64]
+ dt_rc = dt_r + [np.complex64, np.complex128]
+ spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
+ for dtx in dt_r:
+ x = np.arange(0, 10, dtype=dtx)
+ for dty in dt_rc:
+ y = np.exp(-x/3.0).astype(dty)
+ for dtn in dt_r:
+ xnew = x.astype(dtn)
+ for kind in spline_kinds:
+ f = interp1d(x, y, kind=kind, bounds_error=False)
+ assert_allclose(f(xnew), y, atol=1e-7,
+ err_msg="%s, %s %s" % (dtx, dty, dtn))
+
+ def test_cubic(self):
+ # Check the actual implementation of spline interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='cubic')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10(1.5), np.array([1.5]))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2.4, 5.6, 6.0]),)
+
+ def test_nearest(self):
+ # Check the actual implementation of nearest-neighbour interpolation.
+ # Nearest asserts that half-integer case (1.5) rounds down to 1
+ interp10 = interp1d(self.x10, self.y10, kind='nearest')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='nearest',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, 9], rtol=1e-14)
+
+ opts = dict(kind='nearest',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
def test_nearest_up(self):
    """'nearest-up' kind: half-integer queries round up; extrapolation clamps."""
    # Check the actual implementation of nearest-neighbour interpolation.
    # Nearest-up asserts that half-integer case (1.5) rounds up to 2
    interp10 = interp1d(self.x10, self.y10, kind='nearest-up')
    assert_array_almost_equal(interp10(self.x10), self.y10)
    assert_array_almost_equal(interp10(1.2), np.array(1.))
    assert_array_almost_equal(interp10(1.5), np.array(2.))
    assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                              np.array([2., 6., 6.]),)

    # test fill_value="extrapolate"
    extrapolator = interp1d(self.x10, self.y10, kind='nearest-up',
                            fill_value='extrapolate')
    assert_allclose(extrapolator([-1., 0, 9, 11]),
                    [0, 0, 9, 9], rtol=1e-14)

    # bounds_error=True is incompatible with fill_value='extrapolate'
    opts = dict(kind='nearest-up',
                fill_value='extrapolate',
                bounds_error=True)
    assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
def test_previous(self):
    """'previous' kind: each query takes the value of the preceding node."""
    # Check the actual implementation of previous interpolation.
    interp10 = interp1d(self.x10, self.y10, kind='previous')
    assert_array_almost_equal(interp10(self.x10), self.y10)
    assert_array_almost_equal(interp10(1.2), np.array(1.))
    assert_array_almost_equal(interp10(1.5), np.array(1.))
    assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                              np.array([2., 5., 6.]),)

    # test fill_value="extrapolate"
    extrapolator = interp1d(self.x10, self.y10, kind='previous',
                            fill_value='extrapolate')
    assert_allclose(extrapolator([-1., 0, 9, 11]),
                    [0, 0, 9, 9], rtol=1e-14)

    # bounds_error=True is incompatible with fill_value='extrapolate'
    opts = dict(kind='previous',
                fill_value='extrapolate',
                bounds_error=True)
    assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
def test_next(self):
    """'next' kind: each query takes the value of the following node."""
    # Check the actual implementation of next interpolation.
    interp10 = interp1d(self.x10, self.y10, kind='next')
    assert_array_almost_equal(interp10(self.x10), self.y10)
    assert_array_almost_equal(interp10(1.2), np.array(2.))
    assert_array_almost_equal(interp10(1.5), np.array(2.))
    assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                              np.array([3., 6., 6.]),)

    # test fill_value="extrapolate"
    extrapolator = interp1d(self.x10, self.y10, kind='next',
                            fill_value='extrapolate')
    assert_allclose(extrapolator([-1., 0, 9, 11]),
                    [0, 0, 9, 9], rtol=1e-14)

    # bounds_error=True is incompatible with fill_value='extrapolate'
    opts = dict(kind='next',
                fill_value='extrapolate',
                bounds_error=True)
    assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
def test_zero(self):
    """'zero' kind: piecewise-constant (order-0) spline interpolation."""
    # Check the actual implementation of zero-order spline interpolation.
    interp10 = interp1d(self.x10, self.y10, kind='zero')
    assert_array_almost_equal(interp10(self.x10), self.y10)
    assert_array_almost_equal(interp10(1.2), np.array(1.))
    assert_array_almost_equal(interp10(1.5), np.array(1.))
    assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                              np.array([2., 5., 6.]))
+
def _bounds_check(self, kind='linear'):
    """Out-of-bounds queries return fill_value, or raise with bounds_error."""
    # Test that our handling of out-of-bounds input is correct.
    extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
                        bounds_error=False, kind=kind)

    assert_array_equal(extrap10(11.2), np.array(self.fill_value))
    assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
    assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
                       np.array(self.fill_value),)
    # _check_bounds is private interp1d API: returns stacked
    # (below-range, above-range) boolean masks for the query points.
    assert_array_equal(extrap10._check_bounds(
        np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
        np.array([[True, False, False, False, False],
                  [False, False, False, False, True]]))

    raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
                                   kind=kind)
    assert_raises(ValueError, raises_bounds_error, -1.0)
    assert_raises(ValueError, raises_bounds_error, 11.0)
    # in-range queries must not raise
    raises_bounds_error([0.0, 5.0, 9.0])
+
def _bounds_check_int_nan_fill(self, kind='linear'):
    """Integer y with nan fill: result must upcast so nan is representable."""
    x = np.arange(10).astype(np.int_)
    y = np.arange(10).astype(np.int_)
    c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
    yi = c(x - 1)
    # x - 1 starts one step below the data range -> first entry is the fill
    assert_(np.isnan(yi[0]))
    assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
+
def test_bounds(self):
    """Run both bounds checks across all supported interpolation kinds."""
    for kind in ('linear', 'cubic', 'nearest', 'previous', 'next',
                 'slinear', 'zero', 'quadratic'):
        self._bounds_check(kind)
        self._bounds_check_int_nan_fill(kind)
+
def _check_fill_value(self, kind):
    """Exhaustive (lower, upper) fill_value broadcasting matrix for `kind`.

    Fixtures (set elsewhere): self.x5 has 5 nodes; self.y235/y325/y225/y25
    carry extra leading dims of shape (2,3)/(3,2)/(2,2)/(2,) with the
    interpolation axis last.  Shapes that cannot broadcast must raise
    ValueError at construction time.
    """
    interp = interp1d(self.x10, self.y10, kind=kind,
                      fill_value=(-100, 100), bounds_error=False)
    assert_array_almost_equal(interp(10), 100)
    assert_array_almost_equal(interp(-10), -100)
    assert_array_almost_equal(interp([-10, 10]), [-100, 100])

    # Proper broadcasting:
    #     interp along axis of length 5
    # other dim=(2, 3), (3, 2), (2, 2), or (2,)

    # one singleton fill_value (works for all)
    for y in (self.y235, self.y325, self.y225, self.y25):
        interp = interp1d(self.x5, y, kind=kind, axis=-1,
                          fill_value=100, bounds_error=False)
        assert_array_almost_equal(interp(10), 100)
        assert_array_almost_equal(interp(-10), 100)
        assert_array_almost_equal(interp([-10, 10]), 100)

        # singleton lower, singleton upper
        interp = interp1d(self.x5, y, kind=kind, axis=-1,
                          fill_value=(-100, 100), bounds_error=False)
        assert_array_almost_equal(interp(10), 100)
        assert_array_almost_equal(interp(-10), -100)
        if y.ndim == 3:
            result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
        else:
            result = [[-100, 100]] * y.shape[0]
        assert_array_almost_equal(interp([-10, 10]), result)

    # one broadcastable (3,) fill_value
    fill_value = [100, 200, 300]
    for y in (self.y325, self.y225):
        assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
                      axis=-1, fill_value=fill_value, bounds_error=False)
    interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
                      fill_value=fill_value, bounds_error=False)
    assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
    assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
    assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
                                                   [200, 200],
                                                   [300, 300]]] * 2)

    # one broadcastable (2,) fill_value
    fill_value = [100, 200]
    assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
                  axis=-1, fill_value=fill_value, bounds_error=False)
    for y in (self.y225, self.y325, self.y25):
        interp = interp1d(self.x5, y, kind=kind, axis=-1,
                          fill_value=fill_value, bounds_error=False)
        result = [100, 200]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp(10), result)
        assert_array_almost_equal(interp(-10), result)
        result = [[100, 100], [200, 200]]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp([-10, 10]), result)

    # broadcastable (3,) lower, singleton upper
    fill_value = (np.array([-100, -200, -300]), 100)
    for y in (self.y325, self.y225):
        assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
                      axis=-1, fill_value=fill_value, bounds_error=False)
    interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
                      fill_value=fill_value, bounds_error=False)
    assert_array_almost_equal(interp(10), 100)
    assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
    assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
                                                   [-200, 100],
                                                   [-300, 100]]] * 2)

    # broadcastable (2,) lower, singleton upper
    fill_value = (np.array([-100, -200]), 100)
    assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
                  axis=-1, fill_value=fill_value, bounds_error=False)
    for y in (self.y225, self.y325, self.y25):
        interp = interp1d(self.x5, y, kind=kind, axis=-1,
                          fill_value=fill_value, bounds_error=False)
        assert_array_almost_equal(interp(10), 100)
        result = [-100, -200]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp(-10), result)
        result = [[-100, 100], [-200, 100]]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp([-10, 10]), result)

    # broadcastable (3,) lower, broadcastable (3,) upper
    fill_value = ([-100, -200, -300], [100, 200, 300])
    for y in (self.y325, self.y225):
        assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
                      axis=-1, fill_value=fill_value, bounds_error=False)
    for ii in range(2):  # check ndarray as well as list here
        if ii == 1:
            fill_value = tuple(np.array(f) for f in fill_value)
        interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
                          fill_value=fill_value, bounds_error=False)
        assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
        assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
        assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
                                                       [-200, 200],
                                                       [-300, 300]]] * 2)
    # broadcastable (2,) lower, broadcastable (2,) upper
    fill_value = ([-100, -200], [100, 200])
    assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
                  axis=-1, fill_value=fill_value, bounds_error=False)
    for y in (self.y325, self.y225, self.y25):
        interp = interp1d(self.x5, y, kind=kind, axis=-1,
                          fill_value=fill_value, bounds_error=False)
        result = [100, 200]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp(10), result)
        result = [-100, -200]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp(-10), result)
        result = [[-100, 100], [-200, 200]]
        if y.ndim == 3:
            result = [result] * y.shape[0]
        assert_array_almost_equal(interp([-10, 10]), result)

    # one broadcastable (2, 2) array-like
    fill_value = [[100, 200], [1000, 2000]]
    for y in (self.y235, self.y325, self.y25):
        assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
                      axis=-1, fill_value=fill_value, bounds_error=False)
    for ii in range(2):
        if ii == 1:
            fill_value = np.array(fill_value)
        interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
                          fill_value=fill_value, bounds_error=False)
        assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
        assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
        assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
                                                       [200, 200]],
                                                      [[1000, 1000],
                                                       [2000, 2000]]])

    # broadcastable (2, 2) lower, broadcastable (2, 2) upper
    fill_value = ([[-100, -200], [-1000, -2000]],
                  [[100, 200], [1000, 2000]])
    for y in (self.y235, self.y325, self.y25):
        assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
                      axis=-1, fill_value=fill_value, bounds_error=False)
    for ii in range(2):
        if ii == 1:
            fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
        interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
                          fill_value=fill_value, bounds_error=False)
        assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
        assert_array_almost_equal(interp(-10), [[-100, -200],
                                                [-1000, -2000]])
        assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
                                                       [-200, 200]],
                                                      [[-1000, 1000],
                                                       [-2000, 2000]]])
+
def test_fill_value(self):
    """Two-element fill_value must work for every interpolation kind."""
    # test that two-element fill value works
    for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
                 'zero', 'previous', 'next'):
        self._check_fill_value(kind)
+
def test_fill_value_writeable(self):
    """fill_value stays a public, writeable attribute (backward compat)."""
    # backwards compat: fill_value is a public writeable attribute
    interp = interp1d(self.x10, self.y10, fill_value=123.0)
    assert_equal(interp.fill_value, 123.0)
    interp.fill_value = 321.0
    assert_equal(interp.fill_value, 321.0)
+
def _nd_check_interp(self, kind='linear'):
    """Multidimensional query points and multidimensional y for `kind`."""
    # Check the behavior when the inputs and outputs are multidimensional.

    # Multidimensional input.
    interp10 = interp1d(self.x10, self.y10, kind=kind)
    assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
                              np.array([[3., 5.], [2., 7.]]))

    # Scalar input -> 0-dim scalar array output
    assert_(isinstance(interp10(1.2), np.ndarray))
    assert_equal(interp10(1.2).shape, ())

    # Multidimensional outputs.
    interp210 = interp1d(self.x10, self.y210, kind=kind)
    assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
    assert_array_almost_equal(interp210(np.array([1., 2.])),
                              np.array([[1., 2.], [11., 12.]]))

    interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
    assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
    assert_array_almost_equal(interp102(np.array([1., 3.])),
                              np.array([[2., 3.], [6., 7.]]))

    # Both at the same time!
    x_new = np.array([[3., 5.], [2., 7.]])
    assert_array_almost_equal(interp210(x_new),
                              np.array([[[3., 5.], [2., 7.]],
                                        [[13., 15.], [12., 17.]]]))
    assert_array_almost_equal(interp102(x_new),
                              np.array([[[6., 7.], [10., 11.]],
                                        [[4., 5.], [14., 15.]]]))
+
def _nd_check_shape(self, kind='linear'):
    """Output shape for 4-D y, interpolating along each axis in turn."""
    # Check large N-D output shape
    a = [4, 5, 6, 7]
    y = np.arange(np.prod(a)).reshape(*a)
    for n, s in enumerate(a):
        x = np.arange(s)
        z = interp1d(x, y, axis=n, kind=kind)
        assert_array_almost_equal(z(x), y, err_msg=kind)

        # the interpolation axis is replaced by the query-array shape
        x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
        b = list(a)
        b[n:n+1] = [2,3,1]
        assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
+
def test_nd(self):
    """Run the N-D value and shape checks for every interpolation kind."""
    for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
                 'zero', 'previous', 'next'):
        self._nd_check_interp(kind)
        self._nd_check_shape(kind)
+
+ def _check_complex(self, dtype=np.complex_, kind='linear'):
+ x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
+ y = x * x ** (1 + 2j)
+ y = y.astype(dtype)
+
+ # simple test
+ c = interp1d(x, y, kind=kind)
+ assert_array_almost_equal(y[:-1], c(x)[:-1])
+
+ # check against interpolating real+imag separately
+ xi = np.linspace(1, 10, 31)
+ cr = interp1d(x, y.real, kind=kind)
+ ci = interp1d(x, y.imag, kind=kind)
+ assert_array_almost_equal(c(xi).real, cr(xi))
+ assert_array_almost_equal(c(xi).imag, ci(xi))
+
def test_complex(self):
    """Complex64 and complex128 y for every interpolation kind."""
    for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
                 'zero', 'previous', 'next'):
        self._check_complex(np.complex64, kind)
        self._check_complex(np.complex128, kind)
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_circular_refs(self):
    """interp1d objects must be collectable (no uncollectable cycles)."""
    # Test interp1d can be automatically garbage collected
    x = np.linspace(0, 1)
    y = np.linspace(0, 1)
    # Confirm interp can be released from memory after use
    with assert_deallocated(interp1d, x, y) as interp:
        interp([0.1, 0.2])
        del interp
+
+ def test_overflow_nearest(self):
+ # Test that the x range doesn't overflow when given integers as input
+ for kind in ('nearest', 'previous', 'next'):
+ x = np.array([0, 50, 127], dtype=np.int8)
+ ii = interp1d(x, x, kind=kind)
+ assert_array_almost_equal(ii(x), x)
+
+ def test_local_nans(self):
+ # check that for local interpolation kinds (slinear, zero) a single nan
+ # only affects its local neighborhood
+ x = np.arange(10).astype(float)
+ y = x.copy()
+ y[6] = np.nan
+ for kind in ('zero', 'slinear'):
+ ir = interp1d(x, y, kind=kind)
+ vals = ir([4.9, 7.0])
+ assert_(np.isfinite(vals).all())
+
+ def test_spline_nans(self):
+ # Backwards compat: a single nan makes the whole spline interpolation
+ # return nans in an array of the correct shape. And it doesn't raise,
+ # just quiet nans because of backcompat.
+ x = np.arange(8).astype(float)
+ y = x.copy()
+ yn = y.copy()
+ yn[3] = np.nan
+
+ for kind in ['quadratic', 'cubic']:
+ ir = interp1d(x, y, kind=kind)
+ irn = interp1d(x, yn, kind=kind)
+ for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
+ xnew = np.asarray(xnew)
+ out, outn = ir(x), irn(x)
+ assert_(np.isnan(outn).all())
+ assert_equal(out.shape, outn.shape)
+
+ def test_all_nans(self):
+ # regression test for gh-11637: interp1d core dumps with all-nan `x`
+ x = np.ones(10) * np.nan
+ y = np.arange(10)
+ with assert_raises(ValueError):
+ interp1d(x, y, kind='cubic')
+
def test_read_only(self):
    """Read-only x / xnew arrays must be accepted by every kind."""
    x = np.arange(0, 10)
    y = np.exp(-x / 3.0)
    xnew = np.arange(0, 9, 0.1)
    # Check both read-only and not read-only:
    for xnew_writeable in (True, False):
        xnew.flags.writeable = xnew_writeable
        x.flags.writeable = False
        for kind in ('linear', 'nearest', 'zero', 'slinear', 'quadratic',
                     'cubic'):
            f = interp1d(x, y, kind=kind)
            vals = f(xnew)
            assert_(np.isfinite(vals).all())
+
+
class TestLagrange(object):
    """Smoke test for `lagrange`: fitting exact samples of a polynomial
    must recover its coefficients."""

    def test_lagrange(self):
        reference = poly1d([5, 2, 1, 4, 3])
        nodes = np.arange(len(reference.coeffs))
        recovered = lagrange(nodes, reference(nodes))
        assert_array_almost_equal(reference.coeffs, recovered.coeffs)
+
+
class TestAkima1DInterpolator(object):
    """Akima interpolation against precomputed reference values (the long
    decimals are hard-coded expected outputs for the fixed input data)."""

    def test_eval(self):
        # 1-D y
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                       4.1363636363636366866103344, 5.9803623910336236590978842,
                       5.5067291516462386624652936, 5.2031367459745245795943447,
                       4.1796554159017080820603951, 3.4110386597938129327189927,
                       3.])
        assert_allclose(ak(xi), yi)

    def test_eval_2d(self):
        # 2-D y: two columns, second is twice the first
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = np.column_stack((y, 2. * y))
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                       4.1363636363636366866103344,
                       5.9803623910336236590978842,
                       5.5067291516462386624652936,
                       5.2031367459745245795943447,
                       4.1796554159017080820603951,
                       3.4110386597938129327189927, 3.])
        yi = np.column_stack((yi, 2. * yi))
        assert_allclose(ak(xi), yi)

    def test_eval_3d(self):
        # 3-D y: four scaled copies of the 1-D data
        x = np.arange(0., 11.)
        y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = np.empty((11, 2, 2))
        y[:, 0, 0] = y_
        y[:, 1, 0] = 2. * y_
        y[:, 0, 1] = 3. * y_
        y[:, 1, 1] = 4. * y_
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.empty((13, 2, 2))
        yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                        4.1363636363636366866103344,
                        5.9803623910336236590978842,
                        5.5067291516462386624652936,
                        5.2031367459745245795943447,
                        4.1796554159017080820603951,
                        3.4110386597938129327189927, 3.])
        yi[:, 0, 0] = yi_
        yi[:, 1, 0] = 2. * yi_
        yi[:, 0, 1] = 3. * yi_
        yi[:, 1, 1] = 4. * yi_
        assert_allclose(ak(xi), yi)

    def test_degenerate_case_multidimensional(self):
        # This test is for issue #5683.
        x = np.array([0, 1, 2])
        y = np.vstack((x, x**2)).T
        ak = Akima1DInterpolator(x, y)
        x_eval = np.array([0.5, 1.5])
        y_eval = ak(x_eval)
        assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)

    def test_extend(self):
        # extend() is documented as unimplemented for Akima
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y)
        match = "Extending a 1-D Akima interpolator is not yet implemented"
        with pytest.raises(NotImplementedError, match=match):
            ak.extend(None, None)
+
+
class TestPPolyCommon(object):
    """Behavior shared by PPoly and BPoly: construction checks, extend(),
    output shapes, complex coefficients, and the `axis` keyword."""
    # test basic functionality for PPoly and BPoly
    def test_sort_check(self):
        # breakpoints must be monotonic
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 1, 0.5])
        assert_raises(ValueError, PPoly, c, x)
        assert_raises(ValueError, BPoly, c, x)

    def test_ctor_c(self):
        # wrong shape: `c` must be at least 2D
        with assert_raises(ValueError):
            PPoly([1, 2], [0, 1])

    def test_extend(self):
        # Test adding new points to the piecewise polynomial
        np.random.seed(1234)

        order = 3
        x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
        c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1

        for cls in (PPoly, BPoly):
            # extending on the right or the left must equal building whole
            pp = cls(c[:,:9], x[:10])
            pp.extend(c[:,9:], x[10:])

            pp2 = cls(c[:, 10:], x[10:])
            pp2.extend(c[:, :10], x[:10])

            pp3 = cls(c, x)

            assert_array_equal(pp.c, pp3.c)
            assert_array_equal(pp.x, pp3.x)
            assert_array_equal(pp2.c, pp3.c)
            assert_array_equal(pp2.x, pp3.x)

    def test_extend_diff_orders(self):
        # Test extending polynomial with different order one
        np.random.seed(1234)

        x = np.linspace(0, 1, 6)
        c = np.random.rand(2, 5)

        x2 = np.linspace(1, 2, 6)
        c2 = np.random.rand(4, 5)

        for cls in (PPoly, BPoly):
            pp1 = cls(c, x)
            pp2 = cls(c2, x2)

            pp_comb = cls(c, x)
            pp_comb.extend(c2, x2[1:])

            # NB. doesn't match to pp1 at the endpoint, because pp1 is not
            # continuous with pp2 as we took random coefs.
            xi1 = np.linspace(0, 1, 300, endpoint=False)
            xi2 = np.linspace(1, 2, 300)

            assert_allclose(pp1(xi1), pp_comb(xi1))
            assert_allclose(pp2(xi2), pp_comb(xi2))

    def test_extend_descending(self):
        np.random.seed(0)

        order = 3
        x = np.sort(np.random.uniform(0, 10, 20))
        c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)

        for cls in (PPoly, BPoly):
            p = cls(c, x)

            p1 = cls(c[:, :9], x[:10])
            p1.extend(c[:, 9:], x[10:])

            p2 = cls(c[:, 10:], x[10:])
            p2.extend(c[:, :10], x[:10])

            assert_array_equal(p1.c, p.c)
            assert_array_equal(p1.x, p.x)
            assert_array_equal(p2.c, p.c)
            assert_array_equal(p2.x, p.x)

    def test_shape(self):
        # query shape prepends to the trailing coefficient dims
        np.random.seed(1234)
        c = np.random.rand(8, 12, 5, 6, 7)
        x = np.sort(np.random.rand(13))
        xp = np.random.rand(3, 4)
        for cls in (PPoly, BPoly):
            p = cls(c, x)
            assert_equal(p(xp).shape, (3, 4, 5, 6, 7))

        # 'scalars'
        for cls in (PPoly, BPoly):
            p = cls(c[..., 0, 0, 0], x)

            assert_equal(np.shape(p(0.5)), ())
            assert_equal(np.shape(p(np.array(0.5))), ())

            # ragged object arrays are rejected
            assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]], dtype=object))

    def test_complex_coef(self):
        # complex coefficients evaluate as real + 1j*imag parts
        np.random.seed(12345)
        x = np.sort(np.random.random(13))
        c = np.random.random((8, 12)) * (1. + 0.3j)
        c_re, c_im = c.real, c.imag
        xp = np.random.random(5)
        for cls in (PPoly, BPoly):
            p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
            for nu in [0, 1, 2]:
                assert_allclose(p(xp, nu).real, p_re(xp, nu))
                assert_allclose(p(xp, nu).imag, p_im(xp, nu))

    def test_axis(self):
        # the interpolation axis is moved to the front internally
        np.random.seed(12345)
        c = np.random.rand(3, 4, 5, 6, 7, 8)
        c_s = c.shape
        xp = np.random.random((1, 2))
        for axis in (0, 1, 2, 3):
            m = c.shape[axis+1]
            x = np.sort(np.random.rand(m+1))
            for cls in (PPoly, BPoly):
                p = cls(c, x, axis=axis)
                assert_equal(p.c.shape,
                             c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
                res = p(xp)
                targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
                assert_equal(res.shape, targ_shape)

                # deriv/antideriv does not drop the axis
                for p1 in [cls(c, x, axis=axis).derivative(),
                           cls(c, x, axis=axis).derivative(2),
                           cls(c, x, axis=axis).antiderivative(),
                           cls(c, x, axis=axis).antiderivative(2)]:
                    assert_equal(p1.axis, p.axis)

        # c array needs two axes for the coefficients and intervals, so
        # 0 <= axis < c.ndim-1; raise otherwise
        for axis in (-1, 4, 5, 6):
            for cls in (BPoly, PPoly):
                assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
+
+
class TestPolySubclassing(object):
    """Derivatives and basis conversions of PPoly/BPoly subclasses must
    return instances of the subclass, not the base class."""

    class P(PPoly):
        pass

    class B(BPoly):
        pass

    def _make_polynomials(self):
        # small shared random fixture
        np.random.seed(1234)
        x = np.sort(np.random.random(3))
        c = np.random.random((4, 2))
        return self.P(c, x), self.B(c, x)

    def test_derivative(self):
        pp, bp = self._make_polynomials()
        for p in (pp, bp):
            pd = p.derivative()
            assert_equal(p.__class__, pd.__class__)

        ppa = pp.antiderivative()
        assert_equal(pp.__class__, ppa.__class__)

    def test_from_spline(self):
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0)
        pp = self.P.from_spline(spl)
        assert_equal(pp.__class__, self.P)

    def test_conversions(self):
        pp, bp = self._make_polynomials()

        pp1 = self.P.from_bernstein_basis(bp)
        assert_equal(pp1.__class__, self.P)

        bp1 = self.B.from_power_basis(pp)
        assert_equal(bp1.__class__, self.B)

    def test_from_derivatives(self):
        x = [0, 1, 2]
        y = [[1], [2], [3]]
        bp = self.B.from_derivatives(x, y)
        assert_equal(bp.__class__, self.B)
+
+
+class TestPPoly(object):
+ def test_simple(self):
+ c = np.array([[1, 4], [2, 5], [3, 6]])
+ x = np.array([0, 0.5, 1])
+ p = PPoly(c, x)
+ assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
+ assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
+
def test_periodic(self):
    """extrapolate='periodic': queries wrap into the base interval [0, 1]."""
    c = np.array([[1, 4], [2, 5], [3, 6]])
    x = np.array([0, 0.5, 1])
    p = PPoly(c, x, extrapolate='periodic')

    # 1.3 wraps to 0.3, -0.3 wraps to 0.7
    assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
    assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)

    # first derivative wraps the same way
    assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
    assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)
+
def test_read_only(self):
    """PPoly construction and evaluation must accept read-only x."""
    c = np.array([[1, 4], [2, 5], [3, 6]])
    x = np.array([0, 0.5, 1])
    xnew = np.array([0, 0.1, 0.2])
    # NOTE(review): this constructed object is discarded immediately —
    # presumably a constructor smoke test for the periodic mode; confirm.
    PPoly(c, x, extrapolate='periodic')

    for writeable in (True, False):
        x.flags.writeable = writeable
        f = PPoly(c, x)
        vals = f(xnew)
        assert_(np.isfinite(vals).all())
+
def test_descending(self):
    """Descending breakpoints agree with the equivalent ascending PPoly."""
    def binom_matrix(power):
        # binomial coefficient matrix used to reverse each piece
        n = np.arange(power + 1).reshape(-1, 1)
        k = np.arange(power + 1)
        B = binom(n, k)
        return B[::-1, ::-1]

    np.random.seed(0)

    power = 3
    for m in [10, 20, 30]:
        x = np.sort(np.random.uniform(0, 10, m + 1))
        ca = np.random.uniform(-2, 2, size=(power + 1, m))

        # build the coefficients of the same polynomial expressed around
        # the other end of each interval
        h = np.diff(x)
        h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
        B = binom_matrix(power)
        cap = ca * h_powers
        cdp = np.dot(B.T, cap)
        cd = cdp / h_powers

        pa = PPoly(ca, x, extrapolate=True)
        pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)

        x_test = np.random.uniform(-10, 20, 100)
        assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
        assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)

        pa_d = pa.derivative()
        pd_d = pd.derivative()

        assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)

        # Antiderivatives won't be equal because fixing continuity is
        # done in the reverse order, but surely the differences should be
        # equal.
        pa_i = pa.antiderivative()
        pd_i = pd.antiderivative()
        for a, b in np.random.uniform(-10, 20, (5, 2)):
            int_a = pa.integrate(a, b)
            int_d = pd.integrate(a, b)
            assert_allclose(int_a, int_d, rtol=1e-13)
            assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
                            rtol=1e-13)

        roots_d = pd.roots()
        roots_a = pa.roots()
        assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)
+
def test_multi_shape(self):
    """Shapes of evaluation / derivative / antiderivative for N-D coeffs."""
    # no seed needed: assertions below only check shapes, not values
    c = np.random.rand(6, 2, 1, 2, 3)
    x = np.array([0, 0.5, 1])
    p = PPoly(c, x)
    assert_equal(p.x.shape, x.shape)
    assert_equal(p.c.shape, c.shape)
    assert_equal(p(0.3).shape, c.shape[2:])

    assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])

    # derivative drops one coefficient row, antiderivative adds one
    dp = p.derivative()
    assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
    ip = p.antiderivative()
    assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
+
+ def test_construct_fast(self):
+ np.random.seed(1234)
+ c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
+ x = np.array([0, 0.5, 1])
+ p = PPoly.construct_fast(c, x)
+ assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
+ assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
+
def test_vs_alternative_implementations(self):
    """Cross-check PPoly evaluation against the file-local reference
    implementations _ppoly_eval_1 / _ppoly_eval_2."""
    np.random.seed(1234)
    c = np.random.rand(3, 12, 22)
    x = np.sort(np.r_[0, np.random.rand(11), 1])

    p = PPoly(c, x)

    xp = np.r_[0.3, 0.5, 0.33, 0.6]
    expected = _ppoly_eval_1(c, x, xp)
    assert_allclose(p(xp), expected)

    expected = _ppoly_eval_2(c[:,:,0], x, xp)
    assert_allclose(p(xp)[:,0], expected)
+
def test_from_spline(self):
    """from_spline accepts tck tuples and BSpline; extrapolate propagates."""
    np.random.seed(1234)
    x = np.sort(np.r_[0, np.random.rand(11), 1])
    y = np.random.rand(len(x))

    spl = splrep(x, y, s=0)
    pp = PPoly.from_spline(spl)

    xi = np.linspace(0, 1, 200)
    assert_allclose(pp(xi), splev(xi, spl))

    # make sure .from_spline accepts BSpline objects
    b = BSpline(*spl)
    ppp = PPoly.from_spline(b)
    assert_allclose(ppp(xi), b(xi))

    # BSpline's extrapolate attribute propagates unless overridden
    t, c, k = spl
    for extrap in (None, True, False):
        b = BSpline(t, c, k, extrapolate=extrap)
        p = PPoly.from_spline(b)
        assert_equal(p.extrapolate, b.extrapolate)
+
+ def test_derivative_simple(self):
+ np.random.seed(1234)
+ c = np.array([[4, 3, 2, 1]]).T
+ dc = np.array([[3*4, 2*3, 2]]).T
+ ddc = np.array([[2*3*4, 1*2*3]]).T
+ x = np.array([0, 1])
+
+ pp = PPoly(c, x)
+ dpp = PPoly(dc, x)
+ ddpp = PPoly(ddc, x)
+
+ assert_allclose(pp.derivative().c, dpp.c)
+ assert_allclose(pp.derivative(2).c, ddpp.c)
+
def test_derivative_eval(self):
    """Evaluating with nu=dx matches splev's derivative evaluation."""
    np.random.seed(1234)
    x = np.sort(np.r_[0, np.random.rand(11), 1])
    y = np.random.rand(len(x))

    spl = splrep(x, y, s=0)
    pp = PPoly.from_spline(spl)

    xi = np.linspace(0, 1, 200)
    for dx in range(0, 3):
        assert_allclose(pp(xi, dx), splev(xi, spl, dx))
+
def test_derivative(self):
    """p(x, nu) must equal p.derivative(nu)(x) up to high orders."""
    np.random.seed(1234)
    x = np.sort(np.r_[0, np.random.rand(11), 1])
    y = np.random.rand(len(x))

    spl = splrep(x, y, s=0, k=5)
    pp = PPoly.from_spline(spl)

    xi = np.linspace(0, 1, 200)
    for dx in range(0, 10):
        assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
                        err_msg="dx=%d" % (dx,))
+
+ def test_antiderivative_of_constant(self):
+ # https://github.com/scipy/scipy/issues/4216
+ p = PPoly([[1.]], [0, 1])
+ assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
+ assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
+
+ def test_antiderivative_regression_4355(self):
+ # https://github.com/scipy/scipy/issues/4355
+ p = PPoly([[1., 0.5]], [0, 1, 2])
+ q = p.antiderivative()
+ assert_equal(q.c, [[1, 0.5], [0, 1]])
+ assert_equal(q.x, [0, 1, 2])
+ assert_allclose(p.integrate(0, 2), 1.5)
+ assert_allclose(q(2) - q(0), 1.5)
+
def test_antiderivative_simple(self):
    """First and second antiderivatives against hand-computed coefficients."""
    np.random.seed(1234)
    # [ p1(x) = 3*x**2 + 2*x + 1,
    #   p2(x) = 1.6875]
    c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
    # [ pp1(x) = x**3 + x**2 + x,
    #   pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
    ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
    # [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
    #   ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
    iic = np.array([[1/4, 1/3, 1/2, 0, 0],
                    [0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
    x = np.array([0, 0.25, 1])

    pp = PPoly(c, x)
    ipp = pp.antiderivative()
    iipp = pp.antiderivative(2)
    iipp2 = ipp.antiderivative()

    assert_allclose(ipp.x, x)
    assert_allclose(ipp.c.T, ic.T)
    assert_allclose(iipp.c.T, iic.T)
    assert_allclose(iipp2.c.T, iic.T)
+
def test_antiderivative_vs_derivative(self):
    """derivative(dx) must invert antiderivative(dx); results continuous."""
    np.random.seed(1234)
    x = np.linspace(0, 1, 30)**2
    y = np.random.rand(len(x))
    spl = splrep(x, y, s=0, k=5)
    pp = PPoly.from_spline(spl)

    for dx in range(0, 10):
        ipp = pp.antiderivative(dx)

        # check that derivative is inverse op
        pp2 = ipp.derivative(dx)
        assert_allclose(pp.c, pp2.c)

        # check continuity
        for k in range(dx):
            pp2 = ipp.derivative(k)

            r = 1e-13
            endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]

            assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
                            rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
+
def test_antiderivative_vs_spline(self):
    """antiderivative(dx) must match splantider on the same spline."""
    np.random.seed(1234)
    x = np.sort(np.r_[0, np.random.rand(11), 1])
    y = np.random.rand(len(x))

    spl = splrep(x, y, s=0, k=5)
    pp = PPoly.from_spline(spl)

    for dx in range(0, 10):
        pp2 = pp.antiderivative(dx)
        spl2 = splantider(spl, dx)

        xi = np.linspace(0, 1, 200)
        assert_allclose(pp2(xi), splev(xi, spl2),
                        rtol=1e-7)
+
+ def test_antiderivative_continuity(self):
+ c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
+ x = np.array([0, 0.5, 1])
+
+ p = PPoly(c, x)
+ ip = p.antiderivative()
+
+ # check continuity
+ assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
+
+ # check that only lowest order coefficients were changed
+ p2 = ip.derivative()
+ assert_allclose(p2.c, p.c)
+
def test_integrate(self):
    """integrate(a, b) matches the antiderivative difference and splint."""
    np.random.seed(1234)
    x = np.sort(np.r_[0, np.random.rand(11), 1])
    y = np.random.rand(len(x))

    spl = splrep(x, y, s=0, k=5)
    pp = PPoly.from_spline(spl)

    a, b = 0.3, 0.9
    ig = pp.integrate(a, b)

    ipp = pp.antiderivative()
    assert_allclose(ig, ipp(b) - ipp(a))
    assert_allclose(ig, splint(a, b, spl))

    # limits outside the domain: extrapolate or produce nan
    a, b = -0.3, 0.9
    ig = pp.integrate(a, b, extrapolate=True)
    assert_allclose(ig, ipp(b) - ipp(a))

    assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
+
def test_integrate_readonly(self):
    """integrate() must work when the breakpoint array is read-only."""
    x = np.array([1, 2, 4])
    c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])

    for writeable in (True, False):
        x.flags.writeable = writeable

        P = PPoly(c, x)
        vals = P.integrate(1, 4)

        assert_(np.isfinite(vals).all())
+
+    def test_integrate_periodic(self):
+        # With extrapolate='periodic', integrals wrap around the base
+        # period [1, 4]; compare against the antiderivative on one period.
+        x = np.array([1, 2, 4])
+        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+        P = PPoly(c, x, extrapolate='periodic')
+        I = P.antiderivative()
+
+        period_int = I(4) - I(1)
+
+        # whole-period integrals, shifted anywhere on the axis
+        assert_allclose(P.integrate(1, 4), period_int)
+        assert_allclose(P.integrate(-10, -7), period_int)
+        assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+        # partial-period and period-crossing ranges
+        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
+        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+        # reversed limits flip the sign
+        assert_allclose(P.integrate(0, -1), I(2) - I(3))
+        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+    def test_roots(self):
+        # Roots of a PPoly built from a spline must match FITPACK's sproot.
+        x = np.linspace(0, 1, 31)**2
+        y = np.sin(30*x)
+
+        spl = splrep(x, y, s=0, k=3)
+        pp = PPoly.from_spline(spl)
+
+        r = pp.roots()
+        # keep only roots inside the data domain (allow tiny fp slack)
+        r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
+        assert_allclose(r, sproot(spl), atol=1e-15)
+
+    def test_roots_idzero(self):
+        # Roots for piecewise polynomials with identically zero
+        # sections.
+        c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
+        x = np.array([0, 0.4, 0.6, 1.0])
+
+        pp = PPoly(c, x)
+        # an identically-zero interval is reported as (start, nan)
+        assert_array_equal(pp.roots(),
+                           [0.25, 0.4, np.nan, 0.6 + 0.25])
+
+        # ditto for p.solve(const) with sections identically equal const
+        const = 2.
+        c1 = c.copy()
+        c1[1, :] += const
+        pp1 = PPoly(c1, x)
+
+        assert_array_equal(pp1.solve(const),
+                           [0.25, 0.4, np.nan, 0.6 + 0.25])
+
+    def test_roots_all_zero(self):
+        # test the code path for the polynomial being identically zero everywhere
+        c = [[0], [0]]
+        x = [0, 1]
+        p = PPoly(c, x)
+        assert_array_equal(p.roots(), [0, np.nan])
+        assert_array_equal(p.solve(0), [0, np.nan])
+        # solving for a non-zero level of the zero polynomial yields nothing
+        assert_array_equal(p.solve(1), [])
+
+        # same, with two identically-zero intervals
+        c = [[0, 0], [0, 0]]
+        x = [0, 1, 2]
+        p = PPoly(c, x)
+        assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
+        assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
+        assert_array_equal(p.solve(1), [])
+
+    def test_roots_repeated(self):
+        # Check roots repeated in multiple sections are reported only
+        # once.
+
+        # [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
+        c = np.array([[1, 0, -1], [-1, 0, 0]]).T
+        x = np.array([-1, 0, 1])
+
+        pp = PPoly(c, x)
+        # -2 lies outside [-1, 1], so it only appears with extrapolation
+        assert_array_equal(pp.roots(), [-2, 0])
+        assert_array_equal(pp.roots(extrapolate=False), [0])
+
+    def test_roots_discont(self):
+        # Check that a discontinuity across zero is reported as root
+        c = np.array([[1], [-1]]).T
+        x = np.array([0, 0.5, 1])
+        pp = PPoly(c, x)
+        assert_array_equal(pp.roots(), [0.5])
+        assert_array_equal(pp.roots(discontinuity=False), [])
+
+        # ditto for a discontinuity across y:
+        assert_array_equal(pp.solve(0.5), [0.5])
+        assert_array_equal(pp.solve(0.5, discontinuity=False), [])
+
+        # levels outside the function's range have no solutions at all
+        assert_array_equal(pp.solve(1.5), [])
+        assert_array_equal(pp.solve(1.5, discontinuity=False), [])
+
+    def test_roots_random(self):
+        # Check high-order polynomials with random coefficients
+        np.random.seed(1234)
+
+        num = 0
+
+        for extrapolate in (True, False):
+            for order in range(0, 20):
+                x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
+                c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
+
+                pp = PPoly(c, x)
+                for y in [0, np.random.random()]:
+                    r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
+
+                    for i in range(2):
+                        for j in range(3):
+                            rr = r[i,j]
+                            if rr.size > 0:
+                                # Check that the reported roots indeed are roots
+                                num += rr.size
+                                val = pp(rr, extrapolate=extrapolate)[:,i,j]
+                                cmpval = pp(rr, nu=1,
+                                            extrapolate=extrapolate)[:,i,j]
+                                msg = "(%r) r = %s" % (extrapolate, repr(rr),)
+                                # residual scaled by the derivative: Newton-style
+                                # measure of how close rr is to a true root
+                                assert_allclose((val-y) / cmpval, 0, atol=1e-7,
+                                                err_msg=msg)
+
+        # Check that we checked a number of roots
+        assert_(num > 100, repr(num))
+
+    def test_roots_croots(self):
+        # Test the complex root finding algorithm
+        np.random.seed(1234)
+
+        for k in range(1, 15):
+            c = np.random.rand(k, 1, 130)
+
+            if k == 3:
+                # add a case with zero discriminant
+                c[:,0,0] = 1, 2, 1
+
+            for y in [0, np.random.random()]:
+                w = np.empty(c.shape, dtype=complex)
+                _ppoly._croots_poly1(c, w)
+
+                if k == 1:
+                    # degree-0 polynomial: no roots, all outputs are NaN
+                    assert_(np.isnan(w).all())
+                    continue
+
+                # evaluate the polynomial at the reported roots and
+                # normalise the residual by the sum of term magnitudes
+                res = 0
+                cres = 0
+                for i in range(k):
+                    res += c[i,None] * w**(k-1-i)
+                    cres += abs(c[i,None] * w**(k-1-i))
+                with np.errstate(invalid='ignore'):
+                    res /= cres
+                res = res.ravel()
+                res = res[~np.isnan(res)]
+                assert_allclose(res, 0, atol=1e-10)
+
+    def test_extrapolate_attr(self):
+        # The extrapolate attribute must propagate through derivative()
+        # and antiderivative(); None means the default (extrapolate).
+        # [ 1 - x**2 ]
+        c = np.array([[-1, 0, 1]]).T
+        x = np.array([0, 1])
+
+        for extrapolate in [True, False, None]:
+            pp = PPoly(c, x, extrapolate=extrapolate)
+            pp_d = pp.derivative()
+            pp_i = pp.antiderivative()
+
+            if extrapolate is False:
+                assert_(np.isnan(pp([-0.1, 1.1])).all())
+                assert_(np.isnan(pp_i([-0.1, 1.1])).all())
+                assert_(np.isnan(pp_d([-0.1, 1.1])).all())
+                # root at -1 is outside the domain, hence not reported
+                assert_equal(pp.roots(), [1])
+            else:
+                assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
+                assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
+                assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
+                assert_allclose(pp.roots(), [1, -1])
+
+
+class TestBPoly(object):
+    """Evaluation tests for piecewise polynomials in the Bernstein basis.
+
+    The hand-written expected values expand the Bernstein form
+    sum_k c_k * C(n, k) * s**k * (1-s)**(n-k) with s the normalised
+    coordinate on each interval.
+    """
+
+    def test_simple(self):
+        # degree 0: constant
+        x = [0, 1]
+        c = [[3]]
+        bp = BPoly(c, x)
+        assert_allclose(bp(0.1), 3.)
+
+    def test_simple2(self):
+        # degree 1
+        x = [0, 1]
+        c = [[3], [1]]
+        bp = BPoly(c, x)   # 3*(1-x) + 1*x
+        assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)
+
+    def test_simple3(self):
+        # degree 2
+        x = [0, 1]
+        c = [[3], [1], [4]]
+        bp = BPoly(c, x)   # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
+        assert_allclose(bp(0.2),
+                        3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)
+
+    def test_simple4(self):
+        # degree 3
+        x = [0, 1]
+        c = [[1], [1], [1], [2]]
+        bp = BPoly(c, x)
+        assert_allclose(bp(0.3), 0.7**3 +
+                                 3 * 0.7**2 * 0.3 +
+                                 3 * 0.7 * 0.3**2 +
+                             2 * 0.3**3)
+
+    def test_simple5(self):
+        # degree 4
+        x = [0, 1]
+        c = [[1], [1], [8], [2], [1]]
+        bp = BPoly(c, x)
+        assert_allclose(bp(0.3), 0.7**4 +
+                                 4 * 0.7**3 * 0.3 +
+                                 8 * 6 * 0.7**2 * 0.3**2 +
+                                 2 * 4 * 0.7 * 0.3**3 +
+                                 0.3**4)
+
+    def test_periodic(self):
+        # Periodic extrapolation: values and first derivatives wrap around.
+        x = [0, 1, 3]
+        c = [[3, 0], [0, 0], [0, 2]]
+        # [3*(1-x)**2, 2*((x-1)/2)**2]
+        bp = BPoly(c, x, extrapolate='periodic')
+
+        assert_allclose(bp(3.4), 3 * 0.6**2)
+        assert_allclose(bp(-1.3), 2 * (0.7/2)**2)
+
+        assert_allclose(bp(3.4, 1), -6 * 0.6)
+        assert_allclose(bp(-1.3, 1), 2 * (0.7/2))
+
+    def test_descending(self):
+        # A BPoly on descending breakpoints with flipped coefficients must
+        # evaluate identically to its ascending counterpart.
+        np.random.seed(0)
+
+        power = 3
+        for m in [10, 20, 30]:
+            x = np.sort(np.random.uniform(0, 10, m + 1))
+            ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
+            # We need only to flip coefficients to get it right!
+            cd = ca[::-1].copy()
+
+            pa = BPoly(ca, x, extrapolate=True)
+            pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
+
+            x_test = np.random.uniform(-10, 20, 100)
+            assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
+            assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
+
+            pa_d = pa.derivative()
+            pd_d = pd.derivative()
+
+            assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
+
+            # Antiderivatives won't be equal because fixing continuity is
+            # done in the reverse order, but surely the differences should be
+            # equal.
+            pa_i = pa.antiderivative()
+            pd_i = pd.antiderivative()
+            for a, b in np.random.uniform(-10, 20, (5, 2)):
+                int_a = pa.integrate(a, b)
+                int_d = pd.integrate(a, b)
+                assert_allclose(int_a, int_d, rtol=1e-12)
+                assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
+                                rtol=1e-12)
+
+    def test_multi_shape(self):
+        # Trailing coefficient dimensions are carried through evaluation
+        # and derivative().
+        c = np.random.rand(6, 2, 1, 2, 3)
+        x = np.array([0, 0.5, 1])
+        p = BPoly(c, x)
+        assert_equal(p.x.shape, x.shape)
+        assert_equal(p.c.shape, c.shape)
+        assert_equal(p(0.3).shape, c.shape[2:])
+        assert_equal(p(np.random.rand(5,6)).shape,
+                     (5,6)+c.shape[2:])
+
+        dp = p.derivative()
+        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
+
+    def test_interval_length(self):
+        # The Bernstein parameter is normalised to the interval length.
+        x = [0, 2]
+        c = [[3], [1], [4]]
+        bp = BPoly(c, x)
+        xval = 0.1
+        s = xval / 2  # s = (x - xa) / (xb - xa)
+        assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)
+
+    def test_two_intervals(self):
+        x = [0, 1, 3]
+        c = [[3, 0], [0, 0], [0, 2]]
+        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
+
+        assert_allclose(bp(0.4), 3 * 0.6*0.6)
+        assert_allclose(bp(1.7), 2 * (0.7/2)**2)
+
+    def test_extrapolate_attr(self):
+        # extrapolate=False must yield NaN outside the domain for both
+        # the polynomial and its derivative; True/None extrapolate.
+        x = [0, 2]
+        c = [[3], [1], [4]]
+        bp = BPoly(c, x)
+
+        for extrapolate in (True, False, None):
+            bp = BPoly(c, x, extrapolate=extrapolate)
+            bp_d = bp.derivative()
+            if extrapolate is False:
+                assert_(np.isnan(bp([-0.1, 2.1])).all())
+                assert_(np.isnan(bp_d([-0.1, 2.1])).all())
+            else:
+                assert_(not np.isnan(bp([-0.1, 2.1])).any())
+                assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
+
+
+class TestBPolyCalculus(object):
+    """Derivative / antiderivative / integration tests for BPoly,
+    largely cross-checked against the equivalent PPoly (power basis).
+    """
+
+    def test_derivative(self):
+        x = [0, 1, 3]
+        c = [[3, 0], [0, 0], [0, 2]]
+        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
+        bp_der = bp.derivative()
+        assert_allclose(bp_der(0.4), -6*(0.6))
+        assert_allclose(bp_der(1.7), 0.7)
+
+        # derivatives in-place
+        assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
+                        [-6*(1-0.4), 6., 0.])
+        assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
+                        [0.7, 1., 0])
+
+    def test_derivative_ppoly(self):
+        # make sure it's consistent w/ power basis
+        np.random.seed(1234)
+        m, k = 5, 8   # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+
+        # differentiate repeatedly and compare at each order
+        for d in range(k):
+            bp = bp.derivative()
+            pp = pp.derivative()
+            xp = np.linspace(x[0], x[-1], 21)
+            assert_allclose(bp(xp), pp(xp))
+
+    def test_deriv_inplace(self):
+        # bp(x, nu) must agree with bp.derivative(nu)(x)
+        np.random.seed(1234)
+        m, k = 5, 8   # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+
+        # test both real and complex coefficients
+        for cc in [c.copy(), c*(1. + 2.j)]:
+            bp = BPoly(cc, x)
+            xp = np.linspace(x[0], x[-1], 21)
+            for i in range(k):
+                assert_allclose(bp(xp, i), bp.derivative(i)(xp))
+
+    def test_antiderivative_simple(self):
+        # f(x) = x        for x \in [0, 1),
+        #        (x-1)/2  for x \in [1, 3]
+        #
+        # antiderivative is then
+        # F(x) = x**2 / 2            for x \in [0, 1),
+        #        0.5*x*(x/2 - 1) + A for x \in [1, 3]
+        # where A = 3/4 for continuity at x = 1.
+        x = [0, 1, 3]
+        c = [[0, 0], [1, 1]]
+
+        bp = BPoly(c, x)
+        bi = bp.antiderivative()
+
+        xx = np.linspace(0, 3, 11)
+        assert_allclose(bi(xx),
+                        np.where(xx < 1, xx**2 / 2.,
+                                 0.5 * xx * (xx/2. - 1) + 3./4),
+                        atol=1e-12, rtol=1e-12)
+
+    def test_der_antider(self):
+        # derivative of the antiderivative must recover the original
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10, 2, 3))
+        bp = BPoly(c, x)
+
+        xx = np.linspace(x[0], x[-1], 100)
+        assert_allclose(bp.antiderivative().derivative()(xx),
+                        bp(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antider_ppoly(self):
+        # second antiderivative consistent with the power basis
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10, 2, 3))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+
+        xx = np.linspace(x[0], x[-1], 10)
+
+        assert_allclose(bp.antiderivative(2)(xx),
+                        pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antider_continuous(self):
+        # the antiderivative must be continuous at interior breakpoints
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10))
+        bp = BPoly(c, x).antiderivative()
+
+        xx = bp.x[1:-1]
+        assert_allclose(bp(xx - 1e-14),
+                        bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
+
+    def test_integrate(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+        assert_allclose(bp.integrate(0, 1),
+                        pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
+
+    def test_integrate_extrap(self):
+        c = [[1]]
+        x = [0, 1]
+        b = BPoly(c, x)
+
+        # default is extrapolate=True
+        assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
+
+        # .integrate argument overrides self.extrapolate
+        b1 = BPoly(c, x, extrapolate=False)
+        assert_(np.isnan(b1.integrate(0, 2)))
+        assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
+
+    def test_integrate_periodic(self):
+        # same scenario as the PPoly periodic-integration test, but for a
+        # BPoly converted from the power basis
+        x = np.array([1, 2, 4])
+        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+        P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
+        I = P.antiderivative()
+
+        period_int = I(4) - I(1)
+
+        assert_allclose(P.integrate(1, 4), period_int)
+        assert_allclose(P.integrate(-10, -7), period_int)
+        assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
+        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+        assert_allclose(P.integrate(0, -1), I(2) - I(3))
+        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+    def test_antider_neg(self):
+        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
+        c = [[1]]
+        x = [0, 1]
+        b = BPoly(c, x)
+
+        xx = np.linspace(0, 1, 21)
+
+        assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
+                        atol=1e-12, rtol=1e-12)
+        assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
+                        atol=1e-12, rtol=1e-12)
+
+
+class TestPolyConversions(object):
+    """Round-trip conversions between the power (PPoly) and Bernstein
+    (BPoly) bases must preserve the represented function.
+    """
+
+    def test_bp_from_pp(self):
+        x = [0, 1, 3]
+        c = [[3, 2], [1, 8], [4, 3]]
+        pp = PPoly(c, x)
+        bp = BPoly.from_power_basis(pp)
+        pp1 = PPoly.from_bernstein_basis(bp)
+
+        xp = [0.1, 1.4]
+        assert_allclose(pp(xp), bp(xp))
+        assert_allclose(pp(xp), pp1(xp))
+
+    def test_bp_from_pp_random(self):
+        np.random.seed(1234)
+        m, k = 5, 8   # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+        pp = PPoly(c, x)
+        bp = BPoly.from_power_basis(pp)
+        pp1 = PPoly.from_bernstein_basis(bp)
+
+        xp = np.linspace(x[0], x[-1], 21)
+        assert_allclose(pp(xp), bp(xp))
+        assert_allclose(pp(xp), pp1(xp))
+
+    def test_pp_from_bp(self):
+        x = [0, 1, 3]
+        c = [[3, 3], [1, 1], [4, 2]]
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+        bp1 = BPoly.from_power_basis(pp)
+
+        xp = [0.1, 1.4]
+        assert_allclose(bp(xp), pp(xp))
+        assert_allclose(bp(xp), bp1(xp))
+
+    def test_broken_conversions(self):
+        # regression test for gh-10597: from_power_basis only accepts PPoly etc.
+        x = [0, 1, 3]
+        c = [[3, 3], [1, 1], [4, 2]]
+        pp = PPoly(c, x)
+        with assert_raises(TypeError):
+            PPoly.from_bernstein_basis(pp)
+
+        bp = BPoly(c, x)
+        with assert_raises(TypeError):
+            BPoly.from_power_basis(bp)
+
+
+class TestBPolyFromDerivatives(object):
+    """Tests for BPoly.from_derivatives: Hermite-type construction of a
+    Bernstein polynomial from function values and derivatives at the
+    breakpoints.
+    """
+
+    def test_make_poly_1(self):
+        # linear: endpoint values only
+        c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
+        assert_allclose(c1, [2., 3.])
+
+    def test_make_poly_2(self):
+        c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
+        assert_allclose(c1, [1., 1., 1.])
+
+        # f'(0) = 3
+        c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
+        assert_allclose(c2, [2., 7./2, 1.])
+
+        # f'(1) = 3
+        c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
+        assert_allclose(c3, [2., -0.5, 1.])
+
+    def test_make_poly_3(self):
+        # f'(0)=2, f''(0)=3
+        c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
+        assert_allclose(c1, [1., 5./3, 17./6, 4.])
+
+        # f'(1)=2, f''(1)=3
+        c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
+        assert_allclose(c2, [1., 19./6, 10./3, 4.])
+
+        # f'(0)=2, f'(1)=3
+        c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
+        assert_allclose(c3, [1., 5./3, 3., 4.])
+
+    def test_make_poly_12(self):
+        # 6 derivatives at each end -> degree-11 polynomial; check that
+        # successive derivatives reproduce the prescribed endpoint data
+        np.random.seed(12345)
+        ya = np.r_[0, np.random.random(5)]
+        yb = np.r_[0, np.random.random(5)]
+
+        c = BPoly._construct_from_derivatives(0, 1, ya, yb)
+        pp = BPoly(c[:, None], [0, 1])
+        for j in range(6):
+            assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
+            pp = pp.derivative()
+
+    def test_raise_degree(self):
+        # degree elevation by d must not change the represented function
+        np.random.seed(12345)
+        x = [0, 1]
+        k, d = 8, 5
+        c = np.random.random((k, 1, 2, 3, 4))
+        bp = BPoly(c, x)
+
+        c1 = BPoly._raise_degree(c, d)
+        bp1 = BPoly(c1, x)
+
+        xp = np.linspace(0, 1, 11)
+        assert_allclose(bp(xp), bp1(xp))
+
+    def test_xi_yi(self):
+        # mismatched lengths of xi and yi are rejected
+        assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
+
+    def test_coords_order(self):
+        # xi must be strictly increasing
+        xi = [0, 0, 1]
+        yi = [[0], [0], [0]]
+        assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
+
+    def test_zeros(self):
+        xi = [0, 1, 2, 3]
+        yi = [[0, 0], [0], [0, 0], [0, 0]]  # NB: will have to raise the degree
+        pp = BPoly.from_derivatives(xi, yi)
+        assert_(pp.c.shape == (4, 3))
+
+        ppd = pp.derivative()
+        for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
+            assert_allclose([pp(xp), ppd(xp)], [0., 0.])
+
+    def _make_random_mk(self, m, k):
+        # k derivatives at each breakpoint
+        np.random.seed(1234)
+        xi = np.asarray([1. * j**2 for j in range(m+1)])
+        yi = [np.random.random(k) for j in range(m+1)]
+        return xi, yi
+
+    def test_random_12(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+        pp = BPoly.from_derivatives(xi, yi)
+
+        for order in range(k//2):
+            assert_allclose(pp(xi), [yy[order] for yy in yi])
+            pp = pp.derivative()
+
+    def test_order_zero(self):
+        # orders=0 is not a valid local polynomial order
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+        assert_raises(ValueError, BPoly.from_derivatives,
+                      **dict(xi=xi, yi=yi, orders=0))
+
+    def test_orders_too_high(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        BPoly.from_derivatives(xi, yi, orders=2*k-1)   # this is still ok
+        assert_raises(ValueError, BPoly.from_derivatives,   # but this is not
+                      **dict(xi=xi, yi=yi, orders=2*k))
+
+    def test_orders_global(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        # ok, this is confusing. Local polynomials will be of the order 5
+        # which means that up to the 2nd derivatives will be used at each point
+        order = 5
+        pp = BPoly.from_derivatives(xi, yi, orders=order)
+
+        for j in range(order//2+1):
+            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+            pp = pp.derivative()
+        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+        # now repeat with `order` being even: on each interval, it uses
+        # order//2 'derivatives' @ the right-hand endpoint and
+        # order//2+1 @ 'derivatives' the left-hand endpoint
+        order = 6
+        pp = BPoly.from_derivatives(xi, yi, orders=order)
+        for j in range(order//2):
+            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+            pp = pp.derivative()
+        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+    def test_orders_local(self):
+        # per-breakpoint orders: continuity holds up to orders[i]//2 at each
+        # interior breakpoint, and fails one derivative beyond
+        m, k = 7, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        orders = [o + 1 for o in range(m)]
+        for i, x in enumerate(xi[1:-1]):
+            pp = BPoly.from_derivatives(xi, yi, orders=orders)
+            for j in range(orders[i] // 2 + 1):
+                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
+                pp = pp.derivative()
+            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
+
+    def test_yi_trailing_dims(self):
+        # trailing dimensions of yi are carried into the coefficient array
+        m, k = 7, 5
+        xi = np.sort(np.random.random(m+1))
+        yi = np.random.random((m+1, k, 6, 7, 8))
+        pp = BPoly.from_derivatives(xi, yi)
+        assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
+
+    def test_gh_5430(self):
+        # At least one of these raises an error unless gh-5430 is
+        # fixed. In py2k an int is implemented using a C long, so
+        # which one fails depends on your system. In py3k there is only
+        # one arbitrary precision integer type, so both should fail.
+        orders = np.int32(1)
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = np.int64(1)
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = 1
+        # This worked before; make sure it still works
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = 1
+        # NOTE(review): the assignment above is dead code, presumably left
+        # over from an edit — consider removing it.
+
+
+class TestNdPPoly(object):
+    """Tests for NdPPoly (tensor-product piecewise polynomials), checked
+    against the straightforward reference evaluators defined below
+    (_ppoly2d_eval etc.) and against per-axis PPoly operations.
+    """
+
+    def test_simple_1d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5)
+        x = np.linspace(0, 1, 5+1)
+
+        xi = np.random.rand(200)
+
+        p = NdPPoly(c, (x,))
+        v1 = p((xi,))
+
+        # 1-D NdPPoly must match the manual 1-D evaluator
+        v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
+        assert_allclose(v1, v2)
+
+    def test_simple_2d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7)
+        x = np.linspace(0, 1, 6+1)
+        y = np.linspace(0, 1, 7+1)**2
+
+        xi = np.random.rand(200)
+        yi = np.random.rand(200)
+
+        # first exercise the low-level C evaluator directly
+        v1 = np.empty([len(xi), 1], dtype=c.dtype)
+        v1.fill(np.nan)
+        _ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
+                           (x, y),
+                           np.array([4, 5], dtype=np.intc),
+                           np.c_[xi, yi],
+                           np.array([0, 0], dtype=np.intc),
+                           1,
+                           v1)
+        v1 = v1.ravel()
+        v2 = _ppoly2d_eval(c, (x, y), xi, yi)
+        assert_allclose(v1, v2)
+
+        # then the public interface, including mixed partial derivatives
+        p = NdPPoly(c, (x, y))
+        for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
+            v1 = p(np.c_[xi, yi], nu=nu)
+            v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
+            assert_allclose(v1, v2, err_msg=repr(nu))
+
+    def test_simple_3d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9)
+        x = np.linspace(0, 1, 7+1)
+        y = np.linspace(0, 1, 8+1)**2
+        z = np.linspace(0, 1, 9+1)**3
+
+        xi = np.random.rand(40)
+        yi = np.random.rand(40)
+        zi = np.random.rand(40)
+
+        p = NdPPoly(c, (x, y, z))
+
+        for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
+                   (6, 0, 2)):
+            v1 = p((xi, yi, zi), nu=nu)
+            v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
+            assert_allclose(v1, v2, err_msg=repr(nu))
+
+    def test_simple_4d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
+        x = np.linspace(0, 1, 8+1)
+        y = np.linspace(0, 1, 9+1)**2
+        z = np.linspace(0, 1, 10+1)**3
+        u = np.linspace(0, 1, 11+1)**4
+
+        xi = np.random.rand(20)
+        yi = np.random.rand(20)
+        zi = np.random.rand(20)
+        ui = np.random.rand(20)
+
+        p = NdPPoly(c, (x, y, z, u))
+        v1 = p((xi, yi, zi, ui))
+
+        v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
+        assert_allclose(v1, v2)
+
+    def test_deriv_1d(self):
+        # 1-D derivative/antiderivative must reduce to plain PPoly
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5)
+        x = np.linspace(0, 1, 5+1)
+
+        p = NdPPoly(c, (x,))
+
+        # derivative
+        dp = p.derivative(nu=[1])
+        p1 = PPoly(c, x)
+        dp1 = p1.derivative()
+        assert_allclose(dp.c, dp1.c)
+
+        # antiderivative
+        dp = p.antiderivative(nu=[2])
+        p1 = PPoly(c, x)
+        dp1 = p1.antiderivative(2)
+        assert_allclose(dp.c, dp1.c)
+
+    def test_deriv_3d(self):
+        # per-axis derivatives checked against a PPoly whose axes were
+        # transposed so the differentiated axis comes first
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9)
+        x = np.linspace(0, 1, 7+1)
+        y = np.linspace(0, 1, 8+1)**2
+        z = np.linspace(0, 1, 9+1)**3
+
+        p = NdPPoly(c, (x, y, z))
+
+        # differentiate vs x
+        p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
+        dp = p.derivative(nu=[2])
+        dp1 = p1.derivative(2)
+        assert_allclose(dp.c,
+                        dp1.c.transpose(0, 2, 3, 1, 4, 5))
+
+        # antidifferentiate vs y
+        p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
+        dp = p.antiderivative(nu=[0, 1, 0])
+        dp1 = p1.antiderivative(1)
+        assert_allclose(dp.c,
+                        dp1.c.transpose(2, 0, 3, 4, 1, 5))
+
+        # differentiate vs z
+        p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
+        dp = p.derivative(nu=[0, 0, 3])
+        dp1 = p1.derivative(3)
+        assert_allclose(dp.c,
+                        dp1.c.transpose(2, 3, 0, 4, 5, 1))
+
+    def test_deriv_3d_simple(self):
+        # Integrate to obtain function x y**2 z**4 / (2! 4!)
+        c = np.ones((1, 1, 1, 3, 4, 5))
+        x = np.linspace(0, 1, 3+1)**1
+        y = np.linspace(0, 1, 4+1)**2
+        z = np.linspace(0, 1, 5+1)**3
+
+        p = NdPPoly(c, (x, y, z))
+        ip = p.antiderivative((1, 0, 4))
+        ip = ip.antiderivative((0, 2, 0))
+
+        xi = np.random.rand(20)
+        yi = np.random.rand(20)
+        zi = np.random.rand(20)
+
+        assert_allclose(ip((xi, yi, zi)),
+                        xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
+
+    def test_integrate_2d(self):
+        np.random.seed(1234)
+        c = np.random.rand(4, 5, 16, 17)
+        x = np.linspace(0, 1, 16+1)**1
+        y = np.linspace(0, 1, 17+1)**2
+
+        # make continuously differentiable so that nquad() has an
+        # easier time
+        c = c.transpose(0, 2, 1, 3)
+        cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+        _ppoly.fix_continuity(cx, x, 2)
+        c = cx.reshape(c.shape)
+        c = c.transpose(0, 2, 1, 3)
+        c = c.transpose(1, 3, 0, 2)
+        cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+        _ppoly.fix_continuity(cx, y, 2)
+        c = cx.reshape(c.shape)
+        c = c.transpose(2, 0, 3, 1).copy()
+
+        # Check integration
+        p = NdPPoly(c, (x, y))
+
+        for ranges in [[(0, 1), (0, 1)],
+                       [(0, 0.5), (0, 1)],
+                       [(0, 1), (0, 0.5)],
+                       [(0.3, 0.7), (0.6, 0.2)]]:
+
+            ig = p.integrate(ranges)
+            # compare against adaptive quadrature of the evaluated polynomial
+            ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
+                              opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
+            assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
+                            err_msg=repr(ranges))
+
+    def test_integrate_1d(self):
+        np.random.seed(1234)
+        c = np.random.rand(4, 5, 6, 16, 17, 18)
+        x = np.linspace(0, 1, 16+1)**1
+        y = np.linspace(0, 1, 17+1)**2
+        z = np.linspace(0, 1, 18+1)**3
+
+        # Check 1-D integration
+        p = NdPPoly(c, (x, y, z))
+
+        u = np.random.rand(200)
+        v = np.random.rand(200)
+        a, b = 0.2, 0.7
+
+        # integrating out one axis must equal the antiderivative
+        # difference along that axis
+        px = p.integrate_1d(a, b, axis=0)
+        pax = p.antiderivative((1, 0, 0))
+        assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
+
+        py = p.integrate_1d(a, b, axis=1)
+        pay = p.antiderivative((0, 1, 0))
+        assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
+
+        pz = p.integrate_1d(a, b, axis=2)
+        paz = p.antiderivative((0, 0, 1))
+        assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
+
+
+def _ppoly_eval_1(c, x, xps):
+    """Evaluate piecewise polynomial manually
+
+    c : coefficient array (k, m, n) — highest power first; x : breakpoints;
+    xps : evaluation points. Points outside [0, 1] produce NaN rows.
+    """
+    out = np.zeros((len(xps), c.shape[2]))
+    for i, xp in enumerate(xps):
+        if xp < 0 or xp > 1:
+            out[i,:] = np.nan
+            continue
+        # locate the interval and evaluate the local polynomial in
+        # the shifted variable d = xp - x[j]
+        j = np.searchsorted(x, xp) - 1
+        d = xp - x[j]
+        assert_(x[j] <= xp < x[j+1])
+        r = sum(c[k,j] * d**(c.shape[0]-k-1)
+                for k in range(c.shape[0]))
+        out[i,:] = r
+    return out
+
+
+def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
+    """Evaluate piecewise polynomial manually (another way)
+
+    Vectorised variant using a Vandermonde matrix; points outside
+    [breaks[0], breaks[-1]] receive `fill`. Output shape follows xnew.
+    """
+    a = breaks[0]
+    b = breaks[-1]
+    K = coeffs.shape[0]
+
+    saveshape = np.shape(xnew)
+    xnew = np.ravel(xnew)
+    res = np.empty_like(xnew)
+    mask = (xnew >= a) & (xnew <= b)
+    res[~mask] = fill
+    xx = xnew.compress(mask)
+    indxs = np.searchsorted(breaks, xx)-1
+    indxs = indxs.clip(0, len(breaks))
+    pp = coeffs
+    diff = xx - breaks.take(indxs)
+    # row k of V holds powers of the local offset, highest power first,
+    # matching the coefficient ordering in pp
+    V = np.vander(diff, N=K)
+    values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
+    res[mask] = values
+    res.shape = saveshape
+    return res
+
+
+def _dpow(x, y, n):
+    """
+    d^n (x**y) / dx^n
+
+    Returns 0 when n > y (the monomial is differentiated away); uses the
+    Pochhammer symbol for the falling-power prefactor otherwise.
+    """
+    if n < 0:
+        raise ValueError("invalid derivative order")
+    elif n > y:
+        return 0
+    else:
+        return poch(y - n + 1, n) * x**(y - n)
+
+
+def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
+    """
+    Straightforward evaluation of 2-D piecewise polynomial
+
+    c : coefficients (kx, ky, mx, my), highest powers first; xs : pair of
+    breakpoint arrays; nu : optional derivative orders per axis.
+    Out-of-domain points evaluate to NaN.
+    """
+    if nu is None:
+        nu = (0, 0)
+
+    out = np.empty((len(xnew),), dtype=c.dtype)
+
+    nx, ny = c.shape[:2]
+
+    for jout, (x, y) in enumerate(zip(xnew, ynew)):
+        if not ((xs[0][0] <= x <= xs[0][-1]) and
+                (xs[1][0] <= y <= xs[1][-1])):
+            out[jout] = np.nan
+            continue
+
+        # locate the cell and the local offsets within it
+        j1 = np.searchsorted(xs[0], x) - 1
+        j2 = np.searchsorted(xs[1], y) - 1
+
+        s1 = x - xs[0][j1]
+        s2 = y - xs[1][j2]
+
+        val = 0
+
+        # tensor-product sum; _dpow applies the per-axis derivatives
+        for k1 in range(c.shape[0]):
+            for k2 in range(c.shape[1]):
+                val += (c[nx-k1-1,ny-k2-1,j1,j2]
+                        * _dpow(s1, k1, nu[0])
+                        * _dpow(s2, k2, nu[1]))
+
+        out[jout] = val
+
+    return out
+
+
+def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
+    """
+    Straightforward evaluation of 3-D piecewise polynomial
+
+    3-D analogue of _ppoly2d_eval; out-of-domain points evaluate to NaN.
+    """
+    if nu is None:
+        nu = (0, 0, 0)
+
+    out = np.empty((len(xnew),), dtype=c.dtype)
+
+    nx, ny, nz = c.shape[:3]
+
+    for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
+        if not ((xs[0][0] <= x <= xs[0][-1]) and
+                (xs[1][0] <= y <= xs[1][-1]) and
+                (xs[2][0] <= z <= xs[2][-1])):
+            out[jout] = np.nan
+            continue
+
+        # locate the cell and the local offsets within it
+        j1 = np.searchsorted(xs[0], x) - 1
+        j2 = np.searchsorted(xs[1], y) - 1
+        j3 = np.searchsorted(xs[2], z) - 1
+
+        s1 = x - xs[0][j1]
+        s2 = y - xs[1][j2]
+        s3 = z - xs[2][j3]
+
+        val = 0
+        for k1 in range(c.shape[0]):
+            for k2 in range(c.shape[1]):
+                for k3 in range(c.shape[2]):
+                    val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
+                            * _dpow(s1, k1, nu[0])
+                            * _dpow(s2, k2, nu[1])
+                            * _dpow(s3, k3, nu[2]))
+
+        out[jout] = val
+
+    return out
+
+
+def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
+    """
+    Straightforward evaluation of 4-D piecewise polynomial
+
+    4-D analogue of _ppoly2d_eval; out-of-domain points evaluate to NaN.
+    """
+    if nu is None:
+        nu = (0, 0, 0, 0)
+
+    out = np.empty((len(xnew),), dtype=c.dtype)
+
+    mx, my, mz, mu = c.shape[:4]
+
+    for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
+        if not ((xs[0][0] <= x <= xs[0][-1]) and
+                (xs[1][0] <= y <= xs[1][-1]) and
+                (xs[2][0] <= z <= xs[2][-1]) and
+                (xs[3][0] <= u <= xs[3][-1])):
+            out[jout] = np.nan
+            continue
+
+        # locate the cell and the local offsets within it
+        j1 = np.searchsorted(xs[0], x) - 1
+        j2 = np.searchsorted(xs[1], y) - 1
+        j3 = np.searchsorted(xs[2], z) - 1
+        j4 = np.searchsorted(xs[3], u) - 1
+
+        s1 = x - xs[0][j1]
+        s2 = y - xs[1][j2]
+        s3 = z - xs[2][j3]
+        s4 = u - xs[3][j4]
+
+        val = 0
+        for k1 in range(c.shape[0]):
+            for k2 in range(c.shape[1]):
+                for k3 in range(c.shape[2]):
+                    for k4 in range(c.shape[3]):
+                        val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
+                                * _dpow(s1, k1, nu[0])
+                                * _dpow(s2, k2, nu[1])
+                                * _dpow(s3, k3, nu[2])
+                                * _dpow(s4, k4, nu[3]))
+
+        out[jout] = val
+
+    return out
+
+
+class TestRegularGridInterpolator(object):
+ def _get_sample_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points = [(0., .5, 1.)] * 4
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_2(self):
+ # create another 4-D grid of 3 points in each dimension
+ points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def test_list_input(self):
+ points, values = self._get_sample_4d()
+
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+
+ for method in ['linear', 'nearest']:
+ interp = RegularGridInterpolator(points,
+ values.tolist(),
+ method=method)
+ v1 = interp(sample.tolist())
+ interp = RegularGridInterpolator(points,
+ values,
+ method=method)
+ v2 = interp(sample)
+ assert_allclose(v1, v2)
+
+ def test_complex(self):
+ points, values = self._get_sample_4d()
+ values = values - 2j*values
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+
+ for method in ['linear', 'nearest']:
+ interp = RegularGridInterpolator(points, values,
+ method=method)
+ rinterp = RegularGridInterpolator(points, values.real,
+ method=method)
+ iinterp = RegularGridInterpolator(points, values.imag,
+ method=method)
+
+ v1 = interp(sample)
+ v2 = rinterp(sample) + 1j*iinterp(sample)
+ assert_allclose(v1, v2)
+
+ def test_linear_xi1d(self):
+ points, values = self._get_sample_4d_2()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([0.1, 0.1, 10., 9.])
+ wanted = 1001.1
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_linear_xi3d(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ wanted = np.asarray([1001.1, 846.2, 555.5])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_nearest(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, method="nearest")
+ sample = np.asarray([0.1, 0.1, .9, .9])
+ wanted = 1100.
+ assert_array_almost_equal(interp(sample), wanted)
+ sample = np.asarray([0.1, 0.1, 0.1, 0.1])
+ wanted = 0.
+ assert_array_almost_equal(interp(sample), wanted)
+ sample = np.asarray([0., 0., 0., 0.])
+ wanted = 0.
+ assert_array_almost_equal(interp(sample), wanted)
+ sample = np.asarray([1., 1., 1., 1.])
+ wanted = 1111.
+ assert_array_almost_equal(interp(sample), wanted)
+ sample = np.asarray([0.1, 0.4, 0.6, 0.9])
+ wanted = 1055.
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_linear_edges(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
+ wanted = np.asarray([0., 1111.])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_valid_create(self):
+ # create a 2-D grid of 3 points in each dimension
+ points = [(0., .5, 1.), (0., 1., .5)]
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis]
+ values1 = values[np.newaxis, :]
+ values = (values0 + values1 * 10)
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [((0., .5, 1.), ), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [(0., .5, .75, 1.), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [(0., .5, 1.), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values,
+ method="undefmethod")
+
+ def test_valid_call(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
+ assert_raises(ValueError, interp, sample, "undefmethod")
+ sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
+ assert_raises(ValueError, interp, sample)
+ sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
+ assert_raises(ValueError, interp, sample)
+
+ def test_out_of_bounds_extrap(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=None)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([0., 1111., 11., 11.])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+ def test_out_of_bounds_extrap2(self):
+ points, values = self._get_sample_4d_2()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=None)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([0., 11., 11., 11.])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+ def test_out_of_bounds_fill(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=np.nan)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([np.nan, np.nan, np.nan])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ wanted = np.asarray([1001.1, 846.2, 555.5])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_nearest_compare_qhull(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, method="nearest")
+ points_qhull = itertools.product(*points)
+ points_qhull = [p for p in points_qhull]
+ points_qhull = np.asarray(points_qhull)
+ values_qhull = values.reshape(-1)
+ interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+ def test_linear_compare_qhull(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ points_qhull = itertools.product(*points)
+ points_qhull = [p for p in points_qhull]
+ points_qhull = np.asarray(points_qhull)
+ values_qhull = values.reshape(-1)
+ interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+ def test_duck_typed_values(self):
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+
+ values = MyValue((5, 7))
+
+ for method in ('nearest', 'linear'):
+ interp = RegularGridInterpolator((x, y), values,
+ method=method)
+ v1 = interp([0.4, 0.7])
+
+ interp = RegularGridInterpolator((x, y), values._v,
+ method=method)
+ v2 = interp([0.4, 0.7])
+ assert_allclose(v1, v2)
+
+ def test_invalid_fill_value(self):
+ np.random.seed(1234)
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+ values = np.random.rand(5, 7)
+
+ # integers can be cast to floats
+ RegularGridInterpolator((x, y), values, fill_value=1)
+
+ # complex values cannot
+ assert_raises(ValueError, RegularGridInterpolator,
+ (x, y), values, fill_value=1+2j)
+
+ def test_fillvalue_type(self):
+ # from #3703; test that interpolator object construction succeeds
+ values = np.ones((10, 20, 30), dtype='>f4')
+ points = [np.arange(n) for n in values.shape]
+ # xi = [(1, 1, 1)]
+ RegularGridInterpolator(points, values)
+ RegularGridInterpolator(points, values, fill_value=0.)
+
+ def test_broadcastable_input(self):
+ # input data
+ np.random.seed(0)
+ x = np.random.random(10)
+ y = np.random.random(10)
+ z = np.hypot(x, y)
+
+ # x-y grid for interpolation
+ X = np.linspace(min(x), max(x))
+ Y = np.linspace(min(y), max(y))
+ X, Y = np.meshgrid(X, Y)
+ XY = np.vstack((X.ravel(), Y.ravel())).T
+
+ for interpolator in (NearestNDInterpolator, LinearNDInterpolator,
+ CloughTocher2DInterpolator):
+ interp = interpolator(list(zip(x, y)), z)
+ # single array input
+ interp_points0 = interp(XY)
+ # tuple input
+ interp_points1 = interp((X, Y))
+ interp_points2 = interp((X, 0.0))
+ # broadcastable input
+ interp_points3 = interp(X, Y)
+ interp_points4 = interp(X, 0.0)
+
+ assert_equal(interp_points0.size ==
+ interp_points1.size ==
+ interp_points2.size ==
+ interp_points3.size ==
+ interp_points4.size, True)
+
+ def test_read_only(self):
+ # input data
+ np.random.seed(0)
+ xy = np.random.random((10, 2))
+ x, y = xy[:, 0], xy[:, 1]
+ z = np.hypot(x, y)
+
+ # interpolation points
+ XY = np.random.random((50, 2))
+
+ xy.setflags(write=False)
+ z.setflags(write=False)
+ XY.setflags(write=False)
+
+ for interpolator in (NearestNDInterpolator, LinearNDInterpolator,
+ CloughTocher2DInterpolator):
+ interp = interpolator(xy, z)
+ interp(XY)
+
+
+class MyValue(object):
+ """
+ Minimal indexable object
+ """
+
+ def __init__(self, shape):
+ self.ndim = 2
+ self.shape = shape
+ self._v = np.arange(np.prod(shape)).reshape(shape)
+
+ def __getitem__(self, idx):
+ return self._v[idx]
+
+ def __array_interface__(self):
+ return None
+
+ def __array__(self):
+ raise RuntimeError("No array representation")
+
+
+class TestInterpN(object):
+ def _sample_2d_data(self):
+ x = np.arange(1, 6)
+ x = np.array([.5, 2., 3., 4., 5.5])
+ y = np.arange(1, 6)
+ y = np.array([.5, 2., 3., 4., 5.5])
+ z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ return x, y, z
+
+ def test_spline_2d(self):
+ x, y, z = self._sample_2d_data()
+ lut = RectBivariateSpline(x, y, z)
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
+ lut.ev(xi[:, 0], xi[:, 1]))
+
+ def test_list_input(self):
+ x, y, z = self._sample_2d_data()
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ for method in ['nearest', 'linear', 'splinef2d']:
+ v1 = interpn((x, y), z, xi, method=method)
+ v2 = interpn((x.tolist(), y.tolist()), z.tolist(),
+ xi.tolist(), method=method)
+ assert_allclose(v1, v2, err_msg=method)
+
+ def test_spline_2d_outofbounds(self):
+ x = np.array([.5, 2., 3., 4., 5.5])
+ y = np.array([.5, 2., 3., 4., 5.5])
+ z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ lut = RectBivariateSpline(x, y, z)
+
+ xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+ actual = interpn((x, y), z, xi, method="splinef2d",
+ bounds_error=False, fill_value=999.99)
+ expected = lut.ev(xi[:, 0], xi[:, 1])
+ expected[2:4] = 999.99
+ assert_array_almost_equal(actual, expected)
+
+ # no extrapolation for splinef2d
+ assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
+ bounds_error=False, fill_value=None)
+
+ def _sample_4d_data(self):
+ points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def test_linear_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ interp_rg = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0.1, 0.1, 10., 9.]])
+ wanted = interpn(points, values, sample, method="linear")
+ assert_array_almost_equal(interp_rg(sample), wanted)
+
+ def test_4d_linear_outofbounds(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+ wanted = 999.99
+ actual = interpn(points, values, sample, method="linear",
+ bounds_error=False, fill_value=999.99)
+ assert_array_almost_equal(actual, wanted)
+
+ def test_nearest_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ interp_rg = RegularGridInterpolator(points, values, method="nearest")
+ sample = np.asarray([[0.1, 0.1, 10., 9.]])
+ wanted = interpn(points, values, sample, method="nearest")
+ assert_array_almost_equal(interp_rg(sample), wanted)
+
+ def test_4d_nearest_outofbounds(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+ wanted = 999.99
+ actual = interpn(points, values, sample, method="nearest",
+ bounds_error=False, fill_value=999.99)
+ assert_array_almost_equal(actual, wanted)
+
+ def test_xi_1d(self):
+ # verify that 1-D xi works as expected
+ points, values = self._sample_4d_data()
+ sample = np.asarray([0.1, 0.1, 10., 9.])
+ v1 = interpn(points, values, sample, bounds_error=False)
+ v2 = interpn(points, values, sample[None,:], bounds_error=False)
+ assert_allclose(v1, v2)
+
+ def test_xi_nd(self):
+ # verify that higher-d xi works as expected
+ points, values = self._sample_4d_data()
+
+ np.random.seed(1234)
+ sample = np.random.rand(2, 3, 4)
+
+ v1 = interpn(points, values, sample, method='nearest',
+ bounds_error=False)
+ assert_equal(v1.shape, (2, 3))
+
+ v2 = interpn(points, values, sample.reshape(-1, 4),
+ method='nearest', bounds_error=False)
+ assert_allclose(v1, v2.reshape(v1.shape))
+
+ def test_xi_broadcast(self):
+ # verify that the interpolators broadcast xi
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+
+ xi = np.linspace(0, 1, 2)
+ yi = np.linspace(0, 3, 3)
+
+ for method in ['nearest', 'linear', 'splinef2d']:
+ sample = (xi[:,None], yi[None,:])
+ v1 = interpn(points, values, sample, method=method,
+ bounds_error=False)
+ assert_equal(v1.shape, (2, 3))
+
+ xx, yy = np.meshgrid(xi, yi)
+ sample = np.c_[xx.T.ravel(), yy.T.ravel()]
+
+ v2 = interpn(points, values, sample,
+ method=method, bounds_error=False)
+ assert_allclose(v1, v2.reshape(v1.shape))
+
+ def test_nonscalar_values(self):
+ # Verify that non-scalar valued values also works
+ points, values = self._sample_4d_data()
+
+ np.random.seed(1234)
+ values = np.random.rand(3, 3, 3, 3, 6)
+ sample = np.random.rand(7, 11, 4)
+
+ for method in ['nearest', 'linear']:
+ v = interpn(points, values, sample, method=method,
+ bounds_error=False)
+ assert_equal(v.shape, (7, 11, 6), err_msg=method)
+
+ vs = [interpn(points, values[...,j], sample, method=method,
+ bounds_error=False)
+ for j in range(6)]
+ v2 = np.array(vs).transpose(1, 2, 0)
+
+ assert_allclose(v, v2, err_msg=method)
+
+ # Vector-valued splines are not supported with fitpack ('splinef2d')
+ assert_raises(ValueError, interpn, points, values, sample,
+ method='splinef2d')
+
+ def test_complex(self):
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+ values = values - 2j*values
+
+ sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ for method in ['linear', 'nearest']:
+ v1 = interpn(points, values, sample, method=method)
+ v2r = interpn(points, values.real, sample, method=method)
+ v2i = interpn(points, values.imag, sample, method=method)
+ v2 = v2r + 1j*v2i
+ assert_allclose(v1, v2)
+
+ # Complex-valued data not supported by splinef2d
+ assert_warns(np.ComplexWarning, interpn, points, values,
+ sample, method='splinef2d')
+
+ def test_duck_typed_values(self):
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+
+ values = MyValue((5, 7))
+
+ for method in ('nearest', 'linear'):
+ v1 = interpn((x, y), values, [0.4, 0.7], method=method)
+ v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
+ assert_allclose(v1, v2)
+
+ def test_matrix_input(self):
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+
+ values = matrix(np.random.rand(5, 7))
+
+ sample = np.random.rand(3, 7, 2)
+
+ for method in ('nearest', 'linear', 'splinef2d'):
+ v1 = interpn((x, y), values, sample, method=method)
+ v2 = interpn((x, y), np.asarray(values), sample, method=method)
+ assert_allclose(v1, v2)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_ndgriddata.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_ndgriddata.py
new file mode 100644
index 0000000..0619885
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_ndgriddata.py
@@ -0,0 +1,189 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_equal, assert_allclose
+from pytest import raises as assert_raises
+
+from scipy.interpolate import griddata, NearestNDInterpolator
+
+
+class TestGriddata(object):
+ def test_fill_value(self):
+ x = [(0,0), (0,1), (1,0)]
+ y = [1, 2, 3]
+
+ yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
+ assert_array_equal(yi, [-1., -1, 1])
+
+ yi = griddata(x, y, [(1,1), (1,2), (0,0)])
+ assert_array_equal(yi, [np.nan, np.nan, 1])
+
+ def test_alternative_call(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ + np.array([0,1])[None,:])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
+ rescale=rescale)
+ assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+ def test_multivalue_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ + np.array([0,1])[None,:])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, x, method=method, rescale=rescale)
+ assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+ def test_multipoint_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+
+ xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+ assert_equal(yi.shape, (5, 3), err_msg=msg)
+ assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+ atol=1e-14, err_msg=msg)
+
+ def test_complex_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 2j*y[::-1]
+
+ xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+ assert_equal(yi.shape, (5, 3), err_msg=msg)
+ assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+ atol=1e-14, err_msg=msg)
+
+ def test_1d(self):
+ x = np.array([1, 2.5, 3, 4.5, 5, 6])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ assert_allclose(griddata(x, y, x, method=method), y,
+ err_msg=method, atol=1e-14)
+ assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+ err_msg=method, atol=1e-14)
+ assert_allclose(griddata((x,), y, (x,), method=method), y,
+ err_msg=method, atol=1e-14)
+
+ def test_1d_borders(self):
+ # Test the nearest-neighbor case with xi outside
+ # the range of the sample points.
+ x = np.array([1, 2.5, 3, 4.5, 5, 6])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+ xi = np.array([0.9, 6.5])
+ yi_should = np.array([1.0, 1.0])
+
+ method = 'nearest'
+ assert_allclose(griddata(x, y, xi,
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+ assert_allclose(griddata(x.reshape(6, 1), y, xi,
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+ assert_allclose(griddata((x, ), y, (xi, ),
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+
+ def test_1d_unsorted(self):
+ x = np.array([2.5, 1, 4.5, 5, 6, 3])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ assert_allclose(griddata(x, y, x, method=method), y,
+ err_msg=method, atol=1e-10)
+ assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+ err_msg=method, atol=1e-10)
+ assert_allclose(griddata((x,), y, (x,), method=method), y,
+ err_msg=method, atol=1e-10)
+
+ def test_square_rescale_manual(self):
+ points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.double)
+ points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)], dtype=np.double)
+ values = np.array([1., 2., -3., 5., 9.], dtype=np.double)
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+ np.linspace(0, 100, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+ xi = np.array([xx, yy]).T.copy()
+
+ for method in ('nearest', 'linear', 'cubic'):
+ msg = method
+ zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
+ method=method)
+ zi_rescaled = griddata(points, values, xi, method=method,
+ rescale=True)
+ assert_allclose(zi, zi_rescaled, err_msg=msg,
+ atol=1e-12)
+
+ def test_xi_1d(self):
+ # Check that 1-D xi is interpreted as a coordinate
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.double)
+ y = np.arange(x.shape[0], dtype=np.double)
+ y = y - 2j*y[::-1]
+
+ xi = np.array([0.5, 0.5])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ p1 = griddata(x, y, xi, method=method)
+ p2 = griddata(x, y, xi[None,:], method=method)
+ assert_allclose(p1, p2, err_msg=method)
+
+ xi1 = np.array([0.5])
+ xi3 = np.array([0.5, 0.5, 0.5])
+ assert_raises(ValueError, griddata, x, y, xi1,
+ method=method)
+ assert_raises(ValueError, griddata, x, y, xi3,
+ method=method)
+
+
+def test_nearest_options():
+ # smoke test that NearestNDInterpolator accepts cKDTree options
+ npts, nd = 4, 3
+ x = np.arange(npts*nd).reshape((npts, nd))
+ y = np.arange(npts)
+ nndi = NearestNDInterpolator(x, y)
+
+ opts = {'balanced_tree': False, 'compact_nodes': False}
+ nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
+ assert_allclose(nndi(x), nndi_o(x), atol=1e-14)
+
+
+def test_nearest_list_argument():
+ nd = np.array([[0, 0, 0, 0, 1, 0, 1],
+ [0, 0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 1, 1, 2]])
+ d = nd[:, 3:]
+
+ # z is np.array
+ NI = NearestNDInterpolator((d[0], d[1]), d[2])
+ assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+ # z is list
+ NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
+ assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_pade.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_pade.py
new file mode 100644
index 0000000..5c3e03e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_pade.py
@@ -0,0 +1,101 @@
+from numpy.testing import (assert_array_equal, assert_array_almost_equal)
+from scipy.interpolate import pade
+
+def test_pade_trivial():
+ nump, denomp = pade([1.0], 0)
+ assert_array_equal(nump.c, [1.0])
+ assert_array_equal(denomp.c, [1.0])
+
+ nump, denomp = pade([1.0], 0, 0)
+ assert_array_equal(nump.c, [1.0])
+ assert_array_equal(denomp.c, [1.0])
+
+
+def test_pade_4term_exp():
+ # First four Taylor coefficients of exp(x).
+ # Unlike poly1d, the first array element is the zero-order term.
+ an = [1.0, 1.0, 0.5, 1.0/6]
+
+ nump, denomp = pade(an, 0)
+ assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1)
+ assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+ nump, denomp = pade(an, 2)
+ assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+ nump, denomp = pade(an, 3)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+ # Testing inclusion of optional parameter
+ nump, denomp = pade(an, 0, 3)
+ assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1, 2)
+ assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+ nump, denomp = pade(an, 2, 1)
+ assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+ nump, denomp = pade(an, 3, 0)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+ # Testing reducing array.
+ nump, denomp = pade(an, 0, 2)
+ assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1, 1)
+ assert_array_almost_equal(nump.c, [1.0/2, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])
+
+ nump, denomp = pade(an, 2, 0)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])
+
+
+def test_pade_ints():
+ # Simple test sequences (one of ints, one of floats).
+ an_int = [1, 2, 3, 4]
+ an_flt = [1.0, 2.0, 3.0, 4.0]
+
+ # Make sure integer arrays give the same result as float arrays with same values.
+ for i in range(0, len(an_int)):
+ for j in range(0, len(an_int) - i):
+
+ # Create float and int pade approximation for given order.
+ nump_int, denomp_int = pade(an_int, i, j)
+ nump_flt, denomp_flt = pade(an_flt, i, j)
+
+ # Check that they are the same.
+ assert_array_equal(nump_int.c, nump_flt.c)
+ assert_array_equal(denomp_int.c, denomp_flt.c)
+
+
+def test_pade_complex():
+ # Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
+ # Variable x is parameter - these tests will work with any complex number.
+ x = 0.2 + 0.6j
+ an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
+ -(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]
+
+ nump, denomp = pade(an, 1, 1)
+ assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
+ assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])
+
+ nump, denomp = pade(an, 1, 2)
+ assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
+ assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])
+
+ nump, denomp = pade(an, 2, 2)
+ assert_array_almost_equal(nump.c, [x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0])
+ assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_polyint.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_polyint.py
new file mode 100644
index 0000000..3cb2b16
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_polyint.py
@@ -0,0 +1,722 @@
+import warnings
+import io
+import numpy as np
+
+from numpy.testing import (
+ assert_almost_equal, assert_array_equal, assert_array_almost_equal,
+ assert_allclose, assert_equal, assert_)
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (
+ KroghInterpolator, krogh_interpolate,
+ BarycentricInterpolator, barycentric_interpolate,
+ approximate_taylor_polynomial, CubicHermiteSpline, pchip,
+ PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
+ make_interp_spline)
+
+
+def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
+ extra_args={}):
+ np.random.seed(1234)
+
+ x = [-1, 0, 1, 2, 3, 4]
+ s = list(range(1, len(y_shape)+1))
+ s.insert(axis % (len(y_shape)+1), 0)
+ y = np.random.rand(*((6,) + y_shape)).transpose(s)
+
+ # Cython code chokes on y.shape = (0, 3) etc., skip them
+ if y.size == 0:
+ return
+
+ xi = np.zeros(x_shape)
+ if interpolator_cls is CubicHermiteSpline:
+ dydx = np.random.rand(*((6,) + y_shape)).transpose(s)
+ yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
+ else:
+ yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
+
+ target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ + x_shape + y.shape[axis:][1:])
+ assert_equal(yi.shape, target_shape)
+
+ # check it works also with lists
+ if x_shape and y.size > 0:
+ if interpolator_cls is CubicHermiteSpline:
+ interpolator_cls(list(x), list(y), list(dydx), axis=axis,
+ **extra_args)(list(xi))
+ else:
+ interpolator_cls(list(x), list(y), axis=axis,
+ **extra_args)(list(xi))
+
+ # check also values
+ if xi.size > 0 and deriv_shape is None:
+ bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
+ yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
+ yv = yv.reshape(bs_shape)
+
+ yi, y = np.broadcast_arrays(yi, yv)
+ assert_allclose(yi, y)
+
+
+SHAPES = [(), (0,), (1,), (6, 2, 5)]
+
+
+def test_shapes():
+
+ def spl_interp(x, y, axis):
+ return make_interp_spline(x, y, axis=axis)
+
+ for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
+ pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ if ip != CubicSpline:
+ check_shape(ip, s1, s2, None, axis)
+ else:
+ for bc in ['natural', 'clamped']:
+ extra = {'bc_type': bc}
+ check_shape(ip, s1, s2, None, axis, extra)
+
+def test_derivs_shapes():
+ def krogh_derivs(x, y, axis=0):
+ return KroghInterpolator(x, y, axis).derivatives
+
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ check_shape(krogh_derivs, s1, s2, (6,), axis)
+
+
+def test_deriv_shapes():
+ def krogh_deriv(x, y, axis=0):
+ return KroghInterpolator(x, y, axis).derivative
+
+ def pchip_deriv(x, y, axis=0):
+ return pchip(x, y, axis).derivative()
+
+ def pchip_deriv2(x, y, axis=0):
+ return pchip(x, y, axis).derivative(2)
+
+ def pchip_antideriv(x, y, axis=0):
+ return pchip(x, y, axis).derivative()
+
+ def pchip_antideriv2(x, y, axis=0):
+ return pchip(x, y, axis).derivative(2)
+
+ def pchip_deriv_inplace(x, y, axis=0):
+ class P(PchipInterpolator):
+ def __call__(self, x):
+ return PchipInterpolator.__call__(self, x, 1)
+ pass
+ return P(x, y, axis)
+
+ def akima_deriv(x, y, axis=0):
+ return Akima1DInterpolator(x, y, axis).derivative()
+
+ def akima_antideriv(x, y, axis=0):
+ return Akima1DInterpolator(x, y, axis).antiderivative()
+
+ def cspline_deriv(x, y, axis=0):
+ return CubicSpline(x, y, axis).derivative()
+
+ def cspline_antideriv(x, y, axis=0):
+ return CubicSpline(x, y, axis).antiderivative()
+
+ def bspl_deriv(x, y, axis=0):
+ return make_interp_spline(x, y, axis=axis).derivative()
+
+ def bspl_antideriv(x, y, axis=0):
+ return make_interp_spline(x, y, axis=axis).antiderivative()
+
+ for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
+ pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
+ cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ check_shape(ip, s1, s2, (), axis)
+
+
+def test_complex():
+ x = [1, 2, 3, 4]
+ y = [1, 2, 1j, 3]
+
+ for ip in [KroghInterpolator, BarycentricInterpolator, pchip, CubicSpline]:
+ p = ip(x, y)
+ assert_allclose(y, p(x))
+
+ dydx = [0, -1j, 2, 3j]
+ p = CubicHermiteSpline(x, y, dydx)
+ assert_allclose(y, p(x))
+ assert_allclose(dydx, p(x, 1))
+
+
+class TestKrogh(object):
+ def setup_method(self):
+ self.true_poly = np.poly1d([-2,3,1,5,-4])
+ self.test_xs = np.linspace(-1,1,100)
+ self.xs = np.linspace(-1,1,5)
+ self.ys = self.true_poly(self.xs)
+
+ def test_lagrange(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+ def test_scalar(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(7),P(7))
+ assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
+
+ def test_derivatives(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+ D[i])
+
+ def test_low_derivatives(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ D = P.derivatives(self.test_xs,len(self.xs)+2)
+ for i in range(D.shape[0]):
+ assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+ D[i])
+
+ def test_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ m = 10
+ r = P.derivatives(self.test_xs,m)
+ for i in range(m):
+ assert_almost_equal(P.derivative(self.test_xs,i),r[i])
+
+ def test_high_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ for i in range(len(self.xs), 2*len(self.xs)):
+ assert_almost_equal(P.derivative(self.test_xs,i),
+ np.zeros(len(self.test_xs)))
+
+ def test_hermite(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+ def test_vector(self):
+ xs = [0, 1, 2]
+ ys = np.array([[0,1],[1,0],[2,1]])
+ P = KroghInterpolator(xs,ys)
+ Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
+ test_xs = np.linspace(-1,3,100)
+ assert_almost_equal(P(test_xs),
+ np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
+ assert_almost_equal(P.derivatives(test_xs),
+ np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
+ (1,2,0)))
+
+ def test_empty(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_array_equal(P([]), [])
+
+ def test_shapes_scalarvalue(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_array_equal(np.shape(P(0)), ())
+ assert_array_equal(np.shape(P(np.array(0))), ())
+ assert_array_equal(np.shape(P([0])), (1,))
+ assert_array_equal(np.shape(P([0,1])), (2,))
+
+ def test_shapes_scalarvalue_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,))
+ assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
+
+ def test_shapes_vectorvalue(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ assert_array_equal(np.shape(P(0)), (3,))
+ assert_array_equal(np.shape(P([0])), (1,3))
+ assert_array_equal(np.shape(P([0,1])), (2,3))
+
+ def test_shapes_1d_vectorvalue(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
+ assert_array_equal(np.shape(P(0)), (1,))
+ assert_array_equal(np.shape(P([0])), (1,1))
+ assert_array_equal(np.shape(P([0,1])), (2,1))
+
+ def test_shapes_vectorvalue_derivative(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,3))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
+
+ def test_wrapper(self):
+ P = KroghInterpolator(self.xs, self.ys)
+ ki = krogh_interpolate
+ assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
+ assert_almost_equal(P.derivative(self.test_xs, 2),
+ ki(self.xs, self.ys, self.test_xs, der=2))
+ assert_almost_equal(P.derivatives(self.test_xs, 2),
+ ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
+
+ def test_int_inputs(self):
+ # Check input args are cast correctly to floats, gh-3669
+ x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
+ 13104, 60000]
+ offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
+ -0.48002351, -0.34925329, -0.26503107,
+ -0.13148093, -0.12988833, -0.12979296,
+ -0.12973574, -0.08582937, 0.05])
+ f = KroghInterpolator(x, offset_cdf)
+
+ assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
+ 0, atol=1e-10)
+
+ def test_derivatives_complex(self):
+ # regression test for gh-7381: krogh.derivatives(0) fails complex y
+ x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
+ func = KroghInterpolator(x, y)
+ cmplx = func.derivatives(0)
+
+ cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
+ 1j*KroghInterpolator(x, y.imag).derivatives(0))
+ assert_allclose(cmplx, cmplx2, atol=1e-15)
+
+
class TestTaylor(object):
    """Smoke test for approximate_taylor_polynomial."""

    def test_exponential(self):
        # exp has every Taylor coefficient derivative equal to 1 at x=0:
        # repeatedly differentiate and check the value at 0 each time.
        degree = 5
        poly = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
        for _ in range(degree + 1):
            assert_almost_equal(poly(0), 1)
            poly = poly.deriv()
        # After degree+1 differentiations only the zero polynomial remains.
        assert_almost_equal(poly(0), 0)
+
+
class TestBarycentric(object):
    """Tests for BarycentricInterpolator on samples of an exact quartic."""

    def setup_method(self):
        self.true_poly = np.poly1d([-2, 3, 1, 5, -4])
        self.test_xs = np.linspace(-1, 1, 100)
        self.xs = np.linspace(-1, 1, 5)
        self.ys = self.true_poly(self.xs)

    def test_lagrange(self):
        # Five nodes determine a quartic exactly.
        interp = BarycentricInterpolator(self.xs, self.ys)
        assert_almost_equal(self.true_poly(self.test_xs), interp(self.test_xs))

    def test_scalar(self):
        interp = BarycentricInterpolator(self.xs, self.ys)
        for arg in (7, np.array(7)):
            assert_almost_equal(self.true_poly(arg), interp(arg))

    def test_delayed(self):
        # y values may be supplied after construction via set_yi.
        interp = BarycentricInterpolator(self.xs)
        interp.set_yi(self.ys)
        assert_almost_equal(self.true_poly(self.test_xs), interp(self.test_xs))

    def test_append(self):
        # Nodes may be added incrementally via add_xi.
        interp = BarycentricInterpolator(self.xs[:3], self.ys[:3])
        interp.add_xi(self.xs[3:], self.ys[3:])
        assert_almost_equal(self.true_poly(self.test_xs), interp(self.test_xs))

    def test_vector(self):
        # Vector-valued data must match per-column scalar interpolants.
        nodes = [0, 1, 2]
        values = np.array([[0, 1], [1, 0], [2, 1]])
        interp = BarycentricInterpolator(nodes, values)
        per_column = [BarycentricInterpolator(nodes, values[:, j])
                      for j in range(values.shape[1])]
        probe = np.linspace(-1, 3, 100)
        stacked = np.asarray([p(probe) for p in per_column])
        assert_almost_equal(interp(probe), np.rollaxis(stacked, -1))

    def test_shapes_scalarvalue(self):
        interp = BarycentricInterpolator(self.xs, self.ys)
        cases = [(0, ()), (np.array(0), ()), ([0], (1,)), ([0, 1], (2,))]
        for arg, expected in cases:
            assert_array_equal(np.shape(interp(arg)), expected)

    def test_shapes_vectorvalue(self):
        interp = BarycentricInterpolator(self.xs,
                                         np.outer(self.ys, np.arange(3)))
        for arg, expected in [(0, (3,)), ([0], (1, 3)), ([0, 1], (2, 3))]:
            assert_array_equal(np.shape(interp(arg)), expected)

    def test_shapes_1d_vectorvalue(self):
        interp = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
        for arg, expected in [(0, (1,)), ([0], (1, 1)), ([0, 1], (2, 1))]:
            assert_array_equal(np.shape(interp(arg)), expected)

    def test_wrapper(self):
        # Functional wrapper must agree with the class API.
        interp = BarycentricInterpolator(self.xs, self.ys)
        direct = barycentric_interpolate(self.xs, self.ys, self.test_xs)
        assert_almost_equal(interp(self.test_xs), direct)

    def test_int_input(self):
        x = 1000 * np.arange(1, 11)  # np.prod(x[-1] - x[:-1]) overflows
        y = np.arange(1, 11)
        value = barycentric_interpolate(x, y, 1000 * 9.5)
        assert_almost_equal(value, 9.5)
+
+
class TestPCHIP(object):
    # Tests for the shape-preserving PCHIP interpolator: no overshoot,
    # monotonicity preservation, dtype casting, and reference values.

    def _make_random(self, npts=20):
        # Fixed seed so the "random" fixture is identical across test runs.
        np.random.seed(1234)
        xi = np.sort(np.random.random(npts))
        yi = np.random.random(npts)
        return pchip(xi, yi), xi, yi

    def test_overshoot(self):
        # PCHIP should not overshoot
        p, xi, yi = self._make_random()
        for i in range(len(xi)-1):
            x1, x2 = xi[i], xi[i+1]
            y1, y2 = yi[i], yi[i+1]
            if y1 > y2:
                y1, y2 = y2, y1
            xp = np.linspace(x1, x2, 10)
            yp = p(xp)
            # 1e-15 slack tolerates round-off at the interval endpoints.
            assert_(((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all())

    def test_monotone(self):
        # PCHIP should preserve monotonicity
        p, xi, yi = self._make_random()
        for i in range(len(xi)-1):
            x1, x2 = xi[i], xi[i+1]
            y1, y2 = yi[i], yi[i+1]
            xp = np.linspace(x1, x2, 10)
            yp = p(xp)
            # NOTE(review): yp[:1] broadcasts, comparing every later sample
            # against the first one rather than successive differences
            # (yp[:-1]); both imply monotonicity here, but confirm intent.
            assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all())

    def test_cast(self):
        # regression test for integer input data, see gh-3453
        data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
                         [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
        xx = np.arange(100)
        curve = pchip(data[0], data[1])(xx)

        data1 = data * 1.0
        curve1 = pchip(data1[0], data1[1])(xx)

        assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)

    def test_nag(self):
        # Example from NAG C implementation,
        # http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
        # suggested in gh-5326 as a smoke test for the way the derivatives
        # are computed (see also gh-3453)
        dataStr = '''
          7.99   0.00000E+0
          8.09   0.27643E-4
          8.19   0.43750E-1
          8.70   0.16918E+0
          9.20   0.46943E+0
         10.00   0.94374E+0
         12.00   0.99864E+0
         15.00   0.99992E+0
         20.00   0.99999E+0
        '''
        data = np.loadtxt(io.StringIO(dataStr))
        pch = pchip(data[:,0], data[:,1])

        resultStr = '''
           7.9900       0.0000
           9.1910       0.4640
          10.3920       0.9645
          11.5930       0.9965
          12.7940       0.9992
          13.9950       0.9998
          15.1960       0.9999
          16.3970       1.0000
          17.5980       1.0000
          18.7990       1.0000
          20.0000       1.0000
        '''
        result = np.loadtxt(io.StringIO(resultStr))
        # Compare against NAG reference values to 5e-5 absolute tolerance.
        assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)

    def test_endslopes(self):
        # this is a smoke test for gh-3453: PCHIP interpolator should not
        # set edge slopes to zero if the data do not suggest zero edge derivatives
        x = np.array([0.0, 0.1, 0.25, 0.35])
        y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
        y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
        for pp in (pchip(x, y1), pchip(x, y2)):
            for t in (x[0], x[-1]):
                assert_(pp(t, 1) != 0)

    def test_all_zeros(self):
        x = np.arange(10)
        y = np.zeros_like(x)

        # this should work and not generate any warnings
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            pch = pchip(x, y)

        xx = np.linspace(0, 9, 101)
        assert_equal(pch(xx), 0.)

    def test_two_points(self):
        # regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
        # it tries to use a three-point scheme to estimate edge derivatives,
        # while there are only two points available.
        # Instead, it should construct a linear interpolator.
        x = np.linspace(0, 1, 11)
        p = pchip([0, 1], [0, 2])
        assert_allclose(p(x), 2*x, atol=1e-15)

    def test_pchip_interpolate(self):
        # der selects which derivative order(s) of the interpolant to return.
        assert_array_almost_equal(
            pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
            [1.])

        assert_array_almost_equal(
            pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
            [3.5])

        assert_array_almost_equal(
            pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
            [[3.5], [1]])

    def test_roots(self):
        # regression test for gh-6357: .roots method should work
        p = pchip([0, 1], [-1, 1])
        r = p.roots()
        assert_allclose(r, 0.5)
+
+
class TestCubicSpline(object):
    # Validates CubicSpline coefficients and boundary-condition handling.

    @staticmethod
    def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
                          tol=1e-14):
        """Check that spline coefficients satisfy the continuity and boundary
        conditions."""
        x = S.x
        c = S.c
        dx = np.diff(x)
        # Reshape so dx broadcasts against any trailing data axes of c.
        dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
        dxi = dx[:-1]

        # Check C2 continuity.
        assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
                        c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
        assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
                        2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
        assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
                        rtol=tol, atol=tol)

        # Check that we found a parabola, the third derivative is 0.
        if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
            assert_allclose(c[0], 0, rtol=tol, atol=tol)
            return

        # Check periodic boundary conditions.
        if bc_start == 'periodic':
            assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
            assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
            assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
            return

        # Check other boundary conditions.
        if bc_start == 'not-a-knot':
            if x.size == 2:
                # Two points: not-a-knot degenerates to a straight line.
                slope = (S(x[1]) - S(x[0])) / dx[0]
                assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
            else:
                assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
        elif bc_start == 'clamped':
            assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
        elif bc_start == 'natural':
            assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
        else:
            # (order, value) pair: prescribed derivative at the left edge.
            order, value = bc_start
            assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)

        if bc_end == 'not-a-knot':
            if x.size == 2:
                slope = (S(x[1]) - S(x[0])) / dx[0]
                assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
            else:
                assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
        elif bc_end == 'clamped':
            assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
        elif bc_end == 'natural':
            # 2*tol: the right-edge natural condition carries slightly more
            # accumulated round-off.
            assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)
        else:
            order, value = bc_end
            assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)

    def check_all_bc(self, x, y, axis):
        # Exercise every supported boundary-condition combination on (x, y).
        deriv_shape = list(y.shape)
        del deriv_shape[axis]
        first_deriv = np.empty(deriv_shape)
        first_deriv.fill(2)
        second_deriv = np.empty(deriv_shape)
        second_deriv.fill(-1)
        bc_all = [
            'not-a-knot',
            'natural',
            'clamped',
            (1, first_deriv),
            (2, second_deriv)
        ]
        for bc in bc_all[:3]:
            S = CubicSpline(x, y, axis=axis, bc_type=bc)
            self.check_correctness(S, bc, bc)

        for bc_start in bc_all:
            for bc_end in bc_all:
                S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
                self.check_correctness(S, bc_start, bc_end, tol=2e-14)

    def test_general(self):
        x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
        y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
        # n = 2 and n = 3 exercise the degenerate small-data code paths.
        for n in [2, 3, x.size]:
            self.check_all_bc(x[:n], y[:n], 0)

            Y = np.empty((2, n, 2))
            Y[0, :, 0] = y[:n]
            Y[0, :, 1] = y[:n] - 1
            Y[1, :, 0] = y[:n] + 2
            Y[1, :, 1] = y[:n] + 3
            self.check_all_bc(x[:n], Y, 1)

    def test_periodic(self):
        for n in [2, 3, 5]:
            x = np.linspace(0, 2 * np.pi, n)
            y = np.cos(x)
            S = CubicSpline(x, y, bc_type='periodic')
            self.check_correctness(S, 'periodic', 'periodic')

            Y = np.empty((2, n, 2))
            Y[0, :, 0] = y
            Y[0, :, 1] = y + 2
            Y[1, :, 0] = y - 1
            Y[1, :, 1] = y + 5
            S = CubicSpline(x, Y, axis=1, bc_type='periodic')
            self.check_correctness(S, 'periodic', 'periodic')

    def test_periodic_eval(self):
        x = np.linspace(0, 2 * np.pi, 10)
        y = np.cos(x)
        S = CubicSpline(x, y, bc_type='periodic')
        assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)

    def test_second_derivative_continuity_gh_11758(self):
        # gh-11758: C2 continuity fail
        x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
                      7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
        y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
                      2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
        S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
        self.check_correctness(S, 'periodic', 'periodic')

    def test_three_points(self):
        # gh-11758: Fails computing a_m2_m1
        # In this case, s (first derivatives) could be found manually by solving
        # system of 2 linear equations. Due to solution of this system,
        # s[i] = (h1m2 + h2m1) / (h1 + h2), where h1 = x[1] - x[0], h2 = x[2] - x[1],
        # m1 = (y[1] - y[0]) / h1, m2 = (y[2] - y[1]) / h2
        x = np.array([1.0, 2.75, 3.0])
        y = np.array([1.0, 15.0, 1.0])
        S = CubicSpline(x, y, bc_type='periodic')
        self.check_correctness(S, 'periodic', 'periodic')
        assert_allclose(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))

    def test_dtypes(self):
        # Integer and complex y, and complex boundary values, must all work.
        x = np.array([0, 1, 2, 3], dtype=int)
        y = np.array([-5, 2, 3, 1], dtype=int)
        S = CubicSpline(x, y)
        self.check_correctness(S)

        y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
        S = CubicSpline(x, y)
        self.check_correctness(S)

        S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
        self.check_correctness(S, "natural", (1, 2j))

        y = np.array([-5, 2, 3, 1])
        S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
        self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))

    def test_small_dx(self):
        # Tightly clustered abscissae stress the tridiagonal solve.
        rng = np.random.RandomState(0)
        x = np.sort(rng.uniform(size=100))
        y = 1e4 + rng.uniform(size=100)
        S = CubicSpline(x, y)
        self.check_correctness(S, tol=1e-13)

    def test_incorrect_inputs(self):
        x = np.array([1, 2, 3, 4])
        y = np.array([1, 2, 3, 4])
        xc = np.array([1 + 1j, 2, 3, 4])   # complex abscissae are invalid
        xn = np.array([np.nan, 2, 3, 4])   # NaN abscissa
        xo = np.array([2, 1, 3, 4])        # not sorted
        yn = np.array([np.nan, 2, 3, 4])   # NaN ordinate
        y3 = [1, 2, 3]                     # length mismatch
        x1 = [1]
        y1 = [1]

        assert_raises(ValueError, CubicSpline, xc, y)
        assert_raises(ValueError, CubicSpline, xn, y)
        assert_raises(ValueError, CubicSpline, x, yn)
        assert_raises(ValueError, CubicSpline, xo, y)
        assert_raises(ValueError, CubicSpline, x, y3)
        assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
        assert_raises(ValueError, CubicSpline, x1, y1)

        wrong_bc = [('periodic', 'clamped'),
                    ((2, 0), (3, 10)),
                    ((1, 0), ),
                    (0., 0.),
                    'not-a-typo']

        for bc_type in wrong_bc:
            assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)

        # Shapes mismatch when giving arbitrary derivative values:
        Y = np.c_[y, y]
        bc1 = ('clamped', (1, 0))
        bc2 = ('clamped', (1, [0, 0, 0]))
        bc3 = ('clamped', (1, [[0, 0]]))
        assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
        assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
        assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)

        # periodic condition, y[-1] must be equal to y[0]:
        assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
+
+
def test_CubicHermiteSpline_correctness():
    """The spline must reproduce the given values and slopes at the knots."""
    knots = [0, 2, 7]
    values = [-1, 2, 3]
    slopes = [0, 3, 7]
    spline = CubicHermiteSpline(knots, values, slopes)
    assert_allclose(spline(knots), values, rtol=1e-15)
    assert_allclose(spline(knots, 1), slopes, rtol=1e-15)
+
+
def test_CubicHermiteSpline_error_handling():
    """dydx with the wrong length or containing NaN must raise ValueError."""
    x = [1, 2, 3]
    y = [0, 3, 5]

    # One slope too many.
    assert_raises(ValueError, CubicHermiteSpline, x, y, [1, -1, 2, 3])
    # A NaN derivative value.
    assert_raises(ValueError, CubicHermiteSpline, x, y, [1, 0, np.nan])
+
+
def test_roots_extrapolate_gh_11185():
    """roots(extrapolate=True) on a single cubic piece reports all 3 real roots."""
    x = np.array([0.001, 0.002])
    y = np.array([1.66066935e-06, 1.10410807e-06])
    dy = np.array([-1.60061854, -1.600619])
    spline = CubicHermiteSpline(x, y, dy)

    all_roots = spline.roots(extrapolate=True)
    # Exactly one polynomial piece...
    assert_equal(spline.c.shape[1], 1)
    # ...and all three real roots of that cubic are returned.
    assert_equal(all_roots.size, 3)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_rbf.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_rbf.py
new file mode 100644
index 0000000..23456d1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_rbf.py
@@ -0,0 +1,221 @@
+# Created by John Travers, Robert Hetland, 2007
+""" Test functions for rbf module """
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_almost_equal,
+ assert_almost_equal)
+from numpy import linspace, sin, cos, random, exp, allclose
+from scipy.interpolate.rbf import Rbf
+
# Every RBF basis-function name exercised by the parametrised checks below.
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
             'cubic', 'quintic', 'thin-plate', 'linear')
+
+
def check_rbf1d_interpolation(function):
    # Check that the Rbf function interpolates through the nodes (1D)
    nodes = np.linspace(0, 10, 9)
    values = np.sin(nodes)
    rbf = Rbf(nodes, values, function=function)
    assert_array_almost_equal(values, rbf(nodes))
    # A scalar query at a node must reproduce the node value too.
    assert_almost_equal(rbf(float(nodes[0])), values[0])
+
+
def check_rbf2d_interpolation(function):
    # Check that the Rbf function interpolates through the nodes (2D).
    xn = random.rand(50, 1)*4 - 2
    yn = random.rand(50, 1)*4 - 2
    zn = xn*exp(-xn**2 - 1j*yn**2)  # complex-valued samples
    rbf = Rbf(xn, yn, zn, epsilon=2, function=function)
    recovered = rbf(xn, yn)
    recovered.shape = xn.shape
    assert_array_almost_equal(zn, recovered)
+
+
def check_rbf3d_interpolation(function):
    # Check that the Rbf function interpolates through the nodes (3D).
    xn = random.rand(50, 1)*4 - 2
    yn = random.rand(50, 1)*4 - 2
    zn = random.rand(50, 1)*4 - 2
    dn = xn*exp(-xn**2 - yn**2)
    rbf = Rbf(xn, yn, zn, dn, epsilon=2, function=function)
    recovered = rbf(xn, yn, zn)
    recovered.shape = xn.shape
    assert_array_almost_equal(recovered, dn)
+
+
def test_rbf_interpolation():
    # Run the 1-D, 2-D and 3-D interpolation checks for every basis function.
    for func_name in FUNCTIONS:
        for check in (check_rbf1d_interpolation,
                      check_rbf2d_interpolation,
                      check_rbf3d_interpolation):
            check(func_name)
+
+
def check_2drbf1d_interpolation(function):
    # Check that the 2-D Rbf function interpolates through the nodes (1D)
    nodes = np.linspace(0, 10, 9)
    values = np.vstack([np.sin(nodes), np.cos(nodes)]).T
    rbf = Rbf(nodes, values, function=function, mode='N-D')
    assert_array_almost_equal(values, rbf(nodes))
    assert_almost_equal(rbf(float(nodes[0])), values[0])
+
+
def check_2drbf2d_interpolation(function):
    # Check that the 2-D Rbf function interpolates through the nodes (2D).
    xn = random.rand(50, ) * 4 - 2
    yn = random.rand(50, ) * 4 - 2
    col0 = xn * exp(-xn ** 2 - 1j * yn ** 2)
    col1 = yn * exp(-yn ** 2 - 1j * xn ** 2)
    zn = np.vstack([col0, col1]).T
    rbf = Rbf(xn, yn, zn, epsilon=2, function=function, mode='N-D')
    recovered = rbf(xn, yn)
    recovered.shape = zn.shape
    assert_array_almost_equal(zn, recovered)
+
+
def check_2drbf3d_interpolation(function):
    # Check that the 2-D Rbf function interpolates through the nodes (3D).
    xn = random.rand(50, ) * 4 - 2
    yn = random.rand(50, ) * 4 - 2
    zn = random.rand(50, ) * 4 - 2
    col0 = xn * exp(-xn ** 2 - yn ** 2)
    col1 = yn * exp(-yn ** 2 - xn ** 2)
    dn = np.vstack([col0, col1]).T
    rbf = Rbf(xn, yn, zn, dn, epsilon=2, function=function, mode='N-D')
    recovered = rbf(xn, yn, zn)
    recovered.shape = dn.shape
    assert_array_almost_equal(recovered, dn)
+
+
def test_2drbf_interpolation():
    # Run the N-D-mode interpolation checks for every basis function.
    for func_name in FUNCTIONS:
        for check in (check_2drbf1d_interpolation,
                      check_2drbf2d_interpolation,
                      check_2drbf3d_interpolation):
            check(func_name)
+
+
def check_rbf1d_regularity(function, atol):
    # Check that the Rbf function approximates a smooth function well away
    # from the nodes.
    nodes = np.linspace(0, 10, 9)
    rbf = Rbf(nodes, np.sin(nodes), function=function)
    fine = np.linspace(0, 10, 100)
    approx = rbf(fine)
    exact = np.sin(fine)
    msg = "abs-diff: %f" % abs(approx - exact).max()
    assert_(allclose(approx, exact, atol=atol), msg)
+
+
def test_rbf_regularity():
    # Per-basis-function absolute tolerances for off-node accuracy.
    tolerances = dict.fromkeys(FUNCTIONS, 1e-2)
    tolerances.update({
        'multiquadric': 0.1,
        'inverse multiquadric': 0.15,
        'gaussian': 0.15,
        'cubic': 0.15,
        'quintic': 0.1,
        'thin-plate': 0.1,
        'linear': 0.2,
    })
    for function in FUNCTIONS:
        check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
def check_2drbf1d_regularity(function, atol):
    # Check that the 2-D Rbf function approximates a smooth function well away
    # from the nodes.
    nodes = np.linspace(0, 10, 9)
    values = np.vstack([np.sin(nodes), np.cos(nodes)]).T
    rbf = Rbf(nodes, values, function=function, mode='N-D')
    fine = np.linspace(0, 10, 100)
    approx = rbf(fine)
    exact = np.vstack([np.sin(fine), np.cos(fine)]).T
    msg = "abs-diff: %f" % abs(approx - exact).max()
    assert_(allclose(approx, exact, atol=atol), msg)
+
+
def test_2drbf_regularity():
    # N-D-mode accuracy tolerances; thin-plate is slightly looser than 1-D.
    tolerances = dict.fromkeys(FUNCTIONS, 1e-2)
    tolerances.update({
        'multiquadric': 0.1,
        'inverse multiquadric': 0.15,
        'gaussian': 0.15,
        'cubic': 0.15,
        'quintic': 0.1,
        'thin-plate': 0.15,
        'linear': 0.2,
    })
    for function in FUNCTIONS:
        check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
def check_rbf1d_stability(function):
    # Check that the Rbf function with default epsilon is not subject
    # to overshoot. Regression for issue #4523.
    #
    # Generate some data (fixed random seed hence deterministic)
    np.random.seed(1234)
    nodes = np.linspace(0, 10, 50)
    noisy = nodes + 4.0 * np.random.randn(len(nodes))

    rbf = Rbf(nodes, noisy, function=function)
    fine = np.linspace(0, 10, 1000)
    fitted = rbf(fine)

    # subtract the linear trend and make sure there no spikes
    assert_(np.abs(fitted - fine).max() / np.abs(noisy - nodes).max() < 1.1)
+
def test_rbf_stability():
    # Overshoot check for each basis function.
    for func_name in FUNCTIONS:
        check_rbf1d_stability(func_name)
+
+
def test_default_construction():
    # Check that the Rbf class can be constructed with the default
    # multiquadric basis function. Regression test for ticket #1228.
    nodes = np.linspace(0, 10, 9)
    values = np.sin(nodes)
    rbf = Rbf(nodes, values)
    assert_array_almost_equal(values, rbf(nodes))
+
+
def test_function_is_callable():
    # Check that the Rbf class can be constructed with function=callable.
    nodes = np.linspace(0, 10, 9)
    values = np.sin(nodes)

    def identity(r):
        # A one-argument callable receives the scaled radius only.
        return r

    rbf = Rbf(nodes, values, function=identity)
    assert_array_almost_equal(values, rbf(nodes))
+
+
def test_two_arg_function_is_callable():
    # Check that the Rbf class can be constructed with a two argument
    # function=callable.
    def _func(self, r):
        # Two-argument callables are bound: `self` is the Rbf instance.
        return self.epsilon + r

    nodes = np.linspace(0, 10, 9)
    values = np.sin(nodes)
    rbf = Rbf(nodes, values, function=_func)
    assert_array_almost_equal(values, rbf(nodes))
+
+
def test_rbf_epsilon_none():
    # Constructing with epsilon=None must not raise (a default is derived).
    nodes = np.linspace(0, 10, 9)
    Rbf(nodes, np.sin(nodes), epsilon=None)
+
+
def test_rbf_epsilon_none_collinear():
    # Check that collinear points in one dimension doesn't cause an error
    # due to epsilon = 0
    rbf = Rbf([1, 2, 3], [4, 4, 4], [5, 6, 7], epsilon=None)
    assert_(rbf.epsilon > 0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_regression.py b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_regression.py
new file mode 100644
index 0000000..4b308f3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/interpolate/tests/test_regression.py
@@ -0,0 +1,14 @@
+import numpy as np
+import scipy.interpolate as interp
+from numpy.testing import assert_almost_equal
+
+
class TestRegression(object):
    """Historic regression tests for scipy.interpolate."""

    def test_spalde_scalar_input(self):
        """Ticket #629"""
        knots_x = np.linspace(0, 10)
        knots_y = knots_x ** 3
        tck = interp.splrep(knots_x, knots_y, k=3, t=[5])
        # spalde at a scalar point returns the value and first k derivatives.
        res = interp.spalde(np.float64(1), tck)
        expected = np.array([1., 3., 6., 6.])
        assert_almost_equal(res, expected)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/__init__.py
new file mode 100644
index 0000000..246c9b1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/__init__.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+u"""
+==================================
+Input and output (:mod:`scipy.io`)
+==================================
+
+.. currentmodule:: scipy.io
+
+SciPy has many modules, classes, and functions available to read data
+from and write data to a variety of file formats.
+
+.. seealso:: `NumPy IO routines <https://numpy.org/doc/stable/reference/routines.io.html>`__
+
+MATLAB® files
+=============
+
+.. autosummary::
+ :toctree: generated/
+
+ loadmat - Read a MATLAB style mat file (version 4 through 7.1)
+ savemat - Write a MATLAB style mat file (version 4 through 7.1)
+ whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
+
+IDL® files
+==========
+
+.. autosummary::
+ :toctree: generated/
+
+ readsav - Read an IDL 'save' file
+
+Matrix Market files
+===================
+
+.. autosummary::
+ :toctree: generated/
+
+ mminfo - Query matrix info from Matrix Market formatted file
+ mmread - Read matrix from Matrix Market formatted file
+ mmwrite - Write matrix to Matrix Market formatted file
+
+Unformatted Fortran files
+===============================
+
+.. autosummary::
+ :toctree: generated/
+
+ FortranFile - A file object for unformatted sequential Fortran files
+ FortranEOFError - Exception indicating the end of a well-formed file
+ FortranFormattingError - Exception indicating an inappropriate end
+
+Netcdf
+======
+
+.. autosummary::
+ :toctree: generated/
+
+ netcdf_file - A file object for NetCDF data
+ netcdf_variable - A data object for the netcdf module
+
+Harwell-Boeing files
+====================
+
+.. autosummary::
+ :toctree: generated/
+
+ hb_read -- read H-B file
+ hb_write -- write H-B file
+
+Wav sound files (:mod:`scipy.io.wavfile`)
+=========================================
+
+.. module:: scipy.io.wavfile
+
+.. autosummary::
+ :toctree: generated/
+
+ read
+ write
+ WavFileWarning
+
+Arff files (:mod:`scipy.io.arff`)
+=================================
+
+.. module:: scipy.io.arff
+
+.. autosummary::
+ :toctree: generated/
+
+ loadarff
+ MetaData
+ ArffError
+ ParseArffError
+
+"""
+# matfile read and write
+from .matlab import loadmat, savemat, whosmat, byteordercodes
+
+# netCDF file support
+from .netcdf import netcdf_file, netcdf_variable
+
+# Fortran file support
+from ._fortran import FortranFile, FortranEOFError, FortranFormattingError
+
+from .mmio import mminfo, mmread, mmwrite
+from .idl import readsav
+from .harwell_boeing import hb_read, hb_write
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/_fortran.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/_fortran.py
new file mode 100644
index 0000000..b5311cf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/_fortran.py
@@ -0,0 +1,353 @@
+"""
+Module to read / write Fortran unformatted sequential files.
+
+This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz.
+
+"""
+import warnings
+import numpy as np
+
+__all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError']
+
+
# Raised when a read begins exactly at end-of-file, i.e. the previous record
# was the last one and the file is well-formed.
class FortranEOFError(TypeError, IOError):
    """Indicates that the file ended properly.

    This error descends from TypeError because the code used to raise
    TypeError (and this was the only way to know that the file had
    ended) so users might have ``except TypeError:``.

    """
    pass
+
+
# Raised when end-of-file is hit part-way through a record (truncated file).
class FortranFormattingError(TypeError, IOError):
    """Indicates that the file ended mid-record.

    Descends from TypeError for backward compatibility.

    """
    pass
+
+
+class FortranFile(object):
+ """
+ A file object for unformatted sequential files from Fortran code.
+
+ Parameters
+ ----------
+ filename : file or str
+ Open file object or filename.
+ mode : {'r', 'w'}, optional
+ Read-write mode, default is 'r'.
+ header_dtype : dtype, optional
+ Data type of the header. Size and endiness must match the input/output file.
+
+ Notes
+ -----
+ These files are broken up into records of unspecified types. The size of
+ each record is given at the start (although the size of this header is not
+ standard) and the data is written onto disk without any formatting. Fortran
+ compilers supporting the BACKSPACE statement will write a second copy of
+ the size to facilitate backwards seeking.
+
+ This class only supports files written with both sizes for the record.
+ It also does not support the subrecords used in Intel and gfortran compilers
+ for records which are greater than 2GB with a 4-byte header.
+
+ An example of an unformatted sequential file in Fortran would be written as::
+
+ OPEN(1, FILE=myfilename, FORM='unformatted')
+
+ WRITE(1) myvariable
+
+ Since this is a non-standard file format, whose contents depend on the
+ compiler and the endianness of the machine, caution is advised. Files from
+ gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work.
+
+ Consider using Fortran direct-access files or files from the newer Stream
+ I/O, which can be easily read by `numpy.fromfile`.
+
+ Examples
+ --------
+ To create an unformatted sequential Fortran file:
+
+ >>> from scipy.io import FortranFile
+ >>> f = FortranFile('test.unf', 'w')
+ >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32))
+ >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T)
+ >>> f.close()
+
+ To read this file:
+
+ >>> f = FortranFile('test.unf', 'r')
+ >>> print(f.read_ints(np.int32))
+ [1 2 3 4 5]
+ >>> print(f.read_reals(float).reshape((5,4), order="F"))
+ [[0. 0.05263158 0.10526316 0.15789474]
+ [0.21052632 0.26315789 0.31578947 0.36842105]
+ [0.42105263 0.47368421 0.52631579 0.57894737]
+ [0.63157895 0.68421053 0.73684211 0.78947368]
+ [0.84210526 0.89473684 0.94736842 1. ]]
+ >>> f.close()
+
+ Or, in Fortran::
+
+ integer :: a(5), i
+ double precision :: b(5,4)
+ open(1, file='test.unf', form='unformatted')
+ read(1) a
+ read(1) b
+ close(1)
+ write(*,*) a
+ do i = 1, 5
+ write(*,*) b(i,:)
+ end do
+
+ """
def __init__(self, filename, mode='r', header_dtype=np.uint32):
    # Reject an explicit None header dtype; anything else is coerced below.
    if header_dtype is None:
        raise ValueError('Must specify dtype')

    header_dtype = np.dtype(header_dtype)
    if header_dtype.kind != 'u':
        # Record sizes are non-negative; a signed header dtype is accepted
        # but almost certainly a caller mistake, so warn.
        warnings.warn("Given a dtype which is not unsigned.")

    if mode not in 'rw' or len(mode) != 1:
        raise ValueError('mode must be either r or w')

    if hasattr(filename, 'seek'):
        # Already a file-like object: use it as-is (caller keeps ownership).
        self._fp = filename
    else:
        # Always open in binary mode; records are raw bytes.
        self._fp = open(filename, '%sb' % mode)

    self._header_dtype = header_dtype
+
def _read_size(self, eof_ok=False):
    """Read one record-size marker from the current file position.

    Parameters
    ----------
    eof_ok : bool, optional
        If True, a clean end-of-file here raises FortranEOFError (a
        well-formed file may end exactly on a record boundary); any
        partial read is always a FortranFormattingError.
    """
    n = self._header_dtype.itemsize
    b = self._fp.read(n)
    if (not b) and eof_ok:
        raise FortranEOFError("End of file occurred at end of record")
    elif len(b) < n:
        # Some bytes arrived but not a whole size marker: truncated file.
        raise FortranFormattingError(
            "End of file in the middle of the record size")
    return int(np.frombuffer(b, dtype=self._header_dtype, count=1))
+
def write_record(self, *items):
    """
    Write a record (including sizes) to the file.

    Parameters
    ----------
    *items : array_like
        The data arrays to write.

    Notes
    -----
    Writes data items to a file::

        write_record(a.T, b.T, c.T, ...)

        write(1) a, b, c, ...

    Note that data in multidimensional arrays is written in
    row-major order --- to make them read correctly by Fortran
    programs, you need to transpose the arrays yourself when
    writing them.

    """
    items = tuple(np.asarray(item) for item in items)
    total_size = sum(item.nbytes for item in items)

    # The record's byte count is written both before and after the payload
    # (the trailing copy supports Fortran's BACKSPACE).
    nb = np.array([total_size], dtype=self._header_dtype)

    nb.tofile(self._fp)
    for item in items:
        item.tofile(self._fp)
    nb.tofile(self._fp)
+
+ def read_record(self, *dtypes, **kwargs):
+ """
+ Reads a record of a given type from the file.
+
+ Parameters
+ ----------
+ *dtypes : dtypes, optional
+ Data type(s) specifying the size and endiness of the data.
+
+ Returns
+ -------
+ data : ndarray
+ A 1-D array object.
+
+ Raises
+ ------
+ FortranEOFError
+ To signal that no further records are available
+ FortranFormattingError
+ To signal that the end of the file was encountered
+ part-way through a record
+
+ Notes
+ -----
+ If the record contains a multidimensional array, you can specify
+ the size in the dtype. For example::
+
+ INTEGER var(5,4)
+
+ can be read with::
+
+ read_record('(4,5)i4').T
+
+ Note that this function does **not** assume the file data is in Fortran
+ column major order, so you need to (i) swap the order of dimensions
+ when reading and (ii) transpose the resulting array.
+
+ Alternatively, you can read the data as a 1-D array and handle the
+ ordering yourself. For example::
+
+ read_record('i4').reshape(5, 4, order='F')
+
+ For records that contain several variables or mixed types (as opposed
+ to single scalar or array types), give them as separate arguments::
+
+ double precision :: a
+ integer :: b
+ write(1) a, b
+
+ record = f.read_record('`_.
+
+See the `WEKA website `_
+for more details about the ARFF format and available datasets.
+
+"""
+from .arffread import *
+from . import arffread
+
+__all__ = arffread.__all__
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/arffread.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/arffread.py
new file mode 100644
index 0000000..0aab368
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/arffread.py
@@ -0,0 +1,908 @@
+# Last Change: Mon Aug 20 08:00 PM 2007 J
+import re
+import datetime
+from collections import OrderedDict
+
+import numpy as np
+
+import csv
+import ctypes
+
+"""A module to read arff files."""
+
+__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
+
+# An Arff file is basically two parts:
+# - header
+# - data
+#
+# A header has each of its components starting by @META where META is one of
+# the keywords (attribute or relation, for now).
+
+# TODO:
+# - both integer and reals are treated as numeric -> the integer info
+# is lost!
+# - Replace ValueError by ParseError or something
+
+# We now can handle the following:
+# - numeric and nominal attributes
+# - missing values for numeric attributes
+
+r_meta = re.compile(r'^\s*@')
+# Match a comment
+r_comment = re.compile(r'^%')
+# Match an empty line
+r_empty = re.compile(r'^\s+$')
+# Match a header line, that is a line which starts by @ + a word
+r_headerline = re.compile(r'^\s*@\S*')
+r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
+r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
+r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
+
+r_nominal = re.compile(r'{(.+)}')
+r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
+
+# To get attributes name enclosed with ''
+r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
+# To get normal attributes
+r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
+
+# ------------------------
+# Module defined exception
+# ------------------------
+
+
class ArffError(IOError):
    """Base error raised for ARFF I/O problems."""
    pass
+
+
class ParseArffError(ArffError):
    """Raised when an ARFF file cannot be parsed (malformed header or data)."""
    pass
+
+
+# ----------
+# Attributes
+# ----------
class Attribute(object):
    """Abstract base class for ARFF attribute types.

    Subclasses set ``type_name`` and implement ``parse_attribute`` /
    ``parse_data`` for their specific ARFF type.
    """

    # Human-readable ARFF type name; overridden by subclasses.
    type_name = None

    def __init__(self, name):
        # Attribute name as declared on the @attribute header line.
        self.name = name
        # Range of legal values (e.g. nominal values or a date format);
        # None when the type has no restricted range.
        self.range = None
        # NumPy dtype used to store parsed values of this attribute.
        self.dtype = np.object_

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.
        """
        return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.
        """
        return None

    def __str__(self):
        """
        Return a short "name,type" text representation of the attribute.
        """
        return self.name + ',' + self.type_name
+
+
class NominalAttribute(Attribute):
    """ARFF nominal attribute: a closed set of string values, e.g. '{a,b,c}'."""

    type_name = 'nominal'

    def __init__(self, name, values):
        super().__init__(name)
        self.values = values
        self.range = values
        # Fixed-width byte-string dtype, wide enough for the longest value.
        # np.bytes_ is the surviving spelling of the np.string_ alias that
        # NumPy 2.0 removed; both name the same type on older NumPy.
        self.dtype = (np.bytes_, max(len(i) for i in values))

    @staticmethod
    def _get_nom_val(atrv):
        """Given a string containing a nominal type, returns a tuple of the
        possible values.

        A nominal type is defined as something framed between braces ({}).

        Parameters
        ----------
        atrv : str
            Nominal type definition

        Returns
        -------
        poss_vals : tuple
            possible values

        Examples
        --------
        >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}")
        ('floup', 'bouga', 'fl', 'ratata')
        """
        m = r_nominal.match(atrv)
        if m:
            attrs, _ = split_data_line(m.group(1))
            return tuple(attrs)
        else:
            raise ValueError("This does not look like a nominal string")

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For nominal attributes, the attribute string would be like
        '{<attr_1>, <attr_2>, <attr_3>}'.
        """
        if attr_string[0] == '{':
            values = cls._get_nom_val(attr_string)
            return cls(name, values)
        else:
            return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.

        The value is returned unchanged when it is one of the declared
        nominal values or the missing-data marker '?'; otherwise ValueError
        is raised.
        """
        if data_str in self.values:
            return data_str
        elif data_str == '?':
            return data_str
        else:
            raise ValueError("%s value not in %s" % (str(data_str),
                                                     str(self.values)))

    def __str__(self):
        msg = self.name + ",{"
        for i in range(len(self.values)-1):
            msg += self.values[i] + ","
        msg += self.values[-1]
        msg += "}"
        return msg
+
+
class NumericAttribute(Attribute):
    """ARFF numeric attribute ('numeric', 'int' or 'real'), stored as float."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'numeric'
        # np.float64 is the surviving spelling of the np.float_ alias that
        # NumPy 2.0 removed; both name the same type on older NumPy.
        self.dtype = np.float64

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For numeric attributes, the attribute string would be like
        'numeric' or 'int' or 'real'.
        """

        attr_string = attr_string.lower().strip()

        if(attr_string[:len('numeric')] == 'numeric' or
           attr_string[:len('int')] == 'int' or
           attr_string[:len('real')] == 'real'):
            return cls(name)
        else:
            return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.

        Parameters
        ----------
        data_str : str
            string to convert

        Returns
        -------
        f : float
            where float can be nan

        Examples
        --------
        >>> atr = NumericAttribute('atr')
        >>> atr.parse_data('1')
        1.0
        >>> atr.parse_data('1\\n')
        1.0
        >>> atr.parse_data('?\\n')
        nan
        """
        if '?' in data_str:
            return np.nan
        else:
            return float(data_str)

    def _basic_stats(self, data):
        # Sample-size correction factor n/(n-1) applied to the std estimate.
        nbfac = data.size * 1. / (data.size - 1)
        return (np.nanmin(data), np.nanmax(data),
                np.mean(data), np.std(data) * nbfac)
+
+
class StringAttribute(Attribute):
    """ARFF string attribute (free-form text); data parsing is not
    implemented for this type."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'string'

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For string attributes, the attribute string would be like
        'string'.
        """
        if attr_string.lower().strip().startswith('string'):
            return cls(name)
        return None
+
+
class DateAttribute(Attribute):
    """ARFF date attribute with a Java SimpleDateFormat-style pattern,
    parsed into numpy.datetime64 values."""

    def __init__(self, name, date_format, datetime_unit):
        super().__init__(name)
        # strptime-style format derived from the ARFF header.
        self.date_format = date_format
        # numpy datetime64 resolution code ("Y", "M", "D", "h", "m" or "s").
        self.datetime_unit = datetime_unit
        self.type_name = 'date'
        self.range = date_format
        self.dtype = np.datetime64(0, self.datetime_unit)

    @staticmethod
    def _get_date_format(atrv):
        """Translate the Java SimpleDateFormat pattern in *atrv* into a C
        strptime pattern and the matching datetime64 unit.

        Raises ValueError for time-zone tokens, unsupported patterns, or a
        missing format.
        """
        m = r_date.match(atrv)
        if m:
            pattern = m.group(1).strip()
            # convert time pattern from Java's SimpleDateFormat to C's format
            datetime_unit = None
            if "yyyy" in pattern:
                pattern = pattern.replace("yyyy", "%Y")
                datetime_unit = "Y"
            elif "yy" in pattern:
                # Fix: this branch previously read ``elif "yy":`` which is
                # always true, so a pattern without any year token was still
                # assigned a "Y" unit instead of being rejected below.
                pattern = pattern.replace("yy", "%y")
                datetime_unit = "Y"
            if "MM" in pattern:
                pattern = pattern.replace("MM", "%m")
                datetime_unit = "M"
            if "dd" in pattern:
                pattern = pattern.replace("dd", "%d")
                datetime_unit = "D"
            if "HH" in pattern:
                pattern = pattern.replace("HH", "%H")
                datetime_unit = "h"
            if "mm" in pattern:
                pattern = pattern.replace("mm", "%M")
                datetime_unit = "m"
            if "ss" in pattern:
                pattern = pattern.replace("ss", "%S")
                datetime_unit = "s"
            if "z" in pattern or "Z" in pattern:
                raise ValueError("Date type attributes with time zone not "
                                 "supported, yet")

            if datetime_unit is None:
                raise ValueError("Invalid or unsupported date format")

            return pattern, datetime_unit
        else:
            raise ValueError("Invalid or no date format")

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For date attributes, the attribute string would be like
        'date <format>'.
        """

        attr_string_lower = attr_string.lower().strip()

        if attr_string_lower[:len('date')] == 'date':
            date_format, datetime_unit = cls._get_date_format(attr_string)
            return cls(name, date_format, datetime_unit)
        else:
            return None

    def parse_data(self, data_str):
        """
        Parse a value of this type: '?' becomes NaT, anything else is parsed
        with this attribute's date format.
        """
        date_str = data_str.strip().strip("'").strip('"')
        if date_str == '?':
            return np.datetime64('NaT', self.datetime_unit)
        else:
            dt = datetime.datetime.strptime(date_str, self.date_format)
            return np.datetime64(dt).astype(
                "datetime64[%s]" % self.datetime_unit)

    def __str__(self):
        return super(DateAttribute, self).__str__() + ',' + self.date_format
+
+
class RelationalAttribute(Attribute):
    """ARFF relational attribute: a nested table of sub-attributes."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'relational'
        self.dtype = np.object_
        # Nested Attribute instances, filled in by read_relational_attribute.
        self.attributes = []
        # CSV dialect sniffed from the first nested row and reused afterwards.
        self.dialect = None

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For relational attributes, the attribute string would be like
        'relational'.
        """

        attr_string_lower = attr_string.lower().strip()

        if attr_string_lower[:len('relational')] == 'relational':
            return cls(name)
        else:
            return None

    def parse_data(self, data_str):
        """Parse one relational value: an escaped, newline-separated block
        of CSV rows, returned as a structured ndarray over the nested
        attributes."""
        # Same row-parsing loop as the module-level generator in _loadarff.
        elems = list(range(len(self.attributes)))

        # The nested table is stored escaped inside a single ARFF field.
        escaped_string = data_str.encode().decode("unicode-escape")

        row_tuples = []

        for raw in escaped_string.split("\n"):
            row, self.dialect = split_data_line(raw, self.dialect)

            row_tuples.append(tuple(
                [self.attributes[i].parse_data(row[i]) for i in elems]))

        return np.array(row_tuples,
                        [(a.name, a.dtype) for a in self.attributes])

    def __str__(self):
        return (super(RelationalAttribute, self).__str__() + '\n\t' +
                '\n\t'.join(str(a) for a in self.attributes))
+
+
+# -----------------
+# Various utilities
+# -----------------
def to_attribute(name, attr_string):
    """Build the Attribute subclass matching *attr_string*, trying each
    known attribute type in turn; raise ParseArffError when none of them
    recognises the string."""
    for attr_class in (NominalAttribute, NumericAttribute, DateAttribute,
                       StringAttribute, RelationalAttribute):
        parsed = attr_class.parse_attribute(name, attr_string)
        if parsed is not None:
            return parsed

    raise ParseArffError("unknown attribute %s" % attr_string)
+
+
def csv_sniffer_has_bug_last_field():
    """
    Checks if the bug https://bugs.python.org/issue30157 is unpatched.

    The result is computed once and cached as a function attribute.
    """
    cached = getattr(csv_sniffer_has_bug_last_field, "has_bug", None)
    if cached is None:
        # A quoted last field: a buggy Sniffer fails to detect the quotechar.
        sniffed = csv.Sniffer().sniff("3, 'a'")
        cached = sniffed.quotechar != "'"
        csv_sniffer_has_bug_last_field.has_bug = cached
    return cached
+
+
def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
    """
    Workaround for the bug https://bugs.python.org/issue30157 if it is
    unpatched.

    Re-runs the quote/delimiter regexes used internally by ``csv.Sniffer``
    on *sniff_line* and, when the buggy "quoted last field" expression is
    the one that matched, patches *dialect* in place (quotechar, delimiter,
    doublequote, skipinitialspace).
    """
    if csv_sniffer_has_bug_last_field():
        # Reuses code from the csv module. The named groups (<delim>,
        # <space>, <quote>) are required: they are looked up below through
        # regexp.groupindex, so the patterns must define them by name.
        right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'

        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)',  # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',  # .*?",
                      right_regex,  # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):  # ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(sniff_line)
            if matches:
                break

        # If it does not match the expression that was bugged, then this bug does not apply
        if restr != right_regex:
            return

        groupindex = regexp.groupindex

        # There is only one end of the string
        assert len(matches) == 1
        m = matches[0]

        n = groupindex['quote'] - 1
        quote = m[n]

        n = groupindex['delim'] - 1
        delim = m[n]

        n = groupindex['space'] - 1
        space = bool(m[n])

        dq_regexp = re.compile(
            r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" %
            {'delim': re.escape(delim), 'quote': quote}, re.MULTILINE
        )

        doublequote = bool(dq_regexp.search(sniff_line))

        dialect.quotechar = quote
        if delim in delimiters:
            dialect.delimiter = delim
        dialect.doublequote = doublequote
        dialect.skipinitialspace = space
+
+
def split_data_line(line, dialect=None):
    """Split one ARFF data line into fields using the csv module.

    Returns ``(row, dialect)``; the sniffed dialect is returned so callers
    can reuse it for subsequent lines instead of re-sniffing.
    """
    delimiters = ",\t"

    # This can not be done in a per reader basis, and relational fields
    # can be HUGE
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))

    # Remove the line end if any. endswith() is safe on an empty string,
    # unlike the previous ``line[-1]`` indexing which raised IndexError.
    if line.endswith('\n'):
        line = line[:-1]

    # Remove potential trailing whitespace
    line = line.strip()

    sniff_line = line

    # Add a delimiter if none is present, so that the csv.Sniffer
    # does not complain for a single-field CSV.
    if not any(d in line for d in delimiters):
        sniff_line += ","

    if dialect is None:
        dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
        workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line,
                                              dialect=dialect,
                                              delimiters=delimiters)

    row = next(csv.reader([line], dialect))

    return row, dialect
+
+
+# --------------
+# Parsing header
+# --------------
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in header (e.g., starts by @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space like characters before @attribute are allowed)
    * Works also if the attribute is spread on multilines.
    * Works if empty lines or comments are in between

    Parameters
    ----------
    iterable : iterator of str
        iterator over header lines; advanced past the attribute line.
    attribute : str
        the attribute line to parse.

    Returns
    -------
    attribute : Attribute
        the parsed attribute object (note: an Attribute instance, not the
        raw name/type strings).
    next_item : str
        next line to be parsed

    Examples
    --------
    For ``r"@attribute floupi real"`` the parsed attribute is named
    ``floupi`` with type ``real``.  A quoted name such as in
    ``r"@attribute 'floupi 2' real"`` keeps its embedded spaces
    (``floupi 2``).
    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        if r_comattrval.match(atrv):
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
            raise ValueError("multi line not supported yet")
    else:
        raise ValueError("First line unparsable: %s" % sattr)

    attribute = to_attribute(name, type)

    if type.lower() == 'relational':
        # Relational attributes own nested @attribute lines up to the
        # matching @end marker; consume them here.
        next_item = read_relational_attribute(iterable, attribute, next_item)

    return attribute, next_item
+
+
def tokenize_single_comma(val):
    """Split a quoted "'name' type" attribute string into (name, type)."""
    # XXX the caller has already matched this same regex; matching twice is
    # wasteful, but it keeps the code simple for now.
    match = r_comattrval.match(val)
    if not match:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return match.group(1).strip(), match.group(2).strip()
    except IndexError as err:
        raise ValueError("Error while tokenizing attribute") from err
+
+
def tokenize_single_wcomma(val):
    """Split an unquoted "name type" attribute string into (name, type)."""
    # XXX the caller has already matched this same regex; matching twice is
    # wasteful, but it keeps the code simple for now.
    match = r_wcomattrval.match(val)
    if not match:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return match.group(1).strip(), match.group(2).strip()
    except IndexError as err:
        raise ValueError("Error while tokenizing attribute") from err
+
+
def read_relational_attribute(ofile, relational_attribute, i):
    """Read the nested attributes of a relational attribute.

    Consumes header lines from *ofile* until the matching ``@end <name>``
    line, appending each parsed nested attribute to
    ``relational_attribute.attributes``.  Returns the first line after the
    ``@end`` marker.
    """
    # NOTE(review): the attribute name is interpolated unescaped into the
    # regex, so a name containing regex metacharacters would misbehave --
    # presumably names are plain identifiers; verify against callers.
    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
                                  relational_attribute.name + r'\s*$')

    while not r_end_relational.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                attr, i = tokenize_attribute(ofile, i)
                relational_attribute.attributes.append(attr)
            else:
                raise ValueError("Error parsing line %s" % i)
        else:
            i = next(ofile)

    # Skip the @end line itself and hand the next line back to the caller.
    i = next(ofile)
    return i
+
+
def read_header(ofile):
    """Read the header of the iterable ofile, returning the relation name
    and the list of parsed attributes."""
    line = next(ofile)

    # Skip any leading comment lines.
    while r_comment.match(line):
        line = next(ofile)

    # Everything up to the @data marker belongs to the header.
    relation = None
    attributes = []
    while not r_datameta.match(line):
        if r_headerline.match(line):
            if r_attribute.match(line):
                # tokenize_attribute advances the iterator itself.
                attr, line = tokenize_attribute(ofile, line)
                attributes.append(attr)
                continue
            relation_match = r_relation.match(line)
            if relation_match:
                relation = relation_match.group(1)
            else:
                raise ValueError("Error parsing line %s" % line)
        line = next(ofile)

    return relation, attributes
+
+
class MetaData(object):
    """Small container to keep useful information on a ARFF dataset.

    Knows about attributes names and types.

    Examples
    --------
    ::

        data, meta = loadarff('iris.arff')
        # This will print the attributes names of the iris.arff dataset
        for i in meta:
            print(i)
        # This works too
        meta.names()
        # Getting attribute type
        types = meta.types()

    Methods
    -------
    names
    types

    Notes
    -----
    Also maintains the list of attributes in order, i.e., doing for i in
    meta, where meta is an instance of MetaData, will return the
    different attribute names in the order they were defined.
    """
    def __init__(self, rel, attr):
        self.name = rel

        # Keep insertion order so iteration matches declaration order.
        self._attributes = OrderedDict()
        for attribute in attr:
            self._attributes[attribute.name] = attribute

    def __repr__(self):
        lines = ["Dataset: %s\n" % self.name]
        for attr_name, attribute in self._attributes.items():
            desc = "\t%s's type is %s" % (attr_name, attribute.type_name)
            if attribute.range:
                desc += ", range is %s" % str(attribute.range)
            lines.append(desc + '\n')
        return "".join(lines)

    def __iter__(self):
        return iter(self._attributes)

    def __getitem__(self, key):
        attribute = self._attributes[key]
        return (attribute.type_name, attribute.range)

    def names(self):
        """Return the list of attribute names.

        Returns
        -------
        attrnames : list of str
            The attribute names.
        """
        return list(self._attributes)

    def types(self):
        """Return the list of attribute types.

        Returns
        -------
        attr_types : list of str
            The attribute types.
        """
        return [attribute.type_name
                for attribute in self._attributes.values()]
+
+
def loadarff(f):
    """
    Read an arff file.

    The data is returned as a record array, which can be accessed much like
    a dictionary of NumPy arrays. For example, if one of the attributes is
    called 'pressure', then its first 10 data points can be accessed from the
    ``data`` record array like so: ``data['pressure'][0:10]``


    Parameters
    ----------
    f : file-like or str
        File-like object to read from, or filename to open.

    Returns
    -------
    data : record array
        The data of the arff file, accessible by attribute names.
    meta : `MetaData`
        Contains information about the arff file such as name and
        type of attributes, the relation (name of the dataset), etc.

    Raises
    ------
    ParseArffError
        This is raised if the given file is not ARFF-formatted.
    NotImplementedError
        The ARFF file has an attribute which is not supported yet.

    Notes
    -----

    This function should be able to read most arff files. Not
    implemented functionality include:

    * string type attributes
    * sparse data ({} in the file)

    It can read files with numeric and nominal attributes (and has basic
    support for date attributes), as well as files with missing data
    (? in the file), representing the data points as NaNs.

    Examples
    --------
    >>> from scipy.io import arff
    >>> from io import StringIO
    >>> content = \"\"\"
    ... @relation foo
    ... @attribute width  numeric
    ... @attribute height numeric
    ... @attribute color  {red,green,blue,yellow,black}
    ... @data
    ... 5.0,3.25,blue
    ... 4.5,3.75,green
    ... 3.0,4.00,red
    ... \"\"\"
    >>> f = StringIO(content)
    >>> data, meta = arff.loadarff(f)
    >>> data
    array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
          dtype=[('width', '<f8'), ('height', '<f8'), ('color', 'S6')])
    >>> meta
    Dataset: foo
    \twidth's type is numeric
    \theight's type is numeric
    \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')

    """
    if hasattr(f, 'read'):
        ofile = f
    else:
        ofile = open(f, 'rt')
    try:
        return _loadarff(ofile)
    finally:
        if ofile is not f:  # only close what we opened
            ofile.close()
+
+
def _loadarff(ofile):
    """Parse an already-open ARFF file object into ``(data, MetaData)``."""
    # Parse the header file
    try:
        rel, attr = read_header(ofile)
    except ValueError as e:
        msg = "Error while parsing header, error was: " + str(e)
        raise ParseArffError(msg) from e

    # Check whether we have a string attribute (not supported yet)
    hasstr = False
    for a in attr:
        if isinstance(a, StringAttribute):
            hasstr = True

    meta = MetaData(rel, attr)

    # XXX The following code is not great
    # Build the type descriptor descr and the list of convertors to convert
    # each attribute to the suitable type (which should match the one in
    # descr).

    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).

    if hasstr:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")

    ni = len(attr)

    def generator(row_iter, delim=','):
        # TODO: this is where we are spending time (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #     do not change here.
        #   - The function to convert a line to dtyped values could also be
        #     generated on the fly from a string and be executed instead of
        #     looping.
        #   - The regex are overkill: for comments, checking that a line
        #     starts by % should be enough and faster, and for empty lines,
        #     same thing --> this does not seem to change anything.

        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))

        dialect = None
        for raw in row_iter:
            # We do not abstract skipping comments and empty lines for
            # performance reasons.
            if r_comment.match(raw) or r_empty.match(raw):
                continue

            row, dialect = split_data_line(raw, dialect)

            yield tuple([attr[i].parse_data(row[i]) for i in elems])

    a = list(generator(ofile))
    # No error should happen here: it is a bug otherwise
    # NOTE: the comprehension below has its own scope (Python 3), so its
    # loop variable ``a`` does not clobber the row list ``a`` above.
    data = np.array(a, [(a.name, a.dtype) for a in attr])
    return data, meta
+
+
+# ----
+# Misc
+# ----
def basic_stats(data):
    """Return ``(min, max, mean, corrected_std)`` of *data*; min/max ignore
    NaNs, and the std is scaled by the n/(n-1) correction factor."""
    correction = data.size / (data.size - 1.0)
    return (np.nanmin(data), np.nanmax(data),
            np.mean(data), np.std(data) * correction)
+
+
def print_attribute(name, tp, data):
    """Print a one-line summary for attribute *name*: basic statistics for
    numeric data, the attribute description otherwise."""
    # Renamed locals: the original shadowed the builtins min, max and type.
    attr_type = tp.type_name
    if attr_type in ('numeric', 'real', 'integer'):
        dmin, dmax, dmean, dstd = basic_stats(data)
        print("%s,%s,%f,%f,%f,%f" % (name, attr_type, dmin, dmax, dmean, dstd))
    else:
        print(str(tp))
+
+
def test_weka(filename):
    """Load *filename* and print the attribute count, the row count, and a
    per-attribute summary (smoke test against weka datasets)."""
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for i in meta:
        # NOTE(review): meta[i] returns a (type_name, range) tuple, while
        # print_attribute reads tp.type_name -- this looks stale; verify.
        print_attribute(i, meta[i], data[i])
+
+
+# make sure nose does not find this as a test
+test_weka.__test__ = False
+
+
# Script entry point: summarize the ARFF file named by the first CLI argument.
if __name__ == '__main__':
    import sys
    filename = sys.argv[1]
    test_weka(filename)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/setup.py
new file mode 100644
index 0000000..0b2417a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/setup.py
@@ -0,0 +1,11 @@
+
def configuration(parent_package='io',top_path=None):
    """numpy.distutils configuration for the scipy.io.arff subpackage:
    registers the 'arff' package and bundles its tests directory."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('arff', parent_package, top_path)
    config.add_data_dir('tests')
    return config
+
+
# Allow building this subpackage standalone.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/iris.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/iris.arff
new file mode 100644
index 0000000..780480c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/iris.arff
@@ -0,0 +1,225 @@
+% 1. Title: Iris Plants Database
+%
+% 2. Sources:
+% (a) Creator: R.A. Fisher
+% (b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov)
+% (c) Date: July, 1988
+%
+% 3. Past Usage:
+% - Publications: too many to mention!!! Here are a few.
+% 1. Fisher,R.A. "The use of multiple measurements in taxonomic problems"
+% Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions
+% to Mathematical Statistics" (John Wiley, NY, 1950).
+% 2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis.
+% (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.
+% 3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System
+% Structure and Classification Rule for Recognition in Partially Exposed
+% Environments". IEEE Transactions on Pattern Analysis and Machine
+% Intelligence, Vol. PAMI-2, No. 1, 67-71.
+% -- Results:
+% -- very low misclassification rates (0% for the setosa class)
+% 4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE
+% Transactions on Information Theory, May 1972, 431-433.
+% -- Results:
+% -- very low misclassification rates again
+% 5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II
+% conceptual clustering system finds 3 classes in the data.
+%
+% 4. Relevant Information:
+% --- This is perhaps the best known database to be found in the pattern
+% recognition literature. Fisher's paper is a classic in the field
+% and is referenced frequently to this day. (See Duda & Hart, for
+% example.) The data set contains 3 classes of 50 instances each,
+% where each class refers to a type of iris plant. One class is
+% linearly separable from the other 2; the latter are NOT linearly
+% separable from each other.
+% --- Predicted attribute: class of iris plant.
+% --- This is an exceedingly simple domain.
+%
+% 5. Number of Instances: 150 (50 in each of three classes)
+%
+% 6. Number of Attributes: 4 numeric, predictive attributes and the class
+%
+% 7. Attribute Information:
+% 1. sepal length in cm
+% 2. sepal width in cm
+% 3. petal length in cm
+% 4. petal width in cm
+% 5. class:
+% -- Iris Setosa
+% -- Iris Versicolour
+% -- Iris Virginica
+%
+% 8. Missing Attribute Values: None
+%
+% Summary Statistics:
+% Min Max Mean SD Class Correlation
+% sepal length: 4.3 7.9 5.84 0.83 0.7826
+% sepal width: 2.0 4.4 3.05 0.43 -0.4194
+% petal length: 1.0 6.9 3.76 1.76 0.9490 (high!)
+% petal width: 0.1 2.5 1.20 0.76 0.9565 (high!)
+%
+% 9. Class Distribution: 33.3% for each of 3 classes.
+
+@RELATION iris
+
+@ATTRIBUTE sepallength REAL
+@ATTRIBUTE sepalwidth REAL
+@ATTRIBUTE petallength REAL
+@ATTRIBUTE petalwidth REAL
+@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica}
+
+@DATA
+5.1,3.5,1.4,0.2,Iris-setosa
+4.9,3.0,1.4,0.2,Iris-setosa
+4.7,3.2,1.3,0.2,Iris-setosa
+4.6,3.1,1.5,0.2,Iris-setosa
+5.0,3.6,1.4,0.2,Iris-setosa
+5.4,3.9,1.7,0.4,Iris-setosa
+4.6,3.4,1.4,0.3,Iris-setosa
+5.0,3.4,1.5,0.2,Iris-setosa
+4.4,2.9,1.4,0.2,Iris-setosa
+4.9,3.1,1.5,0.1,Iris-setosa
+5.4,3.7,1.5,0.2,Iris-setosa
+4.8,3.4,1.6,0.2,Iris-setosa
+4.8,3.0,1.4,0.1,Iris-setosa
+4.3,3.0,1.1,0.1,Iris-setosa
+5.8,4.0,1.2,0.2,Iris-setosa
+5.7,4.4,1.5,0.4,Iris-setosa
+5.4,3.9,1.3,0.4,Iris-setosa
+5.1,3.5,1.4,0.3,Iris-setosa
+5.7,3.8,1.7,0.3,Iris-setosa
+5.1,3.8,1.5,0.3,Iris-setosa
+5.4,3.4,1.7,0.2,Iris-setosa
+5.1,3.7,1.5,0.4,Iris-setosa
+4.6,3.6,1.0,0.2,Iris-setosa
+5.1,3.3,1.7,0.5,Iris-setosa
+4.8,3.4,1.9,0.2,Iris-setosa
+5.0,3.0,1.6,0.2,Iris-setosa
+5.0,3.4,1.6,0.4,Iris-setosa
+5.2,3.5,1.5,0.2,Iris-setosa
+5.2,3.4,1.4,0.2,Iris-setosa
+4.7,3.2,1.6,0.2,Iris-setosa
+4.8,3.1,1.6,0.2,Iris-setosa
+5.4,3.4,1.5,0.4,Iris-setosa
+5.2,4.1,1.5,0.1,Iris-setosa
+5.5,4.2,1.4,0.2,Iris-setosa
+4.9,3.1,1.5,0.1,Iris-setosa
+5.0,3.2,1.2,0.2,Iris-setosa
+5.5,3.5,1.3,0.2,Iris-setosa
+4.9,3.1,1.5,0.1,Iris-setosa
+4.4,3.0,1.3,0.2,Iris-setosa
+5.1,3.4,1.5,0.2,Iris-setosa
+5.0,3.5,1.3,0.3,Iris-setosa
+4.5,2.3,1.3,0.3,Iris-setosa
+4.4,3.2,1.3,0.2,Iris-setosa
+5.0,3.5,1.6,0.6,Iris-setosa
+5.1,3.8,1.9,0.4,Iris-setosa
+4.8,3.0,1.4,0.3,Iris-setosa
+5.1,3.8,1.6,0.2,Iris-setosa
+4.6,3.2,1.4,0.2,Iris-setosa
+5.3,3.7,1.5,0.2,Iris-setosa
+5.0,3.3,1.4,0.2,Iris-setosa
+7.0,3.2,4.7,1.4,Iris-versicolor
+6.4,3.2,4.5,1.5,Iris-versicolor
+6.9,3.1,4.9,1.5,Iris-versicolor
+5.5,2.3,4.0,1.3,Iris-versicolor
+6.5,2.8,4.6,1.5,Iris-versicolor
+5.7,2.8,4.5,1.3,Iris-versicolor
+6.3,3.3,4.7,1.6,Iris-versicolor
+4.9,2.4,3.3,1.0,Iris-versicolor
+6.6,2.9,4.6,1.3,Iris-versicolor
+5.2,2.7,3.9,1.4,Iris-versicolor
+5.0,2.0,3.5,1.0,Iris-versicolor
+5.9,3.0,4.2,1.5,Iris-versicolor
+6.0,2.2,4.0,1.0,Iris-versicolor
+6.1,2.9,4.7,1.4,Iris-versicolor
+5.6,2.9,3.6,1.3,Iris-versicolor
+6.7,3.1,4.4,1.4,Iris-versicolor
+5.6,3.0,4.5,1.5,Iris-versicolor
+5.8,2.7,4.1,1.0,Iris-versicolor
+6.2,2.2,4.5,1.5,Iris-versicolor
+5.6,2.5,3.9,1.1,Iris-versicolor
+5.9,3.2,4.8,1.8,Iris-versicolor
+6.1,2.8,4.0,1.3,Iris-versicolor
+6.3,2.5,4.9,1.5,Iris-versicolor
+6.1,2.8,4.7,1.2,Iris-versicolor
+6.4,2.9,4.3,1.3,Iris-versicolor
+6.6,3.0,4.4,1.4,Iris-versicolor
+6.8,2.8,4.8,1.4,Iris-versicolor
+6.7,3.0,5.0,1.7,Iris-versicolor
+6.0,2.9,4.5,1.5,Iris-versicolor
+5.7,2.6,3.5,1.0,Iris-versicolor
+5.5,2.4,3.8,1.1,Iris-versicolor
+5.5,2.4,3.7,1.0,Iris-versicolor
+5.8,2.7,3.9,1.2,Iris-versicolor
+6.0,2.7,5.1,1.6,Iris-versicolor
+5.4,3.0,4.5,1.5,Iris-versicolor
+6.0,3.4,4.5,1.6,Iris-versicolor
+6.7,3.1,4.7,1.5,Iris-versicolor
+6.3,2.3,4.4,1.3,Iris-versicolor
+5.6,3.0,4.1,1.3,Iris-versicolor
+5.5,2.5,4.0,1.3,Iris-versicolor
+5.5,2.6,4.4,1.2,Iris-versicolor
+6.1,3.0,4.6,1.4,Iris-versicolor
+5.8,2.6,4.0,1.2,Iris-versicolor
+5.0,2.3,3.3,1.0,Iris-versicolor
+5.6,2.7,4.2,1.3,Iris-versicolor
+5.7,3.0,4.2,1.2,Iris-versicolor
+5.7,2.9,4.2,1.3,Iris-versicolor
+6.2,2.9,4.3,1.3,Iris-versicolor
+5.1,2.5,3.0,1.1,Iris-versicolor
+5.7,2.8,4.1,1.3,Iris-versicolor
+6.3,3.3,6.0,2.5,Iris-virginica
+5.8,2.7,5.1,1.9,Iris-virginica
+7.1,3.0,5.9,2.1,Iris-virginica
+6.3,2.9,5.6,1.8,Iris-virginica
+6.5,3.0,5.8,2.2,Iris-virginica
+7.6,3.0,6.6,2.1,Iris-virginica
+4.9,2.5,4.5,1.7,Iris-virginica
+7.3,2.9,6.3,1.8,Iris-virginica
+6.7,2.5,5.8,1.8,Iris-virginica
+7.2,3.6,6.1,2.5,Iris-virginica
+6.5,3.2,5.1,2.0,Iris-virginica
+6.4,2.7,5.3,1.9,Iris-virginica
+6.8,3.0,5.5,2.1,Iris-virginica
+5.7,2.5,5.0,2.0,Iris-virginica
+5.8,2.8,5.1,2.4,Iris-virginica
+6.4,3.2,5.3,2.3,Iris-virginica
+6.5,3.0,5.5,1.8,Iris-virginica
+7.7,3.8,6.7,2.2,Iris-virginica
+7.7,2.6,6.9,2.3,Iris-virginica
+6.0,2.2,5.0,1.5,Iris-virginica
+6.9,3.2,5.7,2.3,Iris-virginica
+5.6,2.8,4.9,2.0,Iris-virginica
+7.7,2.8,6.7,2.0,Iris-virginica
+6.3,2.7,4.9,1.8,Iris-virginica
+6.7,3.3,5.7,2.1,Iris-virginica
+7.2,3.2,6.0,1.8,Iris-virginica
+6.2,2.8,4.8,1.8,Iris-virginica
+6.1,3.0,4.9,1.8,Iris-virginica
+6.4,2.8,5.6,2.1,Iris-virginica
+7.2,3.0,5.8,1.6,Iris-virginica
+7.4,2.8,6.1,1.9,Iris-virginica
+7.9,3.8,6.4,2.0,Iris-virginica
+6.4,2.8,5.6,2.2,Iris-virginica
+6.3,2.8,5.1,1.5,Iris-virginica
+6.1,2.6,5.6,1.4,Iris-virginica
+7.7,3.0,6.1,2.3,Iris-virginica
+6.3,3.4,5.6,2.4,Iris-virginica
+6.4,3.1,5.5,1.8,Iris-virginica
+6.0,3.0,4.8,1.8,Iris-virginica
+6.9,3.1,5.4,2.1,Iris-virginica
+6.7,3.1,5.6,2.4,Iris-virginica
+6.9,3.1,5.1,2.3,Iris-virginica
+5.8,2.7,5.1,1.9,Iris-virginica
+6.8,3.2,5.9,2.3,Iris-virginica
+6.7,3.3,5.7,2.5,Iris-virginica
+6.7,3.0,5.2,2.3,Iris-virginica
+6.3,2.5,5.0,1.9,Iris-virginica
+6.5,3.0,5.2,2.0,Iris-virginica
+6.2,3.4,5.4,2.3,Iris-virginica
+5.9,3.0,5.1,1.8,Iris-virginica
+%
+%
+%
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/missing.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/missing.arff
new file mode 100644
index 0000000..dedc64c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/missing.arff
@@ -0,0 +1,8 @@
+% This arff file contains some missing data
+@relation missing
+@attribute yop real
+@attribute yap real
+@data
+1,5
+2,4
+?,?
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/nodata.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/nodata.arff
new file mode 100644
index 0000000..5766aeb
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/nodata.arff
@@ -0,0 +1,11 @@
+@RELATION iris
+
+@ATTRIBUTE sepallength REAL
+@ATTRIBUTE sepalwidth REAL
+@ATTRIBUTE petallength REAL
+@ATTRIBUTE petalwidth REAL
+@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica}
+
+@DATA
+
+% This file has no data
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/quoted_nominal.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/quoted_nominal.arff
new file mode 100644
index 0000000..7cd16d1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/quoted_nominal.arff
@@ -0,0 +1,13 @@
+% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes
+% Spaces between elements are stripped by the parser
+
+@relation SOME_DATA
+@attribute age numeric
+@attribute smoker {'yes', 'no'}
+@data
+18, 'no'
+24, 'yes'
+44, 'no'
+56, 'no'
+89,'yes'
+11, 'no'
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/quoted_nominal_spaces.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/quoted_nominal_spaces.arff
new file mode 100644
index 0000000..c799127
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/quoted_nominal_spaces.arff
@@ -0,0 +1,13 @@
+% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes
+% Spaces inside quotes are NOT stripped by the parser
+
+@relation SOME_DATA
+@attribute age numeric
+@attribute smoker {' yes', 'no '}
+@data
+18,'no '
+24,' yes'
+44,'no '
+56,'no '
+89,' yes'
+11,'no '
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test1.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test1.arff
new file mode 100644
index 0000000..ccc8e0c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test1.arff
@@ -0,0 +1,10 @@
+@RELATION test1
+
+@ATTRIBUTE attr0 REAL
+@ATTRIBUTE attr1 REAL
+@ATTRIBUTE attr2 REAL
+@ATTRIBUTE attr3 REAL
+@ATTRIBUTE class {class0, class1, class2, class3}
+
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test10.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test10.arff
new file mode 100644
index 0000000..094ac50
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test10.arff
@@ -0,0 +1,8 @@
+@relation test9
+
+@attribute attr_relational relational
+ @attribute attr_number integer
+@end attr_relational
+
+@data
+'0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421
\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\n532\n533\n534\n535\n536\n537\n538\n539\n540\n541\n542\n543\n544\n545\n546\n547\n548\n549\n550\n551\n552\n553\n554\n555\n556\n557\n558\n559\n560\n561\n562\n563\n564\n565\n566\n567\n568\n569\n570\n571\n572\n573\n574\n575\n576\n577\n578\n579\n580\n581\n582\n583\n584\n585\n586\n587\n588\n589\n590\n591\n592\n593\n594\n595\n596\n597\n598\n599\n600\n601\n602\n603\n604\n605\n606\n607\n608\n609\n610\n611\n612\n613\n614\n615\n616\n617\n618\n619\n620\n621\n622\n623\n624\n625\n626\n627\n628\n629\n630\n631\n632\n633\n634\n635\n636\n637\n638\n639\n640\n641\n642\n643\n644\n645\n646\n647\n648\n649\n650\n651\n652\n653\n654\n655\n656\n657\n658\n659\n660\n661\n662\n663\n664\n665\n666\n667\n668\n669\n670\n671\n672\n673\n674\n675\n676\n677\n678\n679\n680\n681\n682\n683\n684\n685\n686\n687\n688\n689\n690\n691\n692\n693\n694\n695\n696\n697\n698\n699\n700\n701\n702\n703\n704\n705\n706\n707\n708\n709\n710\n711\n712\n713\n714\n715\n716\n717\n718\n719\n720\n721\n722\n723\n724\n725\n726\n727\n728\n729\n730\n731\n732\n733\n734\n735\n736\n737\n738\n739\n740\n741\n742\n743\n744\n745\n746\n747\n748\n749\n750\n751\n752\n753\n754\n755\n756\n757\n758\n759\n760\n761\n762\n763\n764\n765\n766\n767\n768\n769\n770\n771\n772\n773\n774\n775\n776\n777\n778\n779\n780\n781\n782\n783\n784\n785\n786\n787\n788\n789\n790\n791\n792\n793\n794\n795\n796\n797\n798\n799\n800\n801\n802\n803\n804\n805\n806\n807\n808\n809\n810\n811\n812\n813\n814\n815\n816\n817\n818\n819\n820\n821
\n822\n823\n824\n825\n826\n827\n828\n829\n830\n831\n832\n833\n834\n835\n836\n837\n838\n839\n840\n841\n842\n843\n844\n845\n846\n847\n848\n849\n850\n851\n852\n853\n854\n855\n856\n857\n858\n859\n860\n861\n862\n863\n864\n865\n866\n867\n868\n869\n870\n871\n872\n873\n874\n875\n876\n877\n878\n879\n880\n881\n882\n883\n884\n885\n886\n887\n888\n889\n890\n891\n892\n893\n894\n895\n896\n897\n898\n899\n900\n901\n902\n903\n904\n905\n906\n907\n908\n909\n910\n911\n912\n913\n914\n915\n916\n917\n918\n919\n920\n921\n922\n923\n924\n925\n926\n927\n928\n929\n930\n931\n932\n933\n934\n935\n936\n937\n938\n939\n940\n941\n942\n943\n944\n945\n946\n947\n948\n949\n950\n951\n952\n953\n954\n955\n956\n957\n958\n959\n960\n961\n962\n963\n964\n965\n966\n967\n968\n969\n970\n971\n972\n973\n974\n975\n976\n977\n978\n979\n980\n981\n982\n983\n984\n985\n986\n987\n988\n989\n990\n991\n992\n993\n994\n995\n996\n997\n998\n999\n1000\n1001\n1002\n1003\n1004\n1005\n1006\n1007\n1008\n1009\n1010\n1011\n1012\n1013\n1014\n1015\n1016\n1017\n1018\n1019\n1020\n1021\n1022\n1023\n1024\n1025\n1026\n1027\n1028\n1029\n1030\n1031\n1032\n1033\n1034\n1035\n1036\n1037\n1038\n1039\n1040\n1041\n1042\n1043\n1044\n1045\n1046\n1047\n1048\n1049\n1050\n1051\n1052\n1053\n1054\n1055\n1056\n1057\n1058\n1059\n1060\n1061\n1062\n1063\n1064\n1065\n1066\n1067\n1068\n1069\n1070\n1071\n1072\n1073\n1074\n1075\n1076\n1077\n1078\n1079\n1080\n1081\n1082\n1083\n1084\n1085\n1086\n1087\n1088\n1089\n1090\n1091\n1092\n1093\n1094\n1095\n1096\n1097\n1098\n1099\n1100\n1101\n1102\n1103\n1104\n1105\n1106\n1107\n1108\n1109\n1110\n1111\n1112\n1113\n1114\n1115\n1116\n1117\n1118\n1119\n1120\n1121\n1122\n1123\n1124\n1125\n1126\n1127\n1128\n1129\n1130\n1131\n1132\n1133\n1134\n1135\n1136\n1137\n1138\n1139\n1140\n1141\n1142\n1143\n1144\n1145\n1146\n1147\n1148\n1149\n1150\n1151\n1152\n1153\n1154\n1155\n1156\n1157\n1158\n1159\n1160\n1161\n1162\n1163\n1164\n1165\n1166\n1167\n1168\n1169\n1170\n1171\n1172\n1173\n1174\n1175\n1176\n1177\n1178\n1179\n1180\n1181\n1182\n1183\n1184
\n1185\n1186\n1187\n1188\n1189\n1190\n1191\n1192\n1193\n1194\n1195\n1196\n1197\n1198\n1199\n1200\n1201\n1202\n1203\n1204\n1205\n1206\n1207\n1208\n1209\n1210\n1211\n1212\n1213\n1214\n1215\n1216\n1217\n1218\n1219\n1220\n1221\n1222\n1223\n1224\n1225\n1226\n1227\n1228\n1229\n1230\n1231\n1232\n1233\n1234\n1235\n1236\n1237\n1238\n1239\n1240\n1241\n1242\n1243\n1244\n1245\n1246\n1247\n1248\n1249\n1250\n1251\n1252\n1253\n1254\n1255\n1256\n1257\n1258\n1259\n1260\n1261\n1262\n1263\n1264\n1265\n1266\n1267\n1268\n1269\n1270\n1271\n1272\n1273\n1274\n1275\n1276\n1277\n1278\n1279\n1280\n1281\n1282\n1283\n1284\n1285\n1286\n1287\n1288\n1289\n1290\n1291\n1292\n1293\n1294\n1295\n1296\n1297\n1298\n1299\n1300\n1301\n1302\n1303\n1304\n1305\n1306\n1307\n1308\n1309\n1310\n1311\n1312\n1313\n1314\n1315\n1316\n1317\n1318\n1319\n1320\n1321\n1322\n1323\n1324\n1325\n1326\n1327\n1328\n1329\n1330\n1331\n1332\n1333\n1334\n1335\n1336\n1337\n1338\n1339\n1340\n1341\n1342\n1343\n1344\n1345\n1346\n1347\n1348\n1349\n1350\n1351\n1352\n1353\n1354\n1355\n1356\n1357\n1358\n1359\n1360\n1361\n1362\n1363\n1364\n1365\n1366\n1367\n1368\n1369\n1370\n1371\n1372\n1373\n1374\n1375\n1376\n1377\n1378\n1379\n1380\n1381\n1382\n1383\n1384\n1385\n1386\n1387\n1388\n1389\n1390\n1391\n1392\n1393\n1394\n1395\n1396\n1397\n1398\n1399\n1400\n1401\n1402\n1403\n1404\n1405\n1406\n1407\n1408\n1409\n1410\n1411\n1412\n1413\n1414\n1415\n1416\n1417\n1418\n1419\n1420\n1421\n1422\n1423\n1424\n1425\n1426\n1427\n1428\n1429\n1430\n1431\n1432\n1433\n1434\n1435\n1436\n1437\n1438\n1439\n1440\n1441\n1442\n1443\n1444\n1445\n1446\n1447\n1448\n1449\n1450\n1451\n1452\n1453\n1454\n1455\n1456\n1457\n1458\n1459\n1460\n1461\n1462\n1463\n1464\n1465\n1466\n1467\n1468\n1469\n1470\n1471\n1472\n1473\n1474\n1475\n1476\n1477\n1478\n1479\n1480\n1481\n1482\n1483\n1484\n1485\n1486\n1487\n1488\n1489\n1490\n1491\n1492\n1493\n1494\n1495\n1496\n1497\n1498\n1499\n1500\n1501\n1502\n1503\n1504\n1505\n1506\n1507\n1508\n1509\n1510\n1511\n1512\n1513\n1514\n1515\n1516\n1517\n
1518\n1519\n1520\n1521\n1522\n1523\n1524\n1525\n1526\n1527\n1528\n1529\n1530\n1531\n1532\n1533\n1534\n1535\n1536\n1537\n1538\n1539\n1540\n1541\n1542\n1543\n1544\n1545\n1546\n1547\n1548\n1549\n1550\n1551\n1552\n1553\n1554\n1555\n1556\n1557\n1558\n1559\n1560\n1561\n1562\n1563\n1564\n1565\n1566\n1567\n1568\n1569\n1570\n1571\n1572\n1573\n1574\n1575\n1576\n1577\n1578\n1579\n1580\n1581\n1582\n1583\n1584\n1585\n1586\n1587\n1588\n1589\n1590\n1591\n1592\n1593\n1594\n1595\n1596\n1597\n1598\n1599\n1600\n1601\n1602\n1603\n1604\n1605\n1606\n1607\n1608\n1609\n1610\n1611\n1612\n1613\n1614\n1615\n1616\n1617\n1618\n1619\n1620\n1621\n1622\n1623\n1624\n1625\n1626\n1627\n1628\n1629\n1630\n1631\n1632\n1633\n1634\n1635\n1636\n1637\n1638\n1639\n1640\n1641\n1642\n1643\n1644\n1645\n1646\n1647\n1648\n1649\n1650\n1651\n1652\n1653\n1654\n1655\n1656\n1657\n1658\n1659\n1660\n1661\n1662\n1663\n1664\n1665\n1666\n1667\n1668\n1669\n1670\n1671\n1672\n1673\n1674\n1675\n1676\n1677\n1678\n1679\n1680\n1681\n1682\n1683\n1684\n1685\n1686\n1687\n1688\n1689\n1690\n1691\n1692\n1693\n1694\n1695\n1696\n1697\n1698\n1699\n1700\n1701\n1702\n1703\n1704\n1705\n1706\n1707\n1708\n1709\n1710\n1711\n1712\n1713\n1714\n1715\n1716\n1717\n1718\n1719\n1720\n1721\n1722\n1723\n1724\n1725\n1726\n1727\n1728\n1729\n1730\n1731\n1732\n1733\n1734\n1735\n1736\n1737\n1738\n1739\n1740\n1741\n1742\n1743\n1744\n1745\n1746\n1747\n1748\n1749\n1750\n1751\n1752\n1753\n1754\n1755\n1756\n1757\n1758\n1759\n1760\n1761\n1762\n1763\n1764\n1765\n1766\n1767\n1768\n1769\n1770\n1771\n1772\n1773\n1774\n1775\n1776\n1777\n1778\n1779\n1780\n1781\n1782\n1783\n1784\n1785\n1786\n1787\n1788\n1789\n1790\n1791\n1792\n1793\n1794\n1795\n1796\n1797\n1798\n1799\n1800\n1801\n1802\n1803\n1804\n1805\n1806\n1807\n1808\n1809\n1810\n1811\n1812\n1813\n1814\n1815\n1816\n1817\n1818\n1819\n1820\n1821\n1822\n1823\n1824\n1825\n1826\n1827\n1828\n1829\n1830\n1831\n1832\n1833\n1834\n1835\n1836\n1837\n1838\n1839\n1840\n1841\n1842\n1843\n1844\n1845\n1846\n1847\n1848\n1849\n1850\n18
51\n1852\n1853\n1854\n1855\n1856\n1857\n1858\n1859\n1860\n1861\n1862\n1863\n1864\n1865\n1866\n1867\n1868\n1869\n1870\n1871\n1872\n1873\n1874\n1875\n1876\n1877\n1878\n1879\n1880\n1881\n1882\n1883\n1884\n1885\n1886\n1887\n1888\n1889\n1890\n1891\n1892\n1893\n1894\n1895\n1896\n1897\n1898\n1899\n1900\n1901\n1902\n1903\n1904\n1905\n1906\n1907\n1908\n1909\n1910\n1911\n1912\n1913\n1914\n1915\n1916\n1917\n1918\n1919\n1920\n1921\n1922\n1923\n1924\n1925\n1926\n1927\n1928\n1929\n1930\n1931\n1932\n1933\n1934\n1935\n1936\n1937\n1938\n1939\n1940\n1941\n1942\n1943\n1944\n1945\n1946\n1947\n1948\n1949\n1950\n1951\n1952\n1953\n1954\n1955\n1956\n1957\n1958\n1959\n1960\n1961\n1962\n1963\n1964\n1965\n1966\n1967\n1968\n1969\n1970\n1971\n1972\n1973\n1974\n1975\n1976\n1977\n1978\n1979\n1980\n1981\n1982\n1983\n1984\n1985\n1986\n1987\n1988\n1989\n1990\n1991\n1992\n1993\n1994\n1995\n1996\n1997\n1998\n1999\n2000\n2001\n2002\n2003\n2004\n2005\n2006\n2007\n2008\n2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n2020\n2021\n2022\n2023\n2024\n2025\n2026\n2027\n2028\n2029\n2030\n2031\n2032\n2033\n2034\n2035\n2036\n2037\n2038\n2039\n2040\n2041\n2042\n2043\n2044\n2045\n2046\n2047\n2048\n2049\n2050\n2051\n2052\n2053\n2054\n2055\n2056\n2057\n2058\n2059\n2060\n2061\n2062\n2063\n2064\n2065\n2066\n2067\n2068\n2069\n2070\n2071\n2072\n2073\n2074\n2075\n2076\n2077\n2078\n2079\n2080\n2081\n2082\n2083\n2084\n2085\n2086\n2087\n2088\n2089\n2090\n2091\n2092\n2093\n2094\n2095\n2096\n2097\n2098\n2099\n2100\n2101\n2102\n2103\n2104\n2105\n2106\n2107\n2108\n2109\n2110\n2111\n2112\n2113\n2114\n2115\n2116\n2117\n2118\n2119\n2120\n2121\n2122\n2123\n2124\n2125\n2126\n2127\n2128\n2129\n2130\n2131\n2132\n2133\n2134\n2135\n2136\n2137\n2138\n2139\n2140\n2141\n2142\n2143\n2144\n2145\n2146\n2147\n2148\n2149\n2150\n2151\n2152\n2153\n2154\n2155\n2156\n2157\n2158\n2159\n2160\n2161\n2162\n2163\n2164\n2165\n2166\n2167\n2168\n2169\n2170\n2171\n2172\n2173\n2174\n2175\n2176\n2177\n2178\n2179\n2180\n2181\n2182\n2183\n2184
\n2185\n2186\n2187\n2188\n2189\n2190\n2191\n2192\n2193\n2194\n2195\n2196\n2197\n2198\n2199\n2200\n2201\n2202\n2203\n2204\n2205\n2206\n2207\n2208\n2209\n2210\n2211\n2212\n2213\n2214\n2215\n2216\n2217\n2218\n2219\n2220\n2221\n2222\n2223\n2224\n2225\n2226\n2227\n2228\n2229\n2230\n2231\n2232\n2233\n2234\n2235\n2236\n2237\n2238\n2239\n2240\n2241\n2242\n2243\n2244\n2245\n2246\n2247\n2248\n2249\n2250\n2251\n2252\n2253\n2254\n2255\n2256\n2257\n2258\n2259\n2260\n2261\n2262\n2263\n2264\n2265\n2266\n2267\n2268\n2269\n2270\n2271\n2272\n2273\n2274\n2275\n2276\n2277\n2278\n2279\n2280\n2281\n2282\n2283\n2284\n2285\n2286\n2287\n2288\n2289\n2290\n2291\n2292\n2293\n2294\n2295\n2296\n2297\n2298\n2299\n2300\n2301\n2302\n2303\n2304\n2305\n2306\n2307\n2308\n2309\n2310\n2311\n2312\n2313\n2314\n2315\n2316\n2317\n2318\n2319\n2320\n2321\n2322\n2323\n2324\n2325\n2326\n2327\n2328\n2329\n2330\n2331\n2332\n2333\n2334\n2335\n2336\n2337\n2338\n2339\n2340\n2341\n2342\n2343\n2344\n2345\n2346\n2347\n2348\n2349\n2350\n2351\n2352\n2353\n2354\n2355\n2356\n2357\n2358\n2359\n2360\n2361\n2362\n2363\n2364\n2365\n2366\n2367\n2368\n2369\n2370\n2371\n2372\n2373\n2374\n2375\n2376\n2377\n2378\n2379\n2380\n2381\n2382\n2383\n2384\n2385\n2386\n2387\n2388\n2389\n2390\n2391\n2392\n2393\n2394\n2395\n2396\n2397\n2398\n2399\n2400\n2401\n2402\n2403\n2404\n2405\n2406\n2407\n2408\n2409\n2410\n2411\n2412\n2413\n2414\n2415\n2416\n2417\n2418\n2419\n2420\n2421\n2422\n2423\n2424\n2425\n2426\n2427\n2428\n2429\n2430\n2431\n2432\n2433\n2434\n2435\n2436\n2437\n2438\n2439\n2440\n2441\n2442\n2443\n2444\n2445\n2446\n2447\n2448\n2449\n2450\n2451\n2452\n2453\n2454\n2455\n2456\n2457\n2458\n2459\n2460\n2461\n2462\n2463\n2464\n2465\n2466\n2467\n2468\n2469\n2470\n2471\n2472\n2473\n2474\n2475\n2476\n2477\n2478\n2479\n2480\n2481\n2482\n2483\n2484\n2485\n2486\n2487\n2488\n2489\n2490\n2491\n2492\n2493\n2494\n2495\n2496\n2497\n2498\n2499\n2500\n2501\n2502\n2503\n2504\n2505\n2506\n2507\n2508\n2509\n2510\n2511\n2512\n2513\n2514\n2515\n2516\n2517\n
2518\n2519\n2520\n2521\n2522\n2523\n2524\n2525\n2526\n2527\n2528\n2529\n2530\n2531\n2532\n2533\n2534\n2535\n2536\n2537\n2538\n2539\n2540\n2541\n2542\n2543\n2544\n2545\n2546\n2547\n2548\n2549\n2550\n2551\n2552\n2553\n2554\n2555\n2556\n2557\n2558\n2559\n2560\n2561\n2562\n2563\n2564\n2565\n2566\n2567\n2568\n2569\n2570\n2571\n2572\n2573\n2574\n2575\n2576\n2577\n2578\n2579\n2580\n2581\n2582\n2583\n2584\n2585\n2586\n2587\n2588\n2589\n2590\n2591\n2592\n2593\n2594\n2595\n2596\n2597\n2598\n2599\n2600\n2601\n2602\n2603\n2604\n2605\n2606\n2607\n2608\n2609\n2610\n2611\n2612\n2613\n2614\n2615\n2616\n2617\n2618\n2619\n2620\n2621\n2622\n2623\n2624\n2625\n2626\n2627\n2628\n2629\n2630\n2631\n2632\n2633\n2634\n2635\n2636\n2637\n2638\n2639\n2640\n2641\n2642\n2643\n2644\n2645\n2646\n2647\n2648\n2649\n2650\n2651\n2652\n2653\n2654\n2655\n2656\n2657\n2658\n2659\n2660\n2661\n2662\n2663\n2664\n2665\n2666\n2667\n2668\n2669\n2670\n2671\n2672\n2673\n2674\n2675\n2676\n2677\n2678\n2679\n2680\n2681\n2682\n2683\n2684\n2685\n2686\n2687\n2688\n2689\n2690\n2691\n2692\n2693\n2694\n2695\n2696\n2697\n2698\n2699\n2700\n2701\n2702\n2703\n2704\n2705\n2706\n2707\n2708\n2709\n2710\n2711\n2712\n2713\n2714\n2715\n2716\n2717\n2718\n2719\n2720\n2721\n2722\n2723\n2724\n2725\n2726\n2727\n2728\n2729\n2730\n2731\n2732\n2733\n2734\n2735\n2736\n2737\n2738\n2739\n2740\n2741\n2742\n2743\n2744\n2745\n2746\n2747\n2748\n2749\n2750\n2751\n2752\n2753\n2754\n2755\n2756\n2757\n2758\n2759\n2760\n2761\n2762\n2763\n2764\n2765\n2766\n2767\n2768\n2769\n2770\n2771\n2772\n2773\n2774\n2775\n2776\n2777\n2778\n2779\n2780\n2781\n2782\n2783\n2784\n2785\n2786\n2787\n2788\n2789\n2790\n2791\n2792\n2793\n2794\n2795\n2796\n2797\n2798\n2799\n2800\n2801\n2802\n2803\n2804\n2805\n2806\n2807\n2808\n2809\n2810\n2811\n2812\n2813\n2814\n2815\n2816\n2817\n2818\n2819\n2820\n2821\n2822\n2823\n2824\n2825\n2826\n2827\n2828\n2829\n2830\n2831\n2832\n2833\n2834\n2835\n2836\n2837\n2838\n2839\n2840\n2841\n2842\n2843\n2844\n2845\n2846\n2847\n2848\n2849\n2850\n28
51\n2852\n2853\n2854\n2855\n2856\n2857\n2858\n2859\n2860\n2861\n2862\n2863\n2864\n2865\n2866\n2867\n2868\n2869\n2870\n2871\n2872\n2873\n2874\n2875\n2876\n2877\n2878\n2879\n2880\n2881\n2882\n2883\n2884\n2885\n2886\n2887\n2888\n2889\n2890\n2891\n2892\n2893\n2894\n2895\n2896\n2897\n2898\n2899\n2900\n2901\n2902\n2903\n2904\n2905\n2906\n2907\n2908\n2909\n2910\n2911\n2912\n2913\n2914\n2915\n2916\n2917\n2918\n2919\n2920\n2921\n2922\n2923\n2924\n2925\n2926\n2927\n2928\n2929\n2930\n2931\n2932\n2933\n2934\n2935\n2936\n2937\n2938\n2939\n2940\n2941\n2942\n2943\n2944\n2945\n2946\n2947\n2948\n2949\n2950\n2951\n2952\n2953\n2954\n2955\n2956\n2957\n2958\n2959\n2960\n2961\n2962\n2963\n2964\n2965\n2966\n2967\n2968\n2969\n2970\n2971\n2972\n2973\n2974\n2975\n2976\n2977\n2978\n2979\n2980\n2981\n2982\n2983\n2984\n2985\n2986\n2987\n2988\n2989\n2990\n2991\n2992\n2993\n2994\n2995\n2996\n2997\n2998\n2999\n3000\n3001\n3002\n3003\n3004\n3005\n3006\n3007\n3008\n3009\n3010\n3011\n3012\n3013\n3014\n3015\n3016\n3017\n3018\n3019\n3020\n3021\n3022\n3023\n3024\n3025\n3026\n3027\n3028\n3029\n3030\n3031\n3032\n3033\n3034\n3035\n3036\n3037\n3038\n3039\n3040\n3041\n3042\n3043\n3044\n3045\n3046\n3047\n3048\n3049\n3050\n3051\n3052\n3053\n3054\n3055\n3056\n3057\n3058\n3059\n3060\n3061\n3062\n3063\n3064\n3065\n3066\n3067\n3068\n3069\n3070\n3071\n3072\n3073\n3074\n3075\n3076\n3077\n3078\n3079\n3080\n3081\n3082\n3083\n3084\n3085\n3086\n3087\n3088\n3089\n3090\n3091\n3092\n3093\n3094\n3095\n3096\n3097\n3098\n3099\n3100\n3101\n3102\n3103\n3104\n3105\n3106\n3107\n3108\n3109\n3110\n3111\n3112\n3113\n3114\n3115\n3116\n3117\n3118\n3119\n3120\n3121\n3122\n3123\n3124\n3125\n3126\n3127\n3128\n3129\n3130\n3131\n3132\n3133\n3134\n3135\n3136\n3137\n3138\n3139\n3140\n3141\n3142\n3143\n3144\n3145\n3146\n3147\n3148\n3149\n3150\n3151\n3152\n3153\n3154\n3155\n3156\n3157\n3158\n3159\n3160\n3161\n3162\n3163\n3164\n3165\n3166\n3167\n3168\n3169\n3170\n3171\n3172\n3173\n3174\n3175\n3176\n3177\n3178\n3179\n3180\n3181\n3182\n3183\n3184
\n3185\n3186\n3187\n3188\n3189\n3190\n3191\n3192\n3193\n3194\n3195\n3196\n3197\n3198\n3199\n3200\n3201\n3202\n3203\n3204\n3205\n3206\n3207\n3208\n3209\n3210\n3211\n3212\n3213\n3214\n3215\n3216\n3217\n3218\n3219\n3220\n3221\n3222\n3223\n3224\n3225\n3226\n3227\n3228\n3229\n3230\n3231\n3232\n3233\n3234\n3235\n3236\n3237\n3238\n3239\n3240\n3241\n3242\n3243\n3244\n3245\n3246\n3247\n3248\n3249\n3250\n3251\n3252\n3253\n3254\n3255\n3256\n3257\n3258\n3259\n3260\n3261\n3262\n3263\n3264\n3265\n3266\n3267\n3268\n3269\n3270\n3271\n3272\n3273\n3274\n3275\n3276\n3277\n3278\n3279\n3280\n3281\n3282\n3283\n3284\n3285\n3286\n3287\n3288\n3289\n3290\n3291\n3292\n3293\n3294\n3295\n3296\n3297\n3298\n3299\n3300\n3301\n3302\n3303\n3304\n3305\n3306\n3307\n3308\n3309\n3310\n3311\n3312\n3313\n3314\n3315\n3316\n3317\n3318\n3319\n3320\n3321\n3322\n3323\n3324\n3325\n3326\n3327\n3328\n3329\n3330\n3331\n3332\n3333\n3334\n3335\n3336\n3337\n3338\n3339\n3340\n3341\n3342\n3343\n3344\n3345\n3346\n3347\n3348\n3349\n3350\n3351\n3352\n3353\n3354\n3355\n3356\n3357\n3358\n3359\n3360\n3361\n3362\n3363\n3364\n3365\n3366\n3367\n3368\n3369\n3370\n3371\n3372\n3373\n3374\n3375\n3376\n3377\n3378\n3379\n3380\n3381\n3382\n3383\n3384\n3385\n3386\n3387\n3388\n3389\n3390\n3391\n3392\n3393\n3394\n3395\n3396\n3397\n3398\n3399\n3400\n3401\n3402\n3403\n3404\n3405\n3406\n3407\n3408\n3409\n3410\n3411\n3412\n3413\n3414\n3415\n3416\n3417\n3418\n3419\n3420\n3421\n3422\n3423\n3424\n3425\n3426\n3427\n3428\n3429\n3430\n3431\n3432\n3433\n3434\n3435\n3436\n3437\n3438\n3439\n3440\n3441\n3442\n3443\n3444\n3445\n3446\n3447\n3448\n3449\n3450\n3451\n3452\n3453\n3454\n3455\n3456\n3457\n3458\n3459\n3460\n3461\n3462\n3463\n3464\n3465\n3466\n3467\n3468\n3469\n3470\n3471\n3472\n3473\n3474\n3475\n3476\n3477\n3478\n3479\n3480\n3481\n3482\n3483\n3484\n3485\n3486\n3487\n3488\n3489\n3490\n3491\n3492\n3493\n3494\n3495\n3496\n3497\n3498\n3499\n3500\n3501\n3502\n3503\n3504\n3505\n3506\n3507\n3508\n3509\n3510\n3511\n3512\n3513\n3514\n3515\n3516\n3517\n
3518\n3519\n3520\n3521\n3522\n3523\n3524\n3525\n3526\n3527\n3528\n3529\n3530\n3531\n3532\n3533\n3534\n3535\n3536\n3537\n3538\n3539\n3540\n3541\n3542\n3543\n3544\n3545\n3546\n3547\n3548\n3549\n3550\n3551\n3552\n3553\n3554\n3555\n3556\n3557\n3558\n3559\n3560\n3561\n3562\n3563\n3564\n3565\n3566\n3567\n3568\n3569\n3570\n3571\n3572\n3573\n3574\n3575\n3576\n3577\n3578\n3579\n3580\n3581\n3582\n3583\n3584\n3585\n3586\n3587\n3588\n3589\n3590\n3591\n3592\n3593\n3594\n3595\n3596\n3597\n3598\n3599\n3600\n3601\n3602\n3603\n3604\n3605\n3606\n3607\n3608\n3609\n3610\n3611\n3612\n3613\n3614\n3615\n3616\n3617\n3618\n3619\n3620\n3621\n3622\n3623\n3624\n3625\n3626\n3627\n3628\n3629\n3630\n3631\n3632\n3633\n3634\n3635\n3636\n3637\n3638\n3639\n3640\n3641\n3642\n3643\n3644\n3645\n3646\n3647\n3648\n3649\n3650\n3651\n3652\n3653\n3654\n3655\n3656\n3657\n3658\n3659\n3660\n3661\n3662\n3663\n3664\n3665\n3666\n3667\n3668\n3669\n3670\n3671\n3672\n3673\n3674\n3675\n3676\n3677\n3678\n3679\n3680\n3681\n3682\n3683\n3684\n3685\n3686\n3687\n3688\n3689\n3690\n3691\n3692\n3693\n3694\n3695\n3696\n3697\n3698\n3699\n3700\n3701\n3702\n3703\n3704\n3705\n3706\n3707\n3708\n3709\n3710\n3711\n3712\n3713\n3714\n3715\n3716\n3717\n3718\n3719\n3720\n3721\n3722\n3723\n3724\n3725\n3726\n3727\n3728\n3729\n3730\n3731\n3732\n3733\n3734\n3735\n3736\n3737\n3738\n3739\n3740\n3741\n3742\n3743\n3744\n3745\n3746\n3747\n3748\n3749\n3750\n3751\n3752\n3753\n3754\n3755\n3756\n3757\n3758\n3759\n3760\n3761\n3762\n3763\n3764\n3765\n3766\n3767\n3768\n3769\n3770\n3771\n3772\n3773\n3774\n3775\n3776\n3777\n3778\n3779\n3780\n3781\n3782\n3783\n3784\n3785\n3786\n3787\n3788\n3789\n3790\n3791\n3792\n3793\n3794\n3795\n3796\n3797\n3798\n3799\n3800\n3801\n3802\n3803\n3804\n3805\n3806\n3807\n3808\n3809\n3810\n3811\n3812\n3813\n3814\n3815\n3816\n3817\n3818\n3819\n3820\n3821\n3822\n3823\n3824\n3825\n3826\n3827\n3828\n3829\n3830\n3831\n3832\n3833\n3834\n3835\n3836\n3837\n3838\n3839\n3840\n3841\n3842\n3843\n3844\n3845\n3846\n3847\n3848\n3849\n3850\n38
51\n3852\n3853\n3854\n3855\n3856\n3857\n3858\n3859\n3860\n3861\n3862\n3863\n3864\n3865\n3866\n3867\n3868\n3869\n3870\n3871\n3872\n3873\n3874\n3875\n3876\n3877\n3878\n3879\n3880\n3881\n3882\n3883\n3884\n3885\n3886\n3887\n3888\n3889\n3890\n3891\n3892\n3893\n3894\n3895\n3896\n3897\n3898\n3899\n3900\n3901\n3902\n3903\n3904\n3905\n3906\n3907\n3908\n3909\n3910\n3911\n3912\n3913\n3914\n3915\n3916\n3917\n3918\n3919\n3920\n3921\n3922\n3923\n3924\n3925\n3926\n3927\n3928\n3929\n3930\n3931\n3932\n3933\n3934\n3935\n3936\n3937\n3938\n3939\n3940\n3941\n3942\n3943\n3944\n3945\n3946\n3947\n3948\n3949\n3950\n3951\n3952\n3953\n3954\n3955\n3956\n3957\n3958\n3959\n3960\n3961\n3962\n3963\n3964\n3965\n3966\n3967\n3968\n3969\n3970\n3971\n3972\n3973\n3974\n3975\n3976\n3977\n3978\n3979\n3980\n3981\n3982\n3983\n3984\n3985\n3986\n3987\n3988\n3989\n3990\n3991\n3992\n3993\n3994\n3995\n3996\n3997\n3998\n3999\n4000\n4001\n4002\n4003\n4004\n4005\n4006\n4007\n4008\n4009\n4010\n4011\n4012\n4013\n4014\n4015\n4016\n4017\n4018\n4019\n4020\n4021\n4022\n4023\n4024\n4025\n4026\n4027\n4028\n4029\n4030\n4031\n4032\n4033\n4034\n4035\n4036\n4037\n4038\n4039\n4040\n4041\n4042\n4043\n4044\n4045\n4046\n4047\n4048\n4049\n4050\n4051\n4052\n4053\n4054\n4055\n4056\n4057\n4058\n4059\n4060\n4061\n4062\n4063\n4064\n4065\n4066\n4067\n4068\n4069\n4070\n4071\n4072\n4073\n4074\n4075\n4076\n4077\n4078\n4079\n4080\n4081\n4082\n4083\n4084\n4085\n4086\n4087\n4088\n4089\n4090\n4091\n4092\n4093\n4094\n4095\n4096\n4097\n4098\n4099\n4100\n4101\n4102\n4103\n4104\n4105\n4106\n4107\n4108\n4109\n4110\n4111\n4112\n4113\n4114\n4115\n4116\n4117\n4118\n4119\n4120\n4121\n4122\n4123\n4124\n4125\n4126\n4127\n4128\n4129\n4130\n4131\n4132\n4133\n4134\n4135\n4136\n4137\n4138\n4139\n4140\n4141\n4142\n4143\n4144\n4145\n4146\n4147\n4148\n4149\n4150\n4151\n4152\n4153\n4154\n4155\n4156\n4157\n4158\n4159\n4160\n4161\n4162\n4163\n4164\n4165\n4166\n4167\n4168\n4169\n4170\n4171\n4172\n4173\n4174\n4175\n4176\n4177\n4178\n4179\n4180\n4181\n4182\n4183\n4184
\n4185\n4186\n4187\n4188\n4189\n4190\n4191\n4192\n4193\n4194\n4195\n4196\n4197\n4198\n4199\n4200\n4201\n4202\n4203\n4204\n4205\n4206\n4207\n4208\n4209\n4210\n4211\n4212\n4213\n4214\n4215\n4216\n4217\n4218\n4219\n4220\n4221\n4222\n4223\n4224\n4225\n4226\n4227\n4228\n4229\n4230\n4231\n4232\n4233\n4234\n4235\n4236\n4237\n4238\n4239\n4240\n4241\n4242\n4243\n4244\n4245\n4246\n4247\n4248\n4249\n4250\n4251\n4252\n4253\n4254\n4255\n4256\n4257\n4258\n4259\n4260\n4261\n4262\n4263\n4264\n4265\n4266\n4267\n4268\n4269\n4270\n4271\n4272\n4273\n4274\n4275\n4276\n4277\n4278\n4279\n4280\n4281\n4282\n4283\n4284\n4285\n4286\n4287\n4288\n4289\n4290\n4291\n4292\n4293\n4294\n4295\n4296\n4297\n4298\n4299\n4300\n4301\n4302\n4303\n4304\n4305\n4306\n4307\n4308\n4309\n4310\n4311\n4312\n4313\n4314\n4315\n4316\n4317\n4318\n4319\n4320\n4321\n4322\n4323\n4324\n4325\n4326\n4327\n4328\n4329\n4330\n4331\n4332\n4333\n4334\n4335\n4336\n4337\n4338\n4339\n4340\n4341\n4342\n4343\n4344\n4345\n4346\n4347\n4348\n4349\n4350\n4351\n4352\n4353\n4354\n4355\n4356\n4357\n4358\n4359\n4360\n4361\n4362\n4363\n4364\n4365\n4366\n4367\n4368\n4369\n4370\n4371\n4372\n4373\n4374\n4375\n4376\n4377\n4378\n4379\n4380\n4381\n4382\n4383\n4384\n4385\n4386\n4387\n4388\n4389\n4390\n4391\n4392\n4393\n4394\n4395\n4396\n4397\n4398\n4399\n4400\n4401\n4402\n4403\n4404\n4405\n4406\n4407\n4408\n4409\n4410\n4411\n4412\n4413\n4414\n4415\n4416\n4417\n4418\n4419\n4420\n4421\n4422\n4423\n4424\n4425\n4426\n4427\n4428\n4429\n4430\n4431\n4432\n4433\n4434\n4435\n4436\n4437\n4438\n4439\n4440\n4441\n4442\n4443\n4444\n4445\n4446\n4447\n4448\n4449\n4450\n4451\n4452\n4453\n4454\n4455\n4456\n4457\n4458\n4459\n4460\n4461\n4462\n4463\n4464\n4465\n4466\n4467\n4468\n4469\n4470\n4471\n4472\n4473\n4474\n4475\n4476\n4477\n4478\n4479\n4480\n4481\n4482\n4483\n4484\n4485\n4486\n4487\n4488\n4489\n4490\n4491\n4492\n4493\n4494\n4495\n4496\n4497\n4498\n4499\n4500\n4501\n4502\n4503\n4504\n4505\n4506\n4507\n4508\n4509\n4510\n4511\n4512\n4513\n4514\n4515\n4516\n4517\n
4518\n4519\n4520\n4521\n4522\n4523\n4524\n4525\n4526\n4527\n4528\n4529\n4530\n4531\n4532\n4533\n4534\n4535\n4536\n4537\n4538\n4539\n4540\n4541\n4542\n4543\n4544\n4545\n4546\n4547\n4548\n4549\n4550\n4551\n4552\n4553\n4554\n4555\n4556\n4557\n4558\n4559\n4560\n4561\n4562\n4563\n4564\n4565\n4566\n4567\n4568\n4569\n4570\n4571\n4572\n4573\n4574\n4575\n4576\n4577\n4578\n4579\n4580\n4581\n4582\n4583\n4584\n4585\n4586\n4587\n4588\n4589\n4590\n4591\n4592\n4593\n4594\n4595\n4596\n4597\n4598\n4599\n4600\n4601\n4602\n4603\n4604\n4605\n4606\n4607\n4608\n4609\n4610\n4611\n4612\n4613\n4614\n4615\n4616\n4617\n4618\n4619\n4620\n4621\n4622\n4623\n4624\n4625\n4626\n4627\n4628\n4629\n4630\n4631\n4632\n4633\n4634\n4635\n4636\n4637\n4638\n4639\n4640\n4641\n4642\n4643\n4644\n4645\n4646\n4647\n4648\n4649\n4650\n4651\n4652\n4653\n4654\n4655\n4656\n4657\n4658\n4659\n4660\n4661\n4662\n4663\n4664\n4665\n4666\n4667\n4668\n4669\n4670\n4671\n4672\n4673\n4674\n4675\n4676\n4677\n4678\n4679\n4680\n4681\n4682\n4683\n4684\n4685\n4686\n4687\n4688\n4689\n4690\n4691\n4692\n4693\n4694\n4695\n4696\n4697\n4698\n4699\n4700\n4701\n4702\n4703\n4704\n4705\n4706\n4707\n4708\n4709\n4710\n4711\n4712\n4713\n4714\n4715\n4716\n4717\n4718\n4719\n4720\n4721\n4722\n4723\n4724\n4725\n4726\n4727\n4728\n4729\n4730\n4731\n4732\n4733\n4734\n4735\n4736\n4737\n4738\n4739\n4740\n4741\n4742\n4743\n4744\n4745\n4746\n4747\n4748\n4749\n4750\n4751\n4752\n4753\n4754\n4755\n4756\n4757\n4758\n4759\n4760\n4761\n4762\n4763\n4764\n4765\n4766\n4767\n4768\n4769\n4770\n4771\n4772\n4773\n4774\n4775\n4776\n4777\n4778\n4779\n4780\n4781\n4782\n4783\n4784\n4785\n4786\n4787\n4788\n4789\n4790\n4791\n4792\n4793\n4794\n4795\n4796\n4797\n4798\n4799\n4800\n4801\n4802\n4803\n4804\n4805\n4806\n4807\n4808\n4809\n4810\n4811\n4812\n4813\n4814\n4815\n4816\n4817\n4818\n4819\n4820\n4821\n4822\n4823\n4824\n4825\n4826\n4827\n4828\n4829\n4830\n4831\n4832\n4833\n4834\n4835\n4836\n4837\n4838\n4839\n4840\n4841\n4842\n4843\n4844\n4845\n4846\n4847\n4848\n4849\n4850\n48
51\n4852\n4853\n4854\n4855\n4856\n4857\n4858\n4859\n4860\n4861\n4862\n4863\n4864\n4865\n4866\n4867\n4868\n4869\n4870\n4871\n4872\n4873\n4874\n4875\n4876\n4877\n4878\n4879\n4880\n4881\n4882\n4883\n4884\n4885\n4886\n4887\n4888\n4889\n4890\n4891\n4892\n4893\n4894\n4895\n4896\n4897\n4898\n4899\n4900\n4901\n4902\n4903\n4904\n4905\n4906\n4907\n4908\n4909\n4910\n4911\n4912\n4913\n4914\n4915\n4916\n4917\n4918\n4919\n4920\n4921\n4922\n4923\n4924\n4925\n4926\n4927\n4928\n4929\n4930\n4931\n4932\n4933\n4934\n4935\n4936\n4937\n4938\n4939\n4940\n4941\n4942\n4943\n4944\n4945\n4946\n4947\n4948\n4949\n4950\n4951\n4952\n4953\n4954\n4955\n4956\n4957\n4958\n4959\n4960\n4961\n4962\n4963\n4964\n4965\n4966\n4967\n4968\n4969\n4970\n4971\n4972\n4973\n4974\n4975\n4976\n4977\n4978\n4979\n4980\n4981\n4982\n4983\n4984\n4985\n4986\n4987\n4988\n4989\n4990\n4991\n4992\n4993\n4994\n4995\n4996\n4997\n4998\n4999\n5000\n5001\n5002\n5003\n5004\n5005\n5006\n5007\n5008\n5009\n5010\n5011\n5012\n5013\n5014\n5015\n5016\n5017\n5018\n5019\n5020\n5021\n5022\n5023\n5024\n5025\n5026\n5027\n5028\n5029\n5030\n5031\n5032\n5033\n5034\n5035\n5036\n5037\n5038\n5039\n5040\n5041\n5042\n5043\n5044\n5045\n5046\n5047\n5048\n5049\n5050\n5051\n5052\n5053\n5054\n5055\n5056\n5057\n5058\n5059\n5060\n5061\n5062\n5063\n5064\n5065\n5066\n5067\n5068\n5069\n5070\n5071\n5072\n5073\n5074\n5075\n5076\n5077\n5078\n5079\n5080\n5081\n5082\n5083\n5084\n5085\n5086\n5087\n5088\n5089\n5090\n5091\n5092\n5093\n5094\n5095\n5096\n5097\n5098\n5099\n5100\n5101\n5102\n5103\n5104\n5105\n5106\n5107\n5108\n5109\n5110\n5111\n5112\n5113\n5114\n5115\n5116\n5117\n5118\n5119\n5120\n5121\n5122\n5123\n5124\n5125\n5126\n5127\n5128\n5129\n5130\n5131\n5132\n5133\n5134\n5135\n5136\n5137\n5138\n5139\n5140\n5141\n5142\n5143\n5144\n5145\n5146\n5147\n5148\n5149\n5150\n5151\n5152\n5153\n5154\n5155\n5156\n5157\n5158\n5159\n5160\n5161\n5162\n5163\n5164\n5165\n5166\n5167\n5168\n5169\n5170\n5171\n5172\n5173\n5174\n5175\n5176\n5177\n5178\n5179\n5180\n5181\n5182\n5183\n5184
\n5185\n5186\n5187\n5188\n5189\n5190\n5191\n5192\n5193\n5194\n5195\n5196\n5197\n5198\n5199\n5200\n5201\n5202\n5203\n5204\n5205\n5206\n5207\n5208\n5209\n5210\n5211\n5212\n5213\n5214\n5215\n5216\n5217\n5218\n5219\n5220\n5221\n5222\n5223\n5224\n5225\n5226\n5227\n5228\n5229\n5230\n5231\n5232\n5233\n5234\n5235\n5236\n5237\n5238\n5239\n5240\n5241\n5242\n5243\n5244\n5245\n5246\n5247\n5248\n5249\n5250\n5251\n5252\n5253\n5254\n5255\n5256\n5257\n5258\n5259\n5260\n5261\n5262\n5263\n5264\n5265\n5266\n5267\n5268\n5269\n5270\n5271\n5272\n5273\n5274\n5275\n5276\n5277\n5278\n5279\n5280\n5281\n5282\n5283\n5284\n5285\n5286\n5287\n5288\n5289\n5290\n5291\n5292\n5293\n5294\n5295\n5296\n5297\n5298\n5299\n5300\n5301\n5302\n5303\n5304\n5305\n5306\n5307\n5308\n5309\n5310\n5311\n5312\n5313\n5314\n5315\n5316\n5317\n5318\n5319\n5320\n5321\n5322\n5323\n5324\n5325\n5326\n5327\n5328\n5329\n5330\n5331\n5332\n5333\n5334\n5335\n5336\n5337\n5338\n5339\n5340\n5341\n5342\n5343\n5344\n5345\n5346\n5347\n5348\n5349\n5350\n5351\n5352\n5353\n5354\n5355\n5356\n5357\n5358\n5359\n5360\n5361\n5362\n5363\n5364\n5365\n5366\n5367\n5368\n5369\n5370\n5371\n5372\n5373\n5374\n5375\n5376\n5377\n5378\n5379\n5380\n5381\n5382\n5383\n5384\n5385\n5386\n5387\n5388\n5389\n5390\n5391\n5392\n5393\n5394\n5395\n5396\n5397\n5398\n5399\n5400\n5401\n5402\n5403\n5404\n5405\n5406\n5407\n5408\n5409\n5410\n5411\n5412\n5413\n5414\n5415\n5416\n5417\n5418\n5419\n5420\n5421\n5422\n5423\n5424\n5425\n5426\n5427\n5428\n5429\n5430\n5431\n5432\n5433\n5434\n5435\n5436\n5437\n5438\n5439\n5440\n5441\n5442\n5443\n5444\n5445\n5446\n5447\n5448\n5449\n5450\n5451\n5452\n5453\n5454\n5455\n5456\n5457\n5458\n5459\n5460\n5461\n5462\n5463\n5464\n5465\n5466\n5467\n5468\n5469\n5470\n5471\n5472\n5473\n5474\n5475\n5476\n5477\n5478\n5479\n5480\n5481\n5482\n5483\n5484\n5485\n5486\n5487\n5488\n5489\n5490\n5491\n5492\n5493\n5494\n5495\n5496\n5497\n5498\n5499\n5500\n5501\n5502\n5503\n5504\n5505\n5506\n5507\n5508\n5509\n5510\n5511\n5512\n5513\n5514\n5515\n5516\n5517\n
5518\n5519\n5520\n5521\n5522\n5523\n5524\n5525\n5526\n5527\n5528\n5529\n5530\n5531\n5532\n5533\n5534\n5535\n5536\n5537\n5538\n5539\n5540\n5541\n5542\n5543\n5544\n5545\n5546\n5547\n5548\n5549\n5550\n5551\n5552\n5553\n5554\n5555\n5556\n5557\n5558\n5559\n5560\n5561\n5562\n5563\n5564\n5565\n5566\n5567\n5568\n5569\n5570\n5571\n5572\n5573\n5574\n5575\n5576\n5577\n5578\n5579\n5580\n5581\n5582\n5583\n5584\n5585\n5586\n5587\n5588\n5589\n5590\n5591\n5592\n5593\n5594\n5595\n5596\n5597\n5598\n5599\n5600\n5601\n5602\n5603\n5604\n5605\n5606\n5607\n5608\n5609\n5610\n5611\n5612\n5613\n5614\n5615\n5616\n5617\n5618\n5619\n5620\n5621\n5622\n5623\n5624\n5625\n5626\n5627\n5628\n5629\n5630\n5631\n5632\n5633\n5634\n5635\n5636\n5637\n5638\n5639\n5640\n5641\n5642\n5643\n5644\n5645\n5646\n5647\n5648\n5649\n5650\n5651\n5652\n5653\n5654\n5655\n5656\n5657\n5658\n5659\n5660\n5661\n5662\n5663\n5664\n5665\n5666\n5667\n5668\n5669\n5670\n5671\n5672\n5673\n5674\n5675\n5676\n5677\n5678\n5679\n5680\n5681\n5682\n5683\n5684\n5685\n5686\n5687\n5688\n5689\n5690\n5691\n5692\n5693\n5694\n5695\n5696\n5697\n5698\n5699\n5700\n5701\n5702\n5703\n5704\n5705\n5706\n5707\n5708\n5709\n5710\n5711\n5712\n5713\n5714\n5715\n5716\n5717\n5718\n5719\n5720\n5721\n5722\n5723\n5724\n5725\n5726\n5727\n5728\n5729\n5730\n5731\n5732\n5733\n5734\n5735\n5736\n5737\n5738\n5739\n5740\n5741\n5742\n5743\n5744\n5745\n5746\n5747\n5748\n5749\n5750\n5751\n5752\n5753\n5754\n5755\n5756\n5757\n5758\n5759\n5760\n5761\n5762\n5763\n5764\n5765\n5766\n5767\n5768\n5769\n5770\n5771\n5772\n5773\n5774\n5775\n5776\n5777\n5778\n5779\n5780\n5781\n5782\n5783\n5784\n5785\n5786\n5787\n5788\n5789\n5790\n5791\n5792\n5793\n5794\n5795\n5796\n5797\n5798\n5799\n5800\n5801\n5802\n5803\n5804\n5805\n5806\n5807\n5808\n5809\n5810\n5811\n5812\n5813\n5814\n5815\n5816\n5817\n5818\n5819\n5820\n5821\n5822\n5823\n5824\n5825\n5826\n5827\n5828\n5829\n5830\n5831\n5832\n5833\n5834\n5835\n5836\n5837\n5838\n5839\n5840\n5841\n5842\n5843\n5844\n5845\n5846\n5847\n5848\n5849\n5850\n58
51\n5852\n5853\n5854\n5855\n5856\n5857\n5858\n5859\n5860\n5861\n5862\n5863\n5864\n5865\n5866\n5867\n5868\n5869\n5870\n5871\n5872\n5873\n5874\n5875\n5876\n5877\n5878\n5879\n5880\n5881\n5882\n5883\n5884\n5885\n5886\n5887\n5888\n5889\n5890\n5891\n5892\n5893\n5894\n5895\n5896\n5897\n5898\n5899\n5900\n5901\n5902\n5903\n5904\n5905\n5906\n5907\n5908\n5909\n5910\n5911\n5912\n5913\n5914\n5915\n5916\n5917\n5918\n5919\n5920\n5921\n5922\n5923\n5924\n5925\n5926\n5927\n5928\n5929\n5930\n5931\n5932\n5933\n5934\n5935\n5936\n5937\n5938\n5939\n5940\n5941\n5942\n5943\n5944\n5945\n5946\n5947\n5948\n5949\n5950\n5951\n5952\n5953\n5954\n5955\n5956\n5957\n5958\n5959\n5960\n5961\n5962\n5963\n5964\n5965\n5966\n5967\n5968\n5969\n5970\n5971\n5972\n5973\n5974\n5975\n5976\n5977\n5978\n5979\n5980\n5981\n5982\n5983\n5984\n5985\n5986\n5987\n5988\n5989\n5990\n5991\n5992\n5993\n5994\n5995\n5996\n5997\n5998\n5999\n6000\n6001\n6002\n6003\n6004\n6005\n6006\n6007\n6008\n6009\n6010\n6011\n6012\n6013\n6014\n6015\n6016\n6017\n6018\n6019\n6020\n6021\n6022\n6023\n6024\n6025\n6026\n6027\n6028\n6029\n6030\n6031\n6032\n6033\n6034\n6035\n6036\n6037\n6038\n6039\n6040\n6041\n6042\n6043\n6044\n6045\n6046\n6047\n6048\n6049\n6050\n6051\n6052\n6053\n6054\n6055\n6056\n6057\n6058\n6059\n6060\n6061\n6062\n6063\n6064\n6065\n6066\n6067\n6068\n6069\n6070\n6071\n6072\n6073\n6074\n6075\n6076\n6077\n6078\n6079\n6080\n6081\n6082\n6083\n6084\n6085\n6086\n6087\n6088\n6089\n6090\n6091\n6092\n6093\n6094\n6095\n6096\n6097\n6098\n6099\n6100\n6101\n6102\n6103\n6104\n6105\n6106\n6107\n6108\n6109\n6110\n6111\n6112\n6113\n6114\n6115\n6116\n6117\n6118\n6119\n6120\n6121\n6122\n6123\n6124\n6125\n6126\n6127\n6128\n6129\n6130\n6131\n6132\n6133\n6134\n6135\n6136\n6137\n6138\n6139\n6140\n6141\n6142\n6143\n6144\n6145\n6146\n6147\n6148\n6149\n6150\n6151\n6152\n6153\n6154\n6155\n6156\n6157\n6158\n6159\n6160\n6161\n6162\n6163\n6164\n6165\n6166\n6167\n6168\n6169\n6170\n6171\n6172\n6173\n6174\n6175\n6176\n6177\n6178\n6179\n6180\n6181\n6182\n6183\n6184
\n6185\n6186\n6187\n6188\n6189\n6190\n6191\n6192\n6193\n6194\n6195\n6196\n6197\n6198\n6199\n6200\n6201\n6202\n6203\n6204\n6205\n6206\n6207\n6208\n6209\n6210\n6211\n6212\n6213\n6214\n6215\n6216\n6217\n6218\n6219\n6220\n6221\n6222\n6223\n6224\n6225\n6226\n6227\n6228\n6229\n6230\n6231\n6232\n6233\n6234\n6235\n6236\n6237\n6238\n6239\n6240\n6241\n6242\n6243\n6244\n6245\n6246\n6247\n6248\n6249\n6250\n6251\n6252\n6253\n6254\n6255\n6256\n6257\n6258\n6259\n6260\n6261\n6262\n6263\n6264\n6265\n6266\n6267\n6268\n6269\n6270\n6271\n6272\n6273\n6274\n6275\n6276\n6277\n6278\n6279\n6280\n6281\n6282\n6283\n6284\n6285\n6286\n6287\n6288\n6289\n6290\n6291\n6292\n6293\n6294\n6295\n6296\n6297\n6298\n6299\n6300\n6301\n6302\n6303\n6304\n6305\n6306\n6307\n6308\n6309\n6310\n6311\n6312\n6313\n6314\n6315\n6316\n6317\n6318\n6319\n6320\n6321\n6322\n6323\n6324\n6325\n6326\n6327\n6328\n6329\n6330\n6331\n6332\n6333\n6334\n6335\n6336\n6337\n6338\n6339\n6340\n6341\n6342\n6343\n6344\n6345\n6346\n6347\n6348\n6349\n6350\n6351\n6352\n6353\n6354\n6355\n6356\n6357\n6358\n6359\n6360\n6361\n6362\n6363\n6364\n6365\n6366\n6367\n6368\n6369\n6370\n6371\n6372\n6373\n6374\n6375\n6376\n6377\n6378\n6379\n6380\n6381\n6382\n6383\n6384\n6385\n6386\n6387\n6388\n6389\n6390\n6391\n6392\n6393\n6394\n6395\n6396\n6397\n6398\n6399\n6400\n6401\n6402\n6403\n6404\n6405\n6406\n6407\n6408\n6409\n6410\n6411\n6412\n6413\n6414\n6415\n6416\n6417\n6418\n6419\n6420\n6421\n6422\n6423\n6424\n6425\n6426\n6427\n6428\n6429\n6430\n6431\n6432\n6433\n6434\n6435\n6436\n6437\n6438\n6439\n6440\n6441\n6442\n6443\n6444\n6445\n6446\n6447\n6448\n6449\n6450\n6451\n6452\n6453\n6454\n6455\n6456\n6457\n6458\n6459\n6460\n6461\n6462\n6463\n6464\n6465\n6466\n6467\n6468\n6469\n6470\n6471\n6472\n6473\n6474\n6475\n6476\n6477\n6478\n6479\n6480\n6481\n6482\n6483\n6484\n6485\n6486\n6487\n6488\n6489\n6490\n6491\n6492\n6493\n6494\n6495\n6496\n6497\n6498\n6499\n6500\n6501\n6502\n6503\n6504\n6505\n6506\n6507\n6508\n6509\n6510\n6511\n6512\n6513\n6514\n6515\n6516\n6517\n
6518\n6519\n6520\n6521\n6522\n6523\n6524\n6525\n6526\n6527\n6528\n6529\n6530\n6531\n6532\n6533\n6534\n6535\n6536\n6537\n6538\n6539\n6540\n6541\n6542\n6543\n6544\n6545\n6546\n6547\n6548\n6549\n6550\n6551\n6552\n6553\n6554\n6555\n6556\n6557\n6558\n6559\n6560\n6561\n6562\n6563\n6564\n6565\n6566\n6567\n6568\n6569\n6570\n6571\n6572\n6573\n6574\n6575\n6576\n6577\n6578\n6579\n6580\n6581\n6582\n6583\n6584\n6585\n6586\n6587\n6588\n6589\n6590\n6591\n6592\n6593\n6594\n6595\n6596\n6597\n6598\n6599\n6600\n6601\n6602\n6603\n6604\n6605\n6606\n6607\n6608\n6609\n6610\n6611\n6612\n6613\n6614\n6615\n6616\n6617\n6618\n6619\n6620\n6621\n6622\n6623\n6624\n6625\n6626\n6627\n6628\n6629\n6630\n6631\n6632\n6633\n6634\n6635\n6636\n6637\n6638\n6639\n6640\n6641\n6642\n6643\n6644\n6645\n6646\n6647\n6648\n6649\n6650\n6651\n6652\n6653\n6654\n6655\n6656\n6657\n6658\n6659\n6660\n6661\n6662\n6663\n6664\n6665\n6666\n6667\n6668\n6669\n6670\n6671\n6672\n6673\n6674\n6675\n6676\n6677\n6678\n6679\n6680\n6681\n6682\n6683\n6684\n6685\n6686\n6687\n6688\n6689\n6690\n6691\n6692\n6693\n6694\n6695\n6696\n6697\n6698\n6699\n6700\n6701\n6702\n6703\n6704\n6705\n6706\n6707\n6708\n6709\n6710\n6711\n6712\n6713\n6714\n6715\n6716\n6717\n6718\n6719\n6720\n6721\n6722\n6723\n6724\n6725\n6726\n6727\n6728\n6729\n6730\n6731\n6732\n6733\n6734\n6735\n6736\n6737\n6738\n6739\n6740\n6741\n6742\n6743\n6744\n6745\n6746\n6747\n6748\n6749\n6750\n6751\n6752\n6753\n6754\n6755\n6756\n6757\n6758\n6759\n6760\n6761\n6762\n6763\n6764\n6765\n6766\n6767\n6768\n6769\n6770\n6771\n6772\n6773\n6774\n6775\n6776\n6777\n6778\n6779\n6780\n6781\n6782\n6783\n6784\n6785\n6786\n6787\n6788\n6789\n6790\n6791\n6792\n6793\n6794\n6795\n6796\n6797\n6798\n6799\n6800\n6801\n6802\n6803\n6804\n6805\n6806\n6807\n6808\n6809\n6810\n6811\n6812\n6813\n6814\n6815\n6816\n6817\n6818\n6819\n6820\n6821\n6822\n6823\n6824\n6825\n6826\n6827\n6828\n6829\n6830\n6831\n6832\n6833\n6834\n6835\n6836\n6837\n6838\n6839\n6840\n6841\n6842\n6843\n6844\n6845\n6846\n6847\n6848\n6849\n6850\n68
51\n6852\n6853\n6854\n6855\n6856\n6857\n6858\n6859\n6860\n6861\n6862\n6863\n6864\n6865\n6866\n6867\n6868\n6869\n6870\n6871\n6872\n6873\n6874\n6875\n6876\n6877\n6878\n6879\n6880\n6881\n6882\n6883\n6884\n6885\n6886\n6887\n6888\n6889\n6890\n6891\n6892\n6893\n6894\n6895\n6896\n6897\n6898\n6899\n6900\n6901\n6902\n6903\n6904\n6905\n6906\n6907\n6908\n6909\n6910\n6911\n6912\n6913\n6914\n6915\n6916\n6917\n6918\n6919\n6920\n6921\n6922\n6923\n6924\n6925\n6926\n6927\n6928\n6929\n6930\n6931\n6932\n6933\n6934\n6935\n6936\n6937\n6938\n6939\n6940\n6941\n6942\n6943\n6944\n6945\n6946\n6947\n6948\n6949\n6950\n6951\n6952\n6953\n6954\n6955\n6956\n6957\n6958\n6959\n6960\n6961\n6962\n6963\n6964\n6965\n6966\n6967\n6968\n6969\n6970\n6971\n6972\n6973\n6974\n6975\n6976\n6977\n6978\n6979\n6980\n6981\n6982\n6983\n6984\n6985\n6986\n6987\n6988\n6989\n6990\n6991\n6992\n6993\n6994\n6995\n6996\n6997\n6998\n6999\n7000\n7001\n7002\n7003\n7004\n7005\n7006\n7007\n7008\n7009\n7010\n7011\n7012\n7013\n7014\n7015\n7016\n7017\n7018\n7019\n7020\n7021\n7022\n7023\n7024\n7025\n7026\n7027\n7028\n7029\n7030\n7031\n7032\n7033\n7034\n7035\n7036\n7037\n7038\n7039\n7040\n7041\n7042\n7043\n7044\n7045\n7046\n7047\n7048\n7049\n7050\n7051\n7052\n7053\n7054\n7055\n7056\n7057\n7058\n7059\n7060\n7061\n7062\n7063\n7064\n7065\n7066\n7067\n7068\n7069\n7070\n7071\n7072\n7073\n7074\n7075\n7076\n7077\n7078\n7079\n7080\n7081\n7082\n7083\n7084\n7085\n7086\n7087\n7088\n7089\n7090\n7091\n7092\n7093\n7094\n7095\n7096\n7097\n7098\n7099\n7100\n7101\n7102\n7103\n7104\n7105\n7106\n7107\n7108\n7109\n7110\n7111\n7112\n7113\n7114\n7115\n7116\n7117\n7118\n7119\n7120\n7121\n7122\n7123\n7124\n7125\n7126\n7127\n7128\n7129\n7130\n7131\n7132\n7133\n7134\n7135\n7136\n7137\n7138\n7139\n7140\n7141\n7142\n7143\n7144\n7145\n7146\n7147\n7148\n7149\n7150\n7151\n7152\n7153\n7154\n7155\n7156\n7157\n7158\n7159\n7160\n7161\n7162\n7163\n7164\n7165\n7166\n7167\n7168\n7169\n7170\n7171\n7172\n7173\n7174\n7175\n7176\n7177\n7178\n7179\n7180\n7181\n7182\n7183\n7184
\n7185\n7186\n7187\n7188\n7189\n7190\n7191\n7192\n7193\n7194\n7195\n7196\n7197\n7198\n7199\n7200\n7201\n7202\n7203\n7204\n7205\n7206\n7207\n7208\n7209\n7210\n7211\n7212\n7213\n7214\n7215\n7216\n7217\n7218\n7219\n7220\n7221\n7222\n7223\n7224\n7225\n7226\n7227\n7228\n7229\n7230\n7231\n7232\n7233\n7234\n7235\n7236\n7237\n7238\n7239\n7240\n7241\n7242\n7243\n7244\n7245\n7246\n7247\n7248\n7249\n7250\n7251\n7252\n7253\n7254\n7255\n7256\n7257\n7258\n7259\n7260\n7261\n7262\n7263\n7264\n7265\n7266\n7267\n7268\n7269\n7270\n7271\n7272\n7273\n7274\n7275\n7276\n7277\n7278\n7279\n7280\n7281\n7282\n7283\n7284\n7285\n7286\n7287\n7288\n7289\n7290\n7291\n7292\n7293\n7294\n7295\n7296\n7297\n7298\n7299\n7300\n7301\n7302\n7303\n7304\n7305\n7306\n7307\n7308\n7309\n7310\n7311\n7312\n7313\n7314\n7315\n7316\n7317\n7318\n7319\n7320\n7321\n7322\n7323\n7324\n7325\n7326\n7327\n7328\n7329\n7330\n7331\n7332\n7333\n7334\n7335\n7336\n7337\n7338\n7339\n7340\n7341\n7342\n7343\n7344\n7345\n7346\n7347\n7348\n7349\n7350\n7351\n7352\n7353\n7354\n7355\n7356\n7357\n7358\n7359\n7360\n7361\n7362\n7363\n7364\n7365\n7366\n7367\n7368\n7369\n7370\n7371\n7372\n7373\n7374\n7375\n7376\n7377\n7378\n7379\n7380\n7381\n7382\n7383\n7384\n7385\n7386\n7387\n7388\n7389\n7390\n7391\n7392\n7393\n7394\n7395\n7396\n7397\n7398\n7399\n7400\n7401\n7402\n7403\n7404\n7405\n7406\n7407\n7408\n7409\n7410\n7411\n7412\n7413\n7414\n7415\n7416\n7417\n7418\n7419\n7420\n7421\n7422\n7423\n7424\n7425\n7426\n7427\n7428\n7429\n7430\n7431\n7432\n7433\n7434\n7435\n7436\n7437\n7438\n7439\n7440\n7441\n7442\n7443\n7444\n7445\n7446\n7447\n7448\n7449\n7450\n7451\n7452\n7453\n7454\n7455\n7456\n7457\n7458\n7459\n7460\n7461\n7462\n7463\n7464\n7465\n7466\n7467\n7468\n7469\n7470\n7471\n7472\n7473\n7474\n7475\n7476\n7477\n7478\n7479\n7480\n7481\n7482\n7483\n7484\n7485\n7486\n7487\n7488\n7489\n7490\n7491\n7492\n7493\n7494\n7495\n7496\n7497\n7498\n7499\n7500\n7501\n7502\n7503\n7504\n7505\n7506\n7507\n7508\n7509\n7510\n7511\n7512\n7513\n7514\n7515\n7516\n7517\n
7518\n7519\n7520\n7521\n7522\n7523\n7524\n7525\n7526\n7527\n7528\n7529\n7530\n7531\n7532\n7533\n7534\n7535\n7536\n7537\n7538\n7539\n7540\n7541\n7542\n7543\n7544\n7545\n7546\n7547\n7548\n7549\n7550\n7551\n7552\n7553\n7554\n7555\n7556\n7557\n7558\n7559\n7560\n7561\n7562\n7563\n7564\n7565\n7566\n7567\n7568\n7569\n7570\n7571\n7572\n7573\n7574\n7575\n7576\n7577\n7578\n7579\n7580\n7581\n7582\n7583\n7584\n7585\n7586\n7587\n7588\n7589\n7590\n7591\n7592\n7593\n7594\n7595\n7596\n7597\n7598\n7599\n7600\n7601\n7602\n7603\n7604\n7605\n7606\n7607\n7608\n7609\n7610\n7611\n7612\n7613\n7614\n7615\n7616\n7617\n7618\n7619\n7620\n7621\n7622\n7623\n7624\n7625\n7626\n7627\n7628\n7629\n7630\n7631\n7632\n7633\n7634\n7635\n7636\n7637\n7638\n7639\n7640\n7641\n7642\n7643\n7644\n7645\n7646\n7647\n7648\n7649\n7650\n7651\n7652\n7653\n7654\n7655\n7656\n7657\n7658\n7659\n7660\n7661\n7662\n7663\n7664\n7665\n7666\n7667\n7668\n7669\n7670\n7671\n7672\n7673\n7674\n7675\n7676\n7677\n7678\n7679\n7680\n7681\n7682\n7683\n7684\n7685\n7686\n7687\n7688\n7689\n7690\n7691\n7692\n7693\n7694\n7695\n7696\n7697\n7698\n7699\n7700\n7701\n7702\n7703\n7704\n7705\n7706\n7707\n7708\n7709\n7710\n7711\n7712\n7713\n7714\n7715\n7716\n7717\n7718\n7719\n7720\n7721\n7722\n7723\n7724\n7725\n7726\n7727\n7728\n7729\n7730\n7731\n7732\n7733\n7734\n7735\n7736\n7737\n7738\n7739\n7740\n7741\n7742\n7743\n7744\n7745\n7746\n7747\n7748\n7749\n7750\n7751\n7752\n7753\n7754\n7755\n7756\n7757\n7758\n7759\n7760\n7761\n7762\n7763\n7764\n7765\n7766\n7767\n7768\n7769\n7770\n7771\n7772\n7773\n7774\n7775\n7776\n7777\n7778\n7779\n7780\n7781\n7782\n7783\n7784\n7785\n7786\n7787\n7788\n7789\n7790\n7791\n7792\n7793\n7794\n7795\n7796\n7797\n7798\n7799\n7800\n7801\n7802\n7803\n7804\n7805\n7806\n7807\n7808\n7809\n7810\n7811\n7812\n7813\n7814\n7815\n7816\n7817\n7818\n7819\n7820\n7821\n7822\n7823\n7824\n7825\n7826\n7827\n7828\n7829\n7830\n7831\n7832\n7833\n7834\n7835\n7836\n7837\n7838\n7839\n7840\n7841\n7842\n7843\n7844\n7845\n7846\n7847\n7848\n7849\n7850\n78
51\n7852\n7853\n7854\n7855\n7856\n7857\n7858\n7859\n7860\n7861\n7862\n7863\n7864\n7865\n7866\n7867\n7868\n7869\n7870\n7871\n7872\n7873\n7874\n7875\n7876\n7877\n7878\n7879\n7880\n7881\n7882\n7883\n7884\n7885\n7886\n7887\n7888\n7889\n7890\n7891\n7892\n7893\n7894\n7895\n7896\n7897\n7898\n7899\n7900\n7901\n7902\n7903\n7904\n7905\n7906\n7907\n7908\n7909\n7910\n7911\n7912\n7913\n7914\n7915\n7916\n7917\n7918\n7919\n7920\n7921\n7922\n7923\n7924\n7925\n7926\n7927\n7928\n7929\n7930\n7931\n7932\n7933\n7934\n7935\n7936\n7937\n7938\n7939\n7940\n7941\n7942\n7943\n7944\n7945\n7946\n7947\n7948\n7949\n7950\n7951\n7952\n7953\n7954\n7955\n7956\n7957\n7958\n7959\n7960\n7961\n7962\n7963\n7964\n7965\n7966\n7967\n7968\n7969\n7970\n7971\n7972\n7973\n7974\n7975\n7976\n7977\n7978\n7979\n7980\n7981\n7982\n7983\n7984\n7985\n7986\n7987\n7988\n7989\n7990\n7991\n7992\n7993\n7994\n7995\n7996\n7997\n7998\n7999\n8000\n8001\n8002\n8003\n8004\n8005\n8006\n8007\n8008\n8009\n8010\n8011\n8012\n8013\n8014\n8015\n8016\n8017\n8018\n8019\n8020\n8021\n8022\n8023\n8024\n8025\n8026\n8027\n8028\n8029\n8030\n8031\n8032\n8033\n8034\n8035\n8036\n8037\n8038\n8039\n8040\n8041\n8042\n8043\n8044\n8045\n8046\n8047\n8048\n8049\n8050\n8051\n8052\n8053\n8054\n8055\n8056\n8057\n8058\n8059\n8060\n8061\n8062\n8063\n8064\n8065\n8066\n8067\n8068\n8069\n8070\n8071\n8072\n8073\n8074\n8075\n8076\n8077\n8078\n8079\n8080\n8081\n8082\n8083\n8084\n8085\n8086\n8087\n8088\n8089\n8090\n8091\n8092\n8093\n8094\n8095\n8096\n8097\n8098\n8099\n8100\n8101\n8102\n8103\n8104\n8105\n8106\n8107\n8108\n8109\n8110\n8111\n8112\n8113\n8114\n8115\n8116\n8117\n8118\n8119\n8120\n8121\n8122\n8123\n8124\n8125\n8126\n8127\n8128\n8129\n8130\n8131\n8132\n8133\n8134\n8135\n8136\n8137\n8138\n8139\n8140\n8141\n8142\n8143\n8144\n8145\n8146\n8147\n8148\n8149\n8150\n8151\n8152\n8153\n8154\n8155\n8156\n8157\n8158\n8159\n8160\n8161\n8162\n8163\n8164\n8165\n8166\n8167\n8168\n8169\n8170\n8171\n8172\n8173\n8174\n8175\n8176\n8177\n8178\n8179\n8180\n8181\n8182\n8183\n8184
\n8185\n8186\n8187\n8188\n8189\n8190\n8191\n8192\n8193\n8194\n8195\n8196\n8197\n8198\n8199\n8200\n8201\n8202\n8203\n8204\n8205\n8206\n8207\n8208\n8209\n8210\n8211\n8212\n8213\n8214\n8215\n8216\n8217\n8218\n8219\n8220\n8221\n8222\n8223\n8224\n8225\n8226\n8227\n8228\n8229\n8230\n8231\n8232\n8233\n8234\n8235\n8236\n8237\n8238\n8239\n8240\n8241\n8242\n8243\n8244\n8245\n8246\n8247\n8248\n8249\n8250\n8251\n8252\n8253\n8254\n8255\n8256\n8257\n8258\n8259\n8260\n8261\n8262\n8263\n8264\n8265\n8266\n8267\n8268\n8269\n8270\n8271\n8272\n8273\n8274\n8275\n8276\n8277\n8278\n8279\n8280\n8281\n8282\n8283\n8284\n8285\n8286\n8287\n8288\n8289\n8290\n8291\n8292\n8293\n8294\n8295\n8296\n8297\n8298\n8299\n8300\n8301\n8302\n8303\n8304\n8305\n8306\n8307\n8308\n8309\n8310\n8311\n8312\n8313\n8314\n8315\n8316\n8317\n8318\n8319\n8320\n8321\n8322\n8323\n8324\n8325\n8326\n8327\n8328\n8329\n8330\n8331\n8332\n8333\n8334\n8335\n8336\n8337\n8338\n8339\n8340\n8341\n8342\n8343\n8344\n8345\n8346\n8347\n8348\n8349\n8350\n8351\n8352\n8353\n8354\n8355\n8356\n8357\n8358\n8359\n8360\n8361\n8362\n8363\n8364\n8365\n8366\n8367\n8368\n8369\n8370\n8371\n8372\n8373\n8374\n8375\n8376\n8377\n8378\n8379\n8380\n8381\n8382\n8383\n8384\n8385\n8386\n8387\n8388\n8389\n8390\n8391\n8392\n8393\n8394\n8395\n8396\n8397\n8398\n8399\n8400\n8401\n8402\n8403\n8404\n8405\n8406\n8407\n8408\n8409\n8410\n8411\n8412\n8413\n8414\n8415\n8416\n8417\n8418\n8419\n8420\n8421\n8422\n8423\n8424\n8425\n8426\n8427\n8428\n8429\n8430\n8431\n8432\n8433\n8434\n8435\n8436\n8437\n8438\n8439\n8440\n8441\n8442\n8443\n8444\n8445\n8446\n8447\n8448\n8449\n8450\n8451\n8452\n8453\n8454\n8455\n8456\n8457\n8458\n8459\n8460\n8461\n8462\n8463\n8464\n8465\n8466\n8467\n8468\n8469\n8470\n8471\n8472\n8473\n8474\n8475\n8476\n8477\n8478\n8479\n8480\n8481\n8482\n8483\n8484\n8485\n8486\n8487\n8488\n8489\n8490\n8491\n8492\n8493\n8494\n8495\n8496\n8497\n8498\n8499\n8500\n8501\n8502\n8503\n8504\n8505\n8506\n8507\n8508\n8509\n8510\n8511\n8512\n8513\n8514\n8515\n8516\n8517\n
8518\n8519\n8520\n8521\n8522\n8523\n8524\n8525\n8526\n8527\n8528\n8529\n8530\n8531\n8532\n8533\n8534\n8535\n8536\n8537\n8538\n8539\n8540\n8541\n8542\n8543\n8544\n8545\n8546\n8547\n8548\n8549\n8550\n8551\n8552\n8553\n8554\n8555\n8556\n8557\n8558\n8559\n8560\n8561\n8562\n8563\n8564\n8565\n8566\n8567\n8568\n8569\n8570\n8571\n8572\n8573\n8574\n8575\n8576\n8577\n8578\n8579\n8580\n8581\n8582\n8583\n8584\n8585\n8586\n8587\n8588\n8589\n8590\n8591\n8592\n8593\n8594\n8595\n8596\n8597\n8598\n8599\n8600\n8601\n8602\n8603\n8604\n8605\n8606\n8607\n8608\n8609\n8610\n8611\n8612\n8613\n8614\n8615\n8616\n8617\n8618\n8619\n8620\n8621\n8622\n8623\n8624\n8625\n8626\n8627\n8628\n8629\n8630\n8631\n8632\n8633\n8634\n8635\n8636\n8637\n8638\n8639\n8640\n8641\n8642\n8643\n8644\n8645\n8646\n8647\n8648\n8649\n8650\n8651\n8652\n8653\n8654\n8655\n8656\n8657\n8658\n8659\n8660\n8661\n8662\n8663\n8664\n8665\n8666\n8667\n8668\n8669\n8670\n8671\n8672\n8673\n8674\n8675\n8676\n8677\n8678\n8679\n8680\n8681\n8682\n8683\n8684\n8685\n8686\n8687\n8688\n8689\n8690\n8691\n8692\n8693\n8694\n8695\n8696\n8697\n8698\n8699\n8700\n8701\n8702\n8703\n8704\n8705\n8706\n8707\n8708\n8709\n8710\n8711\n8712\n8713\n8714\n8715\n8716\n8717\n8718\n8719\n8720\n8721\n8722\n8723\n8724\n8725\n8726\n8727\n8728\n8729\n8730\n8731\n8732\n8733\n8734\n8735\n8736\n8737\n8738\n8739\n8740\n8741\n8742\n8743\n8744\n8745\n8746\n8747\n8748\n8749\n8750\n8751\n8752\n8753\n8754\n8755\n8756\n8757\n8758\n8759\n8760\n8761\n8762\n8763\n8764\n8765\n8766\n8767\n8768\n8769\n8770\n8771\n8772\n8773\n8774\n8775\n8776\n8777\n8778\n8779\n8780\n8781\n8782\n8783\n8784\n8785\n8786\n8787\n8788\n8789\n8790\n8791\n8792\n8793\n8794\n8795\n8796\n8797\n8798\n8799\n8800\n8801\n8802\n8803\n8804\n8805\n8806\n8807\n8808\n8809\n8810\n8811\n8812\n8813\n8814\n8815\n8816\n8817\n8818\n8819\n8820\n8821\n8822\n8823\n8824\n8825\n8826\n8827\n8828\n8829\n8830\n8831\n8832\n8833\n8834\n8835\n8836\n8837\n8838\n8839\n8840\n8841\n8842\n8843\n8844\n8845\n8846\n8847\n8848\n8849\n8850\n88
51\n8852\n8853\n8854\n8855\n8856\n8857\n8858\n8859\n8860\n8861\n8862\n8863\n8864\n8865\n8866\n8867\n8868\n8869\n8870\n8871\n8872\n8873\n8874\n8875\n8876\n8877\n8878\n8879\n8880\n8881\n8882\n8883\n8884\n8885\n8886\n8887\n8888\n8889\n8890\n8891\n8892\n8893\n8894\n8895\n8896\n8897\n8898\n8899\n8900\n8901\n8902\n8903\n8904\n8905\n8906\n8907\n8908\n8909\n8910\n8911\n8912\n8913\n8914\n8915\n8916\n8917\n8918\n8919\n8920\n8921\n8922\n8923\n8924\n8925\n8926\n8927\n8928\n8929\n8930\n8931\n8932\n8933\n8934\n8935\n8936\n8937\n8938\n8939\n8940\n8941\n8942\n8943\n8944\n8945\n8946\n8947\n8948\n8949\n8950\n8951\n8952\n8953\n8954\n8955\n8956\n8957\n8958\n8959\n8960\n8961\n8962\n8963\n8964\n8965\n8966\n8967\n8968\n8969\n8970\n8971\n8972\n8973\n8974\n8975\n8976\n8977\n8978\n8979\n8980\n8981\n8982\n8983\n8984\n8985\n8986\n8987\n8988\n8989\n8990\n8991\n8992\n8993\n8994\n8995\n8996\n8997\n8998\n8999\n9000\n9001\n9002\n9003\n9004\n9005\n9006\n9007\n9008\n9009\n9010\n9011\n9012\n9013\n9014\n9015\n9016\n9017\n9018\n9019\n9020\n9021\n9022\n9023\n9024\n9025\n9026\n9027\n9028\n9029\n9030\n9031\n9032\n9033\n9034\n9035\n9036\n9037\n9038\n9039\n9040\n9041\n9042\n9043\n9044\n9045\n9046\n9047\n9048\n9049\n9050\n9051\n9052\n9053\n9054\n9055\n9056\n9057\n9058\n9059\n9060\n9061\n9062\n9063\n9064\n9065\n9066\n9067\n9068\n9069\n9070\n9071\n9072\n9073\n9074\n9075\n9076\n9077\n9078\n9079\n9080\n9081\n9082\n9083\n9084\n9085\n9086\n9087\n9088\n9089\n9090\n9091\n9092\n9093\n9094\n9095\n9096\n9097\n9098\n9099\n9100\n9101\n9102\n9103\n9104\n9105\n9106\n9107\n9108\n9109\n9110\n9111\n9112\n9113\n9114\n9115\n9116\n9117\n9118\n9119\n9120\n9121\n9122\n9123\n9124\n9125\n9126\n9127\n9128\n9129\n9130\n9131\n9132\n9133\n9134\n9135\n9136\n9137\n9138\n9139\n9140\n9141\n9142\n9143\n9144\n9145\n9146\n9147\n9148\n9149\n9150\n9151\n9152\n9153\n9154\n9155\n9156\n9157\n9158\n9159\n9160\n9161\n9162\n9163\n9164\n9165\n9166\n9167\n9168\n9169\n9170\n9171\n9172\n9173\n9174\n9175\n9176\n9177\n9178\n9179\n9180\n9181\n9182\n9183\n9184
\n9185\n9186\n9187\n9188\n9189\n9190\n9191\n9192\n9193\n9194\n9195\n9196\n9197\n9198\n9199\n9200\n9201\n9202\n9203\n9204\n9205\n9206\n9207\n9208\n9209\n9210\n9211\n9212\n9213\n9214\n9215\n9216\n9217\n9218\n9219\n9220\n9221\n9222\n9223\n9224\n9225\n9226\n9227\n9228\n9229\n9230\n9231\n9232\n9233\n9234\n9235\n9236\n9237\n9238\n9239\n9240\n9241\n9242\n9243\n9244\n9245\n9246\n9247\n9248\n9249\n9250\n9251\n9252\n9253\n9254\n9255\n9256\n9257\n9258\n9259\n9260\n9261\n9262\n9263\n9264\n9265\n9266\n9267\n9268\n9269\n9270\n9271\n9272\n9273\n9274\n9275\n9276\n9277\n9278\n9279\n9280\n9281\n9282\n9283\n9284\n9285\n9286\n9287\n9288\n9289\n9290\n9291\n9292\n9293\n9294\n9295\n9296\n9297\n9298\n9299\n9300\n9301\n9302\n9303\n9304\n9305\n9306\n9307\n9308\n9309\n9310\n9311\n9312\n9313\n9314\n9315\n9316\n9317\n9318\n9319\n9320\n9321\n9322\n9323\n9324\n9325\n9326\n9327\n9328\n9329\n9330\n9331\n9332\n9333\n9334\n9335\n9336\n9337\n9338\n9339\n9340\n9341\n9342\n9343\n9344\n9345\n9346\n9347\n9348\n9349\n9350\n9351\n9352\n9353\n9354\n9355\n9356\n9357\n9358\n9359\n9360\n9361\n9362\n9363\n9364\n9365\n9366\n9367\n9368\n9369\n9370\n9371\n9372\n9373\n9374\n9375\n9376\n9377\n9378\n9379\n9380\n9381\n9382\n9383\n9384\n9385\n9386\n9387\n9388\n9389\n9390\n9391\n9392\n9393\n9394\n9395\n9396\n9397\n9398\n9399\n9400\n9401\n9402\n9403\n9404\n9405\n9406\n9407\n9408\n9409\n9410\n9411\n9412\n9413\n9414\n9415\n9416\n9417\n9418\n9419\n9420\n9421\n9422\n9423\n9424\n9425\n9426\n9427\n9428\n9429\n9430\n9431\n9432\n9433\n9434\n9435\n9436\n9437\n9438\n9439\n9440\n9441\n9442\n9443\n9444\n9445\n9446\n9447\n9448\n9449\n9450\n9451\n9452\n9453\n9454\n9455\n9456\n9457\n9458\n9459\n9460\n9461\n9462\n9463\n9464\n9465\n9466\n9467\n9468\n9469\n9470\n9471\n9472\n9473\n9474\n9475\n9476\n9477\n9478\n9479\n9480\n9481\n9482\n9483\n9484\n9485\n9486\n9487\n9488\n9489\n9490\n9491\n9492\n9493\n9494\n9495\n9496\n9497\n9498\n9499\n9500\n9501\n9502\n9503\n9504\n9505\n9506\n9507\n9508\n9509\n9510\n9511\n9512\n9513\n9514\n9515\n9516\n9517\n
9518\n9519\n9520\n9521\n9522\n9523\n9524\n9525\n9526\n9527\n9528\n9529\n9530\n9531\n9532\n9533\n9534\n9535\n9536\n9537\n9538\n9539\n9540\n9541\n9542\n9543\n9544\n9545\n9546\n9547\n9548\n9549\n9550\n9551\n9552\n9553\n9554\n9555\n9556\n9557\n9558\n9559\n9560\n9561\n9562\n9563\n9564\n9565\n9566\n9567\n9568\n9569\n9570\n9571\n9572\n9573\n9574\n9575\n9576\n9577\n9578\n9579\n9580\n9581\n9582\n9583\n9584\n9585\n9586\n9587\n9588\n9589\n9590\n9591\n9592\n9593\n9594\n9595\n9596\n9597\n9598\n9599\n9600\n9601\n9602\n9603\n9604\n9605\n9606\n9607\n9608\n9609\n9610\n9611\n9612\n9613\n9614\n9615\n9616\n9617\n9618\n9619\n9620\n9621\n9622\n9623\n9624\n9625\n9626\n9627\n9628\n9629\n9630\n9631\n9632\n9633\n9634\n9635\n9636\n9637\n9638\n9639\n9640\n9641\n9642\n9643\n9644\n9645\n9646\n9647\n9648\n9649\n9650\n9651\n9652\n9653\n9654\n9655\n9656\n9657\n9658\n9659\n9660\n9661\n9662\n9663\n9664\n9665\n9666\n9667\n9668\n9669\n9670\n9671\n9672\n9673\n9674\n9675\n9676\n9677\n9678\n9679\n9680\n9681\n9682\n9683\n9684\n9685\n9686\n9687\n9688\n9689\n9690\n9691\n9692\n9693\n9694\n9695\n9696\n9697\n9698\n9699\n9700\n9701\n9702\n9703\n9704\n9705\n9706\n9707\n9708\n9709\n9710\n9711\n9712\n9713\n9714\n9715\n9716\n9717\n9718\n9719\n9720\n9721\n9722\n9723\n9724\n9725\n9726\n9727\n9728\n9729\n9730\n9731\n9732\n9733\n9734\n9735\n9736\n9737\n9738\n9739\n9740\n9741\n9742\n9743\n9744\n9745\n9746\n9747\n9748\n9749\n9750\n9751\n9752\n9753\n9754\n9755\n9756\n9757\n9758\n9759\n9760\n9761\n9762\n9763\n9764\n9765\n9766\n9767\n9768\n9769\n9770\n9771\n9772\n9773\n9774\n9775\n9776\n9777\n9778\n9779\n9780\n9781\n9782\n9783\n9784\n9785\n9786\n9787\n9788\n9789\n9790\n9791\n9792\n9793\n9794\n9795\n9796\n9797\n9798\n9799\n9800\n9801\n9802\n9803\n9804\n9805\n9806\n9807\n9808\n9809\n9810\n9811\n9812\n9813\n9814\n9815\n9816\n9817\n9818\n9819\n9820\n9821\n9822\n9823\n9824\n9825\n9826\n9827\n9828\n9829\n9830\n9831\n9832\n9833\n9834\n9835\n9836\n9837\n9838\n9839\n9840\n9841\n9842\n9843\n9844\n9845\n9846\n9847\n9848\n9849\n9850\n98
51\n9852\n9853\n9854\n9855\n9856\n9857\n9858\n9859\n9860\n9861\n9862\n9863\n9864\n9865\n9866\n9867\n9868\n9869\n9870\n9871\n9872\n9873\n9874\n9875\n9876\n9877\n9878\n9879\n9880\n9881\n9882\n9883\n9884\n9885\n9886\n9887\n9888\n9889\n9890\n9891\n9892\n9893\n9894\n9895\n9896\n9897\n9898\n9899\n9900\n9901\n9902\n9903\n9904\n9905\n9906\n9907\n9908\n9909\n9910\n9911\n9912\n9913\n9914\n9915\n9916\n9917\n9918\n9919\n9920\n9921\n9922\n9923\n9924\n9925\n9926\n9927\n9928\n9929\n9930\n9931\n9932\n9933\n9934\n9935\n9936\n9937\n9938\n9939\n9940\n9941\n9942\n9943\n9944\n9945\n9946\n9947\n9948\n9949\n9950\n9951\n9952\n9953\n9954\n9955\n9956\n9957\n9958\n9959\n9960\n9961\n9962\n9963\n9964\n9965\n9966\n9967\n9968\n9969\n9970\n9971\n9972\n9973\n9974\n9975\n9976\n9977\n9978\n9979\n9980\n9981\n9982\n9983\n9984\n9985\n9986\n9987\n9988\n9989\n9990\n9991\n9992\n9993\n9994\n9995\n9996\n9997\n9998\n9999\n10000\n10001\n10002\n10003\n10004\n10005\n10006\n10007\n10008\n10009\n10010\n10011\n10012\n10013\n10014\n10015\n10016\n10017\n10018\n10019\n10020\n10021\n10022\n10023\n10024\n10025\n10026\n10027\n10028\n10029\n10030\n10031\n10032\n10033\n10034\n10035\n10036\n10037\n10038\n10039\n10040\n10041\n10042\n10043\n10044\n10045\n10046\n10047\n10048\n10049\n10050\n10051\n10052\n10053\n10054\n10055\n10056\n10057\n10058\n10059\n10060\n10061\n10062\n10063\n10064\n10065\n10066\n10067\n10068\n10069\n10070\n10071\n10072\n10073\n10074\n10075\n10076\n10077\n10078\n10079\n10080\n10081\n10082\n10083\n10084\n10085\n10086\n10087\n10088\n10089\n10090\n10091\n10092\n10093\n10094\n10095\n10096\n10097\n10098\n10099\n10100\n10101\n10102\n10103\n10104\n10105\n10106\n10107\n10108\n10109\n10110\n10111\n10112\n10113\n10114\n10115\n10116\n10117\n10118\n10119\n10120\n10121\n10122\n10123\n10124\n10125\n10126\n10127\n10128\n10129\n10130\n10131\n10132\n10133\n10134\n10135\n10136\n10137\n10138\n10139\n10140\n10141\n10142\n10143\n10144\n10145\n10146\n10147\n10148\n10149\n10150\n10151\n10152\n10153\n10154\n10155\n10156\n10157\n10
158\n10159\n10160\n10161\n10162\n10163\n10164\n10165\n10166\n10167\n10168\n10169\n10170\n10171\n10172\n10173\n10174\n10175\n10176\n10177\n10178\n10179\n10180\n10181\n10182\n10183\n10184\n10185\n10186\n10187\n10188\n10189\n10190\n10191\n10192\n10193\n10194\n10195\n10196\n10197\n10198\n10199\n10200\n10201\n10202\n10203\n10204\n10205\n10206\n10207\n10208\n10209\n10210\n10211\n10212\n10213\n10214\n10215\n10216\n10217\n10218\n10219\n10220\n10221\n10222\n10223\n10224\n10225\n10226\n10227\n10228\n10229\n10230\n10231\n10232\n10233\n10234\n10235\n10236\n10237\n10238\n10239\n10240\n10241\n10242\n10243\n10244\n10245\n10246\n10247\n10248\n10249\n10250\n10251\n10252\n10253\n10254\n10255\n10256\n10257\n10258\n10259\n10260\n10261\n10262\n10263\n10264\n10265\n10266\n10267\n10268\n10269\n10270\n10271\n10272\n10273\n10274\n10275\n10276\n10277\n10278\n10279\n10280\n10281\n10282\n10283\n10284\n10285\n10286\n10287\n10288\n10289\n10290\n10291\n10292\n10293\n10294\n10295\n10296\n10297\n10298\n10299\n10300\n10301\n10302\n10303\n10304\n10305\n10306\n10307\n10308\n10309\n10310\n10311\n10312\n10313\n10314\n10315\n10316\n10317\n10318\n10319\n10320\n10321\n10322\n10323\n10324\n10325\n10326\n10327\n10328\n10329\n10330\n10331\n10332\n10333\n10334\n10335\n10336\n10337\n10338\n10339\n10340\n10341\n10342\n10343\n10344\n10345\n10346\n10347\n10348\n10349\n10350\n10351\n10352\n10353\n10354\n10355\n10356\n10357\n10358\n10359\n10360\n10361\n10362\n10363\n10364\n10365\n10366\n10367\n10368\n10369\n10370\n10371\n10372\n10373\n10374\n10375\n10376\n10377\n10378\n10379\n10380\n10381\n10382\n10383\n10384\n10385\n10386\n10387\n10388\n10389\n10390\n10391\n10392\n10393\n10394\n10395\n10396\n10397\n10398\n10399\n10400\n10401\n10402\n10403\n10404\n10405\n10406\n10407\n10408\n10409\n10410\n10411\n10412\n10413\n10414\n10415\n10416\n10417\n10418\n10419\n10420\n10421\n10422\n10423\n10424\n10425\n10426\n10427\n10428\n10429\n10430\n10431\n10432\n10433\n10434\n10435\n10436\n10437\n10438\n10439\n10440\n10441\n10442\n10443\n
10444\n10445\n10446\n10447\n10448\n10449\n10450\n10451\n10452\n10453\n10454\n10455\n10456\n10457\n10458\n10459\n10460\n10461\n10462\n10463\n10464\n10465\n10466\n10467\n10468\n10469\n10470\n10471\n10472\n10473\n10474\n10475\n10476\n10477\n10478\n10479\n10480\n10481\n10482\n10483\n10484\n10485\n10486\n10487\n10488\n10489\n10490\n10491\n10492\n10493\n10494\n10495\n10496\n10497\n10498\n10499\n10500\n10501\n10502\n10503\n10504\n10505\n10506\n10507\n10508\n10509\n10510\n10511\n10512\n10513\n10514\n10515\n10516\n10517\n10518\n10519\n10520\n10521\n10522\n10523\n10524\n10525\n10526\n10527\n10528\n10529\n10530\n10531\n10532\n10533\n10534\n10535\n10536\n10537\n10538\n10539\n10540\n10541\n10542\n10543\n10544\n10545\n10546\n10547\n10548\n10549\n10550\n10551\n10552\n10553\n10554\n10555\n10556\n10557\n10558\n10559\n10560\n10561\n10562\n10563\n10564\n10565\n10566\n10567\n10568\n10569\n10570\n10571\n10572\n10573\n10574\n10575\n10576\n10577\n10578\n10579\n10580\n10581\n10582\n10583\n10584\n10585\n10586\n10587\n10588\n10589\n10590\n10591\n10592\n10593\n10594\n10595\n10596\n10597\n10598\n10599\n10600\n10601\n10602\n10603\n10604\n10605\n10606\n10607\n10608\n10609\n10610\n10611\n10612\n10613\n10614\n10615\n10616\n10617\n10618\n10619\n10620\n10621\n10622\n10623\n10624\n10625\n10626\n10627\n10628\n10629\n10630\n10631\n10632\n10633\n10634\n10635\n10636\n10637\n10638\n10639\n10640\n10641\n10642\n10643\n10644\n10645\n10646\n10647\n10648\n10649\n10650\n10651\n10652\n10653\n10654\n10655\n10656\n10657\n10658\n10659\n10660\n10661\n10662\n10663\n10664\n10665\n10666\n10667\n10668\n10669\n10670\n10671\n10672\n10673\n10674\n10675\n10676\n10677\n10678\n10679\n10680\n10681\n10682\n10683\n10684\n10685\n10686\n10687\n10688\n10689\n10690\n10691\n10692\n10693\n10694\n10695\n10696\n10697\n10698\n10699\n10700\n10701\n10702\n10703\n10704\n10705\n10706\n10707\n10708\n10709\n10710\n10711\n10712\n10713\n10714\n10715\n10716\n10717\n10718\n10719\n10720\n10721\n10722\n10723\n10724\n10725\n10726\n10727\n10728\n10729
\n10730\n10731\n10732\n10733\n10734\n10735\n10736\n10737\n10738\n10739\n10740\n10741\n10742\n10743\n10744\n10745\n10746\n10747\n10748\n10749\n10750\n10751\n10752\n10753\n10754\n10755\n10756\n10757\n10758\n10759\n10760\n10761\n10762\n10763\n10764\n10765\n10766\n10767\n10768\n10769\n10770\n10771\n10772\n10773\n10774\n10775\n10776\n10777\n10778\n10779\n10780\n10781\n10782\n10783\n10784\n10785\n10786\n10787\n10788\n10789\n10790\n10791\n10792\n10793\n10794\n10795\n10796\n10797\n10798\n10799\n10800\n10801\n10802\n10803\n10804\n10805\n10806\n10807\n10808\n10809\n10810\n10811\n10812\n10813\n10814\n10815\n10816\n10817\n10818\n10819\n10820\n10821\n10822\n10823\n10824\n10825\n10826\n10827\n10828\n10829\n10830\n10831\n10832\n10833\n10834\n10835\n10836\n10837\n10838\n10839\n10840\n10841\n10842\n10843\n10844\n10845\n10846\n10847\n10848\n10849\n10850\n10851\n10852\n10853\n10854\n10855\n10856\n10857\n10858\n10859\n10860\n10861\n10862\n10863\n10864\n10865\n10866\n10867\n10868\n10869\n10870\n10871\n10872\n10873\n10874\n10875\n10876\n10877\n10878\n10879\n10880\n10881\n10882\n10883\n10884\n10885\n10886\n10887\n10888\n10889\n10890\n10891\n10892\n10893\n10894\n10895\n10896\n10897\n10898\n10899\n10900\n10901\n10902\n10903\n10904\n10905\n10906\n10907\n10908\n10909\n10910\n10911\n10912\n10913\n10914\n10915\n10916\n10917\n10918\n10919\n10920\n10921\n10922\n10923\n10924\n10925\n10926\n10927\n10928\n10929\n10930\n10931\n10932\n10933\n10934\n10935\n10936\n10937\n10938\n10939\n10940\n10941\n10942\n10943\n10944\n10945\n10946\n10947\n10948\n10949\n10950\n10951\n10952\n10953\n10954\n10955\n10956\n10957\n10958\n10959\n10960\n10961\n10962\n10963\n10964\n10965\n10966\n10967\n10968\n10969\n10970\n10971\n10972\n10973\n10974\n10975\n10976\n10977\n10978\n10979\n10980\n10981\n10982\n10983\n10984\n10985\n10986\n10987\n10988\n10989\n10990\n10991\n10992\n10993\n10994\n10995\n10996\n10997\n10998\n10999\n11000\n11001\n11002\n11003\n11004\n11005\n11006\n11007\n11008\n11009\n11010\n11011\n11012\n11013\n11014\n110
15\n11016\n11017\n11018\n11019\n11020\n11021\n11022\n11023\n11024\n11025\n11026\n11027\n11028\n11029\n11030\n11031\n11032\n11033\n11034\n11035\n11036\n11037\n11038\n11039\n11040\n11041\n11042\n11043\n11044\n11045\n11046\n11047\n11048\n11049\n11050\n11051\n11052\n11053\n11054\n11055\n11056\n11057\n11058\n11059\n11060\n11061\n11062\n11063\n11064\n11065\n11066\n11067\n11068\n11069\n11070\n11071\n11072\n11073\n11074\n11075\n11076\n11077\n11078\n11079\n11080\n11081\n11082\n11083\n11084\n11085\n11086\n11087\n11088\n11089\n11090\n11091\n11092\n11093\n11094\n11095\n11096\n11097\n11098\n11099\n11100\n11101\n11102\n11103\n11104\n11105\n11106\n11107\n11108\n11109\n11110\n11111\n11112\n11113\n11114\n11115\n11116\n11117\n11118\n11119\n11120\n11121\n11122\n11123\n11124\n11125\n11126\n11127\n11128\n11129\n11130\n11131\n11132\n11133\n11134\n11135\n11136\n11137\n11138\n11139\n11140\n11141\n11142\n11143\n11144\n11145\n11146\n11147\n11148\n11149\n11150\n11151\n11152\n11153\n11154\n11155\n11156\n11157\n11158\n11159\n11160\n11161\n11162\n11163\n11164\n11165\n11166\n11167\n11168\n11169\n11170\n11171\n11172\n11173\n11174\n11175\n11176\n11177\n11178\n11179\n11180\n11181\n11182\n11183\n11184\n11185\n11186\n11187\n11188\n11189\n11190\n11191\n11192\n11193\n11194\n11195\n11196\n11197\n11198\n11199\n11200\n11201\n11202\n11203\n11204\n11205\n11206\n11207\n11208\n11209\n11210\n11211\n11212\n11213\n11214\n11215\n11216\n11217\n11218\n11219\n11220\n11221\n11222\n11223\n11224\n11225\n11226\n11227\n11228\n11229\n11230\n11231\n11232\n11233\n11234\n11235\n11236\n11237\n11238\n11239\n11240\n11241\n11242\n11243\n11244\n11245\n11246\n11247\n11248\n11249\n11250\n11251\n11252\n11253\n11254\n11255\n11256\n11257\n11258\n11259\n11260\n11261\n11262\n11263\n11264\n11265\n11266\n11267\n11268\n11269\n11270\n11271\n11272\n11273\n11274\n11275\n11276\n11277\n11278\n11279\n11280\n11281\n11282\n11283\n11284\n11285\n11286\n11287\n11288\n11289\n11290\n11291\n11292\n11293\n11294\n11295\n11296\n11297\n11298\n11299\n11300\n1
1301\n11302\n11303\n11304\n11305\n11306\n11307\n11308\n11309\n11310\n11311\n11312\n11313\n11314\n11315\n11316\n11317\n11318\n11319\n11320\n11321\n11322\n11323\n11324\n11325\n11326\n11327\n11328\n11329\n11330\n11331\n11332\n11333\n11334\n11335\n11336\n11337\n11338\n11339\n11340\n11341\n11342\n11343\n11344\n11345\n11346\n11347\n11348\n11349\n11350\n11351\n11352\n11353\n11354\n11355\n11356\n11357\n11358\n11359\n11360\n11361\n11362\n11363\n11364\n11365\n11366\n11367\n11368\n11369\n11370\n11371\n11372\n11373\n11374\n11375\n11376\n11377\n11378\n11379\n11380\n11381\n11382\n11383\n11384\n11385\n11386\n11387\n11388\n11389\n11390\n11391\n11392\n11393\n11394\n11395\n11396\n11397\n11398\n11399\n11400\n11401\n11402\n11403\n11404\n11405\n11406\n11407\n11408\n11409\n11410\n11411\n11412\n11413\n11414\n11415\n11416\n11417\n11418\n11419\n11420\n11421\n11422\n11423\n11424\n11425\n11426\n11427\n11428\n11429\n11430\n11431\n11432\n11433\n11434\n11435\n11436\n11437\n11438\n11439\n11440\n11441\n11442\n11443\n11444\n11445\n11446\n11447\n11448\n11449\n11450\n11451\n11452\n11453\n11454\n11455\n11456\n11457\n11458\n11459\n11460\n11461\n11462\n11463\n11464\n11465\n11466\n11467\n11468\n11469\n11470\n11471\n11472\n11473\n11474\n11475\n11476\n11477\n11478\n11479\n11480\n11481\n11482\n11483\n11484\n11485\n11486\n11487\n11488\n11489\n11490\n11491\n11492\n11493\n11494\n11495\n11496\n11497\n11498\n11499\n11500\n11501\n11502\n11503\n11504\n11505\n11506\n11507\n11508\n11509\n11510\n11511\n11512\n11513\n11514\n11515\n11516\n11517\n11518\n11519\n11520\n11521\n11522\n11523\n11524\n11525\n11526\n11527\n11528\n11529\n11530\n11531\n11532\n11533\n11534\n11535\n11536\n11537\n11538\n11539\n11540\n11541\n11542\n11543\n11544\n11545\n11546\n11547\n11548\n11549\n11550\n11551\n11552\n11553\n11554\n11555\n11556\n11557\n11558\n11559\n11560\n11561\n11562\n11563\n11564\n11565\n11566\n11567\n11568\n11569\n11570\n11571\n11572\n11573\n11574\n11575\n11576\n11577\n11578\n11579\n11580\n11581\n11582\n11583\n11584\n11585\n11586\
n11587\n11588\n11589\n11590\n11591\n11592\n11593\n11594\n11595\n11596\n11597\n11598\n11599\n11600\n11601\n11602\n11603\n11604\n11605\n11606\n11607\n11608\n11609\n11610\n11611\n11612\n11613\n11614\n11615\n11616\n11617\n11618\n11619\n11620\n11621\n11622\n11623\n11624\n11625\n11626\n11627\n11628\n11629\n11630\n11631\n11632\n11633\n11634\n11635\n11636\n11637\n11638\n11639\n11640\n11641\n11642\n11643\n11644\n11645\n11646\n11647\n11648\n11649\n11650\n11651\n11652\n11653\n11654\n11655\n11656\n11657\n11658\n11659\n11660\n11661\n11662\n11663\n11664\n11665\n11666\n11667\n11668\n11669\n11670\n11671\n11672\n11673\n11674\n11675\n11676\n11677\n11678\n11679\n11680\n11681\n11682\n11683\n11684\n11685\n11686\n11687\n11688\n11689\n11690\n11691\n11692\n11693\n11694\n11695\n11696\n11697\n11698\n11699\n11700\n11701\n11702\n11703\n11704\n11705\n11706\n11707\n11708\n11709\n11710\n11711\n11712\n11713\n11714\n11715\n11716\n11717\n11718\n11719\n11720\n11721\n11722\n11723\n11724\n11725\n11726\n11727\n11728\n11729\n11730\n11731\n11732\n11733\n11734\n11735\n11736\n11737\n11738\n11739\n11740\n11741\n11742\n11743\n11744\n11745\n11746\n11747\n11748\n11749\n11750\n11751\n11752\n11753\n11754\n11755\n11756\n11757\n11758\n11759\n11760\n11761\n11762\n11763\n11764\n11765\n11766\n11767\n11768\n11769\n11770\n11771\n11772\n11773\n11774\n11775\n11776\n11777\n11778\n11779\n11780\n11781\n11782\n11783\n11784\n11785\n11786\n11787\n11788\n11789\n11790\n11791\n11792\n11793\n11794\n11795\n11796\n11797\n11798\n11799\n11800\n11801\n11802\n11803\n11804\n11805\n11806\n11807\n11808\n11809\n11810\n11811\n11812\n11813\n11814\n11815\n11816\n11817\n11818\n11819\n11820\n11821\n11822\n11823\n11824\n11825\n11826\n11827\n11828\n11829\n11830\n11831\n11832\n11833\n11834\n11835\n11836\n11837\n11838\n11839\n11840\n11841\n11842\n11843\n11844\n11845\n11846\n11847\n11848\n11849\n11850\n11851\n11852\n11853\n11854\n11855\n11856\n11857\n11858\n11859\n11860\n11861\n11862\n11863\n11864\n11865\n11866\n11867\n11868\n11869\n11870\n11871\n1187
2\n11873\n11874\n11875\n11876\n11877\n11878\n11879\n11880\n11881\n11882\n11883\n11884\n11885\n11886\n11887\n11888\n11889\n11890\n11891\n11892\n11893\n11894\n11895\n11896\n11897\n11898\n11899\n11900\n11901\n11902\n11903\n11904\n11905\n11906\n11907\n11908\n11909\n11910\n11911\n11912\n11913\n11914\n11915\n11916\n11917\n11918\n11919\n11920\n11921\n11922\n11923\n11924\n11925\n11926\n11927\n11928\n11929\n11930\n11931\n11932\n11933\n11934\n11935\n11936\n11937\n11938\n11939\n11940\n11941\n11942\n11943\n11944\n11945\n11946\n11947\n11948\n11949\n11950\n11951\n11952\n11953\n11954\n11955\n11956\n11957\n11958\n11959\n11960\n11961\n11962\n11963\n11964\n11965\n11966\n11967\n11968\n11969\n11970\n11971\n11972\n11973\n11974\n11975\n11976\n11977\n11978\n11979\n11980\n11981\n11982\n11983\n11984\n11985\n11986\n11987\n11988\n11989\n11990\n11991\n11992\n11993\n11994\n11995\n11996\n11997\n11998\n11999\n12000\n12001\n12002\n12003\n12004\n12005\n12006\n12007\n12008\n12009\n12010\n12011\n12012\n12013\n12014\n12015\n12016\n12017\n12018\n12019\n12020\n12021\n12022\n12023\n12024\n12025\n12026\n12027\n12028\n12029\n12030\n12031\n12032\n12033\n12034\n12035\n12036\n12037\n12038\n12039\n12040\n12041\n12042\n12043\n12044\n12045\n12046\n12047\n12048\n12049\n12050\n12051\n12052\n12053\n12054\n12055\n12056\n12057\n12058\n12059\n12060\n12061\n12062\n12063\n12064\n12065\n12066\n12067\n12068\n12069\n12070\n12071\n12072\n12073\n12074\n12075\n12076\n12077\n12078\n12079\n12080\n12081\n12082\n12083\n12084\n12085\n12086\n12087\n12088\n12089\n12090\n12091\n12092\n12093\n12094\n12095\n12096\n12097\n12098\n12099\n12100\n12101\n12102\n12103\n12104\n12105\n12106\n12107\n12108\n12109\n12110\n12111\n12112\n12113\n12114\n12115\n12116\n12117\n12118\n12119\n12120\n12121\n12122\n12123\n12124\n12125\n12126\n12127\n12128\n12129\n12130\n12131\n12132\n12133\n12134\n12135\n12136\n12137\n12138\n12139\n12140\n12141\n12142\n12143\n12144\n12145\n12146\n12147\n12148\n12149\n12150\n12151\n12152\n12153\n12154\n12155\n12156\n12157\n12
158\n12159\n12160\n12161\n12162\n12163\n12164\n12165\n12166\n12167\n12168\n12169\n12170\n12171\n12172\n12173\n12174\n12175\n12176\n12177\n12178\n12179\n12180\n12181\n12182\n12183\n12184\n12185\n12186\n12187\n12188\n12189\n12190\n12191\n12192\n12193\n12194\n12195\n12196\n12197\n12198\n12199\n12200\n12201\n12202\n12203\n12204\n12205\n12206\n12207\n12208\n12209\n12210\n12211\n12212\n12213\n12214\n12215\n12216\n12217\n12218\n12219\n12220\n12221\n12222\n12223\n12224\n12225\n12226\n12227\n12228\n12229\n12230\n12231\n12232\n12233\n12234\n12235\n12236\n12237\n12238\n12239\n12240\n12241\n12242\n12243\n12244\n12245\n12246\n12247\n12248\n12249\n12250\n12251\n12252\n12253\n12254\n12255\n12256\n12257\n12258\n12259\n12260\n12261\n12262\n12263\n12264\n12265\n12266\n12267\n12268\n12269\n12270\n12271\n12272\n12273\n12274\n12275\n12276\n12277\n12278\n12279\n12280\n12281\n12282\n12283\n12284\n12285\n12286\n12287\n12288\n12289\n12290\n12291\n12292\n12293\n12294\n12295\n12296\n12297\n12298\n12299\n12300\n12301\n12302\n12303\n12304\n12305\n12306\n12307\n12308\n12309\n12310\n12311\n12312\n12313\n12314\n12315\n12316\n12317\n12318\n12319\n12320\n12321\n12322\n12323\n12324\n12325\n12326\n12327\n12328\n12329\n12330\n12331\n12332\n12333\n12334\n12335\n12336\n12337\n12338\n12339\n12340\n12341\n12342\n12343\n12344\n12345\n12346\n12347\n12348\n12349\n12350\n12351\n12352\n12353\n12354\n12355\n12356\n12357\n12358\n12359\n12360\n12361\n12362\n12363\n12364\n12365\n12366\n12367\n12368\n12369\n12370\n12371\n12372\n12373\n12374\n12375\n12376\n12377\n12378\n12379\n12380\n12381\n12382\n12383\n12384\n12385\n12386\n12387\n12388\n12389\n12390\n12391\n12392\n12393\n12394\n12395\n12396\n12397\n12398\n12399\n12400\n12401\n12402\n12403\n12404\n12405\n12406\n12407\n12408\n12409\n12410\n12411\n12412\n12413\n12414\n12415\n12416\n12417\n12418\n12419\n12420\n12421\n12422\n12423\n12424\n12425\n12426\n12427\n12428\n12429\n12430\n12431\n12432\n12433\n12434\n12435\n12436\n12437\n12438\n12439\n12440\n12441\n12442\n12443\n
12444\n12445\n12446\n12447\n12448\n12449\n12450\n12451\n12452\n12453\n12454\n12455\n12456\n12457\n12458\n12459\n12460\n12461\n12462\n12463\n12464\n12465\n12466\n12467\n12468\n12469\n12470\n12471\n12472\n12473\n12474\n12475\n12476\n12477\n12478\n12479\n12480\n12481\n12482\n12483\n12484\n12485\n12486\n12487\n12488\n12489\n12490\n12491\n12492\n12493\n12494\n12495\n12496\n12497\n12498\n12499\n12500\n12501\n12502\n12503\n12504\n12505\n12506\n12507\n12508\n12509\n12510\n12511\n12512\n12513\n12514\n12515\n12516\n12517\n12518\n12519\n12520\n12521\n12522\n12523\n12524\n12525\n12526\n12527\n12528\n12529\n12530\n12531\n12532\n12533\n12534\n12535\n12536\n12537\n12538\n12539\n12540\n12541\n12542\n12543\n12544\n12545\n12546\n12547\n12548\n12549\n12550\n12551\n12552\n12553\n12554\n12555\n12556\n12557\n12558\n12559\n12560\n12561\n12562\n12563\n12564\n12565\n12566\n12567\n12568\n12569\n12570\n12571\n12572\n12573\n12574\n12575\n12576\n12577\n12578\n12579\n12580\n12581\n12582\n12583\n12584\n12585\n12586\n12587\n12588\n12589\n12590\n12591\n12592\n12593\n12594\n12595\n12596\n12597\n12598\n12599\n12600\n12601\n12602\n12603\n12604\n12605\n12606\n12607\n12608\n12609\n12610\n12611\n12612\n12613\n12614\n12615\n12616\n12617\n12618\n12619\n12620\n12621\n12622\n12623\n12624\n12625\n12626\n12627\n12628\n12629\n12630\n12631\n12632\n12633\n12634\n12635\n12636\n12637\n12638\n12639\n12640\n12641\n12642\n12643\n12644\n12645\n12646\n12647\n12648\n12649\n12650\n12651\n12652\n12653\n12654\n12655\n12656\n12657\n12658\n12659\n12660\n12661\n12662\n12663\n12664\n12665\n12666\n12667\n12668\n12669\n12670\n12671\n12672\n12673\n12674\n12675\n12676\n12677\n12678\n12679\n12680\n12681\n12682\n12683\n12684\n12685\n12686\n12687\n12688\n12689\n12690\n12691\n12692\n12693\n12694\n12695\n12696\n12697\n12698\n12699\n12700\n12701\n12702\n12703\n12704\n12705\n12706\n12707\n12708\n12709\n12710\n12711\n12712\n12713\n12714\n12715\n12716\n12717\n12718\n12719\n12720\n12721\n12722\n12723\n12724\n12725\n12726\n12727\n12728\n12729
\n12730\n12731\n12732\n12733\n12734\n12735\n12736\n12737\n12738\n12739\n12740\n12741\n12742\n12743\n12744\n12745\n12746\n12747\n12748\n12749\n12750\n12751\n12752\n12753\n12754\n12755\n12756\n12757\n12758\n12759\n12760\n12761\n12762\n12763\n12764\n12765\n12766\n12767\n12768\n12769\n12770\n12771\n12772\n12773\n12774\n12775\n12776\n12777\n12778\n12779\n12780\n12781\n12782\n12783\n12784\n12785\n12786\n12787\n12788\n12789\n12790\n12791\n12792\n12793\n12794\n12795\n12796\n12797\n12798\n12799\n12800\n12801\n12802\n12803\n12804\n12805\n12806\n12807\n12808\n12809\n12810\n12811\n12812\n12813\n12814\n12815\n12816\n12817\n12818\n12819\n12820\n12821\n12822\n12823\n12824\n12825\n12826\n12827\n12828\n12829\n12830\n12831\n12832\n12833\n12834\n12835\n12836\n12837\n12838\n12839\n12840\n12841\n12842\n12843\n12844\n12845\n12846\n12847\n12848\n12849\n12850\n12851\n12852\n12853\n12854\n12855\n12856\n12857\n12858\n12859\n12860\n12861\n12862\n12863\n12864\n12865\n12866\n12867\n12868\n12869\n12870\n12871\n12872\n12873\n12874\n12875\n12876\n12877\n12878\n12879\n12880\n12881\n12882\n12883\n12884\n12885\n12886\n12887\n12888\n12889\n12890\n12891\n12892\n12893\n12894\n12895\n12896\n12897\n12898\n12899\n12900\n12901\n12902\n12903\n12904\n12905\n12906\n12907\n12908\n12909\n12910\n12911\n12912\n12913\n12914\n12915\n12916\n12917\n12918\n12919\n12920\n12921\n12922\n12923\n12924\n12925\n12926\n12927\n12928\n12929\n12930\n12931\n12932\n12933\n12934\n12935\n12936\n12937\n12938\n12939\n12940\n12941\n12942\n12943\n12944\n12945\n12946\n12947\n12948\n12949\n12950\n12951\n12952\n12953\n12954\n12955\n12956\n12957\n12958\n12959\n12960\n12961\n12962\n12963\n12964\n12965\n12966\n12967\n12968\n12969\n12970\n12971\n12972\n12973\n12974\n12975\n12976\n12977\n12978\n12979\n12980\n12981\n12982\n12983\n12984\n12985\n12986\n12987\n12988\n12989\n12990\n12991\n12992\n12993\n12994\n12995\n12996\n12997\n12998\n12999\n13000\n13001\n13002\n13003\n13004\n13005\n13006\n13007\n13008\n13009\n13010\n13011\n13012\n13013\n13014\n130
15\n13016\n13017\n13018\n13019\n13020\n13021\n13022\n13023\n13024\n13025\n13026\n13027\n13028\n13029\n13030\n13031\n13032\n13033\n13034\n13035\n13036\n13037\n13038\n13039\n13040\n13041\n13042\n13043\n13044\n13045\n13046\n13047\n13048\n13049\n13050\n13051\n13052\n13053\n13054\n13055\n13056\n13057\n13058\n13059\n13060\n13061\n13062\n13063\n13064\n13065\n13066\n13067\n13068\n13069\n13070\n13071\n13072\n13073\n13074\n13075\n13076\n13077\n13078\n13079\n13080\n13081\n13082\n13083\n13084\n13085\n13086\n13087\n13088\n13089\n13090\n13091\n13092\n13093\n13094\n13095\n13096\n13097\n13098\n13099\n13100\n13101\n13102\n13103\n13104\n13105\n13106\n13107\n13108\n13109\n13110\n13111\n13112\n13113\n13114\n13115\n13116\n13117\n13118\n13119\n13120\n13121\n13122\n13123\n13124\n13125\n13126\n13127\n13128\n13129\n13130\n13131\n13132\n13133\n13134\n13135\n13136\n13137\n13138\n13139\n13140\n13141\n13142\n13143\n13144\n13145\n13146\n13147\n13148\n13149\n13150\n13151\n13152\n13153\n13154\n13155\n13156\n13157\n13158\n13159\n13160\n13161\n13162\n13163\n13164\n13165\n13166\n13167\n13168\n13169\n13170\n13171\n13172\n13173\n13174\n13175\n13176\n13177\n13178\n13179\n13180\n13181\n13182\n13183\n13184\n13185\n13186\n13187\n13188\n13189\n13190\n13191\n13192\n13193\n13194\n13195\n13196\n13197\n13198\n13199\n13200\n13201\n13202\n13203\n13204\n13205\n13206\n13207\n13208\n13209\n13210\n13211\n13212\n13213\n13214\n13215\n13216\n13217\n13218\n13219\n13220\n13221\n13222\n13223\n13224\n13225\n13226\n13227\n13228\n13229\n13230\n13231\n13232\n13233\n13234\n13235\n13236\n13237\n13238\n13239\n13240\n13241\n13242\n13243\n13244\n13245\n13246\n13247\n13248\n13249\n13250\n13251\n13252\n13253\n13254\n13255\n13256\n13257\n13258\n13259\n13260\n13261\n13262\n13263\n13264\n13265\n13266\n13267\n13268\n13269\n13270\n13271\n13272\n13273\n13274\n13275\n13276\n13277\n13278\n13279\n13280\n13281\n13282\n13283\n13284\n13285\n13286\n13287\n13288\n13289\n13290\n13291\n13292\n13293\n13294\n13295\n13296\n13297\n13298\n13299\n13300\n1
3301\n13302\n13303\n13304\n13305\n13306\n13307\n13308\n13309\n13310\n13311\n13312\n13313\n13314\n13315\n13316\n13317\n13318\n13319\n13320\n13321\n13322\n13323\n13324\n13325\n13326\n13327\n13328\n13329\n13330\n13331\n13332\n13333\n13334\n13335\n13336\n13337\n13338\n13339\n13340\n13341\n13342\n13343\n13344\n13345\n13346\n13347\n13348\n13349\n13350\n13351\n13352\n13353\n13354\n13355\n13356\n13357\n13358\n13359\n13360\n13361\n13362\n13363\n13364\n13365\n13366\n13367\n13368\n13369\n13370\n13371\n13372\n13373\n13374\n13375\n13376\n13377\n13378\n13379\n13380\n13381\n13382\n13383\n13384\n13385\n13386\n13387\n13388\n13389\n13390\n13391\n13392\n13393\n13394\n13395\n13396\n13397\n13398\n13399\n13400\n13401\n13402\n13403\n13404\n13405\n13406\n13407\n13408\n13409\n13410\n13411\n13412\n13413\n13414\n13415\n13416\n13417\n13418\n13419\n13420\n13421\n13422\n13423\n13424\n13425\n13426\n13427\n13428\n13429\n13430\n13431\n13432\n13433\n13434\n13435\n13436\n13437\n13438\n13439\n13440\n13441\n13442\n13443\n13444\n13445\n13446\n13447\n13448\n13449\n13450\n13451\n13452\n13453\n13454\n13455\n13456\n13457\n13458\n13459\n13460\n13461\n13462\n13463\n13464\n13465\n13466\n13467\n13468\n13469\n13470\n13471\n13472\n13473\n13474\n13475\n13476\n13477\n13478\n13479\n13480\n13481\n13482\n13483\n13484\n13485\n13486\n13487\n13488\n13489\n13490\n13491\n13492\n13493\n13494\n13495\n13496\n13497\n13498\n13499\n13500\n13501\n13502\n13503\n13504\n13505\n13506\n13507\n13508\n13509\n13510\n13511\n13512\n13513\n13514\n13515\n13516\n13517\n13518\n13519\n13520\n13521\n13522\n13523\n13524\n13525\n13526\n13527\n13528\n13529\n13530\n13531\n13532\n13533\n13534\n13535\n13536\n13537\n13538\n13539\n13540\n13541\n13542\n13543\n13544\n13545\n13546\n13547\n13548\n13549\n13550\n13551\n13552\n13553\n13554\n13555\n13556\n13557\n13558\n13559\n13560\n13561\n13562\n13563\n13564\n13565\n13566\n13567\n13568\n13569\n13570\n13571\n13572\n13573\n13574\n13575\n13576\n13577\n13578\n13579\n13580\n13581\n13582\n13583\n13584\n13585\n13586\
n13587\n13588\n13589\n13590\n13591\n13592\n13593\n13594\n13595\n13596\n13597\n13598\n13599\n13600\n13601\n13602\n13603\n13604\n13605\n13606\n13607\n13608\n13609\n13610\n13611\n13612\n13613\n13614\n13615\n13616\n13617\n13618\n13619\n13620\n13621\n13622\n13623\n13624\n13625\n13626\n13627\n13628\n13629\n13630\n13631\n13632\n13633\n13634\n13635\n13636\n13637\n13638\n13639\n13640\n13641\n13642\n13643\n13644\n13645\n13646\n13647\n13648\n13649\n13650\n13651\n13652\n13653\n13654\n13655\n13656\n13657\n13658\n13659\n13660\n13661\n13662\n13663\n13664\n13665\n13666\n13667\n13668\n13669\n13670\n13671\n13672\n13673\n13674\n13675\n13676\n13677\n13678\n13679\n13680\n13681\n13682\n13683\n13684\n13685\n13686\n13687\n13688\n13689\n13690\n13691\n13692\n13693\n13694\n13695\n13696\n13697\n13698\n13699\n13700\n13701\n13702\n13703\n13704\n13705\n13706\n13707\n13708\n13709\n13710\n13711\n13712\n13713\n13714\n13715\n13716\n13717\n13718\n13719\n13720\n13721\n13722\n13723\n13724\n13725\n13726\n13727\n13728\n13729\n13730\n13731\n13732\n13733\n13734\n13735\n13736\n13737\n13738\n13739\n13740\n13741\n13742\n13743\n13744\n13745\n13746\n13747\n13748\n13749\n13750\n13751\n13752\n13753\n13754\n13755\n13756\n13757\n13758\n13759\n13760\n13761\n13762\n13763\n13764\n13765\n13766\n13767\n13768\n13769\n13770\n13771\n13772\n13773\n13774\n13775\n13776\n13777\n13778\n13779\n13780\n13781\n13782\n13783\n13784\n13785\n13786\n13787\n13788\n13789\n13790\n13791\n13792\n13793\n13794\n13795\n13796\n13797\n13798\n13799\n13800\n13801\n13802\n13803\n13804\n13805\n13806\n13807\n13808\n13809\n13810\n13811\n13812\n13813\n13814\n13815\n13816\n13817\n13818\n13819\n13820\n13821\n13822\n13823\n13824\n13825\n13826\n13827\n13828\n13829\n13830\n13831\n13832\n13833\n13834\n13835\n13836\n13837\n13838\n13839\n13840\n13841\n13842\n13843\n13844\n13845\n13846\n13847\n13848\n13849\n13850\n13851\n13852\n13853\n13854\n13855\n13856\n13857\n13858\n13859\n13860\n13861\n13862\n13863\n13864\n13865\n13866\n13867\n13868\n13869\n13870\n13871\n1387
2\n13873\n13874\n13875\n13876\n13877\n13878\n13879\n13880\n13881\n13882\n13883\n13884\n13885\n13886\n13887\n13888\n13889\n13890\n13891\n13892\n13893\n13894\n13895\n13896\n13897\n13898\n13899\n13900\n13901\n13902\n13903\n13904\n13905\n13906\n13907\n13908\n13909\n13910\n13911\n13912\n13913\n13914\n13915\n13916\n13917\n13918\n13919\n13920\n13921\n13922\n13923\n13924\n13925\n13926\n13927\n13928\n13929\n13930\n13931\n13932\n13933\n13934\n13935\n13936\n13937\n13938\n13939\n13940\n13941\n13942\n13943\n13944\n13945\n13946\n13947\n13948\n13949\n13950\n13951\n13952\n13953\n13954\n13955\n13956\n13957\n13958\n13959\n13960\n13961\n13962\n13963\n13964\n13965\n13966\n13967\n13968\n13969\n13970\n13971\n13972\n13973\n13974\n13975\n13976\n13977\n13978\n13979\n13980\n13981\n13982\n13983\n13984\n13985\n13986\n13987\n13988\n13989\n13990\n13991\n13992\n13993\n13994\n13995\n13996\n13997\n13998\n13999\n14000\n14001\n14002\n14003\n14004\n14005\n14006\n14007\n14008\n14009\n14010\n14011\n14012\n14013\n14014\n14015\n14016\n14017\n14018\n14019\n14020\n14021\n14022\n14023\n14024\n14025\n14026\n14027\n14028\n14029\n14030\n14031\n14032\n14033\n14034\n14035\n14036\n14037\n14038\n14039\n14040\n14041\n14042\n14043\n14044\n14045\n14046\n14047\n14048\n14049\n14050\n14051\n14052\n14053\n14054\n14055\n14056\n14057\n14058\n14059\n14060\n14061\n14062\n14063\n14064\n14065\n14066\n14067\n14068\n14069\n14070\n14071\n14072\n14073\n14074\n14075\n14076\n14077\n14078\n14079\n14080\n14081\n14082\n14083\n14084\n14085\n14086\n14087\n14088\n14089\n14090\n14091\n14092\n14093\n14094\n14095\n14096\n14097\n14098\n14099\n14100\n14101\n14102\n14103\n14104\n14105\n14106\n14107\n14108\n14109\n14110\n14111\n14112\n14113\n14114\n14115\n14116\n14117\n14118\n14119\n14120\n14121\n14122\n14123\n14124\n14125\n14126\n14127\n14128\n14129\n14130\n14131\n14132\n14133\n14134\n14135\n14136\n14137\n14138\n14139\n14140\n14141\n14142\n14143\n14144\n14145\n14146\n14147\n14148\n14149\n14150\n14151\n14152\n14153\n14154\n14155\n14156\n14157\n14
158\n14159\n14160\n14161\n14162\n14163\n14164\n14165\n14166\n14167\n14168\n14169\n14170\n14171\n14172\n14173\n14174\n14175\n14176\n14177\n14178\n14179\n14180\n14181\n14182\n14183\n14184\n14185\n14186\n14187\n14188\n14189\n14190\n14191\n14192\n14193\n14194\n14195\n14196\n14197\n14198\n14199\n14200\n14201\n14202\n14203\n14204\n14205\n14206\n14207\n14208\n14209\n14210\n14211\n14212\n14213\n14214\n14215\n14216\n14217\n14218\n14219\n14220\n14221\n14222\n14223\n14224\n14225\n14226\n14227\n14228\n14229\n14230\n14231\n14232\n14233\n14234\n14235\n14236\n14237\n14238\n14239\n14240\n14241\n14242\n14243\n14244\n14245\n14246\n14247\n14248\n14249\n14250\n14251\n14252\n14253\n14254\n14255\n14256\n14257\n14258\n14259\n14260\n14261\n14262\n14263\n14264\n14265\n14266\n14267\n14268\n14269\n14270\n14271\n14272\n14273\n14274\n14275\n14276\n14277\n14278\n14279\n14280\n14281\n14282\n14283\n14284\n14285\n14286\n14287\n14288\n14289\n14290\n14291\n14292\n14293\n14294\n14295\n14296\n14297\n14298\n14299\n14300\n14301\n14302\n14303\n14304\n14305\n14306\n14307\n14308\n14309\n14310\n14311\n14312\n14313\n14314\n14315\n14316\n14317\n14318\n14319\n14320\n14321\n14322\n14323\n14324\n14325\n14326\n14327\n14328\n14329\n14330\n14331\n14332\n14333\n14334\n14335\n14336\n14337\n14338\n14339\n14340\n14341\n14342\n14343\n14344\n14345\n14346\n14347\n14348\n14349\n14350\n14351\n14352\n14353\n14354\n14355\n14356\n14357\n14358\n14359\n14360\n14361\n14362\n14363\n14364\n14365\n14366\n14367\n14368\n14369\n14370\n14371\n14372\n14373\n14374\n14375\n14376\n14377\n14378\n14379\n14380\n14381\n14382\n14383\n14384\n14385\n14386\n14387\n14388\n14389\n14390\n14391\n14392\n14393\n14394\n14395\n14396\n14397\n14398\n14399\n14400\n14401\n14402\n14403\n14404\n14405\n14406\n14407\n14408\n14409\n14410\n14411\n14412\n14413\n14414\n14415\n14416\n14417\n14418\n14419\n14420\n14421\n14422\n14423\n14424\n14425\n14426\n14427\n14428\n14429\n14430\n14431\n14432\n14433\n14434\n14435\n14436\n14437\n14438\n14439\n14440\n14441\n14442\n14443\n
14444\n14445\n14446\n14447\n14448\n14449\n14450\n14451\n14452\n14453\n14454\n14455\n14456\n14457\n14458\n14459\n14460\n14461\n14462\n14463\n14464\n14465\n14466\n14467\n14468\n14469\n14470\n14471\n14472\n14473\n14474\n14475\n14476\n14477\n14478\n14479\n14480\n14481\n14482\n14483\n14484\n14485\n14486\n14487\n14488\n14489\n14490\n14491\n14492\n14493\n14494\n14495\n14496\n14497\n14498\n14499\n14500\n14501\n14502\n14503\n14504\n14505\n14506\n14507\n14508\n14509\n14510\n14511\n14512\n14513\n14514\n14515\n14516\n14517\n14518\n14519\n14520\n14521\n14522\n14523\n14524\n14525\n14526\n14527\n14528\n14529\n14530\n14531\n14532\n14533\n14534\n14535\n14536\n14537\n14538\n14539\n14540\n14541\n14542\n14543\n14544\n14545\n14546\n14547\n14548\n14549\n14550\n14551\n14552\n14553\n14554\n14555\n14556\n14557\n14558\n14559\n14560\n14561\n14562\n14563\n14564\n14565\n14566\n14567\n14568\n14569\n14570\n14571\n14572\n14573\n14574\n14575\n14576\n14577\n14578\n14579\n14580\n14581\n14582\n14583\n14584\n14585\n14586\n14587\n14588\n14589\n14590\n14591\n14592\n14593\n14594\n14595\n14596\n14597\n14598\n14599\n14600\n14601\n14602\n14603\n14604\n14605\n14606\n14607\n14608\n14609\n14610\n14611\n14612\n14613\n14614\n14615\n14616\n14617\n14618\n14619\n14620\n14621\n14622\n14623\n14624\n14625\n14626\n14627\n14628\n14629\n14630\n14631\n14632\n14633\n14634\n14635\n14636\n14637\n14638\n14639\n14640\n14641\n14642\n14643\n14644\n14645\n14646\n14647\n14648\n14649\n14650\n14651\n14652\n14653\n14654\n14655\n14656\n14657\n14658\n14659\n14660\n14661\n14662\n14663\n14664\n14665\n14666\n14667\n14668\n14669\n14670\n14671\n14672\n14673\n14674\n14675\n14676\n14677\n14678\n14679\n14680\n14681\n14682\n14683\n14684\n14685\n14686\n14687\n14688\n14689\n14690\n14691\n14692\n14693\n14694\n14695\n14696\n14697\n14698\n14699\n14700\n14701\n14702\n14703\n14704\n14705\n14706\n14707\n14708\n14709\n14710\n14711\n14712\n14713\n14714\n14715\n14716\n14717\n14718\n14719\n14720\n14721\n14722\n14723\n14724\n14725\n14726\n14727\n14728\n14729
\n14730\n14731\n14732\n14733\n14734\n14735\n14736\n14737\n14738\n14739\n14740\n14741\n14742\n14743\n14744\n14745\n14746\n14747\n14748\n14749\n14750\n14751\n14752\n14753\n14754\n14755\n14756\n14757\n14758\n14759\n14760\n14761\n14762\n14763\n14764\n14765\n14766\n14767\n14768\n14769\n14770\n14771\n14772\n14773\n14774\n14775\n14776\n14777\n14778\n14779\n14780\n14781\n14782\n14783\n14784\n14785\n14786\n14787\n14788\n14789\n14790\n14791\n14792\n14793\n14794\n14795\n14796\n14797\n14798\n14799\n14800\n14801\n14802\n14803\n14804\n14805\n14806\n14807\n14808\n14809\n14810\n14811\n14812\n14813\n14814\n14815\n14816\n14817\n14818\n14819\n14820\n14821\n14822\n14823\n14824\n14825\n14826\n14827\n14828\n14829\n14830\n14831\n14832\n14833\n14834\n14835\n14836\n14837\n14838\n14839\n14840\n14841\n14842\n14843\n14844\n14845\n14846\n14847\n14848\n14849\n14850\n14851\n14852\n14853\n14854\n14855\n14856\n14857\n14858\n14859\n14860\n14861\n14862\n14863\n14864\n14865\n14866\n14867\n14868\n14869\n14870\n14871\n14872\n14873\n14874\n14875\n14876\n14877\n14878\n14879\n14880\n14881\n14882\n14883\n14884\n14885\n14886\n14887\n14888\n14889\n14890\n14891\n14892\n14893\n14894\n14895\n14896\n14897\n14898\n14899\n14900\n14901\n14902\n14903\n14904\n14905\n14906\n14907\n14908\n14909\n14910\n14911\n14912\n14913\n14914\n14915\n14916\n14917\n14918\n14919\n14920\n14921\n14922\n14923\n14924\n14925\n14926\n14927\n14928\n14929\n14930\n14931\n14932\n14933\n14934\n14935\n14936\n14937\n14938\n14939\n14940\n14941\n14942\n14943\n14944\n14945\n14946\n14947\n14948\n14949\n14950\n14951\n14952\n14953\n14954\n14955\n14956\n14957\n14958\n14959\n14960\n14961\n14962\n14963\n14964\n14965\n14966\n14967\n14968\n14969\n14970\n14971\n14972\n14973\n14974\n14975\n14976\n14977\n14978\n14979\n14980\n14981\n14982\n14983\n14984\n14985\n14986\n14987\n14988\n14989\n14990\n14991\n14992\n14993\n14994\n14995\n14996\n14997\n14998\n14999\n15000\n15001\n15002\n15003\n15004\n15005\n15006\n15007\n15008\n15009\n15010\n15011\n15012\n15013\n15014\n150
15\n15016\n15017\n15018\n15019\n15020\n15021\n15022\n15023\n15024\n15025\n15026\n15027\n15028\n15029\n15030\n15031\n15032\n15033\n15034\n15035\n15036\n15037\n15038\n15039\n15040\n15041\n15042\n15043\n15044\n15045\n15046\n15047\n15048\n15049\n15050\n15051\n15052\n15053\n15054\n15055\n15056\n15057\n15058\n15059\n15060\n15061\n15062\n15063\n15064\n15065\n15066\n15067\n15068\n15069\n15070\n15071\n15072\n15073\n15074\n15075\n15076\n15077\n15078\n15079\n15080\n15081\n15082\n15083\n15084\n15085\n15086\n15087\n15088\n15089\n15090\n15091\n15092\n15093\n15094\n15095\n15096\n15097\n15098\n15099\n15100\n15101\n15102\n15103\n15104\n15105\n15106\n15107\n15108\n15109\n15110\n15111\n15112\n15113\n15114\n15115\n15116\n15117\n15118\n15119\n15120\n15121\n15122\n15123\n15124\n15125\n15126\n15127\n15128\n15129\n15130\n15131\n15132\n15133\n15134\n15135\n15136\n15137\n15138\n15139\n15140\n15141\n15142\n15143\n15144\n15145\n15146\n15147\n15148\n15149\n15150\n15151\n15152\n15153\n15154\n15155\n15156\n15157\n15158\n15159\n15160\n15161\n15162\n15163\n15164\n15165\n15166\n15167\n15168\n15169\n15170\n15171\n15172\n15173\n15174\n15175\n15176\n15177\n15178\n15179\n15180\n15181\n15182\n15183\n15184\n15185\n15186\n15187\n15188\n15189\n15190\n15191\n15192\n15193\n15194\n15195\n15196\n15197\n15198\n15199\n15200\n15201\n15202\n15203\n15204\n15205\n15206\n15207\n15208\n15209\n15210\n15211\n15212\n15213\n15214\n15215\n15216\n15217\n15218\n15219\n15220\n15221\n15222\n15223\n15224\n15225\n15226\n15227\n15228\n15229\n15230\n15231\n15232\n15233\n15234\n15235\n15236\n15237\n15238\n15239\n15240\n15241\n15242\n15243\n15244\n15245\n15246\n15247\n15248\n15249\n15250\n15251\n15252\n15253\n15254\n15255\n15256\n15257\n15258\n15259\n15260\n15261\n15262\n15263\n15264\n15265\n15266\n15267\n15268\n15269\n15270\n15271\n15272\n15273\n15274\n15275\n15276\n15277\n15278\n15279\n15280\n15281\n15282\n15283\n15284\n15285\n15286\n15287\n15288\n15289\n15290\n15291\n15292\n15293\n15294\n15295\n15296\n15297\n15298\n15299\n15300\n1
5301\n15302\n15303\n15304\n15305\n15306\n15307\n15308\n15309\n15310\n15311\n15312\n15313\n15314\n15315\n15316\n15317\n15318\n15319\n15320\n15321\n15322\n15323\n15324\n15325\n15326\n15327\n15328\n15329\n15330\n15331\n15332\n15333\n15334\n15335\n15336\n15337\n15338\n15339\n15340\n15341\n15342\n15343\n15344\n15345\n15346\n15347\n15348\n15349\n15350\n15351\n15352\n15353\n15354\n15355\n15356\n15357\n15358\n15359\n15360\n15361\n15362\n15363\n15364\n15365\n15366\n15367\n15368\n15369\n15370\n15371\n15372\n15373\n15374\n15375\n15376\n15377\n15378\n15379\n15380\n15381\n15382\n15383\n15384\n15385\n15386\n15387\n15388\n15389\n15390\n15391\n15392\n15393\n15394\n15395\n15396\n15397\n15398\n15399\n15400\n15401\n15402\n15403\n15404\n15405\n15406\n15407\n15408\n15409\n15410\n15411\n15412\n15413\n15414\n15415\n15416\n15417\n15418\n15419\n15420\n15421\n15422\n15423\n15424\n15425\n15426\n15427\n15428\n15429\n15430\n15431\n15432\n15433\n15434\n15435\n15436\n15437\n15438\n15439\n15440\n15441\n15442\n15443\n15444\n15445\n15446\n15447\n15448\n15449\n15450\n15451\n15452\n15453\n15454\n15455\n15456\n15457\n15458\n15459\n15460\n15461\n15462\n15463\n15464\n15465\n15466\n15467\n15468\n15469\n15470\n15471\n15472\n15473\n15474\n15475\n15476\n15477\n15478\n15479\n15480\n15481\n15482\n15483\n15484\n15485\n15486\n15487\n15488\n15489\n15490\n15491\n15492\n15493\n15494\n15495\n15496\n15497\n15498\n15499\n15500\n15501\n15502\n15503\n15504\n15505\n15506\n15507\n15508\n15509\n15510\n15511\n15512\n15513\n15514\n15515\n15516\n15517\n15518\n15519\n15520\n15521\n15522\n15523\n15524\n15525\n15526\n15527\n15528\n15529\n15530\n15531\n15532\n15533\n15534\n15535\n15536\n15537\n15538\n15539\n15540\n15541\n15542\n15543\n15544\n15545\n15546\n15547\n15548\n15549\n15550\n15551\n15552\n15553\n15554\n15555\n15556\n15557\n15558\n15559\n15560\n15561\n15562\n15563\n15564\n15565\n15566\n15567\n15568\n15569\n15570\n15571\n15572\n15573\n15574\n15575\n15576\n15577\n15578\n15579\n15580\n15581\n15582\n15583\n15584\n15585\n15586\
n15587\n15588\n15589\n15590\n15591\n15592\n15593\n15594\n15595\n15596\n15597\n15598\n15599\n15600\n15601\n15602\n15603\n15604\n15605\n15606\n15607\n15608\n15609\n15610\n15611\n15612\n15613\n15614\n15615\n15616\n15617\n15618\n15619\n15620\n15621\n15622\n15623\n15624\n15625\n15626\n15627\n15628\n15629\n15630\n15631\n15632\n15633\n15634\n15635\n15636\n15637\n15638\n15639\n15640\n15641\n15642\n15643\n15644\n15645\n15646\n15647\n15648\n15649\n15650\n15651\n15652\n15653\n15654\n15655\n15656\n15657\n15658\n15659\n15660\n15661\n15662\n15663\n15664\n15665\n15666\n15667\n15668\n15669\n15670\n15671\n15672\n15673\n15674\n15675\n15676\n15677\n15678\n15679\n15680\n15681\n15682\n15683\n15684\n15685\n15686\n15687\n15688\n15689\n15690\n15691\n15692\n15693\n15694\n15695\n15696\n15697\n15698\n15699\n15700\n15701\n15702\n15703\n15704\n15705\n15706\n15707\n15708\n15709\n15710\n15711\n15712\n15713\n15714\n15715\n15716\n15717\n15718\n15719\n15720\n15721\n15722\n15723\n15724\n15725\n15726\n15727\n15728\n15729\n15730\n15731\n15732\n15733\n15734\n15735\n15736\n15737\n15738\n15739\n15740\n15741\n15742\n15743\n15744\n15745\n15746\n15747\n15748\n15749\n15750\n15751\n15752\n15753\n15754\n15755\n15756\n15757\n15758\n15759\n15760\n15761\n15762\n15763\n15764\n15765\n15766\n15767\n15768\n15769\n15770\n15771\n15772\n15773\n15774\n15775\n15776\n15777\n15778\n15779\n15780\n15781\n15782\n15783\n15784\n15785\n15786\n15787\n15788\n15789\n15790\n15791\n15792\n15793\n15794\n15795\n15796\n15797\n15798\n15799\n15800\n15801\n15802\n15803\n15804\n15805\n15806\n15807\n15808\n15809\n15810\n15811\n15812\n15813\n15814\n15815\n15816\n15817\n15818\n15819\n15820\n15821\n15822\n15823\n15824\n15825\n15826\n15827\n15828\n15829\n15830\n15831\n15832\n15833\n15834\n15835\n15836\n15837\n15838\n15839\n15840\n15841\n15842\n15843\n15844\n15845\n15846\n15847\n15848\n15849\n15850\n15851\n15852\n15853\n15854\n15855\n15856\n15857\n15858\n15859\n15860\n15861\n15862\n15863\n15864\n15865\n15866\n15867\n15868\n15869\n15870\n15871\n1587
2\n15873\n15874\n15875\n15876\n15877\n15878\n15879\n15880\n15881\n15882\n15883\n15884\n15885\n15886\n15887\n15888\n15889\n15890\n15891\n15892\n15893\n15894\n15895\n15896\n15897\n15898\n15899\n15900\n15901\n15902\n15903\n15904\n15905\n15906\n15907\n15908\n15909\n15910\n15911\n15912\n15913\n15914\n15915\n15916\n15917\n15918\n15919\n15920\n15921\n15922\n15923\n15924\n15925\n15926\n15927\n15928\n15929\n15930\n15931\n15932\n15933\n15934\n15935\n15936\n15937\n15938\n15939\n15940\n15941\n15942\n15943\n15944\n15945\n15946\n15947\n15948\n15949\n15950\n15951\n15952\n15953\n15954\n15955\n15956\n15957\n15958\n15959\n15960\n15961\n15962\n15963\n15964\n15965\n15966\n15967\n15968\n15969\n15970\n15971\n15972\n15973\n15974\n15975\n15976\n15977\n15978\n15979\n15980\n15981\n15982\n15983\n15984\n15985\n15986\n15987\n15988\n15989\n15990\n15991\n15992\n15993\n15994\n15995\n15996\n15997\n15998\n15999\n16000\n16001\n16002\n16003\n16004\n16005\n16006\n16007\n16008\n16009\n16010\n16011\n16012\n16013\n16014\n16015\n16016\n16017\n16018\n16019\n16020\n16021\n16022\n16023\n16024\n16025\n16026\n16027\n16028\n16029\n16030\n16031\n16032\n16033\n16034\n16035\n16036\n16037\n16038\n16039\n16040\n16041\n16042\n16043\n16044\n16045\n16046\n16047\n16048\n16049\n16050\n16051\n16052\n16053\n16054\n16055\n16056\n16057\n16058\n16059\n16060\n16061\n16062\n16063\n16064\n16065\n16066\n16067\n16068\n16069\n16070\n16071\n16072\n16073\n16074\n16075\n16076\n16077\n16078\n16079\n16080\n16081\n16082\n16083\n16084\n16085\n16086\n16087\n16088\n16089\n16090\n16091\n16092\n16093\n16094\n16095\n16096\n16097\n16098\n16099\n16100\n16101\n16102\n16103\n16104\n16105\n16106\n16107\n16108\n16109\n16110\n16111\n16112\n16113\n16114\n16115\n16116\n16117\n16118\n16119\n16120\n16121\n16122\n16123\n16124\n16125\n16126\n16127\n16128\n16129\n16130\n16131\n16132\n16133\n16134\n16135\n16136\n16137\n16138\n16139\n16140\n16141\n16142\n16143\n16144\n16145\n16146\n16147\n16148\n16149\n16150\n16151\n16152\n16153\n16154\n16155\n16156\n16157\n16
158\n16159\n16160\n16161\n16162\n16163\n16164\n16165\n16166\n16167\n16168\n16169\n16170\n16171\n16172\n16173\n16174\n16175\n16176\n16177\n16178\n16179\n16180\n16181\n16182\n16183\n16184\n16185\n16186\n16187\n16188\n16189\n16190\n16191\n16192\n16193\n16194\n16195\n16196\n16197\n16198\n16199\n16200\n16201\n16202\n16203\n16204\n16205\n16206\n16207\n16208\n16209\n16210\n16211\n16212\n16213\n16214\n16215\n16216\n16217\n16218\n16219\n16220\n16221\n16222\n16223\n16224\n16225\n16226\n16227\n16228\n16229\n16230\n16231\n16232\n16233\n16234\n16235\n16236\n16237\n16238\n16239\n16240\n16241\n16242\n16243\n16244\n16245\n16246\n16247\n16248\n16249\n16250\n16251\n16252\n16253\n16254\n16255\n16256\n16257\n16258\n16259\n16260\n16261\n16262\n16263\n16264\n16265\n16266\n16267\n16268\n16269\n16270\n16271\n16272\n16273\n16274\n16275\n16276\n16277\n16278\n16279\n16280\n16281\n16282\n16283\n16284\n16285\n16286\n16287\n16288\n16289\n16290\n16291\n16292\n16293\n16294\n16295\n16296\n16297\n16298\n16299\n16300\n16301\n16302\n16303\n16304\n16305\n16306\n16307\n16308\n16309\n16310\n16311\n16312\n16313\n16314\n16315\n16316\n16317\n16318\n16319\n16320\n16321\n16322\n16323\n16324\n16325\n16326\n16327\n16328\n16329\n16330\n16331\n16332\n16333\n16334\n16335\n16336\n16337\n16338\n16339\n16340\n16341\n16342\n16343\n16344\n16345\n16346\n16347\n16348\n16349\n16350\n16351\n16352\n16353\n16354\n16355\n16356\n16357\n16358\n16359\n16360\n16361\n16362\n16363\n16364\n16365\n16366\n16367\n16368\n16369\n16370\n16371\n16372\n16373\n16374\n16375\n16376\n16377\n16378\n16379\n16380\n16381\n16382\n16383\n16384\n16385\n16386\n16387\n16388\n16389\n16390\n16391\n16392\n16393\n16394\n16395\n16396\n16397\n16398\n16399\n16400\n16401\n16402\n16403\n16404\n16405\n16406\n16407\n16408\n16409\n16410\n16411\n16412\n16413\n16414\n16415\n16416\n16417\n16418\n16419\n16420\n16421\n16422\n16423\n16424\n16425\n16426\n16427\n16428\n16429\n16430\n16431\n16432\n16433\n16434\n16435\n16436\n16437\n16438\n16439\n16440\n16441\n16442\n16443\n
16444\n16445\n16446\n16447\n16448\n16449\n16450\n16451\n16452\n16453\n16454\n16455\n16456\n16457\n16458\n16459\n16460\n16461\n16462\n16463\n16464\n16465\n16466\n16467\n16468\n16469\n16470\n16471\n16472\n16473\n16474\n16475\n16476\n16477\n16478\n16479\n16480\n16481\n16482\n16483\n16484\n16485\n16486\n16487\n16488\n16489\n16490\n16491\n16492\n16493\n16494\n16495\n16496\n16497\n16498\n16499\n16500\n16501\n16502\n16503\n16504\n16505\n16506\n16507\n16508\n16509\n16510\n16511\n16512\n16513\n16514\n16515\n16516\n16517\n16518\n16519\n16520\n16521\n16522\n16523\n16524\n16525\n16526\n16527\n16528\n16529\n16530\n16531\n16532\n16533\n16534\n16535\n16536\n16537\n16538\n16539\n16540\n16541\n16542\n16543\n16544\n16545\n16546\n16547\n16548\n16549\n16550\n16551\n16552\n16553\n16554\n16555\n16556\n16557\n16558\n16559\n16560\n16561\n16562\n16563\n16564\n16565\n16566\n16567\n16568\n16569\n16570\n16571\n16572\n16573\n16574\n16575\n16576\n16577\n16578\n16579\n16580\n16581\n16582\n16583\n16584\n16585\n16586\n16587\n16588\n16589\n16590\n16591\n16592\n16593\n16594\n16595\n16596\n16597\n16598\n16599\n16600\n16601\n16602\n16603\n16604\n16605\n16606\n16607\n16608\n16609\n16610\n16611\n16612\n16613\n16614\n16615\n16616\n16617\n16618\n16619\n16620\n16621\n16622\n16623\n16624\n16625\n16626\n16627\n16628\n16629\n16630\n16631\n16632\n16633\n16634\n16635\n16636\n16637\n16638\n16639\n16640\n16641\n16642\n16643\n16644\n16645\n16646\n16647\n16648\n16649\n16650\n16651\n16652\n16653\n16654\n16655\n16656\n16657\n16658\n16659\n16660\n16661\n16662\n16663\n16664\n16665\n16666\n16667\n16668\n16669\n16670\n16671\n16672\n16673\n16674\n16675\n16676\n16677\n16678\n16679\n16680\n16681\n16682\n16683\n16684\n16685\n16686\n16687\n16688\n16689\n16690\n16691\n16692\n16693\n16694\n16695\n16696\n16697\n16698\n16699\n16700\n16701\n16702\n16703\n16704\n16705\n16706\n16707\n16708\n16709\n16710\n16711\n16712\n16713\n16714\n16715\n16716\n16717\n16718\n16719\n16720\n16721\n16722\n16723\n16724\n16725\n16726\n16727\n16728\n16729
\n16730\n16731\n16732\n16733\n16734\n16735\n16736\n16737\n16738\n16739\n16740\n16741\n16742\n16743\n16744\n16745\n16746\n16747\n16748\n16749\n16750\n16751\n16752\n16753\n16754\n16755\n16756\n16757\n16758\n16759\n16760\n16761\n16762\n16763\n16764\n16765\n16766\n16767\n16768\n16769\n16770\n16771\n16772\n16773\n16774\n16775\n16776\n16777\n16778\n16779\n16780\n16781\n16782\n16783\n16784\n16785\n16786\n16787\n16788\n16789\n16790\n16791\n16792\n16793\n16794\n16795\n16796\n16797\n16798\n16799\n16800\n16801\n16802\n16803\n16804\n16805\n16806\n16807\n16808\n16809\n16810\n16811\n16812\n16813\n16814\n16815\n16816\n16817\n16818\n16819\n16820\n16821\n16822\n16823\n16824\n16825\n16826\n16827\n16828\n16829\n16830\n16831\n16832\n16833\n16834\n16835\n16836\n16837\n16838\n16839\n16840\n16841\n16842\n16843\n16844\n16845\n16846\n16847\n16848\n16849\n16850\n16851\n16852\n16853\n16854\n16855\n16856\n16857\n16858\n16859\n16860\n16861\n16862\n16863\n16864\n16865\n16866\n16867\n16868\n16869\n16870\n16871\n16872\n16873\n16874\n16875\n16876\n16877\n16878\n16879\n16880\n16881\n16882\n16883\n16884\n16885\n16886\n16887\n16888\n16889\n16890\n16891\n16892\n16893\n16894\n16895\n16896\n16897\n16898\n16899\n16900\n16901\n16902\n16903\n16904\n16905\n16906\n16907\n16908\n16909\n16910\n16911\n16912\n16913\n16914\n16915\n16916\n16917\n16918\n16919\n16920\n16921\n16922\n16923\n16924\n16925\n16926\n16927\n16928\n16929\n16930\n16931\n16932\n16933\n16934\n16935\n16936\n16937\n16938\n16939\n16940\n16941\n16942\n16943\n16944\n16945\n16946\n16947\n16948\n16949\n16950\n16951\n16952\n16953\n16954\n16955\n16956\n16957\n16958\n16959\n16960\n16961\n16962\n16963\n16964\n16965\n16966\n16967\n16968\n16969\n16970\n16971\n16972\n16973\n16974\n16975\n16976\n16977\n16978\n16979\n16980\n16981\n16982\n16983\n16984\n16985\n16986\n16987\n16988\n16989\n16990\n16991\n16992\n16993\n16994\n16995\n16996\n16997\n16998\n16999\n17000\n17001\n17002\n17003\n17004\n17005\n17006\n17007\n17008\n17009\n17010\n17011\n17012\n17013\n17014\n170
15\n17016\n17017\n17018\n17019\n17020\n17021\n17022\n17023\n17024\n17025\n17026\n17027\n17028\n17029\n17030\n17031\n17032\n17033\n17034\n17035\n17036\n17037\n17038\n17039\n17040\n17041\n17042\n17043\n17044\n17045\n17046\n17047\n17048\n17049\n17050\n17051\n17052\n17053\n17054\n17055\n17056\n17057\n17058\n17059\n17060\n17061\n17062\n17063\n17064\n17065\n17066\n17067\n17068\n17069\n17070\n17071\n17072\n17073\n17074\n17075\n17076\n17077\n17078\n17079\n17080\n17081\n17082\n17083\n17084\n17085\n17086\n17087\n17088\n17089\n17090\n17091\n17092\n17093\n17094\n17095\n17096\n17097\n17098\n17099\n17100\n17101\n17102\n17103\n17104\n17105\n17106\n17107\n17108\n17109\n17110\n17111\n17112\n17113\n17114\n17115\n17116\n17117\n17118\n17119\n17120\n17121\n17122\n17123\n17124\n17125\n17126\n17127\n17128\n17129\n17130\n17131\n17132\n17133\n17134\n17135\n17136\n17137\n17138\n17139\n17140\n17141\n17142\n17143\n17144\n17145\n17146\n17147\n17148\n17149\n17150\n17151\n17152\n17153\n17154\n17155\n17156\n17157\n17158\n17159\n17160\n17161\n17162\n17163\n17164\n17165\n17166\n17167\n17168\n17169\n17170\n17171\n17172\n17173\n17174\n17175\n17176\n17177\n17178\n17179\n17180\n17181\n17182\n17183\n17184\n17185\n17186\n17187\n17188\n17189\n17190\n17191\n17192\n17193\n17194\n17195\n17196\n17197\n17198\n17199\n17200\n17201\n17202\n17203\n17204\n17205\n17206\n17207\n17208\n17209\n17210\n17211\n17212\n17213\n17214\n17215\n17216\n17217\n17218\n17219\n17220\n17221\n17222\n17223\n17224\n17225\n17226\n17227\n17228\n17229\n17230\n17231\n17232\n17233\n17234\n17235\n17236\n17237\n17238\n17239\n17240\n17241\n17242\n17243\n17244\n17245\n17246\n17247\n17248\n17249\n17250\n17251\n17252\n17253\n17254\n17255\n17256\n17257\n17258\n17259\n17260\n17261\n17262\n17263\n17264\n17265\n17266\n17267\n17268\n17269\n17270\n17271\n17272\n17273\n17274\n17275\n17276\n17277\n17278\n17279\n17280\n17281\n17282\n17283\n17284\n17285\n17286\n17287\n17288\n17289\n17290\n17291\n17292\n17293\n17294\n17295\n17296\n17297\n17298\n17299\n17300\n1
7301\n17302\n17303\n17304\n17305\n17306\n17307\n17308\n17309\n17310\n17311\n17312\n17313\n17314\n17315\n17316\n17317\n17318\n17319\n17320\n17321\n17322\n17323\n17324\n17325\n17326\n17327\n17328\n17329\n17330\n17331\n17332\n17333\n17334\n17335\n17336\n17337\n17338\n17339\n17340\n17341\n17342\n17343\n17344\n17345\n17346\n17347\n17348\n17349\n17350\n17351\n17352\n17353\n17354\n17355\n17356\n17357\n17358\n17359\n17360\n17361\n17362\n17363\n17364\n17365\n17366\n17367\n17368\n17369\n17370\n17371\n17372\n17373\n17374\n17375\n17376\n17377\n17378\n17379\n17380\n17381\n17382\n17383\n17384\n17385\n17386\n17387\n17388\n17389\n17390\n17391\n17392\n17393\n17394\n17395\n17396\n17397\n17398\n17399\n17400\n17401\n17402\n17403\n17404\n17405\n17406\n17407\n17408\n17409\n17410\n17411\n17412\n17413\n17414\n17415\n17416\n17417\n17418\n17419\n17420\n17421\n17422\n17423\n17424\n17425\n17426\n17427\n17428\n17429\n17430\n17431\n17432\n17433\n17434\n17435\n17436\n17437\n17438\n17439\n17440\n17441\n17442\n17443\n17444\n17445\n17446\n17447\n17448\n17449\n17450\n17451\n17452\n17453\n17454\n17455\n17456\n17457\n17458\n17459\n17460\n17461\n17462\n17463\n17464\n17465\n17466\n17467\n17468\n17469\n17470\n17471\n17472\n17473\n17474\n17475\n17476\n17477\n17478\n17479\n17480\n17481\n17482\n17483\n17484\n17485\n17486\n17487\n17488\n17489\n17490\n17491\n17492\n17493\n17494\n17495\n17496\n17497\n17498\n17499\n17500\n17501\n17502\n17503\n17504\n17505\n17506\n17507\n17508\n17509\n17510\n17511\n17512\n17513\n17514\n17515\n17516\n17517\n17518\n17519\n17520\n17521\n17522\n17523\n17524\n17525\n17526\n17527\n17528\n17529\n17530\n17531\n17532\n17533\n17534\n17535\n17536\n17537\n17538\n17539\n17540\n17541\n17542\n17543\n17544\n17545\n17546\n17547\n17548\n17549\n17550\n17551\n17552\n17553\n17554\n17555\n17556\n17557\n17558\n17559\n17560\n17561\n17562\n17563\n17564\n17565\n17566\n17567\n17568\n17569\n17570\n17571\n17572\n17573\n17574\n17575\n17576\n17577\n17578\n17579\n17580\n17581\n17582\n17583\n17584\n17585\n17586\
n17587\n17588\n17589\n17590\n17591\n17592\n17593\n17594\n17595\n17596\n17597\n17598\n17599\n17600\n17601\n17602\n17603\n17604\n17605\n17606\n17607\n17608\n17609\n17610\n17611\n17612\n17613\n17614\n17615\n17616\n17617\n17618\n17619\n17620\n17621\n17622\n17623\n17624\n17625\n17626\n17627\n17628\n17629\n17630\n17631\n17632\n17633\n17634\n17635\n17636\n17637\n17638\n17639\n17640\n17641\n17642\n17643\n17644\n17645\n17646\n17647\n17648\n17649\n17650\n17651\n17652\n17653\n17654\n17655\n17656\n17657\n17658\n17659\n17660\n17661\n17662\n17663\n17664\n17665\n17666\n17667\n17668\n17669\n17670\n17671\n17672\n17673\n17674\n17675\n17676\n17677\n17678\n17679\n17680\n17681\n17682\n17683\n17684\n17685\n17686\n17687\n17688\n17689\n17690\n17691\n17692\n17693\n17694\n17695\n17696\n17697\n17698\n17699\n17700\n17701\n17702\n17703\n17704\n17705\n17706\n17707\n17708\n17709\n17710\n17711\n17712\n17713\n17714\n17715\n17716\n17717\n17718\n17719\n17720\n17721\n17722\n17723\n17724\n17725\n17726\n17727\n17728\n17729\n17730\n17731\n17732\n17733\n17734\n17735\n17736\n17737\n17738\n17739\n17740\n17741\n17742\n17743\n17744\n17745\n17746\n17747\n17748\n17749\n17750\n17751\n17752\n17753\n17754\n17755\n17756\n17757\n17758\n17759\n17760\n17761\n17762\n17763\n17764\n17765\n17766\n17767\n17768\n17769\n17770\n17771\n17772\n17773\n17774\n17775\n17776\n17777\n17778\n17779\n17780\n17781\n17782\n17783\n17784\n17785\n17786\n17787\n17788\n17789\n17790\n17791\n17792\n17793\n17794\n17795\n17796\n17797\n17798\n17799\n17800\n17801\n17802\n17803\n17804\n17805\n17806\n17807\n17808\n17809\n17810\n17811\n17812\n17813\n17814\n17815\n17816\n17817\n17818\n17819\n17820\n17821\n17822\n17823\n17824\n17825\n17826\n17827\n17828\n17829\n17830\n17831\n17832\n17833\n17834\n17835\n17836\n17837\n17838\n17839\n17840\n17841\n17842\n17843\n17844\n17845\n17846\n17847\n17848\n17849\n17850\n17851\n17852\n17853\n17854\n17855\n17856\n17857\n17858\n17859\n17860\n17861\n17862\n17863\n17864\n17865\n17866\n17867\n17868\n17869\n17870\n17871\n1787
2\n17873\n17874\n17875\n17876\n17877\n17878\n17879\n17880\n17881\n17882\n17883\n17884\n17885\n17886\n17887\n17888\n17889\n17890\n17891\n17892\n17893\n17894\n17895\n17896\n17897\n17898\n17899\n17900\n17901\n17902\n17903\n17904\n17905\n17906\n17907\n17908\n17909\n17910\n17911\n17912\n17913\n17914\n17915\n17916\n17917\n17918\n17919\n17920\n17921\n17922\n17923\n17924\n17925\n17926\n17927\n17928\n17929\n17930\n17931\n17932\n17933\n17934\n17935\n17936\n17937\n17938\n17939\n17940\n17941\n17942\n17943\n17944\n17945\n17946\n17947\n17948\n17949\n17950\n17951\n17952\n17953\n17954\n17955\n17956\n17957\n17958\n17959\n17960\n17961\n17962\n17963\n17964\n17965\n17966\n17967\n17968\n17969\n17970\n17971\n17972\n17973\n17974\n17975\n17976\n17977\n17978\n17979\n17980\n17981\n17982\n17983\n17984\n17985\n17986\n17987\n17988\n17989\n17990\n17991\n17992\n17993\n17994\n17995\n17996\n17997\n17998\n17999\n18000\n18001\n18002\n18003\n18004\n18005\n18006\n18007\n18008\n18009\n18010\n18011\n18012\n18013\n18014\n18015\n18016\n18017\n18018\n18019\n18020\n18021\n18022\n18023\n18024\n18025\n18026\n18027\n18028\n18029\n18030\n18031\n18032\n18033\n18034\n18035\n18036\n18037\n18038\n18039\n18040\n18041\n18042\n18043\n18044\n18045\n18046\n18047\n18048\n18049\n18050\n18051\n18052\n18053\n18054\n18055\n18056\n18057\n18058\n18059\n18060\n18061\n18062\n18063\n18064\n18065\n18066\n18067\n18068\n18069\n18070\n18071\n18072\n18073\n18074\n18075\n18076\n18077\n18078\n18079\n18080\n18081\n18082\n18083\n18084\n18085\n18086\n18087\n18088\n18089\n18090\n18091\n18092\n18093\n18094\n18095\n18096\n18097\n18098\n18099\n18100\n18101\n18102\n18103\n18104\n18105\n18106\n18107\n18108\n18109\n18110\n18111\n18112\n18113\n18114\n18115\n18116\n18117\n18118\n18119\n18120\n18121\n18122\n18123\n18124\n18125\n18126\n18127\n18128\n18129\n18130\n18131\n18132\n18133\n18134\n18135\n18136\n18137\n18138\n18139\n18140\n18141\n18142\n18143\n18144\n18145\n18146\n18147\n18148\n18149\n18150\n18151\n18152\n18153\n18154\n18155\n18156\n18157\n18
158\n18159\n18160\n18161\n18162\n18163\n18164\n18165\n18166\n18167\n18168\n18169\n18170\n18171\n18172\n18173\n18174\n18175\n18176\n18177\n18178\n18179\n18180\n18181\n18182\n18183\n18184\n18185\n18186\n18187\n18188\n18189\n18190\n18191\n18192\n18193\n18194\n18195\n18196\n18197\n18198\n18199\n18200\n18201\n18202\n18203\n18204\n18205\n18206\n18207\n18208\n18209\n18210\n18211\n18212\n18213\n18214\n18215\n18216\n18217\n18218\n18219\n18220\n18221\n18222\n18223\n18224\n18225\n18226\n18227\n18228\n18229\n18230\n18231\n18232\n18233\n18234\n18235\n18236\n18237\n18238\n18239\n18240\n18241\n18242\n18243\n18244\n18245\n18246\n18247\n18248\n18249\n18250\n18251\n18252\n18253\n18254\n18255\n18256\n18257\n18258\n18259\n18260\n18261\n18262\n18263\n18264\n18265\n18266\n18267\n18268\n18269\n18270\n18271\n18272\n18273\n18274\n18275\n18276\n18277\n18278\n18279\n18280\n18281\n18282\n18283\n18284\n18285\n18286\n18287\n18288\n18289\n18290\n18291\n18292\n18293\n18294\n18295\n18296\n18297\n18298\n18299\n18300\n18301\n18302\n18303\n18304\n18305\n18306\n18307\n18308\n18309\n18310\n18311\n18312\n18313\n18314\n18315\n18316\n18317\n18318\n18319\n18320\n18321\n18322\n18323\n18324\n18325\n18326\n18327\n18328\n18329\n18330\n18331\n18332\n18333\n18334\n18335\n18336\n18337\n18338\n18339\n18340\n18341\n18342\n18343\n18344\n18345\n18346\n18347\n18348\n18349\n18350\n18351\n18352\n18353\n18354\n18355\n18356\n18357\n18358\n18359\n18360\n18361\n18362\n18363\n18364\n18365\n18366\n18367\n18368\n18369\n18370\n18371\n18372\n18373\n18374\n18375\n18376\n18377\n18378\n18379\n18380\n18381\n18382\n18383\n18384\n18385\n18386\n18387\n18388\n18389\n18390\n18391\n18392\n18393\n18394\n18395\n18396\n18397\n18398\n18399\n18400\n18401\n18402\n18403\n18404\n18405\n18406\n18407\n18408\n18409\n18410\n18411\n18412\n18413\n18414\n18415\n18416\n18417\n18418\n18419\n18420\n18421\n18422\n18423\n18424\n18425\n18426\n18427\n18428\n18429\n18430\n18431\n18432\n18433\n18434\n18435\n18436\n18437\n18438\n18439\n18440\n18441\n18442\n18443\n
18444\n18445\n18446\n18447\n18448\n18449\n18450\n18451\n18452\n18453\n18454\n18455\n18456\n18457\n18458\n18459\n18460\n18461\n18462\n18463\n18464\n18465\n18466\n18467\n18468\n18469\n18470\n18471\n18472\n18473\n18474\n18475\n18476\n18477\n18478\n18479\n18480\n18481\n18482\n18483\n18484\n18485\n18486\n18487\n18488\n18489\n18490\n18491\n18492\n18493\n18494\n18495\n18496\n18497\n18498\n18499\n18500\n18501\n18502\n18503\n18504\n18505\n18506\n18507\n18508\n18509\n18510\n18511\n18512\n18513\n18514\n18515\n18516\n18517\n18518\n18519\n18520\n18521\n18522\n18523\n18524\n18525\n18526\n18527\n18528\n18529\n18530\n18531\n18532\n18533\n18534\n18535\n18536\n18537\n18538\n18539\n18540\n18541\n18542\n18543\n18544\n18545\n18546\n18547\n18548\n18549\n18550\n18551\n18552\n18553\n18554\n18555\n18556\n18557\n18558\n18559\n18560\n18561\n18562\n18563\n18564\n18565\n18566\n18567\n18568\n18569\n18570\n18571\n18572\n18573\n18574\n18575\n18576\n18577\n18578\n18579\n18580\n18581\n18582\n18583\n18584\n18585\n18586\n18587\n18588\n18589\n18590\n18591\n18592\n18593\n18594\n18595\n18596\n18597\n18598\n18599\n18600\n18601\n18602\n18603\n18604\n18605\n18606\n18607\n18608\n18609\n18610\n18611\n18612\n18613\n18614\n18615\n18616\n18617\n18618\n18619\n18620\n18621\n18622\n18623\n18624\n18625\n18626\n18627\n18628\n18629\n18630\n18631\n18632\n18633\n18634\n18635\n18636\n18637\n18638\n18639\n18640\n18641\n18642\n18643\n18644\n18645\n18646\n18647\n18648\n18649\n18650\n18651\n18652\n18653\n18654\n18655\n18656\n18657\n18658\n18659\n18660\n18661\n18662\n18663\n18664\n18665\n18666\n18667\n18668\n18669\n18670\n18671\n18672\n18673\n18674\n18675\n18676\n18677\n18678\n18679\n18680\n18681\n18682\n18683\n18684\n18685\n18686\n18687\n18688\n18689\n18690\n18691\n18692\n18693\n18694\n18695\n18696\n18697\n18698\n18699\n18700\n18701\n18702\n18703\n18704\n18705\n18706\n18707\n18708\n18709\n18710\n18711\n18712\n18713\n18714\n18715\n18716\n18717\n18718\n18719\n18720\n18721\n18722\n18723\n18724\n18725\n18726\n18727\n18728\n18729
\n18730\n18731\n18732\n18733\n18734\n18735\n18736\n18737\n18738\n18739\n18740\n18741\n18742\n18743\n18744\n18745\n18746\n18747\n18748\n18749\n18750\n18751\n18752\n18753\n18754\n18755\n18756\n18757\n18758\n18759\n18760\n18761\n18762\n18763\n18764\n18765\n18766\n18767\n18768\n18769\n18770\n18771\n18772\n18773\n18774\n18775\n18776\n18777\n18778\n18779\n18780\n18781\n18782\n18783\n18784\n18785\n18786\n18787\n18788\n18789\n18790\n18791\n18792\n18793\n18794\n18795\n18796\n18797\n18798\n18799\n18800\n18801\n18802\n18803\n18804\n18805\n18806\n18807\n18808\n18809\n18810\n18811\n18812\n18813\n18814\n18815\n18816\n18817\n18818\n18819\n18820\n18821\n18822\n18823\n18824\n18825\n18826\n18827\n18828\n18829\n18830\n18831\n18832\n18833\n18834\n18835\n18836\n18837\n18838\n18839\n18840\n18841\n18842\n18843\n18844\n18845\n18846\n18847\n18848\n18849\n18850\n18851\n18852\n18853\n18854\n18855\n18856\n18857\n18858\n18859\n18860\n18861\n18862\n18863\n18864\n18865\n18866\n18867\n18868\n18869\n18870\n18871\n18872\n18873\n18874\n18875\n18876\n18877\n18878\n18879\n18880\n18881\n18882\n18883\n18884\n18885\n18886\n18887\n18888\n18889\n18890\n18891\n18892\n18893\n18894\n18895\n18896\n18897\n18898\n18899\n18900\n18901\n18902\n18903\n18904\n18905\n18906\n18907\n18908\n18909\n18910\n18911\n18912\n18913\n18914\n18915\n18916\n18917\n18918\n18919\n18920\n18921\n18922\n18923\n18924\n18925\n18926\n18927\n18928\n18929\n18930\n18931\n18932\n18933\n18934\n18935\n18936\n18937\n18938\n18939\n18940\n18941\n18942\n18943\n18944\n18945\n18946\n18947\n18948\n18949\n18950\n18951\n18952\n18953\n18954\n18955\n18956\n18957\n18958\n18959\n18960\n18961\n18962\n18963\n18964\n18965\n18966\n18967\n18968\n18969\n18970\n18971\n18972\n18973\n18974\n18975\n18976\n18977\n18978\n18979\n18980\n18981\n18982\n18983\n18984\n18985\n18986\n18987\n18988\n18989\n18990\n18991\n18992\n18993\n18994\n18995\n18996\n18997\n18998\n18999\n19000\n19001\n19002\n19003\n19004\n19005\n19006\n19007\n19008\n19009\n19010\n19011\n19012\n19013\n19014\n190
15\n19016\n19017\n19018\n19019\n19020\n19021\n19022\n19023\n19024\n19025\n19026\n19027\n19028\n19029\n19030\n19031\n19032\n19033\n19034\n19035\n19036\n19037\n19038\n19039\n19040\n19041\n19042\n19043\n19044\n19045\n19046\n19047\n19048\n19049\n19050\n19051\n19052\n19053\n19054\n19055\n19056\n19057\n19058\n19059\n19060\n19061\n19062\n19063\n19064\n19065\n19066\n19067\n19068\n19069\n19070\n19071\n19072\n19073\n19074\n19075\n19076\n19077\n19078\n19079\n19080\n19081\n19082\n19083\n19084\n19085\n19086\n19087\n19088\n19089\n19090\n19091\n19092\n19093\n19094\n19095\n19096\n19097\n19098\n19099\n19100\n19101\n19102\n19103\n19104\n19105\n19106\n19107\n19108\n19109\n19110\n19111\n19112\n19113\n19114\n19115\n19116\n19117\n19118\n19119\n19120\n19121\n19122\n19123\n19124\n19125\n19126\n19127\n19128\n19129\n19130\n19131\n19132\n19133\n19134\n19135\n19136\n19137\n19138\n19139\n19140\n19141\n19142\n19143\n19144\n19145\n19146\n19147\n19148\n19149\n19150\n19151\n19152\n19153\n19154\n19155\n19156\n19157\n19158\n19159\n19160\n19161\n19162\n19163\n19164\n19165\n19166\n19167\n19168\n19169\n19170\n19171\n19172\n19173\n19174\n19175\n19176\n19177\n19178\n19179\n19180\n19181\n19182\n19183\n19184\n19185\n19186\n19187\n19188\n19189\n19190\n19191\n19192\n19193\n19194\n19195\n19196\n19197\n19198\n19199\n19200\n19201\n19202\n19203\n19204\n19205\n19206\n19207\n19208\n19209\n19210\n19211\n19212\n19213\n19214\n19215\n19216\n19217\n19218\n19219\n19220\n19221\n19222\n19223\n19224\n19225\n19226\n19227\n19228\n19229\n19230\n19231\n19232\n19233\n19234\n19235\n19236\n19237\n19238\n19239\n19240\n19241\n19242\n19243\n19244\n19245\n19246\n19247\n19248\n19249\n19250\n19251\n19252\n19253\n19254\n19255\n19256\n19257\n19258\n19259\n19260\n19261\n19262\n19263\n19264\n19265\n19266\n19267\n19268\n19269\n19270\n19271\n19272\n19273\n19274\n19275\n19276\n19277\n19278\n19279\n19280\n19281\n19282\n19283\n19284\n19285\n19286\n19287\n19288\n19289\n19290\n19291\n19292\n19293\n19294\n19295\n19296\n19297\n19298\n19299\n19300\n1
9301\n19302\n19303\n19304\n19305\n19306\n19307\n19308\n19309\n19310\n19311\n19312\n19313\n19314\n19315\n19316\n19317\n19318\n19319\n19320\n19321\n19322\n19323\n19324\n19325\n19326\n19327\n19328\n19329\n19330\n19331\n19332\n19333\n19334\n19335\n19336\n19337\n19338\n19339\n19340\n19341\n19342\n19343\n19344\n19345\n19346\n19347\n19348\n19349\n19350\n19351\n19352\n19353\n19354\n19355\n19356\n19357\n19358\n19359\n19360\n19361\n19362\n19363\n19364\n19365\n19366\n19367\n19368\n19369\n19370\n19371\n19372\n19373\n19374\n19375\n19376\n19377\n19378\n19379\n19380\n19381\n19382\n19383\n19384\n19385\n19386\n19387\n19388\n19389\n19390\n19391\n19392\n19393\n19394\n19395\n19396\n19397\n19398\n19399\n19400\n19401\n19402\n19403\n19404\n19405\n19406\n19407\n19408\n19409\n19410\n19411\n19412\n19413\n19414\n19415\n19416\n19417\n19418\n19419\n19420\n19421\n19422\n19423\n19424\n19425\n19426\n19427\n19428\n19429\n19430\n19431\n19432\n19433\n19434\n19435\n19436\n19437\n19438\n19439\n19440\n19441\n19442\n19443\n19444\n19445\n19446\n19447\n19448\n19449\n19450\n19451\n19452\n19453\n19454\n19455\n19456\n19457\n19458\n19459\n19460\n19461\n19462\n19463\n19464\n19465\n19466\n19467\n19468\n19469\n19470\n19471\n19472\n19473\n19474\n19475\n19476\n19477\n19478\n19479\n19480\n19481\n19482\n19483\n19484\n19485\n19486\n19487\n19488\n19489\n19490\n19491\n19492\n19493\n19494\n19495\n19496\n19497\n19498\n19499\n19500\n19501\n19502\n19503\n19504\n19505\n19506\n19507\n19508\n19509\n19510\n19511\n19512\n19513\n19514\n19515\n19516\n19517\n19518\n19519\n19520\n19521\n19522\n19523\n19524\n19525\n19526\n19527\n19528\n19529\n19530\n19531\n19532\n19533\n19534\n19535\n19536\n19537\n19538\n19539\n19540\n19541\n19542\n19543\n19544\n19545\n19546\n19547\n19548\n19549\n19550\n19551\n19552\n19553\n19554\n19555\n19556\n19557\n19558\n19559\n19560\n19561\n19562\n19563\n19564\n19565\n19566\n19567\n19568\n19569\n19570\n19571\n19572\n19573\n19574\n19575\n19576\n19577\n19578\n19579\n19580\n19581\n19582\n19583\n19584\n19585\n19586\
n19587\n19588\n19589\n19590\n19591\n19592\n19593\n19594\n19595\n19596\n19597\n19598\n19599\n19600\n19601\n19602\n19603\n19604\n19605\n19606\n19607\n19608\n19609\n19610\n19611\n19612\n19613\n19614\n19615\n19616\n19617\n19618\n19619\n19620\n19621\n19622\n19623\n19624\n19625\n19626\n19627\n19628\n19629\n19630\n19631\n19632\n19633\n19634\n19635\n19636\n19637\n19638\n19639\n19640\n19641\n19642\n19643\n19644\n19645\n19646\n19647\n19648\n19649\n19650\n19651\n19652\n19653\n19654\n19655\n19656\n19657\n19658\n19659\n19660\n19661\n19662\n19663\n19664\n19665\n19666\n19667\n19668\n19669\n19670\n19671\n19672\n19673\n19674\n19675\n19676\n19677\n19678\n19679\n19680\n19681\n19682\n19683\n19684\n19685\n19686\n19687\n19688\n19689\n19690\n19691\n19692\n19693\n19694\n19695\n19696\n19697\n19698\n19699\n19700\n19701\n19702\n19703\n19704\n19705\n19706\n19707\n19708\n19709\n19710\n19711\n19712\n19713\n19714\n19715\n19716\n19717\n19718\n19719\n19720\n19721\n19722\n19723\n19724\n19725\n19726\n19727\n19728\n19729\n19730\n19731\n19732\n19733\n19734\n19735\n19736\n19737\n19738\n19739\n19740\n19741\n19742\n19743\n19744\n19745\n19746\n19747\n19748\n19749\n19750\n19751\n19752\n19753\n19754\n19755\n19756\n19757\n19758\n19759\n19760\n19761\n19762\n19763\n19764\n19765\n19766\n19767\n19768\n19769\n19770\n19771\n19772\n19773\n19774\n19775\n19776\n19777\n19778\n19779\n19780\n19781\n19782\n19783\n19784\n19785\n19786\n19787\n19788\n19789\n19790\n19791\n19792\n19793\n19794\n19795\n19796\n19797\n19798\n19799\n19800\n19801\n19802\n19803\n19804\n19805\n19806\n19807\n19808\n19809\n19810\n19811\n19812\n19813\n19814\n19815\n19816\n19817\n19818\n19819\n19820\n19821\n19822\n19823\n19824\n19825\n19826\n19827\n19828\n19829\n19830\n19831\n19832\n19833\n19834\n19835\n19836\n19837\n19838\n19839\n19840\n19841\n19842\n19843\n19844\n19845\n19846\n19847\n19848\n19849\n19850\n19851\n19852\n19853\n19854\n19855\n19856\n19857\n19858\n19859\n19860\n19861\n19862\n19863\n19864\n19865\n19866\n19867\n19868\n19869\n19870\n19871\n1987
2\n19873\n19874\n19875\n19876\n19877\n19878\n19879\n19880\n19881\n19882\n19883\n19884\n19885\n19886\n19887\n19888\n19889\n19890\n19891\n19892\n19893\n19894\n19895\n19896\n19897\n19898\n19899\n19900\n19901\n19902\n19903\n19904\n19905\n19906\n19907\n19908\n19909\n19910\n19911\n19912\n19913\n19914\n19915\n19916\n19917\n19918\n19919\n19920\n19921\n19922\n19923\n19924\n19925\n19926\n19927\n19928\n19929\n19930\n19931\n19932\n19933\n19934\n19935\n19936\n19937\n19938\n19939\n19940\n19941\n19942\n19943\n19944\n19945\n19946\n19947\n19948\n19949\n19950\n19951\n19952\n19953\n19954\n19955\n19956\n19957\n19958\n19959\n19960\n19961\n19962\n19963\n19964\n19965\n19966\n19967\n19968\n19969\n19970\n19971\n19972\n19973\n19974\n19975\n19976\n19977\n19978\n19979\n19980\n19981\n19982\n19983\n19984\n19985\n19986\n19987\n19988\n19989\n19990\n19991\n19992\n19993\n19994\n19995\n19996\n19997\n19998\n19999\n20000\n20001\n20002\n20003\n20004\n20005\n20006\n20007\n20008\n20009\n20010\n20011\n20012\n20013\n20014\n20015\n20016\n20017\n20018\n20019\n20020\n20021\n20022\n20023\n20024\n20025\n20026\n20027\n20028\n20029\n20030\n20031\n20032\n20033\n20034\n20035\n20036\n20037\n20038\n20039\n20040\n20041\n20042\n20043\n20044\n20045\n20046\n20047\n20048\n20049\n20050\n20051\n20052\n20053\n20054\n20055\n20056\n20057\n20058\n20059\n20060\n20061\n20062\n20063\n20064\n20065\n20066\n20067\n20068\n20069\n20070\n20071\n20072\n20073\n20074\n20075\n20076\n20077\n20078\n20079\n20080\n20081\n20082\n20083\n20084\n20085\n20086\n20087\n20088\n20089\n20090\n20091\n20092\n20093\n20094\n20095\n20096\n20097\n20098\n20099\n20100\n20101\n20102\n20103\n20104\n20105\n20106\n20107\n20108\n20109\n20110\n20111\n20112\n20113\n20114\n20115\n20116\n20117\n20118\n20119\n20120\n20121\n20122\n20123\n20124\n20125\n20126\n20127\n20128\n20129\n20130\n20131\n20132\n20133\n20134\n20135\n20136\n20137\n20138\n20139\n20140\n20141\n20142\n20143\n20144\n20145\n20146\n20147\n20148\n20149\n20150\n20151\n20152\n20153\n20154\n20155\n20156\n20157\n20
158\n20159\n20160\n20161\n20162\n20163\n20164\n20165\n20166\n20167\n20168\n20169\n20170\n20171\n20172\n20173\n20174\n20175\n20176\n20177\n20178\n20179\n20180\n20181\n20182\n20183\n20184\n20185\n20186\n20187\n20188\n20189\n20190\n20191\n20192\n20193\n20194\n20195\n20196\n20197\n20198\n20199\n20200\n20201\n20202\n20203\n20204\n20205\n20206\n20207\n20208\n20209\n20210\n20211\n20212\n20213\n20214\n20215\n20216\n20217\n20218\n20219\n20220\n20221\n20222\n20223\n20224\n20225\n20226\n20227\n20228\n20229\n20230\n20231\n20232\n20233\n20234\n20235\n20236\n20237\n20238\n20239\n20240\n20241\n20242\n20243\n20244\n20245\n20246\n20247\n20248\n20249\n20250\n20251\n20252\n20253\n20254\n20255\n20256\n20257\n20258\n20259\n20260\n20261\n20262\n20263\n20264\n20265\n20266\n20267\n20268\n20269\n20270\n20271\n20272\n20273\n20274\n20275\n20276\n20277\n20278\n20279\n20280\n20281\n20282\n20283\n20284\n20285\n20286\n20287\n20288\n20289\n20290\n20291\n20292\n20293\n20294\n20295\n20296\n20297\n20298\n20299\n20300\n20301\n20302\n20303\n20304\n20305\n20306\n20307\n20308\n20309\n20310\n20311\n20312\n20313\n20314\n20315\n20316\n20317\n20318\n20319\n20320\n20321\n20322\n20323\n20324\n20325\n20326\n20327\n20328\n20329\n20330\n20331\n20332\n20333\n20334\n20335\n20336\n20337\n20338\n20339\n20340\n20341\n20342\n20343\n20344\n20345\n20346\n20347\n20348\n20349\n20350\n20351\n20352\n20353\n20354\n20355\n20356\n20357\n20358\n20359\n20360\n20361\n20362\n20363\n20364\n20365\n20366\n20367\n20368\n20369\n20370\n20371\n20372\n20373\n20374\n20375\n20376\n20377\n20378\n20379\n20380\n20381\n20382\n20383\n20384\n20385\n20386\n20387\n20388\n20389\n20390\n20391\n20392\n20393\n20394\n20395\n20396\n20397\n20398\n20399\n20400\n20401\n20402\n20403\n20404\n20405\n20406\n20407\n20408\n20409\n20410\n20411\n20412\n20413\n20414\n20415\n20416\n20417\n20418\n20419\n20420\n20421\n20422\n20423\n20424\n20425\n20426\n20427\n20428\n20429\n20430\n20431\n20432\n20433\n20434\n20435\n20436\n20437\n20438\n20439\n20440\n20441\n20442\n20443\n
20444\n20445\n20446\n20447\n20448\n20449\n20450\n20451\n20452\n20453\n20454\n20455\n20456\n20457\n20458\n20459\n20460\n20461\n20462\n20463\n20464\n20465\n20466\n20467\n20468\n20469\n20470\n20471\n20472\n20473\n20474\n20475\n20476\n20477\n20478\n20479\n20480\n20481\n20482\n20483\n20484\n20485\n20486\n20487\n20488\n20489\n20490\n20491\n20492\n20493\n20494\n20495\n20496\n20497\n20498\n20499\n20500\n20501\n20502\n20503\n20504\n20505\n20506\n20507\n20508\n20509\n20510\n20511\n20512\n20513\n20514\n20515\n20516\n20517\n20518\n20519\n20520\n20521\n20522\n20523\n20524\n20525\n20526\n20527\n20528\n20529\n20530\n20531\n20532\n20533\n20534\n20535\n20536\n20537\n20538\n20539\n20540\n20541\n20542\n20543\n20544\n20545\n20546\n20547\n20548\n20549\n20550\n20551\n20552\n20553\n20554\n20555\n20556\n20557\n20558\n20559\n20560\n20561\n20562\n20563\n20564\n20565\n20566\n20567\n20568\n20569\n20570\n20571\n20572\n20573\n20574\n20575\n20576\n20577\n20578\n20579\n20580\n20581\n20582\n20583\n20584\n20585\n20586\n20587\n20588\n20589\n20590\n20591\n20592\n20593\n20594\n20595\n20596\n20597\n20598\n20599\n20600\n20601\n20602\n20603\n20604\n20605\n20606\n20607\n20608\n20609\n20610\n20611\n20612\n20613\n20614\n20615\n20616\n20617\n20618\n20619\n20620\n20621\n20622\n20623\n20624\n20625\n20626\n20627\n20628\n20629\n20630\n20631\n20632\n20633\n20634\n20635\n20636\n20637\n20638\n20639\n20640\n20641\n20642\n20643\n20644\n20645\n20646\n20647\n20648\n20649\n20650\n20651\n20652\n20653\n20654\n20655\n20656\n20657\n20658\n20659\n20660\n20661\n20662\n20663\n20664\n20665\n20666\n20667\n20668\n20669\n20670\n20671\n20672\n20673\n20674\n20675\n20676\n20677\n20678\n20679\n20680\n20681\n20682\n20683\n20684\n20685\n20686\n20687\n20688\n20689\n20690\n20691\n20692\n20693\n20694\n20695\n20696\n20697\n20698\n20699\n20700\n20701\n20702\n20703\n20704\n20705\n20706\n20707\n20708\n20709\n20710\n20711\n20712\n20713\n20714\n20715\n20716\n20717\n20718\n20719\n20720\n20721\n20722\n20723\n20724\n20725\n20726\n20727\n20728\n20729
\n20730\n20731\n20732\n20733\n20734\n20735\n20736\n20737\n20738\n20739\n20740\n20741\n20742\n20743\n20744\n20745\n20746\n20747\n20748\n20749\n20750\n20751\n20752\n20753\n20754\n20755\n20756\n20757\n20758\n20759\n20760\n20761\n20762\n20763\n20764\n20765\n20766\n20767\n20768\n20769\n20770\n20771\n20772\n20773\n20774\n20775\n20776\n20777\n20778\n20779\n20780\n20781\n20782\n20783\n20784\n20785\n20786\n20787\n20788\n20789\n20790\n20791\n20792\n20793\n20794\n20795\n20796\n20797\n20798\n20799\n20800\n20801\n20802\n20803\n20804\n20805\n20806\n20807\n20808\n20809\n20810\n20811\n20812\n20813\n20814\n20815\n20816\n20817\n20818\n20819\n20820\n20821\n20822\n20823\n20824\n20825\n20826\n20827\n20828\n20829\n20830\n20831\n20832\n20833\n20834\n20835\n20836\n20837\n20838\n20839\n20840\n20841\n20842\n20843\n20844\n20845\n20846\n20847\n20848\n20849\n20850\n20851\n20852\n20853\n20854\n20855\n20856\n20857\n20858\n20859\n20860\n20861\n20862\n20863\n20864\n20865\n20866\n20867\n20868\n20869\n20870\n20871\n20872\n20873\n20874\n20875\n20876\n20877\n20878\n20879\n20880\n20881\n20882\n20883\n20884\n20885\n20886\n20887\n20888\n20889\n20890\n20891\n20892\n20893\n20894\n20895\n20896\n20897\n20898\n20899\n20900\n20901\n20902\n20903\n20904\n20905\n20906\n20907\n20908\n20909\n20910\n20911\n20912\n20913\n20914\n20915\n20916\n20917\n20918\n20919\n20920\n20921\n20922\n20923\n20924\n20925\n20926\n20927\n20928\n20929\n20930\n20931\n20932\n20933\n20934\n20935\n20936\n20937\n20938\n20939\n20940\n20941\n20942\n20943\n20944\n20945\n20946\n20947\n20948\n20949\n20950\n20951\n20952\n20953\n20954\n20955\n20956\n20957\n20958\n20959\n20960\n20961\n20962\n20963\n20964\n20965\n20966\n20967\n20968\n20969\n20970\n20971\n20972\n20973\n20974\n20975\n20976\n20977\n20978\n20979\n20980\n20981\n20982\n20983\n20984\n20985\n20986\n20987\n20988\n20989\n20990\n20991\n20992\n20993\n20994\n20995\n20996\n20997\n20998\n20999\n21000\n21001\n21002\n21003\n21004\n21005\n21006\n21007\n21008\n21009\n21010\n21011\n21012\n21013\n21014\n210
15\n21016\n21017\n21018\n21019\n21020\n21021\n21022\n21023\n21024\n21025\n21026\n21027\n21028\n21029\n21030\n21031\n21032\n21033\n21034\n21035\n21036\n21037\n21038\n21039\n21040\n21041\n21042\n21043\n21044\n21045\n21046\n21047\n21048\n21049\n21050\n21051\n21052\n21053\n21054\n21055\n21056\n21057\n21058\n21059\n21060\n21061\n21062\n21063\n21064\n21065\n21066\n21067\n21068\n21069\n21070\n21071\n21072\n21073\n21074\n21075\n21076\n21077\n21078\n21079\n21080\n21081\n21082\n21083\n21084\n21085\n21086\n21087\n21088\n21089\n21090\n21091\n21092\n21093\n21094\n21095\n21096\n21097\n21098\n21099\n21100\n21101\n21102\n21103\n21104\n21105\n21106\n21107\n21108\n21109\n21110\n21111\n21112\n21113\n21114\n21115\n21116\n21117\n21118\n21119\n21120\n21121\n21122\n21123\n21124\n21125\n21126\n21127\n21128\n21129\n21130\n21131\n21132\n21133\n21134\n21135\n21136\n21137\n21138\n21139\n21140\n21141\n21142\n21143\n21144\n21145\n21146\n21147\n21148\n21149\n21150\n21151\n21152\n21153\n21154\n21155\n21156\n21157\n21158\n21159\n21160\n21161\n21162\n21163\n21164\n21165\n21166\n21167\n21168\n21169\n21170\n21171\n21172\n21173\n21174\n21175\n21176\n21177\n21178\n21179\n21180\n21181\n21182\n21183\n21184\n21185\n21186\n21187\n21188\n21189\n21190\n21191\n21192\n21193\n21194\n21195\n21196\n21197\n21198\n21199\n21200\n21201\n21202\n21203\n21204\n21205\n21206\n21207\n21208\n21209\n21210\n21211\n21212\n21213\n21214\n21215\n21216\n21217\n21218\n21219\n21220\n21221\n21222\n21223\n21224\n21225\n21226\n21227\n21228\n21229\n21230\n21231\n21232\n21233\n21234\n21235\n21236\n21237\n21238\n21239\n21240\n21241\n21242\n21243\n21244\n21245\n21246\n21247\n21248\n21249\n21250\n21251\n21252\n21253\n21254\n21255\n21256\n21257\n21258\n21259\n21260\n21261\n21262\n21263\n21264\n21265\n21266\n21267\n21268\n21269\n21270\n21271\n21272\n21273\n21274\n21275\n21276\n21277\n21278\n21279\n21280\n21281\n21282\n21283\n21284\n21285\n21286\n21287\n21288\n21289\n21290\n21291\n21292\n21293\n21294\n21295\n21296\n21297\n21298\n21299\n21300\n2
1301\n21302\n21303\n21304\n21305\n21306\n21307\n21308\n21309\n21310\n21311\n21312\n21313\n21314\n21315\n21316\n21317\n21318\n21319\n21320\n21321\n21322\n21323\n21324\n21325\n21326\n21327\n21328\n21329\n21330\n21331\n21332\n21333\n21334\n21335\n21336\n21337\n21338\n21339\n21340\n21341\n21342\n21343\n21344\n21345\n21346\n21347\n21348\n21349\n21350\n21351\n21352\n21353\n21354\n21355\n21356\n21357\n21358\n21359\n21360\n21361\n21362\n21363\n21364\n21365\n21366\n21367\n21368\n21369\n21370\n21371\n21372\n21373\n21374\n21375\n21376\n21377\n21378\n21379\n21380\n21381\n21382\n21383\n21384\n21385\n21386\n21387\n21388\n21389\n21390\n21391\n21392\n21393\n21394\n21395\n21396\n21397\n21398\n21399\n21400\n21401\n21402\n21403\n21404\n21405\n21406\n21407\n21408\n21409\n21410\n21411\n21412\n21413\n21414\n21415\n21416\n21417\n21418\n21419\n21420\n21421\n21422\n21423\n21424\n21425\n21426\n21427\n21428\n21429\n21430\n21431\n21432\n21433\n21434\n21435\n21436\n21437\n21438\n21439\n21440\n21441\n21442\n21443\n21444\n21445\n21446\n21447\n21448\n21449\n21450\n21451\n21452\n21453\n21454\n21455\n21456\n21457\n21458\n21459\n21460\n21461\n21462\n21463\n21464\n21465\n21466\n21467\n21468\n21469\n21470\n21471\n21472\n21473\n21474\n21475\n21476\n21477\n21478\n21479\n21480\n21481\n21482\n21483\n21484\n21485\n21486\n21487\n21488\n21489\n21490\n21491\n21492\n21493\n21494\n21495\n21496\n21497\n21498\n21499\n21500\n21501\n21502\n21503\n21504\n21505\n21506\n21507\n21508\n21509\n21510\n21511\n21512\n21513\n21514\n21515\n21516\n21517\n21518\n21519\n21520\n21521\n21522\n21523\n21524\n21525\n21526\n21527\n21528\n21529\n21530\n21531\n21532\n21533\n21534\n21535\n21536\n21537\n21538\n21539\n21540\n21541\n21542\n21543\n21544\n21545\n21546\n21547\n21548\n21549\n21550\n21551\n21552\n21553\n21554\n21555\n21556\n21557\n21558\n21559\n21560\n21561\n21562\n21563\n21564\n21565\n21566\n21567\n21568\n21569\n21570\n21571\n21572\n21573\n21574\n21575\n21576\n21577\n21578\n21579\n21580\n21581\n21582\n21583\n21584\n21585\n21586\
n21587\n21588\n21589\n21590\n21591\n21592\n21593\n21594\n21595\n21596\n21597\n21598\n21599\n21600\n21601\n21602\n21603\n21604\n21605\n21606\n21607\n21608\n21609\n21610\n21611\n21612\n21613\n21614\n21615\n21616\n21617\n21618\n21619\n21620\n21621\n21622\n21623\n21624\n21625\n21626\n21627\n21628\n21629\n21630\n21631\n21632\n21633\n21634\n21635\n21636\n21637\n21638\n21639\n21640\n21641\n21642\n21643\n21644\n21645\n21646\n21647\n21648\n21649\n21650\n21651\n21652\n21653\n21654\n21655\n21656\n21657\n21658\n21659\n21660\n21661\n21662\n21663\n21664\n21665\n21666\n21667\n21668\n21669\n21670\n21671\n21672\n21673\n21674\n21675\n21676\n21677\n21678\n21679\n21680\n21681\n21682\n21683\n21684\n21685\n21686\n21687\n21688\n21689\n21690\n21691\n21692\n21693\n21694\n21695\n21696\n21697\n21698\n21699\n21700\n21701\n21702\n21703\n21704\n21705\n21706\n21707\n21708\n21709\n21710\n21711\n21712\n21713\n21714\n21715\n21716\n21717\n21718\n21719\n21720\n21721\n21722\n21723\n21724\n21725\n21726\n21727\n21728\n21729\n21730\n21731\n21732\n21733\n21734\n21735\n21736\n21737\n21738\n21739\n21740\n21741\n21742\n21743\n21744\n21745\n21746\n21747\n21748\n21749\n21750\n21751\n21752\n21753\n21754\n21755\n21756\n21757\n21758\n21759\n21760\n21761\n21762\n21763\n21764\n21765\n21766\n21767\n21768\n21769\n21770\n21771\n21772\n21773\n21774\n21775\n21776\n21777\n21778\n21779\n21780\n21781\n21782\n21783\n21784\n21785\n21786\n21787\n21788\n21789\n21790\n21791\n21792\n21793\n21794\n21795\n21796\n21797\n21798\n21799\n21800\n21801\n21802\n21803\n21804\n21805\n21806\n21807\n21808\n21809\n21810\n21811\n21812\n21813\n21814\n21815\n21816\n21817\n21818\n21819\n21820\n21821\n21822\n21823\n21824\n21825\n21826\n21827\n21828\n21829\n21830\n21831\n21832\n21833\n21834\n21835\n21836\n21837\n21838\n21839\n21840\n21841\n21842\n21843\n21844\n21845\n21846\n21847\n21848\n21849\n21850\n21851\n21852\n21853\n21854\n21855\n21856\n21857\n21858\n21859\n21860\n21861\n21862\n21863\n21864\n21865\n21866\n21867\n21868\n21869\n21870\n21871\n2187
2\n21873\n21874\n21875\n21876\n21877\n21878\n21879\n21880\n21881\n21882\n21883\n21884\n21885\n21886\n21887\n21888\n21889\n21890\n21891\n21892\n21893\n21894\n21895\n21896\n21897\n21898\n21899\n21900\n21901\n21902\n21903\n21904\n21905\n21906\n21907\n21908\n21909\n21910\n21911\n21912\n21913\n21914\n21915\n21916\n21917\n21918\n21919\n21920\n21921\n21922\n21923\n21924\n21925\n21926\n21927\n21928\n21929\n21930\n21931\n21932\n21933\n21934\n21935\n21936\n21937\n21938\n21939\n21940\n21941\n21942\n21943\n21944\n21945\n21946\n21947\n21948\n21949\n21950\n21951\n21952\n21953\n21954\n21955\n21956\n21957\n21958\n21959\n21960\n21961\n21962\n21963\n21964\n21965\n21966\n21967\n21968\n21969\n21970\n21971\n21972\n21973\n21974\n21975\n21976\n21977\n21978\n21979\n21980\n21981\n21982\n21983\n21984\n21985\n21986\n21987\n21988\n21989\n21990\n21991\n21992\n21993\n21994\n21995\n21996\n21997\n21998\n21999\n22000\n22001\n22002\n22003\n22004\n22005\n22006\n22007\n22008\n22009\n22010\n22011\n22012\n22013\n22014\n22015\n22016\n22017\n22018\n22019\n22020\n22021\n22022\n22023\n22024\n22025\n22026\n22027\n22028\n22029\n22030\n22031\n22032\n22033\n22034\n22035\n22036\n22037\n22038\n22039\n22040\n22041\n22042\n22043\n22044\n22045\n22046\n22047\n22048\n22049\n22050\n22051\n22052\n22053\n22054\n22055\n22056\n22057\n22058\n22059\n22060\n22061\n22062\n22063\n22064\n22065\n22066\n22067\n22068\n22069\n22070\n22071\n22072\n22073\n22074\n22075\n22076\n22077\n22078\n22079\n22080\n22081\n22082\n22083\n22084\n22085\n22086\n22087\n22088\n22089\n22090\n22091\n22092\n22093\n22094\n22095\n22096\n22097\n22098\n22099\n22100\n22101\n22102\n22103\n22104\n22105\n22106\n22107\n22108\n22109\n22110\n22111\n22112\n22113\n22114\n22115\n22116\n22117\n22118\n22119\n22120\n22121\n22122\n22123\n22124\n22125\n22126\n22127\n22128\n22129\n22130\n22131\n22132\n22133\n22134\n22135\n22136\n22137\n22138\n22139\n22140\n22141\n22142\n22143\n22144\n22145\n22146\n22147\n22148\n22149\n22150\n22151\n22152\n22153\n22154\n22155\n22156\n22157\n22
158\n22159\n22160\n22161\n22162\n22163\n22164\n22165\n22166\n22167\n22168\n22169\n22170\n22171\n22172\n22173\n22174\n22175\n22176\n22177\n22178\n22179\n22180\n22181\n22182\n22183\n22184\n22185\n22186\n22187\n22188\n22189\n22190\n22191\n22192\n22193\n22194\n22195\n22196\n22197\n22198\n22199\n22200\n22201\n22202\n22203\n22204\n22205\n22206\n22207\n22208\n22209\n22210\n22211\n22212\n22213\n22214\n22215\n22216\n22217\n22218\n22219\n22220\n22221\n22222\n22223\n22224\n22225\n22226\n22227\n22228\n22229\n22230\n22231\n22232\n22233\n22234\n22235\n22236\n22237\n22238\n22239\n22240\n22241\n22242\n22243\n22244\n22245\n22246\n22247\n22248\n22249\n22250\n22251\n22252\n22253\n22254\n22255\n22256\n22257\n22258\n22259\n22260\n22261\n22262\n22263\n22264\n22265\n22266\n22267\n22268\n22269\n22270\n22271\n22272\n22273\n22274\n22275\n22276\n22277\n22278\n22279\n22280\n22281\n22282\n22283\n22284\n22285\n22286\n22287\n22288\n22289\n22290\n22291\n22292\n22293\n22294\n22295\n22296\n22297\n22298\n22299\n22300\n22301\n22302\n22303\n22304\n22305\n22306\n22307\n22308\n22309\n22310\n22311\n22312\n22313\n22314\n22315\n22316\n22317\n22318\n22319\n22320\n22321\n22322\n22323\n22324\n22325\n22326\n22327\n22328\n22329\n22330\n22331\n22332\n22333\n22334\n22335\n22336\n22337\n22338\n22339\n22340\n22341\n22342\n22343\n22344\n22345\n22346\n22347\n22348\n22349\n22350\n22351\n22352\n22353\n22354\n22355\n22356\n22357\n22358\n22359\n22360\n22361\n22362\n22363\n22364\n22365\n22366\n22367\n22368\n22369\n22370\n22371\n22372\n22373\n22374\n22375\n22376\n22377\n22378\n22379\n22380\n22381\n22382\n22383\n22384\n22385\n22386\n22387\n22388\n22389\n22390\n22391\n22392\n22393\n22394\n22395\n22396\n22397\n22398\n22399\n22400\n22401\n22402\n22403\n22404\n22405\n22406\n22407\n22408\n22409\n22410\n22411\n22412\n22413\n22414\n22415\n22416\n22417\n22418\n22419\n22420\n22421\n22422\n22423\n22424\n22425\n22426\n22427\n22428\n22429\n22430\n22431\n22432\n22433\n22434\n22435\n22436\n22437\n22438\n22439\n22440\n22441\n22442\n22443\n
22444\n22445\n22446\n22447\n22448\n22449\n22450\n22451\n22452\n22453\n22454\n22455\n22456\n22457\n22458\n22459\n22460\n22461\n22462\n22463\n22464\n22465\n22466\n22467\n22468\n22469\n22470\n22471\n22472\n22473\n22474\n22475\n22476\n22477\n22478\n22479\n22480\n22481\n22482\n22483\n22484\n22485\n22486\n22487\n22488\n22489\n22490\n22491\n22492\n22493\n22494\n22495\n22496\n22497\n22498\n22499\n22500\n22501\n22502\n22503\n22504\n22505\n22506\n22507\n22508\n22509\n22510\n22511\n22512\n22513\n22514\n22515\n22516\n22517\n22518\n22519\n22520\n22521\n22522\n22523\n22524\n22525\n22526\n22527\n22528\n22529\n22530\n22531\n22532\n22533\n22534\n22535\n22536\n22537\n22538\n22539\n22540\n22541\n22542\n22543\n22544\n22545\n22546\n22547\n22548\n22549\n22550\n22551\n22552\n22553\n22554\n22555\n22556\n22557\n22558\n22559\n22560\n22561\n22562\n22563\n22564\n22565\n22566\n22567\n22568\n22569\n22570\n22571\n22572\n22573\n22574\n22575\n22576\n22577\n22578\n22579\n22580\n22581\n22582\n22583\n22584\n22585\n22586\n22587\n22588\n22589\n22590\n22591\n22592\n22593\n22594\n22595\n22596\n22597\n22598\n22599\n22600\n22601\n22602\n22603\n22604\n22605\n22606\n22607\n22608\n22609\n22610\n22611\n22612\n22613\n22614\n22615\n22616\n22617\n22618\n22619\n22620\n22621\n22622\n22623\n22624\n22625\n22626\n22627\n22628\n22629\n22630\n22631\n22632\n22633\n22634\n22635\n22636\n22637\n22638\n22639\n22640\n22641\n22642\n22643\n22644\n22645\n22646\n22647\n22648\n22649\n22650\n22651\n22652\n22653\n22654\n22655\n22656\n22657\n22658\n22659\n22660\n22661\n22662\n22663\n22664\n22665\n22666\n22667\n22668\n22669\n22670\n22671\n22672\n22673\n22674\n22675\n22676\n22677\n22678\n22679\n22680\n22681\n22682\n22683\n22684\n22685\n22686\n22687\n22688\n22689\n22690\n22691\n22692\n22693\n22694\n22695\n22696\n22697\n22698\n22699\n22700\n22701\n22702\n22703\n22704\n22705\n22706\n22707\n22708\n22709\n22710\n22711\n22712\n22713\n22714\n22715\n22716\n22717\n22718\n22719\n22720\n22721\n22722\n22723\n22724\n22725\n22726\n22727\n22728\n22729
\n22730\n22731\n22732\n22733\n22734\n22735\n22736\n22737\n22738\n22739\n22740\n22741\n22742\n22743\n22744\n22745\n22746\n22747\n22748\n22749\n22750\n22751\n22752\n22753\n22754\n22755\n22756\n22757\n22758\n22759\n22760\n22761\n22762\n22763\n22764\n22765\n22766\n22767\n22768\n22769\n22770\n22771\n22772\n22773\n22774\n22775\n22776\n22777\n22778\n22779\n22780\n22781\n22782\n22783\n22784\n22785\n22786\n22787\n22788\n22789\n22790\n22791\n22792\n22793\n22794\n22795\n22796\n22797\n22798\n22799\n22800\n22801\n22802\n22803\n22804\n22805\n22806\n22807\n22808\n22809\n22810\n22811\n22812\n22813\n22814\n22815\n22816\n22817\n22818\n22819\n22820\n22821\n22822\n22823\n22824\n22825\n22826\n22827\n22828\n22829\n22830\n22831\n22832\n22833\n22834\n22835\n22836\n22837\n22838\n22839\n22840\n22841\n22842\n22843\n22844\n22845\n22846\n22847\n22848\n22849\n22850\n22851\n22852\n22853\n22854\n22855\n22856\n22857\n22858\n22859\n22860\n22861\n22862\n22863\n22864\n22865\n22866\n22867\n22868\n22869\n22870\n22871\n22872\n22873\n22874\n22875\n22876\n22877\n22878\n22879\n22880\n22881\n22882\n22883\n22884\n22885\n22886\n22887\n22888\n22889\n22890\n22891\n22892\n22893\n22894\n22895\n22896\n22897\n22898\n22899\n22900\n22901\n22902\n22903\n22904\n22905\n22906\n22907\n22908\n22909\n22910\n22911\n22912\n22913\n22914\n22915\n22916\n22917\n22918\n22919\n22920\n22921\n22922\n22923\n22924\n22925\n22926\n22927\n22928\n22929\n22930\n22931\n22932\n22933\n22934\n22935\n22936\n22937\n22938\n22939\n22940\n22941\n22942\n22943\n22944\n22945\n22946\n22947\n22948\n22949\n22950\n22951\n22952\n22953\n22954\n22955\n22956\n22957\n22958\n22959\n22960\n22961\n22962\n22963\n22964\n22965\n22966\n22967\n22968\n22969\n22970\n22971\n22972\n22973\n22974\n22975\n22976\n22977\n22978\n22979\n22980\n22981\n22982\n22983\n22984\n22985\n22986\n22987\n22988\n22989\n22990\n22991\n22992\n22993\n22994\n22995\n22996\n22997\n22998\n22999\n23000\n23001\n23002\n23003\n23004\n23005\n23006\n23007\n23008\n23009\n23010\n23011\n23012\n23013\n23014\n230
15\n23016\n23017\n23018\n23019\n23020\n23021\n23022\n23023\n23024\n23025\n23026\n23027\n23028\n23029\n23030\n23031\n23032\n23033\n23034\n23035\n23036\n23037\n23038\n23039\n23040\n23041\n23042\n23043\n23044\n23045\n23046\n23047\n23048\n23049\n23050\n23051\n23052\n23053\n23054\n23055\n23056\n23057\n23058\n23059\n23060\n23061\n23062\n23063\n23064\n23065\n23066\n23067\n23068\n23069\n23070\n23071\n23072\n23073\n23074\n23075\n23076\n23077\n23078\n23079\n23080\n23081\n23082\n23083\n23084\n23085\n23086\n23087\n23088\n23089\n23090\n23091\n23092\n23093\n23094\n23095\n23096\n23097\n23098\n23099\n23100\n23101\n23102\n23103\n23104\n23105\n23106\n23107\n23108\n23109\n23110\n23111\n23112\n23113\n23114\n23115\n23116\n23117\n23118\n23119\n23120\n23121\n23122\n23123\n23124\n23125\n23126\n23127\n23128\n23129\n23130\n23131\n23132\n23133\n23134\n23135\n23136\n23137\n23138\n23139\n23140\n23141\n23142\n23143\n23144\n23145\n23146\n23147\n23148\n23149\n23150\n23151\n23152\n23153\n23154\n23155\n23156\n23157\n23158\n23159\n23160\n23161\n23162\n23163\n23164\n23165\n23166\n23167\n23168\n23169\n23170\n23171\n23172\n23173\n23174\n23175\n23176\n23177\n23178\n23179\n23180\n23181\n23182\n23183\n23184\n23185\n23186\n23187\n23188\n23189\n23190\n23191\n23192\n23193\n23194\n23195\n23196\n23197\n23198\n23199\n23200\n23201\n23202\n23203\n23204\n23205\n23206\n23207\n23208\n23209\n23210\n23211\n23212\n23213\n23214\n23215\n23216\n23217\n23218\n23219\n23220\n23221\n23222\n23223\n23224\n23225\n23226\n23227\n23228\n23229\n23230\n23231\n23232\n23233\n23234\n23235\n23236\n23237\n23238\n23239\n23240\n23241\n23242\n23243\n23244\n23245\n23246\n23247\n23248\n23249\n23250\n23251\n23252\n23253\n23254\n23255\n23256\n23257\n23258\n23259\n23260\n23261\n23262\n23263\n23264\n23265\n23266\n23267\n23268\n23269\n23270\n23271\n23272\n23273\n23274\n23275\n23276\n23277\n23278\n23279\n23280\n23281\n23282\n23283\n23284\n23285\n23286\n23287\n23288\n23289\n23290\n23291\n23292\n23293\n23294\n23295\n23296\n23297\n23298\n23299\n23300\n2
3301\n23302\n23303\n23304\n23305\n23306\n23307\n23308\n23309\n23310\n23311\n23312\n23313\n23314\n23315\n23316\n23317\n23318\n23319\n23320\n23321\n23322\n23323\n23324\n23325\n23326\n23327\n23328\n23329\n23330\n23331\n23332\n23333\n23334\n23335\n23336\n23337\n23338\n23339\n23340\n23341\n23342\n23343\n23344\n23345\n23346\n23347\n23348\n23349\n23350\n23351\n23352\n23353\n23354\n23355\n23356\n23357\n23358\n23359\n23360\n23361\n23362\n23363\n23364\n23365\n23366\n23367\n23368\n23369\n23370\n23371\n23372\n23373\n23374\n23375\n23376\n23377\n23378\n23379\n23380\n23381\n23382\n23383\n23384\n23385\n23386\n23387\n23388\n23389\n23390\n23391\n23392\n23393\n23394\n23395\n23396\n23397\n23398\n23399\n23400\n23401\n23402\n23403\n23404\n23405\n23406\n23407\n23408\n23409\n23410\n23411\n23412\n23413\n23414\n23415\n23416\n23417\n23418\n23419\n23420\n23421\n23422\n23423\n23424\n23425\n23426\n23427\n23428\n23429\n23430\n23431\n23432\n23433\n23434\n23435\n23436\n23437\n23438\n23439\n23440\n23441\n23442\n23443\n23444\n23445\n23446\n23447\n23448\n23449\n23450\n23451\n23452\n23453\n23454\n23455\n23456\n23457\n23458\n23459\n23460\n23461\n23462\n23463\n23464\n23465\n23466\n23467\n23468\n23469\n23470\n23471\n23472\n23473\n23474\n23475\n23476\n23477\n23478\n23479\n23480\n23481\n23482\n23483\n23484\n23485\n23486\n23487\n23488\n23489\n23490\n23491\n23492\n23493\n23494\n23495\n23496\n23497\n23498\n23499\n23500\n23501\n23502\n23503\n23504\n23505\n23506\n23507\n23508\n23509\n23510\n23511\n23512\n23513\n23514\n23515\n23516\n23517\n23518\n23519\n23520\n23521\n23522\n23523\n23524\n23525\n23526\n23527\n23528\n23529\n23530\n23531\n23532\n23533\n23534\n23535\n23536\n23537\n23538\n23539\n23540\n23541\n23542\n23543\n23544\n23545\n23546\n23547\n23548\n23549\n23550\n23551\n23552\n23553\n23554\n23555\n23556\n23557\n23558\n23559\n23560\n23561\n23562\n23563\n23564\n23565\n23566\n23567\n23568\n23569\n23570\n23571\n23572\n23573\n23574\n23575\n23576\n23577\n23578\n23579\n23580\n23581\n23582\n23583\n23584\n23585\n23586\
n23587\n23588\n23589\n23590\n23591\n23592\n23593\n23594\n23595\n23596\n23597\n23598\n23599\n23600\n23601\n23602\n23603\n23604\n23605\n23606\n23607\n23608\n23609\n23610\n23611\n23612\n23613\n23614\n23615\n23616\n23617\n23618\n23619\n23620\n23621\n23622\n23623\n23624\n23625\n23626\n23627\n23628\n23629\n23630\n23631\n23632\n23633\n23634\n23635\n23636\n23637\n23638\n23639\n23640\n23641\n23642\n23643\n23644\n23645\n23646\n23647\n23648\n23649\n23650\n23651\n23652\n23653\n23654\n23655\n23656\n23657\n23658\n23659\n23660\n23661\n23662\n23663\n23664\n23665\n23666\n23667\n23668\n23669\n23670\n23671\n23672\n23673\n23674\n23675\n23676\n23677\n23678\n23679\n23680\n23681\n23682\n23683\n23684\n23685\n23686\n23687\n23688\n23689\n23690\n23691\n23692\n23693\n23694\n23695\n23696\n23697\n23698\n23699\n23700\n23701\n23702\n23703\n23704\n23705\n23706\n23707\n23708\n23709\n23710\n23711\n23712\n23713\n23714\n23715\n23716\n23717\n23718\n23719\n23720\n23721\n23722\n23723\n23724\n23725\n23726\n23727\n23728\n23729\n23730\n23731\n23732\n23733\n23734\n23735\n23736\n23737\n23738\n23739\n23740\n23741\n23742\n23743\n23744\n23745\n23746\n23747\n23748\n23749\n23750\n23751\n23752\n23753\n23754\n23755\n23756\n23757\n23758\n23759\n23760\n23761\n23762\n23763\n23764\n23765\n23766\n23767\n23768\n23769\n23770\n23771\n23772\n23773\n23774\n23775\n23776\n23777\n23778\n23779\n23780\n23781\n23782\n23783\n23784\n23785\n23786\n23787\n23788\n23789\n23790\n23791\n23792\n23793\n23794\n23795\n23796\n23797\n23798\n23799\n23800\n23801\n23802\n23803\n23804\n23805\n23806\n23807\n23808\n23809\n23810\n23811\n23812\n23813\n23814\n23815\n23816\n23817\n23818\n23819\n23820\n23821\n23822\n23823\n23824\n23825\n23826\n23827\n23828\n23829\n23830\n23831\n23832\n23833\n23834\n23835\n23836\n23837\n23838\n23839\n23840\n23841\n23842\n23843\n23844\n23845\n23846\n23847\n23848\n23849\n23850\n23851\n23852\n23853\n23854\n23855\n23856\n23857\n23858\n23859\n23860\n23861\n23862\n23863\n23864\n23865\n23866\n23867\n23868\n23869\n23870\n23871\n2387
2\n23873\n23874\n23875\n23876\n23877\n23878\n23879\n23880\n23881\n23882\n23883\n23884\n23885\n23886\n23887\n23888\n23889\n23890\n23891\n23892\n23893\n23894\n23895\n23896\n23897\n23898\n23899\n23900\n23901\n23902\n23903\n23904\n23905\n23906\n23907\n23908\n23909\n23910\n23911\n23912\n23913\n23914\n23915\n23916\n23917\n23918\n23919\n23920\n23921\n23922\n23923\n23924\n23925\n23926\n23927\n23928\n23929\n23930\n23931\n23932\n23933\n23934\n23935\n23936\n23937\n23938\n23939\n23940\n23941\n23942\n23943\n23944\n23945\n23946\n23947\n23948\n23949\n23950\n23951\n23952\n23953\n23954\n23955\n23956\n23957\n23958\n23959\n23960\n23961\n23962\n23963\n23964\n23965\n23966\n23967\n23968\n23969\n23970\n23971\n23972\n23973\n23974\n23975\n23976\n23977\n23978\n23979\n23980\n23981\n23982\n23983\n23984\n23985\n23986\n23987\n23988\n23989\n23990\n23991\n23992\n23993\n23994\n23995\n23996\n23997\n23998\n23999\n24000\n24001\n24002\n24003\n24004\n24005\n24006\n24007\n24008\n24009\n24010\n24011\n24012\n24013\n24014\n24015\n24016\n24017\n24018\n24019\n24020\n24021\n24022\n24023\n24024\n24025\n24026\n24027\n24028\n24029\n24030\n24031\n24032\n24033\n24034\n24035\n24036\n24037\n24038\n24039\n24040\n24041\n24042\n24043\n24044\n24045\n24046\n24047\n24048\n24049\n24050\n24051\n24052\n24053\n24054\n24055\n24056\n24057\n24058\n24059\n24060\n24061\n24062\n24063\n24064\n24065\n24066\n24067\n24068\n24069\n24070\n24071\n24072\n24073\n24074\n24075\n24076\n24077\n24078\n24079\n24080\n24081\n24082\n24083\n24084\n24085\n24086\n24087\n24088\n24089\n24090\n24091\n24092\n24093\n24094\n24095\n24096\n24097\n24098\n24099\n24100\n24101\n24102\n24103\n24104\n24105\n24106\n24107\n24108\n24109\n24110\n24111\n24112\n24113\n24114\n24115\n24116\n24117\n24118\n24119\n24120\n24121\n24122\n24123\n24124\n24125\n24126\n24127\n24128\n24129\n24130\n24131\n24132\n24133\n24134\n24135\n24136\n24137\n24138\n24139\n24140\n24141\n24142\n24143\n24144\n24145\n24146\n24147\n24148\n24149\n24150\n24151\n24152\n24153\n24154\n24155\n24156\n24157\n24
158\n24159\n24160\n24161\n24162\n24163\n24164\n24165\n24166\n24167\n24168\n24169\n24170\n24171\n24172\n24173\n24174\n24175\n24176\n24177\n24178\n24179\n24180\n24181\n24182\n24183\n24184\n24185\n24186\n24187\n24188\n24189\n24190\n24191\n24192\n24193\n24194\n24195\n24196\n24197\n24198\n24199\n24200\n24201\n24202\n24203\n24204\n24205\n24206\n24207\n24208\n24209\n24210\n24211\n24212\n24213\n24214\n24215\n24216\n24217\n24218\n24219\n24220\n24221\n24222\n24223\n24224\n24225\n24226\n24227\n24228\n24229\n24230\n24231\n24232\n24233\n24234\n24235\n24236\n24237\n24238\n24239\n24240\n24241\n24242\n24243\n24244\n24245\n24246\n24247\n24248\n24249\n24250\n24251\n24252\n24253\n24254\n24255\n24256\n24257\n24258\n24259\n24260\n24261\n24262\n24263\n24264\n24265\n24266\n24267\n24268\n24269\n24270\n24271\n24272\n24273\n24274\n24275\n24276\n24277\n24278\n24279\n24280\n24281\n24282\n24283\n24284\n24285\n24286\n24287\n24288\n24289\n24290\n24291\n24292\n24293\n24294\n24295\n24296\n24297\n24298\n24299\n24300\n24301\n24302\n24303\n24304\n24305\n24306\n24307\n24308\n24309\n24310\n24311\n24312\n24313\n24314\n24315\n24316\n24317\n24318\n24319\n24320\n24321\n24322\n24323\n24324\n24325\n24326\n24327\n24328\n24329\n24330\n24331\n24332\n24333\n24334\n24335\n24336\n24337\n24338\n24339\n24340\n24341\n24342\n24343\n24344\n24345\n24346\n24347\n24348\n24349\n24350\n24351\n24352\n24353\n24354\n24355\n24356\n24357\n24358\n24359\n24360\n24361\n24362\n24363\n24364\n24365\n24366\n24367\n24368\n24369\n24370\n24371\n24372\n24373\n24374\n24375\n24376\n24377\n24378\n24379\n24380\n24381\n24382\n24383\n24384\n24385\n24386\n24387\n24388\n24389\n24390\n24391\n24392\n24393\n24394\n24395\n24396\n24397\n24398\n24399\n24400\n24401\n24402\n24403\n24404\n24405\n24406\n24407\n24408\n24409\n24410\n24411\n24412\n24413\n24414\n24415\n24416\n24417\n24418\n24419\n24420\n24421\n24422\n24423\n24424\n24425\n24426\n24427\n24428\n24429\n24430\n24431\n24432\n24433\n24434\n24435\n24436\n24437\n24438\n24439\n24440\n24441\n24442\n24443\n
24444\n24445\n24446\n24447\n24448\n24449\n24450\n24451\n24452\n24453\n24454\n24455\n24456\n24457\n24458\n24459\n24460\n24461\n24462\n24463\n24464\n24465\n24466\n24467\n24468\n24469\n24470\n24471\n24472\n24473\n24474\n24475\n24476\n24477\n24478\n24479\n24480\n24481\n24482\n24483\n24484\n24485\n24486\n24487\n24488\n24489\n24490\n24491\n24492\n24493\n24494\n24495\n24496\n24497\n24498\n24499\n24500\n24501\n24502\n24503\n24504\n24505\n24506\n24507\n24508\n24509\n24510\n24511\n24512\n24513\n24514\n24515\n24516\n24517\n24518\n24519\n24520\n24521\n24522\n24523\n24524\n24525\n24526\n24527\n24528\n24529\n24530\n24531\n24532\n24533\n24534\n24535\n24536\n24537\n24538\n24539\n24540\n24541\n24542\n24543\n24544\n24545\n24546\n24547\n24548\n24549\n24550\n24551\n24552\n24553\n24554\n24555\n24556\n24557\n24558\n24559\n24560\n24561\n24562\n24563\n24564\n24565\n24566\n24567\n24568\n24569\n24570\n24571\n24572\n24573\n24574\n24575\n24576\n24577\n24578\n24579\n24580\n24581\n24582\n24583\n24584\n24585\n24586\n24587\n24588\n24589\n24590\n24591\n24592\n24593\n24594\n24595\n24596\n24597\n24598\n24599\n24600\n24601\n24602\n24603\n24604\n24605\n24606\n24607\n24608\n24609\n24610\n24611\n24612\n24613\n24614\n24615\n24616\n24617\n24618\n24619\n24620\n24621\n24622\n24623\n24624\n24625\n24626\n24627\n24628\n24629\n24630\n24631\n24632\n24633\n24634\n24635\n24636\n24637\n24638\n24639\n24640\n24641\n24642\n24643\n24644\n24645\n24646\n24647\n24648\n24649\n24650\n24651\n24652\n24653\n24654\n24655\n24656\n24657\n24658\n24659\n24660\n24661\n24662\n24663\n24664\n24665\n24666\n24667\n24668\n24669\n24670\n24671\n24672\n24673\n24674\n24675\n24676\n24677\n24678\n24679\n24680\n24681\n24682\n24683\n24684\n24685\n24686\n24687\n24688\n24689\n24690\n24691\n24692\n24693\n24694\n24695\n24696\n24697\n24698\n24699\n24700\n24701\n24702\n24703\n24704\n24705\n24706\n24707\n24708\n24709\n24710\n24711\n24712\n24713\n24714\n24715\n24716\n24717\n24718\n24719\n24720\n24721\n24722\n24723\n24724\n24725\n24726\n24727\n24728\n24729
\n24730\n24731\n24732\n24733\n24734\n24735\n24736\n24737\n24738\n24739\n24740\n24741\n24742\n24743\n24744\n24745\n24746\n24747\n24748\n24749\n24750\n24751\n24752\n24753\n24754\n24755\n24756\n24757\n24758\n24759\n24760\n24761\n24762\n24763\n24764\n24765\n24766\n24767\n24768\n24769\n24770\n24771\n24772\n24773\n24774\n24775\n24776\n24777\n24778\n24779\n24780\n24781\n24782\n24783\n24784\n24785\n24786\n24787\n24788\n24789\n24790\n24791\n24792\n24793\n24794\n24795\n24796\n24797\n24798\n24799\n24800\n24801\n24802\n24803\n24804\n24805\n24806\n24807\n24808\n24809\n24810\n24811\n24812\n24813\n24814\n24815\n24816\n24817\n24818\n24819\n24820\n24821\n24822\n24823\n24824\n24825\n24826\n24827\n24828\n24829\n24830\n24831\n24832\n24833\n24834\n24835\n24836\n24837\n24838\n24839\n24840\n24841\n24842\n24843\n24844\n24845\n24846\n24847\n24848\n24849\n24850\n24851\n24852\n24853\n24854\n24855\n24856\n24857\n24858\n24859\n24860\n24861\n24862\n24863\n24864\n24865\n24866\n24867\n24868\n24869\n24870\n24871\n24872\n24873\n24874\n24875\n24876\n24877\n24878\n24879\n24880\n24881\n24882\n24883\n24884\n24885\n24886\n24887\n24888\n24889\n24890\n24891\n24892\n24893\n24894\n24895\n24896\n24897\n24898\n24899\n24900\n24901\n24902\n24903\n24904\n24905\n24906\n24907\n24908\n24909\n24910\n24911\n24912\n24913\n24914\n24915\n24916\n24917\n24918\n24919\n24920\n24921\n24922\n24923\n24924\n24925\n24926\n24927\n24928\n24929\n24930\n24931\n24932\n24933\n24934\n24935\n24936\n24937\n24938\n24939\n24940\n24941\n24942\n24943\n24944\n24945\n24946\n24947\n24948\n24949\n24950\n24951\n24952\n24953\n24954\n24955\n24956\n24957\n24958\n24959\n24960\n24961\n24962\n24963\n24964\n24965\n24966\n24967\n24968\n24969\n24970\n24971\n24972\n24973\n24974\n24975\n24976\n24977\n24978\n24979\n24980\n24981\n24982\n24983\n24984\n24985\n24986\n24987\n24988\n24989\n24990\n24991\n24992\n24993\n24994\n24995\n24996\n24997\n24998\n24999\n25000\n25001\n25002\n25003\n25004\n25005\n25006\n25007\n25008\n25009\n25010\n25011\n25012\n25013\n25014\n250
15\n25016\n25017\n25018\n25019\n25020\n25021\n25022\n25023\n25024\n25025\n25026\n25027\n25028\n25029\n25030\n25031\n25032\n25033\n25034\n25035\n25036\n25037\n25038\n25039\n25040\n25041\n25042\n25043\n25044\n25045\n25046\n25047\n25048\n25049\n25050\n25051\n25052\n25053\n25054\n25055\n25056\n25057\n25058\n25059\n25060\n25061\n25062\n25063\n25064\n25065\n25066\n25067\n25068\n25069\n25070\n25071\n25072\n25073\n25074\n25075\n25076\n25077\n25078\n25079\n25080\n25081\n25082\n25083\n25084\n25085\n25086\n25087\n25088\n25089\n25090\n25091\n25092\n25093\n25094\n25095\n25096\n25097\n25098\n25099\n25100\n25101\n25102\n25103\n25104\n25105\n25106\n25107\n25108\n25109\n25110\n25111\n25112\n25113\n25114\n25115\n25116\n25117\n25118\n25119\n25120\n25121\n25122\n25123\n25124\n25125\n25126\n25127\n25128\n25129\n25130\n25131\n25132\n25133\n25134\n25135\n25136\n25137\n25138\n25139\n25140\n25141\n25142\n25143\n25144\n25145\n25146\n25147\n25148\n25149\n25150\n25151\n25152\n25153\n25154\n25155\n25156\n25157\n25158\n25159\n25160\n25161\n25162\n25163\n25164\n25165\n25166\n25167\n25168\n25169\n25170\n25171\n25172\n25173\n25174\n25175\n25176\n25177\n25178\n25179\n25180\n25181\n25182\n25183\n25184\n25185\n25186\n25187\n25188\n25189\n25190\n25191\n25192\n25193\n25194\n25195\n25196\n25197\n25198\n25199\n25200\n25201\n25202\n25203\n25204\n25205\n25206\n25207\n25208\n25209\n25210\n25211\n25212\n25213\n25214\n25215\n25216\n25217\n25218\n25219\n25220\n25221\n25222\n25223\n25224\n25225\n25226\n25227\n25228\n25229\n25230\n25231\n25232\n25233\n25234\n25235\n25236\n25237\n25238\n25239\n25240\n25241\n25242\n25243\n25244\n25245\n25246\n25247\n25248\n25249\n25250\n25251\n25252\n25253\n25254\n25255\n25256\n25257\n25258\n25259\n25260\n25261\n25262\n25263\n25264\n25265\n25266\n25267\n25268\n25269\n25270\n25271\n25272\n25273\n25274\n25275\n25276\n25277\n25278\n25279\n25280\n25281\n25282\n25283\n25284\n25285\n25286\n25287\n25288\n25289\n25290\n25291\n25292\n25293\n25294\n25295\n25296\n25297\n25298\n25299\n25300\n2
5301\n25302\n25303\n25304\n25305\n25306\n25307\n25308\n25309\n25310\n25311\n25312\n25313\n25314\n25315\n25316\n25317\n25318\n25319\n25320\n25321\n25322\n25323\n25324\n25325\n25326\n25327\n25328\n25329\n25330\n25331\n25332\n25333\n25334\n25335\n25336\n25337\n25338\n25339\n25340\n25341\n25342\n25343\n25344\n25345\n25346\n25347\n25348\n25349\n25350\n25351\n25352\n25353\n25354\n25355\n25356\n25357\n25358\n25359\n25360\n25361\n25362\n25363\n25364\n25365\n25366\n25367\n25368\n25369\n25370\n25371\n25372\n25373\n25374\n25375\n25376\n25377\n25378\n25379\n25380\n25381\n25382\n25383\n25384\n25385\n25386\n25387\n25388\n25389\n25390\n25391\n25392\n25393\n25394\n25395\n25396\n25397\n25398\n25399\n25400\n25401\n25402\n25403\n25404\n25405\n25406\n25407\n25408\n25409\n25410\n25411\n25412\n25413\n25414\n25415\n25416\n25417\n25418\n25419\n25420\n25421\n25422\n25423\n25424\n25425\n25426\n25427\n25428\n25429\n25430\n25431\n25432\n25433\n25434\n25435\n25436\n25437\n25438\n25439\n25440\n25441\n25442\n25443\n25444\n25445\n25446\n25447\n25448\n25449\n25450\n25451\n25452\n25453\n25454\n25455\n25456\n25457\n25458\n25459\n25460\n25461\n25462\n25463\n25464\n25465\n25466\n25467\n25468\n25469\n25470\n25471\n25472\n25473\n25474\n25475\n25476\n25477\n25478\n25479\n25480\n25481\n25482\n25483\n25484\n25485\n25486\n25487\n25488\n25489\n25490\n25491\n25492\n25493\n25494\n25495\n25496\n25497\n25498\n25499\n25500\n25501\n25502\n25503\n25504\n25505\n25506\n25507\n25508\n25509\n25510\n25511\n25512\n25513\n25514\n25515\n25516\n25517\n25518\n25519\n25520\n25521\n25522\n25523\n25524\n25525\n25526\n25527\n25528\n25529\n25530\n25531\n25532\n25533\n25534\n25535\n25536\n25537\n25538\n25539\n25540\n25541\n25542\n25543\n25544\n25545\n25546\n25547\n25548\n25549\n25550\n25551\n25552\n25553\n25554\n25555\n25556\n25557\n25558\n25559\n25560\n25561\n25562\n25563\n25564\n25565\n25566\n25567\n25568\n25569\n25570\n25571\n25572\n25573\n25574\n25575\n25576\n25577\n25578\n25579\n25580\n25581\n25582\n25583\n25584\n25585\n25586\
n25587\n25588\n25589\n25590\n25591\n25592\n25593\n25594\n25595\n25596\n25597\n25598\n25599\n25600\n25601\n25602\n25603\n25604\n25605\n25606\n25607\n25608\n25609\n25610\n25611\n25612\n25613\n25614\n25615\n25616\n25617\n25618\n25619\n25620\n25621\n25622\n25623\n25624\n25625\n25626\n25627\n25628\n25629\n25630\n25631\n25632\n25633\n25634\n25635\n25636\n25637\n25638\n25639\n25640\n25641\n25642\n25643\n25644\n25645\n25646\n25647\n25648\n25649\n25650\n25651\n25652\n25653\n25654\n25655\n25656\n25657\n25658\n25659\n25660\n25661\n25662\n25663\n25664\n25665\n25666\n25667\n25668\n25669\n25670\n25671\n25672\n25673\n25674\n25675\n25676\n25677\n25678\n25679\n25680\n25681\n25682\n25683\n25684\n25685\n25686\n25687\n25688\n25689\n25690\n25691\n25692\n25693\n25694\n25695\n25696\n25697\n25698\n25699\n25700\n25701\n25702\n25703\n25704\n25705\n25706\n25707\n25708\n25709\n25710\n25711\n25712\n25713\n25714\n25715\n25716\n25717\n25718\n25719\n25720\n25721\n25722\n25723\n25724\n25725\n25726\n25727\n25728\n25729\n25730\n25731\n25732\n25733\n25734\n25735\n25736\n25737\n25738\n25739\n25740\n25741\n25742\n25743\n25744\n25745\n25746\n25747\n25748\n25749\n25750\n25751\n25752\n25753\n25754\n25755\n25756\n25757\n25758\n25759\n25760\n25761\n25762\n25763\n25764\n25765\n25766\n25767\n25768\n25769\n25770\n25771\n25772\n25773\n25774\n25775\n25776\n25777\n25778\n25779\n25780\n25781\n25782\n25783\n25784\n25785\n25786\n25787\n25788\n25789\n25790\n25791\n25792\n25793\n25794\n25795\n25796\n25797\n25798\n25799\n25800\n25801\n25802\n25803\n25804\n25805\n25806\n25807\n25808\n25809\n25810\n25811\n25812\n25813\n25814\n25815\n25816\n25817\n25818\n25819\n25820\n25821\n25822\n25823\n25824\n25825\n25826\n25827\n25828\n25829\n25830\n25831\n25832\n25833\n25834\n25835\n25836\n25837\n25838\n25839\n25840\n25841\n25842\n25843\n25844\n25845\n25846\n25847\n25848\n25849\n25850\n25851\n25852\n25853\n25854\n25855\n25856\n25857\n25858\n25859\n25860\n25861\n25862\n25863\n25864\n25865\n25866\n25867\n25868\n25869\n25870\n25871\n2587
2\n25873\n25874\n25875\n25876\n25877\n25878\n25879\n25880\n25881\n25882\n25883\n25884\n25885\n25886\n25887\n25888\n25889\n25890\n25891\n25892\n25893\n25894\n25895\n25896\n25897\n25898\n25899\n25900\n25901\n25902\n25903\n25904\n25905\n25906\n25907\n25908\n25909\n25910\n25911\n25912\n25913\n25914\n25915\n25916\n25917\n25918\n25919\n25920\n25921\n25922\n25923\n25924\n25925\n25926\n25927\n25928\n25929\n25930\n25931\n25932\n25933\n25934\n25935\n25936\n25937\n25938\n25939\n25940\n25941\n25942\n25943\n25944\n25945\n25946\n25947\n25948\n25949\n25950\n25951\n25952\n25953\n25954\n25955\n25956\n25957\n25958\n25959\n25960\n25961\n25962\n25963\n25964\n25965\n25966\n25967\n25968\n25969\n25970\n25971\n25972\n25973\n25974\n25975\n25976\n25977\n25978\n25979\n25980\n25981\n25982\n25983\n25984\n25985\n25986\n25987\n25988\n25989\n25990\n25991\n25992\n25993\n25994\n25995\n25996\n25997\n25998\n25999\n26000\n26001\n26002\n26003\n26004\n26005\n26006\n26007\n26008\n26009\n26010\n26011\n26012\n26013\n26014\n26015\n26016\n26017\n26018\n26019\n26020\n26021\n26022\n26023\n26024\n26025\n26026\n26027\n26028\n26029\n26030\n26031\n26032\n26033\n26034\n26035\n26036\n26037\n26038\n26039\n26040\n26041\n26042\n26043\n26044\n26045\n26046\n26047\n26048\n26049\n26050\n26051\n26052\n26053\n26054\n26055\n26056\n26057\n26058\n26059\n26060\n26061\n26062\n26063\n26064\n26065\n26066\n26067\n26068\n26069\n26070\n26071\n26072\n26073\n26074\n26075\n26076\n26077\n26078\n26079\n26080\n26081\n26082\n26083\n26084\n26085\n26086\n26087\n26088\n26089\n26090\n26091\n26092\n26093\n26094\n26095\n26096\n26097\n26098\n26099\n26100\n26101\n26102\n26103\n26104\n26105\n26106\n26107\n26108\n26109\n26110\n26111\n26112\n26113\n26114\n26115\n26116\n26117\n26118\n26119\n26120\n26121\n26122\n26123\n26124\n26125\n26126\n26127\n26128\n26129\n26130\n26131\n26132\n26133\n26134\n26135\n26136\n26137\n26138\n26139\n26140\n26141\n26142\n26143\n26144\n26145\n26146\n26147\n26148\n26149\n26150\n26151\n26152\n26153\n26154\n26155\n26156\n26157\n26
158\n26159\n26160\n26161\n26162\n26163\n26164\n26165\n26166\n26167\n26168\n26169\n26170\n26171\n26172\n26173\n26174\n26175\n26176\n26177\n26178\n26179\n26180\n26181\n26182\n26183\n26184\n26185\n26186\n26187\n26188\n26189\n26190\n26191\n26192\n26193\n26194\n26195\n26196\n26197\n26198\n26199\n26200\n26201\n26202\n26203\n26204\n26205\n26206\n26207\n26208\n26209\n26210\n26211\n26212\n26213\n26214\n26215\n26216\n26217\n26218\n26219\n26220\n26221\n26222\n26223\n26224\n26225\n26226\n26227\n26228\n26229\n26230\n26231\n26232\n26233\n26234\n26235\n26236\n26237\n26238\n26239\n26240\n26241\n26242\n26243\n26244\n26245\n26246\n26247\n26248\n26249\n26250\n26251\n26252\n26253\n26254\n26255\n26256\n26257\n26258\n26259\n26260\n26261\n26262\n26263\n26264\n26265\n26266\n26267\n26268\n26269\n26270\n26271\n26272\n26273\n26274\n26275\n26276\n26277\n26278\n26279\n26280\n26281\n26282\n26283\n26284\n26285\n26286\n26287\n26288\n26289\n26290\n26291\n26292\n26293\n26294\n26295\n26296\n26297\n26298\n26299\n26300\n26301\n26302\n26303\n26304\n26305\n26306\n26307\n26308\n26309\n26310\n26311\n26312\n26313\n26314\n26315\n26316\n26317\n26318\n26319\n26320\n26321\n26322\n26323\n26324\n26325\n26326\n26327\n26328\n26329\n26330\n26331\n26332\n26333\n26334\n26335\n26336\n26337\n26338\n26339\n26340\n26341\n26342\n26343\n26344\n26345\n26346\n26347\n26348\n26349\n26350\n26351\n26352\n26353\n26354\n26355\n26356\n26357\n26358\n26359\n26360\n26361\n26362\n26363\n26364\n26365\n26366\n26367\n26368\n26369\n26370\n26371\n26372\n26373\n26374\n26375\n26376\n26377\n26378\n26379\n26380\n26381\n26382\n26383\n26384\n26385\n26386\n26387\n26388\n26389\n26390\n26391\n26392\n26393\n26394\n26395\n26396\n26397\n26398\n26399\n26400\n26401\n26402\n26403\n26404\n26405\n26406\n26407\n26408\n26409\n26410\n26411\n26412\n26413\n26414\n26415\n26416\n26417\n26418\n26419\n26420\n26421\n26422\n26423\n26424\n26425\n26426\n26427\n26428\n26429\n26430\n26431\n26432\n26433\n26434\n26435\n26436\n26437\n26438\n26439\n26440\n26441\n26442\n26443\n
26444\n26445\n26446\n26447\n26448\n26449\n26450\n26451\n26452\n26453\n26454\n26455\n26456\n26457\n26458\n26459\n26460\n26461\n26462\n26463\n26464\n26465\n26466\n26467\n26468\n26469\n26470\n26471\n26472\n26473\n26474\n26475\n26476\n26477\n26478\n26479\n26480\n26481\n26482\n26483\n26484\n26485\n26486\n26487\n26488\n26489\n26490\n26491\n26492\n26493\n26494\n26495\n26496\n26497\n26498\n26499\n26500\n26501\n26502\n26503\n26504\n26505\n26506\n26507\n26508\n26509\n26510\n26511\n26512\n26513\n26514\n26515\n26516\n26517\n26518\n26519\n26520\n26521\n26522\n26523\n26524\n26525\n26526\n26527\n26528\n26529\n26530\n26531\n26532\n26533\n26534\n26535\n26536\n26537\n26538\n26539\n26540\n26541\n26542\n26543\n26544\n26545\n26546\n26547\n26548\n26549\n26550\n26551\n26552\n26553\n26554\n26555\n26556\n26557\n26558\n26559\n26560\n26561\n26562\n26563\n26564\n26565\n26566\n26567\n26568\n26569\n26570\n26571\n26572\n26573\n26574\n26575\n26576\n26577\n26578\n26579\n26580\n26581\n26582\n26583\n26584\n26585\n26586\n26587\n26588\n26589\n26590\n26591\n26592\n26593\n26594\n26595\n26596\n26597\n26598\n26599\n26600\n26601\n26602\n26603\n26604\n26605\n26606\n26607\n26608\n26609\n26610\n26611\n26612\n26613\n26614\n26615\n26616\n26617\n26618\n26619\n26620\n26621\n26622\n26623\n26624\n26625\n26626\n26627\n26628\n26629\n26630\n26631\n26632\n26633\n26634\n26635\n26636\n26637\n26638\n26639\n26640\n26641\n26642\n26643\n26644\n26645\n26646\n26647\n26648\n26649\n26650\n26651\n26652\n26653\n26654\n26655\n26656\n26657\n26658\n26659\n26660\n26661\n26662\n26663\n26664\n26665\n26666\n26667\n26668\n26669\n26670\n26671\n26672\n26673\n26674\n26675\n26676\n26677\n26678\n26679\n26680\n26681\n26682\n26683\n26684\n26685\n26686\n26687\n26688\n26689\n26690\n26691\n26692\n26693\n26694\n26695\n26696\n26697\n26698\n26699\n26700\n26701\n26702\n26703\n26704\n26705\n26706\n26707\n26708\n26709\n26710\n26711\n26712\n26713\n26714\n26715\n26716\n26717\n26718\n26719\n26720\n26721\n26722\n26723\n26724\n26725\n26726\n26727\n26728\n26729
\n26730\n26731\n26732\n26733\n26734\n26735\n26736\n26737\n26738\n26739\n26740\n26741\n26742\n26743\n26744\n26745\n26746\n26747\n26748\n26749\n26750\n26751\n26752\n26753\n26754\n26755\n26756\n26757\n26758\n26759\n26760\n26761\n26762\n26763\n26764\n26765\n26766\n26767\n26768\n26769\n26770\n26771\n26772\n26773\n26774\n26775\n26776\n26777\n26778\n26779\n26780\n26781\n26782\n26783\n26784\n26785\n26786\n26787\n26788\n26789\n26790\n26791\n26792\n26793\n26794\n26795\n26796\n26797\n26798\n26799\n26800\n26801\n26802\n26803\n26804\n26805\n26806\n26807\n26808\n26809\n26810\n26811\n26812\n26813\n26814\n26815\n26816\n26817\n26818\n26819\n26820\n26821\n26822\n26823\n26824\n26825\n26826\n26827\n26828\n26829\n26830\n26831\n26832\n26833\n26834\n26835\n26836\n26837\n26838\n26839\n26840\n26841\n26842\n26843\n26844\n26845\n26846\n26847\n26848\n26849\n26850\n26851\n26852\n26853\n26854\n26855\n26856\n26857\n26858\n26859\n26860\n26861\n26862\n26863\n26864\n26865\n26866\n26867\n26868\n26869\n26870\n26871\n26872\n26873\n26874\n26875\n26876\n26877\n26878\n26879\n26880\n26881\n26882\n26883\n26884\n26885\n26886\n26887\n26888\n26889\n26890\n26891\n26892\n26893\n26894\n26895\n26896\n26897\n26898\n26899\n26900\n26901\n26902\n26903\n26904\n26905\n26906\n26907\n26908\n26909\n26910\n26911\n26912\n26913\n26914\n26915\n26916\n26917\n26918\n26919\n26920\n26921\n26922\n26923\n26924\n26925\n26926\n26927\n26928\n26929\n26930\n26931\n26932\n26933\n26934\n26935\n26936\n26937\n26938\n26939\n26940\n26941\n26942\n26943\n26944\n26945\n26946\n26947\n26948\n26949\n26950\n26951\n26952\n26953\n26954\n26955\n26956\n26957\n26958\n26959\n26960\n26961\n26962\n26963\n26964\n26965\n26966\n26967\n26968\n26969\n26970\n26971\n26972\n26973\n26974\n26975\n26976\n26977\n26978\n26979\n26980\n26981\n26982\n26983\n26984\n26985\n26986\n26987\n26988\n26989\n26990\n26991\n26992\n26993\n26994\n26995\n26996\n26997\n26998\n26999\n27000\n27001\n27002\n27003\n27004\n27005\n27006\n27007\n27008\n27009\n27010\n27011\n27012\n27013\n27014\n270
15\n27016\n27017\n27018\n27019\n27020\n27021\n27022\n27023\n27024\n27025\n27026\n27027\n27028\n27029\n27030\n27031\n27032\n27033\n27034\n27035\n27036\n27037\n27038\n27039\n27040\n27041\n27042\n27043\n27044\n27045\n27046\n27047\n27048\n27049\n27050\n27051\n27052\n27053\n27054\n27055\n27056\n27057\n27058\n27059\n27060\n27061\n27062\n27063\n27064\n27065\n27066\n27067\n27068\n27069\n27070\n27071\n27072\n27073\n27074\n27075\n27076\n27077\n27078\n27079\n27080\n27081\n27082\n27083\n27084\n27085\n27086\n27087\n27088\n27089\n27090\n27091\n27092\n27093\n27094\n27095\n27096\n27097\n27098\n27099\n27100\n27101\n27102\n27103\n27104\n27105\n27106\n27107\n27108\n27109\n27110\n27111\n27112\n27113\n27114\n27115\n27116\n27117\n27118\n27119\n27120\n27121\n27122\n27123\n27124\n27125\n27126\n27127\n27128\n27129\n27130\n27131\n27132\n27133\n27134\n27135\n27136\n27137\n27138\n27139\n27140\n27141\n27142\n27143\n27144\n27145\n27146\n27147\n27148\n27149\n27150\n27151\n27152\n27153\n27154\n27155\n27156\n27157\n27158\n27159\n27160\n27161\n27162\n27163\n27164\n27165\n27166\n27167\n27168\n27169\n27170\n27171\n27172\n27173\n27174\n27175\n27176\n27177\n27178\n27179\n27180\n27181\n27182\n27183\n27184\n27185\n27186\n27187\n27188\n27189\n27190\n27191\n27192\n27193\n27194\n27195\n27196\n27197\n27198\n27199\n27200\n27201\n27202\n27203\n27204\n27205\n27206\n27207\n27208\n27209\n27210\n27211\n27212\n27213\n27214\n27215\n27216\n27217\n27218\n27219\n27220\n27221\n27222\n27223\n27224\n27225\n27226\n27227\n27228\n27229\n27230\n27231\n27232\n27233\n27234\n27235\n27236\n27237\n27238\n27239\n27240\n27241\n27242\n27243\n27244\n27245\n27246\n27247\n27248\n27249\n27250\n27251\n27252\n27253\n27254\n27255\n27256\n27257\n27258\n27259\n27260\n27261\n27262\n27263\n27264\n27265\n27266\n27267\n27268\n27269\n27270\n27271\n27272\n27273\n27274\n27275\n27276\n27277\n27278\n27279\n27280\n27281\n27282\n27283\n27284\n27285\n27286\n27287\n27288\n27289\n27290\n27291\n27292\n27293\n27294\n27295\n27296\n27297\n27298\n27299\n27300\n2
7301\n27302\n27303\n27304\n27305\n27306\n27307\n27308\n27309\n27310\n27311\n27312\n27313\n27314\n27315\n27316\n27317\n27318\n27319\n27320\n27321\n27322\n27323\n27324\n27325\n27326\n27327\n27328\n27329\n27330\n27331\n27332\n27333\n27334\n27335\n27336\n27337\n27338\n27339\n27340\n27341\n27342\n27343\n27344\n27345\n27346\n27347\n27348\n27349\n27350\n27351\n27352\n27353\n27354\n27355\n27356\n27357\n27358\n27359\n27360\n27361\n27362\n27363\n27364\n27365\n27366\n27367\n27368\n27369\n27370\n27371\n27372\n27373\n27374\n27375\n27376\n27377\n27378\n27379\n27380\n27381\n27382\n27383\n27384\n27385\n27386\n27387\n27388\n27389\n27390\n27391\n27392\n27393\n27394\n27395\n27396\n27397\n27398\n27399\n27400\n27401\n27402\n27403\n27404\n27405\n27406\n27407\n27408\n27409\n27410\n27411\n27412\n27413\n27414\n27415\n27416\n27417\n27418\n27419\n27420\n27421\n27422\n27423\n27424\n27425\n27426\n27427\n27428\n27429\n27430\n27431\n27432\n27433\n27434\n27435\n27436\n27437\n27438\n27439\n27440\n27441\n27442\n27443\n27444\n27445\n27446\n27447\n27448\n27449\n27450\n27451\n27452\n27453\n27454\n27455\n27456\n27457\n27458\n27459\n27460\n27461\n27462\n27463\n27464\n27465\n27466\n27467\n27468\n27469\n27470\n27471\n27472\n27473\n27474\n27475\n27476\n27477\n27478\n27479\n27480\n27481\n27482\n27483\n27484\n27485\n27486\n27487\n27488\n27489\n27490\n27491\n27492\n27493\n27494\n27495\n27496\n27497\n27498\n27499\n27500\n27501\n27502\n27503\n27504\n27505\n27506\n27507\n27508\n27509\n27510\n27511\n27512\n27513\n27514\n27515\n27516\n27517\n27518\n27519\n27520\n27521\n27522\n27523\n27524\n27525\n27526\n27527\n27528\n27529\n27530\n27531\n27532\n27533\n27534\n27535\n27536\n27537\n27538\n27539\n27540\n27541\n27542\n27543\n27544\n27545\n27546\n27547\n27548\n27549\n27550\n27551\n27552\n27553\n27554\n27555\n27556\n27557\n27558\n27559\n27560\n27561\n27562\n27563\n27564\n27565\n27566\n27567\n27568\n27569\n27570\n27571\n27572\n27573\n27574\n27575\n27576\n27577\n27578\n27579\n27580\n27581\n27582\n27583\n27584\n27585\n27586\
n27587\n27588\n27589\n27590\n27591\n27592\n27593\n27594\n27595\n27596\n27597\n27598\n27599\n27600\n27601\n27602\n27603\n27604\n27605\n27606\n27607\n27608\n27609\n27610\n27611\n27612\n27613\n27614\n27615\n27616\n27617\n27618\n27619\n27620\n27621\n27622\n27623\n27624\n27625\n27626\n27627\n27628\n27629\n27630\n27631\n27632\n27633\n27634\n27635\n27636\n27637\n27638\n27639\n27640\n27641\n27642\n27643\n27644\n27645\n27646\n27647\n27648\n27649\n27650\n27651\n27652\n27653\n27654\n27655\n27656\n27657\n27658\n27659\n27660\n27661\n27662\n27663\n27664\n27665\n27666\n27667\n27668\n27669\n27670\n27671\n27672\n27673\n27674\n27675\n27676\n27677\n27678\n27679\n27680\n27681\n27682\n27683\n27684\n27685\n27686\n27687\n27688\n27689\n27690\n27691\n27692\n27693\n27694\n27695\n27696\n27697\n27698\n27699\n27700\n27701\n27702\n27703\n27704\n27705\n27706\n27707\n27708\n27709\n27710\n27711\n27712\n27713\n27714\n27715\n27716\n27717\n27718\n27719\n27720\n27721\n27722\n27723\n27724\n27725\n27726\n27727\n27728\n27729\n27730\n27731\n27732\n27733\n27734\n27735\n27736\n27737\n27738\n27739\n27740\n27741\n27742\n27743\n27744\n27745\n27746\n27747\n27748\n27749\n27750\n27751\n27752\n27753\n27754\n27755\n27756\n27757\n27758\n27759\n27760\n27761\n27762\n27763\n27764\n27765\n27766\n27767\n27768\n27769\n27770\n27771\n27772\n27773\n27774\n27775\n27776\n27777\n27778\n27779\n27780\n27781\n27782\n27783\n27784\n27785\n27786\n27787\n27788\n27789\n27790\n27791\n27792\n27793\n27794\n27795\n27796\n27797\n27798\n27799\n27800\n27801\n27802\n27803\n27804\n27805\n27806\n27807\n27808\n27809\n27810\n27811\n27812\n27813\n27814\n27815\n27816\n27817\n27818\n27819\n27820\n27821\n27822\n27823\n27824\n27825\n27826\n27827\n27828\n27829\n27830\n27831\n27832\n27833\n27834\n27835\n27836\n27837\n27838\n27839\n27840\n27841\n27842\n27843\n27844\n27845\n27846\n27847\n27848\n27849\n27850\n27851\n27852\n27853\n27854\n27855\n27856\n27857\n27858\n27859\n27860\n27861\n27862\n27863\n27864\n27865\n27866\n27867\n27868\n27869\n27870\n27871\n2787
2\n27873\n27874\n27875\n27876\n27877\n27878\n27879\n27880\n27881\n27882\n27883\n27884\n27885\n27886\n27887\n27888\n27889\n27890\n27891\n27892\n27893\n27894\n27895\n27896\n27897\n27898\n27899\n27900\n27901\n27902\n27903\n27904\n27905\n27906\n27907\n27908\n27909\n27910\n27911\n27912\n27913\n27914\n27915\n27916\n27917\n27918\n27919\n27920\n27921\n27922\n27923\n27924\n27925\n27926\n27927\n27928\n27929\n27930\n27931\n27932\n27933\n27934\n27935\n27936\n27937\n27938\n27939\n27940\n27941\n27942\n27943\n27944\n27945\n27946\n27947\n27948\n27949\n27950\n27951\n27952\n27953\n27954\n27955\n27956\n27957\n27958\n27959\n27960\n27961\n27962\n27963\n27964\n27965\n27966\n27967\n27968\n27969\n27970\n27971\n27972\n27973\n27974\n27975\n27976\n27977\n27978\n27979\n27980\n27981\n27982\n27983\n27984\n27985\n27986\n27987\n27988\n27989\n27990\n27991\n27992\n27993\n27994\n27995\n27996\n27997\n27998\n27999\n28000\n28001\n28002\n28003\n28004\n28005\n28006\n28007\n28008\n28009\n28010\n28011\n28012\n28013\n28014\n28015\n28016\n28017\n28018\n28019\n28020\n28021\n28022\n28023\n28024\n28025\n28026\n28027\n28028\n28029\n28030\n28031\n28032\n28033\n28034\n28035\n28036\n28037\n28038\n28039\n28040\n28041\n28042\n28043\n28044\n28045\n28046\n28047\n28048\n28049\n28050\n28051\n28052\n28053\n28054\n28055\n28056\n28057\n28058\n28059\n28060\n28061\n28062\n28063\n28064\n28065\n28066\n28067\n28068\n28069\n28070\n28071\n28072\n28073\n28074\n28075\n28076\n28077\n28078\n28079\n28080\n28081\n28082\n28083\n28084\n28085\n28086\n28087\n28088\n28089\n28090\n28091\n28092\n28093\n28094\n28095\n28096\n28097\n28098\n28099\n28100\n28101\n28102\n28103\n28104\n28105\n28106\n28107\n28108\n28109\n28110\n28111\n28112\n28113\n28114\n28115\n28116\n28117\n28118\n28119\n28120\n28121\n28122\n28123\n28124\n28125\n28126\n28127\n28128\n28129\n28130\n28131\n28132\n28133\n28134\n28135\n28136\n28137\n28138\n28139\n28140\n28141\n28142\n28143\n28144\n28145\n28146\n28147\n28148\n28149\n28150\n28151\n28152\n28153\n28154\n28155\n28156\n28157\n28
158\n28159\n28160\n28161\n28162\n28163\n28164\n28165\n28166\n28167\n28168\n28169\n28170\n28171\n28172\n28173\n28174\n28175\n28176\n28177\n28178\n28179\n28180\n28181\n28182\n28183\n28184\n28185\n28186\n28187\n28188\n28189\n28190\n28191\n28192\n28193\n28194\n28195\n28196\n28197\n28198\n28199\n28200\n28201\n28202\n28203\n28204\n28205\n28206\n28207\n28208\n28209\n28210\n28211\n28212\n28213\n28214\n28215\n28216\n28217\n28218\n28219\n28220\n28221\n28222\n28223\n28224\n28225\n28226\n28227\n28228\n28229\n28230\n28231\n28232\n28233\n28234\n28235\n28236\n28237\n28238\n28239\n28240\n28241\n28242\n28243\n28244\n28245\n28246\n28247\n28248\n28249\n28250\n28251\n28252\n28253\n28254\n28255\n28256\n28257\n28258\n28259\n28260\n28261\n28262\n28263\n28264\n28265\n28266\n28267\n28268\n28269\n28270\n28271\n28272\n28273\n28274\n28275\n28276\n28277\n28278\n28279\n28280\n28281\n28282\n28283\n28284\n28285\n28286\n28287\n28288\n28289\n28290\n28291\n28292\n28293\n28294\n28295\n28296\n28297\n28298\n28299\n28300\n28301\n28302\n28303\n28304\n28305\n28306\n28307\n28308\n28309\n28310\n28311\n28312\n28313\n28314\n28315\n28316\n28317\n28318\n28319\n28320\n28321\n28322\n28323\n28324\n28325\n28326\n28327\n28328\n28329\n28330\n28331\n28332\n28333\n28334\n28335\n28336\n28337\n28338\n28339\n28340\n28341\n28342\n28343\n28344\n28345\n28346\n28347\n28348\n28349\n28350\n28351\n28352\n28353\n28354\n28355\n28356\n28357\n28358\n28359\n28360\n28361\n28362\n28363\n28364\n28365\n28366\n28367\n28368\n28369\n28370\n28371\n28372\n28373\n28374\n28375\n28376\n28377\n28378\n28379\n28380\n28381\n28382\n28383\n28384\n28385\n28386\n28387\n28388\n28389\n28390\n28391\n28392\n28393\n28394\n28395\n28396\n28397\n28398\n28399\n28400\n28401\n28402\n28403\n28404\n28405\n28406\n28407\n28408\n28409\n28410\n28411\n28412\n28413\n28414\n28415\n28416\n28417\n28418\n28419\n28420\n28421\n28422\n28423\n28424\n28425\n28426\n28427\n28428\n28429\n28430\n28431\n28432\n28433\n28434\n28435\n28436\n28437\n28438\n28439\n28440\n28441\n28442\n28443\n
28444\n28445\n28446\n28447\n28448\n28449\n28450\n28451\n28452\n28453\n28454\n28455\n28456\n28457\n28458\n28459\n28460\n28461\n28462\n28463\n28464\n28465\n28466\n28467\n28468\n28469\n28470\n28471\n28472\n28473\n28474\n28475\n28476\n28477\n28478\n28479\n28480\n28481\n28482\n28483\n28484\n28485\n28486\n28487\n28488\n28489\n28490\n28491\n28492\n28493\n28494\n28495\n28496\n28497\n28498\n28499\n28500\n28501\n28502\n28503\n28504\n28505\n28506\n28507\n28508\n28509\n28510\n28511\n28512\n28513\n28514\n28515\n28516\n28517\n28518\n28519\n28520\n28521\n28522\n28523\n28524\n28525\n28526\n28527\n28528\n28529\n28530\n28531\n28532\n28533\n28534\n28535\n28536\n28537\n28538\n28539\n28540\n28541\n28542\n28543\n28544\n28545\n28546\n28547\n28548\n28549\n28550\n28551\n28552\n28553\n28554\n28555\n28556\n28557\n28558\n28559\n28560\n28561\n28562\n28563\n28564\n28565\n28566\n28567\n28568\n28569\n28570\n28571\n28572\n28573\n28574\n28575\n28576\n28577\n28578\n28579\n28580\n28581\n28582\n28583\n28584\n28585\n28586\n28587\n28588\n28589\n28590\n28591\n28592\n28593\n28594\n28595\n28596\n28597\n28598\n28599\n28600\n28601\n28602\n28603\n28604\n28605\n28606\n28607\n28608\n28609\n28610\n28611\n28612\n28613\n28614\n28615\n28616\n28617\n28618\n28619\n28620\n28621\n28622\n28623\n28624\n28625\n28626\n28627\n28628\n28629\n28630\n28631\n28632\n28633\n28634\n28635\n28636\n28637\n28638\n28639\n28640\n28641\n28642\n28643\n28644\n28645\n28646\n28647\n28648\n28649\n28650\n28651\n28652\n28653\n28654\n28655\n28656\n28657\n28658\n28659\n28660\n28661\n28662\n28663\n28664\n28665\n28666\n28667\n28668\n28669\n28670\n28671\n28672\n28673\n28674\n28675\n28676\n28677\n28678\n28679\n28680\n28681\n28682\n28683\n28684\n28685\n28686\n28687\n28688\n28689\n28690\n28691\n28692\n28693\n28694\n28695\n28696\n28697\n28698\n28699\n28700\n28701\n28702\n28703\n28704\n28705\n28706\n28707\n28708\n28709\n28710\n28711\n28712\n28713\n28714\n28715\n28716\n28717\n28718\n28719\n28720\n28721\n28722\n28723\n28724\n28725\n28726\n28727\n28728\n28729
\n28730\n28731\n28732\n28733\n28734\n28735\n28736\n28737\n28738\n28739\n28740\n28741\n28742\n28743\n28744\n28745\n28746\n28747\n28748\n28749\n28750\n28751\n28752\n28753\n28754\n28755\n28756\n28757\n28758\n28759\n28760\n28761\n28762\n28763\n28764\n28765\n28766\n28767\n28768\n28769\n28770\n28771\n28772\n28773\n28774\n28775\n28776\n28777\n28778\n28779\n28780\n28781\n28782\n28783\n28784\n28785\n28786\n28787\n28788\n28789\n28790\n28791\n28792\n28793\n28794\n28795\n28796\n28797\n28798\n28799\n28800\n28801\n28802\n28803\n28804\n28805\n28806\n28807\n28808\n28809\n28810\n28811\n28812\n28813\n28814\n28815\n28816\n28817\n28818\n28819\n28820\n28821\n28822\n28823\n28824\n28825\n28826\n28827\n28828\n28829\n28830\n28831\n28832\n28833\n28834\n28835\n28836\n28837\n28838\n28839\n28840\n28841\n28842\n28843\n28844\n28845\n28846\n28847\n28848\n28849\n28850\n28851\n28852\n28853\n28854\n28855\n28856\n28857\n28858\n28859\n28860\n28861\n28862\n28863\n28864\n28865\n28866\n28867\n28868\n28869\n28870\n28871\n28872\n28873\n28874\n28875\n28876\n28877\n28878\n28879\n28880\n28881\n28882\n28883\n28884\n28885\n28886\n28887\n28888\n28889\n28890\n28891\n28892\n28893\n28894\n28895\n28896\n28897\n28898\n28899\n28900\n28901\n28902\n28903\n28904\n28905\n28906\n28907\n28908\n28909\n28910\n28911\n28912\n28913\n28914\n28915\n28916\n28917\n28918\n28919\n28920\n28921\n28922\n28923\n28924\n28925\n28926\n28927\n28928\n28929\n28930\n28931\n28932\n28933\n28934\n28935\n28936\n28937\n28938\n28939\n28940\n28941\n28942\n28943\n28944\n28945\n28946\n28947\n28948\n28949\n28950\n28951\n28952\n28953\n28954\n28955\n28956\n28957\n28958\n28959\n28960\n28961\n28962\n28963\n28964\n28965\n28966\n28967\n28968\n28969\n28970\n28971\n28972\n28973\n28974\n28975\n28976\n28977\n28978\n28979\n28980\n28981\n28982\n28983\n28984\n28985\n28986\n28987\n28988\n28989\n28990\n28991\n28992\n28993\n28994\n28995\n28996\n28997\n28998\n28999\n29000\n29001\n29002\n29003\n29004\n29005\n29006\n29007\n29008\n29009\n29010\n29011\n29012\n29013\n29014\n290
15\n29016\n29017\n29018\n29019\n29020\n29021\n29022\n29023\n29024\n29025\n29026\n29027\n29028\n29029\n29030\n29031\n29032\n29033\n29034\n29035\n29036\n29037\n29038\n29039\n29040\n29041\n29042\n29043\n29044\n29045\n29046\n29047\n29048\n29049\n29050\n29051\n29052\n29053\n29054\n29055\n29056\n29057\n29058\n29059\n29060\n29061\n29062\n29063\n29064\n29065\n29066\n29067\n29068\n29069\n29070\n29071\n29072\n29073\n29074\n29075\n29076\n29077\n29078\n29079\n29080\n29081\n29082\n29083\n29084\n29085\n29086\n29087\n29088\n29089\n29090\n29091\n29092\n29093\n29094\n29095\n29096\n29097\n29098\n29099\n29100\n29101\n29102\n29103\n29104\n29105\n29106\n29107\n29108\n29109\n29110\n29111\n29112\n29113\n29114\n29115\n29116\n29117\n29118\n29119\n29120\n29121\n29122\n29123\n29124\n29125\n29126\n29127\n29128\n29129\n29130\n29131\n29132\n29133\n29134\n29135\n29136\n29137\n29138\n29139\n29140\n29141\n29142\n29143\n29144\n29145\n29146\n29147\n29148\n29149\n29150\n29151\n29152\n29153\n29154\n29155\n29156\n29157\n29158\n29159\n29160\n29161\n29162\n29163\n29164\n29165\n29166\n29167\n29168\n29169\n29170\n29171\n29172\n29173\n29174\n29175\n29176\n29177\n29178\n29179\n29180\n29181\n29182\n29183\n29184\n29185\n29186\n29187\n29188\n29189\n29190\n29191\n29192\n29193\n29194\n29195\n29196\n29197\n29198\n29199\n29200\n29201\n29202\n29203\n29204\n29205\n29206\n29207\n29208\n29209\n29210\n29211\n29212\n29213\n29214\n29215\n29216\n29217\n29218\n29219\n29220\n29221\n29222\n29223\n29224\n29225\n29226\n29227\n29228\n29229\n29230\n29231\n29232\n29233\n29234\n29235\n29236\n29237\n29238\n29239\n29240\n29241\n29242\n29243\n29244\n29245\n29246\n29247\n29248\n29249\n29250\n29251\n29252\n29253\n29254\n29255\n29256\n29257\n29258\n29259\n29260\n29261\n29262\n29263\n29264\n29265\n29266\n29267\n29268\n29269\n29270\n29271\n29272\n29273\n29274\n29275\n29276\n29277\n29278\n29279\n29280\n29281\n29282\n29283\n29284\n29285\n29286\n29287\n29288\n29289\n29290\n29291\n29292\n29293\n29294\n29295\n29296\n29297\n29298\n29299\n29300\n2
9301\n29302\n29303\n29304\n29305\n29306\n29307\n29308\n29309\n29310\n29311\n29312\n29313\n29314\n29315\n29316\n29317\n29318\n29319\n29320\n29321\n29322\n29323\n29324\n29325\n29326\n29327\n29328\n29329\n29330\n29331\n29332\n29333\n29334\n29335\n29336\n29337\n29338\n29339\n29340\n29341\n29342\n29343\n29344\n29345\n29346\n29347\n29348\n29349\n29350\n29351\n29352\n29353\n29354\n29355\n29356\n29357\n29358\n29359\n29360\n29361\n29362\n29363\n29364\n29365\n29366\n29367\n29368\n29369\n29370\n29371\n29372\n29373\n29374\n29375\n29376\n29377\n29378\n29379\n29380\n29381\n29382\n29383\n29384\n29385\n29386\n29387\n29388\n29389\n29390\n29391\n29392\n29393\n29394\n29395\n29396\n29397\n29398\n29399\n29400\n29401\n29402\n29403\n29404\n29405\n29406\n29407\n29408\n29409\n29410\n29411\n29412\n29413\n29414\n29415\n29416\n29417\n29418\n29419\n29420\n29421\n29422\n29423\n29424\n29425\n29426\n29427\n29428\n29429\n29430\n29431\n29432\n29433\n29434\n29435\n29436\n29437\n29438\n29439\n29440\n29441\n29442\n29443\n29444\n29445\n29446\n29447\n29448\n29449\n29450\n29451\n29452\n29453\n29454\n29455\n29456\n29457\n29458\n29459\n29460\n29461\n29462\n29463\n29464\n29465\n29466\n29467\n29468\n29469\n29470\n29471\n29472\n29473\n29474\n29475\n29476\n29477\n29478\n29479\n29480\n29481\n29482\n29483\n29484\n29485\n29486\n29487\n29488\n29489\n29490\n29491\n29492\n29493\n29494\n29495\n29496\n29497\n29498\n29499\n29500\n29501\n29502\n29503\n29504\n29505\n29506\n29507\n29508\n29509\n29510\n29511\n29512\n29513\n29514\n29515\n29516\n29517\n29518\n29519\n29520\n29521\n29522\n29523\n29524\n29525\n29526\n29527\n29528\n29529\n29530\n29531\n29532\n29533\n29534\n29535\n29536\n29537\n29538\n29539\n29540\n29541\n29542\n29543\n29544\n29545\n29546\n29547\n29548\n29549\n29550\n29551\n29552\n29553\n29554\n29555\n29556\n29557\n29558\n29559\n29560\n29561\n29562\n29563\n29564\n29565\n29566\n29567\n29568\n29569\n29570\n29571\n29572\n29573\n29574\n29575\n29576\n29577\n29578\n29579\n29580\n29581\n29582\n29583\n29584\n29585\n29586\
n29587\n29588\n29589\n29590\n29591\n29592\n29593\n29594\n29595\n29596\n29597\n29598\n29599\n29600\n29601\n29602\n29603\n29604\n29605\n29606\n29607\n29608\n29609\n29610\n29611\n29612\n29613\n29614\n29615\n29616\n29617\n29618\n29619\n29620\n29621\n29622\n29623\n29624\n29625\n29626\n29627\n29628\n29629\n29630\n29631\n29632\n29633\n29634\n29635\n29636\n29637\n29638\n29639\n29640\n29641\n29642\n29643\n29644\n29645\n29646\n29647\n29648\n29649\n29650\n29651\n29652\n29653\n29654\n29655\n29656\n29657\n29658\n29659\n29660\n29661\n29662\n29663\n29664\n29665\n29666\n29667\n29668\n29669\n29670\n29671\n29672\n29673\n29674\n29675\n29676\n29677\n29678\n29679\n29680\n29681\n29682\n29683\n29684\n29685\n29686\n29687\n29688\n29689\n29690\n29691\n29692\n29693\n29694\n29695\n29696\n29697\n29698\n29699\n29700\n29701\n29702\n29703\n29704\n29705\n29706\n29707\n29708\n29709\n29710\n29711\n29712\n29713\n29714\n29715\n29716\n29717\n29718\n29719\n29720\n29721\n29722\n29723\n29724\n29725\n29726\n29727\n29728\n29729\n29730\n29731\n29732\n29733\n29734\n29735\n29736\n29737\n29738\n29739\n29740\n29741\n29742\n29743\n29744\n29745\n29746\n29747\n29748\n29749\n29750\n29751\n29752\n29753\n29754\n29755\n29756\n29757\n29758\n29759\n29760\n29761\n29762\n29763\n29764\n29765\n29766\n29767\n29768\n29769\n29770\n29771\n29772\n29773\n29774\n29775\n29776\n29777\n29778\n29779\n29780\n29781\n29782\n29783\n29784\n29785\n29786\n29787\n29788\n29789\n29790\n29791\n29792\n29793\n29794\n29795\n29796\n29797\n29798\n29799\n29800\n29801\n29802\n29803\n29804\n29805\n29806\n29807\n29808\n29809\n29810\n29811\n29812\n29813\n29814\n29815\n29816\n29817\n29818\n29819\n29820\n29821\n29822\n29823\n29824\n29825\n29826\n29827\n29828\n29829\n29830\n29831\n29832\n29833\n29834\n29835\n29836\n29837\n29838\n29839\n29840\n29841\n29842\n29843\n29844\n29845\n29846\n29847\n29848\n29849\n29850\n29851\n29852\n29853\n29854\n29855\n29856\n29857\n29858\n29859\n29860\n29861\n29862\n29863\n29864\n29865\n29866\n29867\n29868\n29869\n29870\n29871\n2987
2\n29873\n29874\n29875\n29876\n29877\n29878\n29879\n29880\n29881\n29882\n29883\n29884\n29885\n29886\n29887\n29888\n29889\n29890\n29891\n29892\n29893\n29894\n29895\n29896\n29897\n29898\n29899\n29900\n29901\n29902\n29903\n29904\n29905\n29906\n29907\n29908\n29909\n29910\n29911\n29912\n29913\n29914\n29915\n29916\n29917\n29918\n29919\n29920\n29921\n29922\n29923\n29924\n29925\n29926\n29927\n29928\n29929\n29930\n29931\n29932\n29933\n29934\n29935\n29936\n29937\n29938\n29939\n29940\n29941\n29942\n29943\n29944\n29945\n29946\n29947\n29948\n29949\n29950\n29951\n29952\n29953\n29954\n29955\n29956\n29957\n29958\n29959\n29960\n29961\n29962\n29963\n29964\n29965\n29966\n29967\n29968\n29969\n29970\n29971\n29972\n29973\n29974\n29975\n29976\n29977\n29978\n29979\n29980\n29981\n29982\n29983\n29984\n29985\n29986\n29987\n29988\n29989\n29990\n29991\n29992\n29993\n29994\n29995\n29996\n29997\n29998\n29999'
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test11.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test11.arff
new file mode 100644
index 0000000..fadfaee
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test11.arff
@@ -0,0 +1,11 @@
+@RELATION test11
+
+@ATTRIBUTE attr0 REAL
+@ATTRIBUTE attr1 REAL
+@ATTRIBUTE attr2 REAL
+@ATTRIBUTE attr3 REAL
+@ATTRIBUTE class { class0, class1, class2, class3 }
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
+-0.1, -0.2, -0.3, -0.4,class2
+1, 2, 3, 4,class3
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test2.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test2.arff
new file mode 100644
index 0000000..30f0dbf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test2.arff
@@ -0,0 +1,15 @@
+@RELATION test2
+
+@ATTRIBUTE attr0 REAL
+@ATTRIBUTE attr1 real
+@ATTRIBUTE attr2 integer
+@ATTRIBUTE attr3 Integer
+@ATTRIBUTE attr4 Numeric
+@ATTRIBUTE attr5 numeric
+@ATTRIBUTE attr6 string
+@ATTRIBUTE attr7 STRING
+@ATTRIBUTE attr8 {bla}
+@ATTRIBUTE attr9 {bla, bla}
+
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test3.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test3.arff
new file mode 100644
index 0000000..23da3b3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test3.arff
@@ -0,0 +1,6 @@
+@RELATION test3
+
+@ATTRIBUTE attr0 crap
+
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test4.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test4.arff
new file mode 100644
index 0000000..bf5f99c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test4.arff
@@ -0,0 +1,11 @@
+@RELATION test5
+
+@ATTRIBUTE attr0 REAL
+@ATTRIBUTE attr1 REAL
+@ATTRIBUTE attr2 REAL
+@ATTRIBUTE attr3 REAL
+@ATTRIBUTE class {class0, class1, class2, class3}
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
+-0.1, -0.2, -0.3, -0.4,class2
+1, 2, 3, 4,class3
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test5.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test5.arff
new file mode 100644
index 0000000..0075daf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test5.arff
@@ -0,0 +1,26 @@
+@RELATION test4
+
+@ATTRIBUTE attr0 REAL
+@ATTRIBUTE attr1 REAL
+@ATTRIBUTE attr2 REAL
+@ATTRIBUTE attr3 REAL
+@ATTRIBUTE class {class0, class1, class2, class3}
+
+@DATA
+
+% lsdflkjhaksjdhf
+
+% lsdflkjhaksjdhf
+
+0.1, 0.2, 0.3, 0.4,class1
+% laksjdhf
+
+% lsdflkjhaksjdhf
+-0.1, -0.2, -0.3, -0.4,class2
+
+% lsdflkjhaksjdhf
+% lsdflkjhaksjdhf
+
+% lsdflkjhaksjdhf
+
+1, 2, 3, 4,class3
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test6.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test6.arff
new file mode 100644
index 0000000..b63280b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test6.arff
@@ -0,0 +1,12 @@
+@RELATION test6
+
+@ATTRIBUTE attr0 REAL
+@ATTRIBUTE attr1 REAL
+@ATTRIBUTE attr2 REAL
+@ATTRIBUTE attr3 REAL
+@ATTRIBUTE class {C}
+
+@DATA
+0.1, 0.2, 0.3, 0.4,C
+-0.1, -0.2, -0.3, -0.4,C
+1, 2, 3, 4,C
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test7.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test7.arff
new file mode 100644
index 0000000..38ef6c9
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test7.arff
@@ -0,0 +1,15 @@
+@RELATION test7
+
+@ATTRIBUTE attr_year DATE yyyy
+@ATTRIBUTE attr_month DATE yyyy-MM
+@ATTRIBUTE attr_date DATE yyyy-MM-dd
+@ATTRIBUTE attr_datetime_local DATE "yyyy-MM-dd HH:mm"
+@ATTRIBUTE attr_datetime_missing DATE "yyyy-MM-dd HH:mm"
+
+@DATA
+1999,1999-01,1999-01-31,"1999-01-31 00:01",?
+2004,2004-12,2004-12-01,"2004-12-01 23:59","2004-12-01 23:59"
+1817,1817-04,1817-04-28,"1817-04-28 13:00",?
+2100,2100-09,2100-09-10,"2100-09-10 12:00",?
+2013,2013-11,2013-11-30,"2013-11-30 04:55","2013-11-30 04:55"
+1631,1631-10,1631-10-15,"1631-10-15 20:04","1631-10-15 20:04"
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test8.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test8.arff
new file mode 100644
index 0000000..776deb4
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test8.arff
@@ -0,0 +1,12 @@
+@RELATION test8
+
+@ATTRIBUTE attr_datetime_utc DATE "yyyy-MM-dd HH:mm Z"
+@ATTRIBUTE attr_datetime_full DATE "yy-MM-dd HH:mm:ss z"
+
+@DATA
+"1999-01-31 00:01 UTC","99-01-31 00:01:08 +0430"
+"2004-12-01 23:59 UTC","04-12-01 23:59:59 -0800"
+"1817-04-28 13:00 UTC","17-04-28 13:00:33 +1000"
+"2100-09-10 12:00 UTC","21-09-10 12:00:21 -0300"
+"2013-11-30 04:55 UTC","13-11-30 04:55:48 -1100"
+"1631-10-15 20:04 UTC","31-10-15 20:04:10 +0000"
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test9.arff b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test9.arff
new file mode 100644
index 0000000..b3f97e3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/data/test9.arff
@@ -0,0 +1,14 @@
+@RELATION test9
+
+@ATTRIBUTE attr_date_number RELATIONAL
+ @ATTRIBUTE attr_date DATE "yyyy-MM-dd"
+ @ATTRIBUTE attr_number INTEGER
+@END attr_date_number
+
+@DATA
+"1999-01-31 1\n1935-11-27 10"
+"2004-12-01 2\n1942-08-13 20"
+"1817-04-28 3"
+"2100-09-10 4\n1957-04-17 40\n1721-01-14 400"
+"2013-11-30 5"
+"1631-10-15 6"
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/test_arffread.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/test_arffread.py
new file mode 100644
index 0000000..aff3388
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/arff/tests/test_arffread.py
@@ -0,0 +1,414 @@
+import datetime
+import os
+import sys
+from os.path import join as pjoin
+
+from io import StringIO
+
+import numpy as np
+
+from numpy.testing import (assert_array_almost_equal,
+ assert_array_equal, assert_equal, assert_)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.io.arff.arffread import loadarff
+from scipy.io.arff.arffread import read_header, ParseArffError
+
+
+data_path = pjoin(os.path.dirname(__file__), 'data')
+
+test1 = pjoin(data_path, 'test1.arff')
+test2 = pjoin(data_path, 'test2.arff')
+test3 = pjoin(data_path, 'test3.arff')
+
+test4 = pjoin(data_path, 'test4.arff')
+test5 = pjoin(data_path, 'test5.arff')
+test6 = pjoin(data_path, 'test6.arff')
+test7 = pjoin(data_path, 'test7.arff')
+test8 = pjoin(data_path, 'test8.arff')
+test9 = pjoin(data_path, 'test9.arff')
+test10 = pjoin(data_path, 'test10.arff')
+test11 = pjoin(data_path, 'test11.arff')
+test_quoted_nominal = pjoin(data_path, 'quoted_nominal.arff')
+test_quoted_nominal_spaces = pjoin(data_path, 'quoted_nominal_spaces.arff')
+
+expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
+ (-0.1, -0.2, -0.3, -0.4, 'class2'),
+ (1, 2, 3, 4, 'class3')]
+expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
+
+missing = pjoin(data_path, 'missing.arff')
+expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
+expect_missing = np.empty(3, [('yop', float), ('yap', float)])
+expect_missing['yop'] = expect_missing_raw[:, 0]
+expect_missing['yap'] = expect_missing_raw[:, 1]
+
+
+class TestData(object):
+ def test1(self):
+ # Parsing trivial file with nothing.
+ self._test(test4)
+
+ def test2(self):
+ # Parsing trivial file with some comments in the data section.
+ self._test(test5)
+
+ def test3(self):
+ # Parsing trivial file with nominal attribute of 1 character.
+ self._test(test6)
+
+ def test4(self):
+ # Parsing trivial file with trailing spaces in attribute declaration.
+ self._test(test11)
+
+ def _test(self, test_file):
+ data, meta = loadarff(test_file)
+ for i in range(len(data)):
+ for j in range(4):
+ assert_array_almost_equal(expect4_data[i][j], data[i][j])
+ assert_equal(meta.types(), expected_types)
+
+ def test_filelike(self):
+ # Test reading from file-like object (StringIO)
+ with open(test1) as f1:
+ data1, meta1 = loadarff(f1)
+ with open(test1) as f2:
+ data2, meta2 = loadarff(StringIO(f2.read()))
+ assert_(data1 == data2)
+ assert_(repr(meta1) == repr(meta2))
+
+ def test_path(self):
+ # Test reading from `pathlib.Path` object
+ from pathlib import Path
+
+ with open(test1) as f1:
+ data1, meta1 = loadarff(f1)
+
+ data2, meta2 = loadarff(Path(test1))
+
+ assert_(data1 == data2)
+ assert_(repr(meta1) == repr(meta2))
+
+
+class TestMissingData(object):
+ def test_missing(self):
+ data, meta = loadarff(missing)
+ for i in ['yop', 'yap']:
+ assert_array_almost_equal(data[i], expect_missing[i])
+
+
+class TestNoData(object):
+ def test_nodata(self):
+ # The file nodata.arff has no data in the @DATA section.
+ # Reading it should result in an array with length 0.
+ nodata_filename = os.path.join(data_path, 'nodata.arff')
+ data, meta = loadarff(nodata_filename)
+ expected_dtype = np.dtype([('sepallength', ' 0, -n and n if n < 0
+
+ Parameters
+ ----------
+ n : int
+ max number one wants to be able to represent
+ min : int
+ minimum number of characters to use for the format
+
+ Returns
+ -------
+ res : IntFormat
+ IntFormat instance with reasonable (see Notes) computed width
+
+ Notes
+ -----
+ Reasonable should be understood as the minimal string length necessary
+ without losing precision. For example, IntFormat.from_number(1) will
+ return an IntFormat instance of width 2, so that any 0 and 1 may be
+ represented as 1-character strings without loss of information.
+ """
+ width = number_digits(n) + 1
+ if n < 0:
+ width += 1
+ repeat = 80 // width
+ return cls(width, min, repeat=repeat)
+
+ def __init__(self, width, min=None, repeat=None):
+ self.width = width
+ self.repeat = repeat
+ self.min = min
+
+ def __repr__(self):
+ r = "IntFormat("
+ if self.repeat:
+ r += "%d" % self.repeat
+ r += "I%d" % self.width
+ if self.min:
+ r += ".%d" % self.min
+ return r + ")"
+
+ @property
+ def fortran_format(self):
+ r = "("
+ if self.repeat:
+ r += "%d" % self.repeat
+ r += "I%d" % self.width
+ if self.min:
+ r += ".%d" % self.min
+ return r + ")"
+
+ @property
+ def python_format(self):
+ return "%" + str(self.width) + "d"
+
+
+class ExpFormat(object):
+ @classmethod
+ def from_number(cls, n, min=None):
+ """Given a float number, returns a "reasonable" ExpFormat instance to
+ represent any number between -n and n.
+
+ Parameters
+ ----------
+ n : float
+ max number one wants to be able to represent
+ min : int
+ minimum number of characters to use for the format
+
+ Returns
+ -------
+ res : ExpFormat
+ ExpFormat instance with reasonable (see Notes) computed width
+
+ Notes
+ -----
+ Reasonable should be understood as the minimal string length necessary
+ to avoid losing precision.
+ """
+ # len of one number in exp format: sign + 1|0 + "." +
+ # number of digit for fractional part + 'E' + sign of exponent +
+ # len of exponent
+ finfo = np.finfo(n.dtype)
+ # Number of digits for fractional part
+ n_prec = finfo.precision + 1
+ # Number of digits for exponential part
+ n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
+ width = 1 + 1 + n_prec + 1 + n_exp + 1
+ if n < 0:
+ width += 1
+ repeat = int(np.floor(80 / width))
+ return cls(width, n_prec, min, repeat=repeat)
+
+ def __init__(self, width, significand, min=None, repeat=None):
+ """\
+ Parameters
+ ----------
+ width : int
+ number of characters taken by the string (includes space).
+ """
+ self.width = width
+ self.significand = significand
+ self.repeat = repeat
+ self.min = min
+
+ def __repr__(self):
+ r = "ExpFormat("
+ if self.repeat:
+ r += "%d" % self.repeat
+ r += "E%d.%d" % (self.width, self.significand)
+ if self.min:
+ r += "E%d" % self.min
+ return r + ")"
+
+ @property
+ def fortran_format(self):
+ r = "("
+ if self.repeat:
+ r += "%d" % self.repeat
+ r += "E%d.%d" % (self.width, self.significand)
+ if self.min:
+ r += "E%d" % self.min
+ return r + ")"
+
+ @property
+ def python_format(self):
+ return "%" + str(self.width-1) + "." + str(self.significand) + "E"
+
+
+class Token(object):
+ def __init__(self, type, value, pos):
+ self.type = type
+ self.value = value
+ self.pos = pos
+
+ def __str__(self):
+ return """Token('%s', "%s")""" % (self.type, self.value)
+
+ def __repr__(self):
+ return self.__str__()
+
+
+class Tokenizer(object):
+ def __init__(self):
+ self.tokens = list(TOKENS.keys())
+ self.res = [re.compile(TOKENS[i]) for i in self.tokens]
+
+ def input(self, s):
+ self.data = s
+ self.curpos = 0
+ self.len = len(s)
+
+ def next_token(self):
+ curpos = self.curpos
+
+ while curpos < self.len:
+ for i, r in enumerate(self.res):
+ m = r.match(self.data, curpos)
+ if m is None:
+ continue
+ else:
+ self.curpos = m.end()
+ return Token(self.tokens[i], m.group(), self.curpos)
+ raise SyntaxError("Unknown character at position %d (%s)"
+ % (self.curpos, self.data[curpos]))
+
+
+# Grammar for fortran format:
+# format : LPAR format_string RPAR
+# format_string : repeated | simple
+# repeated : repeat simple
+# simple : int_fmt | exp_fmt
+# int_fmt : INT_ID width
+# exp_fmt : simple_exp_fmt
+# simple_exp_fmt : EXP_ID width DOT significand
+# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits
+# repeat : INT
+# width : INT
+# significand : INT
+# ndigits : INT
+
+# Naive fortran formatter - parser is hand-made
+class FortranFormatParser(object):
+ """Parser for Fortran format strings. The parse method returns a *Format
+ instance.
+
+ Notes
+ -----
+ Only ExpFormat (exponential format for floating values) and IntFormat
+ (integer format) for now.
+ """
+ def __init__(self):
+ self.tokenizer = Tokenizer()
+
+ def parse(self, s):
+ self.tokenizer.input(s)
+
+ tokens = []
+
+ try:
+ while True:
+ t = self.tokenizer.next_token()
+ if t is None:
+ break
+ else:
+ tokens.append(t)
+ return self._parse_format(tokens)
+ except SyntaxError as e:
+ raise BadFortranFormat(str(e)) from e
+
+ def _get_min(self, tokens):
+ next = tokens.pop(0)
+ if not next.type == "DOT":
+ raise SyntaxError()
+ next = tokens.pop(0)
+ return next.value
+
+ def _expect(self, token, tp):
+ if not token.type == tp:
+ raise SyntaxError()
+
+ def _parse_format(self, tokens):
+ if not tokens[0].type == "LPAR":
+ raise SyntaxError("Expected left parenthesis at position "
+ "%d (got '%s')" % (0, tokens[0].value))
+ elif not tokens[-1].type == "RPAR":
+ raise SyntaxError("Expected right parenthesis at position "
+ "%d (got '%s')" % (len(tokens), tokens[-1].value))
+
+ tokens = tokens[1:-1]
+ types = [t.type for t in tokens]
+ if types[0] == "INT":
+ repeat = int(tokens.pop(0).value)
+ else:
+ repeat = None
+
+ next = tokens.pop(0)
+ if next.type == "INT_ID":
+ next = self._next(tokens, "INT")
+ width = int(next.value)
+ if tokens:
+ min = int(self._get_min(tokens))
+ else:
+ min = None
+ return IntFormat(width, min, repeat)
+ elif next.type == "EXP_ID":
+ next = self._next(tokens, "INT")
+ width = int(next.value)
+
+ next = self._next(tokens, "DOT")
+
+ next = self._next(tokens, "INT")
+ significand = int(next.value)
+
+ if tokens:
+ next = self._next(tokens, "EXP_ID")
+
+ next = self._next(tokens, "INT")
+ min = int(next.value)
+ else:
+ min = None
+ return ExpFormat(width, significand, min, repeat)
+ else:
+ raise SyntaxError("Invalid formater type %s" % next.value)
+
+ def _next(self, tokens, tp):
+ if not len(tokens) > 0:
+ raise SyntaxError()
+ next = tokens.pop(0)
+ self._expect(next, tp)
+ return next
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/hb.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/hb.py
new file mode 100644
index 0000000..00561f8
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/hb.py
@@ -0,0 +1,571 @@
+"""
+Implementation of Harwell-Boeing read/write.
+
+At the moment not the full Harwell-Boeing format is supported. Supported
+features are:
+
+ - assembled, non-symmetric, real matrices
+ - integer for pointer/indices
+ - exponential format for float values, and int format
+
+"""
+# TODO:
+# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
+
+# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
+# takes a lot of memory. Being faster would require compiled code.
+# write is not efficient. Although not a terribly exciting task,
+# having reusable facilities to efficiently read/write fortran-formatted files
+# would be useful outside this module.
+
+import warnings
+
+import numpy as np
+from scipy.sparse import csc_matrix
+from scipy.io.harwell_boeing._fortran_format_parser import \
+ FortranFormatParser, IntFormat, ExpFormat
+
+__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
+ "HBMatrixType"]
+
+
+class MalformedHeader(Exception):
+ pass
+
+
+class LineOverflow(Warning):
+ pass
+
+
+def _nbytes_full(fmt, nlines):
+ """Return the number of bytes to read to get every full lines for the
+ given parsed fortran format."""
+ return (fmt.repeat * fmt.width + 1) * (nlines - 1)
+
+
+class HBInfo(object):
+ @classmethod
+ def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
+ """Create a HBInfo instance from an existing sparse matrix.
+
+ Parameters
+ ----------
+ m : sparse matrix
+ the HBInfo instance will derive its parameters from m
+ title : str
+ Title to put in the HB header
+ key : str
+ Key
+ mxtype : HBMatrixType
+ type of the input matrix
+ fmt : dict
+ not implemented
+
+ Returns
+ -------
+ hb_info : HBInfo instance
+ """
+ m = m.tocsc(copy=False)
+
+ pointer = m.indptr
+ indices = m.indices
+ values = m.data
+
+ nrows, ncols = m.shape
+ nnon_zeros = m.nnz
+
+ if fmt is None:
+ # +1 because HB use one-based indexing (Fortran), and we will write
+ # the indices /pointer as such
+ pointer_fmt = IntFormat.from_number(np.max(pointer+1))
+ indices_fmt = IntFormat.from_number(np.max(indices+1))
+
+ if values.dtype.kind in np.typecodes["AllFloat"]:
+ values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
+ elif values.dtype.kind in np.typecodes["AllInteger"]:
+ values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
+ else:
+ raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
+ else:
+ raise NotImplementedError("fmt argument not supported yet.")
+
+ if mxtype is None:
+ if not np.isrealobj(values):
+ raise ValueError("Complex values not supported yet")
+ if values.dtype.kind in np.typecodes["AllInteger"]:
+ tp = "integer"
+ elif values.dtype.kind in np.typecodes["AllFloat"]:
+ tp = "real"
+ else:
+ raise NotImplementedError("type %s for values not implemented"
+ % values.dtype)
+ mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
+ else:
+ raise ValueError("mxtype argument not handled yet.")
+
+ def _nlines(fmt, size):
+ nlines = size // fmt.repeat
+ if nlines * fmt.repeat != size:
+ nlines += 1
+ return nlines
+
+ pointer_nlines = _nlines(pointer_fmt, pointer.size)
+ indices_nlines = _nlines(indices_fmt, indices.size)
+ values_nlines = _nlines(values_fmt, values.size)
+
+ total_nlines = pointer_nlines + indices_nlines + values_nlines
+
+ return cls(title, key,
+ total_nlines, pointer_nlines, indices_nlines, values_nlines,
+ mxtype, nrows, ncols, nnon_zeros,
+ pointer_fmt.fortran_format, indices_fmt.fortran_format,
+ values_fmt.fortran_format)
+
+ @classmethod
+ def from_file(cls, fid):
+ """Create a HBInfo instance from a file object containing a matrix in the
+ HB format.
+
+ Parameters
+ ----------
+ fid : file-like matrix
+ File or file-like object containing a matrix in the HB format.
+
+ Returns
+ -------
+ hb_info : HBInfo instance
+ """
+ # First line
+ line = fid.readline().strip("\n")
+ if not len(line) > 72:
+ raise ValueError("Expected at least 72 characters for first line, "
+ "got: \n%s" % line)
+ title = line[:72]
+ key = line[72:]
+
+ # Second line
+ line = fid.readline().strip("\n")
+ if not len(line.rstrip()) >= 56:
+ raise ValueError("Expected at least 56 characters for second line, "
+ "got: \n%s" % line)
+ total_nlines = _expect_int(line[:14])
+ pointer_nlines = _expect_int(line[14:28])
+ indices_nlines = _expect_int(line[28:42])
+ values_nlines = _expect_int(line[42:56])
+
+ rhs_nlines = line[56:72].strip()
+ if rhs_nlines == '':
+ rhs_nlines = 0
+ else:
+ rhs_nlines = _expect_int(rhs_nlines)
+ if not rhs_nlines == 0:
+ raise ValueError("Only files without right hand side supported for "
+ "now.")
+
+ # Third line
+ line = fid.readline().strip("\n")
+ if not len(line) >= 70:
+ raise ValueError("Expected at least 72 character for third line, got:\n"
+ "%s" % line)
+
+ mxtype_s = line[:3].upper()
+ if not len(mxtype_s) == 3:
+ raise ValueError("mxtype expected to be 3 characters long")
+
+ mxtype = HBMatrixType.from_fortran(mxtype_s)
+ if mxtype.value_type not in ["real", "integer"]:
+ raise ValueError("Only real or integer matrices supported for "
+ "now (detected %s)" % mxtype)
+ if not mxtype.structure == "unsymmetric":
+ raise ValueError("Only unsymmetric matrices supported for "
+ "now (detected %s)" % mxtype)
+ if not mxtype.storage == "assembled":
+ raise ValueError("Only assembled matrices supported for now")
+
+ if not line[3:14] == " " * 11:
+ raise ValueError("Malformed data for third line: %s" % line)
+
+ nrows = _expect_int(line[14:28])
+ ncols = _expect_int(line[28:42])
+ nnon_zeros = _expect_int(line[42:56])
+ nelementals = _expect_int(line[56:70])
+ if not nelementals == 0:
+ raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
+ % nelementals)
+
+ # Fourth line
+ line = fid.readline().strip("\n")
+
+ ct = line.split()
+ if not len(ct) == 3:
+ raise ValueError("Expected 3 formats, got %s" % ct)
+
+ return cls(title, key,
+ total_nlines, pointer_nlines, indices_nlines, values_nlines,
+ mxtype, nrows, ncols, nnon_zeros,
+ ct[0], ct[1], ct[2],
+ rhs_nlines, nelementals)
+
+ def __init__(self, title, key,
+ total_nlines, pointer_nlines, indices_nlines, values_nlines,
+ mxtype, nrows, ncols, nnon_zeros,
+ pointer_format_str, indices_format_str, values_format_str,
+ right_hand_sides_nlines=0, nelementals=0):
+ """Do not use this directly, but the class ctrs (from_* functions)."""
+ self.title = title
+ self.key = key
+ if title is None:
+ title = "No Title"
+ if len(title) > 72:
+ raise ValueError("title cannot be > 72 characters")
+
+ if key is None:
+ key = "|No Key"
+ if len(key) > 8:
+ warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
+
+ self.total_nlines = total_nlines
+ self.pointer_nlines = pointer_nlines
+ self.indices_nlines = indices_nlines
+ self.values_nlines = values_nlines
+
+ parser = FortranFormatParser()
+ pointer_format = parser.parse(pointer_format_str)
+ if not isinstance(pointer_format, IntFormat):
+ raise ValueError("Expected int format for pointer format, got %s"
+ % pointer_format)
+
+ indices_format = parser.parse(indices_format_str)
+ if not isinstance(indices_format, IntFormat):
+ raise ValueError("Expected int format for indices format, got %s" %
+ indices_format)
+
+ values_format = parser.parse(values_format_str)
+ if isinstance(values_format, ExpFormat):
+ if mxtype.value_type not in ["real", "complex"]:
+ raise ValueError("Inconsistency between matrix type %s and "
+ "value type %s" % (mxtype, values_format))
+ values_dtype = np.float64
+ elif isinstance(values_format, IntFormat):
+ if mxtype.value_type not in ["integer"]:
+ raise ValueError("Inconsistency between matrix type %s and "
+ "value type %s" % (mxtype, values_format))
+ # XXX: fortran int -> dtype association ?
+ values_dtype = int
+ else:
+ raise ValueError("Unsupported format for values %r" % (values_format,))
+
+ self.pointer_format = pointer_format
+ self.indices_format = indices_format
+ self.values_format = values_format
+
+ self.pointer_dtype = np.int32
+ self.indices_dtype = np.int32
+ self.values_dtype = values_dtype
+
+ self.pointer_nlines = pointer_nlines
+ self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
+
+ self.indices_nlines = indices_nlines
+ self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
+
+ self.values_nlines = values_nlines
+ self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
+
+ self.nrows = nrows
+ self.ncols = ncols
+ self.nnon_zeros = nnon_zeros
+ self.nelementals = nelementals
+ self.mxtype = mxtype
+
+ def dump(self):
+ """Gives the header corresponding to this instance as a string."""
+ header = [self.title.ljust(72) + self.key.ljust(8)]
+
+ header.append("%14d%14d%14d%14d" %
+ (self.total_nlines, self.pointer_nlines,
+ self.indices_nlines, self.values_nlines))
+ header.append("%14s%14d%14d%14d%14d" %
+ (self.mxtype.fortran_format.ljust(14), self.nrows,
+ self.ncols, self.nnon_zeros, 0))
+
+ pffmt = self.pointer_format.fortran_format
+ iffmt = self.indices_format.fortran_format
+ vffmt = self.values_format.fortran_format
+ header.append("%16s%16s%20s" %
+ (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
+ return "\n".join(header)
+
+
+def _expect_int(value, msg=None):
+ try:
+ return int(value)
+ except ValueError as e:
+ if msg is None:
+ msg = "Expected an int, got %s"
+ raise ValueError(msg % value) from e
+
+
+def _read_hb_data(content, header):
+ # XXX: look at a way to reduce memory here (big string creation)
+ ptr_string = "".join([content.read(header.pointer_nbytes_full),
+ content.readline()])
+ ptr = np.fromstring(ptr_string,
+ dtype=int, sep=' ')
+
+ ind_string = "".join([content.read(header.indices_nbytes_full),
+ content.readline()])
+ ind = np.fromstring(ind_string,
+ dtype=int, sep=' ')
+
+ val_string = "".join([content.read(header.values_nbytes_full),
+ content.readline()])
+ val = np.fromstring(val_string,
+ dtype=header.values_dtype, sep=' ')
+
+ try:
+ return csc_matrix((val, ind-1, ptr-1),
+ shape=(header.nrows, header.ncols))
+ except ValueError as e:
+ raise e
+
+
+def _write_data(m, fid, header):
+ m = m.tocsc(copy=False)
+
+ def write_array(f, ar, nlines, fmt):
+ # ar_nlines is the number of full lines, n is the number of items per
+ # line, ffmt the fortran format
+ pyfmt = fmt.python_format
+ pyfmt_full = pyfmt * fmt.repeat
+
+ # for each array to write, we first write the full lines, and special
+ # case for partial line
+ full = ar[:(nlines - 1) * fmt.repeat]
+ for row in full.reshape((nlines-1, fmt.repeat)):
+ f.write(pyfmt_full % tuple(row) + "\n")
+ nremain = ar.size - full.size
+ if nremain > 0:
+ f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
+
+ fid.write(header.dump())
+ fid.write("\n")
+ # +1 is for Fortran one-based indexing
+ write_array(fid, m.indptr+1, header.pointer_nlines,
+ header.pointer_format)
+ write_array(fid, m.indices+1, header.indices_nlines,
+ header.indices_format)
+ write_array(fid, m.data, header.values_nlines,
+ header.values_format)
+
+
+class HBMatrixType(object):
+ """Class to hold the matrix type."""
+ # q2f* translates qualified names to Fortran character
+ _q2f_type = {
+ "real": "R",
+ "complex": "C",
+ "pattern": "P",
+ "integer": "I",
+ }
+ _q2f_structure = {
+ "symmetric": "S",
+ "unsymmetric": "U",
+ "hermitian": "H",
+ "skewsymmetric": "Z",
+ "rectangular": "R"
+ }
+ _q2f_storage = {
+ "assembled": "A",
+ "elemental": "E",
+ }
+
+ _f2q_type = dict([(j, i) for i, j in _q2f_type.items()])
+ _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()])
+ _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()])
+
+ @classmethod
+ def from_fortran(cls, fmt):
+ if not len(fmt) == 3:
+ raise ValueError("Fortran format for matrix type should be 3 "
+ "characters long")
+ try:
+ value_type = cls._f2q_type[fmt[0]]
+ structure = cls._f2q_structure[fmt[1]]
+ storage = cls._f2q_storage[fmt[2]]
+ return cls(value_type, structure, storage)
+ except KeyError as e:
+ raise ValueError("Unrecognized format %s" % fmt) from e
+
+ def __init__(self, value_type, structure, storage="assembled"):
+ self.value_type = value_type
+ self.structure = structure
+ self.storage = storage
+
+ if value_type not in self._q2f_type:
+ raise ValueError("Unrecognized type %s" % value_type)
+ if structure not in self._q2f_structure:
+ raise ValueError("Unrecognized structure %s" % structure)
+ if storage not in self._q2f_storage:
+ raise ValueError("Unrecognized storage %s" % storage)
+
+ @property
+ def fortran_format(self):
+ return self._q2f_type[self.value_type] + \
+ self._q2f_structure[self.structure] + \
+ self._q2f_storage[self.storage]
+
+ def __repr__(self):
+ return "HBMatrixType(%s, %s, %s)" % \
+ (self.value_type, self.structure, self.storage)
+
+
+class HBFile(object):
+ def __init__(self, file, hb_info=None):
+ """Create a HBFile instance.
+
+ Parameters
+ ----------
+ file : file-object
+ StringIO work as well
+ hb_info : HBInfo, optional
+ Should be given as an argument for writing, in which case the file
+ should be writable.
+ """
+ self._fid = file
+ if hb_info is None:
+ self._hb_info = HBInfo.from_file(file)
+ else:
+ #raise IOError("file %s is not writable, and hb_info "
+ # "was given." % file)
+ self._hb_info = hb_info
+
+ @property
+ def title(self):
+ return self._hb_info.title
+
+ @property
+ def key(self):
+ return self._hb_info.key
+
+ @property
+ def type(self):
+ return self._hb_info.mxtype.value_type
+
+ @property
+ def structure(self):
+ return self._hb_info.mxtype.structure
+
+ @property
+ def storage(self):
+ return self._hb_info.mxtype.storage
+
+ def read_matrix(self):
+ return _read_hb_data(self._fid, self._hb_info)
+
+ def write_matrix(self, m):
+ return _write_data(m, self._fid, self._hb_info)
+
+
+def hb_read(path_or_open_file):
+ """Read HB-format file.
+
+ Parameters
+ ----------
+ path_or_open_file : path-like or file-like
+ If a file-like object, it is used as-is. Otherwise, it is opened
+ before reading.
+
+ Returns
+ -------
+ data : scipy.sparse.csc_matrix instance
+ The data read from the HB file as a sparse matrix.
+
+ Notes
+ -----
+ At the moment not the full Harwell-Boeing format is supported. Supported
+ features are:
+
+ - assembled, non-symmetric, real matrices
+ - integer for pointer/indices
+ - exponential format for float values, and int format
+
+ Examples
+ --------
+ We can read and write a harwell-boeing format file:
+
+ >>> from scipy.io.harwell_boeing import hb_read, hb_write
+ >>> from scipy.sparse import csr_matrix, eye
+ >>> data = csr_matrix(eye(3)) # create a sparse matrix
+ >>> hb_write("data.hb", data) # write a hb file
+ >>> print(hb_read("data.hb")) # read a hb file
+ (0, 0) 1.0
+ (1, 1) 1.0
+ (2, 2) 1.0
+
+ """
+ def _get_matrix(fid):
+ hb = HBFile(fid)
+ return hb.read_matrix()
+
+ if hasattr(path_or_open_file, 'read'):
+ return _get_matrix(path_or_open_file)
+ else:
+ with open(path_or_open_file) as f:
+ return _get_matrix(f)
+
+
+def hb_write(path_or_open_file, m, hb_info=None):
+ """Write HB-format file.
+
+ Parameters
+ ----------
+ path_or_open_file : path-like or file-like
+ If a file-like object, it is used as-is. Otherwise, it is opened
+ before writing.
+ m : sparse-matrix
+ the sparse matrix to write
+ hb_info : HBInfo
+ contains the meta-data for write
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ At the moment not the full Harwell-Boeing format is supported. Supported
+ features are:
+
+ - assembled, non-symmetric, real matrices
+ - integer for pointer/indices
+ - exponential format for float values, and int format
+
+ Examples
+ --------
+ We can read and write a harwell-boeing format file:
+
+ >>> from scipy.io.harwell_boeing import hb_read, hb_write
+ >>> from scipy.sparse import csr_matrix, eye
+ >>> data = csr_matrix(eye(3)) # create a sparse matrix
+ >>> hb_write("data.hb", data) # write a hb file
+ >>> print(hb_read("data.hb")) # read a hb file
+ (0, 0) 1.0
+ (1, 1) 1.0
+ (2, 2) 1.0
+
+ """
+ m = m.tocsc(copy=False)
+
+ if hb_info is None:
+ hb_info = HBInfo.from_data(m)
+
+ def _set_matrix(fid):
+ hb = HBFile(fid, hb_info)
+ return hb.write_matrix(m)
+
+ if hasattr(path_or_open_file, 'write'):
+ return _set_matrix(path_or_open_file)
+ else:
+ with open(path_or_open_file, 'w') as f:
+ return _set_matrix(f)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/setup.py
new file mode 100644
index 0000000..8cca81e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/setup.py
@@ -0,0 +1,12 @@
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('harwell_boeing',parent_package,top_path)
+ config.add_data_dir('tests')
+
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/test_fortran_format.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/test_fortran_format.py
new file mode 100644
index 0000000..ca5af52
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/test_fortran_format.py
@@ -0,0 +1,74 @@
+import numpy as np
+
+from numpy.testing import assert_equal
+from pytest import raises as assert_raises
+
+from scipy.io.harwell_boeing._fortran_format_parser import (
+ FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat)
+
+
+class TestFortranFormatParser(object):
+ def setup_method(self):
+ self.parser = FortranFormatParser()
+
+ def _test_equal(self, format, ref):
+ ret = self.parser.parse(format)
+ assert_equal(ret.__dict__, ref.__dict__)
+
+ def test_simple_int(self):
+ self._test_equal("(I4)", IntFormat(4))
+
+ def test_simple_repeated_int(self):
+ self._test_equal("(3I4)", IntFormat(4, repeat=3))
+
+ def test_simple_exp(self):
+ self._test_equal("(E4.3)", ExpFormat(4, 3))
+
+ def test_exp_exp(self):
+ self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3))
+
+ def test_repeat_exp(self):
+ self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2))
+
+ def test_repeat_exp_exp(self):
+ self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2))
+
+ def test_wrong_formats(self):
+ def _test_invalid(bad_format):
+ assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format))
+ _test_invalid("I4")
+ _test_invalid("(E4)")
+ _test_invalid("(E4.)")
+ _test_invalid("(E4.E3)")
+
+
+class TestIntFormat(object):
+ def test_to_fortran(self):
+ f = [IntFormat(10), IntFormat(12, 10), IntFormat(12, 10, 3)]
+ res = ["(I10)", "(I12.10)", "(3I12.10)"]
+
+ for i, j in zip(f, res):
+ assert_equal(i.fortran_format, j)
+
+ def test_from_number(self):
+ f = [10, -12, 123456789]
+ r_f = [IntFormat(3, repeat=26), IntFormat(4, repeat=20),
+ IntFormat(10, repeat=8)]
+ for i, j in zip(f, r_f):
+ assert_equal(IntFormat.from_number(i).__dict__, j.__dict__)
+
+
+class TestExpFormat(object):
+ def test_to_fortran(self):
+ f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3),
+ ExpFormat(10, 5, repeat=3)]
+ res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"]
+
+ for i, j in zip(f, res):
+ assert_equal(i.fortran_format, j)
+
+ def test_from_number(self):
+ f = np.array([1.0, -1.2])
+ r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)]
+ for i, j in zip(f, r_f):
+ assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/test_hb.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/test_hb.py
new file mode 100644
index 0000000..c00cde3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/harwell_boeing/tests/test_hb.py
@@ -0,0 +1,65 @@
+from io import StringIO
+import tempfile
+
+import numpy as np
+
+from numpy.testing import assert_equal, \
+ assert_array_almost_equal_nulp
+
+from scipy.sparse import coo_matrix, csc_matrix, rand
+
+from scipy.io import hb_read, hb_write
+
+
+SIMPLE = """\
+No Title |No Key
+ 9 4 1 4
+RUA 100 100 10 0
+(26I3) (26I3) (3E23.15)
+1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
+3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
+3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
+9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
+37 71 89 18 30 45 70 19 25 52
+2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
+6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
+4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
+6.912334991524289e-01
+"""
+
+SIMPLE_MATRIX = coo_matrix(
+ ((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
+ 0.0661749042483, 0.887037034319, 0.419647859016,
+ 0.564960307211, 0.993442388709, 0.691233499152,),
+ (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
+ [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
+
+
+def assert_csc_almost_equal(r, l):
+ r = csc_matrix(r)
+ l = csc_matrix(l)
+ assert_equal(r.indptr, l.indptr)
+ assert_equal(r.indices, l.indices)
+ assert_array_almost_equal_nulp(r.data, l.data, 10000)
+
+
+class TestHBReader(object):
+ def test_simple(self):
+ m = hb_read(StringIO(SIMPLE))
+ assert_csc_almost_equal(m, SIMPLE_MATRIX)
+
+
+class TestHBReadWrite(object):
+
+ def check_save_load(self, value):
+ with tempfile.NamedTemporaryFile(mode='w+t') as file:
+ hb_write(file, value)
+ file.file.seek(0)
+ value_loaded = hb_read(file)
+ assert_csc_almost_equal(value, value_loaded)
+
+ def test_simple(self):
+ random_matrix = rand(10, 100, 0.1)
+ for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
+ matrix = random_matrix.asformat(matrix_format, copy=False)
+ self.check_save_load(matrix)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/idl.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/idl.py
new file mode 100644
index 0000000..d0e19b5
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/idl.py
@@ -0,0 +1,901 @@
+# IDLSave - a python module to read IDL 'save' files
+# Copyright (c) 2010 Thomas P. Robitaille
+
+# Many thanks to Craig Markwardt for publishing the Unofficial Format
+# Specification for IDL .sav files, without which this Python module would not
+# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
+
+# This code was developed by with permission from ITT Visual Information
+# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
+# Inc. for their Interactive Data Language software.
+
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+__all__ = ['readsav']
+
+import struct
+import numpy as np
+from numpy.compat import asstr
+import tempfile
+import zlib
+import warnings
+
+# Define the different data types that can be found in an IDL save file
+DTYPE_DICT = {1: '>u1',
+ 2: '>i2',
+ 3: '>i4',
+ 4: '>f4',
+ 5: '>f8',
+ 6: '>c8',
+ 7: '|O',
+ 8: '|O',
+ 9: '>c16',
+ 10: '|O',
+ 11: '|O',
+ 12: '>u2',
+ 13: '>u4',
+ 14: '>i8',
+ 15: '>u8'}
+
+# Define the different record types that can be found in an IDL save file
+RECTYPE_DICT = {0: "START_MARKER",
+ 1: "COMMON_VARIABLE",
+ 2: "VARIABLE",
+ 3: "SYSTEM_VARIABLE",
+ 6: "END_MARKER",
+ 10: "TIMESTAMP",
+ 12: "COMPILED",
+ 13: "IDENTIFICATION",
+ 14: "VERSION",
+ 15: "HEAP_HEADER",
+ 16: "HEAP_DATA",
+ 17: "PROMOTE64",
+ 19: "NOTICE",
+ 20: "DESCRIPTION"}
+
+# Define a dictionary to contain structure definitions
+STRUCT_DICT = {}
+
+
+def _align_32(f):
+ '''Align to the next 32-bit position in a file'''
+
+ pos = f.tell()
+ if pos % 4 != 0:
+ f.seek(pos + 4 - pos % 4)
+ return
+
+
+def _skip_bytes(f, n):
+ '''Skip `n` bytes'''
+ f.read(n)
+ return
+
+
+def _read_bytes(f, n):
+ '''Read the next `n` bytes'''
+ return f.read(n)
+
+
+def _read_byte(f):
+ '''Read a single byte'''
+ return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])
+
+
+def _read_long(f):
+ '''Read a signed 32-bit integer'''
+ return np.int32(struct.unpack('>l', f.read(4))[0])
+
+
+def _read_int16(f):
+ '''Read a signed 16-bit integer'''
+ return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
+
+
+def _read_int32(f):
+ '''Read a signed 32-bit integer'''
+ return np.int32(struct.unpack('>i', f.read(4))[0])
+
+
+def _read_int64(f):
+ '''Read a signed 64-bit integer'''
+ return np.int64(struct.unpack('>q', f.read(8))[0])
+
+
+def _read_uint16(f):
+ '''Read an unsigned 16-bit integer'''
+ return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])
+
+
+def _read_uint32(f):
+ '''Read an unsigned 32-bit integer'''
+ return np.uint32(struct.unpack('>I', f.read(4))[0])
+
+
+def _read_uint64(f):
+ '''Read an unsigned 64-bit integer'''
+ return np.uint64(struct.unpack('>Q', f.read(8))[0])
+
+
+def _read_float32(f):
+ '''Read a 32-bit float'''
+ return np.float32(struct.unpack('>f', f.read(4))[0])
+
+
+def _read_float64(f):
+ '''Read a 64-bit float'''
+ return np.float64(struct.unpack('>d', f.read(8))[0])
+
+
+class Pointer(object):
+ '''Class used to define pointers'''
+
+ def __init__(self, index):
+ self.index = index
+ return
+
+
+class ObjectPointer(Pointer):
+ '''Class used to define object pointers'''
+ pass
+
+
+def _read_string(f):
+ '''Read a string'''
+ length = _read_long(f)
+ if length > 0:
+ chars = _read_bytes(f, length)
+ _align_32(f)
+ chars = asstr(chars)
+ else:
+ chars = ''
+ return chars
+
+
+def _read_string_data(f):
+ '''Read a data string (length is specified twice)'''
+ length = _read_long(f)
+ if length > 0:
+ length = _read_long(f)
+ string_data = _read_bytes(f, length)
+ _align_32(f)
+ else:
+ string_data = ''
+ return string_data
+
+
+def _read_data(f, dtype):
+ '''Read a variable with a specified data type'''
+ if dtype == 1:
+ if _read_int32(f) != 1:
+ raise Exception("Error occurred while reading byte variable")
+ return _read_byte(f)
+ elif dtype == 2:
+ return _read_int16(f)
+ elif dtype == 3:
+ return _read_int32(f)
+ elif dtype == 4:
+ return _read_float32(f)
+ elif dtype == 5:
+ return _read_float64(f)
+ elif dtype == 6:
+ real = _read_float32(f)
+ imag = _read_float32(f)
+ return np.complex64(real + imag * 1j)
+ elif dtype == 7:
+ return _read_string_data(f)
+ elif dtype == 8:
+ raise Exception("Should not be here - please report this")
+ elif dtype == 9:
+ real = _read_float64(f)
+ imag = _read_float64(f)
+ return np.complex128(real + imag * 1j)
+ elif dtype == 10:
+ return Pointer(_read_int32(f))
+ elif dtype == 11:
+ return ObjectPointer(_read_int32(f))
+ elif dtype == 12:
+ return _read_uint16(f)
+ elif dtype == 13:
+ return _read_uint32(f)
+ elif dtype == 14:
+ return _read_int64(f)
+ elif dtype == 15:
+ return _read_uint64(f)
+ else:
+ raise Exception("Unknown IDL type: %i - please report this" % dtype)
+
+
+def _read_structure(f, array_desc, struct_desc):
+ '''
+ Read a structure, with the array and structure descriptors given as
+ `array_desc` and `structure_desc` respectively.
+ '''
+
+ nrows = array_desc['nelements']
+ columns = struct_desc['tagtable']
+
+ dtype = []
+ for col in columns:
+ if col['structure'] or col['array']:
+ dtype.append(((col['name'].lower(), col['name']), np.object_))
+ else:
+ if col['typecode'] in DTYPE_DICT:
+ dtype.append(((col['name'].lower(), col['name']),
+ DTYPE_DICT[col['typecode']]))
+ else:
+ raise Exception("Variable type %i not implemented" %
+ col['typecode'])
+
+ structure = np.recarray((nrows, ), dtype=dtype)
+
+ for i in range(nrows):
+ for col in columns:
+ dtype = col['typecode']
+ if col['structure']:
+ structure[col['name']][i] = _read_structure(f,
+ struct_desc['arrtable'][col['name']],
+ struct_desc['structtable'][col['name']])
+ elif col['array']:
+ structure[col['name']][i] = _read_array(f, dtype,
+ struct_desc['arrtable'][col['name']])
+ else:
+ structure[col['name']][i] = _read_data(f, dtype)
+
+ # Reshape structure if needed
+ if array_desc['ndims'] > 1:
+ dims = array_desc['dims'][:int(array_desc['ndims'])]
+ dims.reverse()
+ structure = structure.reshape(dims)
+
+ return structure
+
+
+def _read_array(f, typecode, array_desc):
+ '''
+ Read an array of type `typecode`, with the array descriptor given as
+ `array_desc`.
+ '''
+
+ if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:
+
+ if typecode == 1:
+ nbytes = _read_int32(f)
+ if nbytes != array_desc['nbytes']:
+ warnings.warn("Not able to verify number of bytes from header")
+
+ # Read bytes as numpy array
+ array = np.frombuffer(f.read(array_desc['nbytes']),
+ dtype=DTYPE_DICT[typecode])
+
+ elif typecode in [2, 12]:
+
+ # These are 2 byte types, need to skip every two as they are not packed
+
+ array = np.frombuffer(f.read(array_desc['nbytes']*2),
+ dtype=DTYPE_DICT[typecode])[1::2]
+
+ else:
+
+ # Read bytes into list
+ array = []
+ for i in range(array_desc['nelements']):
+ dtype = typecode
+ data = _read_data(f, dtype)
+ array.append(data)
+
+ array = np.array(array, dtype=np.object_)
+
+ # Reshape array if needed
+ if array_desc['ndims'] > 1:
+ dims = array_desc['dims'][:int(array_desc['ndims'])]
+ dims.reverse()
+ array = array.reshape(dims)
+
+ # Go to next alignment position
+ _align_32(f)
+
+ return array
+
+
+def _read_record(f):
+ '''Function to read in a full record'''
+
+ record = {'rectype': _read_long(f)}
+
+ nextrec = _read_uint32(f)
+ nextrec += _read_uint32(f) * 2**32
+
+ _skip_bytes(f, 4)
+
+ if not record['rectype'] in RECTYPE_DICT:
+ raise Exception("Unknown RECTYPE: %i" % record['rectype'])
+
+ record['rectype'] = RECTYPE_DICT[record['rectype']]
+
+ if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:
+
+ if record['rectype'] == "VARIABLE":
+ record['varname'] = _read_string(f)
+ else:
+ record['heap_index'] = _read_long(f)
+ _skip_bytes(f, 4)
+
+ rectypedesc = _read_typedesc(f)
+
+ if rectypedesc['typecode'] == 0:
+
+ if nextrec == f.tell():
+ record['data'] = None # Indicates NULL value
+ else:
+ raise ValueError("Unexpected type code: 0")
+
+ else:
+
+ varstart = _read_long(f)
+ if varstart != 7:
+ raise Exception("VARSTART is not 7")
+
+ if rectypedesc['structure']:
+ record['data'] = _read_structure(f, rectypedesc['array_desc'],
+ rectypedesc['struct_desc'])
+ elif rectypedesc['array']:
+ record['data'] = _read_array(f, rectypedesc['typecode'],
+ rectypedesc['array_desc'])
+ else:
+ dtype = rectypedesc['typecode']
+ record['data'] = _read_data(f, dtype)
+
+ elif record['rectype'] == "TIMESTAMP":
+
+ _skip_bytes(f, 4*256)
+ record['date'] = _read_string(f)
+ record['user'] = _read_string(f)
+ record['host'] = _read_string(f)
+
+ elif record['rectype'] == "VERSION":
+
+ record['format'] = _read_long(f)
+ record['arch'] = _read_string(f)
+ record['os'] = _read_string(f)
+ record['release'] = _read_string(f)
+
+    elif record['rectype'] == "IDENTIFICATION":
+
+ record['author'] = _read_string(f)
+ record['title'] = _read_string(f)
+ record['idcode'] = _read_string(f)
+
+ elif record['rectype'] == "NOTICE":
+
+ record['notice'] = _read_string(f)
+
+ elif record['rectype'] == "DESCRIPTION":
+
+ record['description'] = _read_string_data(f)
+
+ elif record['rectype'] == "HEAP_HEADER":
+
+ record['nvalues'] = _read_long(f)
+ record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]
+
+    elif record['rectype'] == "COMMON_VARIABLE":
+
+ record['nvars'] = _read_long(f)
+ record['name'] = _read_string(f)
+ record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]
+
+ elif record['rectype'] == "END_MARKER":
+
+ record['end'] = True
+
+ elif record['rectype'] == "UNKNOWN":
+
+ warnings.warn("Skipping UNKNOWN record")
+
+ elif record['rectype'] == "SYSTEM_VARIABLE":
+
+ warnings.warn("Skipping SYSTEM_VARIABLE record")
+
+ else:
+
+ raise Exception("record['rectype']=%s not implemented" %
+ record['rectype'])
+
+ f.seek(nextrec)
+
+ return record
+
+
+def _read_typedesc(f):
+ '''Function to read in a type descriptor'''
+
+ typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)}
+
+ if typedesc['varflags'] & 2 == 2:
+ raise Exception("System variables not implemented")
+
+ typedesc['array'] = typedesc['varflags'] & 4 == 4
+ typedesc['structure'] = typedesc['varflags'] & 32 == 32
+
+ if typedesc['structure']:
+ typedesc['array_desc'] = _read_arraydesc(f)
+ typedesc['struct_desc'] = _read_structdesc(f)
+ elif typedesc['array']:
+ typedesc['array_desc'] = _read_arraydesc(f)
+
+ return typedesc
+
+
+def _read_arraydesc(f):
+ '''Function to read in an array descriptor'''
+
+ arraydesc = {'arrstart': _read_long(f)}
+
+ if arraydesc['arrstart'] == 8:
+
+ _skip_bytes(f, 4)
+
+ arraydesc['nbytes'] = _read_long(f)
+ arraydesc['nelements'] = _read_long(f)
+ arraydesc['ndims'] = _read_long(f)
+
+ _skip_bytes(f, 8)
+
+ arraydesc['nmax'] = _read_long(f)
+
+ arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
+
+ elif arraydesc['arrstart'] == 18:
+
+ warnings.warn("Using experimental 64-bit array read")
+
+ _skip_bytes(f, 8)
+
+ arraydesc['nbytes'] = _read_uint64(f)
+ arraydesc['nelements'] = _read_uint64(f)
+ arraydesc['ndims'] = _read_long(f)
+
+ _skip_bytes(f, 8)
+
+ arraydesc['nmax'] = 8
+
+ arraydesc['dims'] = []
+ for d in range(arraydesc['nmax']):
+ v = _read_long(f)
+ if v != 0:
+ raise Exception("Expected a zero in ARRAY_DESC")
+ arraydesc['dims'].append(_read_long(f))
+
+ else:
+
+ raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])
+
+ return arraydesc
+
+
+def _read_structdesc(f):
+ '''Function to read in a structure descriptor'''
+
+ structdesc = {}
+
+ structstart = _read_long(f)
+ if structstart != 9:
+ raise Exception("STRUCTSTART should be 9")
+
+ structdesc['name'] = _read_string(f)
+ predef = _read_long(f)
+ structdesc['ntags'] = _read_long(f)
+ structdesc['nbytes'] = _read_long(f)
+
+ structdesc['predef'] = predef & 1
+ structdesc['inherits'] = predef & 2
+ structdesc['is_super'] = predef & 4
+
+ if not structdesc['predef']:
+
+ structdesc['tagtable'] = [_read_tagdesc(f)
+ for _ in range(structdesc['ntags'])]
+
+ for tag in structdesc['tagtable']:
+ tag['name'] = _read_string(f)
+
+ structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
+ for tag in structdesc['tagtable']
+ if tag['array']}
+
+ structdesc['structtable'] = {tag['name']: _read_structdesc(f)
+ for tag in structdesc['tagtable']
+ if tag['structure']}
+
+ if structdesc['inherits'] or structdesc['is_super']:
+ structdesc['classname'] = _read_string(f)
+ structdesc['nsupclasses'] = _read_long(f)
+ structdesc['supclassnames'] = [
+ _read_string(f) for _ in range(structdesc['nsupclasses'])]
+ structdesc['supclasstable'] = [
+ _read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
+
+ STRUCT_DICT[structdesc['name']] = structdesc
+
+ else:
+
+ if not structdesc['name'] in STRUCT_DICT:
+ raise Exception("PREDEF=1 but can't find definition")
+
+ structdesc = STRUCT_DICT[structdesc['name']]
+
+ return structdesc
+
+
+def _read_tagdesc(f):
+ '''Function to read in a tag descriptor'''
+
+ tagdesc = {'offset': _read_long(f)}
+
+ if tagdesc['offset'] == -1:
+ tagdesc['offset'] = _read_uint64(f)
+
+ tagdesc['typecode'] = _read_long(f)
+ tagflags = _read_long(f)
+
+ tagdesc['array'] = tagflags & 4 == 4
+ tagdesc['structure'] = tagflags & 32 == 32
+ tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT
+ # Assume '10'x is scalar
+
+ return tagdesc
+
+
+def _replace_heap(variable, heap):
+
+ if isinstance(variable, Pointer):
+
+ while isinstance(variable, Pointer):
+
+ if variable.index == 0:
+ variable = None
+ else:
+ if variable.index in heap:
+ variable = heap[variable.index]
+ else:
+ warnings.warn("Variable referenced by pointer not found "
+ "in heap: variable will be set to None")
+ variable = None
+
+ replace, new = _replace_heap(variable, heap)
+
+ if replace:
+ variable = new
+
+ return True, variable
+
+ elif isinstance(variable, np.core.records.recarray):
+
+ # Loop over records
+ for ir, record in enumerate(variable):
+
+ replace, new = _replace_heap(record, heap)
+
+ if replace:
+ variable[ir] = new
+
+ return False, variable
+
+ elif isinstance(variable, np.core.records.record):
+
+ # Loop over values
+ for iv, value in enumerate(variable):
+
+ replace, new = _replace_heap(value, heap)
+
+ if replace:
+ variable[iv] = new
+
+ return False, variable
+
+ elif isinstance(variable, np.ndarray):
+
+ # Loop over values if type is np.object_
+ if variable.dtype.type is np.object_:
+
+ for iv in range(variable.size):
+
+ replace, new = _replace_heap(variable.item(iv), heap)
+
+ if replace:
+ variable.itemset(iv, new)
+
+ return False, variable
+
+ else:
+
+ return False, variable
+
+
+class AttrDict(dict):
+ '''
+ A case-insensitive dictionary with access via item, attribute, and call
+ notations:
+
+ >>> d = AttrDict()
+ >>> d['Variable'] = 123
+ >>> d['Variable']
+ 123
+ >>> d.Variable
+ 123
+ >>> d.variable
+ 123
+ >>> d('VARIABLE')
+ 123
+ '''
+
+ def __init__(self, init={}):
+ dict.__init__(self, init)
+
+ def __getitem__(self, name):
+ return super(AttrDict, self).__getitem__(name.lower())
+
+ def __setitem__(self, key, value):
+ return super(AttrDict, self).__setitem__(key.lower(), value)
+
+ __getattr__ = __getitem__
+ __setattr__ = __setitem__
+ __call__ = __getitem__
+
+
+def readsav(file_name, idict=None, python_dict=False,
+ uncompressed_file_name=None, verbose=False):
+ """
+ Read an IDL .sav file.
+
+ Parameters
+ ----------
+ file_name : str
+ Name of the IDL save file.
+ idict : dict, optional
+ Dictionary in which to insert .sav file variables.
+ python_dict : bool, optional
+ By default, the object return is not a Python dictionary, but a
+ case-insensitive dictionary with item, attribute, and call access
+ to variables. To get a standard Python dictionary, set this option
+ to True.
+ uncompressed_file_name : str, optional
+ This option only has an effect for .sav files written with the
+ /compress option. If a file name is specified, compressed .sav
+ files are uncompressed to this file. Otherwise, readsav will use
+ the `tempfile` module to determine a temporary filename
+ automatically, and will remove the temporary file upon successfully
+ reading it in.
+ verbose : bool, optional
+ Whether to print out information about the save file, including
+ the records read, and available variables.
+
+ Returns
+ -------
+ idl_dict : AttrDict or dict
+ If `python_dict` is set to False (default), this function returns a
+ case-insensitive dictionary with item, attribute, and call access
+ to variables. If `python_dict` is set to True, this function
+ returns a Python dictionary with all variable names in lowercase.
+ If `idict` was specified, then variables are written to the
+ dictionary specified, and the updated dictionary is returned.
+
+ Examples
+ --------
+ >>> from os.path import dirname, join as pjoin
+ >>> import scipy.io as sio
+ >>> from scipy.io import readsav
+
+ Get the filename for an example .sav file from the tests/data directory.
+
+ >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
+ >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
+
+ Load the .sav file contents.
+
+ >>> sav_data = readsav(sav_fname)
+
+ Get keys of the .sav file contents.
+
+ >>> print(sav_data.keys())
+ dict_keys(['array1d'])
+
+ Access a content with a key.
+
+ >>> print(sav_data['array1d'])
+ [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+ 0. 0. 0.]
+
+ """
+
+ # Initialize record and variable holders
+ records = []
+ if python_dict or idict:
+ variables = {}
+ else:
+ variables = AttrDict()
+
+ # Open the IDL file
+ f = open(file_name, 'rb')
+
+ # Read the signature, which should be 'SR'
+ signature = _read_bytes(f, 2)
+ if signature != b'SR':
+ raise Exception("Invalid SIGNATURE: %s" % signature)
+
+ # Next, the record format, which is '\x00\x04' for normal .sav
+ # files, and '\x00\x06' for compressed .sav files.
+ recfmt = _read_bytes(f, 2)
+
+ if recfmt == b'\x00\x04':
+ pass
+
+ elif recfmt == b'\x00\x06':
+
+ if verbose:
+ print("IDL Save file is compressed")
+
+ if uncompressed_file_name:
+ fout = open(uncompressed_file_name, 'w+b')
+ else:
+ fout = tempfile.NamedTemporaryFile(suffix='.sav')
+
+ if verbose:
+ print(" -> expanding to %s" % fout.name)
+
+ # Write header
+ fout.write(b'SR\x00\x04')
+
+ # Cycle through records
+ while True:
+
+ # Read record type
+ rectype = _read_long(f)
+ fout.write(struct.pack('>l', int(rectype)))
+
+ # Read position of next record and return as int
+ nextrec = _read_uint32(f)
+ nextrec += _read_uint32(f) * 2**32
+
+ # Read the unknown 4 bytes
+ unknown = f.read(4)
+
+ # Check if the end of the file has been reached
+ if RECTYPE_DICT[rectype] == 'END_MARKER':
+ fout.write(struct.pack('>I', int(nextrec) % 2**32))
+ fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
+ fout.write(unknown)
+ break
+
+ # Find current position
+ pos = f.tell()
+
+ # Decompress record
+ rec_string = zlib.decompress(f.read(nextrec-pos))
+
+ # Find new position of next record
+ nextrec = fout.tell() + len(rec_string) + 12
+
+ # Write out record
+ fout.write(struct.pack('>I', int(nextrec % 2**32)))
+ fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
+ fout.write(unknown)
+ fout.write(rec_string)
+
+ # Close the original compressed file
+ f.close()
+
+ # Set f to be the decompressed file, and skip the first four bytes
+ f = fout
+ f.seek(4)
+
+ else:
+ raise Exception("Invalid RECFMT: %s" % recfmt)
+
+ # Loop through records, and add them to the list
+ while True:
+ r = _read_record(f)
+ records.append(r)
+ if 'end' in r:
+ if r['end']:
+ break
+
+ # Close the file
+ f.close()
+
+ # Find heap data variables
+ heap = {}
+ for r in records:
+ if r['rectype'] == "HEAP_DATA":
+ heap[r['heap_index']] = r['data']
+
+ # Find all variables
+ for r in records:
+ if r['rectype'] == "VARIABLE":
+ replace, new = _replace_heap(r['data'], heap)
+ if replace:
+ r['data'] = new
+ variables[r['varname'].lower()] = r['data']
+
+ if verbose:
+
+ # Print out timestamp info about the file
+ for record in records:
+ if record['rectype'] == "TIMESTAMP":
+ print("-"*50)
+ print("Date: %s" % record['date'])
+ print("User: %s" % record['user'])
+ print("Host: %s" % record['host'])
+ break
+
+ # Print out version info about the file
+ for record in records:
+ if record['rectype'] == "VERSION":
+ print("-"*50)
+ print("Format: %s" % record['format'])
+ print("Architecture: %s" % record['arch'])
+ print("Operating System: %s" % record['os'])
+ print("IDL Version: %s" % record['release'])
+ break
+
+ # Print out identification info about the file
+ for record in records:
+            if record['rectype'] == "IDENTIFICATION":
+ print("-"*50)
+ print("Author: %s" % record['author'])
+ print("Title: %s" % record['title'])
+ print("ID Code: %s" % record['idcode'])
+ break
+
+ # Print out descriptions saved with the file
+ for record in records:
+ if record['rectype'] == "DESCRIPTION":
+ print("-"*50)
+ print("Description: %s" % record['description'])
+ break
+
+ print("-"*50)
+ print("Successfully read %i records of which:" %
+ (len(records)))
+
+ # Create convenience list of record types
+ rectypes = [r['rectype'] for r in records]
+
+ for rt in set(rectypes):
+ if rt != 'END_MARKER':
+ print(" - %i are of type %s" % (rectypes.count(rt), rt))
+ print("-"*50)
+
+ if 'VARIABLE' in rectypes:
+ print("Available variables:")
+ for var in variables:
+ print(" - %s [%s]" % (var, type(variables[var])))
+ print("-"*50)
+
+ if idict:
+ for var in variables:
+ idict[var] = variables[var]
+ return idict
+ else:
+ return variables
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/__init__.py
new file mode 100644
index 0000000..37196ba
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/__init__.py
@@ -0,0 +1,18 @@
+"""
+Utilities for dealing with MATLAB(R) files
+
+Notes
+-----
+MATLAB(R) is a registered trademark of The MathWorks, Inc., 3 Apple Hill
+Drive, Natick, MA 01760-2098, USA.
+
+"""
+# Matlab file read and write utilities
+from .mio import loadmat, savemat, whosmat
+from . import byteordercodes
+
+__all__ = ['loadmat', 'savemat', 'whosmat', 'byteordercodes']
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/byteordercodes.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/byteordercodes.py
new file mode 100644
index 0000000..46d1069
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/byteordercodes.py
@@ -0,0 +1,68 @@
+''' Byteorder utilities for system - numpy byteorder encoding
+
+Converts a variety of string codes for little endian, big endian,
+native byte order and swapped byte order to explicit NumPy endian
+codes - one of '<' (little endian) or '>' (big endian)
+
+'''
+import sys
+
+sys_is_le = sys.byteorder == 'little'
+native_code = sys_is_le and '<' or '>'
+swapped_code = sys_is_le and '>' or '<'
+
+aliases = {'little': ('little', '<', 'l', 'le'),
+ 'big': ('big', '>', 'b', 'be'),
+ 'native': ('native', '='),
+           'swapped': ('swapped', 's')}
+
+
+def to_numpy_code(code):
+ """
+ Convert various order codings to NumPy format.
+
+ Parameters
+ ----------
+ code : str
+ The code to convert. It is converted to lower case before parsing.
+ Legal values are:
+ 'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=',
+ 'swapped', 's'.
+
+ Returns
+ -------
+ out_code : {'<', '>'}
+ Here '<' is the numpy dtype code for little endian,
+ and '>' is the code for big endian.
+
+ Examples
+ --------
+ >>> import sys
+ >>> sys_is_le == (sys.byteorder == 'little')
+ True
+ >>> to_numpy_code('big')
+ '>'
+ >>> to_numpy_code('little')
+ '<'
+ >>> nc = to_numpy_code('native')
+ >>> nc == '<' if sys_is_le else nc == '>'
+ True
+ >>> sc = to_numpy_code('swapped')
+ >>> sc == '>' if sys_is_le else sc == '<'
+ True
+
+ """
+    if code is None:
+        return native_code
+    code = code.lower()
+ if code in aliases['little']:
+ return '<'
+ elif code in aliases['big']:
+ return '>'
+ elif code in aliases['native']:
+ return native_code
+ elif code in aliases['swapped']:
+ return swapped_code
+ else:
+ raise ValueError(
+ 'We cannot handle byte order %s' % code)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio.py
new file mode 100644
index 0000000..d23601d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio.py
@@ -0,0 +1,336 @@
+"""
+Module for reading and writing matlab (TM) .mat files
+"""
+# Authors: Travis Oliphant, Matthew Brett
+
+from contextlib import contextmanager
+
+from .miobase import get_matfile_version, docfiller
+from .mio4 import MatFile4Reader, MatFile4Writer
+from .mio5 import MatFile5Reader, MatFile5Writer
+
+__all__ = ['mat_reader_factory', 'loadmat', 'savemat', 'whosmat']
+
+
+@contextmanager
+def _open_file_context(file_like, appendmat, mode='rb'):
+ f, opened = _open_file(file_like, appendmat, mode)
+ try:
+ yield f
+ finally:
+ if opened:
+ f.close()
+
+
+def _open_file(file_like, appendmat, mode='rb'):
+ """
+ Open `file_like` and return as file-like object. First, check if object is
+ already file-like; if so, return it as-is. Otherwise, try to pass it
+ to open(). If that fails, and `file_like` is a string, and `appendmat` is true,
+ append '.mat' and try again.
+ """
+ reqs = {'read'} if set(mode) & set('r+') else set()
+ if set(mode) & set('wax+'):
+ reqs.add('write')
+ if reqs.issubset(dir(file_like)):
+ return file_like, False
+
+ try:
+ return open(file_like, mode), True
+ except IOError as e:
+ # Probably "not found"
+ if isinstance(file_like, str):
+ if appendmat and not file_like.endswith('.mat'):
+ file_like += '.mat'
+ return open(file_like, mode), True
+ else:
+ raise IOError(
+ 'Reader needs file name or open file-like object'
+ ) from e
+
+
+@docfiller
+def mat_reader_factory(file_name, appendmat=True, **kwargs):
+ """
+ Create reader for matlab .mat format files.
+
+ Parameters
+ ----------
+ %(file_arg)s
+ %(append_arg)s
+ %(load_args)s
+ %(struct_arg)s
+
+ Returns
+ -------
+ matreader : MatFileReader object
+ Initialized instance of MatFileReader class matching the mat file
+ type detected in `filename`.
+ file_opened : bool
+ Whether the file was opened by this routine.
+
+ """
+ byte_stream, file_opened = _open_file(file_name, appendmat)
+ mjv, mnv = get_matfile_version(byte_stream)
+ if mjv == 0:
+ return MatFile4Reader(byte_stream, **kwargs), file_opened
+ elif mjv == 1:
+ return MatFile5Reader(byte_stream, **kwargs), file_opened
+ elif mjv == 2:
+ raise NotImplementedError('Please use HDF reader for matlab v7.3 files')
+ else:
+ raise TypeError('Did not recognize version %s' % mjv)
+
+
+@docfiller
+def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
+ """
+ Load MATLAB file.
+
+ Parameters
+ ----------
+ file_name : str
+ Name of the mat file (do not need .mat extension if
+ appendmat==True). Can also pass open file-like object.
+ mdict : dict, optional
+ Dictionary in which to insert matfile variables.
+ appendmat : bool, optional
+ True to append the .mat extension to the end of the given
+ filename, if not already present.
+ byte_order : str or None, optional
+ None by default, implying byte order guessed from mat
+ file. Otherwise can be one of ('native', '=', 'little', '<',
+ 'BIG', '>').
+ mat_dtype : bool, optional
+ If True, return arrays in same dtype as would be loaded into
+ MATLAB (instead of the dtype with which they are saved).
+ squeeze_me : bool, optional
+ Whether to squeeze unit matrix dimensions or not.
+ chars_as_strings : bool, optional
+ Whether to convert char arrays to string arrays.
+ matlab_compatible : bool, optional
+ Returns matrices as would be loaded by MATLAB (implies
+ squeeze_me=False, chars_as_strings=False, mat_dtype=True,
+ struct_as_record=True).
+ struct_as_record : bool, optional
+ Whether to load MATLAB structs as NumPy record arrays, or as
+ old-style NumPy arrays with dtype=object. Setting this flag to
+ False replicates the behavior of scipy version 0.7.x (returning
+ NumPy object arrays). The default setting is True, because it
+ allows easier round-trip load and save of MATLAB files.
+ verify_compressed_data_integrity : bool, optional
+ Whether the length of compressed sequences in the MATLAB file
+ should be checked, to ensure that they are not longer than we expect.
+ It is advisable to enable this (the default) because overlong
+ compressed sequences in MATLAB files generally indicate that the
+ files have experienced some sort of corruption.
+ variable_names : None or sequence
+ If None (the default) - read all variables in file. Otherwise,
+ `variable_names` should be a sequence of strings, giving names of the
+ MATLAB variables to read from the file. The reader will skip any
+ variable with a name not in this sequence, possibly saving some read
+ processing.
+ simplify_cells : False, optional
+ If True, return a simplified dict structure (which is useful if the mat
+ file contains cell arrays). Note that this only affects the structure
+ of the result and not its contents (which is identical for both output
+ structures). If True, this automatically sets `struct_as_record` to
+ False and `squeeze_me` to True, which is required to simplify cells.
+
+ Returns
+ -------
+ mat_dict : dict
+ dictionary with variable names as keys, and loaded matrices as
+ values.
+
+ Notes
+ -----
+ v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
+
+ You will need an HDF5 Python library to read MATLAB 7.3 format mat
+ files. Because SciPy does not supply one, we do not implement the
+ HDF5 / 7.3 interface here.
+
+ Examples
+ --------
+ >>> from os.path import dirname, join as pjoin
+ >>> import scipy.io as sio
+
+ Get the filename for an example .mat file from the tests/data directory.
+
+ >>> data_dir = pjoin(dirname(sio.__file__), 'matlab', 'tests', 'data')
+ >>> mat_fname = pjoin(data_dir, 'testdouble_7.4_GLNX86.mat')
+
+ Load the .mat file contents.
+
+ >>> mat_contents = sio.loadmat(mat_fname)
+
+ The result is a dictionary, one key/value pair for each variable:
+
+ >>> sorted(mat_contents.keys())
+ ['__globals__', '__header__', '__version__', 'testdouble']
+ >>> mat_contents['testdouble']
+ array([[0. , 0.78539816, 1.57079633, 2.35619449, 3.14159265,
+ 3.92699082, 4.71238898, 5.49778714, 6.28318531]])
+
+ By default SciPy reads MATLAB structs as structured NumPy arrays where the
+ dtype fields are of type `object` and the names correspond to the MATLAB
+ struct field names. This can be disabled by setting the optional argument
+ `struct_as_record=False`.
+
+ Get the filename for an example .mat file that contains a MATLAB struct
+ called `teststruct` and load the contents.
+
+ >>> matstruct_fname = pjoin(data_dir, 'teststruct_7.4_GLNX86.mat')
+ >>> matstruct_contents = sio.loadmat(matstruct_fname)
+ >>> teststruct = matstruct_contents['teststruct']
+ >>> teststruct.dtype
+ dtype([('stringfield', 'O'), ('doublefield', 'O'), ('complexfield', 'O')])
+
+ The size of the structured array is the size of the MATLAB struct, not the
+ number of elements in any particular field. The shape defaults to 2-D
+ unless the optional argument `squeeze_me=True`, in which case all length 1
+ dimensions are removed.
+
+ >>> teststruct.size
+ 1
+ >>> teststruct.shape
+ (1, 1)
+
+ Get the 'stringfield' of the first element in the MATLAB struct.
+
+ >>> teststruct[0, 0]['stringfield']
+    array(['Rats live on no evil star.'],
+          dtype='<U26')
+
+    Get the 'doublefield' of the first element in the MATLAB struct.
+
+    >>> teststruct['doublefield'][0, 0]
+ array([[ 1.41421356, 2.71828183, 3.14159265]])
+
+ Load the MATLAB struct, squeezing out length 1 dimensions, and get the item
+ from the 'complexfield'.
+
+ >>> matstruct_squeezed = sio.loadmat(matstruct_fname, squeeze_me=True)
+ >>> matstruct_squeezed['teststruct'].shape
+ ()
+ >>> matstruct_squeezed['teststruct']['complexfield'].shape
+ ()
+ >>> matstruct_squeezed['teststruct']['complexfield'].item()
+ array([ 1.41421356+1.41421356j, 2.71828183+2.71828183j,
+ 3.14159265+3.14159265j])
+ """
+ variable_names = kwargs.pop('variable_names', None)
+ with _open_file_context(file_name, appendmat) as f:
+ MR, _ = mat_reader_factory(f, **kwargs)
+ matfile_dict = MR.get_variables(variable_names)
+
+ if mdict is not None:
+ mdict.update(matfile_dict)
+ else:
+ mdict = matfile_dict
+
+ return mdict
+
+
+@docfiller
+def savemat(file_name, mdict,
+ appendmat=True,
+ format='5',
+ long_field_names=False,
+ do_compression=False,
+ oned_as='row'):
+ """
+ Save a dictionary of names and arrays into a MATLAB-style .mat file.
+
+ This saves the array objects in the given dictionary to a MATLAB-
+ style .mat file.
+
+ Parameters
+ ----------
+ file_name : str or file-like object
+ Name of the .mat file (.mat extension not needed if ``appendmat ==
+ True``).
+ Can also pass open file_like object.
+ mdict : dict
+ Dictionary from which to save matfile variables.
+ appendmat : bool, optional
+ True (the default) to append the .mat extension to the end of the
+ given filename, if not already present.
+ format : {'5', '4'}, string, optional
+ '5' (the default) for MATLAB 5 and up (to 7.2),
+ '4' for MATLAB 4 .mat files.
+ long_field_names : bool, optional
+ False (the default) - maximum field name length in a structure is
+ 31 characters which is the documented maximum length.
+ True - maximum field name length in a structure is 63 characters
+ which works for MATLAB 7.6+.
+ do_compression : bool, optional
+ Whether or not to compress matrices on write. Default is False.
+ oned_as : {'row', 'column'}, optional
+ If 'column', write 1-D NumPy arrays as column vectors.
+ If 'row', write 1-D NumPy arrays as row vectors.
+
+ Examples
+ --------
+ >>> from scipy.io import savemat
+ >>> a = np.arange(20)
+ >>> mdic = {"a": a, "label": "experiment"}
+ >>> mdic
+ {'a': array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19]),
+ 'label': 'experiment'}
+ >>> savemat("matlab_matrix.mat", mdic)
+ """
+ with _open_file_context(file_name, appendmat, 'wb') as file_stream:
+ if format == '4':
+ if long_field_names:
+ raise ValueError("Long field names are not available for version 4 files")
+ MW = MatFile4Writer(file_stream, oned_as)
+ elif format == '5':
+ MW = MatFile5Writer(file_stream,
+ do_compression=do_compression,
+ unicode_strings=True,
+ long_field_names=long_field_names,
+ oned_as=oned_as)
+ else:
+ raise ValueError("Format should be '4' or '5'")
+ MW.put_variables(mdict)
+
+
+@docfiller
+def whosmat(file_name, appendmat=True, **kwargs):
+ """
+ List variables inside a MATLAB file.
+
+ Parameters
+ ----------
+ %(file_arg)s
+ %(append_arg)s
+ %(load_args)s
+ %(struct_arg)s
+
+ Returns
+ -------
+ variables : list of tuples
+ A list of tuples, where each tuple holds the matrix name (a string),
+ its shape (tuple of ints), and its data class (a string).
+ Possible data classes are: int8, uint8, int16, uint16, int32, uint32,
+ int64, uint64, single, double, cell, struct, object, char, sparse,
+ function, opaque, logical, unknown.
+
+ Notes
+ -----
+ v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
+
+ You will need an HDF5 python library to read matlab 7.3 format mat
+ files. Because SciPy does not supply one, we do not implement the
+ HDF5 / 7.3 interface here.
+
+ .. versionadded:: 0.12.0
+
+ """
+ with _open_file_context(file_name, appendmat) as f:
+ ML, file_opened = mat_reader_factory(f, **kwargs)
+ variables = ML.list_variables()
+ return variables
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio4.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio4.py
new file mode 100644
index 0000000..dc76861
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio4.py
@@ -0,0 +1,614 @@
+''' Classes for read / write of matlab (TM) 4 files
+'''
+import sys
+import warnings
+
+import numpy as np
+from numpy.compat import asbytes, asstr
+
+import scipy.sparse
+
+from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
+ convert_dtypes, arr_to_chars, arr_dtype_number)
+
+from .mio_utils import squeeze_element, chars_to_strings
+from functools import reduce
+
+
+SYS_LITTLE_ENDIAN = sys.byteorder == 'little'
+
+miDOUBLE = 0
+miSINGLE = 1
+miINT32 = 2
+miINT16 = 3
+miUINT16 = 4
+miUINT8 = 5
+
+mdtypes_template = {
+ miDOUBLE: 'f8',
+ miSINGLE: 'f4',
+ miINT32: 'i4',
+ miINT16: 'i2',
+ miUINT16: 'u2',
+ miUINT8: 'u1',
+ 'header': [('mopt', 'i4'),
+ ('mrows', 'i4'),
+ ('ncols', 'i4'),
+ ('imagf', 'i4'),
+ ('namlen', 'i4')],
+ 'U1': 'U1',
+ }
+
+np_to_mtypes = {
+ 'f8': miDOUBLE,
+ 'c32': miDOUBLE,
+ 'c24': miDOUBLE,
+ 'c16': miDOUBLE,
+ 'f4': miSINGLE,
+ 'c8': miSINGLE,
+ 'i4': miINT32,
+ 'i2': miINT16,
+ 'u2': miUINT16,
+ 'u1': miUINT8,
+ 'S1': miUINT8,
+ }
+
+# matrix classes
+mxFULL_CLASS = 0
+mxCHAR_CLASS = 1
+mxSPARSE_CLASS = 2
+
+order_codes = {
+ 0: '<',
+ 1: '>',
+ 2: 'VAX D-float', # !
+ 3: 'VAX G-float',
+ 4: 'Cray', # !!
+ }
+
+mclass_info = {
+ mxFULL_CLASS: 'double',
+ mxCHAR_CLASS: 'char',
+ mxSPARSE_CLASS: 'sparse',
+ }
+
+
+class VarHeader4(object):
+ # Mat4 variables never logical or global
+ is_logical = False
+ is_global = False
+
+ def __init__(self,
+ name,
+ dtype,
+ mclass,
+ dims,
+ is_complex):
+ self.name = name
+ self.dtype = dtype
+ self.mclass = mclass
+ self.dims = dims
+ self.is_complex = is_complex
+
+
+class VarReader4(object):
+ ''' Class to read matlab 4 variables '''
+
+ def __init__(self, file_reader):
+ self.file_reader = file_reader
+ self.mat_stream = file_reader.mat_stream
+ self.dtypes = file_reader.dtypes
+ self.chars_as_strings = file_reader.chars_as_strings
+ self.squeeze_me = file_reader.squeeze_me
+
+ def read_header(self):
+ ''' Read and return header for variable '''
+ data = read_dtype(self.mat_stream, self.dtypes['header'])
+ name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00')
+ if data['mopt'] < 0 or data['mopt'] > 5000:
+ raise ValueError('Mat 4 mopt wrong format, byteswapping problem?')
+ M, rest = divmod(data['mopt'], 1000) # order code
+ if M not in (0, 1):
+ warnings.warn("We do not support byte ordering '%s'; returned "
+ "data may be corrupt" % order_codes[M],
+ UserWarning)
+ O, rest = divmod(rest, 100) # unused, should be 0
+ if O != 0:
+ raise ValueError('O in MOPT integer should be 0, wrong format?')
+ P, rest = divmod(rest, 10) # data type code e.g miDOUBLE (see above)
+ T = rest # matrix type code e.g., mxFULL_CLASS (see above)
+ dims = (data['mrows'], data['ncols'])
+ is_complex = data['imagf'] == 1
+ dtype = self.dtypes[P]
+ return VarHeader4(
+ name,
+ dtype,
+ T,
+ dims,
+ is_complex)
+
+ def array_from_header(self, hdr, process=True):
+ mclass = hdr.mclass
+ if mclass == mxFULL_CLASS:
+ arr = self.read_full_array(hdr)
+ elif mclass == mxCHAR_CLASS:
+ arr = self.read_char_array(hdr)
+ if process and self.chars_as_strings:
+ arr = chars_to_strings(arr)
+ elif mclass == mxSPARSE_CLASS:
+ # no current processing (below) makes sense for sparse
+ return self.read_sparse_array(hdr)
+ else:
+ raise TypeError('No reader for class code %s' % mclass)
+ if process and self.squeeze_me:
+ return squeeze_element(arr)
+ return arr
+
+ def read_sub_array(self, hdr, copy=True):
+ ''' Mat4 read using header `hdr` dtype and dims
+
+ Parameters
+ ----------
+ hdr : object
+ object with attributes ``dtype``, ``dims``. dtype is assumed to be
+ the correct endianness
+ copy : bool, optional
+ copies array before return if True (default True)
+ (buffer is usually read only)
+
+ Returns
+ -------
+ arr : ndarray
+ of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims``
+ '''
+ dt = hdr.dtype
+ dims = hdr.dims
+ num_bytes = dt.itemsize
+ for d in dims:
+ num_bytes *= d
+ buffer = self.mat_stream.read(int(num_bytes))
+ if len(buffer) != num_bytes:
+ raise ValueError("Not enough bytes to read matrix '%s'; is this "
+ "a badly-formed file? Consider listing matrices "
+ "with `whosmat` and loading named matrices with "
+ "`variable_names` kwarg to `loadmat`" % hdr.name)
+ arr = np.ndarray(shape=dims,
+ dtype=dt,
+ buffer=buffer,
+ order='F')
+ if copy:
+ arr = arr.copy()
+ return arr
+
+ def read_full_array(self, hdr):
+ ''' Full (rather than sparse) matrix getter
+
+ Read matrix (array) can be real or complex
+
+ Parameters
+ ----------
+ hdr : ``VarHeader4`` instance
+
+ Returns
+ -------
+ arr : ndarray
+ complex array if ``hdr.is_complex`` is True, otherwise a real
+ numeric array
+ '''
+ if hdr.is_complex:
+ # avoid array copy to save memory
+ res = self.read_sub_array(hdr, copy=False)
+ res_j = self.read_sub_array(hdr, copy=False)
+ return res + (res_j * 1j)
+ return self.read_sub_array(hdr)
+
+ def read_char_array(self, hdr):
+ ''' latin-1 text matrix (char matrix) reader
+
+ Parameters
+ ----------
+ hdr : ``VarHeader4`` instance
+
+ Returns
+ -------
+ arr : ndarray
+ with dtype 'U1', shape given by `hdr` ``dims``
+ '''
+ arr = self.read_sub_array(hdr).astype(np.uint8)
+ S = arr.tobytes().decode('latin-1')
+ return np.ndarray(shape=hdr.dims,
+ dtype=np.dtype('U1'),
+ buffer=np.array(S)).copy()
+
+ def read_sparse_array(self, hdr):
+ ''' Read and return sparse matrix type
+
+ Parameters
+ ----------
+ hdr : ``VarHeader4`` instance
+
+ Returns
+ -------
+ arr : ``scipy.sparse.coo_matrix``
+ with dtype ``float`` and shape read from the sparse matrix data
+
+ Notes
+ -----
+ MATLAB 4 real sparse arrays are saved in a N+1 by 3 array format, where
+ N is the number of non-zero values. Column 1 values [0:N] are the
+        (1-based) row indices of each non-zero value, column 2 [0:N] are the
+ column indices, column 3 [0:N] are the (real) values. The last values
+ [-1,0:2] of the rows, column indices are shape[0] and shape[1]
+ respectively of the output matrix. The last value for the values column
+ is a padding 0. mrows and ncols values from the header give the shape of
+ the stored matrix, here [N+1, 3]. Complex data are saved as a 4 column
+ matrix, where the fourth column contains the imaginary component; the
+ last value is again 0. Complex sparse data do *not* have the header
+ ``imagf`` field set to True; the fact that the data are complex is only
+ detectable because there are 4 storage columns.
+ '''
+ res = self.read_sub_array(hdr)
+ tmp = res[:-1,:]
+ # All numbers are float64 in Matlab, but SciPy sparse expects int shape
+ dims = (int(res[-1,0]), int(res[-1,1]))
+ I = np.ascontiguousarray(tmp[:,0],dtype='intc') # fixes byte order also
+ J = np.ascontiguousarray(tmp[:,1],dtype='intc')
+ I -= 1 # for 1-based indexing
+ J -= 1
+ if res.shape[1] == 3:
+ V = np.ascontiguousarray(tmp[:,2],dtype='float')
+ else:
+ V = np.ascontiguousarray(tmp[:,2],dtype='complex')
+ V.imag = tmp[:,3]
+ return scipy.sparse.coo_matrix((V,(I,J)), dims)
+
+ def shape_from_header(self, hdr):
+ '''Read the shape of the array described by the header.
+ The file position after this call is unspecified.
+ '''
+ mclass = hdr.mclass
+ if mclass == mxFULL_CLASS:
+ shape = tuple(map(int, hdr.dims))
+ elif mclass == mxCHAR_CLASS:
+ shape = tuple(map(int, hdr.dims))
+ if self.chars_as_strings:
+ shape = shape[:-1]
+ elif mclass == mxSPARSE_CLASS:
+ dt = hdr.dtype
+ dims = hdr.dims
+
+ if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1):
+ return ()
+
+ # Read only the row and column counts
+ self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
+ rows = np.ndarray(shape=(), dtype=dt,
+ buffer=self.mat_stream.read(dt.itemsize))
+ self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
+ cols = np.ndarray(shape=(), dtype=dt,
+ buffer=self.mat_stream.read(dt.itemsize))
+
+ shape = (int(rows), int(cols))
+ else:
+ raise TypeError('No reader for class code %s' % mclass)
+
+ if self.squeeze_me:
+ shape = tuple([x for x in shape if x != 1])
+ return shape
+
+
+class MatFile4Reader(MatFileReader):
+ ''' Reader for Mat4 files '''
+ @docfiller
+ def __init__(self, mat_stream, *args, **kwargs):
+ ''' Initialize matlab 4 file reader
+
+ %(matstream_arg)s
+ %(load_args)s
+ '''
+ super(MatFile4Reader, self).__init__(mat_stream, *args, **kwargs)
+ self._matrix_reader = None
+
+ def guess_byte_order(self):
+ self.mat_stream.seek(0)
+ mopt = read_dtype(self.mat_stream, np.dtype('i4'))
+ self.mat_stream.seek(0)
+ if mopt == 0:
+ return '<'
+ if mopt < 0 or mopt > 5000:
+ # Number must have been byteswapped
+ return SYS_LITTLE_ENDIAN and '>' or '<'
+ # Not byteswapped
+ return SYS_LITTLE_ENDIAN and '<' or '>'
+
+ def initialize_read(self):
+ ''' Run when beginning read of variables
+
+ Sets up readers from parameters in `self`
+ '''
+ self.dtypes = convert_dtypes(mdtypes_template, self.byte_order)
+ self._matrix_reader = VarReader4(self)
+
+ def read_var_header(self):
+ ''' Read and return header, next position
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ header : object
+ object that can be passed to self.read_var_array, and that
+ has attributes ``name`` and ``is_global``
+ next_position : int
+ position in stream of next variable
+ '''
+ hdr = self._matrix_reader.read_header()
+ n = reduce(lambda x, y: x*y, hdr.dims, 1) # fast product
+ remaining_bytes = hdr.dtype.itemsize * n
+ if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS:
+ remaining_bytes *= 2
+ next_position = self.mat_stream.tell() + remaining_bytes
+ return hdr, next_position
+
+ def read_var_array(self, header, process=True):
+ ''' Read array, given `header`
+
+ Parameters
+ ----------
+ header : header object
+ object with fields defining variable header
+ process : {True, False}, optional
+ If True, apply recursive post-processing during loading of array.
+
+ Returns
+ -------
+ arr : array
+ array with post-processing applied or not according to
+ `process`.
+ '''
+ return self._matrix_reader.array_from_header(header, process)
+
+ def get_variables(self, variable_names=None):
+ ''' get variables from stream as dictionary
+
+ Parameters
+ ----------
+ variable_names : None or str or sequence of str, optional
+ variable name, or sequence of variable names to get from Mat file /
+ file stream. If None, then get all variables in file.
+ '''
+ if isinstance(variable_names, str):
+ variable_names = [variable_names]
+ elif variable_names is not None:
+ variable_names = list(variable_names)
+ self.mat_stream.seek(0)
+ # set up variable reader
+ self.initialize_read()
+ mdict = {}
+ while not self.end_of_stream():
+ hdr, next_position = self.read_var_header()
+ name = asstr(hdr.name)
+ if variable_names is not None and name not in variable_names:
+ self.mat_stream.seek(next_position)
+ continue
+ mdict[name] = self.read_var_array(hdr)
+ self.mat_stream.seek(next_position)
+ if variable_names is not None:
+ variable_names.remove(name)
+ if len(variable_names) == 0:
+ break
+ return mdict
+
+ def list_variables(self):
+ ''' list variables from stream '''
+ self.mat_stream.seek(0)
+ # set up variable reader
+ self.initialize_read()
+ vars = []
+ while not self.end_of_stream():
+ hdr, next_position = self.read_var_header()
+ name = asstr(hdr.name)
+ shape = self._matrix_reader.shape_from_header(hdr)
+ info = mclass_info.get(hdr.mclass, 'unknown')
+ vars.append((name, shape, info))
+
+ self.mat_stream.seek(next_position)
+ return vars
+
+
+def arr_to_2d(arr, oned_as='row'):
+ ''' Make ``arr`` exactly two dimensional
+
+ If `arr` has more than 2 dimensions, raise a ValueError
+
+ Parameters
+ ----------
+ arr : array
+ oned_as : {'row', 'column'}, optional
+ Whether to reshape 1-D vectors as row vectors or column vectors.
+ See documentation for ``matdims`` for more detail
+
+ Returns
+ -------
+ arr2d : array
+ 2-D version of the array
+ '''
+ dims = matdims(arr, oned_as)
+ if len(dims) > 2:
+ raise ValueError('Matlab 4 files cannot save arrays with more than '
+ '2 dimensions')
+ return arr.reshape(dims)
+
+
+class VarWriter4(object):
+ def __init__(self, file_writer):
+ self.file_stream = file_writer.file_stream
+ self.oned_as = file_writer.oned_as
+
+ def write_bytes(self, arr):
+ self.file_stream.write(arr.tobytes(order='F'))
+
+ def write_string(self, s):
+ self.file_stream.write(s)
+
+ def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0):
+ ''' Write header for given data options
+
+ Parameters
+ ----------
+ name : str
+ name of variable
+ shape : sequence
+ Shape of array as it will be read in matlab
+ P : int, optional
+ code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32,
+ miINT16, miUINT16, miUINT8``
+ T : int, optional
+ code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS,
+ mxSPARSE_CLASS``
+ imagf : int, optional
+ flag indicating complex
+ '''
+ header = np.empty((), mdtypes_template['header'])
+ M = not SYS_LITTLE_ENDIAN
+ O = 0
+ header['mopt'] = (M * 1000 +
+ O * 100 +
+ P * 10 +
+ T)
+ header['mrows'] = shape[0]
+ header['ncols'] = shape[1]
+ header['imagf'] = imagf
+ header['namlen'] = len(name) + 1
+ self.write_bytes(header)
+ self.write_string(asbytes(name + '\0'))
+
+ def write(self, arr, name):
+ ''' Write matrix `arr`, with name `name`
+
+ Parameters
+ ----------
+ arr : array_like
+ array to write
+ name : str
+ name in matlab workspace
+ '''
+ # we need to catch sparse first, because np.asarray returns an
+ # an object array for scipy.sparse
+ if scipy.sparse.issparse(arr):
+ self.write_sparse(arr, name)
+ return
+ arr = np.asarray(arr)
+ dt = arr.dtype
+ if not dt.isnative:
+ arr = arr.astype(dt.newbyteorder('='))
+ dtt = dt.type
+ if dtt is np.object_:
+ raise TypeError('Cannot save object arrays in Mat4')
+ elif dtt is np.void:
+ raise TypeError('Cannot save void type arrays')
+ elif dtt in (np.unicode_, np.string_):
+ self.write_char(arr, name)
+ return
+ self.write_numeric(arr, name)
+
+ def write_numeric(self, arr, name):
+ arr = arr_to_2d(arr, self.oned_as)
+ imagf = arr.dtype.kind == 'c'
+ try:
+ P = np_to_mtypes[arr.dtype.str[1:]]
+ except KeyError:
+ if imagf:
+ arr = arr.astype('c128')
+ else:
+ arr = arr.astype('f8')
+ P = miDOUBLE
+ self.write_header(name,
+ arr.shape,
+ P=P,
+ T=mxFULL_CLASS,
+ imagf=imagf)
+ if imagf:
+ self.write_bytes(arr.real)
+ self.write_bytes(arr.imag)
+ else:
+ self.write_bytes(arr)
+
+ def write_char(self, arr, name):
+ arr = arr_to_chars(arr)
+ arr = arr_to_2d(arr, self.oned_as)
+ dims = arr.shape
+ self.write_header(
+ name,
+ dims,
+ P=miUINT8,
+ T=mxCHAR_CLASS)
+ if arr.dtype.kind == 'U':
+ # Recode unicode to latin1
+ n_chars = np.prod(dims)
+ st_arr = np.ndarray(shape=(),
+ dtype=arr_dtype_number(arr, n_chars),
+ buffer=arr)
+ st = st_arr.item().encode('latin-1')
+ arr = np.ndarray(shape=dims, dtype='S1', buffer=st)
+ self.write_bytes(arr)
+
+ def write_sparse(self, arr, name):
+ ''' Sparse matrices are 2-D
+
+ See docstring for VarReader4.read_sparse_array
+ '''
+ A = arr.tocoo() # convert to sparse COO format (ijv)
+ imagf = A.dtype.kind == 'c'
+ ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8')
+ ijv[:-1,0] = A.row
+ ijv[:-1,1] = A.col
+ ijv[:-1,0:2] += 1 # 1 based indexing
+ if imagf:
+ ijv[:-1,2] = A.data.real
+ ijv[:-1,3] = A.data.imag
+ else:
+ ijv[:-1,2] = A.data
+ ijv[-1,0:2] = A.shape
+ self.write_header(
+ name,
+ ijv.shape,
+ P=miDOUBLE,
+ T=mxSPARSE_CLASS)
+ self.write_bytes(ijv)
+
+
+class MatFile4Writer(object):
+ ''' Class for writing matlab 4 format files '''
+ def __init__(self, file_stream, oned_as=None):
+ self.file_stream = file_stream
+ if oned_as is None:
+ oned_as = 'row'
+ self.oned_as = oned_as
+ self._matrix_writer = None
+
+ def put_variables(self, mdict, write_header=None):
+ ''' Write variables in `mdict` to stream
+
+ Parameters
+ ----------
+ mdict : mapping
+ mapping with method ``items`` return name, contents pairs
+           where ``name`` which will appear in the matlab workspace in
+ file load, and ``contents`` is something writeable to a
+ matlab file, such as a NumPy array.
+ write_header : {None, True, False}
+ If True, then write the matlab file header before writing the
+ variables. If None (the default) then write the file header
+ if we are at position 0 in the stream. By setting False
+ here, and setting the stream position to the end of the file,
+ you can append variables to a matlab file
+ '''
+ # there is no header for a matlab 4 mat file, so we ignore the
+ # ``write_header`` input argument. It's there for compatibility
+ # with the matlab 5 version of this method
+ self._matrix_writer = VarWriter4(self)
+ for name, var in mdict.items():
+ self._matrix_writer.write(var, name)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5.py
new file mode 100644
index 0000000..cb13b81
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5.py
@@ -0,0 +1,893 @@
+''' Classes for read / write of matlab (TM) 5 files
+
+The matfile specification last found here:
+
+https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
+
+(as of December 5 2008)
+'''
+'''
+=================================
+ Note on functions and mat files
+=================================
+
+The document above does not give any hints as to the storage of matlab
+function handles, or anonymous function handles. I had, therefore, to
+guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
+``mxOPAQUE_CLASS`` by looking at example mat files.
+
+``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
+contain a struct matrix with a set pattern of fields. For anonymous
+functions, a sub-fields of one of these fields seems to contain the
+well-named ``mxOPAQUE_CLASS``. This seems to contain:
+
+* array flags as for any matlab matrix
+* 3 int8 strings
+* a matrix
+
+It seems that whenever the mat file contains a ``mxOPAQUE_CLASS``
+instance, there is also an un-named matrix (name == '') at the end of
+the mat file. I'll call this the ``__function_workspace__`` matrix.
+
+When I saved two anonymous functions in a mat file, or appended another
+anonymous function to the mat file, there was still only one
+``__function_workspace__`` un-named matrix at the end, but larger than
+that for a mat file with a single anonymous function, suggesting that
+the workspaces for the two functions had been merged.
+
+The ``__function_workspace__`` matrix appears to be of double class
+(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
+the format of a mini .mat file, without the first 124 bytes of the file
+header (the description and the subsystem_offset), but with the version
+U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
+presumably for 8 byte padding, and then a series of ``miMATRIX``
+entries, as in a standard mat file. The ``miMATRIX`` entries appear to
+be series of un-named (name == '') matrices, and may also contain arrays
+of this same mini-mat format.
+
+I guess that:
+
+* saving an anonymous function back to a mat file will need the
+ associated ``__function_workspace__`` matrix saved as well for the
+ anonymous function to work correctly.
+* appending to a mat file that has a ``__function_workspace__`` would
+ involve first pulling off this workspace, appending, checking whether
+ there were any more anonymous functions appended, and then somehow
+ merging the relevant workspaces, and saving at the end of the mat
+ file.
+
+The mat files I was playing with are in ``tests/data``:
+
+* sqr.mat
+* parabola.mat
+* some_functions.mat
+
+See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging
+script I was working with.
+
+'''
+
+# Small fragments of current code adapted from matfile.py by Heiko
+# Henkelmann; parts of the code for simplify_cells=True adapted from
+# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.
+
+import os
+import time
+import sys
+import zlib
+
+from io import BytesIO
+
+import warnings
+
+import numpy as np
+from numpy.compat import asbytes, asstr
+
+import scipy.sparse
+
+from .byteordercodes import native_code, swapped_code
+
+from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
+ arr_to_chars, arr_dtype_number, MatWriteError,
+ MatReadError, MatReadWarning)
+
+# Reader object for matlab 5 format variables
+from .mio5_utils import VarReader5
+
+# Constants and helper objects
+from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
+ NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
+ miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
+ mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
+ mxDOUBLE_CLASS, mclass_info, mat_struct)
+
+from .streams import ZlibInputStream
+
+
+def _has_struct(elem):
+ """Determine if elem is an array and if first array item is a struct."""
+ return (isinstance(elem, np.ndarray) and (elem.size > 0) and
+ isinstance(elem[0], mat_struct))
+
+
+def _inspect_cell_array(ndarray):
+ """Construct lists from cell arrays (loaded as numpy ndarrays), recursing
+ into items if they contain mat_struct objects."""
+ elem_list = []
+ for sub_elem in ndarray:
+ if isinstance(sub_elem, mat_struct):
+ elem_list.append(_matstruct_to_dict(sub_elem))
+ elif _has_struct(sub_elem):
+ elem_list.append(_inspect_cell_array(sub_elem))
+ else:
+ elem_list.append(sub_elem)
+ return elem_list
+
+
+def _matstruct_to_dict(matobj):
+ """Construct nested dicts from mat_struct objects."""
+ d = {}
+ for f in matobj._fieldnames:
+ elem = matobj.__dict__[f]
+ if isinstance(elem, mat_struct):
+ d[f] = _matstruct_to_dict(elem)
+ elif _has_struct(elem):
+ d[f] = _inspect_cell_array(elem)
+ else:
+ d[f] = elem
+ return d
+
+
+def _simplify_cells(d):
+ """Convert mat objects in dict to nested dicts."""
+ for key in d:
+ if isinstance(d[key], mat_struct):
+ d[key] = _matstruct_to_dict(d[key])
+ elif _has_struct(d[key]):
+ d[key] = _inspect_cell_array(d[key])
+ return d
+
+
+class MatFile5Reader(MatFileReader):
+ ''' Reader for Mat 5 mat files
+ Adds the following attribute to base class
+
+ uint16_codec - char codec to use for uint16 char arrays
+ (defaults to system default codec)
+
+ Uses variable reader that has the following standard interface (see
+ abstract class in ``miobase``)::
+
+ __init__(self, file_reader)
+ read_header(self)
+ array_from_header(self)
+
+ and added interface::
+
+ set_stream(self, stream)
+ read_full_tag(self)
+
+ '''
+ @docfiller
+ def __init__(self,
+ mat_stream,
+ byte_order=None,
+ mat_dtype=False,
+ squeeze_me=False,
+ chars_as_strings=True,
+ matlab_compatible=False,
+ struct_as_record=True,
+ verify_compressed_data_integrity=True,
+ uint16_codec=None,
+ simplify_cells=False):
+ '''Initializer for matlab 5 file format reader
+
+ %(matstream_arg)s
+ %(load_args)s
+ %(struct_arg)s
+ uint16_codec : {None, string}
+ Set codec to use for uint16 char arrays (e.g., 'utf-8').
+ Use system default codec if None
+ '''
+ super(MatFile5Reader, self).__init__(
+ mat_stream,
+ byte_order,
+ mat_dtype,
+ squeeze_me,
+ chars_as_strings,
+ matlab_compatible,
+ struct_as_record,
+ verify_compressed_data_integrity,
+ simplify_cells)
+ # Set uint16 codec
+ if not uint16_codec:
+ uint16_codec = sys.getdefaultencoding()
+ self.uint16_codec = uint16_codec
+ # placeholders for readers - see initialize_read method
+ self._file_reader = None
+ self._matrix_reader = None
+
+ def guess_byte_order(self):
+ ''' Guess byte order.
+ Sets stream pointer to 0 '''
+ self.mat_stream.seek(126)
+ mi = self.mat_stream.read(2)
+ self.mat_stream.seek(0)
+ return mi == b'IM' and '<' or '>'
+
+ def read_file_header(self):
+ ''' Read in mat 5 file header '''
+ hdict = {}
+ hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
+ hdr = read_dtype(self.mat_stream, hdr_dtype)
+ hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
+ v_major = hdr['version'] >> 8
+ v_minor = hdr['version'] & 0xFF
+ hdict['__version__'] = '%d.%d' % (v_major, v_minor)
+ return hdict
+
+ def initialize_read(self):
+ ''' Run when beginning read of variables
+
+ Sets up readers from parameters in `self`
+ '''
+ # reader for top level stream. We need this extra top-level
+ # reader because we use the matrix_reader object to contain
+ # compressed matrices (so they have their own stream)
+ self._file_reader = VarReader5(self)
+ # reader for matrix streams
+ self._matrix_reader = VarReader5(self)
+
+ def read_var_header(self):
+ ''' Read header, return header, next position
+
+ Header has to define at least .name and .is_global
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ header : object
+ object that can be passed to self.read_var_array, and that
+ has attributes .name and .is_global
+ next_position : int
+ position in stream of next variable
+ '''
+ mdtype, byte_count = self._file_reader.read_full_tag()
+ if not byte_count > 0:
+ raise ValueError("Did not read any bytes")
+ next_pos = self.mat_stream.tell() + byte_count
+ if mdtype == miCOMPRESSED:
+ # Make new stream from compressed data
+ stream = ZlibInputStream(self.mat_stream, byte_count)
+ self._matrix_reader.set_stream(stream)
+ check_stream_limit = self.verify_compressed_data_integrity
+ mdtype, byte_count = self._matrix_reader.read_full_tag()
+ else:
+ check_stream_limit = False
+ self._matrix_reader.set_stream(self.mat_stream)
+ if not mdtype == miMATRIX:
+ raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
+ header = self._matrix_reader.read_header(check_stream_limit)
+ return header, next_pos
+
+ def read_var_array(self, header, process=True):
+ ''' Read array, given `header`
+
+ Parameters
+ ----------
+ header : header object
+ object with fields defining variable header
+ process : {True, False} bool, optional
+ If True, apply recursive post-processing during loading of
+ array.
+
+ Returns
+ -------
+ arr : array
+ array with post-processing applied or not according to
+ `process`.
+ '''
+ return self._matrix_reader.array_from_header(header, process)
+
+ def get_variables(self, variable_names=None):
+ ''' get variables from stream as dictionary
+
+ variable_names - optional list of variable names to get
+
+ If variable_names is None, then get all variables in file
+ '''
+ if isinstance(variable_names, str):
+ variable_names = [variable_names]
+ elif variable_names is not None:
+ variable_names = list(variable_names)
+
+ self.mat_stream.seek(0)
+ # Here we pass all the parameters in self to the reading objects
+ self.initialize_read()
+ mdict = self.read_file_header()
+ mdict['__globals__'] = []
+ while not self.end_of_stream():
+ hdr, next_position = self.read_var_header()
+ name = asstr(hdr.name)
+ if name in mdict:
+ warnings.warn('Duplicate variable name "%s" in stream'
+ ' - replacing previous with new\n'
+ 'Consider mio5.varmats_from_mat to split '
+ 'file into single variable files' % name,
+ MatReadWarning, stacklevel=2)
+ if name == '':
+ # can only be a matlab 7 function workspace
+ name = '__function_workspace__'
+ # We want to keep this raw because mat_dtype processing
+ # will break the format (uint8 as mxDOUBLE_CLASS)
+ process = False
+ else:
+ process = True
+ if variable_names is not None and name not in variable_names:
+ self.mat_stream.seek(next_position)
+ continue
+ try:
+ res = self.read_var_array(hdr, process)
+ except MatReadError as err:
+ warnings.warn(
+ 'Unreadable variable "%s", because "%s"' %
+ (name, err),
+ Warning, stacklevel=2)
+ res = "Read error: %s" % err
+ self.mat_stream.seek(next_position)
+ mdict[name] = res
+ if hdr.is_global:
+ mdict['__globals__'].append(name)
+ if variable_names is not None:
+ variable_names.remove(name)
+ if len(variable_names) == 0:
+ break
+ if self.simplify_cells:
+ return _simplify_cells(mdict)
+ else:
+ return mdict
+
+ def list_variables(self):
+ ''' list variables from stream '''
+ self.mat_stream.seek(0)
+ # Here we pass all the parameters in self to the reading objects
+ self.initialize_read()
+ self.read_file_header()
+ vars = []
+ while not self.end_of_stream():
+ hdr, next_position = self.read_var_header()
+ name = asstr(hdr.name)
+ if name == '':
+ # can only be a matlab 7 function workspace
+ name = '__function_workspace__'
+
+ shape = self._matrix_reader.shape_from_header(hdr)
+ if hdr.is_logical:
+ info = 'logical'
+ else:
+ info = mclass_info.get(hdr.mclass, 'unknown')
+ vars.append((name, shape, info))
+
+ self.mat_stream.seek(next_position)
+ return vars
+
+
+def varmats_from_mat(file_obj):
+ """ Pull variables out of mat 5 file as a sequence of mat file objects
+
+ This can be useful with a difficult mat file, containing unreadable
+ variables. This routine pulls the variables out in raw form and puts them,
+ unread, back into a file stream for saving or reading. Another use is the
+ pathological case where there is more than one variable of the same name in
+ the file; this routine returns the duplicates, whereas the standard reader
+ will overwrite duplicates in the returned dictionary.
+
+ The file pointer in `file_obj` will be undefined. File pointers for the
+ returned file-like objects are set at 0.
+
+ Parameters
+ ----------
+ file_obj : file-like
+ file object containing mat file
+
+ Returns
+ -------
+ named_mats : list
+ list contains tuples of (name, BytesIO) where BytesIO is a file-like
+ object containing mat file contents as for a single variable. The
+ BytesIO contains a string with the original header and a single var. If
+ ``var_file_obj`` is an individual BytesIO instance, then save as a mat
+ file with something like ``open('test.mat',
+ 'wb').write(var_file_obj.read())``
+
+ Examples
+ --------
+ >>> import scipy.io
+
+ BytesIO is from the ``io`` module in Python 3, and is ``cStringIO`` for
+ Python < 3.
+
+ >>> mat_fileobj = BytesIO()
+ >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
+ >>> varmats = varmats_from_mat(mat_fileobj)
+ >>> sorted([name for name, str_obj in varmats])
+ ['a', 'b']
+ """
+ rdr = MatFile5Reader(file_obj)
+ file_obj.seek(0)
+ # Raw read of top-level file header
+ hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
+ raw_hdr = file_obj.read(hdr_len)
+ # Initialize variable reading
+ file_obj.seek(0)
+ rdr.initialize_read()
+ rdr.read_file_header()
+ next_position = file_obj.tell()
+ named_mats = []
+ while not rdr.end_of_stream():
+ start_position = next_position
+ hdr, next_position = rdr.read_var_header()
+ name = asstr(hdr.name)
+ # Read raw variable string
+ file_obj.seek(start_position)
+ byte_count = next_position - start_position
+ var_str = file_obj.read(byte_count)
+ # write to stringio object
+ out_obj = BytesIO()
+ out_obj.write(raw_hdr)
+ out_obj.write(var_str)
+ out_obj.seek(0)
+ named_mats.append((name, out_obj))
+ return named_mats
+
+
+class EmptyStructMarker(object):
+ """ Class to indicate presence of empty matlab struct on output """
+
+
+def to_writeable(source):
+ ''' Convert input object ``source`` to something we can write
+
+ Parameters
+ ----------
+ source : object
+
+ Returns
+ -------
+ arr : None or ndarray or EmptyStructMarker
+ If `source` cannot be converted to something we can write to a matfile,
+ return None. If `source` is equivalent to an empty dictionary, return
+ ``EmptyStructMarker``. Otherwise return `source` converted to an
+ ndarray with contents for writing to matfile.
+ '''
+ if isinstance(source, np.ndarray):
+ return source
+ if source is None:
+ return None
+ # Objects that implement mappings
+ is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
+ hasattr(source, 'items'))
+ # Objects that don't implement mappings, but do have dicts
+ if isinstance(source, np.generic):
+ # NumPy scalars are never mappings (PyPy issue workaround)
+ pass
+ elif not is_mapping and hasattr(source, '__dict__'):
+ source = dict((key, value) for key, value in source.__dict__.items()
+ if not key.startswith('_'))
+ is_mapping = True
+ if is_mapping:
+ dtype = []
+ values = []
+ for field, value in source.items():
+ if (isinstance(field, str) and
+ field[0] not in '_0123456789'):
+ dtype.append((str(field), object))
+ values.append(value)
+ if dtype:
+ return np.array([tuple(values)], dtype)
+ else:
+ return EmptyStructMarker
+ # Next try and convert to an array
+ narr = np.asanyarray(source)
+ if narr.dtype.type in (object, np.object_) and \
+ narr.shape == () and narr == source:
+ # No interesting conversion possible
+ return None
+ return narr
+
+
+# Native byte ordered dtypes for convenience for writers
+NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']
+NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']
+NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']
+NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']
+
+
+class VarWriter5(object):
+ ''' Generic matlab matrix writing class '''
+ mat_tag = np.zeros((), NDT_TAG_FULL)
+ mat_tag['mdtype'] = miMATRIX
+
+ def __init__(self, file_writer):
+ self.file_stream = file_writer.file_stream
+ self.unicode_strings = file_writer.unicode_strings
+ self.long_field_names = file_writer.long_field_names
+ self.oned_as = file_writer.oned_as
+ # These are used for top level writes, and unset after
+ self._var_name = None
+ self._var_is_global = False
+
+ def write_bytes(self, arr):
+ self.file_stream.write(arr.tobytes(order='F'))
+
+ def write_string(self, s):
+ self.file_stream.write(s)
+
+ def write_element(self, arr, mdtype=None):
+ ''' write tag and data '''
+ if mdtype is None:
+ mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
+ # Array needs to be in native byte order
+ if arr.dtype.byteorder == swapped_code:
+ arr = arr.byteswap().newbyteorder()
+ byte_count = arr.size*arr.itemsize
+ if byte_count <= 4:
+ self.write_smalldata_element(arr, mdtype, byte_count)
+ else:
+ self.write_regular_element(arr, mdtype, byte_count)
+
+ def write_smalldata_element(self, arr, mdtype, byte_count):
+ # write tag with embedded data
+ tag = np.zeros((), NDT_TAG_SMALL)
+ tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
+ # if arr.tobytes is < 4, the element will be zero-padded as needed.
+ tag['data'] = arr.tobytes(order='F')
+ self.write_bytes(tag)
+
+ def write_regular_element(self, arr, mdtype, byte_count):
+ # write tag, data
+ tag = np.zeros((), NDT_TAG_FULL)
+ tag['mdtype'] = mdtype
+ tag['byte_count'] = byte_count
+ self.write_bytes(tag)
+ self.write_bytes(arr)
+ # pad to next 64-bit boundary
+ bc_mod_8 = byte_count % 8
+ if bc_mod_8:
+ self.file_stream.write(b'\x00' * (8-bc_mod_8))
+
+ def write_header(self,
+ shape,
+ mclass,
+ is_complex=False,
+ is_logical=False,
+ nzmax=0):
+ ''' Write header for given data options
+ shape : sequence
+ array shape
+ mclass - mat5 matrix class
+ is_complex - True if matrix is complex
+ is_logical - True if matrix is logical
+ nzmax - max non zero elements for sparse arrays
+
+ We get the name and the global flag from the object, and reset
+ them to defaults after we've used them
+ '''
+ # get name and is_global from one-shot object store
+ name = self._var_name
+ is_global = self._var_is_global
+ # initialize the top-level matrix tag, store position
+ self._mat_tag_pos = self.file_stream.tell()
+ self.write_bytes(self.mat_tag)
+ # write array flags (complex, global, logical, class, nzmax)
+ af = np.zeros((), NDT_ARRAY_FLAGS)
+ af['data_type'] = miUINT32
+ af['byte_count'] = 8
+ flags = is_complex << 3 | is_global << 2 | is_logical << 1
+ af['flags_class'] = mclass | flags << 8
+ af['nzmax'] = nzmax
+ self.write_bytes(af)
+ # shape
+ self.write_element(np.array(shape, dtype='i4'))
+ # write name
+ name = np.asarray(name)
+ if name == '': # empty string zero-terminated
+ self.write_smalldata_element(name, miINT8, 0)
+ else:
+ self.write_element(name, miINT8)
+ # reset the one-shot store to defaults
+ self._var_name = ''
+ self._var_is_global = False
+
+ def update_matrix_tag(self, start_pos):
+ curr_pos = self.file_stream.tell()
+ self.file_stream.seek(start_pos)
+ byte_count = curr_pos - start_pos - 8
+ if byte_count >= 2**32:
+ raise MatWriteError("Matrix too large to save with Matlab "
+ "5 format")
+ self.mat_tag['byte_count'] = byte_count
+ self.write_bytes(self.mat_tag)
+ self.file_stream.seek(curr_pos)
+
+ def write_top(self, arr, name, is_global):
+ """ Write variable at top level of mat file
+
+ Parameters
+ ----------
+ arr : array_like
+ array-like object to create writer for
+ name : str, optional
+ name as it will appear in matlab workspace
+ default is empty string
+ is_global : {False, True}, optional
+ whether variable will be global on load into matlab
+ """
+ # these are set before the top-level header write, and unset at
+ # the end of the same write, because they do not apply for lower levels
+ self._var_is_global = is_global
+ self._var_name = name
+ # write the header and data
+ self.write(arr)
+
+ def write(self, arr):
+ ''' Write `arr` to stream at top and sub levels
+
+ Parameters
+ ----------
+ arr : array_like
+ array-like object to create writer for
+ '''
+ # store position, so we can update the matrix tag
+ mat_tag_pos = self.file_stream.tell()
+ # First check if these are sparse
+ if scipy.sparse.issparse(arr):
+ self.write_sparse(arr)
+ self.update_matrix_tag(mat_tag_pos)
+ return
+ # Try to convert things that aren't arrays
+ narr = to_writeable(arr)
+ if narr is None:
+ raise TypeError('Could not convert %s (type %s) to array'
+ % (arr, type(arr)))
+ if isinstance(narr, MatlabObject):
+ self.write_object(narr)
+ elif isinstance(narr, MatlabFunction):
+ raise MatWriteError('Cannot write matlab functions')
+ elif narr is EmptyStructMarker: # empty struct array
+ self.write_empty_struct()
+ elif narr.dtype.fields: # struct array
+ self.write_struct(narr)
+ elif narr.dtype.hasobject: # cell array
+ self.write_cells(narr)
+ elif narr.dtype.kind in ('U', 'S'):
+ if self.unicode_strings:
+ codec = 'UTF8'
+ else:
+ codec = 'ascii'
+ self.write_char(narr, codec)
+ else:
+ self.write_numeric(narr)
+ self.update_matrix_tag(mat_tag_pos)
+
+ def write_numeric(self, arr):
+ imagf = arr.dtype.kind == 'c'
+ logif = arr.dtype.kind == 'b'
+ try:
+ mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
+ except KeyError:
+ # No matching matlab type, probably complex256 / float128 / float96
+ # Cast data to complex128 / float64.
+ if imagf:
+ arr = arr.astype('c128')
+ elif logif:
+ arr = arr.astype('i1') # Should only contain 0/1
+ else:
+ arr = arr.astype('f8')
+ mclass = mxDOUBLE_CLASS
+ self.write_header(matdims(arr, self.oned_as),
+ mclass,
+ is_complex=imagf,
+ is_logical=logif)
+ if imagf:
+ self.write_element(arr.real)
+ self.write_element(arr.imag)
+ else:
+ self.write_element(arr)
+
+ def write_char(self, arr, codec='ascii'):
+ ''' Write string array `arr` with given `codec`
+ '''
+ if arr.size == 0 or np.all(arr == ''):
+ # This is an empty string array or a string array containing
+ # only empty strings. Matlab cannot distinguish between a
+ # string array that is empty, and a string array containing
+ # only empty strings, because it stores strings as arrays of
+ # char. There is no way of having an array of char that is
+ # not empty, but contains an empty string. We have to
+ # special-case the array-with-empty-strings because even
+ # empty strings have zero padding, which would otherwise
+ # appear in matlab as a string with a space.
+ shape = (0,) * np.max([arr.ndim, 2])
+ self.write_header(shape, mxCHAR_CLASS)
+ self.write_smalldata_element(arr, miUTF8, 0)
+ return
+ # non-empty string.
+ #
+ # Convert to char array
+ arr = arr_to_chars(arr)
+ # We have to write the shape directly, because we are going
+ # recode the characters, and the resulting stream of chars
+ # may have a different length
+ shape = arr.shape
+ self.write_header(shape, mxCHAR_CLASS)
+ if arr.dtype.kind == 'U' and arr.size:
+ # Make one long string from all the characters. We need to
+ # transpose here, because we're flattening the array, before
+ # we write the bytes. The bytes have to be written in
+ # Fortran order.
+ n_chars = np.prod(shape)
+ st_arr = np.ndarray(shape=(),
+ dtype=arr_dtype_number(arr, n_chars),
+ buffer=arr.T.copy()) # Fortran order
+ # Recode with codec to give byte string
+ st = st_arr.item().encode(codec)
+ # Reconstruct as 1-D byte array
+ arr = np.ndarray(shape=(len(st),),
+ dtype='S1',
+ buffer=st)
+ self.write_element(arr, mdtype=miUTF8)
+
+ def write_sparse(self, arr):
+ ''' Sparse matrices are 2D
+ '''
+ A = arr.tocsc() # convert to sparse CSC format
+ A.sort_indices() # MATLAB expects sorted row indices
+ is_complex = (A.dtype.kind == 'c')
+ is_logical = (A.dtype.kind == 'b')
+ nz = A.nnz
+ self.write_header(matdims(arr, self.oned_as),
+ mxSPARSE_CLASS,
+ is_complex=is_complex,
+ is_logical=is_logical,
+ # matlab won't load file with 0 nzmax
+ nzmax=1 if nz == 0 else nz)
+ self.write_element(A.indices.astype('i4'))
+ self.write_element(A.indptr.astype('i4'))
+ self.write_element(A.data.real)
+ if is_complex:
+ self.write_element(A.data.imag)
+
+ def write_cells(self, arr):
+ self.write_header(matdims(arr, self.oned_as),
+ mxCELL_CLASS)
+ # loop over data, column major
+ A = np.atleast_2d(arr).flatten('F')
+ for el in A:
+ self.write(el)
+
+ def write_empty_struct(self):
+ self.write_header((1, 1), mxSTRUCT_CLASS)
+ # max field name length set to 1 in an example matlab struct
+ self.write_element(np.array(1, dtype=np.int32))
+ # Field names element is empty
+ self.write_element(np.array([], dtype=np.int8))
+
+ def write_struct(self, arr):
+ self.write_header(matdims(arr, self.oned_as),
+ mxSTRUCT_CLASS)
+ self._write_items(arr)
+
+ def _write_items(self, arr):
+ # write fieldnames
+ fieldnames = [f[0] for f in arr.dtype.descr]
+ length = max([len(fieldname) for fieldname in fieldnames])+1
+ max_length = (self.long_field_names and 64) or 32
+ if length > max_length:
+ raise ValueError("Field names are restricted to %d characters" %
+ (max_length-1))
+ self.write_element(np.array([length], dtype='i4'))
+ self.write_element(
+ np.array(fieldnames, dtype='S%d' % (length)),
+ mdtype=miINT8)
+ A = np.atleast_2d(arr).flatten('F')
+ for el in A:
+ for f in fieldnames:
+ self.write(el[f])
+
+ def write_object(self, arr):
+ '''Same as writing structs, except different mx class, and extra
+ classname element after header
+ '''
+ self.write_header(matdims(arr, self.oned_as),
+ mxOBJECT_CLASS)
+ self.write_element(np.array(arr.classname, dtype='S'),
+ mdtype=miINT8)
+ self._write_items(arr)
+
+
+class MatFile5Writer(object):
+ ''' Class for writing mat5 files '''
+
+ @docfiller
+ def __init__(self, file_stream,
+ do_compression=False,
+ unicode_strings=False,
+ global_vars=None,
+ long_field_names=False,
+ oned_as='row'):
+ ''' Initialize writer for matlab 5 format files
+
+ Parameters
+ ----------
+ %(do_compression)s
+ %(unicode_strings)s
+ global_vars : None or sequence of strings, optional
+ Names of variables to be marked as global for matlab
+ %(long_fields)s
+ %(oned_as)s
+ '''
+ self.file_stream = file_stream
+ self.do_compression = do_compression
+ self.unicode_strings = unicode_strings
+ if global_vars:
+ self.global_vars = global_vars
+ else:
+ self.global_vars = []
+ self.long_field_names = long_field_names
+ self.oned_as = oned_as
+ self._matrix_writer = None
+
+ def write_file_header(self):
+ # write header
+ hdr = np.zeros((), NDT_FILE_HDR)
+ hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
+ % (os.name,time.asctime())
+ hdr['version'] = 0x0100
+ hdr['endian_test'] = np.ndarray(shape=(),
+ dtype='S2',
+ buffer=np.uint16(0x4d49))
+ self.file_stream.write(hdr.tobytes())
+
+ def put_variables(self, mdict, write_header=None):
+ ''' Write variables in `mdict` to stream
+
+ Parameters
+ ----------
+ mdict : mapping
+ mapping with method ``items`` returns name, contents pairs where
+ ``name`` will appear in the matlab workspace in file load, and
+ ``contents`` is something writeable to a matlab file, such as a NumPy
+ array.
+ write_header : {None, True, False}, optional
+ If True, then write the matlab file header before writing the
+ variables. If None (the default) then write the file header
+ if we are at position 0 in the stream. By setting False
+ here, and setting the stream position to the end of the file,
+ you can append variables to a matlab file
+ '''
+ # write header if requested, or None and start of file
+ if write_header is None:
+ write_header = self.file_stream.tell() == 0
+ if write_header:
+ self.write_file_header()
+ self._matrix_writer = VarWriter5(self)
+ for name, var in mdict.items():
+ if name[0] == '_':
+ continue
+ is_global = name in self.global_vars
+ if self.do_compression:
+ stream = BytesIO()
+ self._matrix_writer.file_stream = stream
+ self._matrix_writer.write_top(var, asbytes(name), is_global)
+ out_str = zlib.compress(stream.getvalue())
+ tag = np.empty((), NDT_TAG_FULL)
+ tag['mdtype'] = miCOMPRESSED
+ tag['byte_count'] = len(out_str)
+ self.file_stream.write(tag.tobytes())
+ self.file_stream.write(out_str)
+ else: # not compressing
+ self._matrix_writer.write_top(var, asbytes(name), is_global)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5_params.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5_params.py
new file mode 100644
index 0000000..70381a2
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5_params.py
@@ -0,0 +1,252 @@
+''' Constants and classes for matlab 5 read and write
+
+See also mio5_utils.pyx where these same constants arise as c enums.
+
+If you make changes in this file, don't forget to change mio5_utils.pyx
+'''
+import numpy as np
+
+from .miobase import convert_dtypes
+
+miINT8 = 1
+miUINT8 = 2
+miINT16 = 3
+miUINT16 = 4
+miINT32 = 5
+miUINT32 = 6
+miSINGLE = 7
+miDOUBLE = 9
+miINT64 = 12
+miUINT64 = 13
+miMATRIX = 14
+miCOMPRESSED = 15
+miUTF8 = 16
+miUTF16 = 17
+miUTF32 = 18
+
+mxCELL_CLASS = 1
+mxSTRUCT_CLASS = 2
+# The March 2008 edition of "Matlab 7 MAT-File Format" says that
+# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3.
+# Matlab 2008a appears to save logicals as type 9, so we assume that
+# the document is correct. See type 18, below.
+mxOBJECT_CLASS = 3
+mxCHAR_CLASS = 4
+mxSPARSE_CLASS = 5
+mxDOUBLE_CLASS = 6
+mxSINGLE_CLASS = 7
+mxINT8_CLASS = 8
+mxUINT8_CLASS = 9
+mxINT16_CLASS = 10
+mxUINT16_CLASS = 11
+mxINT32_CLASS = 12
+mxUINT32_CLASS = 13
+# The following are not in the March 2008 edition of "Matlab 7
+# MAT-File Format," but were guessed from matrix.h.
+mxINT64_CLASS = 14
+mxUINT64_CLASS = 15
+mxFUNCTION_CLASS = 16
+# Not doing anything with these at the moment.
+mxOPAQUE_CLASS = 17 # This appears to be a function workspace
+# Thread 'saving/loading symbol table of annymous functions', octave-maintainers, April-May 2007
+# https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html
+# https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html
+# (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html)
+mxOBJECT_CLASS_FROM_MATRIX_H = 18
+
+mdtypes_template = {
+ miINT8: 'i1',
+ miUINT8: 'u1',
+ miINT16: 'i2',
+ miUINT16: 'u2',
+ miINT32: 'i4',
+ miUINT32: 'u4',
+ miSINGLE: 'f4',
+ miDOUBLE: 'f8',
+ miINT64: 'i8',
+ miUINT64: 'u8',
+ miUTF8: 'u1',
+ miUTF16: 'u2',
+ miUTF32: 'u4',
+ 'file_header': [('description', 'S116'),
+ ('subsystem_offset', 'i8'),
+ ('version', 'u2'),
+ ('endian_test', 'S2')],
+ 'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')],
+ 'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')],
+ 'array_flags': [('data_type', 'u4'),
+ ('byte_count', 'u4'),
+ ('flags_class','u4'),
+ ('nzmax', 'u4')],
+ 'U1': 'U1',
+ }
+
+mclass_dtypes_template = {
+ mxINT8_CLASS: 'i1',
+ mxUINT8_CLASS: 'u1',
+ mxINT16_CLASS: 'i2',
+ mxUINT16_CLASS: 'u2',
+ mxINT32_CLASS: 'i4',
+ mxUINT32_CLASS: 'u4',
+ mxINT64_CLASS: 'i8',
+ mxUINT64_CLASS: 'u8',
+ mxSINGLE_CLASS: 'f4',
+ mxDOUBLE_CLASS: 'f8',
+ }
+
+mclass_info = {
+ mxINT8_CLASS: 'int8',
+ mxUINT8_CLASS: 'uint8',
+ mxINT16_CLASS: 'int16',
+ mxUINT16_CLASS: 'uint16',
+ mxINT32_CLASS: 'int32',
+ mxUINT32_CLASS: 'uint32',
+ mxINT64_CLASS: 'int64',
+ mxUINT64_CLASS: 'uint64',
+ mxSINGLE_CLASS: 'single',
+ mxDOUBLE_CLASS: 'double',
+ mxCELL_CLASS: 'cell',
+ mxSTRUCT_CLASS: 'struct',
+ mxOBJECT_CLASS: 'object',
+ mxCHAR_CLASS: 'char',
+ mxSPARSE_CLASS: 'sparse',
+ mxFUNCTION_CLASS: 'function',
+ mxOPAQUE_CLASS: 'opaque',
+ }
+
+NP_TO_MTYPES = {
+ 'f8': miDOUBLE,
+ 'c32': miDOUBLE,
+ 'c24': miDOUBLE,
+ 'c16': miDOUBLE,
+ 'f4': miSINGLE,
+ 'c8': miSINGLE,
+ 'i8': miINT64,
+ 'i4': miINT32,
+ 'i2': miINT16,
+ 'i1': miINT8,
+ 'u8': miUINT64,
+ 'u4': miUINT32,
+ 'u2': miUINT16,
+ 'u1': miUINT8,
+ 'S1': miUINT8,
+ 'U1': miUTF16,
+ 'b1': miUINT8, # not standard but seems MATLAB uses this (gh-4022)
+ }
+
+
+NP_TO_MXTYPES = {
+ 'f8': mxDOUBLE_CLASS,
+ 'c32': mxDOUBLE_CLASS,
+ 'c24': mxDOUBLE_CLASS,
+ 'c16': mxDOUBLE_CLASS,
+ 'f4': mxSINGLE_CLASS,
+ 'c8': mxSINGLE_CLASS,
+ 'i8': mxINT64_CLASS,
+ 'i4': mxINT32_CLASS,
+ 'i2': mxINT16_CLASS,
+ 'i1': mxINT8_CLASS,
+ 'u8': mxUINT64_CLASS,
+ 'u4': mxUINT32_CLASS,
+ 'u2': mxUINT16_CLASS,
+ 'u1': mxUINT8_CLASS,
+ 'S1': mxUINT8_CLASS,
+ 'b1': mxUINT8_CLASS, # not standard but seems MATLAB uses this
+ }
+
+''' Before release v7.1 (release 14) matlab (TM) used the system
+default character encoding scheme padded out to 16-bits. Release 14
+and later use Unicode. When saving character data, R14 checks if it
+can be encoded in 7-bit ascii, and saves in that format if so.'''
+
+codecs_template = {
+ miUTF8: {'codec': 'utf_8', 'width': 1},
+ miUTF16: {'codec': 'utf_16', 'width': 2},
+ miUTF32: {'codec': 'utf_32','width': 4},
+ }
+
+
+def _convert_codecs(template, byte_order):
+ ''' Convert codec template mapping to byte order
+
+ Set codecs not on this system to None
+
+ Parameters
+ ----------
+ template : mapping
+ key, value are respectively codec name, and root name for codec
+ (without byte order suffix)
+ byte_order : {'<', '>'}
+ code for little or big endian
+
+ Returns
+ -------
+ codecs : dict
+ key, value are name, codec (as in .encode(codec))
+ '''
+ codecs = {}
+ postfix = byte_order == '<' and '_le' or '_be'
+ for k, v in template.items():
+ codec = v['codec']
+ try:
+ " ".encode(codec)
+ except LookupError:
+ codecs[k] = None
+ continue
+ if v['width'] > 1:
+ codec += postfix
+ codecs[k] = codec
+ return codecs.copy()
+
+
+MDTYPES = {}
+for _bytecode in '<>':
+ _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode),
+ 'classes': convert_dtypes(mclass_dtypes_template, _bytecode),
+ 'codecs': _convert_codecs(codecs_template, _bytecode)}
+ MDTYPES[_bytecode] = _def
+
+
+class mat_struct(object):
+ ''' Placeholder for holding read data from structs
+
+ We use instances of this class when the user passes False as a value to the
+ ``struct_as_record`` parameter of the :func:`scipy.io.matlab.loadmat`
+ function.
+ '''
+ pass
+
+
+class MatlabObject(np.ndarray):
+ ''' ndarray Subclass to contain matlab object '''
+ def __new__(cls, input_array, classname=None):
+ # Input array is an already formed ndarray instance
+ # We first cast to be our class type
+ obj = np.asarray(input_array).view(cls)
+ # add the new attribute to the created instance
+ obj.classname = classname
+ # Finally, we must return the newly created object:
+ return obj
+
+ def __array_finalize__(self,obj):
+ # reset the attribute from passed original object
+ self.classname = getattr(obj, 'classname', None)
+ # We do not need to return anything
+
+
+class MatlabFunction(np.ndarray):
+ ''' Subclass to signal this is a matlab function '''
+ def __new__(cls, input_array):
+ obj = np.asarray(input_array).view(cls)
+ return obj
+
+
+class MatlabOpaque(np.ndarray):
+ ''' Subclass to signal this is a matlab opaque matrix '''
+ def __new__(cls, input_array):
+ obj = np.asarray(input_array).view(cls)
+ return obj
+
+
+OPAQUE_DTYPE = np.dtype(
+ [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5_utils.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5_utils.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..a662a71
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio5_utils.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio_utils.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio_utils.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..d2c7197
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/mio_utils.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/miobase.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/miobase.py
new file mode 100644
index 0000000..f18a111
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/miobase.py
@@ -0,0 +1,409 @@
+# Authors: Travis Oliphant, Matthew Brett
+
+"""
+Base classes for MATLAB file stream reading.
+
+MATLAB is a registered trademark of the Mathworks inc.
+"""
+import operator
+import functools
+
+import numpy as np
+from scipy._lib import doccer
+
+from . import byteordercodes as boc
+
+
+class MatReadError(Exception):
+    """Raised when a MAT file cannot be read (e.g. the file is empty)."""
+    pass
+
+
+class MatWriteError(Exception):
+    """Raised when data cannot be written to a MAT file."""
+    pass
+
+
+class MatReadWarning(UserWarning):
+    """Warning category emitted for recoverable issues while reading."""
+    pass
+
+
+# Shared parameter-documentation fragments.  Each value is a docstring
+# snippet; ``docfiller`` (below) interpolates them into function/class
+# docstrings via %(key)s placeholders.
+doc_dict = \
+    {'file_arg':
+         '''file_name : str
+   Name of the mat file (do not need .mat extension if
+   appendmat==True) Can also pass open file-like object.''',
+     'append_arg':
+         '''appendmat : bool, optional
+   True to append the .mat extension to the end of the given
+   filename, if not already present.''',
+     'load_args':
+         '''byte_order : str or None, optional
+   None by default, implying byte order guessed from mat
+   file. Otherwise can be one of ('native', '=', 'little', '<',
+   'BIG', '>').
+mat_dtype : bool, optional
+   If True, return arrays in same dtype as would be loaded into
+   MATLAB (instead of the dtype with which they are saved).
+squeeze_me : bool, optional
+   Whether to squeeze unit matrix dimensions or not.
+chars_as_strings : bool, optional
+   Whether to convert char arrays to string arrays.
+matlab_compatible : bool, optional
+   Returns matrices as would be loaded by MATLAB (implies
+   squeeze_me=False, chars_as_strings=False, mat_dtype=True,
+   struct_as_record=True).''',
+     'struct_arg':
+         '''struct_as_record : bool, optional
+   Whether to load MATLAB structs as NumPy record arrays, or as
+   old-style NumPy arrays with dtype=object. Setting this flag to
+   False replicates the behavior of SciPy version 0.7.x (returning
+   numpy object arrays). The default setting is True, because it
+   allows easier round-trip load and save of MATLAB files.''',
+     'matstream_arg':
+         '''mat_stream : file-like
+   Object with file API, open for reading.''',
+     'long_fields':
+         '''long_field_names : bool, optional
+   * False - maximum field name length in a structure is 31 characters
+     which is the documented maximum length. This is the default.
+   * True - maximum field name length in a structure is 63 characters
+     which works for MATLAB 7.6''',
+     'do_compression':
+         '''do_compression : bool, optional
+   Whether to compress matrices on write. Default is False.''',
+     'oned_as':
+         '''oned_as : {'row', 'column'}, optional
+   If 'column', write 1-D NumPy arrays as column vectors.
+   If 'row', write 1D NumPy arrays as row vectors.''',
+     'unicode_strings':
+         '''unicode_strings : bool, optional
+   If True, write strings as Unicode, else MATLAB usual encoding.'''}
+
+# Decorator that substitutes the fragments above into docstrings.
+docfiller = doccer.filldoc(doc_dict)
+
+'''
+
+Note on architecture
+====================
+
+There are three sets of parameters relevant for reading files. The
+first are *file read parameters* - containing options that are common
+for reading the whole file, and therefore every variable within that
+file. At the moment these are:
+
+* mat_stream
+* dtypes (derived from byte code)
+* byte_order
+* chars_as_strings
+* squeeze_me
+* struct_as_record (MATLAB 5 files)
+* class_dtypes (derived from order code, MATLAB 5 files)
+* codecs (MATLAB 5 files)
+* uint16_codec (MATLAB 5 files)
+
+Another set of parameters are those that apply only to the current
+variable being read - the *header*:
+
+* header related variables (different for v4 and v5 mat files)
+* is_complex
+* mclass
+* var_stream
+
+With the header, we need ``next_position`` to tell us where the next
+variable in the stream is.
+
+Then, for each element in a matrix, there can be *element read
+parameters*. An element is, for example, one element in a MATLAB cell
+array. At the moment, these are:
+
+* mat_dtype
+
+The file-reading object contains the *file read parameters*. The
+*header* is passed around as a data object, or may be read and discarded
+in a single function. The *element read parameters* - the mat_dtype in
+this instance, is passed into a general post-processing function - see
+``mio_utils`` for details.
+'''
+
+
+def convert_dtypes(dtype_template, order_code):
+    ''' Convert dtypes in mapping to given order
+
+    Parameters
+    ----------
+    dtype_template : mapping
+        mapping with values returning numpy dtype from ``np.dtype(val)``
+    order_code : str
+        an order code suitable for using in ``dtype.newbyteorder()``
+
+    Returns
+    -------
+    dtypes : mapping
+        mapping where values have been replaced by
+        ``np.dtype(val).newbyteorder(order_code)``
+
+    '''
+    # Shallow copy so the template mapping is never mutated; then
+    # normalize every value into a byte-order-adjusted dtype.
+    dtypes = dtype_template.copy()
+    for k in dtypes:
+        dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code)
+    return dtypes
+
+
+def read_dtype(mat_stream, a_dtype):
+    """
+    Generic get of byte stream data of known type
+
+    Parameters
+    ----------
+    mat_stream : file_like object
+        MATLAB (tm) mat file stream
+    a_dtype : dtype
+        dtype of array to read. `a_dtype` is assumed to be correct
+        endianness.
+
+    Returns
+    -------
+    arr : ndarray
+        Array of dtype `a_dtype` read from stream.
+
+    """
+    # Read exactly one item's worth of bytes and wrap them (zero-copy)
+    # in a 0-d array of the requested dtype.
+    num_bytes = a_dtype.itemsize
+    arr = np.ndarray(shape=(),
+                     dtype=a_dtype,
+                     buffer=mat_stream.read(num_bytes),
+                     order='F')
+    return arr
+
+
+def get_matfile_version(fileobj):
+    """
+    Return major, minor tuple depending on apparent mat file type
+
+    Where:
+
+    #. 0,x -> version 4 format mat files
+    #. 1,x -> version 5 format mat files
+    #. 2,x -> version 7.3 format mat files (HDF format)
+
+    Parameters
+    ----------
+    fileobj : file_like
+        object implementing seek() and read()
+
+    Returns
+    -------
+    major_version : {0, 1, 2}
+        major MATLAB File format version
+    minor_version : int
+        minor MATLAB file format version
+
+    Raises
+    ------
+    MatReadError
+        If the file is empty.
+    ValueError
+        The matfile version is unknown.
+
+    Notes
+    -----
+    Has the side effect of setting the file read pointer to 0
+    """
+    # Mat4 files have a zero somewhere in first 4 bytes
+    fileobj.seek(0)
+    mopt_bytes = fileobj.read(4)
+    if len(mopt_bytes) == 0:
+        raise MatReadError("Mat file appears to be empty")
+    mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=mopt_bytes)
+    if 0 in mopt_ints:
+        fileobj.seek(0)
+        return (0,0)
+    # For 5 format or 7.3 format we need to read an integer in the
+    # header. Bytes 124 through 128 contain a version integer and an
+    # endian test string
+    fileobj.seek(124)
+    tst_str = fileobj.read(4)
+    fileobj.seek(0)
+    # tst_str[2:4] is the endian marker ('IM' or 'MI'); which of the two
+    # version bytes holds the major version depends on that byte order,
+    # so select the index accordingly.
+    maj_ind = int(tst_str[2] == b'I'[0])
+    maj_val = int(tst_str[maj_ind])
+    min_val = int(tst_str[1 - maj_ind])
+    ret = (maj_val, min_val)
+    if maj_val in (1, 2):
+        return ret
+    raise ValueError('Unknown mat file type, version %s, %s' % ret)
+
+
+def matdims(arr, oned_as='column'):
+    """
+    Determine equivalent MATLAB dimensions for given array
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array
+    oned_as : {'column', 'row'}, optional
+        Whether 1-D arrays are returned as MATLAB row or column matrices.
+        Default is 'column'.
+
+    Returns
+    -------
+    dims : tuple
+        Shape tuple, in the form MATLAB expects it.
+
+    Notes
+    -----
+    We had to decide what shape a 1 dimensional array would be by
+    default. ``np.atleast_2d`` thinks it is a row vector. The
+    default for a vector in MATLAB (e.g., ``>> 1:12``) is a row vector.
+
+    Versions of scipy up to and including 0.11 resulted (accidentally)
+    in 1-D arrays being read as column vectors. For the moment, we
+    maintain the same tradition here.
+
+    Examples
+    --------
+    >>> matdims(np.array(1)) # NumPy scalar
+    (1, 1)
+    >>> matdims(np.array([1])) # 1-D array, 1 element
+    (1, 1)
+    >>> matdims(np.array([1,2])) # 1-D array, 2 elements
+    (2, 1)
+    >>> matdims(np.array([[2],[3]])) # 2-D array, column vector
+    (2, 1)
+    >>> matdims(np.array([[2,3]])) # 2-D array, row vector
+    (1, 2)
+    >>> matdims(np.array([[[2,3]]])) # 3-D array, rowish vector
+    (1, 1, 2)
+    >>> matdims(np.array([])) # empty 1-D array
+    (0, 0)
+    >>> matdims(np.array([[]])) # empty 2-D array
+    (0, 0)
+    >>> matdims(np.array([[[]]])) # empty 3-D array
+    (0, 0, 0)
+
+    Optional argument flips 1-D shape behavior.
+
+    >>> matdims(np.array([1,2]), 'row') # 1-D array, 2 elements
+    (1, 2)
+
+    The argument has to make sense though
+
+    >>> matdims(np.array([1,2]), 'bizarre')
+    Traceback (most recent call last):
+       ...
+    ValueError: 1-D option "bizarre" is strange
+
+    """
+    shape = arr.shape
+    if shape == ():  # scalar -> MATLAB 1x1 matrix
+        return (1,1)
+    if functools.reduce(operator.mul, shape) == 0:  # zero elements
+        # Empty array: all-zero shape, padded to at least 2 dimensions
+        return (0,) * np.max([arr.ndim, 2])
+    if len(shape) == 1:  # 1D
+        if oned_as == 'column':
+            return shape + (1,)
+        elif oned_as == 'row':
+            return (1,) + shape
+        else:
+            raise ValueError('1-D option "%s" is strange'
+                             % oned_as)
+    return shape
+
+
+class MatVarReader(object):
+    ''' Abstract class defining required interface for var readers'''
+    def __init__(self, file_reader):
+        # Subclasses hold a reference to the owning file reader here.
+        pass
+
+    def read_header(self):
+        ''' Returns header for the next variable in the stream '''
+        pass
+
+    def array_from_header(self, header):
+        ''' Reads and returns the array described by `header` '''
+        pass
+
+
+class MatFileReader(object):
+    """ Base object for reading mat files
+
+    To make this class functional, you will need to override the
+    following methods:
+
+    matrix_getter_factory   - gives object to fetch next matrix from stream
+    guess_byte_order        - guesses file byte order from file
+    """
+
+    @docfiller
+    def __init__(self, mat_stream,
+                 byte_order=None,
+                 mat_dtype=False,
+                 squeeze_me=False,
+                 chars_as_strings=True,
+                 matlab_compatible=False,
+                 struct_as_record=True,
+                 verify_compressed_data_integrity=True,
+                 simplify_cells=False):
+        '''
+        Initializer for mat file reader
+
+        mat_stream : file-like
+            object with file API, open for reading
+        %(load_args)s
+        '''
+        # Initialize stream
+        self.mat_stream = mat_stream
+        self.dtypes = {}
+        # Guess byte order from the file unless the caller supplied one;
+        # a supplied order is normalized to a numpy code ('<' or '>').
+        if not byte_order:
+            byte_order = self.guess_byte_order()
+        else:
+            byte_order = boc.to_numpy_code(byte_order)
+        self.byte_order = byte_order
+        self.struct_as_record = struct_as_record
+        # matlab_compatible overrides the three individual flags below.
+        if matlab_compatible:
+            self.set_matlab_compatible()
+        else:
+            self.squeeze_me = squeeze_me
+            self.chars_as_strings = chars_as_strings
+            self.mat_dtype = mat_dtype
+        self.verify_compressed_data_integrity = verify_compressed_data_integrity
+        self.simplify_cells = simplify_cells
+        # simplify_cells forces squeezing and object-style structs,
+        # overriding the values set above.
+        if simplify_cells:
+            self.squeeze_me = True
+            self.struct_as_record = False
+
+    def set_matlab_compatible(self):
+        ''' Sets options to return arrays as MATLAB loads them '''
+        self.mat_dtype = True
+        self.squeeze_me = False
+        self.chars_as_strings = False
+
+    def guess_byte_order(self):
+        ''' As we do not know what file type we have, assume native '''
+        return boc.native_code
+
+    def end_of_stream(self):
+        # Probe for EOF by reading one byte, then step the position back.
+        # NOTE(review): when already at EOF, read(1) returns b'' and
+        # tell() has not advanced, so seek(curpos-1) rewinds one byte
+        # too far — harmless if callers stop once this returns True,
+        # but worth confirming.
+        b = self.mat_stream.read(1)
+        curpos = self.mat_stream.tell()
+        self.mat_stream.seek(curpos-1)
+        return len(b) == 0
+
+
+def arr_dtype_number(arr, num):
+    ''' Return dtype for given number of items per element
+
+    Keeps the byte order and kind from ``arr.dtype.str`` (its first two
+    characters, e.g. ``'<U'``) and replaces the item count with `num`.
+    '''
+    return np.dtype(arr.dtype.str[:2] + str(num))
+
+
+def arr_to_chars(arr):
+    ''' Convert string array to char array
+
+    Reinterprets an array of length-N strings as an array with an extra
+    trailing axis of N single characters, and pads empty strings with a
+    single space so no element is zero-length.
+    '''
+    dims = list(arr.shape)
+    if not dims:
+        # 0-d input: treat as a single-element array
+        dims = [1]
+    # Chars per element, parsed from the dtype string after the 2-char
+    # byte-order/kind prefix (e.g. '<U5' -> 5); becomes the new last axis.
+    dims.append(int(arr.dtype.str[2:]))
+    arr = np.ndarray(shape=dims,
+                     dtype=arr_dtype_number(arr, 1),
+                     buffer=arr)
+    # Boolean mask of empty chars, kept in a 1-element list so it can be
+    # applied below as a tuple index (mask indexing).
+    empties = [arr == '']
+    if not np.any(empties):
+        return arr
+    # Copy before mutating so the caller's buffer is untouched.
+    arr = arr.copy()
+    arr[tuple(empties)] = ' '
+    return arr
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/setup.py
new file mode 100644
index 0000000..98afe9e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/setup.py
@@ -0,0 +1,14 @@
+
+def configuration(parent_package='io',top_path=None):
+    """numpy.distutils build configuration for the matlab subpackage."""
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('matlab', parent_package, top_path)
+    # C extension modules built from pre-generated C sources
+    config.add_extension('streams', sources=['streams.c'])
+    config.add_extension('mio_utils', sources=['mio_utils.c'])
+    config.add_extension('mio5_utils', sources=['mio5_utils.c'])
+    config.add_data_dir('tests')
+    return config
+
+
+if __name__ == '__main__':
+    # Allow building this subpackage standalone.
+    from numpy.distutils.core import setup
+    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/streams.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/streams.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..cabd329
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/streams.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/afunc.m b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/afunc.m
new file mode 100644
index 0000000..5cbf628
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/afunc.m
@@ -0,0 +1,4 @@
+function [a, b] = afunc(c, d)
+% AFUNC Test helper returning a = c + 1 and b = d + 10.
+a = c + 1;
+b = d + 10;
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/bad_miuint32.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/bad_miuint32.mat
new file mode 100644
index 0000000..c9ab357
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/bad_miuint32.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat
new file mode 100644
index 0000000..a17203f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/big_endian.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/big_endian.mat
new file mode 100644
index 0000000..2a0c982
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/big_endian.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/broken_utf8.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/broken_utf8.mat
new file mode 100644
index 0000000..4f63238
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/broken_utf8.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat
new file mode 100644
index 0000000..c88cbb6
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/corrupted_zlib_data.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/corrupted_zlib_data.mat
new file mode 100644
index 0000000..45a2ef4
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/corrupted_zlib_data.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/japanese_utf8.txt b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/japanese_utf8.txt
new file mode 100644
index 0000000..1459b6b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/japanese_utf8.txt
@@ -0,0 +1,5 @@
+Japanese:
+すべての人間は、生まれながらにして自由であり、
+かつ、尊厳と権利と について平等である。
+人間は、理性と良心とを授けられており、
+互いに同胞の精神をもって行動しなければならない。
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/little_endian.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/little_endian.mat
new file mode 100644
index 0000000..df6db66
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/little_endian.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/logical_sparse.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/logical_sparse.mat
new file mode 100644
index 0000000..a60ad5b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/logical_sparse.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/malformed1.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/malformed1.mat
new file mode 100644
index 0000000..54462e2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/malformed1.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/miuint32_for_miint32.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/miuint32_for_miint32.mat
new file mode 100644
index 0000000..fd2c499
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/miuint32_for_miint32.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/miutf8_array_name.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/miutf8_array_name.mat
new file mode 100644
index 0000000..ccfdaa8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/miutf8_array_name.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat
new file mode 100644
index 0000000..35dcb71
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/one_by_zero_char.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/one_by_zero_char.mat
new file mode 100644
index 0000000..07e7dca
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/one_by_zero_char.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/parabola.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/parabola.mat
new file mode 100644
index 0000000..6635053
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/parabola.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/single_empty_string.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/single_empty_string.mat
new file mode 100644
index 0000000..293f387
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/single_empty_string.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/some_functions.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/some_functions.mat
new file mode 100644
index 0000000..cc81859
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/some_functions.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/sqr.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/sqr.mat
new file mode 100644
index 0000000..2436d87
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/sqr.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat
new file mode 100644
index 0000000..4537126
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..e04d27d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat
new file mode 100644
index 0000000..4c03030
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat
new file mode 100644
index 0000000..232a051
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_empty_struct.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_empty_struct.mat
new file mode 100644
index 0000000..30c8c8a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_empty_struct.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_mat4_le_floats.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_mat4_le_floats.mat
new file mode 100644
index 0000000..6643c42
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_mat4_le_floats.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_skip_variable.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_skip_variable.mat
new file mode 100644
index 0000000..efbe3fe
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/test_skip_variable.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testbool_8_WIN64.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testbool_8_WIN64.mat
new file mode 100644
index 0000000..faa30b1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testbool_8_WIN64.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat
new file mode 100644
index 0000000..512f7d8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..a763310
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat
new file mode 100644
index 0000000..2ac1da1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat
new file mode 100644
index 0000000..fc893f3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat
new file mode 100644
index 0000000..4198a4f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..2c7826e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat
new file mode 100644
index 0000000..b3b086c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat
new file mode 100644
index 0000000..316f889
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat
new file mode 100644
index 0000000..36621b2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat
new file mode 100644
index 0000000..32fcd2a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..f3ecd20
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat
new file mode 100644
index 0000000..c0c0838
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat
new file mode 100644
index 0000000..6a187ed
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat
new file mode 100644
index 0000000..5dbfcf1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat
new file mode 100644
index 0000000..8e36c0c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..a003b6d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat
new file mode 100644
index 0000000..3106712
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat
new file mode 100644
index 0000000..9097bb0
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat
new file mode 100644
index 0000000..e7dec3b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..a1c9348
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat
new file mode 100644
index 0000000..f29d4f9
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat
new file mode 100644
index 0000000..8b24404
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat
new file mode 100644
index 0000000..adb6c28
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat
new file mode 100644
index 0000000..6066c1e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat
new file mode 100644
index 0000000..3698c88
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat
new file mode 100644
index 0000000..164be11
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..a8735e9
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat
new file mode 100644
index 0000000..b6fb05b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat
new file mode 100644
index 0000000..eb537ab
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat
new file mode 100644
index 0000000..cc207ed
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat
new file mode 100644
index 0000000..c2f0ba2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..b4dbd15
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat
new file mode 100644
index 0000000..fadcd23
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat
new file mode 100644
index 0000000..9ce65f9
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat
new file mode 100644
index 0000000..9c6ba79
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat
new file mode 100644
index 0000000..0c4729c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat
new file mode 100644
index 0000000..6d3e068
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat
new file mode 100644
index 0000000..fc13642
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..f68323b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat
new file mode 100644
index 0000000..83dcad3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat
new file mode 100644
index 0000000..59d243c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat
new file mode 100644
index 0000000..cdb4191
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat
new file mode 100644
index 0000000..3b5a428
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..8cef2dd
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat
new file mode 100644
index 0000000..5ba4810
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat
new file mode 100644
index 0000000..8964765
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat
new file mode 100644
index 0000000..1dcd72e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsimplecell.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsimplecell.mat
new file mode 100644
index 0000000..2a98f48
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsimplecell.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat
new file mode 100644
index 0000000..55cbd3c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat
new file mode 100644
index 0000000..194ca4d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..3e1e9a1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat
new file mode 100644
index 0000000..55b5107
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat
new file mode 100644
index 0000000..bdb6ce6
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat
new file mode 100644
index 0000000..81c536d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat
new file mode 100644
index 0000000..520e1ce
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..969b714
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat
new file mode 100644
index 0000000..9117dce
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat
new file mode 100644
index 0000000..a8a615a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat
new file mode 100644
index 0000000..1542426
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat
new file mode 100644
index 0000000..137561e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat
new file mode 100644
index 0000000..2ad75f2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..6fd12d8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat
new file mode 100644
index 0000000..ab93994
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat
new file mode 100644
index 0000000..63059b8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat
new file mode 100644
index 0000000..fa687ee
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat
new file mode 100644
index 0000000..11afb41
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..75e07a0
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat
new file mode 100644
index 0000000..7d76f63
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat
new file mode 100644
index 0000000..954e39b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat
new file mode 100644
index 0000000..5086bb7
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..6feb6e4
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat
new file mode 100644
index 0000000..b2ff222
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat
new file mode 100644
index 0000000..028841f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat
new file mode 100644
index 0000000..da57365
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..d1c97a7
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat
new file mode 100644
index 0000000..c7ca095
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat
new file mode 100644
index 0000000..8716f7e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat
new file mode 100644
index 0000000..2c34c4d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat
new file mode 100644
index 0000000..c6dccc0
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat
new file mode 100644
index 0000000..0f6f544
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat
new file mode 100644
index 0000000..faf9221
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat
new file mode 100644
index 0000000..1b7b3d7
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat
new file mode 100644
index 0000000..d22fb57
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat
new file mode 100644
index 0000000..76c51d0
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/gen_mat4files.m b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/gen_mat4files.m
new file mode 100644
index 0000000..a67cc20
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/gen_mat4files.m
@@ -0,0 +1,50 @@
+% Generates mat files for loadmat unit tests
+% Uses save_matfile.m function
+% This is the version for matlab 4
+
+% work out matlab version and file suffix for test files
+global FILEPREFIX FILESUFFIX
+sepchar = '/';
+if strcmp(computer, 'PCWIN'), sepchar = '\'; end
+FILEPREFIX = [pwd sepchar 'data' sepchar];
+mlv = version;
+FILESUFFIX = ['_' mlv '_' computer '.mat'];
+
+% basic double array
+theta = 0:pi/4:2*pi;
+save_matfile('testdouble', theta);
+
+% string
+save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.')
+
+% complex
+save_matfile('testcomplex', cos(theta) + 1j*sin(theta));
+
+% asymmetric array to check indexing
+a = zeros(3, 5);
+a(:,1) = [1:3]';
+a(1,:) = 1:5;
+
+% 2D matrix
+save_matfile('testmatrix', a);
+
+% minus number - tests signed int
+save_matfile('testminus', -1);
+
+% single character
+save_matfile('testonechar', 'r');
+
+% string array
+save_matfile('teststringarray', ['one '; 'two '; 'three']);
+
+% sparse array
+save_matfile('testsparse', sparse(a));
+
+% sparse complex array
+b = sparse(a);
+b(1,1) = b(1,1) + j;
+save_matfile('testsparsecomplex', b);
+
+% Two variables in same file
+save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta')
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/gen_mat5files.m b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/gen_mat5files.m
new file mode 100644
index 0000000..9351127
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/gen_mat5files.m
@@ -0,0 +1,100 @@
+% Generates mat files for loadmat unit tests
+% This is the version for matlab 5 and higher
+% Uses save_matfile.m function
+
+% work out matlab version and file suffix for test files
+global FILEPREFIX FILESUFFIX
+FILEPREFIX = [fullfile(pwd, 'data') filesep];
+temp = ver('MATLAB');
+mlv = temp.Version;
+FILESUFFIX = ['_' mlv '_' computer '.mat'];
+
+% basic double array
+theta = 0:pi/4:2*pi;
+save_matfile('testdouble', theta);
+
+% string
+save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.')
+
+% complex
+save_matfile('testcomplex', cos(theta) + 1j*sin(theta));
+
+% asymmetric array to check indexing
+a = zeros(3, 5);
+a(:,1) = [1:3]';
+a(1,:) = 1:5;
+
+% 2D matrix
+save_matfile('testmatrix', a);
+
+% minus number - tests signed int
+save_matfile('testminus', -1);
+
+% single character
+save_matfile('testonechar', 'r');
+
+% string array
+save_matfile('teststringarray', ['one '; 'two '; 'three']);
+
+% sparse array
+save_matfile('testsparse', sparse(a));
+
+% sparse complex array
+b = sparse(a);
+b(1,1) = b(1,1) + j;
+save_matfile('testsparsecomplex', b);
+
+% Two variables in same file
+save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta')
+
+
+% struct
+save_matfile('teststruct', ...
+ struct('stringfield','Rats live on no evil star.',...
+ 'doublefield',[sqrt(2) exp(1) pi],...
+ 'complexfield',(1+1j)*[sqrt(2) exp(1) pi]));
+
+% cell
+save_matfile('testcell', ...
+ {['This cell contains this string and 3 arrays of increasing' ...
+ ' length'], 1., 1.:2., 1.:3.});
+
+% scalar cell
+save_matfile('testscalarcell', {1})
+
+% Empty cells in two cell matrices
+save_matfile('testemptycell', {1, 2, [], [], 3});
+
+% 3D matrix
+save_matfile('test3dmatrix', reshape(1:24,[2 3 4]))
+
+% nested cell array
+save_matfile('testcellnest', {1, {2, 3, {4, 5}}});
+
+% nested struct
+save_matfile('teststructnest', struct('one', 1, 'two', ...
+ struct('three', 'number 3')));
+
+% array of struct
+save_matfile('teststructarr', [struct('one', 1, 'two', 2) ...
+ struct('one', 'number 1', 'two', 'number 2')]);
+
+% matlab object
+save_matfile('testobject', inline('x'))
+
+% array of matlab objects
+%save_matfile('testobjarr', [inline('x') inline('x')])
+
+% unicode test
+if str2num(mlv) > 7 % function added 7.0.1
+ fid = fopen([FILEPREFIX 'japanese_utf8.txt']);
+ from_japan = fread(fid, 'uint8')';
+ fclose(fid);
+ save_matfile('testunicode', native2unicode(from_japan, 'utf-8'));
+end
+
+% func
+if str2num(mlv) > 7 % function pointers added recently
+ func = @afunc;
+ save_matfile('testfunc', func);
+end
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/save_matfile.m b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/save_matfile.m
new file mode 100644
index 0000000..a6ff677
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/save_matfile.m
@@ -0,0 +1,6 @@
+function save_matfile(test_name, v)
+% saves variable passed in m with filename from prefix
+
+global FILEPREFIX FILESUFFIX
+eval([test_name ' = v;']);
+save([FILEPREFIX test_name FILESUFFIX], test_name)
\ No newline at end of file
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_byteordercodes.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_byteordercodes.py
new file mode 100644
index 0000000..e8e9f97
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_byteordercodes.py
@@ -0,0 +1,29 @@
+''' Tests for byteorder module '''
+
+import sys
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+import scipy.io.matlab.byteordercodes as sibc
+
+
+def test_native():
+ native_is_le = sys.byteorder == 'little'
+ assert_(sibc.sys_is_le == native_is_le)
+
+
+def test_to_numpy():
+ if sys.byteorder == 'little':
+ assert_(sibc.to_numpy_code('native') == '<')
+ assert_(sibc.to_numpy_code('swapped') == '>')
+ else:
+ assert_(sibc.to_numpy_code('native') == '>')
+ assert_(sibc.to_numpy_code('swapped') == '<')
+ assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('='))
+ assert_(sibc.to_numpy_code('big') == '>')
+ for code in ('little', '<', 'l', 'L', 'le'):
+ assert_(sibc.to_numpy_code(code) == '<')
+ for code in ('big', '>', 'b', 'B', 'be'):
+ assert_(sibc.to_numpy_code(code) == '>')
+ assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio.py
new file mode 100644
index 0000000..2d496e9
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio.py
@@ -0,0 +1,1237 @@
+# -*- coding: utf-8 -*-
+''' Nose test generators
+
+Need function load / save / roundtrip tests
+
+'''
+import os
+from collections import OrderedDict
+from os.path import join as pjoin, dirname
+from glob import glob
+from io import BytesIO
+from tempfile import mkdtemp
+
+import warnings
+import shutil
+import gzip
+
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+ assert_equal, assert_)
+from pytest import raises as assert_raises
+
+import numpy as np
+from numpy import array
+import scipy.sparse as SP
+
+import scipy.io.matlab.byteordercodes as boc
+from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError
+from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat)
+from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader,
+ MatlabFunction, varmats_from_mat,
+ to_writeable, EmptyStructMarker)
+from scipy.io.matlab import mio5_params as mio5p
+
+test_data_path = pjoin(dirname(__file__), 'data')
+
+
+def mlarr(*args, **kwargs):
+ """Convenience function to return matlab-compatible 2-D array."""
+ arr = np.array(*args, **kwargs)
+ arr.shape = matdims(arr)
+ return arr
+
+
+# Define cases to test
+theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)
+case_table4 = [
+ {'name': 'double',
+ 'classes': {'testdouble': 'double'},
+ 'expected': {'testdouble': theta}
+ }]
+case_table4.append(
+ {'name': 'string',
+ 'classes': {'teststring': 'char'},
+ 'expected': {'teststring':
+ array(['"Do nine men interpret?" "Nine men," I nod.'])}
+ })
+case_table4.append(
+ {'name': 'complex',
+ 'classes': {'testcomplex': 'double'},
+ 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}
+ })
+A = np.zeros((3,5))
+A[0] = list(range(1,6))
+A[:,0] = list(range(1,4))
+case_table4.append(
+ {'name': 'matrix',
+ 'classes': {'testmatrix': 'double'},
+ 'expected': {'testmatrix': A},
+ })
+case_table4.append(
+ {'name': 'sparse',
+ 'classes': {'testsparse': 'sparse'},
+ 'expected': {'testsparse': SP.coo_matrix(A)},
+ })
+B = A.astype(complex)
+B[0,0] += 1j
+case_table4.append(
+ {'name': 'sparsecomplex',
+ 'classes': {'testsparsecomplex': 'sparse'},
+ 'expected': {'testsparsecomplex': SP.coo_matrix(B)},
+ })
+case_table4.append(
+ {'name': 'multi',
+ 'classes': {'theta': 'double', 'a': 'double'},
+ 'expected': {'theta': theta, 'a': A},
+ })
+case_table4.append(
+ {'name': 'minus',
+ 'classes': {'testminus': 'double'},
+ 'expected': {'testminus': mlarr(-1)},
+ })
+case_table4.append(
+ {'name': 'onechar',
+ 'classes': {'testonechar': 'char'},
+ 'expected': {'testonechar': array(['r'])},
+ })
+# Cell arrays stored as object arrays
+CA = mlarr(( # tuple for object array creation
+ [],
+ mlarr([1]),
+ mlarr([[1,2]]),
+ mlarr([[1,2,3]])), dtype=object).reshape(1,-1)
+CA[0,0] = array(
+ ['This cell contains this string and 3 arrays of increasing length'])
+case_table5 = [
+ {'name': 'cell',
+ 'classes': {'testcell': 'cell'},
+ 'expected': {'testcell': CA}}]
+CAE = mlarr(( # tuple for object array creation
+ mlarr(1),
+ mlarr(2),
+ mlarr([]),
+ mlarr([]),
+ mlarr(3)), dtype=object).reshape(1,-1)
+objarr = np.empty((1,1),dtype=object)
+objarr[0,0] = mlarr(1)
+case_table5.append(
+ {'name': 'scalarcell',
+ 'classes': {'testscalarcell': 'cell'},
+ 'expected': {'testscalarcell': objarr}
+ })
+case_table5.append(
+ {'name': 'emptycell',
+ 'classes': {'testemptycell': 'cell'},
+ 'expected': {'testemptycell': CAE}})
+case_table5.append(
+ {'name': 'stringarray',
+ 'classes': {'teststringarray': 'char'},
+ 'expected': {'teststringarray': array(
+ ['one ', 'two ', 'three'])},
+ })
+case_table5.append(
+ {'name': '3dmatrix',
+ 'classes': {'test3dmatrix': 'double'},
+ 'expected': {
+ 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}
+ })
+st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)
+dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]
+st1 = np.zeros((1,1), dtype)
+st1['stringfield'][0,0] = array(['Rats live on no evil star.'])
+st1['doublefield'][0,0] = st_sub_arr
+st1['complexfield'][0,0] = st_sub_arr * (1 + 1j)
+case_table5.append(
+ {'name': 'struct',
+ 'classes': {'teststruct': 'struct'},
+ 'expected': {'teststruct': st1}
+ })
+CN = np.zeros((1,2), dtype=object)
+CN[0,0] = mlarr(1)
+CN[0,1] = np.zeros((1,3), dtype=object)
+CN[0,1][0,0] = mlarr(2, dtype=np.uint8)
+CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)
+CN[0,1][0,2] = np.zeros((1,2), dtype=object)
+CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)
+CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)
+case_table5.append(
+ {'name': 'cellnest',
+ 'classes': {'testcellnest': 'cell'},
+ 'expected': {'testcellnest': CN},
+ })
+st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])
+st2[0,0]['one'] = mlarr(1)
+st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])
+st2[0,0]['two'][0,0]['three'] = array(['number 3'])
+case_table5.append(
+ {'name': 'structnest',
+ 'classes': {'teststructnest': 'struct'},
+ 'expected': {'teststructnest': st2}
+ })
+a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])
+a[0,0]['one'] = mlarr(1)
+a[0,0]['two'] = mlarr(2)
+a[0,1]['one'] = array(['number 1'])
+a[0,1]['two'] = array(['number 2'])
+case_table5.append(
+ {'name': 'structarr',
+ 'classes': {'teststructarr': 'struct'},
+ 'expected': {'teststructarr': a}
+ })
+ODT = np.dtype([(n, object) for n in
+ ['expr', 'inputExpr', 'args',
+ 'isEmpty', 'numArgs', 'version']])
+MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')
+m0 = MO[0,0]
+m0['expr'] = array(['x'])
+m0['inputExpr'] = array([' x = INLINE_INPUTS_{1};'])
+m0['args'] = array(['x'])
+m0['isEmpty'] = mlarr(0)
+m0['numArgs'] = mlarr(1)
+m0['version'] = mlarr(1)
+case_table5.append(
+ {'name': 'object',
+ 'classes': {'testobject': 'object'},
+ 'expected': {'testobject': MO}
+ })
+fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')
+u_str = fp_u_str.read().decode('utf-8')
+fp_u_str.close()
+case_table5.append(
+ {'name': 'unicode',
+ 'classes': {'testunicode': 'char'},
+ 'expected': {'testunicode': array([u_str])}
+ })
+case_table5.append(
+ {'name': 'sparse',
+ 'classes': {'testsparse': 'sparse'},
+ 'expected': {'testsparse': SP.coo_matrix(A)},
+ })
+case_table5.append(
+ {'name': 'sparsecomplex',
+ 'classes': {'testsparsecomplex': 'sparse'},
+ 'expected': {'testsparsecomplex': SP.coo_matrix(B)},
+ })
+case_table5.append(
+ {'name': 'bool',
+ 'classes': {'testbools': 'logical'},
+ 'expected': {'testbools':
+ array([[True], [False]])},
+ })
+
+case_table5_rt = case_table5[:]
+# Inline functions can't be concatenated in matlab, so RT only
+case_table5_rt.append(
+ {'name': 'objectarray',
+ 'classes': {'testobjectarray': 'object'},
+ 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})
+
+
+def types_compatible(var1, var2):
+ """Check if types are same or compatible.
+
+ 0-D numpy scalars are compatible with bare python scalars.
+ """
+ type1 = type(var1)
+ type2 = type(var2)
+ if type1 is type2:
+ return True
+ if type1 is np.ndarray and var1.shape == ():
+ return type(var1.item()) is type2
+ if type2 is np.ndarray and var2.shape == ():
+ return type(var2.item()) is type1
+ return False
+
+
def _check_level(label, expected, actual):
    """Check one level of a potentially nested array.

    Recursively compares *actual* against *expected*, using *label* to
    build informative failure messages.  Sparse matrices are compared by
    dense values, object arrays element-wise, record arrays field-wise,
    strings/bools exactly, and numeric data to 5 decimals.
    """
    if SP.issparse(expected):  # allow different types of sparse matrices
        assert_(SP.issparse(actual))
        assert_array_almost_equal(actual.todense(),
                                  expected.todense(),
                                  err_msg=label,
                                  decimal=5)
        return
    # Check types are as expected
    assert_(types_compatible(expected, actual),
            "Expected type %s, got %s at %s" %
            (type(expected), type(actual), label))
    # A field in a record array may not be an ndarray
    # A scalar from a record array will be type np.void
    if not isinstance(expected,
                      (np.void, np.ndarray, MatlabObject)):
        assert_equal(expected, actual)
        return
    # This is an ndarray-like thing
    assert_(expected.shape == actual.shape,
            msg='Expected shape %s, got %s at %s' % (expected.shape,
                                                     actual.shape,
                                                     label))
    ex_dtype = expected.dtype
    if ex_dtype.hasobject:  # array of objects
        if isinstance(expected, MatlabObject):
            assert_equal(expected.classname, actual.classname)
        for i, ev in enumerate(expected):
            level_label = "%s, [%d], " % (label, i)
            _check_level(level_label, ev, actual[i])
        return
    if ex_dtype.fields:  # probably recarray
        for fn in ex_dtype.fields:
            level_label = "%s, field %s, " % (label, fn)
            _check_level(level_label,
                         expected[fn], actual[fn])
        return
    # np.unicode_ was removed in NumPy 2.0; np.str_ is the same type on
    # both NumPy 1.x and 2.x, so use it instead.
    if ex_dtype.type in (str,  # string or bool
                         np.str_,
                         np.bool_):
        assert_equal(actual, expected, err_msg=label)
        return
    # Something numeric
    assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)
+
+
def _load_check_case(name, files, case):
    """Load each file in *files* and compare every variable against *case*.

    *case* maps variable names to the values expected after loadmat.
    """
    for file_name in files:
        matdict = loadmat(file_name, struct_as_record=True)
        label = "test %s; file %s" % (name, file_name)
        for k, expected in case.items():
            k_label = "%s, variable %s" % (label, k)
            assert_(k in matdict, "Missing key at %s" % k_label)
            _check_level(k_label, expected, matdict[k])
+
+
def _whos_check_case(name, files, case, classes):
    """Check whosmat output (name, shape, matlab class) for each file."""
    for file_name in files:
        label = "test %s; file %s" % (name, file_name)

        whos = whosmat(file_name)

        expected_whos = [
            (k, expected.shape, classes[k]) for k, expected in case.items()]

        # Variable order is not significant, so compare sorted lists
        whos.sort()
        expected_whos.sort()
        assert_equal(whos, expected_whos,
                     "%s: %r != %r" % (label, whos, expected_whos)
                     )
+
+
+# Round trip tests
def _rt_check_case(name, expected, format):
    """Save *expected* to an in-memory stream, reload, and verify it."""
    mat_stream = BytesIO()
    savemat(mat_stream, expected, format=format)
    mat_stream.seek(0)
    _load_check_case(name, [mat_stream], expected)
+
+
+# generator for load tests
def test_load():
    """Check loading of all on-disk test files for case tables 4 and 5."""
    for case in case_table4 + case_table5:
        name = case['name']
        expected = case['expected']
        filt = pjoin(test_data_path, 'test%s_*.mat' % name)
        files = glob(filt)
        # Guard against a silently-empty glob (missing test data)
        assert_(len(files) > 0,
                "No files for test %s using filter %s" % (name, filt))
        _load_check_case(name, files, expected)
+
+
+# generator for whos tests
def test_whos():
    """Check whosmat output for all on-disk test files."""
    for case in case_table4 + case_table5:
        name = case['name']
        expected = case['expected']
        classes = case['classes']
        filt = pjoin(test_data_path, 'test%s_*.mat' % name)
        files = glob(filt)
        # Guard against a silently-empty glob (missing test data)
        assert_(len(files) > 0,
                "No files for test %s using filter %s" % (name, filt))
        _whos_check_case(name, files, expected, classes)
+
+
+# generator for round trip tests
def test_round_trip():
    """Round-trip every case; use format 4 only where the case supports it.

    Fix: the set of format-4-capable names is loop-invariant, but was
    rebuilt on every iteration with a comprehension that confusingly
    reused the loop variable name ``case``.  Build it once, up front.
    """
    case_table4_names = {case['name'] for case in case_table4}
    for case in case_table4 + case_table5_rt:
        name = case['name'] + '_round_trip'
        expected = case['expected']
        if case['name'] in case_table4_names:
            formats = ['4', '5']
        else:
            formats = ['5']
        for format in formats:
            _rt_check_case(name, expected, format)
+
+
def test_gzip_simple():
    """A mat file written through a gzip stream can be read back."""
    xdense = np.zeros((20,20))
    xdense[2,3] = 2.3
    xdense[4,5] = 4.5
    x = SP.csc_matrix(xdense)

    name = 'gzip_test'
    expected = {'x':x}
    format = '4'

    tmpdir = mkdtemp()
    try:
        fname = pjoin(tmpdir,name)
        mat_stream = gzip.open(fname, mode='wb')
        savemat(mat_stream, expected, format=format)
        mat_stream.close()

        mat_stream = gzip.open(fname, mode='rb')
        actual = loadmat(mat_stream, struct_as_record=True)
        mat_stream.close()
    finally:
        # always clean up the temporary directory, even on failure
        shutil.rmtree(tmpdir)

    assert_array_almost_equal(actual['x'].todense(),
                              expected['x'].todense(),
                              err_msg=repr(actual))
+
+
def test_multiple_open():
    """Ticket #1039 (Windows): files must not be left open by load/save.

    When a stream (not a filename) is passed in, it must be left open.
    """
    tmpdir = mkdtemp()
    try:
        x = dict(x=np.zeros((2, 2)))

        fname = pjoin(tmpdir, "a.mat")

        # Check that file is not left open
        savemat(fname, x)
        os.unlink(fname)  # would fail on Windows if the handle were open
        savemat(fname, x)
        loadmat(fname)
        os.unlink(fname)

        # Check that stream is left open
        f = open(fname, 'wb')
        savemat(f, x)
        f.seek(0)  # raises ValueError if savemat closed the stream
        f.close()

        f = open(fname, 'rb')
        loadmat(f)
        f.seek(0)  # raises ValueError if loadmat closed the stream
        f.close()
    finally:
        shutil.rmtree(tmpdir)
+
+
def test_mat73():
    """Any HDF5 (v7.3) mat file must raise NotImplementedError.

    Fix: use a context manager so the file handle is closed even when
    the assertion fails; the original leaked the handle on failure.
    """
    filenames = glob(
        pjoin(test_data_path, 'testhdf5*.mat'))
    assert_(len(filenames) > 0)
    for filename in filenames:
        with open(filename, 'rb') as fp:
            assert_raises(NotImplementedError,
                          loadmat,
                          fp,
                          struct_as_record=True)
+
+
def test_warnings():
    """loadmat on a plain file must not emit any warning.

    Echo of historical behavior, which warned when a search for mat
    files on the Python system path was triggered.
    """
    fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')
    with warnings.catch_warnings():
        warnings.simplefilter('error')  # promote any warning to an error
        # This should not generate a warning
        loadmat(fname, struct_as_record=True)
        # This neither
        loadmat(fname, struct_as_record=False)
+
+
def test_regression_653():
    """A dict with only invalid field names saves as an empty struct.

    Regression test for ticket #653: this used to raise an error.
    """
    stream = BytesIO()
    savemat(stream, {'d':{1:2}}, format='5')
    loaded = loadmat(stream)['d']
    # An empty struct comes back as a 1x1 object array holding None
    assert_equal(loaded.shape, (1,1))
    assert_equal(loaded.dtype, np.dtype(object))
    assert_(loaded[0,0] is None)
+
+
def test_structname_len():
    """Struct field names up to 31 chars save in format 5; longer raise."""
    lim = 31
    ok_name = 'a' * lim
    st = np.zeros((1, 1), dtype=[(ok_name, object)])
    savemat(BytesIO(), {'longstruct': st}, format='5')
    bad_name = 'a' * (lim + 1)
    st = np.zeros((1, 1), dtype=[(bad_name, object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': st}, format='5')
+
+
def test_4_and_long_field_names_incompatible():
    """The long_field_names option is not supported by the matlab 4 format."""
    my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'my_struct':my_struct}, format='4', long_field_names=True)
+
+
def test_long_field_names():
    """With long_field_names the limit is 63 chars; 64 must raise."""
    lim = 63
    fldname = 'a' * lim
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)
    fldname = 'a' * (lim+1)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': st1}, format='5',long_field_names=True)
+
+
def test_long_field_names_in_struct():
    """long_field_names must propagate into nested structs.

    Regression test - the option was erased if you passed a struct
    within a struct.
    """
    lim = 63
    fldname = 'a' * lim
    cell = np.ndarray((1,2),dtype=object)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    cell[0,0] = st1
    cell[0,1] = st1
    savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)
    #
    # Check to make sure it fails with long field names off
    #
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': cell}, format='5', long_field_names=False)
+
+
def test_cell_with_one_thing_in_it():
    """Saving 1x2 and 1x1 cell arrays of strings both succeed.

    Regression test - a 1x1 object (cell) array holding a single string
    used to fail to save, while the 1x2 case worked.
    """
    for strings in (['Hello', 'World'], ['Hello, world']):
        cells = np.ndarray((1, len(strings)), dtype=object)
        for col, text in enumerate(strings):
            cells[0, col] = text
        savemat(BytesIO(), {'x': cells}, format='5')
+
+
def test_writer_properties():
    """Tests getting, setting of properties of the matrix writer."""
    mfw = MatFile5Writer(BytesIO())
    assert_equal(mfw.global_vars, [])
    mfw.global_vars = ['avar']
    assert_equal(mfw.global_vars, ['avar'])
    assert_equal(mfw.unicode_strings, False)
    mfw.unicode_strings = True
    assert_equal(mfw.unicode_strings, True)
    assert_equal(mfw.long_field_names, False)
    mfw.long_field_names = True
    assert_equal(mfw.long_field_names, True)
+
+
def test_use_small_element():
    """Check that the small data element format kicks in for short names.

    Variable names of 4 bytes or fewer fit the small data element (SDE);
    the size saved should therefore jump between name lengths 4 and 5.
    """
    sio = BytesIO()
    wtr = MatFile5Writer(sio)
    # First check size for no sde for name
    arr = np.zeros(10)
    wtr.put_variables({'aaaaa': arr})
    w_sz = len(sio.getvalue())
    # Check small name results in largish difference in size
    sio.truncate(0)
    sio.seek(0)
    wtr.put_variables({'aaaa': arr})
    assert_(w_sz - len(sio.getvalue()) > 4)
    # Whereas increasing name size makes less difference
    sio.truncate(0)
    sio.seek(0)
    wtr.put_variables({'aaaaaa': arr})
    assert_(len(sio.getvalue()) - w_sz < 4)
+
+
def test_save_dict():
    """dict and OrderedDict both save as record arrays (matlab structs)."""
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in ((dict, False), (OrderedDict, True)):
        # Initialize with tuples to keep order for OrderedDict
        stream = BytesIO()
        savemat(stream, {'dict': dict_type([('a', 1), ('b', 2)])})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        # Ordered input must come back in ab order; unordered input may
        # come back in either field order.
        if is_ordered or vals.dtype.names[0] == 'a':
            assert_array_equal(vals, ab_exp)
        else:
            assert_array_equal(vals, ba_exp)
+
+
def test_1d_shape():
    """1-D arrays save as row vectors by default; oned_as overrides."""
    arr = np.arange(5)
    for format in ('4', '5'):
        # (oned_as kwarg or None for the default, expected loaded shape)
        for oned_as, exp_shape in ((None, (1, 5)),
                                   ('column', (5, 1)),
                                   ('row', (1, 5))):
            stream = BytesIO()
            extra = {} if oned_as is None else {'oned_as': oned_as}
            savemat(stream, {'oned': arr}, format=format, **extra)
            vals = loadmat(stream)
            assert_equal(vals['oned'].shape, exp_shape)
+
+
def test_compression():
    """Compressed writes round-trip and are smaller than raw writes."""
    arr = np.zeros(100).reshape((5,20))
    arr[2,10] = 1

    def _saved(vardict, **kw):
        # Save to a fresh stream; return the stream and its byte length
        stream = BytesIO()
        savemat(stream, vardict, **kw)
        return stream, len(stream.getvalue())

    stream, raw_len = _saved({'arr': arr})
    assert_array_equal(loadmat(stream)['arr'], arr)
    stream, compressed_len = _saved({'arr': arr}, do_compression=True)
    assert_array_equal(loadmat(stream)['arr'], arr)
    assert_(raw_len > compressed_len)
    # Two variables in one file, with and without compression
    arr2 = arr.copy()
    arr2[0,0] = 1
    for compress in (False, True):
        stream, _ = _saved({'arr': arr, 'arr2': arr2},
                           do_compression=compress)
        assert_array_equal(loadmat(stream)['arr2'], arr2)
+
+
def test_single_object():
    """Saving a 0-D object array must not raise."""
    stream = BytesIO()
    savemat(stream, {'A':np.array(1, dtype=object)})
+
+
def test_skip_variable():
    """Skipping over the first of two variables in a MAT file works.

    Uses mat_reader_factory and get_variables to read only the second
    variable.
    """
    # Test skipping over the first of two variables in a MAT file
    # using mat_reader_factory and put_variables to read them in.
    #
    # This is a regression test of a problem that's caused by
    # using the compressed file reader seek instead of the raw file
    # I/O seek when skipping over a compressed chunk.
    #
    # The problem arises when the chunk is large: this file has
    # a 256x256 array of random (uncompressible) doubles.
    #
    filename = pjoin(test_data_path,'test_skip_variable.mat')
    #
    # Prove that it loads with loadmat
    #
    d = loadmat(filename, struct_as_record=True)
    assert_('first' in d)
    assert_('second' in d)
    #
    # Make the factory
    #
    factory, file_opened = mat_reader_factory(filename, struct_as_record=True)
    #
    # This is where the factory breaks with an error in MatMatrixGetter.to_next
    #
    d = factory.get_variables('second')
    assert_('second' in d)
    factory.mat_stream.close()
+
+
def test_empty_struct():
    """Empty structs load as 1x1 object arrays holding None (ticket 885)."""
    # ticket 885
    filename = pjoin(test_data_path,'test_empty_struct.mat')
    # before ticket fix, this would crash with ValueError, empty data
    # type
    d = loadmat(filename, struct_as_record=True)
    a = d['a']
    assert_equal(a.shape, (1,1))
    assert_equal(a.dtype, np.dtype(object))
    assert_(a[0,0] is None)
    stream = BytesIO()
    arr = np.array((), dtype='U')
    # before ticket fix, this used to give data type not understood
    savemat(stream, {'arr':arr})
    d = loadmat(stream)
    a2 = d['arr']
    assert_array_equal(a2, arr)
+
+
def test_save_empty_dict():
    """An empty dict saves as an empty matlab struct and loads back."""
    stream = BytesIO()
    savemat(stream, {'arr': {}})
    loaded = loadmat(stream)['arr']
    # Empty struct loads as a 1x1 object array containing None
    assert_equal(loaded.shape, (1, 1))
    assert_equal(loaded.dtype, np.dtype(object))
    assert_(loaded[0, 0] is None)
+
+
def assert_any_equal(output, alternatives):
    """ Assert `output` is equal to at least one element in `alternatives`
    """
    # any() short-circuits on the first match, like the original loop
    assert_(any(np.all(output == candidate) for candidate in alternatives))
+
+
def test_to_writeable():
    """Unit tests for the to_writeable conversion helper.

    Fixes: the empty ``class C`` was defined but never used (the code
    instantiated the earlier ``klass`` via ``c()``); a comment mis-stated
    which string keys are discarded; deprecated ``'|O8'`` dtype strings
    replaced with the equivalent ``object``.
    """
    res = to_writeable(np.array([1]))  # pass through ndarrays
    assert_equal(res.shape, (1,))
    assert_array_equal(res, 1)
    # Dict fields can be written in any order
    expected1 = np.array([(1, 2)], dtype=[('a', object), ('b', object)])
    expected2 = np.array([(2, 1)], dtype=[('b', object), ('a', object)])
    alternatives = (expected1, expected2)
    assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)
    # Fields with underscores discarded
    assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)
    # Not-string fields discarded
    assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)
    # String fields that are not valid Python identifiers discarded
    assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)
    # Object with field names is equivalent

    class klass(object):
        pass

    c = klass
    c.a = 1
    c.b = 2
    assert_any_equal(to_writeable(c), alternatives)
    # empty list and tuple go to empty array
    res = to_writeable([])
    assert_equal(res.shape, (0,))
    assert_equal(res.dtype.type, np.float64)
    res = to_writeable(())
    assert_equal(res.shape, (0,))
    assert_equal(res.dtype.type, np.float64)
    # None -> None
    assert_(to_writeable(None) is None)
    # String to strings
    assert_equal(to_writeable('a string').dtype.type, np.str_)
    # Scalars to NumPy scalars
    res = to_writeable(1)
    assert_equal(res.shape, ())
    assert_equal(res.dtype.type, np.array(1).dtype.type)
    assert_array_equal(res, 1)
    # Empty dict returns EmptyStructMarker
    assert_(to_writeable({}) is EmptyStructMarker)
    # Object does not have (even empty) __dict__
    assert_(to_writeable(object()) is None)
    # Custom object does have empty __dict__, returns EmptyStructMarker

    class C(object):
        pass

    # Fixed: previously instantiated the earlier `klass` via `c()`,
    # leaving `C` unused; the intent is to test the truly-empty class.
    assert_(to_writeable(C()) is EmptyStructMarker)
    # dict keys with legal characters are convertible
    res = to_writeable({'a': 1})['a']
    assert_equal(res.shape, (1,))
    assert_equal(res.dtype.type, np.object_)
    # Only fields with illegal characters, falls back to EmptyStruct
    assert_(to_writeable({'1':1}) is EmptyStructMarker)
    assert_(to_writeable({'_a':1}) is EmptyStructMarker)
    # Unless there are valid fields, in which case structured array
    assert_equal(to_writeable({'1':1, 'f': 2}),
                 np.array([(2,)], dtype=[('f', object)]))
+
+
def test_recarray():
    """Check roundtrip of a structured (record) array.

    With struct_as_record=False fields come back as attributes; with
    True they come back as an object-dtype record array.
    """
    dt = [('f1', 'f8'),
          ('f2', 'S10')]
    arr = np.zeros((2,), dtype=dt)
    arr[0]['f1'] = 0.5
    arr[0]['f2'] = 'python'
    arr[1]['f1'] = 99
    arr[1]['f2'] = 'not perl'
    stream = BytesIO()
    savemat(stream, {'arr': arr})
    d = loadmat(stream, struct_as_record=False)
    a20 = d['arr'][0,0]
    assert_equal(a20.f1, 0.5)
    assert_equal(a20.f2, 'python')
    d = loadmat(stream, struct_as_record=True)
    a20 = d['arr'][0,0]
    assert_equal(a20['f1'], 0.5)
    assert_equal(a20['f2'], 'python')
    # structs always come back as object types
    assert_equal(a20.dtype, np.dtype([('f1', 'O'),
                                      ('f2', 'O')]))
    a21 = d['arr'].flat[1]
    assert_equal(a21['f1'], 99)
    assert_equal(a21['f2'], 'not perl')
+
+
def test_save_object():
    """An object's __dict__ saves as a struct and round-trips."""
    class C(object):
        pass
    c = C()
    c.field1 = 1
    c.field2 = 'a string'
    stream = BytesIO()
    savemat(stream, {'c': c})
    d = loadmat(stream, struct_as_record=False)
    c2 = d['c'][0,0]
    assert_equal(c2.field1, 1)
    assert_equal(c2.field2, 'a string')
    d = loadmat(stream, struct_as_record=True)
    c2 = d['c'][0,0]
    assert_equal(c2['field1'], 1)
    assert_equal(c2['field2'], 'a string')
+
+
def test_read_opts():
    """Reader options take effect both at init and when set afterwards."""
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1,6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    # NOTE(review): this re-checks the earlier `rarr`; it looks like the
    # intent was to re-read with squeeze_me switched off - confirm.
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc.
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)
+
+
def test_empty_string():
    """Reading an empty string must not raise, and must round-trip."""
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # Empty string round trip. Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char. There is no way of having an array of char that
    # is not empty, but contains an empty string.
    stream = BytesIO()
    savemat(stream, {'a': np.array([''])})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': np.array([], dtype='U1')})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.close()
+
+
def test_corrupted_data():
    """Corrupted zlib payloads raise the appropriate error on read."""
    import zlib
    for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
                       (zlib.error, 'corrupted_zlib_checksum.mat')]:
        with open(pjoin(test_data_path, fname), 'rb') as fp:
            rdr = MatFile5Reader(fp)
            assert_raises(exc, rdr.get_variables)
+
+
def test_corrupted_data_check_can_be_disabled():
    """With integrity checking off, corrupted zlib data reads silently."""
    with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:
        rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)
        rdr.get_variables()
+
+
def test_read_both_endian():
    """Big- and little-endian files read to the same values."""
    # make sure big- and little- endian data is read correctly
    for fname in ('big_endian.mat', 'little_endian.mat'):
        fp = open(pjoin(test_data_path, fname), 'rb')
        rdr = MatFile5Reader(fp)
        d = rdr.get_variables()
        fp.close()
        assert_array_equal(d['strings'],
                           np.array([['hello'],
                                     ['world']], dtype=object))
        assert_array_equal(d['floats'],
                           np.array([[2., 3.],
                                     [3., 4.]], dtype=np.float32))
+
+
def test_write_opposite_endian():
    """Byte-swapped (other-endian) input arrays are written correctly.

    We don't support writing opposite endian .mat files, but we need to
    behave correctly if the user supplies an other-endian NumPy array.

    Fix: ``ndarray.newbyteorder()`` was removed in NumPy 2.0; viewing
    the swapped data with a byte-swapped dtype is the equivalent,
    version-safe spelling.
    """
    float_arr = np.array([[2., 3.],
                          [3., 4.]])
    int_arr = np.arange(6).reshape((2, 3))
    uni_arr = np.array(['hello', 'world'], dtype='U')

    def _other_endian(arr):
        # swap the bytes, then reinterpret them with the swapped dtype:
        # values stay the same, byte order flips
        return arr.byteswap().view(arr.dtype.newbyteorder())

    stream = BytesIO()
    savemat(stream, {'floats': _other_endian(float_arr),
                     'ints': _other_endian(int_arr),
                     'uni_arr': _other_endian(uni_arr)})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['floats'], float_arr)
    assert_array_equal(d['ints'], int_arr)
    assert_array_equal(d['uni_arr'], uni_arr)
    stream.close()
+
+
def test_logical_array():
    """Logical data loads with bool dtype when mat_dtype=True.

    The roundtrip test doesn't verify that we load the data up with the
    correct (bool) dtype.
    """
    with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:
        rdr = MatFile5Reader(fobj, mat_dtype=True)
        d = rdr.get_variables()
    x = np.array([[True], [False]], dtype=np.bool_)
    assert_array_equal(d['testbools'], x)
    assert_equal(d['testbools'].dtype, x.dtype)
+
+
def test_logical_out_type():
    """bool arrays are written as uint8 with the logical flag set.

    See gh-4022.
    """
    stream = BytesIO()
    barr = np.array([False, True, False])
    savemat(stream, {'barray': barr})
    stream.seek(0)
    reader = MatFile5Reader(stream)
    reader.initialize_read()
    reader.read_file_header()
    hdr, _ = reader.read_var_header()
    assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)
    assert_equal(hdr.is_logical, True)
    var = reader.read_var_array(hdr, False)
    assert_equal(var.dtype.type, np.uint8)
+
+
def test_mat4_3d():
    """Writing a 3-D array to a matlab 4 file must raise ValueError."""
    cube = np.arange(24).reshape((2, 3, 4))
    assert_raises(ValueError, savemat, BytesIO(), {'a': cube}, True, '4')
+
+
def test_func_read():
    """Matlab function handles read as MatlabFunction; writing them raises."""
    func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')
    fp = open(func_eg, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_(isinstance(d['testfunc'], MatlabFunction))
    stream = BytesIO()
    wtr = MatFile5Writer(stream)
    assert_raises(MatWriteError, wtr.put_variables, d)
+
+
def test_mat_dtype():
    """mat_dtype=True restores matlab's dtype (float) vs stored (uint)."""
    double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')
    fp = open(double_eg, 'rb')
    rdr = MatFile5Reader(fp, mat_dtype=False)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['testmatrix'].dtype.kind, 'u')

    fp = open(double_eg, 'rb')
    rdr = MatFile5Reader(fp, mat_dtype=True)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['testmatrix'].dtype.kind, 'f')
+
+
def test_sparse_in_struct():
    """A sparse matrix stored inside a struct round-trips correctly.

    Regression test of a bug found by DC: the Cython code insisted on an
    ndarray return type but got a sparse matrix.
    """
    stream = BytesIO()
    savemat(stream, {'a': {'sparsefield': SP.coo_matrix(np.eye(4))}})
    loaded = loadmat(stream, struct_as_record=True)
    assert_array_equal(loaded['a'][0, 0]['sparsefield'].todense(),
                       np.eye(4))
+
+
def test_mat_struct_squeeze():
    """loadmat with squeeze_me must not error on mat_struct objects."""
    stream = BytesIO()
    savemat(stream, {'st': {'one': 1, 'two': 2}})
    # no error without squeeze
    loadmat(stream, struct_as_record=False)
    # previously errored with squeeze_me=True on mat_struct
    loadmat(stream, struct_as_record=False, squeeze_me=True)
+
+
def test_scalar_squeeze():
    """squeeze_me reduces 1x1 values to scalars of the natural type."""
    stream = BytesIO()
    savemat(stream, {'scalar': [[0.1]],
                     'string': 'my name',
                     'st': {'one': 1, 'two': 2}})
    loaded = loadmat(stream, squeeze_me=True)
    assert_(isinstance(loaded['scalar'], float))
    assert_(isinstance(loaded['string'], str))
    assert_(isinstance(loaded['st'], np.ndarray))
+
+
def test_str_round():
    """String arrays round-trip; shorter strings pad with spaces.

    From a report by Angus McMorland on the mailing list, 3 May 2010:
    saving used to scramble character order.

    Fixes: ``shape=a.shape`` referenced an undefined name ``a`` (the
    shape wanted is ``in_arr.shape``), and the Fortran-ordered save was
    checked against the *stale* ``res`` from the first load, so it was
    never actually verified - reload before asserting.
    """
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tobytes(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)
+
+
def test_fieldnames():
    """Struct field names survive a save/load round trip."""
    stream = BytesIO()
    savemat(stream, {'a': {'a': 1, 'b': 2}})
    loaded = loadmat(stream)
    assert_equal(set(loaded['a'].dtype.names), set(('a', 'b')))
+
+
def test_loadmat_varnames():
    """Test that we can get just one variable from a mat file using loadmat."""
    mat5_sys_names = ['__globals__',
                      '__header__',
                      '__version__']
    for eg_file, sys_v_names in (
        (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin(
            test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):
        vars = loadmat(eg_file)
        assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names='a')
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['a'])
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['theta'])
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=('theta',))
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=[])
        assert_equal(set(vars.keys()), set(sys_v_names))
        # loadmat must not mutate the passed-in variable_names list
        vnames = ['theta']
        vars = loadmat(eg_file, variable_names=vnames)
        assert_equal(vnames, ['theta'])
+
+
def test_round_types():
    """Numeric dtypes are preserved through a save/load round trip."""
    arr = np.arange(10)
    stream = BytesIO()
    all_dtypes = ('f8', 'f4', 'i8', 'i4', 'i2', 'i1',
                  'u8', 'u4', 'u2', 'u1', 'c16', 'c8')
    for dts in all_dtypes:
        # rewind and clear the stream between rounds
        stream.truncate(0)
        stream.seek(0)
        savemat(stream, {'arr': arr.astype(dts)})
        assert_equal(np.dtype(dts), loadmat(stream)['arr'].dtype)
+
+
def test_varmats_from_mat():
    """Make a mat file with several variables, write it, read it back."""
    names_vars = (('arr', mlarr(np.arange(10))),
                  ('mystr', mlarr('a string')),
                  ('mynum', mlarr(10)))

    # Dict like thing to give variables in defined order
    class C(object):
        def items(self):
            return names_vars
    stream = BytesIO()
    savemat(stream, C())
    varmats = varmats_from_mat(stream)
    assert_equal(len(varmats), 3)
    for i in range(3):
        name, var_stream = varmats[i]
        exp_name, exp_res = names_vars[i]
        assert_equal(name, exp_name)
        res = loadmat(var_stream)
        assert_array_equal(res[name], exp_res)
+
+
def test_one_by_zero():
    """Test 1x0 chars get read correctly."""
    func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
    fp = open(func_eg, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['var'].shape, (0,))
+
+
def test_load_mat4_le():
    """Little-endian mat4 float64 matrices load correctly everywhere.

    We were getting byte order wrong when reading little-endian float64
    dense matrices on big-endian platforms.
    """
    mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
    vars = loadmat(mat4_fname)
    assert_array_equal(vars['a'], [[0.1, 1.2]])
+
+
def test_unicode_mat4():
    """Mat4 stores unicode as latin-1; the value must round-trip."""
    stream = BytesIO()
    original = {'second_cat': 'Schrödinger'}
    savemat(stream, original, format='4')
    reloaded = loadmat(stream)
    assert_equal(reloaded['second_cat'], original['second_cat'])
+
+
def test_logical_sparse():
    """Logical sparse matrices stored as bytes read correctly.

    See https://github.com/scipy/scipy/issues/3539.
    In some files saved by MATLAB, the sparse data elements (Real Part
    Subelement in MATLAB speak) are stored with apparent type double
    (miDOUBLE) but are in fact single bytes.
    """
    filename = pjoin(test_data_path,'logical_sparse.mat')
    # Before fix, this would crash with:
    # ValueError: indices and data should have the same size
    d = loadmat(filename, struct_as_record=True)
    log_sp = d['sp_log_5_4']
    assert_(isinstance(log_sp, SP.csc_matrix))
    assert_equal(log_sp.dtype.type, np.bool_)
    assert_array_equal(log_sp.toarray(),
                       [[True, True, True, False],
                        [False, False, True, False],
                        [False, False, True, False],
                        [False, False, False, False],
                        [False, False, False, False]])
+
+
def test_empty_sparse():
    """Empty sparse matrices round-trip and are written with nzmax 1."""
    sio = BytesIO()
    import scipy.sparse
    empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
    savemat(sio, dict(x=empty_sparse))
    sio.seek(0)
    res = loadmat(sio)
    assert_array_equal(res['x'].shape, empty_sparse.shape)
    assert_array_equal(res['x'].todense(), 0)
    # Do empty sparse matrices get written with max nnz 1?
    # See https://github.com/scipy/scipy/issues/4208
    sio.seek(0)
    reader = MatFile5Reader(sio)
    reader.initialize_read()
    reader.read_file_header()
    hdr, _ = reader.read_var_header()
    assert_equal(hdr.nzmax, 1)
+
+
def test_empty_mat_error():
    """An empty (zero-byte) mat file raises the specific MatReadError."""
    sio = BytesIO()
    assert_raises(MatReadError, loadmat, sio)
+
+
def test_miuint32_compromise():
    """Reader accepts miUINT32 in place of miINT32, but checks signs."""
    # mat file with miUINT32 for miINT32, but OK values
    filename = pjoin(test_data_path, 'miuint32_for_miint32.mat')
    res = loadmat(filename)
    assert_equal(res['an_array'], np.arange(10)[None, :])
    # mat file with miUINT32 for miINT32, with negative value
    filename = pjoin(test_data_path, 'bad_miuint32.mat')
    with assert_raises(ValueError):
        loadmat(filename)
+
+
def test_miutf8_for_miint8_compromise():
    """Reader accepts ascii stored as miUTF8 for array names only."""
    filename = pjoin(test_data_path, 'miutf8_array_name.mat')
    res = loadmat(filename)
    assert_equal(res['array_name'], [[1]])
    # mat file with non-ascii utf8 name raises error
    filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')
    with assert_raises(ValueError):
        loadmat(filename)
+
+
def test_bad_utf8():
    """Reader decodes invalid UTF-8 with the 'replace' error handler."""
    filename = pjoin(test_data_path,'broken_utf8.mat')
    res = loadmat(filename)
    assert_equal(res['bad_string'],
                 b'\x80 am broken'.decode('utf8', 'replace'))
+
+
def test_save_unicode_field(tmpdir):
    """Saving a dict with unicode keys and values must not raise."""
    filename = os.path.join(str(tmpdir), 'test.mat')
    test_dict = {u'a':{u'b':1,u'c':'test_str'}}
    savemat(filename, test_dict)
+
+
def test_filenotfound():
    """loadmat raises IOError for missing files, with or without .mat."""
    for missing in ("NotExistentFile00.mat", "NotExistentFile00"):
        assert_raises(IOError, loadmat, missing)
+
+
def test_simplify_cells():
    """simplify_cells=True yields plain dicts instead of record arrays."""
    filename = pjoin(test_data_path, 'testsimplecell.mat')
    res1 = loadmat(filename, simplify_cells=True)
    res2 = loadmat(filename, simplify_cells=False)
    assert_(isinstance(res1["s"], dict))
    assert_(isinstance(res2["s"], np.ndarray))
    assert_array_equal(res1["s"]["mycell"], np.array(["a", "b", "c"]))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio5_utils.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio5_utils.py
new file mode 100644
index 0000000..4e0f627
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio5_utils.py
@@ -0,0 +1,181 @@
+""" Testing mio5_utils Cython module
+
+"""
+import sys
+
+from io import BytesIO
+cStringIO = BytesIO
+
+import numpy as np
+
+from numpy.testing import assert_array_equal, assert_equal, assert_
+from pytest import raises as assert_raises
+
+import scipy.io.matlab.byteordercodes as boc
+import scipy.io.matlab.streams as streams
+import scipy.io.matlab.mio5_params as mio5p
+import scipy.io.matlab.mio5_utils as m5u
+
+
def test_byteswap():
    """byteswap_u4 matches numpy's byteswap and is its own inverse."""
    for val in (1, 0x100, 0x10000):
        arr = np.array(val, dtype=np.uint32)
        swapped = m5u.byteswap_u4(arr)
        assert_equal(arr.byteswap().item(), swapped)
        # Swapping twice must restore the original value.
        assert_equal(arr.item(), m5u.byteswap_u4(swapped))
+
+
def _make_tag(base_dt, val, mdtype, sde=False):
    """Make a simple matlab tag, full or small data element (SDE).

    Parameters
    ----------
    base_dt : dtype-like
        Base dtype of the stored value.
    val : scalar
        Value to store in the tag.
    mdtype : int
        MATLAB data type code for the tag header.
    sde : bool, optional
        If True build a small data element (4-byte header); otherwise a
        full tag (8-byte header, data padded to an 8-byte boundary).

    Returns
    -------
    tag : ndarray
        One-element structured array laid out like the binary tag.
    """
    base_dt = np.dtype(base_dt)
    bo = boc.to_numpy_code(base_dt.byteorder)
    byte_count = base_dt.itemsize
    if not sde:
        udt = bo + 'u4'
        # Pad to the next 8-byte boundary.  The previous expression
        # ``8 - (byte_count % 8)`` produced 8 spurious padding bytes when
        # byte_count was already a multiple of 8; the extra ``% 8``
        # makes the padding zero in that case.
        padding = (8 - (byte_count % 8)) % 8
        all_dt = [('mdtype', udt),
                  ('byte_count', udt),
                  ('val', base_dt)]
        if padding:
            all_dt.append(('padding', 'u1', padding))
    else:  # is sde
        udt = bo + 'u2'
        padding = 4 - byte_count
        if bo == '<':  # little endian: mdtype in the low half-word
            all_dt = [('mdtype', udt),
                      ('byte_count', udt),
                      ('val', base_dt)]
        else:  # big endian
            all_dt = [('byte_count', udt),
                      ('mdtype', udt),
                      ('val', base_dt)]
        if padding:
            all_dt.append(('padding', 'u1', padding))
    tag = np.zeros((1,), dtype=all_dt)
    tag['mdtype'] = mdtype
    tag['byte_count'] = byte_count
    tag['val'] = val
    return tag
+
+
+def _write_stream(stream, *strings):
+ stream.truncate(0)
+ stream.seek(0)
+ for s in strings:
+ stream.write(s)
+ stream.seek(0)
+
+
def _make_readerlike(stream, byte_order=boc.native_code):
    """Build a minimal stand-in for a mat-file reader object."""
    class _FakeReader(object):
        pass
    fake = _FakeReader()
    attrs = dict(mat_stream=stream,
                 byte_order=byte_order,
                 struct_as_record=True,
                 uint16_codec=sys.getdefaultencoding(),
                 chars_as_strings=False,
                 mat_dtype=False,
                 squeeze_me=False)
    for name, value in attrs.items():
        setattr(fake, name, value)
    return fake
+
+
def test_read_tag():
    """Exercise the error paths of VarReader5.read_tag."""
    stream = BytesIO()
    reader = _make_readerlike(stream)
    c_reader = m5u.VarReader5(reader)
    # Reading from an empty stream raises IOError
    # (works for BytesIO here, historically not for cStringIO).
    assert_raises(IOError, c_reader.read_tag)
    # An SDE tag claiming more than 4 data bytes is invalid.
    tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
    tag['byte_count'] = 5
    _write_stream(stream, tag.tobytes())
    assert_raises(ValueError, c_reader.read_tag)
+
+
def test_read_stream():
    """_read_into returns exactly the bytes written into the stream."""
    tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
    raw = tag.tobytes()
    st = streams.make_stream(cStringIO(raw))
    assert_equal(streams._read_into(st, tag.itemsize), raw)
+
+
def test_read_numeric():
    """read_numeric handles both byte orders, full tags and SDEs."""
    stream = cStringIO()
    reader = _make_readerlike(stream)
    cases = (('u2', 30, mio5p.miUINT16),
             ('i4', 1, mio5p.miINT32),
             ('i2', -1, mio5p.miINT16))
    for base_dt, value, mdtype in cases:
        for order in ('<', '>'):
            reader.byte_order = order
            c_reader = m5u.VarReader5(reader)
            assert_equal(c_reader.little_endian, order == '<')
            assert_equal(c_reader.is_swapped, order != boc.native_code)
            for use_sde in (False, True):
                dt = np.dtype(base_dt).newbyteorder(order)
                raw = _make_tag(dt, value, mdtype, use_sde).tobytes()
                # a single read
                _write_stream(stream, raw)
                assert_equal(c_reader.read_numeric(), value)
                # two sequential reads from the same stream
                _write_stream(stream, raw, raw)
                assert_equal(c_reader.read_numeric(), value)
                assert_equal(c_reader.read_numeric(), value)
+
+
+def test_read_numeric_writeable():
+ # make reader-like thing
+ str_io = cStringIO()
+ r = _make_readerlike(str_io, '<')
+ c_reader = m5u.VarReader5(r)
+ dt = np.dtype(''
+ rdr.mat_stream.read(4) # presumably byte padding
+ mdict = read_minimat_vars(rdr)
+ fp.close()
+ return mdict
+
+
def test_jottings():
    """Smoke test: read the example workspace file without error."""
    read_workspace_vars(os.path.join(test_data_path, 'parabola.mat'))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio_utils.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio_utils.py
new file mode 100644
index 0000000..ea2989e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_mio_utils.py
@@ -0,0 +1,45 @@
+""" Testing
+
+"""
+
+import numpy as np
+
+from numpy.testing import assert_array_equal, assert_
+
+from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings
+
+
def test_squeeze_element():
    """squeeze_element mirrors np.squeeze but scalarizes 0-d results."""
    arr = np.zeros((1, 3))
    assert_array_equal(np.squeeze(arr), squeeze_element(arr))
    # A fully-squeezed numeric array comes back as a Python scalar.
    scalar = squeeze_element(np.zeros((1, 1), dtype=float))
    assert_(isinstance(scalar, float))
    # Structured arrays stay ndarrays even when squeezed to 0-d.
    struct = squeeze_element(np.zeros((1, 1), dtype=[('f1', 'f')]))
    assert_(isinstance(struct, np.ndarray))
    # Squeezing an empty array preserves its dtype.
    empty = squeeze_element(np.empty(0, np.uint8))
    assert empty.dtype == np.uint8
+
+
def test_chars_strings():
    """chars_to_strings collapses the trailing char axis into strings."""
    words = ['learn ', 'python', 'fast  ', 'here  ']
    str_arr = np.array(words, dtype='U6')           # shape (4,)
    char_rows = [list(w) for w in words]
    char_arr = np.array(char_rows, dtype='U1')      # shape (4, 6)
    assert_array_equal(chars_to_strings(char_arr), str_arr)
    # Higher-rank input: last axis collapses, leading shape is preserved.
    assert_array_equal(chars_to_strings(char_arr.reshape((2, 2, 6))),
                       str_arr.reshape((2, 2)))
    assert_array_equal(chars_to_strings(char_arr.reshape((1, 2, 2, 6))),
                       str_arr.reshape((1, 2, 2)))
    # Fortran-ordered input behaves the same.
    char_arr_f = np.array(char_rows, dtype='U1', order='F')
    assert_array_equal(chars_to_strings(char_arr_f), str_arr)
    # An empty char array maps to an empty string array.
    assert_array_equal(chars_to_strings(np.array([['']], dtype='U1')),
                       np.array([''], dtype='U1'))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_miobase.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_miobase.py
new file mode 100644
index 0000000..59100f0
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_miobase.py
@@ -0,0 +1,31 @@
+""" Testing miobase module
+"""
+
+import numpy as np
+
+from numpy.testing import assert_equal
+from pytest import raises as assert_raises
+
+from scipy.io.matlab.miobase import matdims
+
+
def test_matdims():
    """matdims maps numpy shapes onto MATLAB (>= 2-D) dimensions."""
    from scipy.sparse import csr_matrix, csc_matrix
    cases = [
        (np.array(1), (1, 1)),              # numpy scalar
        (np.array([1]), (1, 1)),            # 1-D array, 1 element
        (np.array([1, 2]), (2, 1)),         # 1-D array -> column vector
        (np.array([[2], [3]]), (2, 1)),     # 2-D column vector
        (np.array([[2, 3]]), (1, 2)),       # 2-D row vector
        (np.array([[[2, 3]]]), (1, 1, 2)),  # 3-D rowish vector
        (np.array([]), (0, 0)),             # empty 1-D
        (np.array([[]]), (0, 0)),           # empty 2-D
        (np.array([[[]]]), (0, 0, 0)),      # empty 3-D
    ]
    for arr, expected in cases:
        assert_equal(matdims(arr), expected)
    # 'row' flips the default column orientation for 1-D input.
    assert_equal(matdims(np.array([1, 2]), 'row'), (1, 2))
    # Unknown orientation strings are rejected.
    assert_raises(ValueError, matdims, np.array([1, 2]), 'bizarre')
    # Empty sparse matrices keep their declared shape.
    assert_equal(matdims(csr_matrix(np.zeros((3, 3)))), (3, 3))
    assert_equal(matdims(csc_matrix(np.zeros((2, 2)))), (2, 2))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_pathological.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_pathological.py
new file mode 100644
index 0000000..f849a6c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_pathological.py
@@ -0,0 +1,33 @@
+""" Test reading of files not conforming to matlab specification
+
+We try and read any file that matlab reads, these files included
+"""
+from os.path import dirname, join as pjoin
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+from scipy.io.matlab.mio import loadmat
+
+TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
+
+
def test_multiple_fieldnames():
    """Duplicate struct field names are mangled, not fatal.

    Example provided by Dharhas Pothina; extracted using
    mio5.varmats_from_mat.
    """
    multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat')
    # Renamed from 'vars': don't shadow the builtin of the same name.
    contents = loadmat(multi_fname)
    funny_names = contents['Summary'].dtype.names
    assert_(set(['_1_Station_Q', '_2_Station_Q',
                 '_3_Station_Q']).issubset(funny_names))
+
+
def test_malformed1():
    """Malformed header data raises ValueError instead of crashing.

    Regression test for gh-6072, where the bad header previously caused
    a buffer overflow / segfault.
    """
    fname = pjoin(TEST_DATA_PATH, 'malformed1.mat')
    with open(fname, 'rb') as f:
        assert_raises(ValueError, loadmat, f)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_streams.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_streams.py
new file mode 100644
index 0000000..3156e97
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/matlab/tests/test_streams.py
@@ -0,0 +1,229 @@
+""" Testing
+
+"""
+
+import os
+import zlib
+
+from io import BytesIO
+
+
+from tempfile import mkstemp
+from contextlib import contextmanager
+
+import numpy as np
+
+from numpy.testing import assert_, assert_equal
+from pytest import raises as assert_raises
+
+from scipy.io.matlab.streams import (make_stream,
+ GenericStream, ZlibInputStream,
+ _read_into, _read_string, BLOCK_SIZE)
+
+
@contextmanager
def setup_test_file():
    """Yield three handles over the same test bytes ``b'a\\x00string'``.

    Yields
    ------
    fs : file object
        Real on-disk file opened for binary reading.
    gs, cs : io.BytesIO
        Two independent in-memory streams with the same contents.

    The temporary file is always removed, even when the body of the
    ``with`` block raises (the original version skipped ``os.unlink``
    on error, leaking the temp file).
    """
    val = b'a\x00string'
    fd, fname = mkstemp()
    try:
        with os.fdopen(fd, 'wb') as fs:
            fs.write(val)
        with open(fname, 'rb') as fs:
            gs = BytesIO(val)
            cs = BytesIO(val)
            yield fs, gs, cs
    finally:
        os.unlink(fname)
+
+
def test_make_stream():
    """make_stream wraps a BytesIO in a GenericStream."""
    with setup_test_file() as (fs, gs, cs):
        assert_(isinstance(make_stream(gs), GenericStream))
+
+
def test_tell_seek():
    """seek returns 0 and tell reflects absolute/relative/end seeks."""
    with setup_test_file() as (fs, gs, cs):
        for fobj in (fs, gs, cs):
            st = make_stream(fobj)
            # (seek args, expected position); whence 0=abs, 1=rel, 2=end
            for seek_args, expected_pos in (((0,), 0),
                                            ((5,), 5),
                                            ((2, 1), 7),
                                            ((-2, 2), 6)):
                assert_equal(st.seek(*seek_args), 0)
                assert_equal(st.tell(), expected_pos)
+
+
def test_read():
    """Check read, _read_into and _read_string against known contents."""
    with setup_test_file() as (fs, gs, cs):
        for fobj in (fs, gs, cs):
            st = make_stream(fobj)
            # read(-1) consumes the whole stream
            st.seek(0)
            assert_equal(st.read(-1), b'a\x00string')
            st.seek(0)
            assert_equal(st.read(4), b'a\x00st')
            # _read_into: two exact reads, then IOError at EOF
            st.seek(0)
            assert_equal(_read_into(st, 4), b'a\x00st')
            assert_equal(_read_into(st, 4), b'ring')
            assert_raises(IOError, _read_into, st, 2)
            # _read_string follows the same pattern
            st.seek(0)
            assert_equal(_read_string(st, 4), b'a\x00st')
            assert_equal(_read_string(st, 4), b'ring')
            assert_raises(IOError, _read_string, st, 2)
+
+
class TestZlibInputStream(object):
    """Tests for ZlibInputStream: reading and seeking in zlib data.

    Several tests depend on exact compressed sizes for fixed inputs, so
    the data construction must not be altered.
    """

    def _get_data(self, size):
        # Return (compressed stream, compressed byte length, raw bytes)
        # for `size` random bytes.
        data = np.random.randint(0, 256, size).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)
        stream = BytesIO(compressed_data)
        return stream, len(compressed_data), data

    def test_read(self):
        # Sizes straddle BLOCK_SIZE to exercise block-buffered reads.
        SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1,
                 BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1]

        READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1,
                      BLOCK_SIZE, BLOCK_SIZE+1]

        def check(size, read_size):
            compressed_stream, compressed_data_len, data = self._get_data(size)
            stream = ZlibInputStream(compressed_stream, compressed_data_len)
            data2 = b''
            so_far = 0
            while True:
                block = stream.read(min(read_size,
                                        size - so_far))
                if not block:
                    break
                so_far += len(block)
                data2 += block
            assert_equal(data, data2)

        for size in SIZES:
            for read_size in READ_SIZES:
                check(size, read_size)

    def test_read_max_length(self):
        # Reading past the declared compressed length raises IOError,
        # even though more bytes follow in the underlying stream.
        size = 1234
        data = np.random.randint(0, 256, size).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)
        compressed_stream = BytesIO(compressed_data + b"abbacaca")
        stream = ZlibInputStream(compressed_stream, len(compressed_data))

        stream.read(len(data))
        assert_equal(compressed_stream.tell(), len(compressed_data))

        assert_raises(IOError, stream.read, 1)

    def test_read_bad_checksum(self):
        data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)

        # break checksum: flip the last byte of the zlib trailer
        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])

        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, len(compressed_data))

        assert_raises(zlib.error, stream.read, len(data))

    def test_seek(self):
        compressed_stream, compressed_data_len, data = self._get_data(1024)

        stream = ZlibInputStream(compressed_stream, compressed_data_len)

        # absolute seek (default whence)
        stream.seek(123)
        p = 123
        assert_equal(stream.tell(), p)
        d1 = stream.read(11)
        assert_equal(d1, data[p:p+11])

        # relative seek (whence=1)
        stream.seek(321, 1)
        p = 123+11+321
        assert_equal(stream.tell(), p)
        d2 = stream.read(21)
        assert_equal(d2, data[p:p+21])

        stream.seek(641, 0)
        p = 641
        assert_equal(stream.tell(), p)
        d3 = stream.read(11)
        assert_equal(d3, data[p:p+11])

        # seek-from-end, backwards relative seeks and unknown whence
        # values are all rejected
        assert_raises(IOError, stream.seek, 10, 2)
        assert_raises(IOError, stream.seek, -1, 1)
        assert_raises(ValueError, stream.seek, 1, 123)

        # seeking past the end succeeds lazily; the error surfaces on read
        stream.seek(10000, 1)
        assert_raises(IOError, stream.read, 12)

    def test_seek_bad_checksum(self):
        data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)

        # break checksum
        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])

        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, len(compressed_data))

        assert_raises(zlib.error, stream.seek, len(data))

    def test_all_data_read(self):
        compressed_stream, compressed_data_len, data = self._get_data(1024)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        assert_(not stream.all_data_read())
        stream.seek(512)
        assert_(not stream.all_data_read())
        stream.seek(1024)
        assert_(stream.all_data_read())

    def test_all_data_read_overlap(self):
        COMPRESSION_LEVEL = 6

        # magic size chosen so the compressed length lands exactly at
        # BLOCK_SIZE + 2 (checked below): the checksum spans two blocks
        data = np.arange(33707000).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
        compressed_data_len = len(compressed_data)

        # check that part of the checksum overlaps
        assert_(compressed_data_len == BLOCK_SIZE + 2)

        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        assert_(not stream.all_data_read())
        stream.seek(len(data))
        assert_(stream.all_data_read())

    def test_all_data_read_bad_checksum(self):
        COMPRESSION_LEVEL = 6

        data = np.arange(33707000).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
        compressed_data_len = len(compressed_data)

        # check that part of the checksum overlaps
        assert_(compressed_data_len == BLOCK_SIZE + 2)

        # break checksum
        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])

        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        assert_(not stream.all_data_read())
        stream.seek(len(data))

        assert_raises(zlib.error, stream.all_data_read)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/mmio.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/mmio.py
new file mode 100644
index 0000000..305b60d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/mmio.py
@@ -0,0 +1,845 @@
+"""
+ Matrix Market I/O in Python.
+ See http://math.nist.gov/MatrixMarket/formats.html
+ for information about the Matrix Market format.
+"""
+#
+# Author: Pearu Peterson
+# Created: October, 2004
+#
+# References:
+# http://math.nist.gov/MatrixMarket/
+#
+import os
+import sys
+
+from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
+ ones, can_cast)
+from numpy.compat import asbytes, asstr
+
+from scipy.sparse import coo_matrix, isspmatrix
+
+__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
+
+
+# -----------------------------------------------------------------------------
def mminfo(source):
    """
    Return size and storage parameters from Matrix Market file-like 'source'.

    This is a thin convenience wrapper around ``MMFile.info``.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object

    Returns
    -------
    rows : int
        Number of matrix rows.
    cols : int
        Number of matrix columns.
    entries : int
        Number of non-zero entries of a sparse matrix
        or rows*cols for a dense matrix.
    format : str
        Either 'coordinate' or 'array'.
    field : str
        Either 'real', 'complex', 'pattern', or 'integer'.
    symmetry : str
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
    """
    return MMFile.info(source)
+
+# -----------------------------------------------------------------------------
+
+
def mmread(source):
    """
    Reads the contents of a Matrix Market file-like 'source' into a matrix.

    Delegates to a fresh ``MMFile`` instance.

    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz)
        or open file-like object.

    Returns
    -------
    a : ndarray or coo_matrix
        Dense or sparse matrix depending on the matrix format in the
        Matrix Market file.
    """
    return MMFile().read(source)
+
+# -----------------------------------------------------------------------------
+
+
def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
    """
    Writes the sparse or dense array `a` to Matrix Market file-like `target`.

    Delegates to a fresh ``MMFile`` instance.

    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to be prepended to the Matrix Market file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Number of digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        If symmetry is None the symmetry type of 'a' is determined by its
        values.
    """
    MMFile().write(target, a, comment, field, precision, symmetry)
+
+
+###############################################################################
+class MMFile:
+ __slots__ = ('_rows',
+ '_cols',
+ '_entries',
+ '_format',
+ '_field',
+ '_symmetry')
+
+ @property
+ def rows(self):
+ return self._rows
+
+ @property
+ def cols(self):
+ return self._cols
+
+ @property
+ def entries(self):
+ return self._entries
+
+ @property
+ def format(self):
+ return self._format
+
+ @property
+ def field(self):
+ return self._field
+
+ @property
+ def symmetry(self):
+ return self._symmetry
+
+ @property
+ def has_symmetry(self):
+ return self._symmetry in (self.SYMMETRY_SYMMETRIC,
+ self.SYMMETRY_SKEW_SYMMETRIC,
+ self.SYMMETRY_HERMITIAN)
+
+ # format values
+ FORMAT_COORDINATE = 'coordinate'
+ FORMAT_ARRAY = 'array'
+ FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
+
+ @classmethod
+ def _validate_format(self, format):
+ if format not in self.FORMAT_VALUES:
+ raise ValueError('unknown format type %s, must be one of %s' %
+ (format, self.FORMAT_VALUES))
+
+ # field values
+ FIELD_INTEGER = 'integer'
+ FIELD_UNSIGNED = 'unsigned-integer'
+ FIELD_REAL = 'real'
+ FIELD_COMPLEX = 'complex'
+ FIELD_PATTERN = 'pattern'
+ FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN)
+
+ @classmethod
+ def _validate_field(self, field):
+ if field not in self.FIELD_VALUES:
+ raise ValueError('unknown field type %s, must be one of %s' %
+ (field, self.FIELD_VALUES))
+
+ # symmetry values
+ SYMMETRY_GENERAL = 'general'
+ SYMMETRY_SYMMETRIC = 'symmetric'
+ SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
+ SYMMETRY_HERMITIAN = 'hermitian'
+ SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
+ SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
+
+ @classmethod
+ def _validate_symmetry(self, symmetry):
+ if symmetry not in self.SYMMETRY_VALUES:
+ raise ValueError('unknown symmetry type %s, must be one of %s' %
+ (symmetry, self.SYMMETRY_VALUES))
+
+ DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp',
+ FIELD_UNSIGNED: 'uint64',
+ FIELD_REAL: 'd',
+ FIELD_COMPLEX: 'D',
+ FIELD_PATTERN: 'd'}
+
+ # -------------------------------------------------------------------------
    @staticmethod
    def reader():
        # Placeholder kept for API compatibility; reading goes via read().
        pass
+
+ # -------------------------------------------------------------------------
    @staticmethod
    def writer():
        # Placeholder kept for API compatibility; writing goes via write().
        pass
+
+ # -------------------------------------------------------------------------
    @classmethod
    def info(self, source):
        """
        Return size, storage parameters from Matrix Market file-like 'source'.

        Parameters
        ----------
        source : str or file-like
            Matrix Market filename (extension .mtx) or open file-like object

        Returns
        -------
        rows : int
            Number of matrix rows.
        cols : int
            Number of matrix columns.
        entries : int
            Number of non-zero entries of a sparse matrix
            or rows*cols for a dense matrix.
        format : str
            Either 'coordinate' or 'array'.
        field : str
            Either 'real', 'complex', 'pattern', or 'integer'.
        symmetry : str
            Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        """

        stream, close_it = self._open(source)

        try:

            # read and validate header line
            line = stream.readline()
            mmid, matrix, format, field, symmetry = \
                [asstr(part.strip()) for part in line.split()]
            if not mmid.startswith('%%MatrixMarket'):
                raise ValueError('source is not in Matrix Market format')
            if not matrix.lower() == 'matrix':
                raise ValueError("Problem reading file header: " + line)

            # http://math.nist.gov/MatrixMarket/formats.html
            if format.lower() == 'array':
                format = self.FORMAT_ARRAY
            elif format.lower() == 'coordinate':
                format = self.FORMAT_COORDINATE

            # skip comments
            # line[0] is compared to both '%' (text stream) and
            # 37 == ord('%') (bytes stream), since the stream may be
            # opened in either mode.  line.startswith('%')
            while line and line[0] in ['%', 37]:
                line = stream.readline()

            # skip empty lines
            while not line.strip():
                line = stream.readline()

            # size line: "rows cols" (array) or "rows cols entries" (coord)
            split_line = line.split()
            if format == self.FORMAT_ARRAY:
                if not len(split_line) == 2:
                    raise ValueError("Header line not of length 2: " +
                                     line.decode('ascii'))
                rows, cols = map(int, split_line)
                entries = rows * cols
            else:
                if not len(split_line) == 3:
                    raise ValueError("Header line not of length 3: " +
                                     line.decode('ascii'))
                rows, cols, entries = map(int, split_line)

            return (rows, cols, entries, format, field.lower(),
                    symmetry.lower())

        finally:
            if close_it:
                stream.close()
+
+ # -------------------------------------------------------------------------
    @staticmethod
    def _open(filespec, mode='rb'):
        """ Return an open file stream for reading based on source.

        If source is a file name, open it (after trying to find it with mtx and
        gzipped mtx extensions). Otherwise, just return source.

        Parameters
        ----------
        filespec : str or file-like
            String giving file name or file-like object
        mode : str, optional
            Mode with which to open file, if `filespec` is a file name.

        Returns
        -------
        fobj : file-like
            Open file-like object.
        close_it : bool
            True if the calling function should close this file when done,
            false otherwise.
        """
        # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class
        # implementing a '__fspath__' method), try to convert it to str. If this
        # fails by throwing a 'TypeError', assume it's an open file handle and
        # return it as-is.
        try:
            filespec = os.fspath(filespec)
        except TypeError:
            return filespec, False

        # 'filespec' is definitely a str now

        # open for reading
        if mode[0] == 'r':

            # determine filename plus extension: fall back to .mtx,
            # .mtx.gz, then .mtx.bz2 when the bare name does not exist
            if not os.path.isfile(filespec):
                if os.path.isfile(filespec+'.mtx'):
                    filespec = filespec + '.mtx'
                elif os.path.isfile(filespec+'.mtx.gz'):
                    filespec = filespec + '.mtx.gz'
                elif os.path.isfile(filespec+'.mtx.bz2'):
                    filespec = filespec + '.mtx.bz2'
            # open filename, transparently decompressing by extension
            if filespec.endswith('.gz'):
                import gzip
                stream = gzip.open(filespec, mode)
            elif filespec.endswith('.bz2'):
                import bz2
                stream = bz2.BZ2File(filespec, 'rb')
            else:
                stream = open(filespec, mode)

        # open for writing: always write a plain .mtx file
        else:
            if filespec[-4:] != '.mtx':
                filespec = filespec + '.mtx'
            stream = open(filespec, mode)

        return stream, True
+
+ # -------------------------------------------------------------------------
    @staticmethod
    def _get_symmetry(a):
        """Infer the Matrix Market symmetry of matrix `a`.

        Returns one of the SYMMETRY_* constants: 'general' for
        non-square input or when no stronger property holds, otherwise
        'symmetric', 'skew-symmetric' or 'hermitian'.
        """
        m, n = a.shape
        if m != n:
            return MMFile.SYMMETRY_GENERAL
        issymm = True
        isskew = True
        # hermitian only considered for complex dtypes
        isherm = a.dtype.char in 'FD'

        # sparse input
        if isspmatrix(a):
            # check if number of nonzero entries of lower and upper triangle
            # matrix are equal
            a = a.tocoo()
            (row, col) = a.nonzero()
            if (row < col).sum() != (row > col).sum():
                return MMFile.SYMMETRY_GENERAL

            # define iterator over symmetric pair entries
            a = a.todok()

            def symm_iterator():
                for ((i, j), aij) in a.items():
                    if i > j:
                        aji = a[j, i]
                        yield (aij, aji)

        # non-sparse input
        else:
            # define iterator over symmetric pair entries
            def symm_iterator():
                for j in range(n):
                    for i in range(j+1, n):
                        aij, aji = a[i][j], a[j][i]
                        yield (aij, aji)

        # check for symmetry; stop early once all candidates are ruled out
        for (aij, aji) in symm_iterator():
            if issymm and aij != aji:
                issymm = False
            if isskew and aij != -aji:
                isskew = False
            if isherm and aij != conj(aji):
                isherm = False
            if not (issymm or isskew or isherm):
                break

        # return symmetry value
        if issymm:
            return MMFile.SYMMETRY_SYMMETRIC
        if isskew:
            return MMFile.SYMMETRY_SKEW_SYMMETRIC
        if isherm:
            return MMFile.SYMMETRY_HERMITIAN
        return MMFile.SYMMETRY_GENERAL
+
+ # -------------------------------------------------------------------------
+ @staticmethod
+ def _field_template(field, precision):
+ return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
+ MMFile.FIELD_INTEGER: '%i\n',
+ MMFile.FIELD_UNSIGNED: '%u\n',
+ MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
+ (precision, precision)
+ }.get(field, None)
+
+ # -------------------------------------------------------------------------
    def __init__(self, **kwargs):
        # Seed any of rows/cols/entries/format/field/symmetry via keyword
        # arguments; missing attributes default to None.
        self._init_attrs(**kwargs)
+
+ # -------------------------------------------------------------------------
+ def read(self, source):
+ """
+ Reads the contents of a Matrix Market file-like 'source' into a matrix.
+
+ Parameters
+ ----------
+ source : str or file-like
+ Matrix Market filename (extensions .mtx, .mtz.gz)
+ or open file object.
+
+ Returns
+ -------
+ a : ndarray or coo_matrix
+ Dense or sparse matrix depending on the matrix format in the
+ Matrix Market file.
+ """
+ stream, close_it = self._open(source)
+
+ try:
+ self._parse_header(stream)
+ return self._parse_body(stream)
+
+ finally:
+ if close_it:
+ stream.close()
+
+ # -------------------------------------------------------------------------
+ def write(self, target, a, comment='', field=None, precision=None,
+ symmetry=None):
+ """
+ Writes sparse or dense array `a` to Matrix Market file-like `target`.
+
+ Parameters
+ ----------
+ target : str or file-like
+ Matrix Market filename (extension .mtx) or open file-like object.
+ a : array like
+ Sparse or dense 2-D array.
+ comment : str, optional
+ Comments to be prepended to the Matrix Market file.
+ field : None or str, optional
+ Either 'real', 'complex', 'pattern', or 'integer'.
+ precision : None or int, optional
+ Number of digits to display for real or complex values.
+ symmetry : None or str, optional
+ Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
+ If symmetry is None the symmetry type of 'a' is determined by its
+ values.
+ """
+
+ stream, close_it = self._open(target, 'wb')
+
+ try:
+ self._write(stream, a, comment, field, precision, symmetry)
+
+ finally:
+ if close_it:
+ stream.close()
+ else:
+ stream.flush()
+
+ # -------------------------------------------------------------------------
    def _init_attrs(self, **kwargs):
        """
        Initialize each attributes with the corresponding keyword arg value
        or a default of None
        """

        # slot names are underscore-prefixed; their public (keyword) names
        # drop the leading underscore
        attrs = self.__class__.__slots__
        public_attrs = [attr[1:] for attr in attrs]
        invalid_keys = set(kwargs.keys()) - set(public_attrs)

        if invalid_keys:
            raise ValueError('''found %s invalid keyword arguments, please only
                use %s''' % (tuple(invalid_keys),
                             public_attrs))

        for attr in attrs:
            setattr(self, attr, kwargs.get(attr[1:], None))
+
+ # -------------------------------------------------------------------------
+ def _parse_header(self, stream):
+ rows, cols, entries, format, field, symmetry = \
+ self.__class__.info(stream)
+ self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
+ field=field, symmetry=symmetry)
+
+ # -------------------------------------------------------------------------
+ def _parse_body(self, stream):
+ rows, cols, entries, format, field, symm = (self.rows, self.cols,
+ self.entries, self.format,
+ self.field, self.symmetry)
+
+ try:
+ from scipy.sparse import coo_matrix
+ except ImportError:
+ coo_matrix = None
+
+ dtype = self.DTYPES_BY_FIELD.get(field, None)
+
+ has_symmetry = self.has_symmetry
+ is_integer = field == self.FIELD_INTEGER
+ is_unsigned_integer = field == self.FIELD_UNSIGNED
+ is_complex = field == self.FIELD_COMPLEX
+ is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
+ is_herm = symm == self.SYMMETRY_HERMITIAN
+ is_pattern = field == self.FIELD_PATTERN
+
+ if format == self.FORMAT_ARRAY:
+ a = zeros((rows, cols), dtype=dtype)
+ line = 1
+ i, j = 0, 0
+ if is_skew:
+ a[i, j] = 0
+ if i < rows - 1:
+ i += 1
+ while line:
+ line = stream.readline()
+ # line.startswith('%')
+ if not line or line[0] in ['%', 37] or not line.strip():
+ continue
+ if is_integer:
+ aij = int(line)
+ elif is_unsigned_integer:
+ aij = int(line)
+ elif is_complex:
+ aij = complex(*map(float, line.split()))
+ else:
+ aij = float(line)
+ a[i, j] = aij
+ if has_symmetry and i != j:
+ if is_skew:
+ a[j, i] = -aij
+ elif is_herm:
+ a[j, i] = conj(aij)
+ else:
+ a[j, i] = aij
+ if i < rows-1:
+ i = i + 1
+ else:
+ j = j + 1
+ if not has_symmetry:
+ i = 0
+ else:
+ i = j
+ if is_skew:
+ a[i, j] = 0
+ if i < rows-1:
+ i += 1
+
+ if is_skew:
+ if not (i in [0, j] and j == cols - 1):
+ raise ValueError("Parse error, did not read all lines.")
+ else:
+ if not (i in [0, j] and j == cols):
+ raise ValueError("Parse error, did not read all lines.")
+
+ elif format == self.FORMAT_COORDINATE and coo_matrix is None:
+ # Read sparse matrix to dense when coo_matrix is not available.
+ a = zeros((rows, cols), dtype=dtype)
+ line = 1
+ k = 0
+ while line:
+ line = stream.readline()
+ # line.startswith('%')
+ if not line or line[0] in ['%', 37] or not line.strip():
+ continue
+ l = line.split()
+ i, j = map(int, l[:2])
+ i, j = i-1, j-1
+ if is_integer:
+ aij = int(l[2])
+ elif is_unsigned_integer:
+ aij = int(l[2])
+ elif is_complex:
+ aij = complex(*map(float, l[2:]))
+ else:
+ aij = float(l[2])
+ a[i, j] = aij
+ if has_symmetry and i != j:
+ if is_skew:
+ a[j, i] = -aij
+ elif is_herm:
+ a[j, i] = conj(aij)
+ else:
+ a[j, i] = aij
+ k = k + 1
+ if not k == entries:
+ ValueError("Did not read all entries")
+
+ elif format == self.FORMAT_COORDINATE:
+ # Read sparse COOrdinate format
+
+ if entries == 0:
+ # empty matrix
+ return coo_matrix((rows, cols), dtype=dtype)
+
+ I = zeros(entries, dtype='intc')
+ J = zeros(entries, dtype='intc')
+ if is_pattern:
+ V = ones(entries, dtype='int8')
+ elif is_integer:
+ V = zeros(entries, dtype='intp')
+ elif is_unsigned_integer:
+ V = zeros(entries, dtype='uint64')
+ elif is_complex:
+ V = zeros(entries, dtype='complex')
+ else:
+ V = zeros(entries, dtype='float')
+
+ entry_number = 0
+ for line in stream:
+ # line.startswith('%')
+ if not line or line[0] in ['%', 37] or not line.strip():
+ continue
+
+ if entry_number+1 > entries:
+ raise ValueError("'entries' in header is smaller than "
+ "number of entries")
+ l = line.split()
+ I[entry_number], J[entry_number] = map(int, l[:2])
+
+ if not is_pattern:
+ if is_integer:
+ V[entry_number] = int(l[2])
+ elif is_unsigned_integer:
+ V[entry_number] = int(l[2])
+ elif is_complex:
+ V[entry_number] = complex(*map(float, l[2:]))
+ else:
+ V[entry_number] = float(l[2])
+ entry_number += 1
+ if entry_number < entries:
+ raise ValueError("'entries' in header is larger than "
+ "number of entries")
+
+ I -= 1 # adjust indices (base 1 -> base 0)
+ J -= 1
+
+ if has_symmetry:
+ mask = (I != J) # off diagonal mask
+ od_I = I[mask]
+ od_J = J[mask]
+ od_V = V[mask]
+
+ I = concatenate((I, od_J))
+ J = concatenate((J, od_I))
+
+ if is_skew:
+ od_V *= -1
+ elif is_herm:
+ od_V = od_V.conjugate()
+
+ V = concatenate((V, od_V))
+
+ a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
+ else:
+ raise NotImplementedError(format)
+
+ return a
+
+ # ------------------------------------------------------------------------
+ def _write(self, stream, a, comment='', field=None, precision=None,
+ symmetry=None):
+ if isinstance(a, list) or isinstance(a, ndarray) or \
+ isinstance(a, tuple) or hasattr(a, '__array__'):
+ rep = self.FORMAT_ARRAY
+ a = asarray(a)
+ if len(a.shape) != 2:
+ raise ValueError('Expected 2 dimensional array')
+ rows, cols = a.shape
+
+ if field is not None:
+
+ if field == self.FIELD_INTEGER:
+ if not can_cast(a.dtype, 'intp'):
+ raise OverflowError("mmwrite does not support integer "
+ "dtypes larger than native 'intp'.")
+ a = a.astype('intp')
+ elif field == self.FIELD_REAL:
+ if a.dtype.char not in 'fd':
+ a = a.astype('d')
+ elif field == self.FIELD_COMPLEX:
+ if a.dtype.char not in 'FD':
+ a = a.astype('D')
+
+ else:
+ if not isspmatrix(a):
+ raise ValueError('unknown matrix type: %s' % type(a))
+
+ rep = 'coordinate'
+ rows, cols = a.shape
+
+ typecode = a.dtype.char
+
+ if precision is None:
+ if typecode in 'fF':
+ precision = 8
+ else:
+ precision = 16
+ if field is None:
+ kind = a.dtype.kind
+ if kind == 'i':
+ if not can_cast(a.dtype, 'intp'):
+ raise OverflowError("mmwrite does not support integer "
+ "dtypes larger than native 'intp'.")
+ field = 'integer'
+ elif kind == 'f':
+ field = 'real'
+ elif kind == 'c':
+ field = 'complex'
+ elif kind == 'u':
+ field = 'unsigned-integer'
+ else:
+ raise TypeError('unexpected dtype kind ' + kind)
+
+ if symmetry is None:
+ symmetry = self._get_symmetry(a)
+
+ # validate rep, field, and symmetry
+ self.__class__._validate_format(rep)
+ self.__class__._validate_field(field)
+ self.__class__._validate_symmetry(symmetry)
+
+ # write initial header line
+ stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep,
+ field, symmetry)))
+
+ # write comments
+ for line in comment.split('\n'):
+ stream.write(asbytes('%%%s\n' % (line)))
+
+ template = self._field_template(field, precision)
+ # write dense format
+ if rep == self.FORMAT_ARRAY:
+ # write shape spec
+ stream.write(asbytes('%i %i\n' % (rows, cols)))
+
+ if field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED):
+ if symmetry == self.SYMMETRY_GENERAL:
+ for j in range(cols):
+ for i in range(rows):
+ stream.write(asbytes(template % a[i, j]))
+
+ elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
+ for j in range(cols):
+ for i in range(j + 1, rows):
+ stream.write(asbytes(template % a[i, j]))
+
+ else:
+ for j in range(cols):
+ for i in range(j, rows):
+ stream.write(asbytes(template % a[i, j]))
+
+ elif field == self.FIELD_COMPLEX:
+
+ if symmetry == self.SYMMETRY_GENERAL:
+ for j in range(cols):
+ for i in range(rows):
+ aij = a[i, j]
+ stream.write(asbytes(template % (real(aij),
+ imag(aij))))
+ else:
+ for j in range(cols):
+ for i in range(j, rows):
+ aij = a[i, j]
+ stream.write(asbytes(template % (real(aij),
+ imag(aij))))
+
+ elif field == self.FIELD_PATTERN:
+ raise ValueError('pattern type inconsisted with dense format')
+
+ else:
+ raise TypeError('Unknown field type %s' % field)
+
+ # write sparse format
+ else:
+ coo = a.tocoo() # convert to COOrdinate format
+
+ # if symmetry format used, remove values above main diagonal
+ if symmetry != self.SYMMETRY_GENERAL:
+ lower_triangle_mask = coo.row >= coo.col
+ coo = coo_matrix((coo.data[lower_triangle_mask],
+ (coo.row[lower_triangle_mask],
+ coo.col[lower_triangle_mask])),
+ shape=coo.shape)
+
+ # write shape spec
+ stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))
+
+ template = self._field_template(field, precision-1)
+
+ if field == self.FIELD_PATTERN:
+ for r, c in zip(coo.row+1, coo.col+1):
+ stream.write(asbytes("%i %i\n" % (r, c)))
+ elif field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED):
+ for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
+ stream.write(asbytes(("%i %i " % (r, c)) +
+ (template % d)))
+ elif field == self.FIELD_COMPLEX:
+ for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
+ stream.write(asbytes(("%i %i " % (r, c)) +
+ (template % (d.real, d.imag))))
+ else:
+ raise TypeError('Unknown field type %s' % field)
+
+
+def _is_fromfile_compatible(stream):
+ """
+ Check whether `stream` is compatible with numpy.fromfile.
+
+ Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with
+ Python 3.
+ """
+
+ bad_cls = []
+ try:
+ import gzip
+ bad_cls.append(gzip.GzipFile)
+ except ImportError:
+ pass
+ try:
+ import bz2
+ bad_cls.append(bz2.BZ2File)
+ except ImportError:
+ pass
+
+ bad_cls = tuple(bad_cls)
+ return not isinstance(stream, bad_cls)
+
+
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Simple command-line driver: time mmread() on each file named on the
    # command line and report the elapsed wall-clock time.
    import time
    for filename in sys.argv[1:]:
        print('Reading', filename, '...', end=' ')
        sys.stdout.flush()
        t = time.time()
        mmread(filename)
        print('took %s seconds' % (time.time() - t))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/netcdf.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/netcdf.py
new file mode 100644
index 0000000..a040209
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/netcdf.py
@@ -0,0 +1,1091 @@
+"""
+NetCDF reader/writer module.
+
+This module is used to read and create NetCDF files. NetCDF files are
+accessed through the `netcdf_file` object. Data written to and from NetCDF
+files are contained in `netcdf_variable` objects. Attributes are given
+as member variables of the `netcdf_file` and `netcdf_variable` objects.
+
+This module implements the Scientific.IO.NetCDF API to read and create
+NetCDF files. The same API is also used in the PyNIO and pynetcdf
+modules, allowing these modules to be used interchangeably when working
+with NetCDF files.
+
+Only NetCDF3 is supported here; for NetCDF4 see
+`netCDF4-python `__,
+which has a similar API.
+
+"""
+
+# TODO:
+# * properly implement ``_FillValue``.
+# * fix character variables.
+# * implement PAGESIZE for Python 2.6?
+
+# The Scientific.IO.NetCDF API allows attributes to be added directly to
+# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
+# between user-set attributes and instance attributes, user-set attributes
+# are automatically stored in the ``_attributes`` attribute by overloading
+#``__setattr__``. This is the reason why the code sometimes uses
+#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
+# otherwise the key would be inserted into userspace attributes.
+
+
+__all__ = ['netcdf_file', 'netcdf_variable']
+
+
+import warnings
+import weakref
+from operator import mul
+from collections import OrderedDict
+from platform import python_implementation
+
+import mmap as mm
+
+import numpy as np
+from numpy.compat import asbytes, asstr
+from numpy import frombuffer, dtype, empty, array, asarray
+from numpy import little_endian as LITTLE_ENDIAN
+from functools import reduce
+
+
# True when running under PyPy; mmap defaults differ there (see netcdf_file).
IS_PYPY = python_implementation() == 'PyPy'

# Byte constants from the NetCDF classic file format specification.
# ABSENT marks an empty (tag, count) header section; ZERO is a 4-byte zero.
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
# External type tags (nc_type values, big-endian 32-bit).
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
# Header section tags.
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
# Default fill values for each external type (used for padding).
FILL_BYTE = b'\x81'
FILL_CHAR = b'\x00'
FILL_SHORT = b'\x80\x01'
FILL_INT = b'\x80\x00\x00\x01'
FILL_FLOAT = b'\x7C\xF0\x00\x00'
FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'

# nc_type tag -> (numpy typecode, size in bytes)
TYPEMAP = {NC_BYTE: ('b', 1),
           NC_CHAR: ('c', 1),
           NC_SHORT: ('h', 2),
           NC_INT: ('i', 4),
           NC_FLOAT: ('f', 4),
           NC_DOUBLE: ('d', 8)}

# nc_type tag -> default fill bytes
FILLMAP = {NC_BYTE: FILL_BYTE,
           NC_CHAR: FILL_CHAR,
           NC_SHORT: FILL_SHORT,
           NC_INT: FILL_INT,
           NC_FLOAT: FILL_FLOAT,
           NC_DOUBLE: FILL_DOUBLE}

# (numpy typecode, size) -> nc_type tag, for writing.
REVERSE = {('b', 1): NC_BYTE,
           ('B', 1): NC_CHAR,
           ('c', 1): NC_CHAR,
           ('h', 2): NC_SHORT,
           ('i', 4): NC_INT,
           ('f', 4): NC_FLOAT,
           ('d', 8): NC_DOUBLE,

           # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
           # used when getting the types from generic attributes.
           ('l', 4): NC_INT,
           ('S', 1): NC_CHAR}
+
+
+class netcdf_file(object):
+ """
+ A file object for NetCDF data.
+
+ A `netcdf_file` object has two standard attributes: `dimensions` and
+ `variables`. The values of both are dictionaries, mapping dimension
+ names to their associated lengths and variable names to variables,
+ respectively. Application programs should never modify these
+ dictionaries.
+
+ All other attributes correspond to global attributes defined in the
+ NetCDF file. Global file attributes are created by assigning to an
+ attribute of the `netcdf_file` object.
+
+ Parameters
+ ----------
+ filename : string or file-like
+ string -> filename
+ mode : {'r', 'w', 'a'}, optional
+ read-write-append mode, default is 'r'
+ mmap : None or bool, optional
+ Whether to mmap `filename` when reading. Default is True
+ when `filename` is a file name, False when `filename` is a
+ file-like object. Note that when mmap is in use, data arrays
+ returned refer directly to the mmapped data on disk, and the
+ file cannot be closed as long as references to it exist.
+ version : {1, 2}, optional
+ version of netcdf to read / write, where 1 means *Classic
+ format* and 2 means *64-bit offset format*. Default is 1. See
+ `here `__
+ for more info.
+ maskandscale : bool, optional
+ Whether to automatically scale and/or mask data based on attributes.
+ Default is False.
+
+ Notes
+ -----
+ The major advantage of this module over other modules is that it doesn't
+ require the code to be linked to the NetCDF libraries. This module is
+ derived from `pupynere `_.
+
+ NetCDF files are a self-describing binary data format. The file contains
+ metadata that describes the dimensions and variables in the file. More
+ details about NetCDF files can be found `here
+ `__. There
+ are three main sections to a NetCDF data structure:
+
+ 1. Dimensions
+ 2. Variables
+ 3. Attributes
+
+ The dimensions section records the name and length of each dimension used
+ by the variables. The variables would then indicate which dimensions it
+ uses and any attributes such as data units, along with containing the data
+ values for the variable. It is good practice to include a
+ variable that is the same name as a dimension to provide the values for
+ that axes. Lastly, the attributes section would contain additional
+ information such as the name of the file creator or the instrument used to
+ collect the data.
+
+ When writing data to a NetCDF file, there is often the need to indicate the
+ 'record dimension'. A record dimension is the unbounded dimension for a
+ variable. For example, a temperature variable may have dimensions of
+ latitude, longitude and time. If one wants to add more temperature data to
+ the NetCDF file as time progresses, then the temperature variable should
+ have the time dimension flagged as the record dimension.
+
+ In addition, the NetCDF file header contains the position of the data in
+ the file, so access can be done in an efficient manner without loading
+ unnecessary data into memory. It uses the ``mmap`` module to create
+ Numpy arrays mapped to the data on disk, for the same purpose.
+
+ Note that when `netcdf_file` is used to open a file with mmap=True
+ (default for read-only), arrays returned by it refer to data
+ directly on the disk. The file should not be closed, and cannot be cleanly
+ closed when asked, if such arrays are alive. You may want to copy data arrays
+ obtained from mmapped Netcdf file if they are to be processed after the file
+ is closed, see the example below.
+
+ Examples
+ --------
+ To create a NetCDF file:
+
+ >>> from scipy.io import netcdf
+ >>> f = netcdf.netcdf_file('simple.nc', 'w')
+ >>> f.history = 'Created for a test'
+ >>> f.createDimension('time', 10)
+ >>> time = f.createVariable('time', 'i', ('time',))
+ >>> time[:] = np.arange(10)
+ >>> time.units = 'days since 2008-01-01'
+ >>> f.close()
+
+ Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
+ of the time variable allows for the data to be set in the object, rather
+ than letting ``arange(10)`` overwrite the ``time`` variable.
+
+ To read the NetCDF file we just created:
+
+ >>> from scipy.io import netcdf
+ >>> f = netcdf.netcdf_file('simple.nc', 'r')
+ >>> print(f.history)
+ b'Created for a test'
+ >>> time = f.variables['time']
+ >>> print(time.units)
+ b'days since 2008-01-01'
+ >>> print(time.shape)
+ (10,)
+ >>> print(time[-1])
+ 9
+
+ NetCDF files, when opened read-only, return arrays that refer
+ directly to memory-mapped data on disk:
+
+ >>> data = time[:]
+ >>> data.base.base
+
+
+ If the data is to be processed after the file is closed, it needs
+ to be copied to main memory:
+
+ >>> data = time[:].copy()
+ >>> f.close()
+ >>> data.mean()
+ 4.5
+
+ A NetCDF file can also be used as context manager:
+
+ >>> from scipy.io import netcdf
+ >>> with netcdf.netcdf_file('simple.nc', 'r') as f:
+ ... print(f.history)
+ b'Created for a test'
+
+ """
    def __init__(self, filename, mode='r', mmap=None, version=1,
                 maskandscale=False):
        """Initialize netcdf_file from fileobj (str or file-like)."""
        if mode not in 'rwa':
            raise ValueError("Mode must be either 'r', 'w' or 'a'.")

        if hasattr(filename, 'seek'):  # file-like
            self.fp = filename
            self.filename = 'None'
            if mmap is None:
                mmap = False
            elif mmap and not hasattr(filename, 'fileno'):
                raise ValueError('Cannot use file object for mmap')
        else:  # maybe it's a string
            self.filename = filename
            # Append mode needs both read and write access on the file.
            omode = 'r+' if mode == 'a' else mode
            self.fp = open(self.filename, '%sb' % omode)
            if mmap is None:
                # Mmapped files on PyPy cannot be usually closed
                # before the GC runs, so it's better to use mmap=False
                # as the default.
                mmap = (not IS_PYPY)

        if mode != 'r':
            # Cannot read write-only files
            mmap = False

        self.use_mmap = mmap
        self.mode = mode
        self.version_byte = version
        self.maskandscale = maskandscale

        self.dimensions = OrderedDict()
        self.variables = OrderedDict()

        # _dims keeps dimension names in creation order (their ids);
        # _recs/_recsize track the record count and per-record byte size.
        self._dims = []
        self._recs = 0
        self._recsize = 0

        self._mm = None
        self._mm_buf = None
        if self.use_mmap:
            # Expose the mapped file as a flat byte buffer; variable data
            # arrays are later created as views into it.
            self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
            self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)

        self._attributes = OrderedDict()

        # Read and append modes parse the existing file header immediately.
        if mode in 'ra':
            self._read()
+
+ def __setattr__(self, attr, value):
+ # Store user defined attributes in a separate dict,
+ # so we can save them to file later.
+ try:
+ self._attributes[attr] = value
+ except AttributeError:
+ pass
+ self.__dict__[attr] = value
+
    def close(self):
        """Closes the NetCDF file."""
        if hasattr(self, 'fp') and not self.fp.closed:
            try:
                self.flush()
            finally:
                # Drop variable references so mmap-backed arrays held only
                # by this object can be collected.
                self.variables = OrderedDict()
                if self._mm_buf is not None:
                    ref = weakref.ref(self._mm_buf)
                    self._mm_buf = None
                    if ref() is None:
                        # self._mm_buf is gc'd, and we can close the mmap
                        self._mm.close()
                    else:
                        # we cannot close self._mm, since self._mm_buf is
                        # alive and there may still be arrays referring to it
                        warnings.warn((
                            "Cannot close a netcdf_file opened with mmap=True, when "
                            "netcdf_variables or arrays referring to its data still exist. "
                            "All data arrays obtained from such files refer directly to "
                            "data on disk, and must be copied before the file can be cleanly "
                            "closed. (See netcdf_file docstring for more information on mmap.)"
                        ), category=RuntimeWarning)
                self._mm = None
                self.fp.close()
    # Closing also happens automatically on garbage collection.
    __del__ = close
+
    def __enter__(self):
        # Support use as a context manager: ``with netcdf_file(...) as f:``.
        return self

    def __exit__(self, type, value, traceback):
        # Close the file when the with-block exits, regardless of errors.
        self.close()
+
+ def createDimension(self, name, length):
+ """
+ Adds a dimension to the Dimension section of the NetCDF data structure.
+
+ Note that this function merely adds a new dimension that the variables can
+ reference. The values for the dimension, if desired, should be added as
+ a variable using `createVariable`, referring to this dimension.
+
+ Parameters
+ ----------
+ name : str
+ Name of the dimension (Eg, 'lat' or 'time').
+ length : int
+ Length of the dimension.
+
+ See Also
+ --------
+ createVariable
+
+ """
+ if length is None and self._dims:
+ raise ValueError("Only first dimension may be unlimited!")
+
+ self.dimensions[name] = length
+ self._dims.append(name)
+
    def createVariable(self, name, type, dimensions):
        """
        Create an empty variable for the `netcdf_file` object, specifying its data
        type and the dimensions it uses.

        Parameters
        ----------
        name : str
            Name of the new variable.
        type : dtype or str
            Data type of the variable.
        dimensions : sequence of str
            List of the dimension names used by the variable, in the desired order.

        Returns
        -------
        variable : netcdf_variable
            The newly created ``netcdf_variable`` object.
            This object has also been added to the `netcdf_file` object as well.

        See Also
        --------
        createDimension

        Notes
        -----
        Any dimensions to be used by the variable should already exist in the
        NetCDF data structure or should be created by `createDimension` prior to
        creating the NetCDF variable.

        """
        # Resolve dimension names to lengths; the record dimension is None.
        shape = tuple([self.dimensions[dim] for dim in dimensions])
        shape_ = tuple([dim or 0 for dim in shape])  # replace None with 0 for NumPy

        type = dtype(type)
        typecode, size = type.char, type.itemsize
        # Only types with an nc_type mapping can be stored in NetCDF 3.
        if (typecode, size) not in REVERSE:
            raise ValueError("NetCDF 3 does not support type %s" % type)

        data = empty(shape_, dtype=type.newbyteorder("B"))  # convert to big endian always for NetCDF 3
        self.variables[name] = netcdf_variable(
            data, typecode, size, shape, dimensions,
            maskandscale=self.maskandscale)
        return self.variables[name]
+
    def flush(self):
        """
        Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.

        See Also
        --------
        sync : Identical function

        """
        # hasattr guard: flush() may be invoked from close()/__del__ before
        # __init__ has finished setting up the instance.
        if hasattr(self, 'mode') and self.mode in 'wa':
            self._write()
    sync = flush
+
    def _write(self):
        """Serialize the whole file: magic, version byte, then all sections."""
        self.fp.seek(0)
        self.fp.write(b'CDF')
        self.fp.write(array(self.version_byte, '>b').tobytes())

        # Write headers and data.
        self._write_numrecs()
        self._write_dim_array()
        self._write_gatt_array()
        self._write_var_array()
+
    def _write_numrecs(self):
        """Write numrecs: the record count along the unlimited dimension."""
        # Get highest record count from all record variables.
        for var in self.variables.values():
            if var.isrec and len(var.data) > self._recs:
                # __dict__ assignment bypasses __setattr__ so '_recs' does
                # not leak into the user-visible _attributes dict.
                self.__dict__['_recs'] = len(var.data)
        self._pack_int(self._recs)
+
    def _write_dim_array(self):
        """Write the dim_array section: tag, count, then (name, length) pairs."""
        if self.dimensions:
            self.fp.write(NC_DIMENSION)
            self._pack_int(len(self.dimensions))
            # _dims preserves creation order, which defines dimension ids.
            for name in self._dims:
                self._pack_string(name)
                length = self.dimensions[name]
                self._pack_int(length or 0)  # replace None with 0 for record dimension
        else:
            self.fp.write(ABSENT)
+
    def _write_gatt_array(self):
        """Write the global attributes section."""
        self._write_att_array(self._attributes)
+
    def _write_att_array(self, attributes):
        """Write an att_array section from `attributes` (name -> value)."""
        if attributes:
            self.fp.write(NC_ATTRIBUTE)
            self._pack_int(len(attributes))
            for name, values in attributes.items():
                self._pack_string(name)
                self._write_att_values(values)
        else:
            self.fp.write(ABSENT)
+
    def _write_var_array(self):
        """Write the var_array section: all variable metadata, then all data."""
        if self.variables:
            self.fp.write(NC_VARIABLE)
            self._pack_int(len(self.variables))

            # Sort variable names non-recs first, then recs.
            def sortkey(n):
                v = self.variables[n]
                if v.isrec:
                    # (-1,) sorts below any shape tuple, so with
                    # reverse=True record variables come last.
                    return (-1,)
                return v._shape
            variables = sorted(self.variables, key=sortkey, reverse=True)

            # Set the metadata for all variables.
            for name in variables:
                self._write_var_metadata(name)
            # Now that we have the metadata, we know the vsize of
            # each record variable, so we can calculate recsize.
            self.__dict__['_recsize'] = sum([
                var._vsize for var in self.variables.values()
                if var.isrec])
            # Set the data for all variables.
            for name in variables:
                self._write_var_data(name)
        else:
            self.fp.write(ABSENT)
+
    def _write_var_metadata(self, name):
        """Write one variable's header entry: name, dims, attrs, type, vsize."""
        var = self.variables[name]

        self._pack_string(name)
        self._pack_int(len(var.dimensions))
        for dimname in var.dimensions:
            # Dimensions are referenced by their id (position in _dims).
            dimid = self._dims.index(dimname)
            self._pack_int(dimid)

        self._write_att_array(var._attributes)

        nc_type = REVERSE[var.typecode(), var.itemsize()]
        self.fp.write(asbytes(nc_type))

        if not var.isrec:
            # vsize is the byte size rounded up to a 4-byte boundary.
            vsize = var.data.size * var.data.itemsize
            vsize += -vsize % 4
        else:  # record variable
            try:
                vsize = var.data[0].size * var.data.itemsize
            except IndexError:
                vsize = 0
            rec_vars = len([v for v in self.variables.values()
                            if v.isrec])
            # Per-record padding applies only when several record
            # variables are interleaved.
            if rec_vars > 1:
                vsize += -vsize % 4
        self.variables[name].__dict__['_vsize'] = vsize
        self._pack_int(vsize)

        # Pack a bogus begin, and set the real value later.
        self.variables[name].__dict__['_begin'] = self.fp.tell()
        self._pack_begin(0)
+
    def _write_var_data(self, name):
        """Write one variable's data and back-patch its 'begin' offset."""
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()  # real data offset for this variable
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tobytes())
            count = var.data.size * var.data.itemsize
            self._write_var_padding(var, var._vsize - count)
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    var.data.resize(shape)
                except ValueError:
                    var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                                      (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tobytes())
                # Padding
                count = rec.size * rec.itemsize
                self._write_var_padding(var, var._vsize - count)
                # Records of different variables are interleaved: step a
                # full record (recsize), not just this variable's vsize.
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize)
+
    def _write_var_padding(self, var, size):
        """Pad `size` bytes using the variable's encoded NetCDF fill value."""
        encoded_fill_value = var._get_encoded_fill_value()
        num_fills = size // len(encoded_fill_value)
        self.fp.write(encoded_fill_value * num_fills)
+
    def _write_att_values(self, values):
        """Write one attribute value: nc_type tag, element count, data, pad."""
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            # Map plain Python values to an nc_type by sampling one element.
            types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]

            # bytes index into scalars in py3k. Check for "string" types
            if isinstance(values, (str, bytes)):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values     # scalar

            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k. Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            # For strings the "count" is the character length, not size.
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        # Scalars are not converted by astype/asarray; byteswap explicitly
        # when the native order is little-endian.
        if not values.shape and (values.dtype.byteorder == '<' or
                                 (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tobytes())
        count = values.size * values.itemsize
        self.fp.write(b'\x00' * (-count % 4))  # pad
+
    def _read(self):
        """Parse the file: check magic/version, then read all header sections."""
        # Check magic bytes and version
        magic = self.fp.read(3)
        if not magic == b'CDF':
            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                            self.filename)
        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]

        # Read file headers and set data.
        self._read_numrecs()
        self._read_dim_array()
        self._read_gatt_array()
        self._read_var_array()
+
    def _read_numrecs(self):
        """Read numrecs: the record count along the unlimited dimension."""
        self.__dict__['_recs'] = self._unpack_int()
+
    def _read_dim_array(self):
        """Read the dim_array section into self.dimensions / self._dims."""
        header = self.fp.read(4)
        if header not in [ZERO, NC_DIMENSION]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        for dim in range(count):
            name = asstr(self._unpack_string())
            length = self._unpack_int() or None  # None for record dimension
            self.dimensions[name] = length
            self._dims.append(name)  # preserve order
+
    def _read_gatt_array(self):
        """Read global attributes and expose them as instance attributes."""
        for k, v in self._read_att_array().items():
            # __setattr__ also records them in _attributes for re-writing.
            self.__setattr__(k, v)
+
    def _read_att_array(self):
        """Read an att_array section; return an OrderedDict name -> value."""
        header = self.fp.read(4)
        if header not in [ZERO, NC_ATTRIBUTE]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        attributes = OrderedDict()
        for attr in range(count):
            name = asstr(self._unpack_string())
            attributes[name] = self._read_att_values()
        return attributes
+
    def _read_var_array(self):
        """Read the var_array section and build all netcdf_variable objects.

        Non-record variables get their data immediately (mmap view or a
        copy read from the file). Record variables are gathered into one
        structured "record array" covering all records, then split per
        variable.
        """
        header = self.fp.read(4)
        if header not in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            (name, dimensions, shape, attributes,
             typecode, size, dtype_, begin_, vsize) = self._read_var()
            # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                # All record data starts at the first record variable's begin.
                if begin == 0:
                    begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * size
                if self.use_mmap:
                    data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
                    data.shape = shape
                else:
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    data = frombuffer(self.fp.read(a_size), dtype=dtype_
                                      ).copy()
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(
                data, typecode, size, shape, dimensions, attributes,
                maskandscale=self.maskandscale)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            if self.use_mmap:
                rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
                rec_array.shape = (self._recs,)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
                                       dtype=dtypes).copy()
                rec_array.shape = (self._recs,)
                self.fp.seek(pos)

            # Each record variable's data is a named field of the rec array.
            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
+
    def _read_var(self):
        """Read one variable's header entry.

        Returns the tuple (name, dimensions, shape, attributes, typecode,
        size, dtype_, begin, vsize), where `begin` is the data offset and
        `shape` contains None for the record dimension.
        """
        name = asstr(self._unpack_string())
        dimensions = []
        shape = []
        dims = self._unpack_int()

        for i in range(dims):
            dimid = self._unpack_int()
            dimname = self._dims[dimid]
            dimensions.append(dimname)
            dim = self.dimensions[dimname]
            shape.append(dim)
        dimensions = tuple(dimensions)
        shape = tuple(shape)

        attributes = self._read_att_array()
        nc_type = self.fp.read(4)
        vsize = self._unpack_int()
        # The begin offset is 32-bit for version 1, 64-bit for version 2.
        begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode

        return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
+
    def _read_att_values(self):
        """Read one attribute value: nc_type tag, count, data, padding."""
        nc_type = self.fp.read(4)
        n = self._unpack_int()

        typecode, size = TYPEMAP[nc_type]

        count = n*size
        values = self.fp.read(int(count))
        self.fp.read(-count % 4)  # read padding

        if typecode != 'c':
            values = frombuffer(values, dtype='>%s' % typecode).copy()
            # Unwrap single-element arrays to scalars.
            if values.shape == (1,):
                values = values[0]
        else:
            # Character data is returned as bytes with NULs stripped.
            values = values.rstrip(b'\x00')
        return values
+
+ def _pack_begin(self, begin):
+ if self.version_byte == 1:
+ self._pack_int(begin)
+ elif self.version_byte == 2:
+ self._pack_int64(begin)
+
+ def _pack_int(self, value):
+ self.fp.write(array(value, '>i').tobytes())
+ _pack_int32 = _pack_int
+
+ def _unpack_int(self):
+ return int(frombuffer(self.fp.read(4), '>i')[0])
+ _unpack_int32 = _unpack_int
+
+ def _pack_int64(self, value):
+ self.fp.write(array(value, '>q').tobytes())
+
+ def _unpack_int64(self):
+ return frombuffer(self.fp.read(8), '>q')[0]
+
+ def _pack_string(self, s):
+ count = len(s)
+ self._pack_int(count)
+ self.fp.write(asbytes(s))
+ self.fp.write(b'\x00' * (-count % 4)) # pad
+
+ def _unpack_string(self):
+ count = self._unpack_int()
+ s = self.fp.read(count).rstrip(b'\x00')
+ self.fp.read(-count % 4) # read padding
+ return s
+
+
+class netcdf_variable(object):
+ """
+ A data object for netcdf files.
+
+ `netcdf_variable` objects are constructed by calling the method
+ `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
+ objects behave much like array objects defined in numpy, except that their
+ data resides in a file. Data is read by indexing and written by assigning
+ to an indexed subset; the entire array can be accessed by the index ``[:]``
+ or (for scalars) by using the methods `getValue` and `assignValue`.
+ `netcdf_variable` objects also have attribute `shape` with the same meaning
+ as for arrays, but the shape cannot be modified. There is another read-only
+ attribute `dimensions`, whose value is the tuple of dimension names.
+
+ All other attributes correspond to variable attributes defined in
+ the NetCDF file. Variable attributes are created by assigning to an
+ attribute of the `netcdf_variable` object.
+
+ Parameters
+ ----------
+ data : array_like
+ The data array that holds the values for the variable.
+ Typically, this is initialized as empty, but with the proper shape.
+ typecode : dtype character code
+ Desired data-type for the data array.
+ size : int
+ Desired element size for the data array.
+ shape : sequence of ints
+ The shape of the array. This should match the lengths of the
+ variable's dimensions.
+ dimensions : sequence of strings
+ The names of the dimensions used by the variable. Must be in the
+ same order of the dimension lengths given by `shape`.
+ attributes : dict, optional
+ Attribute values (any type) keyed by string names. These attributes
+ become attributes for the netcdf_variable object.
+ maskandscale : bool, optional
+ Whether to automatically scale and/or mask data based on attributes.
+ Default is False.
+
+
+ Attributes
+ ----------
+ dimensions : list of str
+ List of names of dimensions used by the variable object.
+ isrec, shape
+ Properties
+
+ See also
+ --------
+ isrec, shape
+
+ """
    def __init__(self, data, typecode, size, shape, dimensions,
                 attributes=None,
                 maskandscale=False):
        # These assignments go through __setattr__; since ``_attributes``
        # does not exist yet they land only in the instance __dict__.
        self.data = data
        self._typecode = typecode
        self._size = size
        self._shape = shape
        self.dimensions = dimensions
        self.maskandscale = maskandscale

        self._attributes = attributes or OrderedDict()
        # Mirror each stored attribute as a plain instance attribute so
        # users can access them with normal attribute syntax.
        for k, v in self._attributes.items():
            self.__dict__[k] = v
+
def __setattr__(self, attr, value):
    # Store user-defined attributes in a separate dict,
    # so we can save them to file later.
    try:
        self._attributes[attr] = value
    except AttributeError:
        # During __init__, _attributes does not exist yet; in that
        # window, values are only stored on the instance (below).
        pass
    self.__dict__[attr] = value
+
def isrec(self):
    """Whether this variable uses the (unlimited) record dimension.

    A record dimension lets additional data be appended to the netcdf
    file without rewriting existing contents. Read-only property.
    """
    has_data = bool(self.data.shape)
    first_dim_unlimited = not self._shape[0] if has_data else False
    return has_data and first_dim_unlimited
isrec = property(isrec)
+
def shape(self):
    """Shape tuple of the underlying data array.

    Unlike an ordinary numpy array's ``shape``, this is a read-only
    property and cannot be assigned to.
    """
    return self.data.shape
shape = property(shape)
+
def getValue(self):
    """Return the scalar held by a length-one `netcdf_variable`.

    Raises
    ------
    ValueError
        If the variable holds more than one element.
    """
    # ndarray.item() performs the length-one check for us.
    return self.data.item()
+
def assignValue(self, value):
    """
    Assign a scalar value to a `netcdf_variable` of length one.

    Parameters
    ----------
    value : scalar
        Scalar value (of compatible type) to assign to a length-one netcdf
        variable. This value will be written to file.

    Raises
    ------
    RuntimeError
        If the underlying (possibly memory-mapped) array is read-only.
    ValueError
        If the destination is not a length-one netcdf variable.

    """
    if not self.data.flags.writeable:
        # Work-around for a bug in NumPy. Writing to a read-only
        # memory-mapped array causes a seg. fault.
        # See NumPy ticket #1622, and SciPy ticket #1202.
        # This check for `writeable` can be removed when the oldest version
        # of NumPy still supported by scipy contains the fix for #1622.
        raise RuntimeError("variable is not writeable")

    # ndarray.itemset was deprecated in NumPy 1.25 and removed in 2.0.
    # Emulate its length-one check, then assign with the empty-tuple
    # index, which works for both 0-d and shape-(1,) arrays.
    if self.data.size != 1:
        raise ValueError("can only assign a scalar to a length-one variable")
    self.data[()] = value
+
def typecode(self):
    """Return this variable's single-character numpy typecode.

    Returns
    -------
    typecode : char
        Character code of the stored dtype (e.g. 'i' for int).
    """
    return self._typecode
+
def itemsize(self):
    """Return this variable's element size in bytes.

    Returns
    -------
    itemsize : int
        Bytes per element (e.g. 8 for float64).
    """
    return self._size
+
def __getitem__(self, index):
    """Read a slice of the variable, decoding mask/scale attributes
    (missing_value/_FillValue, scale_factor, add_offset) when
    ``maskandscale`` is enabled."""
    if not self.maskandscale:
        return self.data[index]

    out = self.data[index].copy()
    out = self._apply_missing_value(out, self._get_missing_value())

    scale = self._attributes.get('scale_factor')
    offset = self._attributes.get('add_offset')
    if scale is not None or offset is not None:
        # Promote so the decoded values are not truncated.
        out = out.astype(np.float64)
    if scale is not None:
        out = out * scale
    if offset is not None:
        out += offset

    return out
+
def __setitem__(self, index, data):
    # When mask/scale is enabled, encode on the way in (inverse of the
    # decode in __getitem__): record a fill value, undo add_offset and
    # scale_factor, then fill any masked entries.
    if self.maskandscale:
        missing_value = (
                self._get_missing_value() or
                getattr(data, 'fill_value', 999999))
        self._attributes.setdefault('missing_value', missing_value)
        self._attributes.setdefault('_FillValue', missing_value)
        data = ((data - self._attributes.get('add_offset', 0.0)) /
                self._attributes.get('scale_factor', 1.0))
        data = np.ma.asarray(data).filled(missing_value)
        if self._typecode not in 'fd' and data.dtype.kind == 'f':
            # Integer-typed variables must not receive fractional values.
            data = np.round(data)

    # Expand data for record vars?
    if self.isrec:
        # Number of records needed is derived from the leading index.
        if isinstance(index, tuple):
            rec_index = index[0]
        else:
            rec_index = index
        if isinstance(rec_index, slice):
            recs = (rec_index.start or 0) + len(data)
        else:
            recs = rec_index + 1
        if recs > len(self.data):
            shape = (recs,) + self._shape[1:]
            # Resize in-place does not always work since
            # the array might not be single-segment
            try:
                self.data.resize(shape)
            except ValueError:
                # Fall back to an out-of-place resize, bypassing
                # __setattr__ by writing straight into __dict__.
                self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
    self.data[index] = data
+
def _default_encoded_fill_value(self):
    """Default on-disk (encoded) fill value for this variable's type."""
    # Map (typecode, itemsize) back to the netCDF type, then to its
    # standard fill bytes.
    return FILLMAP[REVERSE[self.typecode(), self.itemsize()]]
+
def _get_encoded_fill_value(self):
    """Return this variable's fill value encoded as bytes.

    Prefers the ``_FillValue`` attribute when it encodes to exactly one
    element's width; otherwise falls back to the data type's default
    fill value.
    """
    if '_FillValue' in self._attributes:
        encoded = np.array(self._attributes['_FillValue'],
                           dtype=self.data.dtype).tobytes()
        if len(encoded) == self.itemsize():
            return encoded
    return self._default_encoded_fill_value()
+
def _get_missing_value(self):
    """Return the value denoting "no data", or None if there is none.

    ``_FillValue`` takes precedence over ``missing_value``: the netCDF
    standard gives special meaning to ``_FillValue``, while
    ``missing_value`` is only honoured for compatibility with old
    datasets.
    """
    for key in ('_FillValue', 'missing_value'):
        if key in self._attributes:
            return self._attributes[key]
    return None
+
@staticmethod
def _apply_missing_value(data, missing_value):
    """Mask out entries of `data` equal to `missing_value`.

    Returns a numpy.ma masked array; if `missing_value` is None the
    original array is returned unchanged. A NaN missing value is
    matched with ``np.isnan`` since ``NaN != NaN``.
    """
    if missing_value is None:
        return data

    try:
        value_is_nan = np.isnan(missing_value)
    except (TypeError, NotImplementedError):
        # Some data types (e.g., characters) cannot be tested for NaN.
        value_is_nan = False

    mask = np.isnan(data) if value_is_nan else (data == missing_value)
    return np.ma.masked_where(mask, data)
+
+
# Backwards-compatible aliases for the historical camel-case names.
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/setup.py
new file mode 100644
index 0000000..bec840e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/setup.py
@@ -0,0 +1,18 @@
+
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the scipy.io package.

    Registers the `_test_fortran` extension, the test-data directory,
    and the matlab/arff/harwell_boeing subpackages.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('io', parent_package, top_path)

    config.add_extension('_test_fortran',
                         sources=['_test_fortran.pyf', '_test_fortran.f'])

    config.add_data_dir('tests')
    config.add_subpackage('matlab')
    config.add_subpackage('arff')
    config.add_subpackage('harwell_boeing')
    return config
+
+
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/Transparent Busy.ani b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/Transparent Busy.ani
new file mode 100644
index 0000000..3be5000
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/Transparent Busy.ani differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_1d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_1d.sav
new file mode 100644
index 0000000..619a125
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_1d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_2d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_2d.sav
new file mode 100644
index 0000000..804d8b1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_2d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_3d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_3d.sav
new file mode 100644
index 0000000..3fa56c4
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_3d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_4d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_4d.sav
new file mode 100644
index 0000000..4bb951e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_4d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_5d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_5d.sav
new file mode 100644
index 0000000..2854dbc
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_5d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_6d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_6d.sav
new file mode 100644
index 0000000..91588d3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_6d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_7d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_7d.sav
new file mode 100644
index 0000000..3e978fa
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_7d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_8d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_8d.sav
new file mode 100644
index 0000000..f699fe2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_8d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_1d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_1d.sav
new file mode 100644
index 0000000..8e3a402
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_1d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_2d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_2d.sav
new file mode 100644
index 0000000..dd3504f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_2d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_3d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_3d.sav
new file mode 100644
index 0000000..285da7f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_3d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_4d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_4d.sav
new file mode 100644
index 0000000..d99fa48
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_4d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_5d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_5d.sav
new file mode 100644
index 0000000..de5e984
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_5d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_6d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_6d.sav
new file mode 100644
index 0000000..bb76671
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_6d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_7d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_7d.sav
new file mode 100644
index 0000000..995d23c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_7d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_8d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_8d.sav
new file mode 100644
index 0000000..4249ec6
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/array_float32_pointer_8d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_1.nc b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_1.nc
new file mode 100644
index 0000000..5775622
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_1.nc differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_2.nc b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_2.nc
new file mode 100644
index 0000000..07db1cd
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_2.nc differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_3_maskedvals.nc b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_3_maskedvals.nc
new file mode 100644
index 0000000..57f8bf9
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/example_3_maskedvals.nc differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-3x3d-2i.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-3x3d-2i.dat
new file mode 100644
index 0000000..87731eb
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-3x3d-2i.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-mixed.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-mixed.dat
new file mode 100644
index 0000000..a165a7a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-mixed.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-11x1x10.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-11x1x10.dat
new file mode 100644
index 0000000..c3bb9dc
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-11x1x10.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-15x10x22.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-15x10x22.dat
new file mode 100644
index 0000000..351801f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-15x10x22.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x1.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x1.dat
new file mode 100644
index 0000000..64bf92f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x1.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x5.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x5.dat
new file mode 100644
index 0000000..3d3f27f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x5.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x7.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x7.dat
new file mode 100644
index 0000000..0bd6830
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x1x7.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x3x5.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x3x5.dat
new file mode 100644
index 0000000..25269ff
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-sf8-1x3x5.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-11x1x10.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-11x1x10.dat
new file mode 100644
index 0000000..9850de3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-11x1x10.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-15x10x22.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-15x10x22.dat
new file mode 100644
index 0000000..98c09c2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-15x10x22.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x1.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x1.dat
new file mode 100644
index 0000000..959098d
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x1.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x5.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x5.dat
new file mode 100644
index 0000000..49c0ec1
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x5.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x7.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x7.dat
new file mode 100644
index 0000000..bb936b8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x1x7.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x3x5.dat b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x3x5.dat
new file mode 100644
index 0000000..cb3e9e4
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/fortran-si4-1x3x5.dat differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/invalid_pointer.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/invalid_pointer.sav
new file mode 100644
index 0000000..d53893c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/invalid_pointer.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/null_pointer.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/null_pointer.sav
new file mode 100644
index 0000000..8cee5eb
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/null_pointer.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_byte.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_byte.sav
new file mode 100644
index 0000000..e4027b3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_byte.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_byte_descr.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_byte_descr.sav
new file mode 100644
index 0000000..182e29b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_byte_descr.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_complex32.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_complex32.sav
new file mode 100644
index 0000000..593e8c6
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_complex32.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_complex64.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_complex64.sav
new file mode 100644
index 0000000..edb19d3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_complex64.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_float32.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_float32.sav
new file mode 100644
index 0000000..be9e387
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_float32.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_float64.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_float64.sav
new file mode 100644
index 0000000..9680b28
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_float64.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_heap_pointer.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_heap_pointer.sav
new file mode 100644
index 0000000..d02b175
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_heap_pointer.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int16.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int16.sav
new file mode 100644
index 0000000..6035256
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int16.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int32.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int32.sav
new file mode 100644
index 0000000..40210b8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int32.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int64.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int64.sav
new file mode 100644
index 0000000..c91cd0a
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_int64.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_string.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_string.sav
new file mode 100644
index 0000000..ee6e69f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_string.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint16.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint16.sav
new file mode 100644
index 0000000..759c2e6
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint16.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint32.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint32.sav
new file mode 100644
index 0000000..74dec7b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint32.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint64.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint64.sav
new file mode 100644
index 0000000..fc9da57
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/scalar_uint64.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays.sav
new file mode 100644
index 0000000..40c9cd3
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_byte_idl80.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_byte_idl80.sav
new file mode 100644
index 0000000..f1aa416
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_byte_idl80.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_replicated.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_replicated.sav
new file mode 100644
index 0000000..6f01fbf
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_replicated.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_replicated_3d.sav
new file mode 100644
index 0000000..bac9b20
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_arrays_replicated_3d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_inherit.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_inherit.sav
new file mode 100644
index 0000000..8babd56
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_inherit.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays.sav
new file mode 100644
index 0000000..a3c6781
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays_replicated.sav
new file mode 100644
index 0000000..38b8122
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays_replicated.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav
new file mode 100644
index 0000000..db1c256
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers.sav
new file mode 100644
index 0000000..acbb058
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers_replicated.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers_replicated.sav
new file mode 100644
index 0000000..d16f465
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers_replicated.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers_replicated_3d.sav
new file mode 100644
index 0000000..732dd2c
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_pointers_replicated_3d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars.sav
new file mode 100644
index 0000000..69d7eaf
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars_replicated.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars_replicated.sav
new file mode 100644
index 0000000..2222391
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars_replicated.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars_replicated_3d.sav
new file mode 100644
index 0000000..a35f1ac
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/struct_scalars_replicated_3d.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav
new file mode 100644
index 0000000..056333e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav
new file mode 100644
index 0000000..57e6f17
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav
new file mode 100644
index 0000000..bb86f2f
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav
new file mode 100644
index 0000000..d1b7065
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav
new file mode 100644
index 0000000..7271fdd
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav
new file mode 100644
index 0000000..8aae8e2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav
new file mode 100644
index 0000000..31221b2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav
new file mode 100644
index 0000000..13f131e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav
new file mode 100644
index 0000000..c4fed62
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav
new file mode 100644
index 0000000..7090081
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav
new file mode 100644
index 0000000..9c4312b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav
new file mode 100644
index 0000000..5c28ed8
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav
new file mode 100644
index 0000000..2d4eea2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav
new file mode 100644
index 0000000..68437da
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav
new file mode 100644
index 0000000..ef478de
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav
new file mode 100644
index 0000000..9c93e13
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav
new file mode 100644
index 0000000..b95bcdf
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/various_compressed.sav b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/various_compressed.sav
new file mode 100644
index 0000000..dcdb0b0
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/data/various_compressed.sav differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_fortran.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_fortran.py
new file mode 100644
index 0000000..c4747e5
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_fortran.py
@@ -0,0 +1,236 @@
+''' Tests for fortran sequential files '''
+
+import tempfile
+import shutil
+from os import path
+from glob import iglob
+import re
+
+from numpy.testing import assert_equal, assert_allclose
+import numpy as np
+import pytest
+
+from scipy.io import (FortranFile,
+ _test_fortran,
+ FortranEOFError,
+ FortranFormattingError)
+
+
+DATA_PATH = path.join(path.dirname(__file__), 'data')
+
+
+def test_fortranfiles_read():
+ for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
+ m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
+ if not m:
+ raise RuntimeError("Couldn't match %s filename to regex" % filename)
+
+ dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
+
+ dtype = m.group(1).replace('s', '<')
+
+ f = FortranFile(filename, 'r', ' 0] = 1
+ info = (2, 2, 3, 'coordinate', 'pattern', 'general')
+ mmwrite(self.fn, a, field='pattern')
+ assert_equal(mminfo(self.fn), info)
+ b = mmread(self.fn)
+ assert_array_almost_equal(p, b.todense())
+
+
# ---------------------------------------------------------------------------
# Matrix Market fixtures for integer-width handling, consumed by
# TestMMIOReadLargeIntegers below.  Each literal is a complete .mtx file;
# the '''\ keeps the required %%MatrixMarket header on the first line.
# ---------------------------------------------------------------------------

# All values fit in a signed 32-bit integer (2**31 - 1 is the maximum).
_32bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483647
2147483646
2147483647
2147483646
'''

# Same 32-bit values in coordinate (sparse) symmetric form.
_32bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483647
2 2 2147483646
'''

# Values beyond 32 bits but within a signed 64-bit integer.
_64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
          2147483648
-9223372036854775806
         -2147483648
 9223372036854775807
'''

# 64-bit values, coordinate format, general symmetry.
_64bit_integer_sparse_general_example = '''\
%%MatrixMarket matrix coordinate integer general
2 2 3
1 1          2147483648
1 2  9223372036854775807
2 2  9223372036854775807
'''

# 64-bit values, coordinate format, symmetric.
_64bit_integer_sparse_symmetric_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 3
1 1          2147483648
1 2 -9223372036854775807
2 2  9223372036854775807
'''

# 64-bit values, coordinate format, skew-symmetric.
_64bit_integer_sparse_skew_example = '''\
%%MatrixMarket matrix coordinate integer skew-symmetric
2 2 3
1 1          2147483648
1 2 -9223372036854775807
2 2  9223372036854775807
'''

# 9223372036854775808 == 2**63 does not fit in a signed 64-bit integer.
_over64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
         2147483648
9223372036854775807
         2147483648
9223372036854775808
'''

# 19223372036854775808 exceeds the 64-bit range entirely.
_over64bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1           2147483648
2 2 19223372036854775808
'''
+
+
class TestMMIOReadLargeIntegers(object):
    """Reading of integer Matrix Market files near the 32/64-bit limits.

    Uses the ``_32bit_*`` / ``_64bit_*`` / ``_over64bit_*`` fixture strings
    defined above.
    """

    def setup_method(self):
        # Fresh scratch directory per test, removed in teardown_method.
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def check_read(self, example, a, info, dense, over32, over64):
        """Write ``example`` to disk and verify how mmread() handles it.

        Parameters
        ----------
        example : str
            Complete Matrix Market file contents.
        a : array_like or None
            Expected matrix (None when an OverflowError is expected).
        info : tuple
            Expected ``mminfo`` result.
        dense : bool
            Whether ``example`` uses the array (dense) format.
        over32, over64 : bool
            Whether values exceed the signed 32-/64-bit ranges.
        """
        with open(self.fn, 'w') as f:
            f.write(example)
        assert_equal(mminfo(self.fn), info)
        # Values beyond 32 bits overflow where the pointer-sized integer is
        # only 4 bytes (32-bit platforms); values beyond 64 bits overflow
        # everywhere.
        if (over32 and (np.intp(0).itemsize < 8)) or over64:
            assert_raises(OverflowError, mmread, self.fn)
        else:
            b = mmread(self.fn)
            if not dense:
                b = b.todense()
            assert_equal(a, b)

    def test_read_32bit_integer_dense(self):
        """Dense file of 32-bit values reads back as int64."""
        a = array([[2**31-1, 2**31-1],
                   [2**31-2, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=False,
                        over64=False)

    def test_read_32bit_integer_sparse(self):
        """Sparse symmetric file of 32-bit values reads back as int64."""
        a = array([[2**31-1, 0],
                   [0, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_sparse_example,
                        a,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=False,
                        over64=False)

    def test_read_64bit_integer_dense(self):
        """Dense 64-bit values read on 64-bit hosts, overflow on 32-bit."""
        a = array([[2**31, -2**31],
                   [-2**63+2, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=True,
                        over64=False)

    def test_read_64bit_integer_sparse_general(self):
        """Sparse general 64-bit values."""
        a = array([[2**31, 2**63-1],
                   [0, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_general_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'general'),
                        dense=False,
                        over32=True,
                        over64=False)

    def test_read_64bit_integer_sparse_symmetric(self):
        """Sparse symmetric 64-bit values (off-diagonal mirrored)."""
        a = array([[2**31, -2**63+1],
                   [-2**63+1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_symmetric_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=True,
                        over64=False)

    def test_read_64bit_integer_sparse_skew(self):
        """Sparse skew-symmetric 64-bit values (off-diagonal negated)."""
        a = array([[2**31, -2**63+1],
                   [2**63-1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_skew_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'),
                        dense=False,
                        over32=True,
                        over64=False)

    def test_read_over64bit_integer_dense(self):
        """Dense value of 2**63 must raise OverflowError."""
        self.check_read(_over64bit_integer_dense_example,
                        None,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=True,
                        over64=True)

    def test_read_over64bit_integer_sparse(self):
        """Sparse value beyond 64 bits must raise OverflowError."""
        self.check_read(_over64bit_integer_sparse_example,
                        None,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=True,
                        over64=True)
+
+
# ---------------------------------------------------------------------------
# Matrix Market fixtures for the structural variants (general, hermitian,
# skew-symmetric, symmetric, pattern), consumed by TestMMIOCoordinate below.
# ---------------------------------------------------------------------------

# Heavily commented general real example, mirroring the format docs.
_general_example = '''\
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |%                                             | <--+
% |% comments                                    |    |-- 0 or more comment lines
% |%                                             | <--+
% |    M  N  L                                   | <--- rows, columns, entries
% |    I1  J1  A(I1, J1)                         | <--+
% |    I2  J2  A(I2, J2)                         |    |
% |    I3  J3  A(I3, J3)                         |    |-- L lines
% |        . . .                                 |    |
% |    IL JL  A(IL, JL)                          | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
  5  5  8
    1     1   1.000e+00
    2     2   1.050e+01
    3     3   1.500e-02
    1     4   6.000e+00
    4     2   2.505e+02
    4     4  -2.800e+02
    4     5   3.332e+01
    5     5   1.200e+01
'''

# Complex hermitian: only one triangle is stored.
_hermitian_example = '''\
%%MatrixMarket matrix coordinate complex hermitian
  5  5  7
    1     1     1.0      0
    2     2    10.5      0
    4     2   250.5     22.22
    3     3     1.5e-2   0
    4     4    -2.8e2    0
    5     5    12.       0
    5     4     0       33.32
'''

# Real skew-symmetric: stored triangle is negated on mirror.
_skew_example = '''\
%%MatrixMarket matrix coordinate real skew-symmetric
  5  5  7
    1     1     1.0
    2     2    10.5
    4     2   250.5
    3     3     1.5e-2
    4     4    -2.8e2
    5     5    12.
    5     4     0
'''

# Real symmetric: stored triangle is mirrored unchanged.
_symmetric_example = '''\
%%MatrixMarket matrix coordinate real symmetric
  5  5  7
    1     1     1.0
    2     2    10.5
    4     2   250.5
    3     3     1.5e-2
    4     4    -2.8e2
    5     5    12.
    5     4     8
'''

# Pattern matrix: positions only, no values.
_symmetric_pattern_example = '''\
%%MatrixMarket matrix coordinate pattern symmetric
  5  5  7
    1     1
    2     2
    4     2
    3     3
    4     4
    5     5
    5     4
'''

# example (without comment lines) from Figure 1 in
# https://math.nist.gov/MatrixMarket/reports/MMformat.ps
# Mixed-case header keywords and blank lines must be tolerated by the reader.
_empty_lines_example = '''\
%%MatrixMarket  MATRIX    Coordinate    Real General

   5  5         8

1 1  1.0
2 2       10.5
3 3             1.5e-2
4 4                     -2.8E2
5 5                              12.
     1      4      6
     4      2      250.5
     4      5      33.32

'''
+
+
+class TestMMIOCoordinate(object):
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+ def teardown_method(self):
+ shutil.rmtree(self.tmpdir)
+
+ def check_read(self, example, a, info):
+ f = open(self.fn, 'w')
+ f.write(example)
+ f.close()
+ assert_equal(mminfo(self.fn), info)
+ b = mmread(self.fn).todense()
+ assert_array_almost_equal(a, b)
+
+ def test_read_general(self):
+ a = [[1, 0, 0, 6, 0],
+ [0, 10.5, 0, 0, 0],
+ [0, 0, .015, 0, 0],
+ [0, 250.5, 0, -280, 33.32],
+ [0, 0, 0, 0, 12]]
+ self.check_read(_general_example, a,
+ (5, 5, 8, 'coordinate', 'real', 'general'))
+
+ def test_read_hermitian(self):
+ a = [[1, 0, 0, 0, 0],
+ [0, 10.5, 0, 250.5 - 22.22j, 0],
+ [0, 0, .015, 0, 0],
+ [0, 250.5 + 22.22j, 0, -280, -33.32j],
+ [0, 0, 0, 33.32j, 12]]
+ self.check_read(_hermitian_example, a,
+ (5, 5, 7, 'coordinate', 'complex', 'hermitian'))
+
+ def test_read_skew(self):
+ a = [[1, 0, 0, 0, 0],
+ [0, 10.5, 0, -250.5, 0],
+ [0, 0, .015, 0, 0],
+ [0, 250.5, 0, -280, 0],
+ [0, 0, 0, 0, 12]]
+ self.check_read(_skew_example, a,
+ (5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))
+
+ def test_read_symmetric(self):
+ a = [[1, 0, 0, 0, 0],
+ [0, 10.5, 0, 250.5, 0],
+ [0, 0, .015, 0, 0],
+ [0, 250.5, 0, -280, 8],
+ [0, 0, 0, 8, 12]]
+ self.check_read(_symmetric_example, a,
+ (5, 5, 7, 'coordinate', 'real', 'symmetric'))
+
+ def test_read_symmetric_pattern(self):
+ a = [[1, 0, 0, 0, 0],
+ [0, 1, 0, 1, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 0, 1, 1],
+ [0, 0, 0, 1, 1]]
+ self.check_read(_symmetric_pattern_example, a,
+ (5, 5, 7, 'coordinate', 'pattern', 'symmetric'))
+
+ def test_read_empty_lines(self):
+ a = [[1, 0, 0, 6, 0],
+ [0, 10.5, 0, 0, 0],
+ [0, 0, .015, 0, 0],
+ [0, 250.5, 0, -280, 33.32],
+ [0, 0, 0, 0, 12]]
+ self.check_read(_empty_lines_example, a,
+ (5, 5, 8, 'coordinate', 'real', 'general'))
+
+ def test_empty_write_read(self):
+ # https://github.com/scipy/scipy/issues/1410 (Trac #883)
+
+ b = scipy.sparse.coo_matrix((10, 10))
+ mmwrite(self.fn, b)
+
+ assert_equal(mminfo(self.fn),
+ (10, 10, 0, 'coordinate', 'real', 'symmetric'))
+ a = b.todense()
+ b = mmread(self.fn).todense()
+ assert_array_almost_equal(a, b)
+
+ def test_bzip2_py3(self):
+ # test if fix for #2152 works
+ try:
+ # bz2 module isn't always built when building Python.
+ import bz2
+ except ImportError:
+ return
+ I = array([0, 0, 1, 2, 3, 3, 3, 4])
+ J = array([0, 3, 1, 2, 1, 3, 4, 4])
+ V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+ b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+ mmwrite(self.fn, b)
+
+ fn_bzip2 = "%s.bz2" % self.fn
+ with open(self.fn, 'rb') as f_in:
+ f_out = bz2.BZ2File(fn_bzip2, 'wb')
+ f_out.write(f_in.read())
+ f_out.close()
+
+ a = mmread(fn_bzip2).todense()
+ assert_array_almost_equal(a, b.todense())
+
+ def test_gzip_py3(self):
+ # test if fix for #2152 works
+ try:
+ # gzip module can be missing from Python installation
+ import gzip
+ except ImportError:
+ return
+ I = array([0, 0, 1, 2, 3, 3, 3, 4])
+ J = array([0, 3, 1, 2, 1, 3, 4, 4])
+ V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+ b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+ mmwrite(self.fn, b)
+
+ fn_gzip = "%s.gz" % self.fn
+ with open(self.fn, 'rb') as f_in:
+ f_out = gzip.open(fn_gzip, 'wb')
+ f_out.write(f_in.read())
+ f_out.close()
+
+ a = mmread(fn_gzip).todense()
+ assert_array_almost_equal(a, b.todense())
+
+ def test_real_write_read(self):
+ I = array([0, 0, 1, 2, 3, 3, 3, 4])
+ J = array([0, 3, 1, 2, 1, 3, 4, 4])
+ V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+ b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+ mmwrite(self.fn, b)
+
+ assert_equal(mminfo(self.fn),
+ (5, 5, 8, 'coordinate', 'real', 'general'))
+ a = b.todense()
+ b = mmread(self.fn).todense()
+ assert_array_almost_equal(a, b)
+
+ def test_complex_write_read(self):
+ I = array([0, 0, 1, 2, 3, 3, 3, 4])
+ J = array([0, 3, 1, 2, 1, 3, 4, 4])
+ V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
+ 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
+
+ b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+ mmwrite(self.fn, b)
+
+ assert_equal(mminfo(self.fn),
+ (5, 5, 8, 'coordinate', 'complex', 'general'))
+ a = b.todense()
+ b = mmread(self.fn).todense()
+ assert_array_almost_equal(a, b)
+
+ def test_sparse_formats(self):
+ mats = []
+
+ I = array([0, 0, 1, 2, 3, 3, 3, 4])
+ J = array([0, 3, 1, 2, 1, 3, 4, 4])
+
+ V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+ mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
+
+ V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
+ 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
+ mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
+
+ for mat in mats:
+ expected = mat.todense()
+ for fmt in ['csr', 'csc', 'coo']:
+ fn = mktemp(dir=self.tmpdir) # safe, we own tmpdir
+ mmwrite(fn, mat.asformat(fmt))
+
+ result = mmread(fn).todense()
+ assert_array_almost_equal(result, expected)
+
+ def test_precision(self):
+ test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
+ test_precisions = range(1, 10)
+ for value in test_values:
+ for precision in test_precisions:
+ # construct sparse matrix with test value at last main diagonal
+ n = 10**precision + 1
+ A = scipy.sparse.dok_matrix((n, n))
+ A[n-1, n-1] = value
+ # write matrix with test precision and read again
+ mmwrite(self.fn, A, precision=precision)
+ A = scipy.io.mmread(self.fn)
+ # check for right entries in matrix
+ assert_array_equal(A.row, [n-1])
+ assert_array_equal(A.col, [n-1])
+ assert_allclose(A.data, [float('%%.%dg' % precision % value)])
+
+ def test_bad_number_of_coordinate_header_fields(self):
+ s = """\
+ %%MatrixMarket matrix coordinate real general
+ 5 5 8 999
+ 1 1 1.000e+00
+ 2 2 1.050e+01
+ 3 3 1.500e-02
+ 1 4 6.000e+00
+ 4 2 2.505e+02
+ 4 4 -2.800e+02
+ 4 5 3.332e+01
+ 5 5 1.200e+01
+ """
+ text = textwrap.dedent(s).encode('ascii')
+ with pytest.raises(ValueError, match='not of length 3'):
+ scipy.io.mmread(io.BytesIO(text))
+
+
def test_gh11389():
    """A symmetric complex coordinate file with a negative value must parse
    without error (regression test for gh-11389)."""
    content = ("%%MatrixMarket matrix coordinate complex symmetric\n"
               " 1 1 1\n"
               "1 1 -2.1846000000000e+02  0.0000000000000e+00")
    mmread(io.StringIO(content))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_netcdf.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_netcdf.py
new file mode 100644
index 0000000..3030b27
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_netcdf.py
@@ -0,0 +1,541 @@
+''' Tests for netcdf '''
+import os
+from os.path import join as pjoin, dirname
+import shutil
+import tempfile
+import warnings
+from io import BytesIO
+from glob import glob
+from contextlib import contextmanager
+
+import numpy as np
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+ suppress_warnings)
+from pytest import raises as assert_raises
+
+from scipy.io.netcdf import netcdf_file, IS_PYPY
+from scipy._lib._tmpdirs import in_tempdir
+
+TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
+
+N_EG_ELS = 11 # number of elements for example variable
+VARTYPE_EG = 'b' # var type for example variable
+
+
@contextmanager
def make_simple(*args, **kwargs):
    """Yield a flushed netcdf_file containing one 'time' dimension/variable.

    All positional and keyword arguments are forwarded to ``netcdf_file``.
    The file is closed when the context exits.
    """
    nc = netcdf_file(*args, **kwargs)
    nc.history = 'Created for a test'
    nc.createDimension('time', N_EG_ELS)
    time_var = nc.createVariable('time', VARTYPE_EG, ('time',))
    time_var[:] = np.arange(N_EG_ELS)
    time_var.units = 'days since 2008-01-01'
    nc.flush()
    yield nc
    nc.close()
+
+
def check_simple(ncfileobj):
    """Assert that *ncfileobj* matches what make_simple() writes."""
    assert_equal(ncfileobj.history, b'Created for a test')
    time_var = ncfileobj.variables['time']
    assert_equal(time_var.units, b'days since 2008-01-01')
    assert_equal(time_var.shape, (N_EG_ELS,))
    assert_equal(time_var[-1], N_EG_ELS - 1)
+
def assert_mask_matches(arr, expected_mask):
    """Check that the mask of *arr* equals *expected_mask*.

    Unlike numpy.ma.testutils.assert_mask_equal, a plain ndarray is also
    accepted; it is treated as having an all-False mask.

    Parameters
    ----------
    arr : ndarray or MaskedArray
        Array to test.
    expected_mask : array_like of booleans
        The expected mask.
    """
    actual_mask = np.ma.getmaskarray(arr)
    assert_equal(actual_mask, expected_mask)
+
+
def test_read_write_files():
    """Round-trip a simple dataset on disk through write, append and read modes."""
    cwd = os.getcwd()
    # Create the scratch directory *before* the try block: if mkdtemp()
    # itself fails there is nothing to clean up yet (the original code
    # could hit an unbound 'tmpdir' in its error path).
    tmpdir = tempfile.mkdtemp()
    try:
        os.chdir(tmpdir)
        with make_simple('simple.nc', 'w') as f:
            pass
        # read the file we just created in 'a' mode
        with netcdf_file('simple.nc', 'a') as f:
            check_simple(f)
            # add something
            f._attributes['appendRan'] = 1

        # To read the NetCDF file we just created::
        with netcdf_file('simple.nc') as f:
            # Using mmap is the default (but not on pypy)
            assert_equal(f.use_mmap, not IS_PYPY)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)

        # Read it in append (and check mmap is off)
        with netcdf_file('simple.nc', 'a') as f:
            assert_(not f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)

        # Now without mmap
        with netcdf_file('simple.nc', mmap=False) as f:
            # Using mmap is the default
            assert_(not f.use_mmap)
            check_simple(f)

        # To read the NetCDF file we just created, as file object, no
        # mmap. When n * n_bytes(var_type) is not divisible by 4, this
        # raised an error in pupynere 1.0.12 and scipy rev 5893, because
        # calculated vsize was rounding up in units of 4 - see
        # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj) as f:
                # by default, don't use mmap for file-like
                assert_(not f.use_mmap)
                check_simple(f)

        # Read file from fileobj, with mmap
        with suppress_warnings() as sup:
            if IS_PYPY:
                sup.filter(RuntimeWarning,
                           "Cannot close a netcdf_file opened with mmap=True.*")
            with open('simple.nc', 'rb') as fobj:
                with netcdf_file(fobj, mmap=True) as f:
                    assert_(f.use_mmap)
                    check_simple(f)

        # Again read it in append mode (adding another att)
        with open('simple.nc', 'r+b') as fobj:
            with netcdf_file(fobj, 'a') as f:
                assert_(not f.use_mmap)
                check_simple(f)
                f.createDimension('app_dim', 1)
                var = f.createVariable('app_var', 'i', ('app_dim',))
                var[:] = 42

        # And... check that app_var made it in...
        with netcdf_file('simple.nc') as f:
            check_simple(f)
            assert_equal(f.variables['app_var'][:], 42)
    finally:
        # Restore the working directory and remove the scratch directory on
        # success AND failure; the original duplicated this cleanup in an
        # except-reraise branch plus a fall-through copy.
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
+
+
def test_read_write_sio():
    """Write/read round-trip through in-memory BytesIO, including version 2
    (64-bit offset) files; mmap must be rejected for non-file objects."""
    eg_sio1 = BytesIO()
    with make_simple(eg_sio1, 'w'):
        str_val = eg_sio1.getvalue()

    eg_sio2 = BytesIO(str_val)
    with netcdf_file(eg_sio2) as f2:
        check_simple(f2)

    # Test that error is raised if attempting mmap for sio
    eg_sio3 = BytesIO(str_val)
    assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
    # Test 64-bit offset write / read
    eg_sio_64 = BytesIO()
    with make_simple(eg_sio_64, 'w', version=2) as f_64:
        str_val = eg_sio_64.getvalue()

    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
    # also when version 2 explicitly specified
    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64, version=2) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
+
+
def test_bytes():
    """The on-disk byte layout produced for a minimal dataset is stable."""
    raw_file = BytesIO()
    f = netcdf_file(raw_file, mode='w')
    # Dataset only has a single variable, dimension and attribute to avoid
    # any ambiguity related to order.
    f.a = 'b'
    f.createDimension('dim', 1)
    var = f.createVariable('var', np.int16, ('dim',))
    var[0] = -9999
    var.c = 'd'
    f.sync()

    actual = raw_file.getvalue()

    expected = (b'CDF\x01'  # 'CDF' magic plus version byte 1
                b'\x00\x00\x00\x00'
                b'\x00\x00\x00\x0a'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x03'
                b'dim\x00'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x0c'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x01'
                b'a\x00\x00\x00'
                b'\x00\x00\x00\x02'
                b'\x00\x00\x00\x01'
                b'b\x00\x00\x00'
                b'\x00\x00\x00\x0b'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x03'
                b'var\x00'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x00'
                b'\x00\x00\x00\x0c'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x01'
                b'c\x00\x00\x00'
                b'\x00\x00\x00\x02'
                b'\x00\x00\x00\x01'
                b'd\x00\x00\x00'
                b'\x00\x00\x00\x03'
                b'\x00\x00\x00\x04'
                b'\x00\x00\x00\x78'
                # trailing block: presumably var[0] == -9999 (0xd8f1 as
                # big-endian int16) plus padding -- verify against the
                # NetCDF classic-format spec
                b'\xd8\xf1\x80\x01')

    assert_equal(actual, expected)
+
+
def test_encoded_fill_value():
    """_get_encoded_fill_value() honours a valid _FillValue and falls back
    to the default encoding for a wrongly-sized one."""
    with netcdf_file(BytesIO(), mode='w') as nc:
        nc.createDimension('x', 1)
        var = nc.createVariable('var', 'S1', ('x',))
        # No _FillValue attribute yet: default encoding.
        assert_equal(var._get_encoded_fill_value(), b'\x00')
        var._FillValue = b'\x01'
        assert_equal(var._get_encoded_fill_value(), b'\x01')
        # Two bytes for an 'S1' variable is invalid: default is used again.
        var._FillValue = b'\x00\x00'  # invalid, wrong size
        assert_equal(var._get_encoded_fill_value(), b'\x00')
+
+
def test_read_example_data():
    """Every bundled .nc example file must open with and without mmap."""
    # read any example data files
    for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
        with netcdf_file(fname, 'r'):
            pass
        with netcdf_file(fname, 'r', mmap=False):
            pass
+
+
def test_itemset_no_segfault_on_readonly():
    """Regression test for ticket #1202: writing to a variable of a
    read-only, mmap-backed file must raise, not crash the interpreter."""
    # Open the test file in read-only mode.

    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
        with netcdf_file(filename, 'r', mmap=True) as f:
            time_var = f.variables['time']

    # time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
    assert_raises(RuntimeError, time_var.assignValue, 42)
+
+
def test_appending_issue_gh_8625():
    """gh-8625: a file created in memory can be reopened in append mode."""
    stream = BytesIO()

    with make_simple(stream, mode='w') as f:
        f.createDimension('x', 2)
        f.createVariable('x', float, ('x',))
        f.variables['x'][...] = 1
        f.flush()
        contents = stream.getvalue()

    stream = BytesIO(contents)
    with netcdf_file(stream, mode='a') as f:
        f.variables['x'][...] = 2
+
+
def test_write_invalid_dtype():
    """createVariable must raise ValueError for 64-bit integer dtypes."""
    dtypes = ['int64', 'uint64']
    if np.dtype('int').itemsize == 8:   # 64-bit machines
        dtypes.append('int')
    if np.dtype('uint').itemsize == 8:   # 64-bit machines
        dtypes.append('uint')

    with netcdf_file(BytesIO(), 'w') as f:
        f.createDimension('time', N_EG_ELS)
        for dt in dtypes:
            assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
+
+
def test_flush_rewind():
    """Calling flush() twice must not grow the output (rewrite in place)."""
    stream = BytesIO()
    with make_simple(stream, mode='w') as f:
        x = f.createDimension('x',4)  # x is used in createVariable
        v = f.createVariable('v', 'i2', ['x'])
        v[:] = 1
        f.flush()
        len_single = len(stream.getvalue())
        f.flush()
        len_double = len(stream.getvalue())

    assert_(len_single == len_double)
+
+
def test_dtype_specifiers():
    """'i2', np.int16 and np.dtype(np.int16) must all be accepted."""
    # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
    # Specifying np.int16 or similar only works from the same commit as this
    # comment was made.
    with make_simple(BytesIO(), mode='w') as f:
        f.createDimension('x',4)
        f.createVariable('v1', 'i2', ['x'])
        f.createVariable('v2', np.int16, ['x'])
        f.createVariable('v3', np.dtype(np.int16), ['x'])
+
+
def test_ticket_1720():
    """Float variable data round-trips through an in-memory file
    (regression test for Trac #1720)."""
    items = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]

    # Renamed the buffer from 'io' to avoid shadowing the io module.
    buf = BytesIO()
    with netcdf_file(buf, 'w') as f:
        f.history = 'Created for a test'
        f.createDimension('float_var', 10)
        float_var = f.createVariable('float_var', 'f', ('float_var',))
        float_var[:] = items
        float_var.units = 'metres'
        f.flush()
        contents = buf.getvalue()

    buf = BytesIO(contents)
    with netcdf_file(buf, 'r') as f:
        assert_equal(f.history, b'Created for a test')
        float_var = f.variables['float_var']
        assert_equal(float_var.units, b'metres')
        assert_equal(float_var.shape, (10,))
        assert_allclose(float_var[:], items)
+
+
def test_mmaps_segfault():
    """Arrays backed by a closed mmap'ed file must stay usable (no crash)."""
    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')

    if not IS_PYPY:
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            with netcdf_file(filename, mmap=True) as f:
                x = f.variables['lat'][:]
                # should not raise warnings
                del x

    def doit():
        with netcdf_file(filename, mmap=True) as f:
            return f.variables['lat'][:]

    # should not crash
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
        x = doit()
    x.sum()
+
+
def test_zero_dimensional_var():
    """A variable with no dimensions reports isrec == False (a real bool)."""
    io = BytesIO()
    with make_simple(io, 'w') as f:
        v = f.createVariable('zerodim', 'i2', [])
        # This is checking that .isrec returns a boolean - don't simplify it
        # to 'assert not ...'
        assert v.isrec is False, v.isrec
        f.flush()
+
+
def test_byte_gatts():
    """Global attributes set as bytes or str both read back as bytes
    (pre-py3k behaviour for "string" attributes)."""
    with in_tempdir():
        filename = 'g_byte_atts.nc'
        with netcdf_file(filename, 'w') as f:
            f._attributes['holy'] = b'grail'
            f._attributes['witch'] = 'floats'
        with netcdf_file(filename, 'r') as f:
            assert_equal(f._attributes['holy'], b'grail')
            assert_equal(f._attributes['witch'], b'floats')
+
+
def test_open_append():
    """Attributes added in append mode coexist with those from write mode."""
    with in_tempdir():
        filename = 'append_dat.nc'
        # 'w': create the file with a single attribute.
        with netcdf_file(filename, 'w') as f:
            f._attributes['Kilroy'] = 'was here'

        # 'a': the old attribute is readable and a new one can be added.
        with netcdf_file(filename, 'a') as f:
            assert_equal(f._attributes['Kilroy'], b'was here')
            f._attributes['naughty'] = b'Zoot'

        # 'r': both attributes survive.
        with netcdf_file(filename, 'r') as f:
            assert_equal(f._attributes['Kilroy'], b'was here')
            assert_equal(f._attributes['naughty'], b'Zoot')
+
+
def test_append_recordDimension():
    """The record (unlimited) 'time' dimension grows correctly across
    repeated append cycles, without leaking a spurious 'data' attribute."""
    dataSize = 100

    with in_tempdir():
        # Create file with record time dimension
        with netcdf_file('withRecordDimension.nc', 'w') as f:
            f.createDimension('time', None)
            f.createVariable('time', 'd', ('time',))
            f.createDimension('x', dataSize)
            x = f.createVariable('x', 'd', ('x',))
            x[:] = np.array(range(dataSize))
            f.createDimension('y', dataSize)
            y = f.createVariable('y', 'd', ('y',))
            y[:] = np.array(range(dataSize))
            f.createVariable('testData', 'i', ('time', 'x', 'y'))
            f.flush()
            f.close()

        for i in range(2):
            # Open the file in append mode and add data
            with netcdf_file('withRecordDimension.nc', 'a') as f:
                f.variables['time'].data = np.append(f.variables["time"].data, i)
                f.variables['testData'][i, :, :] = np.full((dataSize, dataSize), i)
                f.flush()

            # Read the file and check that append worked
            with netcdf_file('withRecordDimension.nc') as f:
                assert_equal(f.variables['time'][-1], i)
                assert_equal(f.variables['testData'][-1, :, :].copy(), np.full((dataSize, dataSize), i))
                assert_equal(f.variables['time'].data.shape[0], i+1)
                assert_equal(f.variables['testData'].data.shape[0], i+1)

        # Read the file and check that 'data' was not saved as user defined
        # attribute of testData variable during append operation
        with netcdf_file('withRecordDimension.nc') as f:
            with assert_raises(KeyError) as ar:
                f.variables['testData']._attributes['data']
            ex = ar.value
            assert_equal(ex.args[0], 'data')
+
def test_maskandscale():
    """Read and write masked/scaled data with maskandscale=True.

    Reads the bundled example_2.nc, then writes an equivalent variable
    (missing_value / scale_factor / add_offset) and reads it back.
    """
    t = np.linspace(20, 30, 15)
    t[3] = 100
    tm = np.ma.masked_greater(t, 99)
    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        Temp = f.variables['Temperature']
        assert_equal(Temp.missing_value, 9999)
        assert_equal(Temp.add_offset, 20)
        assert_equal(Temp.scale_factor, np.float32(0.01))
        found = Temp[:].compressed()
        del Temp  # Remove ref to mmap, so file can be closed.
        expected = np.round(tm.compressed(), 2)
        assert_allclose(found, expected)

    with in_tempdir():
        newfname = 'ms.nc'
        f = netcdf_file(newfname, 'w', maskandscale=True)
        f.createDimension('Temperature', len(tm))
        temp = f.createVariable('Temperature', 'i', ('Temperature',))
        temp.missing_value = 9999
        temp.scale_factor = 0.01
        temp.add_offset = 20
        temp[:] = tm
        f.close()

        with netcdf_file(newfname, maskandscale=True) as f:
            Temp = f.variables['Temperature']
            assert_equal(Temp.missing_value, 9999)
            assert_equal(Temp.add_offset, 20)
            assert_equal(Temp.scale_factor, np.float32(0.01))
            expected = np.round(tm.compressed(), 2)
            found = Temp[:].compressed()
            del Temp
            assert_allclose(found, expected)
+
+
+# ------------------------------------------------------------------------
+# Test reading with masked values (_FillValue / missing_value)
+# ------------------------------------------------------------------------
+
def test_read_withValuesNearFillValue():
    """Regression test for ticket #5626 (values near the fill value)."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var1_fillval0'][:]
    assert_mask_matches(vardata, [False, True, False])
+
def test_read_withNoFillValue():
    """A variable with no fill value reads back unmasked even when
    maskandscale=True."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var2_noFillval'][:]
    assert_mask_matches(vardata, [False, False, False])
    assert_equal(vardata, [1,2,3])
+
def test_read_withFillValueAndMissingValue():
    """When both _FillValue and missing_value are present, _FillValue is
    the one used for masking."""
    IRRELEVANT_VALUE = 9999
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var3_fillvalAndMissingValue'][:]
    assert_mask_matches(vardata, [True, False, False])
    assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
+
def test_read_withMissingValue():
    """missing_value is used for masking when _FillValue is absent."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var4_missingValue'][:]
    assert_mask_matches(vardata, [False, True, False])
+
def test_read_withFillValNaN():
    """A NaN fill value still produces a masked entry."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var5_fillvalNaN'][:]
    assert_mask_matches(vardata, [False, True, False])
+
def test_read_withChar():
    """Fill-value masking also applies to a character variable."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var6_char'][:]
    assert_mask_matches(vardata, [False, True, False])
+
def test_read_with2dVar():
    """Fill-value masking for a 2-D variable."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var7_2d'][:]
    assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
+
def test_read_withMaskAndScaleFalse():
    """With maskandscale=False the _FillValue/missing_value attributes are
    ignored and the raw, unmasked data is returned."""
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    # Open file with mmap=False to avoid problems with closing a mmap'ed file
    # when arrays referring to its data still exist:
    with netcdf_file(fname, maskandscale=False, mmap=False) as f:
        vardata = f.variables['var3_fillvalAndMissingValue'][:]
    assert_mask_matches(vardata, [False, False, False])
    assert_equal(vardata, [1, 2, 3])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_paths.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_paths.py
new file mode 100644
index 0000000..4ba6dc3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_paths.py
@@ -0,0 +1,94 @@
+"""
+Ensure that we can use pathlib.Path objects in all relevant IO functions.
+"""
+import sys
+from pathlib import Path
+
+import numpy as np
+
+import scipy.io
+import scipy.io.wavfile
+from scipy._lib._tmpdirs import tempdir
+import scipy.sparse
+
+
class TestPaths:
    """Every relevant scipy.io entry point must accept pathlib.Path objects."""

    # Small shared fixture vector written/read by the mat tests.
    data = np.arange(5).astype(np.int64)

    def test_savemat(self):
        """savemat accepts a Path destination."""
        with tempdir() as temp_dir:
            path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(path, {'data': self.data})
            assert path.is_file()

    def test_loadmat(self):
        """loadmat accepts a Path source."""
        # Save data with string path, load with pathlib.Path
        with tempdir() as temp_dir:
            path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(str(path), {'data': self.data})

            mat_contents = scipy.io.loadmat(path)
            assert (mat_contents['data'] == self.data).all()

    def test_whosmat(self):
        """whosmat accepts a Path source."""
        # Save data with string path, load with pathlib.Path
        with tempdir() as temp_dir:
            path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(str(path), {'data': self.data})

            contents = scipy.io.whosmat(path)
            assert contents[0] == ('data', (1, 5), 'int64')

    def test_readsav(self):
        """readsav accepts a Path source."""
        path = Path(__file__).parent / 'data/scalar_string.sav'
        scipy.io.readsav(path)

    def test_hb_read(self):
        """hb_read accepts a Path source."""
        # Save data with string path, load with pathlib.Path
        with tempdir() as temp_dir:
            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            path = Path(temp_dir) / 'data.hb'
            scipy.io.harwell_boeing.hb_write(str(path), data)

            data_new = scipy.io.harwell_boeing.hb_read(path)
            assert (data_new != data).nnz == 0

    def test_hb_write(self):
        """hb_write accepts a Path destination."""
        with tempdir() as temp_dir:
            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            path = Path(temp_dir) / 'data.hb'
            scipy.io.harwell_boeing.hb_write(path, data)
            assert path.is_file()

    def test_mmio_read(self):
        """mmread accepts a Path source."""
        # Save data with string path, load with pathlib.Path
        with tempdir() as temp_dir:
            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            path = Path(temp_dir) / 'data.mtx'
            scipy.io.mmwrite(str(path), data)

            data_new = scipy.io.mmread(path)
            assert (data_new != data).nnz == 0

    def test_mmio_write(self):
        """mmwrite accepts a Path destination."""
        with tempdir() as temp_dir:
            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            path = Path(temp_dir) / 'data.mtx'
            scipy.io.mmwrite(path, data)

    def test_netcdf_file(self):
        """netcdf_file accepts a Path source."""
        path = Path(__file__).parent / 'data/example_1.nc'
        scipy.io.netcdf.netcdf_file(path)

    def test_wavfile_read(self):
        """wavfile.read accepts a Path source."""
        path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
        scipy.io.wavfile.read(path)

    def test_wavfile_write(self):
        """wavfile.write accepts a Path destination."""
        # Read from str path, write to Path
        input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
        rate, data = scipy.io.wavfile.read(str(input_path))

        with tempdir() as temp_dir:
            output_path = Path(temp_dir) / input_path.name
            scipy.io.wavfile.write(output_path, rate, data)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_wavfile.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_wavfile.py
new file mode 100644
index 0000000..0fccefe
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/tests/test_wavfile.py
@@ -0,0 +1,394 @@
+import os
+import sys
+from io import BytesIO
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_, assert_array_equal,
+ suppress_warnings)
+import pytest
+from pytest import raises, warns
+
+from scipy.io import wavfile
+
+
+def datafile(fn):
+ return os.path.join(os.path.dirname(__file__), 'data', fn)
+
+
+def test_read_1():
+ # 32-bit PCM (which uses extensible format)
+ for mmap in [False, True]:
+ filename = 'test-44100Hz-le-1ch-4bytes.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 44100)
+ assert_(np.issubdtype(data.dtype, np.int32))
+ assert_equal(data.shape, (4410,))
+
+ del data
+
+
+def test_read_2():
+ # 8-bit unsigned PCM
+ for mmap in [False, True]:
+ filename = 'test-8000Hz-le-2ch-1byteu.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.uint8))
+ assert_equal(data.shape, (800, 2))
+
+ del data
+
+
+def test_read_3():
+ # Little-endian float
+ for mmap in [False, True]:
+ filename = 'test-44100Hz-2ch-32bit-float-le.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 44100)
+ assert_(np.issubdtype(data.dtype, np.float32))
+ assert_equal(data.shape, (441, 2))
+
+ del data
+
+
+def test_read_4():
+ # Contains unsupported 'PEAK' chunk
+ for mmap in [False, True]:
+ with suppress_warnings() as sup:
+ sup.filter(wavfile.WavFileWarning,
+ "Chunk .non-data. not understood, skipping it")
+ filename = 'test-48000Hz-2ch-64bit-float-le-wavex.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 48000)
+ assert_(np.issubdtype(data.dtype, np.float64))
+ assert_equal(data.shape, (480, 2))
+
+ del data
+
+
+def test_read_5():
+ # Big-endian float
+ for mmap in [False, True]:
+ filename = 'test-44100Hz-2ch-32bit-float-be.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 44100)
+ assert_(np.issubdtype(data.dtype, np.float32))
+ assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and
+ data.dtype.byteorder == '='))
+ assert_equal(data.shape, (441, 2))
+
+ del data
+
+
+def test_5_bit_odd_size_no_pad():
+ # 5-bit, 1 B container, 5 channels, 9 samples, 45 B data chunk
+ # Generated by LTspice, which incorrectly omits pad byte, but should be
+ # readable anyway
+ for mmap in [False, True]:
+ filename = 'test-8000Hz-le-5ch-9S-5bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.uint8))
+ assert_equal(data.shape, (9, 5))
+
+ # 8-5 = 3 LSBits should be 0
+ assert_equal(data & 0b00000111, 0)
+
+ # Unsigned
+ assert_equal(data.max(), 0b11111000) # Highest possible
+ assert_equal(data[0, 0], 128) # Midpoint is 128 for <= 8-bit
+ assert_equal(data.min(), 0) # Lowest possible
+
+ del data
+
+
+def test_12_bit_even_size():
+ # 12-bit, 2 B container, 4 channels, 9 samples, 72 B data chunk
+ # Generated by LTspice from 1 Vpk sine waves
+ for mmap in [False, True]:
+ filename = 'test-8000Hz-le-4ch-9S-12bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.int16))
+ assert_equal(data.shape, (9, 4))
+
+ # 16-12 = 4 LSBits should be 0
+ assert_equal(data & 0b00000000_00001111, 0)
+
+ # Signed
+ assert_equal(data.max(), 0b01111111_11110000) # Highest possible
+ assert_equal(data[0, 0], 0) # Midpoint is 0 for >= 9-bit
+ assert_equal(data.min(), -0b10000000_00000000) # Lowest possible
+
+ del data
+
+
+def test_24_bit_odd_size_with_pad():
+ # 24-bit, 3 B container, 3 channels, 5 samples, 45 B data chunk
+ # Should not raise any warnings about the data chunk pad byte
+ filename = 'test-8000Hz-le-3ch-5S-24bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=False)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.int32))
+ assert_equal(data.shape, (5, 3))
+
+ # All LSBytes should be 0
+ assert_equal(data & 0xff, 0)
+
+ # Hand-made max/min samples under different conventions:
+ # 2**(N-1) 2**(N-1)-1 LSB
+ assert_equal(data, [[-0x8000_0000, -0x7fff_ff00, -0x200],
+ [-0x4000_0000, -0x3fff_ff00, -0x100],
+ [+0x0000_0000, +0x0000_0000, +0x000],
+ [+0x4000_0000, +0x3fff_ff00, +0x100],
+ [+0x7fff_ff00, +0x7fff_ff00, +0x200]])
+ # ^ clipped
+
+
+def test_20_bit_extra_data():
+ # 20-bit, 3 B container, 1 channel, 10 samples, 30 B data chunk
+ # with extra data filling container beyond the bit depth
+ filename = 'test-8000Hz-le-1ch-10S-20bit-extra.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=False)
+
+ assert_equal(rate, 1234)
+ assert_(np.issubdtype(data.dtype, np.int32))
+ assert_equal(data.shape, (10,))
+
+ # All LSBytes should still be 0, because 3 B container in 4 B dtype
+ assert_equal(data & 0xff, 0)
+
+ # But it should load the data beyond 20 bits
+ assert_((data & 0xf00).any())
+
+ # Full-scale positive/negative samples, then being halved each time
+ assert_equal(data, [+0x7ffff000, # +full-scale 20-bit
+ -0x7ffff000, # -full-scale 20-bit
+ +0x7ffff000 >> 1, # +1/2
+ -0x7ffff000 >> 1, # -1/2
+ +0x7ffff000 >> 2, # +1/4
+ -0x7ffff000 >> 2, # -1/4
+ +0x7ffff000 >> 3, # +1/8
+ -0x7ffff000 >> 3, # -1/8
+ +0x7ffff000 >> 4, # +1/16
+ -0x7ffff000 >> 4, # -1/16
+ ])
+
+
+def test_36_bit_odd_size():
+ # 36-bit, 5 B container, 3 channels, 5 samples, 75 B data chunk + pad
+ filename = 'test-8000Hz-le-3ch-5S-36bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=False)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.int64))
+ assert_equal(data.shape, (5, 3))
+
+ # 28 LSBits should be 0
+ assert_equal(data & 0xfffffff, 0)
+
+ # Hand-made max/min samples under different conventions:
+ # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB
+ correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_f000_0000, -0x2000_0000],
+ [-0x4000_0000_0000_0000, -0x3fff_ffff_f000_0000, -0x1000_0000],
+ [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000_0000],
+ [+0x4000_0000_0000_0000, +0x3fff_ffff_f000_0000, +0x1000_0000],
+ [+0x7fff_ffff_f000_0000, +0x7fff_ffff_f000_0000, +0x2000_0000]]
+ # ^ clipped
+
+ assert_equal(data, correct)
+
+
+def test_45_bit_even_size():
+ # 45-bit, 6 B container, 3 channels, 5 samples, 90 B data chunk
+ filename = 'test-8000Hz-le-3ch-5S-45bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=False)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.int64))
+ assert_equal(data.shape, (5, 3))
+
+ # 19 LSBits should be 0
+ assert_equal(data & 0x7ffff, 0)
+
+ # Hand-made max/min samples under different conventions:
+ # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB
+ correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_fff8_0000, -0x10_0000],
+ [-0x4000_0000_0000_0000, -0x3fff_ffff_fff8_0000, -0x08_0000],
+ [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x00_0000],
+ [+0x4000_0000_0000_0000, +0x3fff_ffff_fff8_0000, +0x08_0000],
+ [+0x7fff_ffff_fff8_0000, +0x7fff_ffff_fff8_0000, +0x10_0000]]
+ # ^ clipped
+
+ assert_equal(data, correct)
+
+
+def test_53_bit_odd_size():
+ # 53-bit, 7 B container, 3 channels, 5 samples, 105 B data chunk + pad
+ filename = 'test-8000Hz-le-3ch-5S-53bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=False)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.int64))
+ assert_equal(data.shape, (5, 3))
+
+ # 11 LSBits should be 0
+ assert_equal(data & 0x7ff, 0)
+
+ # Hand-made max/min samples under different conventions:
+ # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB
+ correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_f800, -0x1000],
+ [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_f800, -0x0800],
+ [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000],
+ [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_f800, +0x0800],
+ [+0x7fff_ffff_ffff_f800, +0x7fff_ffff_ffff_f800, +0x1000]]
+ # ^ clipped
+
+ assert_equal(data, correct)
+
+
+def test_64_bit_even_size():
+ # 64-bit, 8 B container, 3 channels, 5 samples, 120 B data chunk
+ for mmap in [False, True]:
+ filename = 'test-8000Hz-le-3ch-5S-64bit.wav'
+ rate, data = wavfile.read(datafile(filename), mmap=False)
+
+ assert_equal(rate, 8000)
+ assert_(np.issubdtype(data.dtype, np.int64))
+ assert_equal(data.shape, (5, 3))
+
+ # Hand-made max/min samples under different conventions:
+ # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB
+ correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_ffff, -0x2],
+ [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_ffff, -0x1],
+ [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0],
+ [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_ffff, +0x1],
+ [+0x7fff_ffff_ffff_ffff, +0x7fff_ffff_ffff_ffff, +0x2]]
+ # ^ clipped
+
+ assert_equal(data, correct)
+
+ del data
+
+
+def test_unsupported_mmap():
+ # Test containers that cannot be mapped to numpy types
+ for filename in {'test-8000Hz-le-3ch-5S-24bit.wav',
+ 'test-8000Hz-le-3ch-5S-36bit.wav',
+ 'test-8000Hz-le-3ch-5S-45bit.wav',
+ 'test-8000Hz-le-3ch-5S-53bit.wav',
+ 'test-8000Hz-le-1ch-10S-20bit-extra.wav'}:
+ with raises(ValueError, match="mmap.*not compatible"):
+ rate, data = wavfile.read(datafile(filename), mmap=True)
+
+
+def test_read_unknown_filetype_fail():
+ # Not an RIFF
+ for mmap in [False, True]:
+ filename = 'example_1.nc'
+ with open(datafile(filename), 'rb') as fp:
+ with raises(ValueError, match="CDF.*'RIFF' and 'RIFX' supported"):
+ wavfile.read(fp, mmap=mmap)
+
+
+def test_read_unknown_riff_form_type():
+ # RIFF, but not WAVE form
+ for mmap in [False, True]:
+ filename = 'Transparent Busy.ani'
+ with open(datafile(filename), 'rb') as fp:
+ with raises(ValueError, match='Not a WAV file.*ACON'):
+ wavfile.read(fp, mmap=mmap)
+
+
+def test_read_unknown_wave_format():
+ # RIFF and WAVE, but not supported format
+ for mmap in [False, True]:
+ filename = 'test-8000Hz-le-1ch-1byte-ulaw.wav'
+ with open(datafile(filename), 'rb') as fp:
+ with raises(ValueError, match='Unknown wave file format.*MULAW.*'
+ 'Supported formats'):
+ wavfile.read(fp, mmap=mmap)
+
+
+def test_read_early_eof_with_data():
+ # File ends inside 'data' chunk, but we keep incomplete data
+ for mmap in [False, True]:
+ filename = 'test-44100Hz-le-1ch-4bytes-early-eof.wav'
+ with open(datafile(filename), 'rb') as fp:
+ with warns(wavfile.WavFileWarning, match='Reached EOF'):
+ rate, data = wavfile.read(fp, mmap=mmap)
+ assert data.size > 0
+ assert rate == 44100
+ # also test writing (gh-12176)
+ data[0] = 0
+
+
+def test_read_early_eof():
+ # File ends after 'fact' chunk at boundary, no data read
+ for mmap in [False, True]:
+ filename = 'test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav'
+ with open(datafile(filename), 'rb') as fp:
+ with raises(ValueError, match="Unexpected end of file."):
+ wavfile.read(fp, mmap=mmap)
+
+
+def test_read_incomplete_chunk():
+ # File ends inside 'fmt ' chunk ID, no data read
+ for mmap in [False, True]:
+ filename = 'test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav'
+ with open(datafile(filename), 'rb') as fp:
+ with raises(ValueError, match="Incomplete chunk ID.*b'f'"):
+ wavfile.read(fp, mmap=mmap)
+
+
+def _check_roundtrip(realfile, rate, dtype, channels, tmpdir):
+ if realfile:
+ tmpfile = str(tmpdir.join('temp.wav'))
+ else:
+ tmpfile = BytesIO()
+ data = np.random.rand(100, channels)
+ if channels == 1:
+ data = data[:, 0]
+ if dtype.kind == 'f':
+ # The range of the float type should be in [-1, 1]
+ data = data.astype(dtype)
+ else:
+ data = (data*128).astype(dtype)
+
+ wavfile.write(tmpfile, rate, data)
+
+ for mmap in [False, True]:
+ rate2, data2 = wavfile.read(tmpfile, mmap=mmap)
+
+ assert_equal(rate, rate2)
+ assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype)
+ assert_array_equal(data, data2)
+ # also test writing (gh-12176)
+ if realfile:
+ data2[0] = 0
+ else:
+ with pytest.raises(ValueError, match='read-only'):
+ data2[0] = 0
+
+
+def test_write_roundtrip(tmpdir):
+ for realfile in (False, True):
+ # signed 8-bit integer PCM is not allowed
+ # unsigned > 8-bit integer PCM is not allowed
+ # 8- or 16-bit float PCM is not expected
+ # g and q are platform-dependent, so not included
+ for dt_str in {'|u1',
+ 'i2', '>i4', '>i8', '>f4', '>f8'}:
+ for rate in (8000, 32000):
+ for channels in (1, 2, 5):
+ dt = np.dtype(dt_str)
+ _check_roundtrip(realfile, rate, dt, channels, tmpdir)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/io/wavfile.py b/dem-S-SAR/ISCEApp/_internal/scipy/io/wavfile.py
new file mode 100644
index 0000000..9b5845d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/io/wavfile.py
@@ -0,0 +1,828 @@
+"""
+Module to read / write wav files using NumPy arrays
+
+Functions
+---------
+`read`: Return the sample rate (in samples/sec) and data from a WAV file.
+
+`write`: Write a NumPy array as a WAV file.
+
+"""
+import io
+import sys
+import numpy
+import struct
+import warnings
+from enum import IntEnum
+
+
+__all__ = [
+ 'WavFileWarning',
+ 'read',
+ 'write'
+]
+
+
+class WavFileWarning(UserWarning):
+ pass
+
+
+class WAVE_FORMAT(IntEnum):
+ """
+ WAVE form wFormatTag IDs
+
+ Complete list is in mmreg.h in Windows 10 SDK. ALAC and OPUS are the
+ newest additions, in v10.0.14393 2016-07
+ """
+ UNKNOWN = 0x0000
+ PCM = 0x0001
+ ADPCM = 0x0002
+ IEEE_FLOAT = 0x0003
+ VSELP = 0x0004
+ IBM_CVSD = 0x0005
+ ALAW = 0x0006
+ MULAW = 0x0007
+ DTS = 0x0008
+ DRM = 0x0009
+ WMAVOICE9 = 0x000A
+ WMAVOICE10 = 0x000B
+ OKI_ADPCM = 0x0010
+ DVI_ADPCM = 0x0011
+ IMA_ADPCM = 0x0011 # Duplicate
+ MEDIASPACE_ADPCM = 0x0012
+ SIERRA_ADPCM = 0x0013
+ G723_ADPCM = 0x0014
+ DIGISTD = 0x0015
+ DIGIFIX = 0x0016
+ DIALOGIC_OKI_ADPCM = 0x0017
+ MEDIAVISION_ADPCM = 0x0018
+ CU_CODEC = 0x0019
+ HP_DYN_VOICE = 0x001A
+ YAMAHA_ADPCM = 0x0020
+ SONARC = 0x0021
+ DSPGROUP_TRUESPEECH = 0x0022
+ ECHOSC1 = 0x0023
+ AUDIOFILE_AF36 = 0x0024
+ APTX = 0x0025
+ AUDIOFILE_AF10 = 0x0026
+ PROSODY_1612 = 0x0027
+ LRC = 0x0028
+ DOLBY_AC2 = 0x0030
+ GSM610 = 0x0031
+ MSNAUDIO = 0x0032
+ ANTEX_ADPCME = 0x0033
+ CONTROL_RES_VQLPC = 0x0034
+ DIGIREAL = 0x0035
+ DIGIADPCM = 0x0036
+ CONTROL_RES_CR10 = 0x0037
+ NMS_VBXADPCM = 0x0038
+ CS_IMAADPCM = 0x0039
+ ECHOSC3 = 0x003A
+ ROCKWELL_ADPCM = 0x003B
+ ROCKWELL_DIGITALK = 0x003C
+ XEBEC = 0x003D
+ G721_ADPCM = 0x0040
+ G728_CELP = 0x0041
+ MSG723 = 0x0042
+ INTEL_G723_1 = 0x0043
+ INTEL_G729 = 0x0044
+ SHARP_G726 = 0x0045
+ MPEG = 0x0050
+ RT24 = 0x0052
+ PAC = 0x0053
+ MPEGLAYER3 = 0x0055
+ LUCENT_G723 = 0x0059
+ CIRRUS = 0x0060
+ ESPCM = 0x0061
+ VOXWARE = 0x0062
+ CANOPUS_ATRAC = 0x0063
+ G726_ADPCM = 0x0064
+ G722_ADPCM = 0x0065
+ DSAT = 0x0066
+ DSAT_DISPLAY = 0x0067
+ VOXWARE_BYTE_ALIGNED = 0x0069
+ VOXWARE_AC8 = 0x0070
+ VOXWARE_AC10 = 0x0071
+ VOXWARE_AC16 = 0x0072
+ VOXWARE_AC20 = 0x0073
+ VOXWARE_RT24 = 0x0074
+ VOXWARE_RT29 = 0x0075
+ VOXWARE_RT29HW = 0x0076
+ VOXWARE_VR12 = 0x0077
+ VOXWARE_VR18 = 0x0078
+ VOXWARE_TQ40 = 0x0079
+ VOXWARE_SC3 = 0x007A
+ VOXWARE_SC3_1 = 0x007B
+ SOFTSOUND = 0x0080
+ VOXWARE_TQ60 = 0x0081
+ MSRT24 = 0x0082
+ G729A = 0x0083
+ MVI_MVI2 = 0x0084
+ DF_G726 = 0x0085
+ DF_GSM610 = 0x0086
+ ISIAUDIO = 0x0088
+ ONLIVE = 0x0089
+ MULTITUDE_FT_SX20 = 0x008A
+ INFOCOM_ITS_G721_ADPCM = 0x008B
+ CONVEDIA_G729 = 0x008C
+ CONGRUENCY = 0x008D
+ SBC24 = 0x0091
+ DOLBY_AC3_SPDIF = 0x0092
+ MEDIASONIC_G723 = 0x0093
+ PROSODY_8KBPS = 0x0094
+ ZYXEL_ADPCM = 0x0097
+ PHILIPS_LPCBB = 0x0098
+ PACKED = 0x0099
+ MALDEN_PHONYTALK = 0x00A0
+ RACAL_RECORDER_GSM = 0x00A1
+ RACAL_RECORDER_G720_A = 0x00A2
+ RACAL_RECORDER_G723_1 = 0x00A3
+ RACAL_RECORDER_TETRA_ACELP = 0x00A4
+ NEC_AAC = 0x00B0
+ RAW_AAC1 = 0x00FF
+ RHETOREX_ADPCM = 0x0100
+ IRAT = 0x0101
+ VIVO_G723 = 0x0111
+ VIVO_SIREN = 0x0112
+ PHILIPS_CELP = 0x0120
+ PHILIPS_GRUNDIG = 0x0121
+ DIGITAL_G723 = 0x0123
+ SANYO_LD_ADPCM = 0x0125
+ SIPROLAB_ACEPLNET = 0x0130
+ SIPROLAB_ACELP4800 = 0x0131
+ SIPROLAB_ACELP8V3 = 0x0132
+ SIPROLAB_G729 = 0x0133
+ SIPROLAB_G729A = 0x0134
+ SIPROLAB_KELVIN = 0x0135
+ VOICEAGE_AMR = 0x0136
+ G726ADPCM = 0x0140
+ DICTAPHONE_CELP68 = 0x0141
+ DICTAPHONE_CELP54 = 0x0142
+ QUALCOMM_PUREVOICE = 0x0150
+ QUALCOMM_HALFRATE = 0x0151
+ TUBGSM = 0x0155
+ MSAUDIO1 = 0x0160
+ WMAUDIO2 = 0x0161
+ WMAUDIO3 = 0x0162
+ WMAUDIO_LOSSLESS = 0x0163
+ WMASPDIF = 0x0164
+ UNISYS_NAP_ADPCM = 0x0170
+ UNISYS_NAP_ULAW = 0x0171
+ UNISYS_NAP_ALAW = 0x0172
+ UNISYS_NAP_16K = 0x0173
+ SYCOM_ACM_SYC008 = 0x0174
+ SYCOM_ACM_SYC701_G726L = 0x0175
+ SYCOM_ACM_SYC701_CELP54 = 0x0176
+ SYCOM_ACM_SYC701_CELP68 = 0x0177
+ KNOWLEDGE_ADVENTURE_ADPCM = 0x0178
+ FRAUNHOFER_IIS_MPEG2_AAC = 0x0180
+ DTS_DS = 0x0190
+ CREATIVE_ADPCM = 0x0200
+ CREATIVE_FASTSPEECH8 = 0x0202
+ CREATIVE_FASTSPEECH10 = 0x0203
+ UHER_ADPCM = 0x0210
+ ULEAD_DV_AUDIO = 0x0215
+ ULEAD_DV_AUDIO_1 = 0x0216
+ QUARTERDECK = 0x0220
+ ILINK_VC = 0x0230
+ RAW_SPORT = 0x0240
+ ESST_AC3 = 0x0241
+ GENERIC_PASSTHRU = 0x0249
+ IPI_HSX = 0x0250
+ IPI_RPELP = 0x0251
+ CS2 = 0x0260
+ SONY_SCX = 0x0270
+ SONY_SCY = 0x0271
+ SONY_ATRAC3 = 0x0272
+ SONY_SPC = 0x0273
+ TELUM_AUDIO = 0x0280
+ TELUM_IA_AUDIO = 0x0281
+ NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285
+ FM_TOWNS_SND = 0x0300
+ MICRONAS = 0x0350
+ MICRONAS_CELP833 = 0x0351
+ BTV_DIGITAL = 0x0400
+ INTEL_MUSIC_CODER = 0x0401
+ INDEO_AUDIO = 0x0402
+ QDESIGN_MUSIC = 0x0450
+ ON2_VP7_AUDIO = 0x0500
+ ON2_VP6_AUDIO = 0x0501
+ VME_VMPCM = 0x0680
+ TPC = 0x0681
+ LIGHTWAVE_LOSSLESS = 0x08AE
+ OLIGSM = 0x1000
+ OLIADPCM = 0x1001
+ OLICELP = 0x1002
+ OLISBC = 0x1003
+ OLIOPR = 0x1004
+ LH_CODEC = 0x1100
+ LH_CODEC_CELP = 0x1101
+ LH_CODEC_SBC8 = 0x1102
+ LH_CODEC_SBC12 = 0x1103
+ LH_CODEC_SBC16 = 0x1104
+ NORRIS = 0x1400
+ ISIAUDIO_2 = 0x1401
+ SOUNDSPACE_MUSICOMPRESS = 0x1500
+ MPEG_ADTS_AAC = 0x1600
+ MPEG_RAW_AAC = 0x1601
+ MPEG_LOAS = 0x1602
+ NOKIA_MPEG_ADTS_AAC = 0x1608
+ NOKIA_MPEG_RAW_AAC = 0x1609
+ VODAFONE_MPEG_ADTS_AAC = 0x160A
+ VODAFONE_MPEG_RAW_AAC = 0x160B
+ MPEG_HEAAC = 0x1610
+ VOXWARE_RT24_SPEECH = 0x181C
+ SONICFOUNDRY_LOSSLESS = 0x1971
+ INNINGS_TELECOM_ADPCM = 0x1979
+ LUCENT_SX8300P = 0x1C07
+ LUCENT_SX5363S = 0x1C0C
+ CUSEEME = 0x1F03
+ NTCSOFT_ALF2CM_ACM = 0x1FC4
+ DVM = 0x2000
+ DTS2 = 0x2001
+ MAKEAVIS = 0x3313
+ DIVIO_MPEG4_AAC = 0x4143
+ NOKIA_ADAPTIVE_MULTIRATE = 0x4201
+ DIVIO_G726 = 0x4243
+ LEAD_SPEECH = 0x434C
+ LEAD_VORBIS = 0x564C
+ WAVPACK_AUDIO = 0x5756
+ OGG_VORBIS_MODE_1 = 0x674F
+ OGG_VORBIS_MODE_2 = 0x6750
+ OGG_VORBIS_MODE_3 = 0x6751
+ OGG_VORBIS_MODE_1_PLUS = 0x676F
+ OGG_VORBIS_MODE_2_PLUS = 0x6770
+ OGG_VORBIS_MODE_3_PLUS = 0x6771
+ ALAC = 0x6C61
+ _3COM_NBX = 0x7000 # Can't have leading digit
+ OPUS = 0x704F
+ FAAD_AAC = 0x706D
+ AMR_NB = 0x7361
+ AMR_WB = 0x7362
+ AMR_WP = 0x7363
+ GSM_AMR_CBR = 0x7A21
+ GSM_AMR_VBR_SID = 0x7A22
+ COMVERSE_INFOSYS_G723_1 = 0xA100
+ COMVERSE_INFOSYS_AVQSBC = 0xA101
+ COMVERSE_INFOSYS_SBC = 0xA102
+ SYMBOL_G729_A = 0xA103
+ VOICEAGE_AMR_WB = 0xA104
+ INGENIENT_G726 = 0xA105
+ MPEG4_AAC = 0xA106
+ ENCORE_G726 = 0xA107
+ ZOLL_ASAO = 0xA108
+ SPEEX_VOICE = 0xA109
+ VIANIX_MASC = 0xA10A
+ WM9_SPECTRUM_ANALYZER = 0xA10B
+ WMF_SPECTRUM_ANAYZER = 0xA10C
+ GSM_610 = 0xA10D
+ GSM_620 = 0xA10E
+ GSM_660 = 0xA10F
+ GSM_690 = 0xA110
+ GSM_ADAPTIVE_MULTIRATE_WB = 0xA111
+ POLYCOM_G722 = 0xA112
+ POLYCOM_G728 = 0xA113
+ POLYCOM_G729_A = 0xA114
+ POLYCOM_SIREN = 0xA115
+ GLOBAL_IP_ILBC = 0xA116
+ RADIOTIME_TIME_SHIFT_RADIO = 0xA117
+ NICE_ACA = 0xA118
+ NICE_ADPCM = 0xA119
+ VOCORD_G721 = 0xA11A
+ VOCORD_G726 = 0xA11B
+ VOCORD_G722_1 = 0xA11C
+ VOCORD_G728 = 0xA11D
+ VOCORD_G729 = 0xA11E
+ VOCORD_G729_A = 0xA11F
+ VOCORD_G723_1 = 0xA120
+ VOCORD_LBC = 0xA121
+ NICE_G728 = 0xA122
+ FRACE_TELECOM_G729 = 0xA123
+ CODIAN = 0xA124
+ FLAC = 0xF1AC
+ EXTENSIBLE = 0xFFFE
+ DEVELOPMENT = 0xFFFF
+
+
+KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT}
+
+
+def _raise_bad_format(format_tag):
+ try:
+ format_name = WAVE_FORMAT(format_tag).name
+ except ValueError:
+ format_name = f'{format_tag:#06x}'
+ raise ValueError(f"Unknown wave file format: {format_name}. Supported "
+ "formats: " +
+ ', '.join(x.name for x in KNOWN_WAVE_FORMATS))
+
+
+def _read_fmt_chunk(fid, is_big_endian):
+ """
+ Returns
+ -------
+ size : int
+ size of format subchunk in bytes (minus 8 for "fmt " and itself)
+ format_tag : int
+ PCM, float, or compressed format
+ channels : int
+ number of channels
+ fs : int
+ sampling frequency in samples per second
+ bytes_per_second : int
+ overall byte rate for the file
+ block_align : int
+ bytes per sample, including all channels
+ bit_depth : int
+ bits per sample
+
+ Notes
+ -----
+ Assumes file pointer is immediately after the 'fmt ' id
+ """
+ if is_big_endian:
+ fmt = '>'
+ else:
+ fmt = '<'
+
+ size = struct.unpack(fmt+'I', fid.read(4))[0]
+
+ if size < 16:
+ raise ValueError("Binary structure of wave file is not compliant")
+
+ res = struct.unpack(fmt+'HHIIHH', fid.read(16))
+ bytes_read = 16
+
+ format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
+
+ if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2):
+ ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
+ bytes_read += 2
+ if ext_chunk_size >= 22:
+ extensible_chunk_data = fid.read(22)
+ bytes_read += 22
+ raw_guid = extensible_chunk_data[2+4:2+4+16]
+ # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
+ # MS GUID byte order: first three groups are native byte order,
+ # rest is Big Endian
+ if is_big_endian:
+ tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
+ else:
+ tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
+ if raw_guid.endswith(tail):
+ format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
+ else:
+ raise ValueError("Binary structure of wave file is not compliant")
+
+ if format_tag not in KNOWN_WAVE_FORMATS:
+ _raise_bad_format(format_tag)
+
+ # move file pointer to next chunk
+ if size > bytes_read:
+ fid.read(size - bytes_read)
+
+ # fmt should always be 16, 18 or 40, but handle it just in case
+ _handle_pad_byte(fid, size)
+
+ return (size, format_tag, channels, fs, bytes_per_second, block_align,
+ bit_depth)
+
+
+def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
+ block_align, mmap=False):
+ """
+ Notes
+ -----
+ Assumes file pointer is immediately after the 'data' id
+
+ It's possible to not use all available bits in a container, or to store
+ samples in a container bigger than necessary, so bytes_per_sample uses
+ the actual reported container size (nBlockAlign / nChannels). Real-world
+ examples:
+
+ Adobe Audition's "24-bit packed int (type 1, 20-bit)"
+
+ nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20
+
+ http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
+ is:
+
+ nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12
+
+ http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
+ gives an example of:
+
+ nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
+ """
+ if is_big_endian:
+ fmt = '>'
+ else:
+ fmt = '<'
+
+ # Size of the data subchunk in bytes
+ size = struct.unpack(fmt+'I', fid.read(4))[0]
+
+ # Number of bytes per sample (sample container size)
+ bytes_per_sample = block_align // channels
+ n_samples = size // bytes_per_sample
+
+ if format_tag == WAVE_FORMAT.PCM:
+ if 1 <= bit_depth <= 8:
+ dtype = 'u1' # WAV of 8-bit integer or less are unsigned
+ elif bytes_per_sample in {3, 5, 6, 7}:
+ # No compatible dtype. Load as raw bytes for reshaping later.
+ dtype = 'V1'
+ elif bit_depth <= 64:
+ # Remaining bit depths can map directly to signed numpy dtypes
+ dtype = f'{fmt}i{bytes_per_sample}'
+ else:
+ raise ValueError("Unsupported bit depth: the WAV file "
+ f"has {bit_depth}-bit integer data.")
+ elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
+ if bit_depth in {32, 64}:
+ dtype = f'{fmt}f{bytes_per_sample}'
+ else:
+ raise ValueError("Unsupported bit depth: the WAV file "
+ f"has {bit_depth}-bit floating-point data.")
+ else:
+ _raise_bad_format(format_tag)
+
+ start = fid.tell()
+ if not mmap:
+ try:
+ count = size if dtype == 'V1' else n_samples
+ data = numpy.fromfile(fid, dtype=dtype, count=count)
+ except io.UnsupportedOperation: # not a C-like file
+ fid.seek(start, 0) # just in case it seeked, though it shouldn't
+ data = numpy.frombuffer(fid.read(size), dtype=dtype)
+
+ if dtype == 'V1':
+ # Rearrange raw bytes into smallest compatible numpy dtype
+ dt = numpy.int32 if bytes_per_sample == 3 else numpy.int64
+ a = numpy.zeros((len(data) // bytes_per_sample, dt().itemsize),
+ dtype='V1')
+ a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample))
+ data = a.view(dt).reshape(a.shape[:-1])
+ else:
+ if bytes_per_sample in {1, 2, 4, 8}:
+ start = fid.tell()
+ data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
+ shape=(n_samples,))
+ fid.seek(start + size)
+ else:
+ raise ValueError("mmap=True not compatible with "
+ f"{bytes_per_sample}-byte container size.")
+
+ _handle_pad_byte(fid, size)
+
+ if channels > 1:
+ data = data.reshape(-1, channels)
+ return data
+
+
+def _skip_unknown_chunk(fid, is_big_endian):
+ if is_big_endian:
+ fmt = '>I'
+ else:
+ fmt = '>> from os.path import dirname, join as pjoin
+ >>> from scipy.io import wavfile
+ >>> import scipy.io
+
+ Get the filename for an example .wav file from the tests/data directory.
+
+ >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
+ >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
+
+ Load the .wav file contents.
+
+ >>> samplerate, data = wavfile.read(wav_fname)
+ >>> print(f"number of channels = {data.shape[1]}")
+ number of channels = 2
+ >>> length = data.shape[0] / samplerate
+ >>> print(f"length = {length}s")
+ length = 0.01s
+
+ Plot the waveform.
+
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+ >>> time = np.linspace(0., length, data.shape[0])
+ >>> plt.plot(time, data[:, 0], label="Left channel")
+ >>> plt.plot(time, data[:, 1], label="Right channel")
+ >>> plt.legend()
+ >>> plt.xlabel("Time [s]")
+ >>> plt.ylabel("Amplitude")
+ >>> plt.show()
+
+ """
+ if hasattr(filename, 'read'):
+ fid = filename
+ mmap = False
+ else:
+ fid = open(filename, 'rb')
+
+ try:
+ file_size, is_big_endian = _read_riff_chunk(fid)
+ fmt_chunk_received = False
+ data_chunk_received = False
+ while fid.tell() < file_size:
+ # read the next chunk
+ chunk_id = fid.read(4)
+
+ if not chunk_id:
+ if data_chunk_received:
+ # End of file but data successfully read
+ warnings.warn(
+ "Reached EOF prematurely; finished at {:d} bytes, "
+ "expected {:d} bytes from header."
+ .format(fid.tell(), file_size),
+ WavFileWarning, stacklevel=2)
+ break
+ else:
+ raise ValueError("Unexpected end of file.")
+ elif len(chunk_id) < 4:
+ msg = f"Incomplete chunk ID: {repr(chunk_id)}"
+ # If we have the data, ignore the broken chunk
+ if fmt_chunk_received and data_chunk_received:
+ warnings.warn(msg + ", ignoring it.", WavFileWarning,
+ stacklevel=2)
+ else:
+ raise ValueError(msg)
+
+ if chunk_id == b'fmt ':
+ fmt_chunk_received = True
+ fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
+ format_tag, channels, fs = fmt_chunk[1:4]
+ bit_depth = fmt_chunk[6]
+ block_align = fmt_chunk[5]
+ elif chunk_id == b'fact':
+ _skip_unknown_chunk(fid, is_big_endian)
+ elif chunk_id == b'data':
+ data_chunk_received = True
+ if not fmt_chunk_received:
+ raise ValueError("No fmt chunk before data")
+ data = _read_data_chunk(fid, format_tag, channels, bit_depth,
+ is_big_endian, block_align, mmap)
+ elif chunk_id == b'LIST':
+ # Someday this could be handled properly but for now skip it
+ _skip_unknown_chunk(fid, is_big_endian)
+ elif chunk_id in {b'JUNK', b'Fake'}:
+ # Skip alignment chunks without warning
+ _skip_unknown_chunk(fid, is_big_endian)
+ else:
+ warnings.warn("Chunk (non-data) not understood, skipping it.",
+ WavFileWarning, stacklevel=2)
+ _skip_unknown_chunk(fid, is_big_endian)
+ finally:
+ if not hasattr(filename, 'read'):
+ fid.close()
+ else:
+ fid.seek(0)
+
+ return fs, data
+
+
+def write(filename, rate, data):
+ """
+ Write a NumPy array as a WAV file.
+
+ Parameters
+ ----------
+ filename : string or open file handle
+ Output wav file.
+ rate : int
+ The sample rate (in samples/sec).
+ data : ndarray
+ A 1-D or 2-D NumPy array of either integer or float data-type.
+
+ Notes
+ -----
+ * Writes a simple uncompressed WAV file.
+ * To write multiple-channels, use a 2-D array of shape
+ (Nsamples, Nchannels).
+ * The bits-per-sample and PCM/float will be determined by the data-type.
+
+ Common data types: [1]_
+
+ ===================== =========== =========== =============
+ WAV format Min Max NumPy dtype
+ ===================== =========== =========== =============
+ 32-bit floating-point -1.0 +1.0 float32
+ 32-bit PCM -2147483648 +2147483647 int32
+ 16-bit PCM -32768 +32767 int16
+ 8-bit PCM 0 255 uint8
+ ===================== =========== =========== =============
+
+ Note that 8-bit PCM is unsigned.
+
+ References
+ ----------
+ .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
+ Interface and Data Specifications 1.0", section "Data Format of the
+ Samples", August 1991
+ http://www.tactilemedia.com/info/MCI_Control_Info.html
+
+ Examples
+ --------
+ Create a 100Hz sine wave, sampled at 44100Hz.
+ Write to 16-bit PCM, Mono.
+
+ >>> from scipy.io.wavfile import write
+ >>> samplerate = 44100; fs = 100
+ >>> t = np.linspace(0., 1., samplerate)
+ >>> amplitude = np.iinfo(np.int16).max
+ >>> data = amplitude * np.sin(2. * np.pi * fs * t)
+ >>> write("example.wav", samplerate, data.astype(np.int16))
+
+ """
+ if hasattr(filename, 'write'):
+ fid = filename
+ else:
+ fid = open(filename, 'wb')
+
+ fs = rate
+
+ try:
+ dkind = data.dtype.kind
+ if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
+ data.dtype.itemsize == 1)):
+ raise ValueError("Unsupported data type '%s'" % data.dtype)
+
+ header_data = b''
+
+ header_data += b'RIFF'
+ header_data += b'\x00\x00\x00\x00'
+ header_data += b'WAVE'
+
+ # fmt chunk
+ header_data += b'fmt '
+ if dkind == 'f':
+ format_tag = WAVE_FORMAT.IEEE_FLOAT
+ else:
+ format_tag = WAVE_FORMAT.PCM
+ if data.ndim == 1:
+ channels = 1
+ else:
+ channels = data.shape[1]
+ bit_depth = data.dtype.itemsize * 8
+ bytes_per_second = fs*(bit_depth // 8)*channels
+ block_align = channels * (bit_depth // 8)
+
+ fmt_chunk_data = struct.pack(' 0xFFFFFFFF:
+ raise ValueError("Data exceeds wave file size limit")
+
+ fid.write(header_data)
+
+ # data chunk
+ fid.write(b'data')
+ fid.write(struct.pack('' or (data.dtype.byteorder == '=' and
+ sys.byteorder == 'big'):
+ data = data.byteswap()
+ _array_tofile(fid, data)
+
+ # Determine file size and place it in correct
+ # position at start of the file.
+ size = fid.tell()
+ fid.seek(4)
+ fid.write(struct.pack('`__
+ for more linear algebra functions. Note that
+ although `scipy.linalg` imports most of them, identically named
+ functions from `scipy.linalg` may offer more or slightly differing
+ functionality.
+
+
+Basics
+======
+
+.. autosummary::
+ :toctree: generated/
+
+ inv - Find the inverse of a square matrix
+ solve - Solve a linear system of equations
+ solve_banded - Solve a banded linear system
+ solveh_banded - Solve a Hermitian or symmetric banded system
+ solve_circulant - Solve a circulant system
+ solve_triangular - Solve a triangular matrix
+ solve_toeplitz - Solve a toeplitz matrix
+ matmul_toeplitz - Multiply a Toeplitz matrix with an array.
+ det - Find the determinant of a square matrix
+ norm - Matrix and vector norm
+ lstsq - Solve a linear least-squares problem
+ pinv - Pseudo-inverse (Moore-Penrose) using lstsq
+ pinv2 - Pseudo-inverse using svd
+ pinvh - Pseudo-inverse of hermitian matrix
+ kron - Kronecker product of two arrays
+ khatri_rao - Khatri-Rao product of two arrays
+ tril - Construct a lower-triangular matrix from a given matrix
+ triu - Construct an upper-triangular matrix from a given matrix
+ orthogonal_procrustes - Solve an orthogonal Procrustes problem
+ matrix_balance - Balance matrix entries with a similarity transformation
+ subspace_angles - Compute the subspace angles between two matrices
+ LinAlgError
+ LinAlgWarning
+
+Eigenvalue Problems
+===================
+
+.. autosummary::
+ :toctree: generated/
+
+ eig - Find the eigenvalues and eigenvectors of a square matrix
+ eigvals - Find just the eigenvalues of a square matrix
+ eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
+ eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
+ eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
+ eigvals_banded - Find just the eigenvalues of a banded matrix
+ eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix
+ eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix
+
+Decompositions
+==============
+
+.. autosummary::
+ :toctree: generated/
+
+ lu - LU decomposition of a matrix
+ lu_factor - LU decomposition returning unordered matrix and pivots
+ lu_solve - Solve Ax=b using back substitution with output of lu_factor
+ svd - Singular value decomposition of a matrix
+ svdvals - Singular values of a matrix
+ diagsvd - Construct matrix of singular values from output of svd
+ orth - Construct orthonormal basis for the range of A using svd
+ null_space - Construct orthonormal basis for the null space of A using svd
+ ldl - LDL.T decomposition of a Hermitian or a symmetric matrix.
+ cholesky - Cholesky decomposition of a matrix
+ cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
+ cho_factor - Cholesky decomposition for use in solving a linear system
+ cho_solve - Solve previously factored linear system
+ cho_solve_banded - Solve previously factored banded linear system
+ polar - Compute the polar decomposition.
+ qr - QR decomposition of a matrix
+ qr_multiply - QR decomposition and multiplication by Q
+ qr_update - Rank k QR update
+ qr_delete - QR downdate on row or column deletion
+ qr_insert - QR update on row or column insertion
+ rq - RQ decomposition of a matrix
+ qz - QZ decomposition of a pair of matrices
+ ordqz - QZ decomposition of a pair of matrices with reordering
+ schur - Schur decomposition of a matrix
+ rsf2csf - Real to complex Schur form
+ hessenberg - Hessenberg form of a matrix
+ cdf2rdf - Complex diagonal form to real diagonal block form
+ cossin - Cosine sine decomposition of a unitary or orthogonal matrix
+
+.. seealso::
+
+ `scipy.linalg.interpolative` -- Interpolative matrix decompositions
+
+
+Matrix Functions
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ expm - Matrix exponential
+ logm - Matrix logarithm
+ cosm - Matrix cosine
+ sinm - Matrix sine
+ tanm - Matrix tangent
+ coshm - Matrix hyperbolic cosine
+ sinhm - Matrix hyperbolic sine
+ tanhm - Matrix hyperbolic tangent
+ signm - Matrix sign
+ sqrtm - Matrix square root
+ funm - Evaluating an arbitrary matrix function
+ expm_frechet - Frechet derivative of the matrix exponential
+ expm_cond - Relative condition number of expm in the Frobenius norm
+ fractional_matrix_power - Fractional matrix power
+
+
+Matrix Equation Solvers
+=======================
+
+.. autosummary::
+ :toctree: generated/
+
+ solve_sylvester - Solve the Sylvester matrix equation
+ solve_continuous_are - Solve the continuous-time algebraic Riccati equation
+ solve_discrete_are - Solve the discrete-time algebraic Riccati equation
+ solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
+ solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
+
+
+Sketches and Random Projections
+===============================
+
+.. autosummary::
+ :toctree: generated/
+
+ clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a CountMin Sketch)
+
+Special Matrices
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ block_diag - Construct a block diagonal matrix from submatrices
+ circulant - Circulant matrix
+ companion - Companion matrix
+ convolution_matrix - Convolution matrix
+ dft - Discrete Fourier transform matrix
+ fiedler - Fiedler matrix
+ fiedler_companion - Fiedler companion matrix
+ hadamard - Hadamard matrix of order 2**n
+ hankel - Hankel matrix
+ helmert - Helmert matrix
+ hilbert - Hilbert matrix
+ invhilbert - Inverse Hilbert matrix
+ leslie - Leslie matrix
+ pascal - Pascal matrix
+ invpascal - Inverse Pascal matrix
+ toeplitz - Toeplitz matrix
+ tri - Construct a matrix filled with ones at and below a given diagonal
+
+Low-level routines
+==================
+
+.. autosummary::
+ :toctree: generated/
+
+ get_blas_funcs
+ get_lapack_funcs
+ find_best_blas_type
+
+.. seealso::
+
+ `scipy.linalg.blas` -- Low-level BLAS functions
+
+ `scipy.linalg.lapack` -- Low-level LAPACK functions
+
+ `scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
+
+ `scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
+
+""" # noqa: E501
+
+from .misc import *
+from .basic import *
+from .decomp import *
+from .decomp_lu import *
+from ._decomp_ldl import *
+from .decomp_cholesky import *
+from .decomp_qr import *
+from ._decomp_qz import *
+from .decomp_svd import *
+from .decomp_schur import *
+from ._decomp_polar import *
+from .matfuncs import *
+from .blas import *
+from .lapack import *
+from .special_matrices import *
+from ._solvers import *
+from ._procrustes import *
+from ._decomp_update import *
+from ._sketches import *
+from ._decomp_cossin import *
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_cython_signature_generator.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_cython_signature_generator.py
new file mode 100644
index 0000000..1a68c96
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_cython_signature_generator.py
@@ -0,0 +1,203 @@
+"""
+A script that uses f2py to generate the signature files used to make
+the Cython BLAS and LAPACK wrappers from the fortran source code for
+LAPACK and the reference BLAS.
+
+To generate the BLAS wrapper signatures call:
+python _cython_signature_generator.py blas
+
+To generate the LAPACK wrapper signatures call:
+python _cython_signature_generator.py lapack
+
+This script expects to be run on the source directory for
+the oldest supported version of LAPACK (currently 3.4.0).
+"""
+
+import glob
+import os
+from numpy.f2py import crackfortran
+
# Mapping from Fortran type declarations to the type codes used in the
# generated Cython signature files.
sig_types = {
    'integer': 'int',
    'complex': 'c',
    'double precision': 'd',
    'real': 's',
    'complex*16': 'z',
    'double complex': 'z',
    'character': 'char',
    'logical': 'bint',
}


def get_type(info, arg):
    """Return the signature type code for argument *arg*.

    *info* is a single routine dictionary produced by
    ``numpy.f2py.crackfortran``; the code is looked up from the argument's
    ``typespec`` via ``sig_types``.
    """
    var = info['vars'][arg]
    code = sig_types[var['typespec']]
    # A plain 'complex' declaration carrying an explicit kind selector is
    # actually double-precision complex.
    if code == 'c' and var.get('kindselector') is not None:
        code = 'z'
    return code
+
+
def make_signature(filename):
    """Build one C-style signature line for the Fortran routine in *filename*.

    The file is parsed with ``numpy.f2py.crackfortran``; subroutines map to a
    ``void`` return type, functions to the type of their result variable.
    """
    info = crackfortran.crackfortran(filename)[0]
    name = info['name']
    ret = 'void' if info['block'] == 'subroutine' else get_type(info, name)
    args = ', '.join('{0} *{1}'.format(get_type(info, arg), arg)
                     for arg in info['args'])
    # crackfortran renames any argument called 'rank' to 'rank_bn'; undo that.
    args = args.replace('rank_bn', 'rank')
    return '{0} {1}({2})\n'.format(ret, name, args)
+
+
def get_sig_name(line):
    """Extract the routine name from a signature line like ``'void foo(...)'``."""
    head, _, _ = line.partition('(')
    return head.split(' ')[-1]
+
+
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
    """Write a sorted signature file for the Fortran sources in *directory*.

    Routines named in *exclusions* are skipped. *manual_wrappers*, if given,
    is a newline-separated block of hand-written signatures that is appended
    verbatim; the routines it names are excluded from automatic generation.
    """
    # Drop a single trailing path separator, if present.
    if directory[-1] in ('/', '\\'):
        directory = directory[:-1]
    sources = sorted(glob.glob(directory + '/*.f*'))
    if exclusions is None:
        exclusions = []
    if manual_wrappers is not None:
        exclusions += [get_sig_name(line)
                       for line in manual_wrappers.split('\n')]
    sigs = []
    for src in sources:
        routine = os.path.splitext(os.path.basename(src))[0]
        if routine not in exclusions:
            sigs.append(make_signature(src))
    if manual_wrappers is not None:
        sigs += [line + '\n' for line in manual_wrappers.split('\n')]
    sigs.sort(key=get_sig_name)
    header = ["# This file was generated by _cython_signature_generator.py.\n",
              "# Do not edit this file directly.\n\n"]
    with open(outfile, 'w') as f:
        f.writelines(header)
        f.writelines(sigs)
+
+# slamch and dlamch are not in the lapack src directory, but, since they
+# already have Python wrappers, we'll wrap them as well.
+# The other manual signatures are used because the signature generating
+# functions don't work when function pointer arguments are used.
+
+
+lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
+void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
+void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
+void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
+void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
+void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
+void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
+void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
+d dlamch(char *cmach)
+void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
+void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
+void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
+void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
+void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
+s slamch(char *cmach)
+void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
+void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
+void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
+void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
+
+
+# Exclude scabs and sisnan since they aren't currently included
+# in the scipy-specific ABI wrappers.
+blas_exclusions = ['scabs1', 'xerbla']
+
+# Exclude all routines that do not have consistent interfaces from
+# LAPACK 3.4.0 through 3.6.0.
+# Also exclude routines with string arguments to avoid
+# compatibility woes with different standards for string arguments.
+lapack_exclusions = [
+ # Not included because people should be using the
+ # C standard library function instead.
+ # sisnan is also not currently included in the
+ # ABI wrappers.
+ 'sisnan', 'dlaisnan', 'slaisnan',
+ # Exclude slaneg because it isn't currently included
+ # in the ABI wrappers
+ 'slaneg',
+ # Excluded because they require Fortran string arguments.
+ 'ilaenv', 'iparmq', 'lsamen', 'xerbla',
+ # Exclude XBLAS routines since they aren't included
+ # by default.
+ 'cgesvxx', 'dgesvxx', 'sgesvxx', 'zgesvxx',
+ 'cgerfsx', 'dgerfsx', 'sgerfsx', 'zgerfsx',
+ 'cla_gerfsx_extended', 'dla_gerfsx_extended',
+ 'sla_gerfsx_extended', 'zla_gerfsx_extended',
+ 'cla_geamv', 'dla_geamv', 'sla_geamv', 'zla_geamv',
+ 'dla_gercond', 'sla_gercond',
+ 'cla_gercond_c', 'zla_gercond_c',
+ 'cla_gercond_x', 'zla_gercond_x',
+ 'cla_gerpvgrw', 'dla_gerpvgrw',
+ 'sla_gerpvgrw', 'zla_gerpvgrw',
+ 'csysvxx', 'dsysvxx', 'ssysvxx', 'zsysvxx',
+ 'csyrfsx', 'dsyrfsx', 'ssyrfsx', 'zsyrfsx',
+ 'cla_syrfsx_extended', 'dla_syrfsx_extended',
+ 'sla_syrfsx_extended', 'zla_syrfsx_extended',
+ 'cla_syamv', 'dla_syamv', 'sla_syamv', 'zla_syamv',
+ 'dla_syrcond', 'sla_syrcond',
+ 'cla_syrcond_c', 'zla_syrcond_c',
+ 'cla_syrcond_x', 'zla_syrcond_x',
+ 'cla_syrpvgrw', 'dla_syrpvgrw',
+ 'sla_syrpvgrw', 'zla_syrpvgrw',
+ 'cposvxx', 'dposvxx', 'sposvxx', 'zposvxx',
+ 'cporfsx', 'dporfsx', 'sporfsx', 'zporfsx',
+ 'cla_porfsx_extended', 'dla_porfsx_extended',
+ 'sla_porfsx_extended', 'zla_porfsx_extended',
+ 'dla_porcond', 'sla_porcond',
+ 'cla_porcond_c', 'zla_porcond_c',
+ 'cla_porcond_x', 'zla_porcond_x',
+ 'cla_porpvgrw', 'dla_porpvgrw',
+ 'sla_porpvgrw', 'zla_porpvgrw',
+ 'cgbsvxx', 'dgbsvxx', 'sgbsvxx', 'zgbsvxx',
+ 'cgbrfsx', 'dgbrfsx', 'sgbrfsx', 'zgbrfsx',
+ 'cla_gbrfsx_extended', 'dla_gbrfsx_extended',
+ 'sla_gbrfsx_extended', 'zla_gbrfsx_extended',
+ 'cla_gbamv', 'dla_gbamv', 'sla_gbamv', 'zla_gbamv',
+ 'dla_gbrcond', 'sla_gbrcond',
+ 'cla_gbrcond_c', 'zla_gbrcond_c',
+ 'cla_gbrcond_x', 'zla_gbrcond_x',
+ 'cla_gbrpvgrw', 'dla_gbrpvgrw',
+ 'sla_gbrpvgrw', 'zla_gbrpvgrw',
+ 'chesvxx', 'zhesvxx',
+ 'cherfsx', 'zherfsx',
+ 'cla_herfsx_extended', 'zla_herfsx_extended',
+ 'cla_heamv', 'zla_heamv',
+ 'cla_hercond_c', 'zla_hercond_c',
+ 'cla_hercond_x', 'zla_hercond_x',
+ 'cla_herpvgrw', 'zla_herpvgrw',
+ 'sla_lin_berr', 'cla_lin_berr',
+ 'dla_lin_berr', 'zla_lin_berr',
+ 'clarscl2', 'dlarscl2', 'slarscl2', 'zlarscl2',
+ 'clascl2', 'dlascl2', 'slascl2', 'zlascl2',
+ 'cla_wwaddw', 'dla_wwaddw', 'sla_wwaddw', 'zla_wwaddw',
+ # Removed between 3.3.1 and 3.4.0.
+ 'cla_rpvgrw', 'dla_rpvgrw', 'sla_rpvgrw', 'zla_rpvgrw',
+ # Signatures changed between 3.4.0 and 3.4.1.
+ 'dlasq5', 'slasq5',
+ # Routines deprecated in LAPACK 3.6.0
+ 'cgegs', 'cgegv', 'cgelsx',
+ 'cgeqpf', 'cggsvd', 'cggsvp',
+ 'clahrd', 'clatzm', 'ctzrqf',
+ 'dgegs', 'dgegv', 'dgelsx',
+ 'dgeqpf', 'dggsvd', 'dggsvp',
+ 'dlahrd', 'dlatzm', 'dtzrqf',
+ 'sgegs', 'sgegv', 'sgelsx',
+ 'sgeqpf', 'sggsvd', 'sggsvp',
+ 'slahrd', 'slatzm', 'stzrqf',
+ 'zgegs', 'zgegv', 'zgelsx',
+ 'zgeqpf', 'zggsvd', 'zggsvp',
+ 'zlahrd', 'zlatzm', 'ztzrqf']
+
+
if __name__ == '__main__':
    # Command line:
    #   python _cython_signature_generator.py <blas|lapack> <src_dir> <outfile>
    from sys import argv
    libname, src_dir, outfile = argv[1:]
    if libname.lower() == 'blas':
        sigs_from_dir(src_dir, outfile, exclusions=blas_exclusions)
    elif libname.lower() == 'lapack':
        # LAPACK additionally needs hand-written signatures for routines
        # taking callback (function pointer) arguments; see
        # lapack_manual_wrappers above.
        sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
                      exclusions=lapack_exclusions)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_cossin.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_cossin.py
new file mode 100644
index 0000000..be6794b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_cossin.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+from collections.abc import Iterable
+import numpy as np
+
+from scipy._lib._util import _asarray_validated
+from scipy.linalg import block_diag, LinAlgError
+from .lapack import _compute_lwork, get_lapack_funcs
+
+__all__ = ['cossin']
+
+
def cossin(X, p=None, q=None, separate=False,
           swap_sign=False, compute_u=True, compute_vh=True):
    """
    Compute the cosine-sine (CS) decomposition of an orthogonal/unitary matrix.

    X is an ``(m, m)`` orthogonal/unitary matrix, partitioned as the following
    where upper left block has the shape of ``(p, q)``::

                                       ┌                   ┐
                                       │ I  0  0 │ 0  0  0 │
        ┌           ┐   ┌           ┐  │ 0  C  0 │ 0 -S  0 │  ┌           ┐*
        │ X11 │ X12 │   │ U1 │      │  │ 0  0  0 │ 0  0 -I │  │ V1 │      │
        │ ────┼──── │ = │────┼──── │  │─────────┼─────────│  │────┼──── │
        │ X21 │ X22 │   │     │ U2 │  │ 0  0  0 │ I  0  0 │  │     │ V2 │
        └           ┘   └           ┘  │ 0  S  0 │ 0  C  0 │  └           ┘
                                       │ 0  0  I │ 0  0  0 │
                                       └                   ┘

    ``U1``, ``U2``, ``V1``, ``V2`` are square orthogonal/unitary matrices of
    dimensions ``(p,p)``, ``(m-p,m-p)``, ``(q,q)``, and ``(m-q,m-q)``
    respectively, and ``C`` and ``S`` are ``(r, r)`` nonnegative diagonal
    matrices satisfying ``C^2 + S^2 = I`` where ``r = min(p, m-p, q, m-q)``.

    Moreover, the rank of the identity matrices are ``min(p, q) - r``,
    ``min(p, m - q) - r``, ``min(m - p, q) - r``, and ``min(m - p, m - q) - r``
    respectively.

    X can be supplied either by itself and block specifications p, q or its
    subblocks in an iterable from which the shapes would be derived. See the
    examples below.

    Parameters
    ----------
    X : array_like, iterable
        complex unitary or real orthogonal matrix to be decomposed, or iterable
        of subblocks ``X11``, ``X12``, ``X21``, ``X22``, when ``p``, ``q`` are
        omitted.
    p : int, optional
        Number of rows of the upper left block ``X11``, used only when X is
        given as an array.
    q : int, optional
        Number of columns of the upper left block ``X11``, used only when X is
        given as an array.
    separate : bool, optional
        if ``True``, the low level components are returned instead of the
        matrix factors, i.e. ``(u1,u2)``, ``theta``, ``(v1h,v2h)`` instead of
        ``u``, ``cs``, ``vh``.
    swap_sign : bool, optional
        if ``True``, the ``-S``, ``-I`` block will be the bottom left,
        otherwise (by default) they will be in the upper right block.
    compute_u : bool, optional
        if ``False``, ``u`` won't be computed and an empty array is returned.
    compute_vh : bool, optional
        if ``False``, ``vh`` won't be computed and an empty array is returned.

    Returns
    -------
    u : ndarray
        When ``compute_u=True``, contains the block diagonal orthogonal/unitary
        matrix consisting of the blocks ``U1`` (``p`` x ``p``) and ``U2``
        (``m-p`` x ``m-p``) orthogonal/unitary matrices. If ``separate=True``,
        this contains the tuple of ``(U1, U2)``.
    cs : ndarray
        The cosine-sine factor with the structure described above.
        If ``separate=True``, this contains the ``theta`` array containing the
        angles in radians.
    vh : ndarray
        When ``compute_vh=True``, contains the block diagonal
        orthogonal/unitary matrix consisting of the blocks ``V1H``
        (``q`` x ``q``) and ``V2H`` (``m-q`` x ``m-q``) orthogonal/unitary
        matrices. If ``separate=True``, this contains the tuple of
        ``(V1H, V2H)``.

    Examples
    --------
    >>> from scipy.linalg import cossin
    >>> from scipy.stats import unitary_group
    >>> x = unitary_group.rvs(4)
    >>> u, cs, vdh = cossin(x, p=2, q=2)
    >>> np.allclose(x, u @ cs @ vdh)
    True

    Same can be entered via subblocks without the need of ``p`` and ``q``. Also
    let's skip the computation of ``u``

    >>> ue, cs, vdh = cossin((x[:2, :2], x[:2, 2:], x[2:, :2], x[2:, 2:]),
    ...                      compute_u=False)
    >>> print(ue)
    []
    >>> np.allclose(x, u @ cs @ vdh)
    True

    References
    ----------
    .. [1] : Brian D. Sutton. Computing the complete CS decomposition. Numer.
           Algorithms, 50(1):33-65, 2009.

    """
    if p or q:
        # X given as a single square array; slice out the four subblocks.
        p = 1 if p is None else int(p)
        q = 1 if q is None else int(q)
        X = _asarray_validated(X, check_finite=True)
        if not np.equal(*X.shape):
            raise ValueError("Cosine Sine decomposition only supports square"
                             " matrices, got {}".format(X.shape))
        m = X.shape[0]
        if p >= m or p <= 0:
            raise ValueError("invalid p={}, 0<p<{} must hold"
                             .format(p, m))
        if q >= m or q <= 0:
            raise ValueError("invalid q={}, 0<q<{} must hold"
                             .format(q, m))

        x11, x12, x21, x22 = X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:]
    elif not isinstance(X, Iterable):
        raise ValueError("When p and q are None, X must be an Iterable")
    else:
        # X given as an iterable of the four subblocks; derive p, q, m from
        # the block shapes and validate mutual consistency.
        X = [np.atleast_2d(x) for x in X]
        if len(X) != 4:
            raise ValueError("When p and q are None, exactly four arrays"
                             " should be in X, got {}".format(len(X)))

        x11, x12, x21, x22 = X
        p, q = x11.shape
        mmp, mmq = x22.shape
        if x12.shape != (p, mmq):
            raise ValueError("Invalid x12 dimensions: desired {}, "
                             "got {}".format((p, mmq), x12.shape))
        if x21.shape != (mmp, q):
            raise ValueError("Invalid x21 dimensions: desired {}, "
                             "got {}".format((mmp, q), x21.shape))
        if p + mmp != q + mmq:
            raise ValueError("The subblocks have compatible sizes but "
                             "don't form a square array (instead they form a"
                             " {}x{} array). This might be due to missing "
                             "p, q arguments.".format(p + mmp, q + mmq))

        m = p + mmp

    # Pick the complex (?uncsd) or real (?orcsd) LAPACK driver.
    cplx = any([np.iscomplexobj(x) for x in [x11, x12, x21, x22]])
    driver = "uncsd" if cplx else "orcsd"
    csd, csd_lwork = get_lapack_funcs([driver, driver + "_lwork"],
                                      [x11, x12, x21, x22])
    lwork = _compute_lwork(csd_lwork, m=m, p=p, q=q)
    # The complex driver needs a separate real workspace size as well.
    lwork_args = ({'lwork': lwork[0], 'lrwork': lwork[1]} if cplx else
                  {'lwork': lwork})
    *_, theta, u1, u2, v1h, v2h, info = csd(x11=x11, x12=x12, x21=x21,
                                            x22=x22,
                                            compute_u1=compute_u,
                                            compute_u2=compute_u,
                                            compute_v1t=compute_vh,
                                            compute_v2t=compute_vh,
                                            trans=False, signs=swap_sign,
                                            **lwork_args)

    method_name = csd.typecode + driver
    if info < 0:
        raise ValueError('illegal value in argument {} of internal {}'
                         .format(-info, method_name))
    if info > 0:
        raise LinAlgError("{} did not converge: {}"
                          .format(method_name, info))

    if separate:
        return (u1, u2), theta, (v1h, v2h)

    U = block_diag(u1, u2)
    VDH = block_diag(v1h, v2h)

    # Construct the middle factor CS
    c = np.diag(np.cos(theta))
    s = np.diag(np.sin(theta))
    r = min(p, q, m - p, m - q)
    n11 = min(p, q) - r
    n12 = min(p, m - q) - r
    n21 = min(m - p, q) - r
    n22 = min(m - p, m - q) - r
    Id = np.eye(np.max([n11, n12, n21, n22, r]), dtype=theta.dtype)
    CS = np.zeros((m, m), dtype=theta.dtype)

    CS[:n11, :n11] = Id[:n11, :n11]

    xs = n11 + r
    xe = n11 + r + n12
    ys = n11 + n21 + n22 + 2 * r
    ye = n11 + n21 + n22 + 2 * r + n12
    CS[xs:xe, ys:ye] = Id[:n12, :n12] if swap_sign else -Id[:n12, :n12]

    xs = p + n22 + r
    xe = p + n22 + r + n21
    ys = n11 + r
    ye = n11 + r + n21
    CS[xs:xe, ys:ye] = -Id[:n21, :n21] if swap_sign else Id[:n21, :n21]

    CS[p:p + n22, q:q + n22] = Id[:n22, :n22]
    CS[n11:n11 + r, n11:n11 + r] = c
    CS[p + n22:p + n22 + r, r + n21 + n22:2 * r + n21 + n22] = c

    xs = n11
    xe = n11 + r
    ys = n11 + n21 + n22 + r
    ye = n11 + n21 + n22 + 2 * r
    CS[xs:xe, ys:ye] = s if swap_sign else -s

    CS[p + n22:p + n22 + r, n11:n11 + r] = -s if swap_sign else s

    return U, CS, VDH
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_ldl.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_ldl.py
new file mode 100644
index 0000000..b1daa25
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_ldl.py
@@ -0,0 +1,352 @@
+from warnings import warn
+
+import numpy as np
+from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
+ iscomplexobj, tril, triu, argsort, empty_like)
+from .decomp import _asarray_validated
+from .lapack import get_lapack_funcs, _compute_lwork
+
+__all__ = ['ldl']
+
+
def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
    """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
    hermitian matrix.

    This function returns a block diagonal matrix D consisting blocks of size
    at most 2x2 and also a possibly permuted unit lower triangular matrix
    ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T``
    holds. If ``lower`` is False then (again possibly permuted) upper
    triangular matrices are returned as outer factors.

    The permutation array can be used to triangularize the outer factors
    simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
    triangular matrix. This is also equivalent to multiplication with a
    permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted
    identity matrix ``I[:, perm]``.

    Depending on the value of the boolean ``lower``, only upper or lower
    triangular part of the input array is referenced. Hence, a triangular
    matrix on entry would give the same result as if the full matrix is
    supplied.

    Parameters
    ----------
    a : array_like
        Square input array
    lower : bool, optional
        This switches between the lower and upper triangular outer factors of
        the factorization. Lower triangular (``lower=True``) is the default.
    hermitian : bool, optional
        For complex-valued arrays, this defines whether ``a = a.conj().T`` or
        ``a = a.T`` is assumed. For real-valued arrays, this switch has no
        effect.
    overwrite_a : bool, optional
        Allow overwriting data in ``a`` (may enhance performance). The default
        is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    lu : ndarray
        The (possibly) permuted upper/lower triangular outer factor of the
        factorization.
    d : ndarray
        The block diagonal multiplier of the factorization.
    perm : ndarray
        The row-permutation index array that brings lu into triangular form.

    Raises
    ------
    ValueError
        If input array is not square.
    ComplexWarning
        If a complex-valued array with nonzero imaginary parts on the
        diagonal is given and hermitian is set to True.

    Examples
    --------
    Given an upper triangular array `a` that represents the full symmetric
    array with its entries, obtain `l`, `d` and the permutation vector `perm`:

    >>> import numpy as np
    >>> from scipy.linalg import ldl
    >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]])
    >>> lu, d, perm = ldl(a, lower=0) # Use the upper part
    >>> lu
    array([[ 0. ,  0. ,  1. ],
           [ 0. ,  1. , -0.5],
           [ 1. ,  1. ,  1.5]])
    >>> d
    array([[-5. ,  0. ,  0. ],
           [ 0. ,  1.5,  0. ],
           [ 0. ,  0. ,  2. ]])
    >>> perm
    array([2, 1, 0])
    >>> lu[perm, :]
    array([[ 1. ,  1. ,  1.5],
           [ 0. ,  1. , -0.5],
           [ 0. ,  0. ,  1. ]])
    >>> lu.dot(d).dot(lu.T)
    array([[ 2., -1.,  3.],
           [-1.,  2.,  0.],
           [ 3.,  0.,  1.]])

    Notes
    -----
    This function uses ``?SYTRF`` routines for symmetric matrices and
    ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for
    the algorithm details.

    Depending on the ``lower`` keyword value, only lower or upper triangular
    part of the input array is referenced. Moreover, this keyword also defines
    the structure of the outer factors of the factorization.

    .. versionadded:: 1.1.0

    See also
    --------
    cholesky, lu

    References
    ----------
    .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating
       inertia and solving symmetric linear systems, Math. Comput. Vol.31,
       1977. :doi:`10.2307/2005787`

    """
    a = atleast_2d(_asarray_validated(A, check_finite=check_finite))
    if a.shape[0] != a.shape[1]:
        raise ValueError('The input array "a" should be square.')
    # Return empty arrays for empty square input
    if a.size == 0:
        return empty_like(a), empty_like(a), np.array([], dtype=int)

    n = a.shape[0]
    r_or_c = complex if iscomplexobj(a) else float

    # Get the LAPACK routine
    if r_or_c is complex and hermitian:
        s, sl = 'hetrf', 'hetrf_lwork'
        if np.any(imag(diag(a))):
            # BUGFIX: the implicitly concatenated fragments previously lacked
            # separating spaces, producing "diagonalare" and "ofcomplex".
            warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal '
                 'are ignored. Use "hermitian=False" for factorization of '
                 'complex symmetric arrays.', ComplexWarning, stacklevel=2)
    else:
        s, sl = 'sytrf', 'sytrf_lwork'

    solver, solver_lwork = get_lapack_funcs((s, sl), (a,))
    lwork = _compute_lwork(solver_lwork, n, lower=lower)
    ldu, piv, info = solver(a, lwork=lwork, lower=lower,
                            overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('{} exited with the internal error "illegal value '
                         'in argument number {}". See LAPACK documentation '
                         'for the error codes.'.format(s.upper(), -info))

    # Decode LAPACK's ipiv, split the packed output into D and L/U, and
    # apply the recorded swaps to obtain the permuted triangular factor.
    swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower)
    d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian)
    lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower)

    return lu, d, perm
+
+
def _ldl_sanitize_ipiv(a, lower=True):
    """
    This helper function takes the rather strangely encoded permutation array
    returned by the LAPACK routines ?(HE/SY)TRF and converts it into
    regularized permutation and diagonal pivot size format.

    Since FORTRAN uses 1-indexing and LAPACK uses different start points for
    upper and lower formats there are certain offsets in the indices used
    below.

    Let's assume a result where the matrix is 6x6 and there are two 2x2
    and two 1x1 blocks reported by the routine. To ease the coding efforts,
    we still populate a 6-sized array and fill zeros as the following ::

        pivots = [2, 0, 2, 0, 1, 1]

    This denotes a diagonal matrix of the form ::

        [x x        ]
        [x x        ]
        [    x x    ]
        [    x x    ]
        [        x  ]
        [          x]

    In other words, we write 2 when the 2x2 block is first encountered and
    automatically write 0 to the next entry and skip the next spin of the
    loop. Thus, a separate counter or array appends to keep track of block
    sizes are avoided. If needed, zeros can be filtered out later without
    losing the block structure.

    Parameters
    ----------
    a : ndarray
        The permutation array ipiv returned by LAPACK
    lower : bool, optional
        The switch to select whether upper or lower triangle is chosen in
        the LAPACK call.

    Returns
    -------
    swap_ : ndarray
        The array that defines the row/column swap operations. For example,
        if row two is swapped with row four, the result is [0, 3, 2, 3].
    pivots : ndarray
        The array that defines the block diagonal structure as given above.

    """
    n = a.size
    # Start from the identity pattern; entries are overwritten when LAPACK
    # reports a swap for that row.
    swap_ = arange(n)
    pivots = zeros_like(swap_, dtype=int)
    # Set while consuming the second entry of a 2x2 block so the next loop
    # iteration is skipped.
    skip_2x2 = False

    # Some upper/lower dependent offset values
    # range (s)tart, r(e)nd, r(i)ncrement
    x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1)

    for ind in range(rs, re, ri):
        # If previous spin belonged already to a 2x2 block
        if skip_2x2:
            skip_2x2 = False
            continue

        cur_val = a[ind]
        # do we have a 1x1 block or not?
        if cur_val > 0:
            if cur_val != ind+1:
                # Index value != array value --> permutation required
                swap_[ind] = swap_[cur_val-1]
            pivots[ind] = 1
        # 2x2 block: LAPACK marks both members of a 2x2 pivot with the same
        # negative value, hence the equality test against the neighbor.
        elif cur_val < 0 and cur_val == a[ind+x]:
            # first neg entry of 2x2 block identifier
            if -cur_val != ind+2:
                # Index value != array value --> permutation required
                swap_[ind+x] = swap_[-cur_val-1]
            pivots[ind+y] = 2
            skip_2x2 = True
        else:  # Doesn't make sense, give up
            raise ValueError('While parsing the permutation array '
                             'in "scipy.linalg.ldl", invalid entries '
                             'found. The array syntax is invalid.')
    return swap_, pivots
+
+
def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):
    """
    Helper function to extract the diagonal and triangular matrices for
    LDL.T factorization.

    Parameters
    ----------
    ldu : ndarray
        The compact output returned by the LAPACK routing
    pivs : ndarray
        The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For
        every 2 there is a succeeding 0.
    lower : bool, optional
        If set to False, upper triangular part is considered.
    hermitian : bool, optional
        If set to False a symmetric complex array is assumed.

    Returns
    -------
    d : ndarray
        The block diagonal matrix.
    lu : ndarray
        The upper/lower triangular matrix
    """
    complex_input = iscomplexobj(ldu)
    d = diag(diag(ldu))
    n = d.shape[0]

    # Row/column offsets that select the sub- (lower) or super- (upper)
    # diagonal entry of a 2x2 pivot block.
    row_off, col_off = (1, 0) if lower else (0, 1)

    # Strictly triangular part of the packed factor plus a unit diagonal.
    lu = tril(ldu, -1) if lower else triu(ldu, 1)
    idx = arange(n)
    lu[idx, idx] = 1

    pos = 0
    for size in pivs[pivs != 0]:
        if size == 2:
            # Copy the off-diagonal entry of the 2x2 block into D and clear
            # it from the triangular factor.
            off = ldu[pos + row_off, pos + col_off]
            d[pos + row_off, pos + col_off] = off
            # For a Hermitian factorization the mirrored entry is conjugated.
            if complex_input and hermitian:
                d[pos + col_off, pos + row_off] = off.conj()
            else:
                d[pos + col_off, pos + row_off] = off
            lu[pos + row_off, pos + col_off] = 0.
        pos += size

    return d, lu
+
+
def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True):
    """
    Helper function to construct explicit outer factors of LDL factorization.

    If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k).
    Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See
    LAPACK documentation for more details.

    Parameters
    ----------
    lu : ndarray
        The triangular array that is extracted from LAPACK routine call with
        ones on the diagonals.
    swap_vec : ndarray
        The array that defines the row swapping indices. If the kth entry is m
        then rows k,m are swapped. Notice that the mth entry is not necessarily
        k to avoid undoing the swapping.
    pivs : ndarray
        The array that defines the block diagonal structure returned by
        _ldl_sanitize_ipiv().
    lower : bool, optional
        The boolean to switch between lower and upper triangular structure.

    Returns
    -------
    lu : ndarray
        The square outer factor which satisfies the L * D * L.T = A
    perm : ndarray
        The permutation vector that brings the lu to the triangular form

    Notes
    -----
    Note that the original argument "lu" is overwritten.

    """
    n = lu.shape[0]
    perm = arange(n)
    # Setup the reading order of the permutation matrix for upper/lower
    rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1)

    for ind in range(rs, re, ri):
        s_ind = swap_vec[ind]
        if s_ind != ind:
            # Column start and end positions
            col_s = ind if lower else 0
            col_e = n if lower else ind+1

            # If we stumble upon a 2x2 block include both cols in the perm.
            if pivs[ind] == (0 if lower else 2):
                col_s += -1 if lower else 0
                col_e += 0 if lower else 1
            # Swap the two rows over the relevant column slice only, and
            # record the same transposition in the permutation vector.
            lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e]
            perm[[s_ind, ind]] = perm[[ind, s_ind]]

    # argsort inverts the accumulated permutation so that lu[perm, :]
    # is triangular.
    return lu, argsort(perm)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_polar.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_polar.py
new file mode 100644
index 0000000..9bd98fa
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_polar.py
@@ -0,0 +1,110 @@
+import numpy as np
+from scipy.linalg import svd
+
+
+__all__ = ['polar']
+
+
def polar(a, side="right"):
    """
    Compute the polar decomposition.

    Returns the factors of the polar decomposition [1]_ `u` and `p` such
    that ``a = up`` (if `side` is "right") or ``a = pu`` (if `side` is
    "left"), where `p` is positive semidefinite. Depending on the shape
    of `a`, either the rows or columns of `u` are orthonormal. When `a`
    is a square array, `u` is a square unitary array. When `a` is not
    square, the "canonical polar decomposition" [2]_ is computed.

    Parameters
    ----------
    a : (m, n) array_like
        The array to be factored.
    side : {'left', 'right'}, optional
        Determines whether a right or left polar decomposition is computed.
        If `side` is "right", then ``a = up``. If `side` is "left", then
        ``a = pu``. The default is "right".

    Returns
    -------
    u : (m, n) ndarray
        If `a` is square, then `u` is unitary. If m > n, then the columns
        of `u` are orthonormal, and if m < n, then the rows of `u` are
        orthonormal.
    p : ndarray
        `p` is Hermitian positive semidefinite. If `a` is nonsingular, `p`
        is positive definite. The shape of `p` is (n, n) or (m, m), depending
        on whether `side` is "right" or "left", respectively.

    References
    ----------
    .. [1] R. A. Horn and C. R. Johnson, "Matrix Analysis", Cambridge
           University Press, 1985.
    .. [2] N. J. Higham, "Functions of Matrices: Theory and Computation",
           SIAM, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import polar
    >>> a = np.array([[1, -1], [2, 4]])
    >>> u, p = polar(a)
    >>> u
    array([[ 0.85749293, -0.51449576],
           [ 0.51449576,  0.85749293]])
    >>> p
    array([[ 1.88648444,  1.2004901 ],
           [ 1.2004901 ,  3.94446746]])

    A non-square example, with m < n:

    >>> b = np.array([[0.5, 1, 2], [1.5, 3, 4]])
    >>> u, p = polar(b)
    >>> u
    array([[-0.21196618, -0.42393237,  0.88054056],
           [ 0.39378971,  0.78757942,  0.4739708 ]])
    >>> p
    array([[ 0.48470147,  0.96940295,  1.15122648],
           [ 0.96940295,  1.9388059 ,  2.30245295],
           [ 1.15122648,  2.30245295,  3.65696431]])
    >>> u.dot(p)   # Verify the decomposition.
    array([[ 0.5,  1. ,  2. ],
           [ 1.5,  3. ,  4. ]])
    >>> u.dot(u.T)   # The rows of u are orthonormal.
    array([[  1.00000000e+00,  -2.07353665e-17],
           [ -2.07353665e-17,   1.00000000e+00]])

    Another non-square example, with m > n:

    >>> c = b.T
    >>> u, p = polar(c)
    >>> u
    array([[-0.21196618,  0.39378971],
           [-0.42393237,  0.78757942],
           [ 0.88054056,  0.4739708 ]])
    >>> p
    array([[ 1.23116567,  1.93241587],
           [ 1.93241587,  4.84930602]])
    >>> u.dot(p)   # Verify the decomposition.
    array([[ 0.5,  1.5],
           [ 1. ,  3. ],
           [ 2. ,  4. ]])
    >>> u.T.dot(u)  # The columns of u are orthonormal.
    array([[  1.00000000e+00,  -1.26363763e-16],
           [ -1.26363763e-16,   1.00000000e+00]])

    """
    if side not in ['right', 'left']:
        raise ValueError("`side` must be either 'right' or 'left'")
    a = np.asarray(a)
    if a.ndim != 2:
        raise ValueError("`a` must be a 2-D array.")

    # u comes from the SVD with the singular values "removed"; p absorbs them.
    w, s, vh = svd(a, full_matrices=False)
    u = w.dot(vh)
    if side == 'right':
        # a = up, so p = vh* diag(s) vh (n x n)
        p = (vh.T.conj() * s).dot(vh)
    else:
        # a = pu, so p = w diag(s) w* (m x m)
        p = (w * s).dot(w.T.conj())
    return u, p
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_qz.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_qz.py
new file mode 100644
index 0000000..8569c83
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_qz.py
@@ -0,0 +1,402 @@
+import warnings
+
+import numpy as np
+from numpy import asarray_chkfinite
+
+from .misc import LinAlgError, _datacopied, LinAlgWarning
+from .lapack import get_lapack_funcs
+
+
+__all__ = ['qz', 'ordqz']
+
+_double_precision = ['i', 'l', 'd']
+
+
+def _select_function(sort):
+ if callable(sort):
+ # assume the user knows what they're doing
+ sfunction = sort
+ elif sort == 'lhp':
+ sfunction = _lhp
+ elif sort == 'rhp':
+ sfunction = _rhp
+ elif sort == 'iuc':
+ sfunction = _iuc
+ elif sort == 'ouc':
+ sfunction = _ouc
+ else:
+ raise ValueError("sort parameter must be None, a callable, or "
+ "one of ('lhp','rhp','iuc','ouc')")
+
+ return sfunction
+
+
+def _lhp(x, y):
+ out = np.empty_like(x, dtype=bool)
+ nonzero = (y != 0)
+ # handles (x, y) = (0, 0) too
+ out[~nonzero] = False
+ out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0)
+ return out
+
+
+def _rhp(x, y):
+ out = np.empty_like(x, dtype=bool)
+ nonzero = (y != 0)
+ # handles (x, y) = (0, 0) too
+ out[~nonzero] = False
+ out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0)
+ return out
+
+
+def _iuc(x, y):
+ out = np.empty_like(x, dtype=bool)
+ nonzero = (y != 0)
+ # handles (x, y) = (0, 0) too
+ out[~nonzero] = False
+ out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0)
+ return out
+
+
+def _ouc(x, y):
+ out = np.empty_like(x, dtype=bool)
+ xzero = (x == 0)
+ yzero = (y == 0)
+ out[xzero & yzero] = False
+ out[~xzero & yzero] = True
+ out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0)
+ return out
+
+
def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
        overwrite_b=False, check_finite=True):
    """Shared implementation of qz() and ordqz(): call LAPACK ?gges.

    Validates/coerces the inputs, performs the LAPACK workspace query, and
    returns the raw gges result tuple together with the gges typecode
    ('s', 'd', 'c' or 'z'). See qz() for parameter semantics.
    """
    if sort is not None:
        # Disabled due to segfaults on win32, see ticket 1717.
        raise ValueError("The 'sort' input of qz() has to be None and will be "
                         "removed in a future release. Use ordqz instead.")

    if output not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real', or 'complex'")

    if check_finite:
        a1 = asarray_chkfinite(A)
        b1 = asarray_chkfinite(B)
    else:
        a1 = np.asarray(A)
        b1 = np.asarray(B)

    a_m, a_n = a1.shape
    b_m, b_n = b1.shape
    if not (a_m == a_n == b_m == b_n):
        raise ValueError("Array dimensions must be square and agree")

    # For a complex decomposition, promote real inputs to the matching
    # complex dtype (double precision -> 'D', otherwise single -> 'F').
    typa = a1.dtype.char
    if output in ['complex', 'c'] and typa not in ['F', 'D']:
        if typa in _double_precision:
            a1 = a1.astype('D')
            typa = 'D'
        else:
            a1 = a1.astype('F')
            typa = 'F'
    typb = b1.dtype.char
    if output in ['complex', 'c'] and typb not in ['F', 'D']:
        if typb in _double_precision:
            b1 = b1.astype('D')
            typb = 'D'
        else:
            b1 = b1.astype('F')
            typb = 'F'

    # If conversion already copied the data, overwriting the copy is safe.
    overwrite_a = overwrite_a or (_datacopied(a1, A))
    overwrite_b = overwrite_b or (_datacopied(b1, B))

    gges, = get_lapack_funcs(('gges',), (a1, b1))

    if lwork is None or lwork == -1:
        # get optimal work array size (lwork=-1 is the LAPACK query protocol;
        # the optimal size comes back in the work array's first element)
        result = gges(lambda x: None, a1, b1, lwork=-1)
        lwork = result[-2][0].real.astype(np.int_)

    # Sorting is disabled (sort_t=0), so the select callback is a no-op.
    sfunction = lambda x: None
    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
                  overwrite_b=overwrite_b, sort_t=0)

    # Map the LAPACK info code to the documented gges failure modes.
    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument {} of gges".format(-info))
    elif info > 0 and info <= a_n:
        warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
                      "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be "
                      "correct for J={},...,N".format(info-1), LinAlgWarning,
                      stacklevel=3)
    elif info == a_n+1:
        raise LinAlgError("Something other than QZ iteration failed")
    elif info == a_n+2:
        raise LinAlgError("After reordering, roundoff changed values of some "
                          "complex eigenvalues so that leading eigenvalues "
                          "in the Generalized Schur form no longer satisfy "
                          "sort=True. This could also be due to scaling.")
    elif info == a_n+3:
        raise LinAlgError("Reordering failed in tgsen")

    return result, gges.typecode
+
+
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
       overwrite_b=False, check_finite=True):
    """
    QZ decomposition for generalized eigenvalues of a pair of matrices.

    The QZ, or generalized Schur, decomposition for a pair of N x N
    nonsymmetric matrices (A,B) is::

        (A,B) = (Q*AA*Z', Q*BB*Z')

    where AA, BB is in generalized Schur form if BB is upper-triangular
    with non-negative diagonal and AA is upper-triangular, or for real QZ
    decomposition (``output='real'``) block upper triangular with 1x1
    and 2x2 blocks. In this case, the 1x1 blocks correspond to real
    generalized eigenvalues and 2x2 blocks are 'standardized' by making
    the corresponding elements of BB have the form::

        [ a 0 ]
        [ 0 b ]

    and the pair of corresponding 2x2 blocks in AA and BB will have a complex
    conjugate pair of generalized eigenvalues. If (``output='complex'``) or
    A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
    Q and Z are unitary matrices.

    Parameters
    ----------
    A : (N, N) array_like
        2-D array to decompose
    B : (N, N) array_like
        2-D array to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead.

        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue
        ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
        output='complex', the sort function takes two complex arguments
        (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,
        string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        Defaults to None (no sorting).
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in b (may improve performance)
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    Notes
    -----
    Q is transposed versus the equivalent function in Matlab.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> np.random.seed(1234)
    >>> A = np.arange(9).reshape((3, 3))
    >>> B = np.random.randn(3, 3)

    >>> AA, BB, Q, Z = linalg.qz(A, B)
    >>> AA
    array([[-13.40928183,  -4.62471562,   1.09215523],
           [  0.        ,   0.        ,   1.22805978],
           [  0.        ,   0.        ,   0.31973817]])
    >>> BB
    array([[ 0.33362547, -1.37393632,  0.02179805],
           [ 0.        ,  1.68144922,  0.74683866],
           [ 0.        ,  0.        ,  0.9258294 ]])
    >>> Q
    array([[ 0.14134727, -0.97562773,  0.16784365],
           [ 0.49835904, -0.07636948, -0.86360059],
           [ 0.85537081,  0.20571399,  0.47541828]])
    >>> Z
    array([[-0.24900855, -0.51772687,  0.81850696],
           [-0.79813178,  0.58842606,  0.12938478],
           [-0.54861681, -0.6210585 , -0.55973739]])

    See also
    --------
    ordqz
    """
    # output for real
    # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
    # output for complex
    # AA, BB, sdim, alpha, beta, vsl, vsr, work, info
    result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,
                    overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                    check_finite=check_finite)
    # Negative indexing works for both real and complex result layouts:
    # vsl (Q) and vsr (Z) always sit 4th- and 3rd-from-last.
    return result[0], result[1], result[-4], result[-3]
+
+
def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
          overwrite_b=False, check_finite=True):
    """QZ decomposition for a pair of matrices with reordering.

    .. versionadded:: 0.17.0

    Parameters
    ----------
    A : (N, N) array_like
        2-D array to decompose
    B : (N, N) array_like
        2-D array to decompose
    sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A
        callable may be passed that, given an ordered pair ``(alpha,
        beta)`` representing the eigenvalue ``x = (alpha/beta)``,
        returns a boolean denoting whether the eigenvalue should be
        sorted to the top-left (True). For the real matrix pairs
        ``beta`` is real while ``alpha`` can be complex, and for
        complex matrix pairs both ``alpha`` and ``beta`` can be
        complex. The callable must be able to accept a NumPy
        array. Alternatively, string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        With the predefined sorting functions, an infinite eigenvalue
        (i.e., ``alpha != 0`` and ``beta = 0``) is considered to lie in
        neither the left-hand nor the right-hand plane, but it is
        considered to lie outside the unit circle. For the eigenvalue
        ``(alpha, beta) = (0, 0)``, the predefined sorting functions
        all return `False`.
    output : str {'real','complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    overwrite_a : bool, optional
        If True, the contents of A are overwritten.
    overwrite_b : bool, optional
        If True, the contents of B are overwritten.
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    alpha : (N,) ndarray
        alpha = alphar + alphai * 1j. See notes.
    beta : (N,) ndarray
        See notes.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    Notes
    -----
    On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
    generalized eigenvalues.  ``ALPHAR(j) + ALPHAI(j)*i`` and
    ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
    that would result if the 2-by-2 diagonal blocks of the real generalized
    Schur form of (A,B) were further reduced to triangular form using complex
    unitary transformations. If ALPHAI(j) is zero, then the jth eigenvalue is
    real; if positive, then the ``j``th and ``(j+1)``st eigenvalues are a
    complex conjugate pair, with ``ALPHAI(j+1)`` negative.

    See also
    --------
    qz

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import ordqz
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]])
    >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp')

    Since we have sorted for left half plane eigenvalues, negatives come first

    >>> (alpha/beta).real < 0
    array([ True,  True, False, False], dtype=bool)

    """
    # NOTE: should users be able to set these?
    lwork = None
    result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
                      overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                      check_finite=check_finite)
    AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
    # gges typecodes are 's'/'d' (real) and 'c'/'z' (complex); the real
    # variants return separate alphar/alphai parts.
    if typ not in 'cz':
        alpha, beta = result[3] + result[4]*1.j, result[5]
    else:
        alpha, beta = result[3], result[4]

    sfunction = _select_function(sort)
    select = sfunction(alpha, beta)

    tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))

    if lwork is None or lwork == -1:
        # LAPACK workspace query: optimal lwork returned in work[0]
        result = tgsen(select, AA, BB, Q, Z, lwork=-1)
        lwork = result[-3][0].real.astype(np.int_)
        # looks like wrong value passed to ZTGSYL if not
        lwork += 1

    liwork = None
    if liwork is None or liwork == -1:
        # second query for the integer workspace size
        result = tgsen(select, AA, BB, Q, Z, liwork=-1)
        liwork = result[-2][0]

    result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)

    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of tgsen" % -info)
    elif info == 1:
        raise ValueError("Reordering of (A, B) failed because the transformed"
                         " matrix pair (A, B) would be too far from "
                         "generalized Schur form; the problem is very "
                         "ill-conditioned. (A, B) may have been partially "
                         "reorded. If requested, 0 is returned in DIF(*), "
                         "PL, and PR.")

    # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
    # work, iwork, info
    #
    # BUGFIX: the real typecodes are 's' and 'd' (not 'f'), so test against
    # the complex codes as done above; ``typ in ['f', 'd']`` sent
    # single-precision input down the complex branch and returned the
    # wrong tuple.
    if typ not in 'cz':
        alpha = result[2] + result[3] * 1.j
        return (result[0], result[1], alpha, result[4], result[5], result[6])
    # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
    # iwork, info
    else:
        return result[0], result[1], result[2], result[3], result[4], result[5]
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_update.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_update.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..75b7818
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_decomp_update.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_expm_frechet.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_expm_frechet.py
new file mode 100644
index 0000000..df16f86
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_expm_frechet.py
@@ -0,0 +1,409 @@
+"""Frechet derivative of the matrix exponential."""
+import numpy as np
+import scipy.linalg
+
+__all__ = ['expm_frechet', 'expm_cond']
+
+
def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):
    """
    Frechet derivative of the matrix exponential of A in the direction E.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix of which to take the matrix exponential.
    E : (N, N) array_like
        Matrix direction in which to take the Frechet derivative.
    method : str, optional
        Choice of algorithm. Should be one of

        - `SPS` (default)
        - `blockEnlarge`

    compute_expm : bool, optional
        Whether to compute also `expm_A` in addition to `expm_frechet_AE`.
        Default is True.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    expm_A : ndarray
        Matrix exponential of A. Only returned when `compute_expm` is True.
    expm_frechet_AE : ndarray
        Frechet derivative of the matrix exponential of A in the direction E.

    Raises
    ------
    ValueError
        If A or E is not square, if their shapes differ, or if `method`
        is not one of the supported choices.

    See also
    --------
    expm : Compute the exponential of a matrix.

    Notes
    -----
    The default *SPS* method is Scaling-Pade-Squaring [1]_, which takes
    roughly 3/8 of the time of the naive *blockEnlarge* approach with the
    same asymptotics.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           Computing the Frechet Derivative of the Matrix Exponential,
           with an application to Condition Number Estimation.
           SIAM Journal On Matrix Analysis and Applications.,
           30 (4). pp. 1639-1657. ISSN 1095-7162

    Examples
    --------
    >>> import scipy.linalg
    >>> A = np.random.randn(3, 3)
    >>> E = np.random.randn(3, 3)
    >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)
    >>> expm_A.shape, expm_frechet_AE.shape
    ((3, 3), (3, 3))

    >>> M = np.zeros((6, 6))
    >>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A
    >>> expm_M = scipy.linalg.expm(M)
    >>> np.allclose(expm_A, expm_M[:3, :3])
    True
    >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])
    True

    """
    coerce = np.asarray_chkfinite if check_finite else np.asarray
    A = coerce(A)
    E = coerce(E)

    # Both inputs must be square and of matching shape.
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be a square matrix')
    if E.ndim != 2 or E.shape[0] != E.shape[1]:
        raise ValueError('expected E to be a square matrix')
    if A.shape != E.shape:
        raise ValueError('expected A and E to be the same shape')

    if method is None:
        method = 'SPS'
    if method == 'SPS':
        pair = expm_frechet_algo_64(A, E)
    elif method == 'blockEnlarge':
        pair = expm_frechet_block_enlarge(A, E)
    else:
        raise ValueError('Unknown implementation %s' % method)

    # pair is (expm_A, expm_frechet_AE)
    return pair if compute_expm else pair[1]
+
+
def expm_frechet_block_enlarge(A, E):
    """
    Naive reference implementation, mostly for testing and profiling.

    Embeds (A, E) into the block matrix [[A, E], [0, A]]; its exponential
    holds expm(A) in the top-left block and the Frechet derivative in the
    top-right block. Returns ``(expm(A), frechet(A, E))``.
    """
    n = A.shape[0]
    big = np.block([[A, E], [np.zeros_like(A), A]])
    expm_big = scipy.linalg.expm(big)
    return expm_big[:n, :n], expm_big[:n, n:]
+
+
+"""
+Maximal values ell_m of ||2**-s A|| such that the backward error bound
+does not exceed 2**-53.
+"""
+ell_table_61 = (
+ None,
+ # 1
+ 2.11e-8,
+ 3.56e-4,
+ 1.08e-2,
+ 6.49e-2,
+ 2.00e-1,
+ 4.37e-1,
+ 7.83e-1,
+ 1.23e0,
+ 1.78e0,
+ 2.42e0,
+ # 11
+ 3.13e0,
+ 3.90e0,
+ 4.74e0,
+ 5.63e0,
+ 6.56e0,
+ 7.52e0,
+ 8.53e0,
+ 9.56e0,
+ 1.06e1,
+ 1.17e1,
+ )
+
+
+# The b vectors and U and V are copypasted
+# from scipy.sparse.linalg.matfuncs.py.
+# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)
+
+def _diff_pade3(A, E, ident):
+ b = (120., 60., 12., 1.)
+ A2 = A.dot(A)
+ M2 = np.dot(A, E) + np.dot(E, A)
+ U = A.dot(b[3]*A2 + b[1]*ident)
+ V = b[2]*A2 + b[0]*ident
+ Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
+ Lv = b[2]*M2
+ return U, V, Lu, Lv
+
+
+def _diff_pade5(A, E, ident):
+ b = (30240., 15120., 3360., 420., 30., 1.)
+ A2 = A.dot(A)
+ M2 = np.dot(A, E) + np.dot(E, A)
+ A4 = np.dot(A2, A2)
+ M4 = np.dot(A2, M2) + np.dot(M2, A2)
+ U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
+ V = b[4]*A4 + b[2]*A2 + b[0]*ident
+ Lu = (A.dot(b[5]*M4 + b[3]*M2) +
+ E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
+ Lv = b[4]*M4 + b[2]*M2
+ return U, V, Lu, Lv
+
+
+def _diff_pade7(A, E, ident):
+ b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
+ A2 = A.dot(A)
+ M2 = np.dot(A, E) + np.dot(E, A)
+ A4 = np.dot(A2, A2)
+ M4 = np.dot(A2, M2) + np.dot(M2, A2)
+ A6 = np.dot(A2, A4)
+ M6 = np.dot(A4, M2) + np.dot(M4, A2)
+ U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
+ V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
+ Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
+ E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
+ Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
+ return U, V, Lu, Lv
+
+
+def _diff_pade9(A, E, ident):
+ b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
+ 2162160., 110880., 3960., 90., 1.)
+ A2 = A.dot(A)
+ M2 = np.dot(A, E) + np.dot(E, A)
+ A4 = np.dot(A2, A2)
+ M4 = np.dot(A2, M2) + np.dot(M2, A2)
+ A6 = np.dot(A2, A4)
+ M6 = np.dot(A4, M2) + np.dot(M4, A2)
+ A8 = np.dot(A4, A4)
+ M8 = np.dot(A4, M4) + np.dot(M4, A4)
+ U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
+ V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
+ Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
+ E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
+ Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
+ return U, V, Lu, Lv
+
+
def expm_frechet_algo_64(A, E):
    """Scaling-Pade-Squaring computation of (expm(A), frechet(A, E)).

    Implements Al-Mohy & Higham (2009), Algorithm 6.4, in double precision.
    Returns the pair (R, L) where R = expm(A) and L is the Frechet
    derivative of expm at A in the direction E.
    """
    n = A.shape[0]
    s = None
    ident = np.identity(n)
    A_norm_1 = scipy.linalg.norm(A, 1)
    # Try the cheapest Pade order whose backward-error threshold covers
    # the 1-norm of A; no scaling (s=0) is needed in that case.
    m_pade_pairs = (
        (3, _diff_pade3),
        (5, _diff_pade5),
        (7, _diff_pade7),
        (9, _diff_pade9))
    for m, pade in m_pade_pairs:
        if A_norm_1 <= ell_table_61[m]:
            U, V, Lu, Lv = pade(A, E, ident)
            s = 0
            break
    if s is None:
        # scaling: shrink A (and E) by 2**-s so the order-13 Pade
        # approximant is accurate, then square s times at the end
        s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
        A = A * 2.0**-s
        E = E * 2.0**-s
        # pade order 13
        A2 = np.dot(A, A)
        M2 = np.dot(A, E) + np.dot(E, A)
        A4 = np.dot(A2, A2)
        M4 = np.dot(A2, M2) + np.dot(M2, A2)
        A6 = np.dot(A2, A4)
        M6 = np.dot(A4, M2) + np.dot(M4, A2)
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
             1187353796428800., 129060195264000., 10559470521600.,
             670442572800., 33522128640., 1323241920., 40840800., 960960.,
             16380., 182., 1.)
        # W/Z split keeps the highest powers factored through A6 (eq. 6.11)
        W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
        W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
        Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
        Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
        W = np.dot(A6, W1) + W2
        U = np.dot(A, W)
        V = np.dot(A6, Z1) + Z2
        # Frechet counterparts of W1/W2/Z1/Z2 (eqs. 6.12-6.13)
        Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
        Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
        Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
        Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
        Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
        Lu = np.dot(A, Lw) + np.dot(E, W)
        Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
    # factor once and solve twice: R = (V-U)^-1 (U+V), and L from (3.3)
    lu_piv = scipy.linalg.lu_factor(-U + V)
    R = scipy.linalg.lu_solve(lu_piv, U + V)
    L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))
    # squaring: undo the 2**-s scaling; the product rule gives the
    # corresponding update for the Frechet derivative
    for k in range(s):
        L = np.dot(R, L) + np.dot(L, R)
        R = np.dot(R, R)
    return R, L
+
+
def vec(M):
    """
    Stack columns of M to construct a single vector.

    This is somewhat standard notation in linear algebra.

    Parameters
    ----------
    M : 2-D array_like
        Input matrix

    Returns
    -------
    v : 1-D ndarray
        Output vector

    """
    # Column-major (Fortran-order) flattening is exactly column stacking.
    return np.ravel(M, order='F')
+
+
def expm_frechet_kronform(A, method=None, check_finite=True):
    """
    Construct the Kronecker form of the Frechet derivative of expm.

    Parameters
    ----------
    A : array_like with shape (N, N)
        Matrix to be expm'd.
    method : str, optional
        Extra keyword to be passed to expm_frechet.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    K : 2-D ndarray with shape (N*N, N*N)
        Kronecker form of the Frechet derivative of the matrix exponential.

    Notes
    -----
    This function is used to help compute the condition number
    of the matrix exponential.

    See also
    --------
    expm : Compute a matrix exponential.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.
    expm_cond : Compute the relative condition number of the matrix exponential
                in the Frobenius norm.

    """
    A = np.asarray_chkfinite(A) if check_finite else np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    n = A.shape[0]
    ident = np.identity(n)

    def _column(i, j):
        # Directional derivative along the (i, j) unit matrix, flattened.
        direction = np.outer(ident[i], ident[j])
        deriv = expm_frechet(A, direction, method=method,
                             compute_expm=False, check_finite=False)
        return vec(deriv)

    cols = [_column(i, j) for i in range(n) for j in range(n)]
    return np.vstack(cols).T
+
+
def expm_cond(A, check_finite=True):
    """
    Relative condition number of the matrix exponential in the Frobenius norm.

    Parameters
    ----------
    A : 2-D array_like
        Square input matrix with shape (N, N).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    kappa : float
        The relative condition number of the matrix exponential
        in the Frobenius norm

    Notes
    -----
    A faster estimate for the condition number in the 1-norm
    has been published but is not yet implemented in SciPy.

    .. versionadded:: 0.14.0

    See also
    --------
    expm : Compute the exponential of a matrix.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.

    Examples
    --------
    >>> from scipy.linalg import expm_cond
    >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
    >>> k = expm_cond(A)
    >>> k
    1.7787805864469866

    """
    A = np.asarray_chkfinite(A) if check_finite else np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    X = scipy.linalg.expm(A)
    K = expm_frechet_kronform(A, check_finite=False)

    # The following norm choices are deliberate: Frobenius norms for A and
    # X, and the induced 2-norm for the Kronecker-form operator K.
    a_fro = scipy.linalg.norm(A, 'fro')
    x_fro = scipy.linalg.norm(X, 'fro')
    k_two = scipy.linalg.norm(K, 2)

    return (k_two * a_fro) / x_fro
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_fblas.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_fblas.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..d48abe2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_fblas.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_flapack.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_flapack.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..74882ce
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_flapack.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_flinalg.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_flinalg.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..db5410e
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_flinalg.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_generate_pyx.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_generate_pyx.py
new file mode 100644
index 0000000..2679c20
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_generate_pyx.py
@@ -0,0 +1,753 @@
+"""
+Code generator script to make the Cython BLAS and LAPACK wrappers
+from the files "cython_blas_signatures.txt" and
+"cython_lapack_signatures.txt" which contain the signatures for
+all the BLAS/LAPACK routines that should be included in the wrappers.
+"""
+
+from collections import defaultdict
+from operator import itemgetter
+import os
+
+BASE_DIR = os.path.abspath(os.path.dirname(__file__))
+
# Map signature type codes to the Fortran type names used when emitting the
# Fortran shim subroutines (see fort_subroutine_wrapper below).
fortran_types = {'int': 'integer',
                 'c': 'complex',
                 'd': 'double precision',
                 's': 'real',
                 'z': 'complex*16',
                 'char': 'character',
                 'bint': 'logical'}
+
# Map signature type codes to C-level type names.  The *select* entries are
# function-pointer typedefs used by the gees/gges families of routines.
c_types = {'int': 'int',
           'c': 'npy_complex64',
           'd': 'double',
           's': 'float',
           'z': 'npy_complex128',
           'char': 'char',
           'bint': 'int',
           'cselect1': '_cselect1',
           'cselect2': '_cselect2',
           'dselect2': '_dselect2',
           'dselect3': '_dselect3',
           'sselect2': '_sselect2',
           'sselect3': '_sselect3',
           'zselect1': '_zselect1',
           'zselect2': '_zselect2'}


def arg_names_and_types(args):
    """Split a ``'type *name, type *name, ...'`` string into parallel
    (types, names) tuples."""
    return zip(*[arg.split(' *') for arg in args.split(', ')])


# Template for a Cython wrapper around a Fortran *function* (non-void
# return).  The Fortran side is wrapped by a shim subroutine ({name}wrp)
# that returns the result through its first argument, because calling
# Fortran functions directly from C is not portable across compilers.
pyx_func_template = """
cdef extern from "{header_name}":
    void _fortran_{name} "F_FUNC({name}wrp, {upname}WRP)"({ret_type} *out, {fort_args}) nogil
cdef {ret_type} {name}({args}) nogil:
    cdef {ret_type} out
    _fortran_{name}(&out, {argnames})
    return out
"""

# Type codes whose Fortran-facing declarations use a different (NumPy or
# function-pointer) spelling than the single-letter ctypedefs.
npy_types = {'c': 'npy_complex64', 'z': 'npy_complex128',
             'cselect1': '_cselect1', 'cselect2': '_cselect2',
             'dselect2': '_dselect2', 'dselect3': '_dselect3',
             'sselect2': '_sselect2', 'sselect3': '_sselect3',
             'zselect1': '_zselect1', 'zselect2': '_zselect2'}


def arg_casts(arg):
    """Return the Cython cast prefix needed when passing an argument of
    C-level type ``arg`` through to the Fortran declaration ('' if none)."""
    if arg in ['npy_complex64', 'npy_complex128', '_cselect1', '_cselect2',
               '_dselect2', '_dselect3', '_sselect2', '_sselect3',
               '_zselect1', '_zselect2']:
        return '<{0}*>'.format(arg)
    return ''


def pyx_decl_func(name, ret_type, args, header_name):
    """Generate the .pyx wrapper for one Fortran *function*.

    Parameters
    ----------
    name : str
        Routine name, e.g. ``'ddot'``.
    ret_type : str
        Type code of the return value (a key of ``c_types``).
    args : str
        Comma-separated ``'type *name'`` argument list.
    header_name : str
        C header declaring the ``F_FUNC`` prototypes.

    Returns
    -------
    str
        Cython source for the extern declaration plus the wrapper.
    """
    argtypes, argnames = arg_names_and_types(args)
    # Fix the case where one of the arguments has the same name as the
    # abbreviation for the argument type.  Otherwise the variable passed as
    # an argument overwrites the previous typedef and Cython compilation
    # fails.
    if ret_type in argnames:
        argnames = [n if n != ret_type else ret_type + '_' for n in argnames]
    # Rename arguments that collide with Python/Cython keywords.
    argnames = [n if n not in ['lambda', 'in'] else n + '_'
                for n in argnames]
    # Rebuild the signature string from the (renamed) argument names, so
    # keyword collisions are already resolved here.  (Bug fix: a later
    # ``args.replace('lambda', 'lambda_')`` was removed — at this point
    # ``args`` already contains ``lambda_``, so the replace was a no-op for
    # every real signature and would have produced a mismatched
    # ``lambda__`` in the signature if it ever fired.)
    args = ', '.join([' *'.join([n, t])
                      for n, t in zip(argtypes, argnames)])
    argtypes = [npy_types.get(t, t) for t in argtypes]
    fort_args = ', '.join([' *'.join([n, t])
                           for n, t in zip(argtypes, argnames)])
    argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)]
    argnames = ', '.join(argnames)
    # c_ret_type is computed for template compatibility; the current
    # template does not reference it.
    c_ret_type = c_types[ret_type]
    return pyx_func_template.format(name=name, upname=name.upper(), args=args,
                                    fort_args=fort_args, ret_type=ret_type,
                                    c_ret_type=c_ret_type, argnames=argnames,
                                    header_name=header_name)
+
+
# Template for a Cython wrapper around a Fortran *subroutine* (void
# return): subroutines are callable directly, so no return-value shim is
# needed.
pyx_sub_template = """cdef extern from "{header_name}":
    void _fortran_{name} "F_FUNC({name},{upname})"({fort_args}) nogil
cdef void {name}({args}) nogil:
    _fortran_{name}({argnames})
"""


def pyx_decl_sub(name, args, header_name):
    """Generate the .pyx wrapper for one Fortran subroutine ``name`` with
    argument list ``args``, declared in ``header_name``."""
    types_, names_ = arg_names_and_types(args)
    mapped = [npy_types.get(code, code) for code in types_]
    safe = [ident + '_' if ident in ['lambda', 'in'] else ident
            for ident in names_]
    fort_args = ', '.join(ctype + ' *' + ident
                          for ctype, ident in zip(mapped, safe))
    call_args = ', '.join(arg_casts(ctype) + ident
                          for ident, ctype in zip(safe, mapped))
    sig_args = args.replace('*lambda,', '*lambda_,').replace('*in,', '*in_,')
    return pyx_sub_template.format(name=name, upname=name.upper(),
                                   args=sig_args, fort_args=fort_args,
                                   argnames=call_args,
                                   header_name=header_name)
+
+
# Header prepended to the generated cython_blas.pyx.  The compiler
# directives must stay on the very first lines, before the module
# docstring, for Cython to honor them.
blas_pyx_preamble = '''# cython: boundscheck = False
# cython: wraparound = False
# cython: cdivision = True

"""
BLAS Functions for Cython
=========================

Usable from Cython via::

    cimport scipy.linalg.cython_blas

These wrappers do not check for alignment of arrays.
Alignment should be checked before these wrappers are used.

Raw function pointers (Fortran-style pointer arguments):

- {}


"""

# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_blas
# from scipy.linalg cimport cython_blas
# cimport scipy.linalg.cython_blas as cython_blas
# cimport ..linalg.cython_blas as cython_blas

# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.

cdef extern from "fortran_defs.h":
    pass

from numpy cimport npy_complex64, npy_complex128

'''


def make_blas_pyx_preamble(all_sigs):
    """Fill the routine list in the BLAS .pyx preamble docstring."""
    names = [sig[0] for sig in all_sigs]
    return blas_pyx_preamble.format("\n- ".join(names))
+
+
# Header prepended to the generated cython_lapack.pyx.
lapack_pyx_preamble = '''"""
LAPACK functions for Cython
===========================

Usable from Cython via::

    cimport scipy.linalg.cython_lapack

This module provides Cython-level wrappers for all primary routines included
in LAPACK 3.4.0 except for ``zcgesv`` since its interface is not consistent
from LAPACK 3.4.0 to 3.6.0. It also provides some of the
fixed-api auxiliary routines.

These wrappers do not check for alignment of arrays.
Alignment should be checked before these wrappers are used.

Raw function pointers (Fortran-style pointer arguments):

- {}


"""

# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_lapack
# from scipy.linalg cimport cython_lapack
# cimport scipy.linalg.cython_lapack as cython_lapack
# cimport ..linalg.cython_lapack as cython_lapack

# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.

cdef extern from "fortran_defs.h":
    pass

from numpy cimport npy_complex64, npy_complex128

cdef extern from "_lapack_subroutines.h":
    # Function pointer type declarations for
    # gees and gges families of functions.
    ctypedef bint _cselect1(npy_complex64*)
    ctypedef bint _cselect2(npy_complex64*, npy_complex64*)
    ctypedef bint _dselect2(d*, d*)
    ctypedef bint _dselect3(d*, d*, d*)
    ctypedef bint _sselect2(s*, s*)
    ctypedef bint _sselect3(s*, s*, s*)
    ctypedef bint _zselect1(npy_complex128*)
    ctypedef bint _zselect2(npy_complex128*, npy_complex128*)

'''


def make_lapack_pyx_preamble(all_sigs):
    """Fill the routine list in the LAPACK .pyx preamble docstring."""
    names = [sig[0] for sig in all_sigs]
    return lapack_pyx_preamble.format("\n- ".join(names))
+
+
# Cython source appended to cython_blas.pyx: Python-callable wrappers used
# by the test suite to exercise the cdef-level BLAS bindings.
# Bug fix: _test_sasum and _test_snrm2 previously computed incx as
# ``shape[0] // sizeof(...)`` instead of ``strides[0] // sizeof(...)``,
# inconsistent with every other wrapper here and wrong for any vector whose
# element count differs from its stride in elements.
blas_py_wrappers = """

# Python-accessible wrappers for testing:

cdef inline bint _is_contiguous(double[:,:] a, int axis) nogil:
    return (a.strides[axis] == sizeof(a[0,0]) or a.shape[axis] == 1)

cpdef float complex _test_cdotc(float complex[:] cx, float complex[:] cy) nogil:
    cdef:
        int n = cx.shape[0]
        int incx = cx.strides[0] // sizeof(cx[0])
        int incy = cy.strides[0] // sizeof(cy[0])
    return cdotc(&n, &cx[0], &incx, &cy[0], &incy)

cpdef float complex _test_cdotu(float complex[:] cx, float complex[:] cy) nogil:
    cdef:
        int n = cx.shape[0]
        int incx = cx.strides[0] // sizeof(cx[0])
        int incy = cy.strides[0] // sizeof(cy[0])
    return cdotu(&n, &cx[0], &incx, &cy[0], &incy)

cpdef double _test_dasum(double[:] dx) nogil:
    cdef:
        int n = dx.shape[0]
        int incx = dx.strides[0] // sizeof(dx[0])
    return dasum(&n, &dx[0], &incx)

cpdef double _test_ddot(double[:] dx, double[:] dy) nogil:
    cdef:
        int n = dx.shape[0]
        int incx = dx.strides[0] // sizeof(dx[0])
        int incy = dy.strides[0] // sizeof(dy[0])
    return ddot(&n, &dx[0], &incx, &dy[0], &incy)

cpdef int _test_dgemm(double alpha, double[:,:] a, double[:,:] b, double beta,
                      double[:,:] c) nogil except -1:
    cdef:
        char *transa
        char *transb
        int m, n, k, lda, ldb, ldc
        double *a0=&a[0,0]
        double *b0=&b[0,0]
        double *c0=&c[0,0]
    # In the case that c is C contiguous, swap a and b and
    # swap whether or not each of them is transposed.
    # This can be done because a.dot(b) = b.T.dot(a.T).T.
    if _is_contiguous(c, 1):
        if _is_contiguous(a, 1):
            transb = 'n'
            ldb = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
        elif _is_contiguous(a, 0):
            transb = 't'
            ldb = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
        else:
            with gil:
                raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
        if _is_contiguous(b, 1):
            transa = 'n'
            lda = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
        elif _is_contiguous(b, 0):
            transa = 't'
            lda = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
        else:
            with gil:
                raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
        k = b.shape[0]
        if k != a.shape[1]:
            with gil:
                raise ValueError("Shape mismatch in input arrays.")
        m = b.shape[1]
        n = a.shape[0]
        if n != c.shape[0] or m != c.shape[1]:
            with gil:
                raise ValueError("Output array does not have the correct shape.")
        ldc = (&c[1,0]) - c0 if c.shape[0] > 1 else 1
        dgemm(transa, transb, &m, &n, &k, &alpha, b0, &lda, a0,
              &ldb, &beta, c0, &ldc)
    elif _is_contiguous(c, 0):
        if _is_contiguous(a, 1):
            transa = 't'
            lda = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
        elif _is_contiguous(a, 0):
            transa = 'n'
            lda = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
        else:
            with gil:
                raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
        if _is_contiguous(b, 1):
            transb = 't'
            ldb = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
        elif _is_contiguous(b, 0):
            transb = 'n'
            ldb = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
        else:
            with gil:
                raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
        m = a.shape[0]
        k = a.shape[1]
        if k != b.shape[0]:
            with gil:
                raise ValueError("Shape mismatch in input arrays.")
        n = b.shape[1]
        if m != c.shape[0] or n != c.shape[1]:
            with gil:
                raise ValueError("Output array does not have the correct shape.")
        ldc = (&c[0,1]) - c0 if c.shape[1] > 1 else 1
        dgemm(transa, transb, &m, &n, &k, &alpha, a0, &lda, b0,
              &ldb, &beta, c0, &ldc)
    else:
        with gil:
            raise ValueError("Input 'c' is neither C nor Fortran contiguous.")
    return 0

cpdef double _test_dnrm2(double[:] x) nogil:
    cdef:
        int n = x.shape[0]
        int incx = x.strides[0] // sizeof(x[0])
    return dnrm2(&n, &x[0], &incx)

cpdef double _test_dzasum(double complex[:] zx) nogil:
    cdef:
        int n = zx.shape[0]
        int incx = zx.strides[0] // sizeof(zx[0])
    return dzasum(&n, &zx[0], &incx)

cpdef double _test_dznrm2(double complex[:] x) nogil:
    cdef:
        int n = x.shape[0]
        int incx = x.strides[0] // sizeof(x[0])
    return dznrm2(&n, &x[0], &incx)

cpdef int _test_icamax(float complex[:] cx) nogil:
    cdef:
        int n = cx.shape[0]
        int incx = cx.strides[0] // sizeof(cx[0])
    return icamax(&n, &cx[0], &incx)

cpdef int _test_idamax(double[:] dx) nogil:
    cdef:
        int n = dx.shape[0]
        int incx = dx.strides[0] // sizeof(dx[0])
    return idamax(&n, &dx[0], &incx)

cpdef int _test_isamax(float[:] sx) nogil:
    cdef:
        int n = sx.shape[0]
        int incx = sx.strides[0] // sizeof(sx[0])
    return isamax(&n, &sx[0], &incx)

cpdef int _test_izamax(double complex[:] zx) nogil:
    cdef:
        int n = zx.shape[0]
        int incx = zx.strides[0] // sizeof(zx[0])
    return izamax(&n, &zx[0], &incx)

cpdef float _test_sasum(float[:] sx) nogil:
    cdef:
        int n = sx.shape[0]
        int incx = sx.strides[0] // sizeof(sx[0])
    return sasum(&n, &sx[0], &incx)

cpdef float _test_scasum(float complex[:] cx) nogil:
    cdef:
        int n = cx.shape[0]
        int incx = cx.strides[0] // sizeof(cx[0])
    return scasum(&n, &cx[0], &incx)

cpdef float _test_scnrm2(float complex[:] x) nogil:
    cdef:
        int n = x.shape[0]
        int incx = x.strides[0] // sizeof(x[0])
    return scnrm2(&n, &x[0], &incx)

cpdef float _test_sdot(float[:] sx, float[:] sy) nogil:
    cdef:
        int n = sx.shape[0]
        int incx = sx.strides[0] // sizeof(sx[0])
        int incy = sy.strides[0] // sizeof(sy[0])
    return sdot(&n, &sx[0], &incx, &sy[0], &incy)

cpdef float _test_snrm2(float[:] x) nogil:
    cdef:
        int n = x.shape[0]
        int incx = x.strides[0] // sizeof(x[0])
    return snrm2(&n, &x[0], &incx)

cpdef double complex _test_zdotc(double complex[:] zx, double complex[:] zy) nogil:
    cdef:
        int n = zx.shape[0]
        int incx = zx.strides[0] // sizeof(zx[0])
        int incy = zy.strides[0] // sizeof(zy[0])
    return zdotc(&n, &zx[0], &incx, &zy[0], &incy)

cpdef double complex _test_zdotu(double complex[:] zx, double complex[:] zy) nogil:
    cdef:
        int n = zx.shape[0]
        int incx = zx.strides[0] // sizeof(zx[0])
        int incy = zy.strides[0] // sizeof(zy[0])
    return zdotu(&n, &zx[0], &incx, &zy[0], &incy)
"""
+
+
def generate_blas_pyx(func_sigs, sub_sigs, all_sigs, header_name):
    """Assemble the full cython_blas.pyx source: preamble, function
    wrappers, subroutine wrappers, and the Python-level test helpers."""
    func_part = "\n".join(pyx_decl_func(*(sig + (header_name,)))
                          for sig in func_sigs)
    # sig[::2] drops the 'void' return type from (name, 'void', args).
    sub_part = "\n" + "\n".join(pyx_decl_sub(*(sig[::2] + (header_name,)))
                                for sig in sub_sigs)
    return (make_blas_pyx_preamble(all_sigs) + func_part + sub_part
            + blas_py_wrappers)
+
+
# Cython source appended to cython_lapack.pyx: Python-callable wrappers
# used by the test suite.
lapack_py_wrappers = """

# Python accessible wrappers for testing:

def _test_dlamch(cmach):
    # This conversion is necessary to handle Python 3 strings.
    cmach_bytes = bytes(cmach)
    # Now that it is a bytes representation, a non-temporary variable
    # must be passed as a part of the function call.
    cdef char* cmach_char = cmach_bytes
    return dlamch(cmach_char)

def _test_slamch(cmach):
    # This conversion is necessary to handle Python 3 strings.
    cmach_bytes = bytes(cmach)
    # Now that it is a bytes representation, a non-temporary variable
    # must be passed as a part of the function call.
    cdef char* cmach_char = cmach_bytes
    return slamch(cmach_char)
"""


def generate_lapack_pyx(func_sigs, sub_sigs, all_sigs, header_name):
    # Assemble the full cython_lapack.pyx source: preamble, function
    # wrappers, subroutine wrappers (sig[::2] drops the 'void' return
    # type), and the Python-level test helpers.
    funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
    subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
                            for s in sub_sigs)
    preamble = make_lapack_pyx_preamble(all_sigs)
    return preamble + funcs + subs + lapack_py_wrappers
+
+
# Declaration template used for the generated .pxd files: one ``cdef``
# line per routine.  (An earlier function-pointer-typedef formulation was
# dead code — it was immediately overwritten by this assignment — and has
# been removed.)
pxd_template = """cdef {ret_type} {name}({args}) nogil
"""


def pxd_decl(name, ret_type, args):
    """Render the .pxd declaration for one routine.

    Argument names that collide with keywords ('lambda', 'in') are renamed
    so the declaration is valid Cython.
    """
    args = args.replace('lambda', 'lambda_').replace('*in,', '*in_,')
    return pxd_template.format(name=name, ret_type=ret_type, args=args)
+
+
# Header of the generated cython_blas.pxd; the single-letter ctypedefs are
# the type codes used throughout the signature files.
blas_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_blas
# from scipy.linalg cimport cython_blas
# cimport scipy.linalg.cython_blas as cython_blas
# cimport ..linalg.cython_blas as cython_blas

# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.

ctypedef float s
ctypedef double d
ctypedef float complex c
ctypedef double complex z

"""


def generate_blas_pxd(all_sigs):
    # Preamble plus one declaration line per routine.
    body = '\n'.join(pxd_decl(*sig) for sig in all_sigs)
    return blas_pxd_preamble + body
+
+
# Header of the generated cython_lapack.pxd; the select* ctypedefs are the
# callback types used by the gees/gges routine families.
lapack_pxd_preamble = """# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_lapack
# from scipy.linalg cimport cython_lapack
# cimport scipy.linalg.cython_lapack as cython_lapack
# cimport ..linalg.cython_lapack as cython_lapack

# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.

ctypedef float s
ctypedef double d
ctypedef float complex c
ctypedef double complex z

# Function pointer type declarations for
# gees and gges families of functions.
ctypedef bint cselect1(c*)
ctypedef bint cselect2(c*, c*)
ctypedef bint dselect2(d*, d*)
ctypedef bint dselect3(d*, d*, d*)
ctypedef bint sselect2(s*, s*)
ctypedef bint sselect3(s*, s*, s*)
ctypedef bint zselect1(z*)
ctypedef bint zselect2(z*, z*)

"""


def generate_lapack_pxd(all_sigs):
    # Preamble plus one `cdef ... nogil` declaration per routine.
    return lapack_pxd_preamble + '\n'.join(pxd_decl(*sig) for sig in all_sigs)
+
+
# Fixed-form Fortran template for the shim subroutine wrapped around a
# Fortran function: the result is returned through the leading ``ret``
# argument.  The column-6 '+' markers are continuation lines.
fortran_template = """      subroutine {name}wrp(
     +    ret,
     +    {argnames}
     +    )
        external {wrapper}
        {ret_type} {wrapper}
        {ret_type} ret
        {argdecls}
        ret = {wrapper}(
     +    {argnames}
     +    )
      end
"""

# Array-dimension declarations keyed by argument name.
# NOTE(review): 'dims' is not referenced anywhere in this module as shown —
# possibly legacy; confirm before removing.
dims = {'work': '(*)', 'ab': '(ldab,*)', 'a': '(lda,*)', 'dl': '(*)',
        'd': '(*)', 'du': '(*)', 'ap': '(*)', 'e': '(*)', 'lld': '(*)'}
+
# Per-routine overrides for arguments whose Fortran dimension suffix
# differs from the defaults below ('' = scalar, '(*)' = assumed size).
xy_specialized_dims = {'x': '', 'y': ''}
a_specialized_dims = {'a': '(*)'}
special_cases = defaultdict(dict,
                            ladiv=xy_specialized_dims,
                            lanhf=a_specialized_dims,
                            lansf=a_specialized_dims,
                            lapy2=xy_specialized_dims,
                            lapy3=xy_specialized_dims)


def process_fortran_name(name, funcname):
    """Return argument ``name`` with the dimension suffix its declaration
    needs in the Fortran shim for routine ``funcname`` (e.g. ``'x'`` ->
    ``'x(n)'``).  Increment arguments ('incx', ...) are scalars and come
    back unchanged; per-routine special cases (keyed without the type
    prefix letter) override the defaults."""
    if 'inc' in name:
        return name
    overrides = special_cases[funcname[1:]]
    default = '(n)' if ('x' in name or 'y' in name) else ''
    return name + overrides.get(name, default)
+
+
def called_name(name):
    """Return the name the Fortran shim actually calls.

    A few routines are routed through 'w'-prefixed substitutes (presumably
    build-provided shims for complex-returning functions — confirm against
    the build scripts); everything else is called directly.
    """
    routed = ('cdotc', 'cdotu', 'zdotc', 'zdotu', 'cladiv', 'zladiv')
    return "w" + name if name in routed else name
+
+
def fort_subroutine_wrapper(name, ret_type, args):
    """Emit the fixed-form Fortran shim subroutine ``{name}wrp`` that calls
    function ``name`` and returns its result via the first dummy
    argument."""
    target = called_name(name)
    types_, names_ = arg_names_and_types(args)
    argnames = ',\n     +    '.join(names_)

    decorated = [process_fortran_name(n, name) for n in names_]
    argdecls = '\n        '.join('{0} {1}'.format(fortran_types[t], n)
                                 for n, t in zip(decorated, types_))
    return fortran_template.format(name=name, wrapper=target,
                                   argnames=argnames, argdecls=argdecls,
                                   ret_type=fortran_types[ret_type])


def generate_fortran(func_sigs):
    """Concatenate the Fortran shims for every function signature."""
    return "\n".join(fort_subroutine_wrapper(*sig) for sig in func_sigs)
+
+
def make_c_args(args):
    """Translate a ``'code *name'`` list into C parameter declarations."""
    codes, names_ = arg_names_and_types(args)
    ctype_list = [c_types[code] for code in codes]
    return ', '.join('{0} *{1}'.format(t, n)
                     for t, n in zip(ctype_list, names_))


# Prototype of the Fortran shim around a function: the result comes back
# through the leading ``ret`` pointer argument.
c_func_template = ("void F_FUNC({name}wrp, {upname}WRP)"
                   "({return_type} *ret, {args});\n")


def c_func_decl(name, return_type, args):
    """C header declaration for a wrapped Fortran function."""
    return c_func_template.format(name=name, upname=name.upper(),
                                  return_type=c_types[return_type],
                                  args=make_c_args(args))


c_sub_template = "void F_FUNC({name},{upname})({args});\n"


def c_sub_decl(name, return_type, args):
    """C header declaration for a Fortran subroutine.

    ``return_type`` is accepted for signature symmetry with
    :func:`c_func_decl` but is always 'void' and unused.
    """
    return c_sub_template.format(name=name, upname=name.upper(),
                                 args=make_c_args(args))
+
+
# Include guard + includes opening the generated C headers.
c_preamble = """#ifndef SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#define SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#include "fortran_defs.h"
#include "numpy/arrayobject.h"
"""

# Selector-callback typedefs; emitted only into the LAPACK header.
lapack_decls = """
typedef int (*_cselect1)(npy_complex64*);
typedef int (*_cselect2)(npy_complex64*, npy_complex64*);
typedef int (*_dselect2)(double*, double*);
typedef int (*_dselect3)(double*, double*, double*);
typedef int (*_sselect2)(float*, float*);
typedef int (*_sselect3)(float*, float*, float*);
typedef int (*_zselect1)(npy_complex128*);
typedef int (*_zselect2)(npy_complex128*, npy_complex128*);
"""

# C++ linkage guard, opening half...
cpp_guard = """
#ifdef __cplusplus
extern "C" {
#endif

"""

# ...closing half of the linkage guard plus the include-guard terminator.
c_end = """
#ifdef __cplusplus
}
#endif
#endif
"""
+
+
def generate_c_header(func_sigs, sub_sigs, all_sigs, lib_name):
    """Build the contents of the generated ``_{blas,lapack}_subroutines.h``
    header for library ``lib_name`` ('BLAS' or 'LAPACK')."""
    decls = "".join(c_func_decl(*sig) for sig in func_sigs)
    decls += "\n" + "".join(c_sub_decl(*sig) for sig in sub_sigs)
    header = c_preamble.format(lib=lib_name)
    if lib_name == 'LAPACK':
        # Only LAPACK needs the selector function-pointer typedefs.
        header += lapack_decls
    return "".join([header, cpp_guard, decls, c_end])
+
+
def split_signature(sig):
    """Parse a signature line ``'ret name(args)'`` into
    ``(name, ret_type, args)``."""
    head, argstr = sig[:-1].split('(')  # sig[:-1] drops the trailing ')'
    ret_type, name = head.split(' ')
    return name, ret_type, argstr


def filter_lines(lines):
    """Strip comments and blank lines, then split the signatures into
    functions (non-void) and subroutines (void).

    Returns ``(func_sigs, sub_sigs, all_sigs)`` where ``all_sigs`` is the
    concatenation sorted by routine name.
    """
    stripped = [ln for ln in map(str.strip, lines)
                if ln and not ln.startswith('#')]
    func_sigs = [split_signature(ln) for ln in stripped
                 if ln.split(' ')[0] != 'void']
    sub_sigs = [split_signature(ln) for ln in stripped
                if ln.split(' ')[0] == 'void']
    all_sigs = sorted(func_sigs + sub_sigs, key=itemgetter(0))
    return func_sigs, sub_sigs, all_sigs
+
+
def all_newer(src_files, dst_files):
    """Return True iff every ``dst`` file exists and is strictly newer than
    every ``src`` file (a missing ``src`` counts as older).

    Reimplemented with ``os.path.getmtime`` because ``distutils.dep_util``
    (previously used here) was deprecated and removed in Python 3.12; the
    semantics mirror ``distutils.dep_util.newer(dst, src)``.
    """
    def newer(dst, src):
        # Missing src: treat dst as newer, matching distutils behavior.
        return (not os.path.exists(src)
                or os.path.getmtime(dst) > os.path.getmtime(src))

    return all(os.path.exists(dst) and newer(dst, src)
               for dst in dst_files for src in src_files)
+
+
def make_all(blas_signature_file="cython_blas_signatures.txt",
             lapack_signature_file="cython_lapack_signatures.txt",
             blas_name="cython_blas",
             lapack_name="cython_lapack",
             blas_fortran_name="_blas_subroutine_wrappers.f",
             lapack_fortran_name="_lapack_subroutine_wrappers.f",
             blas_header_name="_blas_subroutines.h",
             lapack_header_name="_lapack_subroutines.h"):
    """Regenerate every BLAS/LAPACK wrapper source (.pyx, .pxd, Fortran
    shims, and C headers) from the two signature files.

    Runs in this script's directory and is a no-op when all outputs are
    already newer than all inputs.
    """

    src_files = (os.path.abspath(__file__),
                 blas_signature_file,
                 lapack_signature_file)
    dst_files = (blas_name + '.pyx',
                 blas_name + '.pxd',
                 blas_fortran_name,
                 blas_header_name,
                 lapack_name + '.pyx',
                 lapack_name + '.pxd',
                 lapack_fortran_name,
                 lapack_header_name)

    os.chdir(BASE_DIR)

    if all_newer(src_files, dst_files):
        print("scipy/linalg/_generate_pyx.py: all files up-to-date")
        return

    # Per-language "generated file -- do not edit" banners.
    comments = ["This file was generated by _generate_pyx.py.\n",
                "Do not edit this file directly.\n"]
    ccomment = ''.join(['/* ' + line.rstrip() + ' */\n'
                        for line in comments]) + '\n'
    pyxcomment = ''.join(['# ' + line for line in comments]) + '\n'
    fcomment = ''.join(['c ' + line for line in comments]) + '\n'
    with open(blas_signature_file, 'r') as f:
        blas_sigs = f.readlines()
    # From here on blas_sigs is the (func_sigs, sub_sigs, all_sigs) triple.
    blas_sigs = filter_lines(blas_sigs)
    blas_pyx = generate_blas_pyx(*(blas_sigs + (blas_header_name,)))
    with open(blas_name + '.pyx', 'w') as f:
        f.write(pyxcomment)
        f.write(blas_pyx)
    blas_pxd = generate_blas_pxd(blas_sigs[2])
    with open(blas_name + '.pxd', 'w') as f:
        f.write(pyxcomment)
        f.write(blas_pxd)
    blas_fortran = generate_fortran(blas_sigs[0])
    with open(blas_fortran_name, 'w') as f:
        f.write(fcomment)
        f.write(blas_fortran)
    blas_c_header = generate_c_header(*(blas_sigs + ('BLAS',)))
    with open(blas_header_name, 'w') as f:
        f.write(ccomment)
        f.write(blas_c_header)
    with open(lapack_signature_file, 'r') as f:
        lapack_sigs = f.readlines()
    lapack_sigs = filter_lines(lapack_sigs)
    lapack_pyx = generate_lapack_pyx(*(lapack_sigs + (lapack_header_name,)))
    with open(lapack_name + '.pyx', 'w') as f:
        f.write(pyxcomment)
        f.write(lapack_pyx)
    lapack_pxd = generate_lapack_pxd(lapack_sigs[2])
    with open(lapack_name + '.pxd', 'w') as f:
        f.write(pyxcomment)
        f.write(lapack_pxd)
    lapack_fortran = generate_fortran(lapack_sigs[0])
    with open(lapack_fortran_name, 'w') as f:
        f.write(fcomment)
        f.write(lapack_fortran)
    lapack_c_header = generate_c_header(*(lapack_sigs + ('LAPACK',)))
    with open(lapack_header_name, 'w') as f:
        f.write(ccomment)
        f.write(lapack_c_header)


if __name__ == '__main__':
    make_all()
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_interpolative.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_interpolative.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..ef4ec5b
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_interpolative.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_interpolative_backend.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_interpolative_backend.py
new file mode 100644
index 0000000..7835314
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_interpolative_backend.py
@@ -0,0 +1,1681 @@
+#******************************************************************************
+# Copyright (C) 2013 Kenneth L. Ho
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer. Redistributions in binary
+# form must reproduce the above copyright notice, this list of conditions and
+# the following disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# None of the names of the copyright holders may be used to endorse or
+# promote products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+"""
+Direct wrappers for Fortran `id_dist` backend.
+"""
+
+import scipy.linalg._interpolative as _id
+import numpy as np
+
+_RETCODE_ERROR = RuntimeError("nonzero return code")
+
+
+def _asfortranarray_copy(A):
+ """
+ Same as np.asfortranarray, but ensure a copy
+ """
+ A = np.asarray(A)
+ if A.flags.f_contiguous:
+ A = A.copy(order="F")
+ else:
+ A = np.asfortranarray(A)
+ return A
+
+
+#------------------------------------------------------------------------------
+# id_rand.f
+#------------------------------------------------------------------------------
+
def id_srand(n):
    """Draw ``n`` standard-uniform pseudorandom numbers from the backend's
    lagged-Fibonacci generator.

    :param n: Number of pseudorandom numbers to generate.
    :type n: int
    :return: Pseudorandom numbers.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.id_srand(n)


def id_srandi(t):
    """Seed :func:`id_srand` with the 55 values in ``t`` (any suitably
    random numbers will do).

    :param t: Array of 55 seed values.
    :type t: :class:`numpy.ndarray`
    """
    _id.id_srandi(np.asfortranarray(t))


def id_srando():
    """Restore the :func:`id_srand` seed values to their defaults."""
    _id.id_srando()
+
+
+#------------------------------------------------------------------------------
+# idd_frm.f
+#------------------------------------------------------------------------------
+
def idd_frm(n, w, x):
    """Transform real vector ``x`` via a composition of Rokhlin's random
    transform, random subselection, and an FFT; the result is randomly
    permuted.

    In contrast to :func:`idd_sfrm`, this works best when the transformed
    length is the power-of-two integer output by :func:`idd_frmi` (or is
    left to be determined a posteriori from the output).

    :param n: Power-of-two length of the output, as obtained from
        :func:`idd_frmi` (satisfies ``n <= x.size``).
    :type n: int
    :param w: Initialization array constructed by :func:`idd_frmi`.
    :type w: :class:`numpy.ndarray`
    :param x: Vector to be transformed.
    :type x: :class:`numpy.ndarray`
    :return: Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_frm(n, w, x)


def idd_sfrm(l, n, w, x):
    """Transform real vector ``x`` as in :func:`idd_frm`, but specialized
    for the case where the transformed length is known a priori.

    :param l: Length of the transformed vector, satisfying ``l <= n``.
    :type l: int
    :param n: Power-of-two integer from :func:`idd_sfrmi`
        (satisfies ``n <= x.size``).
    :type n: int
    :param w: Initialization array constructed by :func:`idd_sfrmi`.
    :type w: :class:`numpy.ndarray`
    :param x: Vector to be transformed.
    :type x: :class:`numpy.ndarray`
    :return: Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_sfrm(l, n, w, x)


def idd_frmi(m):
    """Initialize data for :func:`idd_frm`.

    :param m: Length of the vector to be transformed.
    :type m: int
    :return: Greatest power-of-two integer ``n`` with ``n <= m``, and the
        initialization array to be used by :func:`idd_frm`.
    """
    return _id.idd_frmi(m)


def idd_sfrmi(l, m):
    """Initialize data for :func:`idd_sfrm`.

    :param l: Length of the output transformed vector.
    :type l: int
    :param m: Length of the vector to be transformed.
    :type m: int
    :return: Greatest power-of-two integer ``n`` with ``n <= m``, and the
        initialization array to be used by :func:`idd_sfrm`.
    """
    return _id.idd_sfrmi(l, m)
+
+
+#------------------------------------------------------------------------------
+# idd_id.f
+#------------------------------------------------------------------------------
+
def iddp_id(eps, A):
    """
    Compute ID of a real matrix to a specified relative precision.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`

    :return:
        Rank of ID.
    :rtype: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    # The backend works on A's storage in place (hence the private
    # Fortran-ordered copy).
    A = _asfortranarray_copy(A)
    k, idx, rnorms = _id.iddp_id(eps, A)
    n = A.shape[1]
    # After the call, A's storage holds the packed interpolation
    # coefficients: the first k*(n-k) entries (Fortran order) form the
    # k x (n-k) coefficient matrix.  rnorms is discarded.
    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, proj


def iddr_id(A, k):
    """
    Compute ID of a real matrix to a specified rank.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int

    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    # Same in-place protocol as iddp_id, with the rank prescribed.
    A = _asfortranarray_copy(A)
    idx, rnorms = _id.iddr_id(A, k)
    n = A.shape[1]
    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
    return idx, proj
+
+
def idd_reconid(B, idx, proj):
    """Reconstruct a matrix from its real ID.

    :param B: Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj: Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return: Reconstructed matrix.
    :rtype: :class:`numpy.ndarray`
    """
    B = np.asfortranarray(B)
    if proj.size == 0:
        # No interpolation coefficients: reconstruction is just the
        # skeleton columns put back into their original order.
        return B[:, np.argsort(idx)]
    return _id.idd_reconid(B, idx, proj)
+
+
def idd_reconint(idx, proj):
    """Build the interpolation matrix of a real ID.

    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj: Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return: Interpolation matrix.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_reconint(idx, proj)


def idd_copycols(A, k, idx):
    """Extract the rank-``k`` skeleton matrix (the selected columns of
    ``A``) for a real ID.

    :param A: Original matrix.
    :type A: :class:`numpy.ndarray`
    :param k: Rank of the ID.
    :type k: int
    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :return: Skeleton matrix.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_copycols(np.asfortranarray(A), k, idx)
+
+
+#------------------------------------------------------------------------------
+# idd_id2svd.f
+#------------------------------------------------------------------------------
+
def idd_id2svd(B, idx, proj):
    """Convert a real ID into an SVD.

    :param B: Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj: Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, and singular values.
    :raises RuntimeError: If the backend reports a nonzero return code.
    """
    U, V, S, ier = _id.idd_id2svd(np.asfortranarray(B), idx, proj)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idd_snorm.f
+#------------------------------------------------------------------------------
+
+def idd_snorm(m, n, matvect, matvec, its=20):
+ """
+ Estimate spectral norm of a real matrix by the randomized power method.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the matrix transpose to a vector, with call signature
+ `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvect: function
+ :param matvec:
+ Function to apply the matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+ :param its:
+ Number of power method iterations.
+ :type its: int
+
+ :return:
+ Spectral norm estimate.
+ :rtype: float
+ """
+ snorm, v = _id.idd_snorm(m, n, matvect, matvec, its)
+ return snorm
+
+
+def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20):
+ """
+ Estimate spectral norm of the difference of two real matrices by the
+ randomized power method.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the transpose of the first matrix to a vector, with
+ call signature `y = matvect(x)`, where `x` and `y` are the input and
+ output vectors, respectively.
+ :type matvect: function
+ :param matvect2:
+ Function to apply the transpose of the second matrix to a vector, with
+ call signature `y = matvect2(x)`, where `x` and `y` are the input and
+ output vectors, respectively.
+ :type matvect2: function
+ :param matvec:
+ Function to apply the first matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+ :param matvec2:
+ Function to apply the second matrix to a vector, with call signature
+ `y = matvec2(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec2: function
+ :param its:
+ Number of power method iterations.
+ :type its: int
+
+ :return:
+ Spectral norm estimate of matrix difference.
+ :rtype: float
+ """
+ return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its)
+
+
+#------------------------------------------------------------------------------
+# idd_svd.f
+#------------------------------------------------------------------------------
+
+def iddr_svd(A, k):
+ """
+ Compute SVD of a real matrix to a specified rank.
+
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of SVD.
+ :type k: int
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ U, V, S, ier = _id.iddr_svd(A, k)
+ if ier:
+ raise _RETCODE_ERROR
+ return U, V, S
+
+
+def iddp_svd(eps, A):
+    """
+    Compute SVD of a real matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    # The backend returns the rank k and 1-based offsets (iU, iV, iS) into
+    # the flat workspace w where U, V, and S are stored.
+    k, iU, iV, iS, w, ier = _id.iddp_svd(eps, A)
+    if ier:
+        raise _RETCODE_ERROR
+    # The `-1` converts the 1-based Fortran offsets to 0-based Python
+    # indices; the factors are stored column-major, hence order='F'.
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddp_aid.f
+#------------------------------------------------------------------------------
+
+def iddp_aid(eps, A):
+ """
+ Compute ID of a real matrix to a specified relative precision using random
+ sampling.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Rank of ID.
+ :rtype: int
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ n2, w = idd_frmi(m)
+ proj = np.empty(n*(2*n2 + 1) + n2 + 1, order='F')
+ k, idx, proj = _id.iddp_aid(eps, A, w, proj)
+ proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+ return k, idx, proj
+
+
+def idd_estrank(eps, A):
+ """
+ Estimate rank of a real matrix to a specified relative precision using
+ random sampling.
+
+ The output rank is typically about 8 higher than the actual rank.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Rank estimate.
+ :rtype: int
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ n2, w = idd_frmi(m)
+ ra = np.empty(n*n2 + (n + 1)*(n2 + 1), order='F')
+ k, ra = _id.idd_estrank(eps, A, w, ra)
+ return k
+
+
+#------------------------------------------------------------------------------
+# iddp_asvd.f
+#------------------------------------------------------------------------------
+
+def iddp_asvd(eps, A):
+ """
+ Compute SVD of a real matrix to a specified relative precision using random
+ sampling.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ n2, winit = _id.idd_frmi(m)
+ w = np.empty(
+ max((min(m, n) + 1)*(3*m + 5*n + 1) + 25*min(m, n)**2,
+ (2*n + 1)*(n2 + 1)),
+ order='F')
+ k, iU, iV, iS, w, ier = _id.iddp_asvd(eps, A, winit, w)
+ if ier:
+ raise _RETCODE_ERROR
+ U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+ V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+ S = w[iS-1:iS+k-1]
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddp_rid.f
+#------------------------------------------------------------------------------
+
+def iddp_rid(eps, m, n, matvect):
+ """
+ Compute ID of a real matrix to a specified relative precision using random
+ matrix-vector multiplication.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the matrix transpose to a vector, with call signature
+ `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvect: function
+
+ :return:
+ Rank of ID.
+ :rtype: int
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F')
+ k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, proj)
+ if ier != 0:
+ raise _RETCODE_ERROR
+ proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+ return k, idx, proj
+
+
+def idd_findrank(eps, m, n, matvect):
+ """
+ Estimate rank of a real matrix to a specified relative precision using
+ random matrix-vector multiplication.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the matrix transpose to a vector, with call signature
+ `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvect: function
+
+ :return:
+ Rank estimate.
+ :rtype: int
+ """
+ k, ra, ier = _id.idd_findrank(eps, m, n, matvect)
+ if ier:
+ raise _RETCODE_ERROR
+ return k
+
+
+#------------------------------------------------------------------------------
+# iddp_rsvd.f
+#------------------------------------------------------------------------------
+
+def iddp_rsvd(eps, m, n, matvect, matvec):
+ """
+ Compute SVD of a real matrix to a specified relative precision using random
+ matrix-vector multiplication.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the matrix transpose to a vector, with call signature
+ `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvect: function
+ :param matvec:
+ Function to apply the matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ k, iU, iV, iS, w, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec)
+ if ier:
+ raise _RETCODE_ERROR
+ U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+ V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+ S = w[iS-1:iS+k-1]
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddr_aid.f
+#------------------------------------------------------------------------------
+
+def iddr_aid(A, k):
+    """
+    Compute ID of a real matrix to a specified rank using random sampling.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    # Initialization data required by the backend routine.
+    w = iddr_aidi(m, n, k)
+    idx, proj = _id.iddr_aid(A, k, w)
+    if k == n:
+        # Full rank: no non-skeleton columns remain, so the coefficient
+        # matrix is empty; build it with an explicit (k, 0) shape rather
+        # than reshaping the backend output.
+        proj = np.empty((k, n-k), dtype='float64', order='F')
+    else:
+        proj = proj.reshape((k, n-k), order='F')
+    return idx, proj
+
+
+def iddr_aidi(m, n, k):
+ """
+ Initialize array for :func:`iddr_aid`.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param k:
+ Rank of ID.
+ :type k: int
+
+ :return:
+ Initialization array to be used by :func:`iddr_aid`.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.iddr_aidi(m, n, k)
+
+
+#------------------------------------------------------------------------------
+# iddr_asvd.f
+#------------------------------------------------------------------------------
+
+def iddr_asvd(A, k):
+ """
+ Compute SVD of a real matrix to a specified rank using random sampling.
+
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of SVD.
+ :type k: int
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ w = np.empty((2*k + 28)*m + (6*k + 21)*n + 25*k**2 + 100, order='F')
+ w_ = iddr_aidi(m, n, k)
+ w[:w_.size] = w_
+ U, V, S, ier = _id.iddr_asvd(A, k, w)
+ if ier != 0:
+ raise _RETCODE_ERROR
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddr_rid.f
+#------------------------------------------------------------------------------
+
+def iddr_rid(m, n, matvect, k):
+ """
+ Compute ID of a real matrix to a specified rank using random matrix-vector
+ multiplication.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the matrix transpose to a vector, with call signature
+ `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvect: function
+ :param k:
+ Rank of ID.
+ :type k: int
+
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ idx, proj = _id.iddr_rid(m, n, matvect, k)
+ proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+ return idx, proj
+
+
+#------------------------------------------------------------------------------
+# iddr_rsvd.f
+#------------------------------------------------------------------------------
+
+def iddr_rsvd(m, n, matvect, matvec, k):
+ """
+ Compute SVD of a real matrix to a specified rank using random matrix-vector
+ multiplication.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matvect:
+ Function to apply the matrix transpose to a vector, with call signature
+ `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvect: function
+ :param matvec:
+ Function to apply the matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+ :param k:
+ Rank of SVD.
+ :type k: int
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k)
+ if ier != 0:
+ raise _RETCODE_ERROR
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idz_frm.f
+#------------------------------------------------------------------------------
+
+def idz_frm(n, w, x):
+ """
+ Transform complex vector via a composition of Rokhlin's random transform,
+ random subselection, and an FFT.
+
+ In contrast to :func:`idz_sfrm`, this routine works best when the length of
+ the transformed vector is the power-of-two integer output by
+ :func:`idz_frmi`, or when the length is not specified but instead
+ determined a posteriori from the output. The returned transformed vector is
+ randomly permuted.
+
+ :param n:
+ Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+ :func:`idz_frmi`; `n` is also the length of the output vector.
+ :type n: int
+ :param w:
+ Initialization array constructed by :func:`idz_frmi`.
+ :type w: :class:`numpy.ndarray`
+ :param x:
+ Vector to be transformed.
+ :type x: :class:`numpy.ndarray`
+
+ :return:
+ Transformed vector.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.idz_frm(n, w, x)
+
+
+def idz_sfrm(l, n, w, x):
+ """
+ Transform complex vector via a composition of Rokhlin's random transform,
+ random subselection, and an FFT.
+
+ In contrast to :func:`idz_frm`, this routine works best when the length of
+ the transformed vector is known a priori.
+
+ :param l:
+ Length of transformed vector, satisfying `l <= n`.
+ :type l: int
+ :param n:
+ Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+ :func:`idz_sfrmi`.
+ :type n: int
+ :param w:
+ Initialization array constructed by :func:`idd_sfrmi`.
+ :type w: :class:`numpy.ndarray`
+ :param x:
+ Vector to be transformed.
+ :type x: :class:`numpy.ndarray`
+
+ :return:
+ Transformed vector.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.idz_sfrm(l, n, w, x)
+
+
+def idz_frmi(m):
+ """
+ Initialize data for :func:`idz_frm`.
+
+ :param m:
+ Length of vector to be transformed.
+ :type m: int
+
+ :return:
+ Greatest power-of-two integer `n` satisfying `n <= m`.
+ :rtype: int
+ :return:
+ Initialization array to be used by :func:`idz_frm`.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.idz_frmi(m)
+
+
+def idz_sfrmi(l, m):
+ """
+ Initialize data for :func:`idz_sfrm`.
+
+ :param l:
+ Length of output transformed vector.
+ :type l: int
+ :param m:
+ Length of the vector to be transformed.
+ :type m: int
+
+ :return:
+ Greatest power-of-two integer `n` satisfying `n <= m`.
+ :rtype: int
+ :return:
+ Initialization array to be used by :func:`idz_sfrm`.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.idz_sfrmi(l, m)
+
+
+#------------------------------------------------------------------------------
+# idz_id.f
+#------------------------------------------------------------------------------
+
+def idzp_id(eps, A):
+    """
+    Compute ID of a complex matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    # Work on a Fortran-ordered copy: the backend overwrites A in place,
+    # and the interpolation coefficients are read back out of it below.
+    A = _asfortranarray_copy(A)
+    k, idx, rnorms = _id.idzp_id(eps, A)
+    n = A.shape[1]
+    # The first k*(n-k) entries of the overwritten A, traversed row-major
+    # via A.T.ravel(), hold the interpolation coefficients.
+    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
+
+def idzr_id(A, k):
+ """
+ Compute ID of a complex matrix to a specified rank.
+
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of ID.
+ :type k: int
+
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = _asfortranarray_copy(A)
+ idx, rnorms = _id.idzr_id(A, k)
+ n = A.shape[1]
+ proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+ return idx, proj
+
+
+def idz_reconid(B, idx, proj):
+ """
+ Reconstruct matrix from complex ID.
+
+ :param B:
+ Skeleton matrix.
+ :type B: :class:`numpy.ndarray`
+ :param idx:
+ Column index array.
+ :type idx: :class:`numpy.ndarray`
+ :param proj:
+ Interpolation coefficients.
+ :type proj: :class:`numpy.ndarray`
+
+ :return:
+ Reconstructed matrix.
+ :rtype: :class:`numpy.ndarray`
+ """
+ B = np.asfortranarray(B)
+ if proj.size > 0:
+ return _id.idz_reconid(B, idx, proj)
+ else:
+ return B[:, np.argsort(idx)]
+
+
+def idz_reconint(idx, proj):
+ """
+ Reconstruct interpolation matrix from complex ID.
+
+ :param idx:
+ Column index array.
+ :type idx: :class:`numpy.ndarray`
+ :param proj:
+ Interpolation coefficients.
+ :type proj: :class:`numpy.ndarray`
+
+ :return:
+ Interpolation matrix.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.idz_reconint(idx, proj)
+
+
+def idz_copycols(A, k, idx):
+ """
+ Reconstruct skeleton matrix from complex ID.
+
+ :param A:
+ Original matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of ID.
+ :type k: int
+ :param idx:
+ Column index array.
+ :type idx: :class:`numpy.ndarray`
+
+ :return:
+ Skeleton matrix.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ return _id.idz_copycols(A, k, idx)
+
+
+#------------------------------------------------------------------------------
+# idz_id2svd.f
+#------------------------------------------------------------------------------
+
+def idz_id2svd(B, idx, proj):
+ """
+ Convert complex ID to SVD.
+
+ :param B:
+ Skeleton matrix.
+ :type B: :class:`numpy.ndarray`
+ :param idx:
+ Column index array.
+ :type idx: :class:`numpy.ndarray`
+ :param proj:
+ Interpolation coefficients.
+ :type proj: :class:`numpy.ndarray`
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ B = np.asfortranarray(B)
+ U, V, S, ier = _id.idz_id2svd(B, idx, proj)
+ if ier:
+ raise _RETCODE_ERROR
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idz_snorm.f
+#------------------------------------------------------------------------------
+
+def idz_snorm(m, n, matveca, matvec, its=20):
+ """
+ Estimate spectral norm of a complex matrix by the randomized power method.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the matrix adjoint to a vector, with call signature
+ `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matveca: function
+ :param matvec:
+ Function to apply the matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+ :param its:
+ Number of power method iterations.
+ :type its: int
+
+ :return:
+ Spectral norm estimate.
+ :rtype: float
+ """
+ snorm, v = _id.idz_snorm(m, n, matveca, matvec, its)
+ return snorm
+
+
+def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20):
+ """
+ Estimate spectral norm of the difference of two complex matrices by the
+ randomized power method.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the adjoint of the first matrix to a vector, with
+ call signature `y = matveca(x)`, where `x` and `y` are the input and
+ output vectors, respectively.
+ :type matveca: function
+ :param matveca2:
+ Function to apply the adjoint of the second matrix to a vector, with
+ call signature `y = matveca2(x)`, where `x` and `y` are the input and
+ output vectors, respectively.
+ :type matveca2: function
+ :param matvec:
+ Function to apply the first matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+ :param matvec2:
+ Function to apply the second matrix to a vector, with call signature
+ `y = matvec2(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec2: function
+ :param its:
+ Number of power method iterations.
+ :type its: int
+
+ :return:
+ Spectral norm estimate of matrix difference.
+ :rtype: float
+ """
+ return _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its)
+
+
+#------------------------------------------------------------------------------
+# idz_svd.f
+#------------------------------------------------------------------------------
+
+def idzr_svd(A, k):
+ """
+ Compute SVD of a complex matrix to a specified rank.
+
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of SVD.
+ :type k: int
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ U, V, S, ier = _id.idzr_svd(A, k)
+ if ier:
+ raise _RETCODE_ERROR
+ return U, V, S
+
+
+def idzp_svd(eps, A):
+ """
+ Compute SVD of a complex matrix to a specified relative precision.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A)
+ if ier:
+ raise _RETCODE_ERROR
+ U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+ V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+ S = w[iS-1:iS+k-1]
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzp_aid.f
+#------------------------------------------------------------------------------
+
+def idzp_aid(eps, A):
+ """
+ Compute ID of a complex matrix to a specified relative precision using
+ random sampling.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Rank of ID.
+ :rtype: int
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ n2, w = idz_frmi(m)
+ proj = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F')
+ k, idx, proj = _id.idzp_aid(eps, A, w, proj)
+ proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+ return k, idx, proj
+
+
+def idz_estrank(eps, A):
+ """
+ Estimate rank of a complex matrix to a specified relative precision using
+ random sampling.
+
+ The output rank is typically about 8 higher than the actual rank.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Rank estimate.
+ :rtype: int
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ n2, w = idz_frmi(m)
+ ra = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F')
+ k, ra = _id.idz_estrank(eps, A, w, ra)
+ return k
+
+
+#------------------------------------------------------------------------------
+# idzp_asvd.f
+#------------------------------------------------------------------------------
+
+def idzp_asvd(eps, A):
+ """
+ Compute SVD of a complex matrix to a specified relative precision using
+ random sampling.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ n2, winit = _id.idz_frmi(m)
+ w = np.empty(
+ max((min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2,
+ (2*n + 1)*(n2 + 1)),
+ dtype=np.complex128, order='F')
+ k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w)
+ if ier:
+ raise _RETCODE_ERROR
+ U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+ V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+ S = w[iS-1:iS+k-1]
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzp_rid.f
+#------------------------------------------------------------------------------
+
+def idzp_rid(eps, m, n, matveca):
+ """
+ Compute ID of a complex matrix to a specified relative precision using
+ random matrix-vector multiplication.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the matrix adjoint to a vector, with call signature
+ `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matveca: function
+
+ :return:
+ Rank of ID.
+ :rtype: int
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ proj = np.empty(
+ m + 1 + 2*n*(min(m, n) + 1),
+ dtype=np.complex128, order='F')
+ k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj)
+ if ier:
+ raise _RETCODE_ERROR
+ proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+ return k, idx, proj
+
+
+def idz_findrank(eps, m, n, matveca):
+ """
+ Estimate rank of a complex matrix to a specified relative precision using
+ random matrix-vector multiplication.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the matrix adjoint to a vector, with call signature
+ `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matveca: function
+
+ :return:
+ Rank estimate.
+ :rtype: int
+ """
+ k, ra, ier = _id.idz_findrank(eps, m, n, matveca)
+ if ier:
+ raise _RETCODE_ERROR
+ return k
+
+
+#------------------------------------------------------------------------------
+# idzp_rsvd.f
+#------------------------------------------------------------------------------
+
+def idzp_rsvd(eps, m, n, matveca, matvec):
+ """
+ Compute SVD of a complex matrix to a specified relative precision using
+ random matrix-vector multiplication.
+
+ :param eps:
+ Relative precision.
+ :type eps: float
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the matrix adjoint to a vector, with call signature
+ `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matveca: function
+ :param matvec:
+ Function to apply the matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec)
+ if ier:
+ raise _RETCODE_ERROR
+ U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+ V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+ S = w[iS-1:iS+k-1]
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzr_aid.f
+#------------------------------------------------------------------------------
+
+def idzr_aid(A, k):
+ """
+ Compute ID of a complex matrix to a specified rank using random sampling.
+
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of ID.
+ :type k: int
+
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ w = idzr_aidi(m, n, k)
+ idx, proj = _id.idzr_aid(A, k, w)
+ if k == n:
+ proj = np.empty((k, n-k), dtype='complex128', order='F')
+ else:
+ proj = proj.reshape((k, n-k), order='F')
+ return idx, proj
+
+
+def idzr_aidi(m, n, k):
+ """
+ Initialize array for :func:`idzr_aid`.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param k:
+ Rank of ID.
+ :type k: int
+
+ :return:
+ Initialization array to be used by :func:`idzr_aid`.
+ :rtype: :class:`numpy.ndarray`
+ """
+ return _id.idzr_aidi(m, n, k)
+
+
+#------------------------------------------------------------------------------
+# idzr_asvd.f
+#------------------------------------------------------------------------------
+
+def idzr_asvd(A, k):
+ """
+ Compute SVD of a complex matrix to a specified rank using random sampling.
+
+ :param A:
+ Matrix.
+ :type A: :class:`numpy.ndarray`
+ :param k:
+ Rank of SVD.
+ :type k: int
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ A = np.asfortranarray(A)
+ m, n = A.shape
+ w = np.empty(
+ (2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90,
+ dtype='complex128', order='F')
+ w_ = idzr_aidi(m, n, k)
+ w[:w_.size] = w_
+ U, V, S, ier = _id.idzr_asvd(A, k, w)
+ if ier:
+ raise _RETCODE_ERROR
+ return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzr_rid.f
+#------------------------------------------------------------------------------
+
+def idzr_rid(m, n, matveca, k):
+ """
+ Compute ID of a complex matrix to a specified rank using random
+ matrix-vector multiplication.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the matrix adjoint to a vector, with call signature
+ `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matveca: function
+ :param k:
+ Rank of ID.
+ :type k: int
+
+ :return:
+ Column index array.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray`
+ """
+ idx, proj = _id.idzr_rid(m, n, matveca, k)
+ proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+ return idx, proj
+
+
+#------------------------------------------------------------------------------
+# idzr_rsvd.f
+#------------------------------------------------------------------------------
+
+def idzr_rsvd(m, n, matveca, matvec, k):
+ """
+ Compute SVD of a complex matrix to a specified rank using random
+ matrix-vector multiplication.
+
+ :param m:
+ Matrix row dimension.
+ :type m: int
+ :param n:
+ Matrix column dimension.
+ :type n: int
+ :param matveca:
+ Function to apply the matrix adjoint to a vector, with call signature
+ `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matveca: function
+ :param matvec:
+ Function to apply the matrix to a vector, with call signature
+ `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+ respectively.
+ :type matvec: function
+ :param k:
+ Rank of SVD.
+ :type k: int
+
+ :return:
+ Left singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Right singular vectors.
+ :rtype: :class:`numpy.ndarray`
+ :return:
+ Singular values.
+ :rtype: :class:`numpy.ndarray`
+ """
+ U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k)
+ if ier:
+ raise _RETCODE_ERROR
+ return U, V, S
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_inv_ssq.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_inv_ssq.py
new file mode 100644
index 0000000..c43c9a6
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_inv_ssq.py
@@ -0,0 +1,886 @@
+"""
+Matrix functions that use Pade approximation with inverse scaling and squaring.
+
+"""
+import warnings
+
+import numpy as np
+
+from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
+from scipy.linalg.decomp_schur import schur, rsf2csf
+from scipy.linalg.matfuncs import funm
+from scipy.linalg import svdvals, solve_triangular
+from scipy.sparse.linalg.interface import LinearOperator
+from scipy.sparse.linalg import onenormest
+import scipy.special
+
+
+class LogmRankWarning(UserWarning):
+ pass
+
+
+class LogmExactlySingularWarning(LogmRankWarning):
+ pass
+
+
+class LogmNearlySingularWarning(LogmRankWarning):
+ pass
+
+
+class LogmError(np.linalg.LinAlgError):
+ pass
+
+
+class FractionalMatrixPowerError(np.linalg.LinAlgError):
+ pass
+
+
+#TODO renovate or move this class when scipy operators are more mature
+class _MatrixM1PowerOperator(LinearOperator):
+ """
+ A representation of the linear operator (A - I)^p.
+ """
+
+ def __init__(self, A, p):
+ if A.ndim != 2 or A.shape[0] != A.shape[1]:
+ raise ValueError('expected A to be like a square matrix')
+ if p < 0 or p != int(p):
+ raise ValueError('expected p to be a non-negative integer')
+ self._A = A
+ self._p = p
+ self.ndim = A.ndim
+ self.shape = A.shape
+
+ def _matvec(self, x):
+ for i in range(self._p):
+ x = self._A.dot(x) - x
+ return x
+
+ def _rmatvec(self, x):
+ for i in range(self._p):
+ x = x.dot(self._A) - x
+ return x
+
+ def _matmat(self, X):
+ for i in range(self._p):
+ X = self._A.dot(X) - X
+ return X
+
+ def _adjoint(self):
+ return _MatrixM1PowerOperator(self._A.T, self._p)
+
+
+#TODO renovate or move this function when SciPy operators are more mature
+def _onenormest_m1_power(A, p,
+ t=2, itmax=5, compute_v=False, compute_w=False):
+ """
+ Efficiently estimate the 1-norm of (A - I)^p.
+
+ Parameters
+ ----------
+ A : ndarray
+ Matrix whose 1-norm of a power is to be computed.
+ p : int
+ Non-negative integer power.
+ t : int, optional
+ A positive parameter controlling the tradeoff between
+ accuracy versus time and memory usage.
+ Larger values take longer and use more memory
+ but give more accurate output.
+ itmax : int, optional
+ Use at most this many iterations.
+ compute_v : bool, optional
+ Request a norm-maximizing linear operator input vector if True.
+ compute_w : bool, optional
+ Request a norm-maximizing linear operator output vector if True.
+
+ Returns
+ -------
+ est : float
+        An underestimate of the 1-norm of (A - I)^p.
+ v : ndarray, optional
+ The vector such that ||Av||_1 == est*||v||_1.
+ It can be thought of as an input to the linear operator
+ that gives an output with particularly large norm.
+ w : ndarray, optional
+ The vector Av which has relatively large 1-norm.
+ It can be thought of as an output of the linear operator
+ that is relatively large in norm compared to the input.
+
+ """
+ return onenormest(_MatrixM1PowerOperator(A, p),
+ t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
+
+
+def _unwindk(z):
+ """
+ Compute the scalar unwinding number.
+
+    Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z))) / (2 pi i).
+ Note that this definition differs in sign from the original definition
+ in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
+
+ Parameters
+ ----------
+ z : complex
+ A complex number.
+
+ Returns
+ -------
+ unwinding_number : integer
+ The scalar unwinding number of z.
+
+ References
+ ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+ "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+ SIAM Journal on Matrix Analysis and Applications,
+ 32 (3). pp. 1056-1078. ISSN 0895-4798
+
+ .. [2] Robert M. Corless and David J. Jeffrey,
+ "The unwinding number." Newsletter ACM SIGSAM Bulletin
+ Volume 30, Issue 2, June 1996, Pages 28-35.
+
+ .. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
+ David J. Jeffrey and Stephen M. Watt,
+ "Reasoning about the elementary functions of complex analysis"
+ Annals of Mathematics and Artificial Intelligence,
+ 36: 303-318, 2002.
+
+ """
+ return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
+
+
+def _briggs_helper_function(a, k):
+ """
+ Computes r = a^(1 / (2^k)) - 1.
+
+ This is algorithm (2) of [1]_.
+ The purpose is to avoid a danger of subtractive cancellation.
+ For more computational efficiency it should probably be cythonized.
+
+ Parameters
+ ----------
+ a : complex
+ A complex number.
+ k : integer
+ A nonnegative integer.
+
+ Returns
+ -------
+ r : complex
+ The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
+
+ Notes
+ -----
+ The algorithm as formulated in the reference does not handle k=0 or k=1
+ correctly, so these are special-cased in this implementation.
+ This function is intended to not allow `a` to belong to the closed
+ negative real axis, but this constraint is relaxed.
+
+ References
+ ----------
+ .. [1] Awad H. Al-Mohy (2012)
+ "A more accurate Briggs method for the logarithm",
+ Numerical Algorithms, 59 : 393--402.
+
+ """
+ if k < 0 or int(k) != k:
+ raise ValueError('expected a nonnegative integer k')
+ if k == 0:
+ return a - 1
+ elif k == 1:
+ return np.sqrt(a) - 1
+ else:
+ k_hat = k
+ if np.angle(a) >= np.pi / 2:
+ a = np.sqrt(a)
+ k_hat = k - 1
+ z0 = a - 1
+ a = np.sqrt(a)
+ r = 1 + a
+ for j in range(1, k_hat):
+ a = np.sqrt(a)
+ r = r * (1 + a)
+ r = z0 / r
+ return r
+
+
+def _fractional_power_superdiag_entry(l1, l2, t12, p):
+ """
+ Compute a superdiagonal entry of a fractional matrix power.
+
+ This is Eq. (5.6) in [1]_.
+
+ Parameters
+ ----------
+ l1 : complex
+ A diagonal entry of the matrix.
+ l2 : complex
+ A diagonal entry of the matrix.
+ t12 : complex
+ A superdiagonal entry of the matrix.
+ p : float
+ A fractional power.
+
+ Returns
+ -------
+ f12 : complex
+ A superdiagonal entry of the fractional matrix power.
+
+ Notes
+ -----
+ Care has been taken to return a real number if possible when
+ all of the inputs are real numbers.
+
+ References
+ ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+ "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+ SIAM Journal on Matrix Analysis and Applications,
+ 32 (3). pp. 1056-1078. ISSN 0895-4798
+
+ """
+ if l1 == l2:
+ f12 = t12 * p * l1**(p-1)
+ elif abs(l2 - l1) > abs(l1 + l2) / 2:
+ f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
+ else:
+ # This is Eq. (5.5) in [1].
+ z = (l2 - l1) / (l2 + l1)
+ log_l1 = np.log(l1)
+ log_l2 = np.log(l2)
+ arctanh_z = np.arctanh(z)
+ tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
+ tmp_u = _unwindk(log_l2 - log_l1)
+ if tmp_u:
+ tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
+ else:
+ tmp_b = p * arctanh_z
+ tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
+ f12 = tmp_a * tmp_c
+ return f12
+
+
+def _logm_superdiag_entry(l1, l2, t12):
+ """
+ Compute a superdiagonal entry of a matrix logarithm.
+
+ This is like Eq. (11.28) in [1]_, except the determination of whether
+ l1 and l2 are sufficiently far apart has been modified.
+
+ Parameters
+ ----------
+ l1 : complex
+ A diagonal entry of the matrix.
+ l2 : complex
+ A diagonal entry of the matrix.
+ t12 : complex
+ A superdiagonal entry of the matrix.
+
+ Returns
+ -------
+ f12 : complex
+ A superdiagonal entry of the matrix logarithm.
+
+ Notes
+ -----
+ Care has been taken to return a real number if possible when
+ all of the inputs are real numbers.
+
+ References
+ ----------
+ .. [1] Nicholas J. Higham (2008)
+ "Functions of Matrices: Theory and Computation"
+ ISBN 978-0-898716-46-7
+
+ """
+ if l1 == l2:
+ f12 = t12 / l1
+ elif abs(l2 - l1) > abs(l1 + l2) / 2:
+ f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
+ else:
+ z = (l2 - l1) / (l2 + l1)
+ u = _unwindk(np.log(l2) - np.log(l1))
+ if u:
+ f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1)
+ else:
+ f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
+ return f12
+
+
+def _inverse_squaring_helper(T0, theta):
+ """
+ A helper function for inverse scaling and squaring for Pade approximation.
+
+ Parameters
+ ----------
+ T0 : (N, N) array_like upper triangular
+ Matrix involved in inverse scaling and squaring.
+ theta : indexable
+ The values theta[1] .. theta[7] must be available.
+ They represent bounds related to Pade approximation, and they depend
+ on the matrix function which is being computed.
+ For example, different values of theta are required for
+ matrix logarithm than for fractional matrix power.
+
+ Returns
+ -------
+ R : (N, N) array_like upper triangular
+ Composition of zero or more matrix square roots of T0, minus I.
+ s : non-negative integer
+ Number of square roots taken.
+ m : positive integer
+ The degree of the Pade approximation.
+
+ Notes
+ -----
+ This subroutine appears as a chunk of lines within
+ a couple of published algorithms; for example it appears
+ as lines 4--35 in algorithm (3.1) of [1]_, and
+ as lines 3--34 in algorithm (4.1) of [2]_.
+ The instances of 'goto line 38' in algorithm (3.1) of [1]_
+    probably mean 'goto line 36' and have been interpreted accordingly.
+
+ References
+ ----------
+ .. [1] Nicholas J. Higham and Lijing Lin (2013)
+ "An Improved Schur-Pade Algorithm for Fractional Powers
+ of a Matrix and their Frechet Derivatives."
+
+ .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+ "Improved Inverse Scaling and Squaring Algorithms
+ for the Matrix Logarithm."
+ SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+ ISSN 1095-7197
+
+ """
+ if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
+ raise ValueError('expected an upper triangular square matrix')
+ n, n = T0.shape
+ T = T0
+
+ # Find s0, the smallest s such that the spectral radius
+ # of a certain diagonal matrix is at most theta[7].
+ # Note that because theta[7] < 1,
+ # this search will not terminate if any diagonal entry of T is zero.
+ s0 = 0
+ tmp_diag = np.diag(T)
+ if np.count_nonzero(tmp_diag) != n:
+ raise Exception('internal inconsistency')
+ while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
+ tmp_diag = np.sqrt(tmp_diag)
+ s0 += 1
+
+ # Take matrix square roots of T.
+ for i in range(s0):
+ T = _sqrtm_triu(T)
+
+ # Flow control in this section is a little odd.
+ # This is because I am translating algorithm descriptions
+ # which have GOTOs in the publication.
+ s = s0
+ k = 0
+ d2 = _onenormest_m1_power(T, 2) ** (1/2)
+ d3 = _onenormest_m1_power(T, 3) ** (1/3)
+ a2 = max(d2, d3)
+ m = None
+ for i in (1, 2):
+ if a2 <= theta[i]:
+ m = i
+ break
+ while m is None:
+ if s > s0:
+ d3 = _onenormest_m1_power(T, 3) ** (1/3)
+ d4 = _onenormest_m1_power(T, 4) ** (1/4)
+ a3 = max(d3, d4)
+ if a3 <= theta[7]:
+ j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
+ if j1 <= 6:
+ m = j1
+ break
+ elif a3 / 2 <= theta[5] and k < 2:
+ k += 1
+ T = _sqrtm_triu(T)
+ s += 1
+ continue
+ d5 = _onenormest_m1_power(T, 5) ** (1/5)
+ a4 = max(d4, d5)
+ eta = min(a3, a4)
+ for i in (6, 7):
+ if eta <= theta[i]:
+ m = i
+ break
+ if m is not None:
+ break
+ T = _sqrtm_triu(T)
+ s += 1
+
+ # The subtraction of the identity is redundant here,
+ # because the diagonal will be replaced for improved numerical accuracy,
+ # but this formulation should help clarify the meaning of R.
+ R = T - np.identity(n)
+
+ # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
+ # using formulas that have less subtractive cancellation.
+ # Skip this step if the principal branch
+ # does not exist at T0; this happens when a diagonal entry of T0
+ # is negative with imaginary part 0.
+ has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
+ if has_principal_branch:
+ for j in range(n):
+ a = T0[j, j]
+ r = _briggs_helper_function(a, s)
+ R[j, j] = r
+ p = np.exp2(-s)
+ for j in range(n-1):
+ l1 = T0[j, j]
+ l2 = T0[j+1, j+1]
+ t12 = T0[j, j+1]
+ f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
+ R[j, j+1] = f12
+
+ # Return the T-I matrix, the number of square roots, and the Pade degree.
+ if not np.array_equal(R, np.triu(R)):
+ raise Exception('internal inconsistency')
+ return R, s, m
+
+
+def _fractional_power_pade_constant(i, t):
+ # A helper function for matrix fractional power.
+ if i < 1:
+ raise ValueError('expected a positive integer i')
+ if not (-1 < t < 1):
+ raise ValueError('expected -1 < t < 1')
+ if i == 1:
+ return -t
+ elif i % 2 == 0:
+ j = i // 2
+ return (-j + t) / (2 * (2*j - 1))
+ elif i % 2 == 1:
+ j = (i - 1) // 2
+ return (-j - t) / (2 * (2*j + 1))
+ else:
+ raise Exception('internal error')
+
+
+def _fractional_power_pade(R, t, m):
+ """
+ Evaluate the Pade approximation of a fractional matrix power.
+
+ Evaluate the degree-m Pade approximation of R
+ to the fractional matrix power t using the continued fraction
+ in bottom-up fashion using algorithm (4.1) in [1]_.
+
+ Parameters
+ ----------
+ R : (N, N) array_like
+ Upper triangular matrix whose fractional power to evaluate.
+ t : float
+ Fractional power between -1 and 1 exclusive.
+ m : positive integer
+ Degree of Pade approximation.
+
+ Returns
+ -------
+ U : (N, N) array_like
+ The degree-m Pade approximation of R to the fractional power t.
+ This matrix will be upper triangular.
+
+ References
+ ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+ "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+ SIAM Journal on Matrix Analysis and Applications,
+ 32 (3). pp. 1056-1078. ISSN 0895-4798
+
+ """
+ if m < 1 or int(m) != m:
+ raise ValueError('expected a positive integer m')
+ if not (-1 < t < 1):
+ raise ValueError('expected -1 < t < 1')
+ R = np.asarray(R)
+ if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
+ raise ValueError('expected an upper triangular square matrix')
+ n, n = R.shape
+ ident = np.identity(n)
+ Y = R * _fractional_power_pade_constant(2*m, t)
+ for j in range(2*m - 1, 0, -1):
+ rhs = R * _fractional_power_pade_constant(j, t)
+ Y = solve_triangular(ident + Y, rhs)
+ U = ident + Y
+ if not np.array_equal(U, np.triu(U)):
+ raise Exception('internal inconsistency')
+ return U
+
+
+def _remainder_matrix_power_triu(T, t):
+ """
+ Compute a fractional power of an upper triangular matrix.
+
+ The fractional power is restricted to fractions -1 < t < 1.
+ This uses algorithm (3.1) of [1]_.
+ The Pade approximation itself uses algorithm (4.1) of [2]_.
+
+ Parameters
+ ----------
+ T : (N, N) array_like
+ Upper triangular matrix whose fractional power to evaluate.
+ t : float
+ Fractional power between -1 and 1 exclusive.
+
+ Returns
+ -------
+ X : (N, N) array_like
+ The fractional power of the matrix.
+
+ References
+ ----------
+ .. [1] Nicholas J. Higham and Lijing Lin (2013)
+ "An Improved Schur-Pade Algorithm for Fractional Powers
+ of a Matrix and their Frechet Derivatives."
+
+    .. [2] Nicholas J. Higham and Lijing Lin (2011)
+ "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+ SIAM Journal on Matrix Analysis and Applications,
+ 32 (3). pp. 1056-1078. ISSN 0895-4798
+
+ """
+ m_to_theta = {
+ 1: 1.51e-5,
+ 2: 2.24e-3,
+ 3: 1.88e-2,
+ 4: 6.04e-2,
+ 5: 1.24e-1,
+ 6: 2.00e-1,
+ 7: 2.79e-1,
+ }
+ n, n = T.shape
+ T0 = T
+ T0_diag = np.diag(T0)
+ if np.array_equal(T0, np.diag(T0_diag)):
+ U = np.diag(T0_diag ** t)
+ else:
+ R, s, m = _inverse_squaring_helper(T0, m_to_theta)
+
+ # Evaluate the Pade approximation.
+ # Note that this function expects the negative of the matrix
+ # returned by the inverse squaring helper.
+ U = _fractional_power_pade(-R, t, m)
+
+ # Undo the inverse scaling and squaring.
+ # Be less clever about this
+ # if the principal branch does not exist at T0;
+ # this happens when a diagonal entry of T0
+ # is negative with imaginary part 0.
+ eivals = np.diag(T0)
+ has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
+ for i in range(s, -1, -1):
+ if i < s:
+ U = U.dot(U)
+ else:
+ if has_principal_branch:
+ p = t * np.exp2(-i)
+ U[np.diag_indices(n)] = T0_diag ** p
+ for j in range(n-1):
+ l1 = T0[j, j]
+ l2 = T0[j+1, j+1]
+ t12 = T0[j, j+1]
+ f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
+ U[j, j+1] = f12
+ if not np.array_equal(U, np.triu(U)):
+ raise Exception('internal inconsistency')
+ return U
+
+
+def _remainder_matrix_power(A, t):
+ """
+ Compute the fractional power of a matrix, for fractions -1 < t < 1.
+
+ This uses algorithm (3.1) of [1]_.
+ The Pade approximation itself uses algorithm (4.1) of [2]_.
+
+ Parameters
+ ----------
+ A : (N, N) array_like
+ Matrix whose fractional power to evaluate.
+ t : float
+ Fractional power between -1 and 1 exclusive.
+
+ Returns
+ -------
+ X : (N, N) array_like
+ The fractional power of the matrix.
+
+ References
+ ----------
+ .. [1] Nicholas J. Higham and Lijing Lin (2013)
+ "An Improved Schur-Pade Algorithm for Fractional Powers
+ of a Matrix and their Frechet Derivatives."
+
+    .. [2] Nicholas J. Higham and Lijing Lin (2011)
+ "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+ SIAM Journal on Matrix Analysis and Applications,
+ 32 (3). pp. 1056-1078. ISSN 0895-4798
+
+ """
+ # This code block is copied from numpy.matrix_power().
+ A = np.asarray(A)
+ if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+ raise ValueError('input must be a square array')
+
+ # Get the number of rows and columns.
+ n, n = A.shape
+
+ # Triangularize the matrix if necessary,
+ # attempting to preserve dtype if possible.
+ if np.array_equal(A, np.triu(A)):
+ Z = None
+ T = A
+ else:
+ if np.isrealobj(A):
+ T, Z = schur(A)
+ if not np.array_equal(T, np.triu(T)):
+ T, Z = rsf2csf(T, Z)
+ else:
+ T, Z = schur(A, output='complex')
+
+ # Zeros on the diagonal of the triangular matrix are forbidden,
+ # because the inverse scaling and squaring cannot deal with it.
+ T_diag = np.diag(T)
+ if np.count_nonzero(T_diag) != n:
+ raise FractionalMatrixPowerError(
+ 'cannot use inverse scaling and squaring to find '
+ 'the fractional matrix power of a singular matrix')
+
+ # If the triangular matrix is real and has a negative
+ # entry on the diagonal, then force the matrix to be complex.
+ if np.isrealobj(T) and np.min(T_diag) < 0:
+ T = T.astype(complex)
+
+ # Get the fractional power of the triangular matrix,
+ # and de-triangularize it if necessary.
+ U = _remainder_matrix_power_triu(T, t)
+ if Z is not None:
+ ZH = np.conjugate(Z).T
+ return Z.dot(U).dot(ZH)
+ else:
+ return U
+
+
+def _fractional_matrix_power(A, p):
+ """
+ Compute the fractional power of a matrix.
+
+ See the fractional_matrix_power docstring in matfuncs.py for more info.
+
+ """
+ A = np.asarray(A)
+ if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+ raise ValueError('expected a square matrix')
+ if p == int(p):
+ return np.linalg.matrix_power(A, int(p))
+ # Compute singular values.
+ s = svdvals(A)
+ # Inverse scaling and squaring cannot deal with a singular matrix,
+ # because the process of repeatedly taking square roots
+ # would not converge to the identity matrix.
+ if s[-1]:
+ # Compute the condition number relative to matrix inversion,
+ # and use this to decide between floor(p) and ceil(p).
+ k2 = s[0] / s[-1]
+ p1 = p - np.floor(p)
+ p2 = p - np.ceil(p)
+ if p1 * k2 ** (1 - p1) <= -p2 * k2:
+ a = int(np.floor(p))
+ b = p1
+ else:
+ a = int(np.ceil(p))
+ b = p2
+ try:
+ R = _remainder_matrix_power(A, b)
+ Q = np.linalg.matrix_power(A, a)
+ return Q.dot(R)
+ except np.linalg.LinAlgError:
+ pass
+ # If p is negative then we are going to give up.
+ # If p is non-negative then we can fall back to generic funm.
+ if p < 0:
+ X = np.empty_like(A)
+ X.fill(np.nan)
+ return X
+ else:
+ p1 = p - np.floor(p)
+ a = int(np.floor(p))
+ b = p1
+ R, info = funm(A, lambda x: pow(x, b), disp=False)
+ Q = np.linalg.matrix_power(A, a)
+ return Q.dot(R)
+
+
+def _logm_triu(T):
+ """
+ Compute matrix logarithm of an upper triangular matrix.
+
+ The matrix logarithm is the inverse of
+ expm: expm(logm(`T`)) == `T`
+
+ Parameters
+ ----------
+ T : (N, N) array_like
+ Upper triangular matrix whose logarithm to evaluate
+
+ Returns
+ -------
+ logm : (N, N) ndarray
+ Matrix logarithm of `T`
+
+ References
+ ----------
+ .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+ "Improved Inverse Scaling and Squaring Algorithms
+ for the Matrix Logarithm."
+ SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+ ISSN 1095-7197
+
+ .. [2] Nicholas J. Higham (2008)
+ "Functions of Matrices: Theory and Computation"
+ ISBN 978-0-898716-46-7
+
+    .. [3] Nicholas J. Higham and Lijing Lin (2011)
+ "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+ SIAM Journal on Matrix Analysis and Applications,
+ 32 (3). pp. 1056-1078. ISSN 0895-4798
+
+ """
+ T = np.asarray(T)
+ if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
+ raise ValueError('expected an upper triangular square matrix')
+ n, n = T.shape
+
+ # Construct T0 with the appropriate type,
+ # depending on the dtype and the spectrum of T.
+ T_diag = np.diag(T)
+ keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
+ if keep_it_real:
+ T0 = T
+ else:
+ T0 = T.astype(complex)
+
+ # Define bounds given in Table (2.1).
+ theta = (None,
+ 1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
+ 1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
+ 4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
+ 6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
+
+ R, s, m = _inverse_squaring_helper(T0, theta)
+
+ # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
+ # This requires the nodes and weights
+ # corresponding to degree-m Gauss-Legendre quadrature.
+ # These quadrature arrays need to be transformed from the [-1, 1] interval
+ # to the [0, 1] interval.
+ nodes, weights = scipy.special.p_roots(m)
+ nodes = nodes.real
+ if nodes.shape != (m,) or weights.shape != (m,):
+ raise Exception('internal error')
+ nodes = 0.5 + 0.5 * nodes
+ weights = 0.5 * weights
+ ident = np.identity(n)
+ U = np.zeros_like(R)
+ for alpha, beta in zip(weights, nodes):
+ U += solve_triangular(ident + beta*R, alpha*R)
+ U *= np.exp2(s)
+
+ # Skip this step if the principal branch
+ # does not exist at T0; this happens when a diagonal entry of T0
+ # is negative with imaginary part 0.
+ has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
+ if has_principal_branch:
+
+ # Recompute diagonal entries of U.
+ U[np.diag_indices(n)] = np.log(np.diag(T0))
+
+ # Recompute superdiagonal entries of U.
+ # This indexing of this code should be renovated
+ # when newer np.diagonal() becomes available.
+ for i in range(n-1):
+ l1 = T0[i, i]
+ l2 = T0[i+1, i+1]
+ t12 = T0[i, i+1]
+ U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)
+
+ # Return the logm of the upper triangular matrix.
+ if not np.array_equal(U, np.triu(U)):
+ raise Exception('internal inconsistency')
+ return U
+
+
+def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
+ # The input matrix should be upper triangular.
+ # The eps is ad hoc and is not meant to be machine precision.
+ tri_eps = 1e-20
+ abs_diag = np.absolute(np.diag(T))
+ if np.any(abs_diag == 0):
+ exact_singularity_msg = 'The logm input matrix is exactly singular.'
+ warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
+ if not inplace:
+ T = T.copy()
+ n = T.shape[0]
+ for i in range(n):
+ if not T[i, i]:
+ T[i, i] = tri_eps
+ elif np.any(abs_diag < tri_eps):
+ near_singularity_msg = 'The logm input matrix may be nearly singular.'
+ warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
+ return T
+
+
+def _logm(A):
+ """
+ Compute the matrix logarithm.
+
+ See the logm docstring in matfuncs.py for more info.
+
+ Notes
+ -----
+ In this function we look at triangular matrices that are similar
+ to the input matrix. If any diagonal entry of such a triangular matrix
+ is exactly zero then the original matrix is singular.
+ The matrix logarithm does not exist for such matrices,
+ but in such cases we will pretend that the diagonal entries that are zero
+ are actually slightly positive by an ad-hoc amount, in the interest
+ of returning something more useful than NaN. This will cause a warning.
+
+ """
+ A = np.asarray(A)
+ if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+ raise ValueError('expected a square matrix')
+
+ # If the input matrix dtype is integer then copy to a float dtype matrix.
+ if issubclass(A.dtype.type, np.integer):
+ A = np.asarray(A, dtype=float)
+
+ keep_it_real = np.isrealobj(A)
+ try:
+ if np.array_equal(A, np.triu(A)):
+ A = _logm_force_nonsingular_triangular_matrix(A)
+ if np.min(np.diag(A)) < 0:
+ A = A.astype(complex)
+ return _logm_triu(A)
+ else:
+ if keep_it_real:
+ T, Z = schur(A)
+ if not np.array_equal(T, np.triu(T)):
+ T, Z = rsf2csf(T, Z)
+ else:
+ T, Z = schur(A, output='complex')
+ T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
+ U = _logm_triu(T)
+ ZH = np.conjugate(Z).T
+ return Z.dot(U).dot(ZH)
+ except (SqrtmError, LogmError):
+ X = np.empty_like(A)
+ X.fill(np.nan)
+ return X
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_sqrtm.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_sqrtm.py
new file mode 100644
index 0000000..d14f9bf
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_sqrtm.py
@@ -0,0 +1,191 @@
+"""
+Matrix square root for general matrices and for upper triangular matrices.
+
+This module exists to avoid cyclic imports.
+
+"""
+__all__ = ['sqrtm']
+
+import numpy as np
+
+from scipy._lib._util import _asarray_validated
+
+
+# Local imports
+from .misc import norm
+from .lapack import ztrsyl, dtrsyl
+from .decomp_schur import schur, rsf2csf
+
+
+class SqrtmError(np.linalg.LinAlgError):
+ pass
+
+
+from ._matfuncs_sqrtm_triu import within_block_loop
+
+
+def _sqrtm_triu(T, blocksize=64):
+ """
+ Matrix square root of an upper triangular matrix.
+
+ This is a helper function for `sqrtm` and `logm`.
+
+ Parameters
+ ----------
+ T : (N, N) array_like upper triangular
+ Matrix whose square root to evaluate
+ blocksize : int, optional
+ If the blocksize is not degenerate with respect to the
+ size of the input array, then use a blocked algorithm. (Default: 64)
+
+ Returns
+ -------
+ sqrtm : (N, N) ndarray
+ Value of the sqrt function at `T`
+
+ References
+ ----------
+ .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
+           "Blocked Schur Algorithms for Computing the Matrix Square Root",
+ Lecture Notes in Computer Science, 7782. pp. 171-182.
+
+ """
+ T_diag = np.diag(T)
+ keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
+
+ # Cast to complex as necessary + ensure double precision
+ if not keep_it_real:
+ T = np.asarray(T, dtype=np.complex128, order="C")
+ T_diag = np.asarray(T_diag, dtype=np.complex128)
+ else:
+ T = np.asarray(T, dtype=np.float64, order="C")
+ T_diag = np.asarray(T_diag, dtype=np.float64)
+
+ R = np.diag(np.sqrt(T_diag))
+
+ # Compute the number of blocks to use; use at least one block.
+ n, n = T.shape
+ nblocks = max(n // blocksize, 1)
+
+ # Compute the smaller of the two sizes of blocks that
+ # we will actually use, and compute the number of large blocks.
+ bsmall, nlarge = divmod(n, nblocks)
+ blarge = bsmall + 1
+ nsmall = nblocks - nlarge
+ if nsmall * bsmall + nlarge * blarge != n:
+ raise Exception('internal inconsistency')
+
+ # Define the index range covered by each block.
+ start_stop_pairs = []
+ start = 0
+ for count, size in ((nsmall, bsmall), (nlarge, blarge)):
+ for i in range(count):
+ start_stop_pairs.append((start, start + size))
+ start += size
+
+ # Within-block interactions (Cythonized)
+ within_block_loop(R, T, start_stop_pairs, nblocks)
+
+ # Between-block interactions (Cython would give no significant speedup)
+ for j in range(nblocks):
+ jstart, jstop = start_stop_pairs[j]
+ for i in range(j-1, -1, -1):
+ istart, istop = start_stop_pairs[i]
+ S = T[istart:istop, jstart:jstop]
+ if j - i > 1:
+ S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart,
+ jstart:jstop])
+
+ # Invoke LAPACK.
+            # For more details, see the solve_sylvester implementation
+ # and the fortran dtrsyl and ztrsyl docs.
+ Rii = R[istart:istop, istart:istop]
+ Rjj = R[jstart:jstop, jstart:jstop]
+ if keep_it_real:
+ x, scale, info = dtrsyl(Rii, Rjj, S)
+ else:
+ x, scale, info = ztrsyl(Rii, Rjj, S)
+ R[istart:istop, jstart:jstop] = x * scale
+
+ # Return the matrix square root.
+ return R
+
+
+def sqrtm(A, disp=True, blocksize=64):
+ """
+ Matrix square root.
+
+ Parameters
+ ----------
+ A : (N, N) array_like
+ Matrix whose square root to evaluate
+ disp : bool, optional
+ Print warning if error in the result is estimated large
+ instead of returning estimated error. (Default: True)
+ blocksize : integer, optional
+ If the blocksize is not degenerate with respect to the
+ size of the input array, then use a blocked algorithm. (Default: 64)
+
+ Returns
+ -------
+ sqrtm : (N, N) ndarray
+ Value of the sqrt function at `A`
+
+ errest : float
+ (if disp == False)
+
+ Frobenius norm of the estimated error, ||err||_F / ||A||_F
+
+ References
+ ----------
+ .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
+           "Blocked Schur Algorithms for Computing the Matrix Square Root",
+ Lecture Notes in Computer Science, 7782. pp. 171-182.
+
+ Examples
+ --------
+ >>> from scipy.linalg import sqrtm
+ >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+ >>> r = sqrtm(a)
+ >>> r
+ array([[ 0.75592895, 1.13389342],
+ [ 0.37796447, 1.88982237]])
+ >>> r.dot(r)
+ array([[ 1., 3.],
+ [ 1., 4.]])
+
+ """
+ A = _asarray_validated(A, check_finite=True, as_inexact=True)
+ if len(A.shape) != 2:
+ raise ValueError("Non-matrix input to matrix function.")
+ if blocksize < 1:
+ raise ValueError("The blocksize should be at least 1.")
+ keep_it_real = np.isrealobj(A)
+ if keep_it_real:
+ T, Z = schur(A)
+ if not np.array_equal(T, np.triu(T)):
+ T, Z = rsf2csf(T, Z)
+ else:
+ T, Z = schur(A, output='complex')
+ failflag = False
+ try:
+ R = _sqrtm_triu(T, blocksize=blocksize)
+ ZH = np.conjugate(Z).T
+ X = Z.dot(R).dot(ZH)
+ except SqrtmError:
+ failflag = True
+ X = np.empty_like(A)
+ X.fill(np.nan)
+
+ if disp:
+ if failflag:
+ print("Failed to find a square root.")
+ return X
+ else:
+ try:
+ arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
+ except ValueError:
+ # NaNs in matrix
+ arg2 = np.inf
+
+ return X, arg2
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_sqrtm_triu.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_sqrtm_triu.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..71485da
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_matfuncs_sqrtm_triu.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_procrustes.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_procrustes.py
new file mode 100644
index 0000000..b366ee7
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_procrustes.py
@@ -0,0 +1,89 @@
+"""
+Solve the orthogonal Procrustes problem.
+
+"""
+import numpy as np
+from .decomp_svd import svd
+
+
+__all__ = ['orthogonal_procrustes']
+
+
+def orthogonal_procrustes(A, B, check_finite=True):
+ """
+ Compute the matrix solution of the orthogonal Procrustes problem.
+
+ Given matrices A and B of equal shape, find an orthogonal matrix R
+ that most closely maps A to B using the algorithm given in [1]_.
+
+ Parameters
+ ----------
+ A : (M, N) array_like
+ Matrix to be mapped.
+ B : (M, N) array_like
+ Target matrix.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ R : (N, N) ndarray
+ The matrix solution of the orthogonal Procrustes problem.
+ Minimizes the Frobenius norm of ``(A @ R) - B``, subject to
+ ``R.T @ R = I``.
+ scale : float
+ Sum of the singular values of ``A.T @ B``.
+
+ Raises
+ ------
+ ValueError
+ If the input array shapes don't match or if check_finite is True and
+ the arrays contain Inf or NaN.
+
+ Notes
+ -----
+ Note that unlike higher level Procrustes analyses of spatial data, this
+ function only uses orthogonal transformations like rotations and
+ reflections, and it does not use scaling or translation.
+
+ .. versionadded:: 0.15.0
+
+ References
+ ----------
+ .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal
+ Procrustes problem", Psychometrica -- Vol. 31, No. 1, March, 1996.
+
+ Examples
+ --------
+ >>> from scipy.linalg import orthogonal_procrustes
+ >>> A = np.array([[ 2, 0, 1], [-2, 0, 0]])
+
+ Flip the order of columns and check for the anti-diagonal mapping
+
+ >>> R, sca = orthogonal_procrustes(A, np.fliplr(A))
+ >>> R
+ array([[-5.34384992e-17, 0.00000000e+00, 1.00000000e+00],
+ [ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
+ [ 1.00000000e+00, 0.00000000e+00, -7.85941422e-17]])
+ >>> sca
+ 9.0
+
+ """
+ if check_finite:
+ A = np.asarray_chkfinite(A)
+ B = np.asarray_chkfinite(B)
+ else:
+ A = np.asanyarray(A)
+ B = np.asanyarray(B)
+ if A.ndim != 2:
+ raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
+ if A.shape != B.shape:
+ raise ValueError('the shapes of A and B differ (%s vs %s)' % (
+ A.shape, B.shape))
+ # Be clever with transposes, with the intention to save memory.
+ u, w, vt = svd(B.T.dot(A).T)
+ R = u.dot(vt)
+ scale = w.sum()
+ return R, scale
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_sketches.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_sketches.py
new file mode 100644
index 0000000..727870e
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_sketches.py
@@ -0,0 +1,166 @@
+""" Sketching-based Matrix Computations """
+
+# Author: Jordi Montes
+# August 28, 2017
+
+import numpy as np
+
+from scipy._lib._util import check_random_state, rng_integers
+from scipy.sparse import csc_matrix
+
+__all__ = ['clarkson_woodruff_transform']
+
+
+def cwt_matrix(n_rows, n_columns, seed=None):
+    r"""
+ Generate a matrix S which represents a Clarkson-Woodruff transform.
+
+ Given the desired size of matrix, the method returns a matrix S of size
+ (n_rows, n_columns) where each column has all the entries set to 0
+ except for one position which has been randomly set to +1 or -1 with
+ equal probability.
+
+ Parameters
+ ----------
+ n_rows: int
+ Number of rows of S
+ n_columns: int
+ Number of columns of S
+ seed : None or int or `numpy.random.RandomState` instance, optional
+ This parameter defines the ``RandomState`` object to use for drawing
+ random variates.
+ If None (or ``np.random``), the global ``np.random`` state is used.
+ If integer, it is used to seed the local ``RandomState`` instance.
+ Default is None.
+
+ Returns
+ -------
+ S : (n_rows, n_columns) csc_matrix
+ The returned matrix has ``n_columns`` nonzero entries.
+
+ Notes
+ -----
+ Given a matrix A, with probability at least 9/10,
+ .. math:: \|SA\| = (1 \pm \epsilon)\|A\|
+ Where the error epsilon is related to the size of S.
+ """
+ rng = check_random_state(seed)
+ rows = rng_integers(rng, 0, n_rows, n_columns)
+ cols = np.arange(n_columns+1)
+ signs = rng.choice([1, -1], n_columns)
+ S = csc_matrix((signs, rows, cols),shape=(n_rows, n_columns))
+ return S
+
+
+def clarkson_woodruff_transform(input_matrix, sketch_size, seed=None):
+    r"""
+ Applies a Clarkson-Woodruff Transform/sketch to the input matrix.
+
+ Given an input_matrix ``A`` of size ``(n, d)``, compute a matrix ``A'`` of
+ size (sketch_size, d) so that
+
+ .. math:: \|Ax\| \approx \|A'x\|
+
+ with high probability via the Clarkson-Woodruff Transform, otherwise
+ known as the CountSketch matrix.
+
+ Parameters
+ ----------
+ input_matrix: array_like
+ Input matrix, of shape ``(n, d)``.
+ sketch_size: int
+ Number of rows for the sketch.
+ seed : None or int or `numpy.random.RandomState` instance, optional
+ This parameter defines the ``RandomState`` object to use for drawing
+ random variates.
+ If None (or ``np.random``), the global ``np.random`` state is used.
+ If integer, it is used to seed the local ``RandomState`` instance.
+ Default is None.
+
+ Returns
+ -------
+ A' : array_like
+ Sketch of the input matrix ``A``, of size ``(sketch_size, d)``.
+
+ Notes
+ -----
+ To make the statement
+
+ .. math:: \|Ax\| \approx \|A'x\|
+
+ precise, observe the following result which is adapted from the
+ proof of Theorem 14 of [2]_ via Markov's Inequality. If we have
+ a sketch size ``sketch_size=k`` which is at least
+
+ .. math:: k \geq \frac{2}{\epsilon^2\delta}
+
+ Then for any fixed vector ``x``,
+
+ .. math:: \|Ax\| = (1\pm\epsilon)\|A'x\|
+
+ with probability at least one minus delta.
+
+ This implementation takes advantage of sparsity: computing
+ a sketch takes time proportional to ``A.nnz``. Data ``A`` which
+ is in ``scipy.sparse.csc_matrix`` format gives the quickest
+ computation time for sparse input.
+
+ >>> from scipy import linalg
+ >>> from scipy import sparse
+ >>> n_rows, n_columns, density, sketch_n_rows = 15000, 100, 0.01, 200
+ >>> A = sparse.rand(n_rows, n_columns, density=density, format='csc')
+ >>> B = sparse.rand(n_rows, n_columns, density=density, format='csr')
+ >>> C = sparse.rand(n_rows, n_columns, density=density, format='coo')
+ >>> D = np.random.randn(n_rows, n_columns)
+ >>> SA = linalg.clarkson_woodruff_transform(A, sketch_n_rows) # fastest
+ >>> SB = linalg.clarkson_woodruff_transform(B, sketch_n_rows) # fast
+ >>> SC = linalg.clarkson_woodruff_transform(C, sketch_n_rows) # slower
+ >>> SD = linalg.clarkson_woodruff_transform(D, sketch_n_rows) # slowest
+
+ That said, this method does perform well on dense inputs, just slower
+ on a relative scale.
+
+ Examples
+ --------
+ Given a big dense matrix ``A``:
+
+ >>> from scipy import linalg
+ >>> n_rows, n_columns, sketch_n_rows = 15000, 100, 200
+ >>> A = np.random.randn(n_rows, n_columns)
+ >>> sketch = linalg.clarkson_woodruff_transform(A, sketch_n_rows)
+ >>> sketch.shape
+ (200, 100)
+ >>> norm_A = np.linalg.norm(A)
+ >>> norm_sketch = np.linalg.norm(sketch)
+
+ Now with high probability, the true norm ``norm_A`` is close to
+ the sketched norm ``norm_sketch`` in absolute value.
+
+ Similarly, applying our sketch preserves the solution to a linear
+ regression of :math:`\min \|Ax - b\|`.
+
+ >>> from scipy import linalg
+ >>> n_rows, n_columns, sketch_n_rows = 15000, 100, 200
+ >>> A = np.random.randn(n_rows, n_columns)
+ >>> b = np.random.randn(n_rows)
+    >>> x, _, _, _ = np.linalg.lstsq(A, b, rcond=None)
+ >>> Ab = np.hstack((A, b.reshape(-1,1)))
+ >>> SAb = linalg.clarkson_woodruff_transform(Ab, sketch_n_rows)
+ >>> SA, Sb = SAb[:,:-1], SAb[:,-1]
+    >>> x_sketched, _, _, _ = np.linalg.lstsq(SA, Sb, rcond=None)
+
+ As with the matrix norm example, ``np.linalg.norm(A @ x - b)``
+ is close to ``np.linalg.norm(A @ x_sketched - b)`` with high
+ probability.
+
+ References
+ ----------
+ .. [1] Kenneth L. Clarkson and David P. Woodruff. Low rank approximation and
+ regression in input sparsity time. In STOC, 2013.
+
+ .. [2] David P. Woodruff. Sketching as a tool for numerical linear algebra.
+ In Foundations and Trends in Theoretical Computer Science, 2014.
+
+ """
+ S = cwt_matrix(sketch_size, input_matrix.shape[0], seed)
+ return S.dot(input_matrix)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_solve_toeplitz.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_solve_toeplitz.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..7083b50
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_solve_toeplitz.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_solvers.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_solvers.py
new file mode 100644
index 0000000..995147f
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_solvers.py
@@ -0,0 +1,842 @@
+"""Matrix equation solver routines"""
+# Author: Jeffrey Armstrong
+# February 24, 2012
+
+# Modified: Chad Fulton
+# June 19, 2014
+
+# Modified: Ilhan Polat
+# September 13, 2016
+
+import warnings
+import numpy as np
+from numpy.linalg import inv, LinAlgError, norm, cond, svd
+
+from .basic import solve, solve_triangular, matrix_balance
+from .lapack import get_lapack_funcs
+from .decomp_schur import schur
+from .decomp_lu import lu
+from .decomp_qr import qr
+from ._decomp_qz import ordqz
+from .decomp import _asarray_validated
+from .special_matrices import kron, block_diag
+
+__all__ = ['solve_sylvester',
+ 'solve_continuous_lyapunov', 'solve_discrete_lyapunov',
+ 'solve_lyapunov',
+ 'solve_continuous_are', 'solve_discrete_are']
+
+
+def solve_sylvester(a, b, q):
+ """
+ Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ Leading matrix of the Sylvester equation
+ b : (N, N) array_like
+ Trailing matrix of the Sylvester equation
+ q : (M, N) array_like
+ Right-hand side
+
+ Returns
+ -------
+ x : (M, N) ndarray
+ The solution to the Sylvester equation.
+
+ Raises
+ ------
+ LinAlgError
+ If solution was not found
+
+ Notes
+ -----
+ Computes a solution to the Sylvester matrix equation via the Bartels-
+ Stewart algorithm. The A and B matrices first undergo Schur
+ decompositions. The resulting matrices are used to construct an
+ alternative Sylvester equation (``RY + YS^T = F``) where the R and S
+ matrices are in quasi-triangular form (or, when R, S or F are complex,
+ triangular form). The simplified equation is then solved using
+ ``*TRSYL`` from LAPACK directly.
+
+ .. versionadded:: 0.11.0
+
+ Examples
+ --------
+ Given `a`, `b`, and `q` solve for `x`:
+
+ >>> from scipy import linalg
+ >>> a = np.array([[-3, -2, 0], [-1, -1, 3], [3, -5, -1]])
+ >>> b = np.array([[1]])
+ >>> q = np.array([[1],[2],[3]])
+ >>> x = linalg.solve_sylvester(a, b, q)
+ >>> x
+ array([[ 0.0625],
+ [-0.5625],
+ [ 0.6875]])
+ >>> np.allclose(a.dot(x) + x.dot(b), q)
+ True
+
+ """
+
+ # Compute the Schur decomposition form of a
+ r, u = schur(a, output='real')
+
+ # Compute the Schur decomposition of b
+ s, v = schur(b.conj().transpose(), output='real')
+
+ # Construct f = u'*q*v
+ f = np.dot(np.dot(u.conj().transpose(), q), v)
+
+ # Call the Sylvester equation solver
+ trsyl, = get_lapack_funcs(('trsyl',), (r, s, f))
+ if trsyl is None:
+ raise RuntimeError('LAPACK implementation does not contain a proper '
+ 'Sylvester equation solver (TRSYL)')
+ y, scale, info = trsyl(r, s, f, tranb='C')
+
+ y = scale*y
+
+ if info < 0:
+ raise LinAlgError("Illegal value encountered in "
+ "the %d term" % (-info,))
+
+ return np.dot(np.dot(u, y), v.conj().transpose())
+
+
+def solve_continuous_lyapunov(a, q):
+ """
+ Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`.
+
+ Uses the Bartels-Stewart algorithm to find :math:`X`.
+
+ Parameters
+ ----------
+ a : array_like
+ A square matrix
+
+ q : array_like
+ Right-hand side square matrix
+
+ Returns
+ -------
+ x : ndarray
+ Solution to the continuous Lyapunov equation
+
+ See Also
+ --------
+ solve_discrete_lyapunov : computes the solution to the discrete-time
+ Lyapunov equation
+ solve_sylvester : computes the solution to the Sylvester equation
+
+ Notes
+ -----
+ The continuous Lyapunov equation is a special form of the Sylvester
+ equation, hence this solver relies on LAPACK routine ?TRSYL.
+
+ .. versionadded:: 0.11.0
+
+ Examples
+ --------
+ Given `a` and `q` solve for `x`:
+
+ >>> from scipy import linalg
+ >>> a = np.array([[-3, -2, 0], [-1, -1, 0], [0, -5, -1]])
+ >>> b = np.array([2, 4, -1])
+ >>> q = np.eye(3)
+ >>> x = linalg.solve_continuous_lyapunov(a, q)
+ >>> x
+ array([[ -0.75 , 0.875 , -3.75 ],
+ [ 0.875 , -1.375 , 5.3125],
+ [ -3.75 , 5.3125, -27.0625]])
+ >>> np.allclose(a.dot(x) + x.dot(a.T), q)
+ True
+ """
+
+ a = np.atleast_2d(_asarray_validated(a, check_finite=True))
+ q = np.atleast_2d(_asarray_validated(q, check_finite=True))
+
+ r_or_c = float
+
+ for ind, _ in enumerate((a, q)):
+ if np.iscomplexobj(_):
+ r_or_c = complex
+
+ if not np.equal(*_.shape):
+ raise ValueError("Matrix {} should be square.".format("aq"[ind]))
+
+ # Shape consistency check
+ if a.shape != q.shape:
+ raise ValueError("Matrix a and q should have the same shape.")
+
+ # Compute the Schur decomposition form of a
+ r, u = schur(a, output='real')
+
+ # Construct f = u'*q*u
+ f = u.conj().T.dot(q.dot(u))
+
+ # Call the Sylvester equation solver
+ trsyl = get_lapack_funcs('trsyl', (r, f))
+
+ dtype_string = 'T' if r_or_c == float else 'C'
+ y, scale, info = trsyl(r, r, f, tranb=dtype_string)
+
+ if info < 0:
+ raise ValueError('?TRSYL exited with the internal error '
+ '"illegal value in argument number {}.". See '
+ 'LAPACK documentation for the ?TRSYL error codes.'
+ ''.format(-info))
+ elif info == 1:
+ warnings.warn('Input "a" has an eigenvalue pair whose sum is '
+ 'very close to or exactly zero. The solution is '
+ 'obtained via perturbing the coefficients.',
+ RuntimeWarning)
+ y *= scale
+
+ return u.dot(y).dot(u.conj().T)
+
+
+# For backwards compatibility, keep the old name
+solve_lyapunov = solve_continuous_lyapunov
+
+
+def _solve_discrete_lyapunov_direct(a, q):
+ """
+ Solves the discrete Lyapunov equation directly.
+
+ This function is called by the `solve_discrete_lyapunov` function with
+ `method=direct`. It is not supposed to be called directly.
+ """
+
+ lhs = kron(a, a.conj())
+ lhs = np.eye(lhs.shape[0]) - lhs
+ x = solve(lhs, q.flatten())
+
+ return np.reshape(x, q.shape)
+
+
+def _solve_discrete_lyapunov_bilinear(a, q):
+ """
+ Solves the discrete Lyapunov equation using a bilinear transformation.
+
+ This function is called by the `solve_discrete_lyapunov` function with
+ `method=bilinear`. It is not supposed to be called directly.
+ """
+ eye = np.eye(a.shape[0])
+ aH = a.conj().transpose()
+ aHI_inv = inv(aH + eye)
+ b = np.dot(aH - eye, aHI_inv)
+ c = 2*np.dot(np.dot(inv(a + eye), q), aHI_inv)
+ return solve_lyapunov(b.conj().transpose(), -c)
+
+
+def solve_discrete_lyapunov(a, q, method=None):
+ """
+ Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`.
+
+ Parameters
+ ----------
+ a, q : (M, M) array_like
+ Square matrices corresponding to A and Q in the equation
+ above respectively. Must have the same shape.
+
+ method : {'direct', 'bilinear'}, optional
+ Type of solver.
+
+ If not given, chosen to be ``direct`` if ``M`` is less than 10 and
+ ``bilinear`` otherwise.
+
+ Returns
+ -------
+ x : ndarray
+ Solution to the discrete Lyapunov equation
+
+ See Also
+ --------
+ solve_continuous_lyapunov : computes the solution to the continuous-time
+ Lyapunov equation
+
+ Notes
+ -----
+ This section describes the available solvers that can be selected by the
+ 'method' parameter. The default method is *direct* if ``M`` is less than 10
+ and ``bilinear`` otherwise.
+
+ Method *direct* uses a direct analytical solution to the discrete Lyapunov
+ equation. The algorithm is given in, for example, [1]_. However, it requires
+ the linear solution of a system with dimension :math:`M^2` so that
+ performance degrades rapidly for even moderately sized matrices.
+
+ Method *bilinear* uses a bilinear transformation to convert the discrete
+ Lyapunov equation to a continuous Lyapunov equation :math:`(BX+XB'=-C)`
+ where :math:`B=(A-I)(A+I)^{-1}` and
+ :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be
+ efficiently solved since it is a special case of a Sylvester equation.
+ The transformation algorithm is from Popov (1964) as described in [2]_.
+
+ .. versionadded:: 0.11.0
+
+ References
+ ----------
+ .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton
+ University Press, 1994. 265. Print.
+ http://doc1.lbfl.li/aca/FLMF037168.pdf
+ .. [2] Gajic, Z., and M.T.J. Qureshi. 2008.
+ Lyapunov Matrix Equation in System Stability and Control.
+ Dover Books on Engineering Series. Dover Publications.
+
+ Examples
+ --------
+ Given `a` and `q` solve for `x`:
+
+ >>> from scipy import linalg
+ >>> a = np.array([[0.2, 0.5],[0.7, -0.9]])
+ >>> q = np.eye(2)
+ >>> x = linalg.solve_discrete_lyapunov(a, q)
+ >>> x
+ array([[ 0.70872893, 1.43518822],
+ [ 1.43518822, -2.4266315 ]])
+ >>> np.allclose(a.dot(x).dot(a.T)-x, -q)
+ True
+
+ """
+ a = np.asarray(a)
+ q = np.asarray(q)
+ if method is None:
+ # Select automatically based on size of matrices
+ if a.shape[0] >= 10:
+ method = 'bilinear'
+ else:
+ method = 'direct'
+
+ meth = method.lower()
+
+ if meth == 'direct':
+ x = _solve_discrete_lyapunov_direct(a, q)
+ elif meth == 'bilinear':
+ x = _solve_discrete_lyapunov_bilinear(a, q)
+ else:
+ raise ValueError('Unknown solver %s' % method)
+
+ return x
+
+
+def solve_continuous_are(a, b, q, r, e=None, s=None, balanced=True):
+ r"""
+ Solves the continuous-time algebraic Riccati equation (CARE).
+
+ The CARE is defined as
+
+ .. math::
+
+ X A + A^H X - X B R^{-1} B^H X + Q = 0
+
+ The limitations for a solution to exist are :
+
+ * All eigenvalues of :math:`A` on the right half plane, should be
+ controllable.
+
+ * The associated hamiltonian pencil (See Notes), should have
+ eigenvalues sufficiently away from the imaginary axis.
+
+ Moreover, if ``e`` or ``s`` is not precisely ``None``, then the
+ generalized version of CARE
+
+ .. math::
+
+ E^HXA + A^HXE - (E^HXB + S) R^{-1} (B^HXE + S^H) + Q = 0
+
+ is solved. When omitted, ``e`` is assumed to be the identity and ``s``
+ is assumed to be the zero matrix with sizes compatible with ``a`` and
+ ``b``, respectively.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ Square matrix
+ b : (M, N) array_like
+ Input
+ q : (M, M) array_like
+ Input
+ r : (N, N) array_like
+ Nonsingular square matrix
+ e : (M, M) array_like, optional
+ Nonsingular square matrix
+ s : (M, N) array_like, optional
+ Input
+ balanced : bool, optional
+ The boolean that indicates whether a balancing step is performed
+ on the data. The default is set to True.
+
+ Returns
+ -------
+ x : (M, M) ndarray
+ Solution to the continuous-time algebraic Riccati equation.
+
+ Raises
+ ------
+ LinAlgError
+ For cases where the stable subspace of the pencil could not be
+ isolated. See Notes section and the references for details.
+
+ See Also
+ --------
+ solve_discrete_are : Solves the discrete-time algebraic Riccati equation
+
+ Notes
+ -----
+ The equation is solved by forming the extended hamiltonian matrix pencil,
+ as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
+
+ [ A 0 B ] [ E 0 0 ]
+ [-Q -A^H -S ] - \lambda * [ 0 E^H 0 ]
+ [ S^H B^H R ] [ 0 0 0 ]
+
+ and using a QZ decomposition method.
+
+ In this algorithm, the fail conditions are linked to the symmetry
+ of the product :math:`U_2 U_1^{-1}` and condition number of
+ :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
+ eigenvectors spanning the stable subspace with 2-m rows and partitioned
+ into two m-row matrices. See [1]_ and [2]_ for more details.
+
+ In order to improve the QZ decomposition accuracy, the pencil goes
+ through a balancing step where the sum of absolute values of
+ :math:`H` and :math:`J` entries (after removing the diagonal entries of
+ the sum) is balanced following the recipe given in [3]_.
+
+ .. versionadded:: 0.11.0
+
+ References
+ ----------
+ .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving
+ Riccati Equations.", SIAM Journal on Scientific and Statistical
+ Computing, Vol.2(2), :doi:`10.1137/0902010`
+
+ .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
+ Equations.", Massachusetts Institute of Technology. Laboratory for
+ Information and Decision Systems. LIDS-R ; 859. Available online :
+ http://hdl.handle.net/1721.1/1301
+
+ .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
+ SIAM J. Sci. Comput., 2001, Vol.22(5), :doi:`10.1137/S1064827500367993`
+
+ Examples
+ --------
+ Given `a`, `b`, `q`, and `r` solve for `x`:
+
+ >>> from scipy import linalg
+ >>> a = np.array([[4, 3], [-4.5, -3.5]])
+ >>> b = np.array([[1], [-1]])
+ >>> q = np.array([[9, 6], [6, 4.]])
+ >>> r = 1
+ >>> x = linalg.solve_continuous_are(a, b, q, r)
+ >>> x
+ array([[ 21.72792206, 14.48528137],
+ [ 14.48528137, 9.65685425]])
+ >>> np.allclose(a.T.dot(x) + x.dot(a)-x.dot(b).dot(b.T).dot(x), -q)
+ True
+
+ """
+
+ # Validate input arguments
+ a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
+ a, b, q, r, e, s, 'care')
+
+ H = np.empty((2*m+n, 2*m+n), dtype=r_or_c)
+ H[:m, :m] = a
+ H[:m, m:2*m] = 0.
+ H[:m, 2*m:] = b
+ H[m:2*m, :m] = -q
+ H[m:2*m, m:2*m] = -a.conj().T
+ H[m:2*m, 2*m:] = 0. if s is None else -s
+ H[2*m:, :m] = 0. if s is None else s.conj().T
+ H[2*m:, m:2*m] = b.conj().T
+ H[2*m:, 2*m:] = r
+
+ if gen_are and e is not None:
+ J = block_diag(e, e.conj().T, np.zeros_like(r, dtype=r_or_c))
+ else:
+ J = block_diag(np.eye(2*m), np.zeros_like(r, dtype=r_or_c))
+
+ if balanced:
+ # xGEBAL does not remove the diagonals before scaling. Also
+ # to avoid destroying the Symplectic structure, we follow Ref.3
+ M = np.abs(H) + np.abs(J)
+ M[np.diag_indices_from(M)] = 0.
+ _, (sca, _) = matrix_balance(M, separate=1, permute=0)
+ # do we need to bother?
+ if not np.allclose(sca, np.ones_like(sca)):
+ # Now impose diag(D,inv(D)) from Benner where D is
+ # square root of s_i/s_(n+i) for i=0,....
+ sca = np.log2(sca)
+ # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !!
+ s = np.round((sca[m:2*m] - sca[:m])/2)
+ sca = 2 ** np.r_[s, -s, sca[2*m:]]
+ # Elementwise multiplication via broadcasting.
+ elwisescale = sca[:, None] * np.reciprocal(sca)
+ H *= elwisescale
+ J *= elwisescale
+
+ # Deflate the pencil to 2m x 2m ala Ref.1, eq.(55)
+ q, r = qr(H[:, -n:])
+ H = q[:, n:].conj().T.dot(H[:, :2*m])
+ J = q[:2*m, n:].conj().T.dot(J[:2*m, :2*m])
+
+ # Decide on which output type is needed for QZ
+ out_str = 'real' if r_or_c == float else 'complex'
+
+ _, _, _, _, _, u = ordqz(H, J, sort='lhp', overwrite_a=True,
+ overwrite_b=True, check_finite=False,
+ output=out_str)
+
+ # Get the relevant parts of the stable subspace basis
+ if e is not None:
+ u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
+ u00 = u[:m, :m]
+ u10 = u[m:, :m]
+
+    # Solve via back-substitution after checking the condition of u00
+ up, ul, uu = lu(u00)
+ if 1/cond(uu) < np.spacing(1.):
+ raise LinAlgError('Failed to find a finite solution.')
+
+ # Exploit the triangular structure
+ x = solve_triangular(ul.conj().T,
+ solve_triangular(uu.conj().T,
+ u10.conj().T,
+ lower=True),
+ unit_diagonal=True,
+ ).conj().T.dot(up.conj().T)
+ if balanced:
+ x *= sca[:m, None] * sca[:m]
+
+ # Check the deviation from symmetry for lack of success
+ # See proof of Thm.5 item 3 in [2]
+ u_sym = u00.conj().T.dot(u10)
+ n_u_sym = norm(u_sym, 1)
+ u_sym = u_sym - u_sym.conj().T
+ sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
+
+ if norm(u_sym, 1) > sym_threshold:
+ raise LinAlgError('The associated Hamiltonian pencil has eigenvalues '
+ 'too close to the imaginary axis')
+
+ return (x + x.conj().T)/2
+
+
+def solve_discrete_are(a, b, q, r, e=None, s=None, balanced=True):
+ r"""
+ Solves the discrete-time algebraic Riccati equation (DARE).
+
+ The DARE is defined as
+
+ .. math::
+
+ A^HXA - X - (A^HXB) (R + B^HXB)^{-1} (B^HXA) + Q = 0
+
+ The limitations for a solution to exist are :
+
+ * All eigenvalues of :math:`A` outside the unit disc, should be
+ controllable.
+
+ * The associated symplectic pencil (See Notes), should have
+ eigenvalues sufficiently away from the unit circle.
+
+ Moreover, if ``e`` and ``s`` are not both precisely ``None``, then the
+ generalized version of DARE
+
+ .. math::
+
+ A^HXA - E^HXE - (A^HXB+S) (R+B^HXB)^{-1} (B^HXA+S^H) + Q = 0
+
+ is solved. When omitted, ``e`` is assumed to be the identity and ``s``
+ is assumed to be the zero matrix.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ Square matrix
+ b : (M, N) array_like
+ Input
+ q : (M, M) array_like
+ Input
+ r : (N, N) array_like
+ Square matrix
+ e : (M, M) array_like, optional
+ Nonsingular square matrix
+ s : (M, N) array_like, optional
+ Input
+ balanced : bool
+ The boolean that indicates whether a balancing step is performed
+ on the data. The default is set to True.
+
+ Returns
+ -------
+ x : (M, M) ndarray
+ Solution to the discrete algebraic Riccati equation.
+
+ Raises
+ ------
+ LinAlgError
+ For cases where the stable subspace of the pencil could not be
+ isolated. See Notes section and the references for details.
+
+ See Also
+ --------
+ solve_continuous_are : Solves the continuous algebraic Riccati equation
+
+ Notes
+ -----
+ The equation is solved by forming the extended symplectic matrix pencil,
+ as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
+
+ [ A 0 B ] [ E 0 B ]
+ [ -Q E^H -S ] - \lambda * [ 0 A^H 0 ]
+ [ S^H 0 R ] [ 0 -B^H 0 ]
+
+ and using a QZ decomposition method.
+
+ In this algorithm, the fail conditions are linked to the symmetry
+ of the product :math:`U_2 U_1^{-1}` and condition number of
+ :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
+ eigenvectors spanning the stable subspace with 2-m rows and partitioned
+ into two m-row matrices. See [1]_ and [2]_ for more details.
+
+ In order to improve the QZ decomposition accuracy, the pencil goes
+ through a balancing step where the sum of absolute values of
+ :math:`H` and :math:`J` rows/cols (after removing the diagonal entries)
+ is balanced following the recipe given in [3]_. If the data has small
+ numerical noise, balancing may amplify their effects and some clean up
+ is required.
+
+ .. versionadded:: 0.11.0
+
+ References
+ ----------
+ .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving
+ Riccati Equations.", SIAM Journal on Scientific and Statistical
+ Computing, Vol.2(2), :doi:`10.1137/0902010`
+
+ .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
+ Equations.", Massachusetts Institute of Technology. Laboratory for
+ Information and Decision Systems. LIDS-R ; 859. Available online :
+ http://hdl.handle.net/1721.1/1301
+
+ .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
+ SIAM J. Sci. Comput., 2001, Vol.22(5), :doi:`10.1137/S1064827500367993`
+
+ Examples
+ --------
+ Given `a`, `b`, `q`, and `r` solve for `x`:
+
+ >>> from scipy import linalg as la
+ >>> a = np.array([[0, 1], [0, -1]])
+ >>> b = np.array([[1, 0], [2, 1]])
+ >>> q = np.array([[-4, -4], [-4, 7]])
+ >>> r = np.array([[9, 3], [3, 1]])
+ >>> x = la.solve_discrete_are(a, b, q, r)
+ >>> x
+ array([[-4., -4.],
+ [-4., 7.]])
+ >>> R = la.solve(r + b.T.dot(x).dot(b), b.T.dot(x).dot(a))
+ >>> np.allclose(a.T.dot(x).dot(a) - x - a.T.dot(x).dot(b).dot(R), -q)
+ True
+
+ """
+
+ # Validate input arguments
+ a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
+ a, b, q, r, e, s, 'dare')
+
+ # Form the matrix pencil
+ H = np.zeros((2*m+n, 2*m+n), dtype=r_or_c)
+ H[:m, :m] = a
+ H[:m, 2*m:] = b
+ H[m:2*m, :m] = -q
+ H[m:2*m, m:2*m] = np.eye(m) if e is None else e.conj().T
+ H[m:2*m, 2*m:] = 0. if s is None else -s
+ H[2*m:, :m] = 0. if s is None else s.conj().T
+ H[2*m:, 2*m:] = r
+
+ J = np.zeros_like(H, dtype=r_or_c)
+ J[:m, :m] = np.eye(m) if e is None else e
+ J[m:2*m, m:2*m] = a.conj().T
+ J[2*m:, m:2*m] = -b.conj().T
+
+ if balanced:
+ # xGEBAL does not remove the diagonals before scaling. Also
+ # to avoid destroying the Symplectic structure, we follow Ref.3
+ M = np.abs(H) + np.abs(J)
+ M[np.diag_indices_from(M)] = 0.
+ _, (sca, _) = matrix_balance(M, separate=1, permute=0)
+ # do we need to bother?
+ if not np.allclose(sca, np.ones_like(sca)):
+ # Now impose diag(D,inv(D)) from Benner where D is
+ # square root of s_i/s_(n+i) for i=0,....
+ sca = np.log2(sca)
+ # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !!
+ s = np.round((sca[m:2*m] - sca[:m])/2)
+ sca = 2 ** np.r_[s, -s, sca[2*m:]]
+ # Elementwise multiplication via broadcasting.
+ elwisescale = sca[:, None] * np.reciprocal(sca)
+ H *= elwisescale
+ J *= elwisescale
+
+ # Deflate the pencil by the R column ala Ref.1
+ q_of_qr, _ = qr(H[:, -n:])
+ H = q_of_qr[:, n:].conj().T.dot(H[:, :2*m])
+ J = q_of_qr[:, n:].conj().T.dot(J[:, :2*m])
+
+ # Decide on which output type is needed for QZ
+ out_str = 'real' if r_or_c == float else 'complex'
+
+ _, _, _, _, _, u = ordqz(H, J, sort='iuc',
+ overwrite_a=True,
+ overwrite_b=True,
+ check_finite=False,
+ output=out_str)
+
+ # Get the relevant parts of the stable subspace basis
+ if e is not None:
+ u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
+ u00 = u[:m, :m]
+ u10 = u[m:, :m]
+
+    # Solve via back-substitution after checking the condition of u00
+ up, ul, uu = lu(u00)
+
+ if 1/cond(uu) < np.spacing(1.):
+ raise LinAlgError('Failed to find a finite solution.')
+
+ # Exploit the triangular structure
+ x = solve_triangular(ul.conj().T,
+ solve_triangular(uu.conj().T,
+ u10.conj().T,
+ lower=True),
+ unit_diagonal=True,
+ ).conj().T.dot(up.conj().T)
+ if balanced:
+ x *= sca[:m, None] * sca[:m]
+
+ # Check the deviation from symmetry for lack of success
+ # See proof of Thm.5 item 3 in [2]
+ u_sym = u00.conj().T.dot(u10)
+ n_u_sym = norm(u_sym, 1)
+ u_sym = u_sym - u_sym.conj().T
+ sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
+
+ if norm(u_sym, 1) > sym_threshold:
+        raise LinAlgError('The associated symplectic pencil has eigenvalues '
+ 'too close to the unit circle')
+
+ return (x + x.conj().T)/2
+
+
+def _are_validate_args(a, b, q, r, e, s, eq_type='care'):
+ """
+ A helper function to validate the arguments supplied to the
+ Riccati equation solvers. Any discrepancy found in the input
+ matrices leads to a ``ValueError`` exception.
+
+ Essentially, it performs:
+
+ - a check whether the input is free of NaN and Infs
+ - a pass for the data through ``numpy.atleast_2d()``
+ - squareness check of the relevant arrays
+ - shape consistency check of the arrays
+ - singularity check of the relevant arrays
+ - symmetricity check of the relevant matrices
+ - a check whether the regular or the generalized version is asked.
+
+ This function is used by ``solve_continuous_are`` and
+ ``solve_discrete_are``.
+
+ Parameters
+ ----------
+ a, b, q, r, e, s : array_like
+ Input data
+ eq_type : str
+ Accepted arguments are 'care' and 'dare'.
+
+ Returns
+ -------
+ a, b, q, r, e, s : ndarray
+ Regularized input data
+ m, n : int
+ shape of the problem
+ r_or_c : type
+ Data type of the problem, returns float or complex
+ gen_or_not : bool
+ Type of the equation, True for generalized and False for regular ARE.
+
+ """
+
+ if not eq_type.lower() in ('dare', 'care'):
+ raise ValueError("Equation type unknown. "
+ "Only 'care' and 'dare' is understood")
+
+ a = np.atleast_2d(_asarray_validated(a, check_finite=True))
+ b = np.atleast_2d(_asarray_validated(b, check_finite=True))
+ q = np.atleast_2d(_asarray_validated(q, check_finite=True))
+ r = np.atleast_2d(_asarray_validated(r, check_finite=True))
+
+ # Get the correct data types otherwise NumPy complains
+ # about pushing complex numbers into real arrays.
+ r_or_c = complex if np.iscomplexobj(b) else float
+
+ for ind, mat in enumerate((a, q, r)):
+ if np.iscomplexobj(mat):
+ r_or_c = complex
+
+ if not np.equal(*mat.shape):
+ raise ValueError("Matrix {} should be square.".format("aqr"[ind]))
+
+ # Shape consistency checks
+ m, n = b.shape
+ if m != a.shape[0]:
+ raise ValueError("Matrix a and b should have the same number of rows.")
+ if m != q.shape[0]:
+ raise ValueError("Matrix a and q should have the same shape.")
+ if n != r.shape[0]:
+ raise ValueError("Matrix b and r should have the same number of cols.")
+
+ # Check if the data matrices q, r are (sufficiently) hermitian
+ for ind, mat in enumerate((q, r)):
+ if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1))*100:
+ raise ValueError("Matrix {} should be symmetric/hermitian."
+ "".format("qr"[ind]))
+
+ # Continuous time ARE should have a nonsingular r matrix.
+ if eq_type == 'care':
+ min_sv = svd(r, compute_uv=False)[-1]
+ if min_sv == 0. or min_sv < np.spacing(1.)*norm(r, 1):
+ raise ValueError('Matrix r is numerically singular.')
+
+ # Check if the generalized case is required with omitted arguments
+ # perform late shape checking etc.
+ generalized_case = e is not None or s is not None
+
+ if generalized_case:
+ if e is not None:
+ e = np.atleast_2d(_asarray_validated(e, check_finite=True))
+ if not np.equal(*e.shape):
+ raise ValueError("Matrix e should be square.")
+ if m != e.shape[0]:
+ raise ValueError("Matrix a and e should have the same shape.")
+ # numpy.linalg.cond doesn't check for exact zeros and
+ # emits a runtime warning. Hence the following manual check.
+ min_sv = svd(e, compute_uv=False)[-1]
+ if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1):
+ raise ValueError('Matrix e is numerically singular.')
+ if np.iscomplexobj(e):
+ r_or_c = complex
+ if s is not None:
+ s = np.atleast_2d(_asarray_validated(s, check_finite=True))
+ if s.shape != b.shape:
+ raise ValueError("Matrix b and s should have the same shape.")
+ if np.iscomplexobj(s):
+ r_or_c = complex
+
+ return a, b, q, r, e, s, m, n, r_or_c, generalized_case
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_testutils.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_testutils.py
new file mode 100644
index 0000000..7438598
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/_testutils.py
@@ -0,0 +1,63 @@
+import numpy as np
+
+
+class _FakeMatrix(object):
+ def __init__(self, data):
+ self._data = data
+ self.__array_interface__ = data.__array_interface__
+
+
+class _FakeMatrix2(object):
+ def __init__(self, data):
+ self._data = data
+
+ def __array__(self):
+ return self._data
+
+
+def _get_array(shape, dtype):
+ """
+ Get a test array of given shape and data type.
+ Returned NxN matrices are posdef, and 2xN are banded-posdef.
+
+ """
+ if len(shape) == 2 and shape[0] == 2:
+ # yield a banded positive definite one
+ x = np.zeros(shape, dtype=dtype)
+ x[0, 1:] = -1
+ x[1] = 2
+ return x
+ elif len(shape) == 2 and shape[0] == shape[1]:
+ # always yield a positive definite matrix
+ x = np.zeros(shape, dtype=dtype)
+ j = np.arange(shape[0])
+ x[j, j] = 2
+ x[j[:-1], j[:-1]+1] = -1
+ x[j[:-1]+1, j[:-1]] = -1
+ return x
+ else:
+ np.random.seed(1234)
+ return np.random.randn(*shape).astype(dtype)
+
+
def _id(x):
    """Identity passthrough; used as the no-op 'wrapper' alongside the
    _FakeMatrix wrappers so plain ndarrays go through the same test loop."""
    return x
+
+
def assert_no_overwrite(call, shapes, dtypes=None):
    """Check that ``call`` leaves its array arguments unmodified.

    ``call`` is invoked with one freshly-generated array per entry of
    ``shapes``, for every combination of dtype, memory order and
    array-like wrapper; afterwards each argument is compared against a
    pristine copy.
    """
    if dtypes is None:
        dtypes = [np.float32, np.float64, np.complex64, np.complex128]

    wrappers = [_id, _FakeMatrix, _FakeMatrix2]
    for dtype in dtypes:
        for order in ["C", "F"]:
            for wrap in wrappers:
                originals = [_get_array(shape, dtype) for shape in shapes]
                args = [wrap(arr.copy(order)) for arr in originals]
                call(*args)
                msg = "call modified inputs [%r, %r]" % (dtype, wrap)
                for got, expected in zip(args, originals):
                    np.testing.assert_equal(got, expected, err_msg=msg)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/basic.py
new file mode 100644
index 0000000..74b5edc
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/basic.py
@@ -0,0 +1,1831 @@
+#
+# Author: Pearu Peterson, March 2002
+#
+# w/ additions by Travis Oliphant, March 2002
+# and Jake Vanderplas, August 2012
+
+from warnings import warn
+import numpy as np
+from numpy import atleast_1d, atleast_2d
+from .flinalg import get_flinalg_funcs
+from .lapack import get_lapack_funcs, _compute_lwork
+from .misc import LinAlgError, _datacopied, LinAlgWarning
+from .decomp import _asarray_validated
+from . import decomp, decomp_svd
+from ._solve_toeplitz import levinson
+
+__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
+ 'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
+ 'pinv', 'pinv2', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
+
+
+# Linear equations
+def _solve_check(n, info, lamch=None, rcond=None):
+ """ Check arguments during the different steps of the solution phase """
+ if info < 0:
+ raise ValueError('LAPACK reported an illegal value in {}-th argument'
+ '.'.format(-info))
+ elif 0 < info:
+ raise LinAlgError('Matrix is singular.')
+
+ if lamch is None:
+ return
+ E = lamch('E')
+ if rcond < E:
+ warn('Ill-conditioned matrix (rcond={:.6g}): '
+ 'result may not be accurate.'.format(rcond),
+ LinAlgWarning, stacklevel=3)
+
+
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
          overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
          transposed=False):
    """
    Solves the linear equation set ``a * x = b`` for the unknown ``x``
    for square ``a`` matrix.

    If the data matrix is known to be a particular type then supplying the
    corresponding string to ``assume_a`` key chooses the dedicated solver.
    The available options are

    =================== ========
    generic matrix       'gen'
    symmetric            'sym'
    hermitian            'her'
    positive definite    'pos'
    =================== ========

    If omitted, ``'gen'`` is the default structure.

    The datatype of the arrays define which solver is called regardless
    of the values. In other words, even when the complex array entries have
    precisely zero imaginary parts, the complex solver will be called based
    on the data type of the array.

    Parameters
    ----------
    a : (N, N) array_like
        Square input data
    b : (N, NRHS) array_like
        Input data for the right hand side.
    sym_pos : bool, optional
        Assume `a` is symmetric and positive definite. This key is deprecated
        and assume_a = 'pos' keyword is recommended instead. The functionality
        is the same. It will be removed in the future.
    lower : bool, optional
        If True, only the data contained in the lower triangle of `a`. Default
        is to use upper triangle. (ignored for ``'gen'``)
    overwrite_a : bool, optional
        Allow overwriting data in `a` (may enhance performance).
        Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    assume_a : str, optional
        Valid entries are explained above.
    transposed : bool, optional
        If True, ``a^T x = b`` for real matrices, raises `NotImplementedError`
        for complex matrices (only for True).

    Returns
    -------
    x : (N, NRHS) ndarray
        The solution array.

    Raises
    ------
    ValueError
        If size mismatches detected or input a is not square.
    LinAlgError
        If the matrix is singular.
    LinAlgWarning
        If an ill-conditioned input a is detected.
    NotImplementedError
        If transposed is True and input a is a complex matrix.

    Examples
    --------
    Given `a` and `b`, solve for `x`:

    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
    >>> b = np.array([2, 4, -1])
    >>> from scipy import linalg
    >>> x = linalg.solve(a, b)
    >>> x
    array([ 2., -2.,  9.])
    >>> np.dot(a, x) == b
    array([ True,  True,  True], dtype=bool)

    Notes
    -----
    If the input b matrix is a 1-D array with N elements, when supplied
    together with an NxN input a, it is assumed as a valid column vector
    despite the apparent size mismatch. This is compatible with the
    numpy.dot() behavior and the returned result is still 1-D array.

    The generic, symmetric, Hermitian and positive definite solutions are
    obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
    LAPACK respectively.
    """
    # Flags for 1-D or N-D right-hand side
    b_is_1D = False

    a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
    b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
    n = a1.shape[0]

    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)

    if a1.shape[0] != a1.shape[1]:
        raise ValueError('Input a needs to be a square matrix.')

    if n != b1.shape[0]:
        # Last chance to catch 1x1 scalar a and 1-D b arrays
        if not (n == 1 and b1.size != 0):
            raise ValueError('Input b has to have same number of rows as '
                             'input a')

    # accommodate empty arrays
    if b1.size == 0:
        return np.asfortranarray(b1.copy())

    # regularize 1-D b arrays to 2D
    if b1.ndim == 1:
        if n == 1:
            # 1x1 system: treat b as a single row so shapes line up.
            b1 = b1[None, :]
        else:
            b1 = b1[:, None]
        b_is_1D = True

    # Backwards compatibility - old keyword.
    # NOTE(review): sym_pos silently aliases assume_a='pos' here; no
    # DeprecationWarning is emitted despite the docstring's deprecation note.
    if sym_pos:
        assume_a = 'pos'

    if assume_a not in ('gen', 'sym', 'her', 'pos'):
        raise ValueError('{} is not a recognized matrix structure'
                         ''.format(assume_a))

    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)

    # Get the correct lamch function.
    # The LAMCH functions only exists for S and D
    # So for complex values we have to convert to real/double.
    if a1.dtype.char in 'fF':  # single precision
        lamch = get_lapack_funcs('lamch', dtype='f')
    else:
        lamch = get_lapack_funcs('lamch', dtype='d')

    # Currently we do not have the other forms of the norm calculators
    #   lansy, lanpo, lanhe.
    # However, in any case they only reduce computations slightly...
    lange = get_lapack_funcs('lange', (a1,))

    # Since the I-norm and 1-norm are the same for symmetric matrices
    # we can collect them all in this one call
    # Note however, that when issuing 'gen' and form!='none', then
    # the I-norm should be used
    if transposed:
        trans = 1
        norm = 'I'
        if np.iscomplexobj(a1):
            raise NotImplementedError('scipy.linalg.solve can currently '
                                      'not solve a^T x = b or a^H x = b '
                                      'for complex matrices.')
    else:
        trans = 0
        norm = '1'

    anorm = lange(norm, a1)

    # Generalized case 'gesv'
    # NOTE(review): `trans` is only forwarded to LAPACK on this generic path;
    # the sym/her/pos drivers below never receive it (their calls take no
    # trans argument), so `transposed` has no effect there — confirm intended.
    if assume_a == 'gen':
        gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
                                               (a1, b1))
        lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
        _solve_check(n, info)
        x, info = getrs(lu, ipvt, b1,
                        trans=trans, overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = gecon(lu, anorm, norm=norm)
    # Hermitian case 'hesv'
    elif assume_a == 'her':
        hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
                                                 'hesv_lwork'), (a1, b1))
        lwork = _compute_lwork(hesv_lw, n, lower)
        lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = hecon(lu, ipvt, anorm)
    # Symmetric case 'sysv'
    elif assume_a == 'sym':
        sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
                                                 'sysv_lwork'), (a1, b1))
        lwork = _compute_lwork(sysv_lw, n, lower)
        lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = sycon(lu, ipvt, anorm)
    # Positive definite case 'posv'
    else:
        pocon, posv = get_lapack_funcs(('pocon', 'posv'),
                                       (a1, b1))
        lu, x, info = posv(a1, b1, lower=lower,
                           overwrite_a=overwrite_a,
                           overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = pocon(lu, anorm)

    # Second pass with the condition estimate: warns if ill-conditioned.
    _solve_check(n, info, lamch, rcond)

    if b_is_1D:
        # Restore the 1-D shape of the original right-hand side.
        x = x.ravel()

    return x
+
+
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, debug=None, check_finite=True):
    """
    Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.

    Parameters
    ----------
    a : (M, M) array_like
        A triangular matrix
    b : (M,) or (M, N) array_like
        Right-hand side matrix in `a x = b`
    lower : bool, optional
        Use only data contained in the lower triangle of `a`.
        Default is to use upper triangle.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Type of system to solve:

        ======== =========
        trans    system
        ======== =========
        0 or 'N' a x  = b
        1 or 'T' a^T x = b
        2 or 'C' a^H x = b
        ======== =========
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and
        will not be referenced.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system `a x = b`. Shape of return matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular

    Notes
    -----
    .. versionadded:: 0.9.0

    Examples
    --------
    Solve the lower triangular system a x = b, where::

             [3  0  0  0]       [4]
        a =  [2  1  0  0]   b = [2]
             [1  0  1  0]       [4]
             [1  1  1  1]       [2]

    >>> from scipy.linalg import solve_triangular
    >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
    >>> b = np.array([4, 2, 4, 2])
    >>> x = solve_triangular(a, b, lower=True)
    >>> x
    array([ 1.33333333, -0.66666667,  2.66666667, -1.33333333])
    >>> a.dot(x)  # Check the result
    array([ 4.,  2.,  4.,  2.])

    """

    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in the future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)

    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    if a1.shape[0] != b1.shape[0]:
        raise ValueError('shapes of a {} and b {} are incompatible'
                         .format(a1.shape, b1.shape))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    # NOTE(review): legacy behavior — the deprecated `debug` flag still
    # triggers this diagnostic print after the warning above.
    if debug:
        print('solve:overwrite_b=', overwrite_b)
    # Normalize the string forms 'N'/'T'/'C' to the integer codes 0/1/2.
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
    if a1.flags.f_contiguous or trans == 2:
        x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
                        trans=trans, unitdiag=unit_diagonal)
    else:
        # transposed system is solved since trtrs expects Fortran ordering
        # (trans here can only be 0 or 1: the trans == 2 case was taken by
        # the branch above, so `not trans` just flips 0 <-> 1)
        x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower,
                        trans=not trans, unitdiag=unit_diagonal)

    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
                          (info-1))
    raise ValueError('illegal value in %dth argument of internal trtrs' %
                     (-info))
+
+
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 debug=None, check_finite=True):
    """
    Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in `ab` using the matrix diagonal ordered form::

        ab[u + i - j, j] == a[i,j]

    Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : (`l` + `u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Returned shape depends on the
        shape of `b`.

    Examples
    --------
    Solve the banded system a x = b, where::

            [5  2 -1  0  0]       [0]
            [1  4  2 -1  0]       [1]
        a = [0  1  3  2 -1]   b = [2]
            [0  0  1  2  2]       [2]
            [0  0  0  1  1]       [3]

    There is one nonzero diagonal below the main diagonal (l = 1), and
    two above (u = 2). The diagonal banded form of the matrix is::

             [*  * -1 -1 -1]
        ab = [*  2  2  2  2]
             [5  4  3  2  1]
             [1  1  1  1  *]

    >>> from scipy.linalg import solve_banded
    >>> ab = np.array([[0,  0, -1, -1, -1],
    ...                [0,  2,  2,  2,  2],
    ...                [5,  4,  3,  2,  1],
    ...                [1,  1,  1,  1,  0]])
    >>> b = np.array([0, 1, 2, 2, 3])
    >>> x = solve_banded((1, 2), ab, b)
    >>> x
    array([-2.37288136,  3.93220339, -4.        ,  4.3559322 , -1.3559322 ])

    """

    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in the future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)

    a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
    b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    (nlower, nupper) = l_and_u
    if nlower + nupper + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper "
                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
                         "(%d)" % (nlower + nupper + 1, ab.shape[0]))

    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[-1] == 1:
        # 1x1 system: plain scalar division.
        # NOTE(review): this reads a1[1, 0], i.e. it presumes ab has at least
        # two stored diagonals (l + u >= 1); an ab of shape (1, 1) (l = u = 0)
        # would raise IndexError here — confirm intended.
        b2 = np.array(b1, copy=(not overwrite_b))
        b2 /= a1[1, 0]
        return b2
    if nlower == nupper == 1:
        # Tridiagonal: use the dedicated ?gtsv driver on the three diagonals.
        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
        du = a1[0, 1:]
        d = a1[1, :]
        dl = a1[2, :-1]
        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
                                   overwrite_ab, overwrite_b)
    else:
        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
        # ?gbsv needs nlower extra rows above the band for LU fill-in,
        # hence the 2*nlower term in the workspace height.
        a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
        a2[nlower:, :] = a1
        lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
                                overwrite_b=overwrite_b)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal '
                     'gbsv/gtsv' % -info)
+
+
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve equation a x = b. a is Hermitian positive-definite banded matrix.

    The matrix a is stored in `ab` either in lower diagonal or upper
    diagonal ordered form:

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)

    Example of `ab` (shape of a is (6, 6), `u` =2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    ab : (`u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Shape of return matches shape
        of `b`.

    Examples
    --------
    Solve the banded system A x = b, where::

            [ 4  2 -1  0  0  0]       [1]
            [ 2  5  2 -1  0  0]       [2]
        A = [-1  2  6  2 -1  0]   b = [2]
            [ 0 -1  2  7  2 -1]       [3]
            [ 0  0 -1  2  8  2]       [3]
            [ 0  0  0 -1  2  9]       [3]

    >>> from scipy.linalg import solveh_banded

    `ab` contains the main diagonal and the nonzero diagonals below the
    main diagonal. That is, we use the lower form:

    >>> ab = np.array([[ 4,  5,  6,  7, 8, 9],
    ...                [ 2,  2,  2,  2, 2, 0],
    ...                [-1, -1, -1, -1, 0, 0]])
    >>> b = np.array([1, 2, 2, 3, 3, 3])
    >>> x = solveh_banded(ab, b, lower=True)
    >>> x
    array([ 0.03431373,  0.45938375,  0.05602241,  0.47759104,  0.17577031,
            0.34733894])


    Solve the Hermitian banded system H x = b, where::

            [ 8   2-1j   0     0  ]        [ 1  ]
        H = [2+1j  5     1j    0  ]    b = [1+1j]
            [ 0   -1j    9   -2-1j]        [1-2j]
            [ 0    0   -2+1j   6  ]        [ 0  ]

    In this example, we put the upper diagonals in the array `hb`:

    >>> hb = np.array([[0, 2-1j, 1j, -2-1j],
    ...                [8,  5,    9,   6  ]])
    >>> b = np.array([1, 1+1j, 1-2j, 0])
    >>> x = solveh_banded(hb, b)
    >>> x
    array([ 0.07318536-0.02939412j,  0.11877624+0.17696461j,
            0.10077984-0.23035393j, -0.00479904-0.09358128j])

    """
    a1 = _asarray_validated(ab, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")

    overwrite_b = overwrite_b or _datacopied(b1, b)
    overwrite_ab = overwrite_ab or _datacopied(a1, ab)

    if a1.shape[0] == 2:
        # Two stored diagonals -> the matrix is Hermitian tridiagonal; use
        # ?ptsv, which takes the (real) main diagonal d and off-diagonal e.
        ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
        if lower:
            d = a1[0, :].real
            e = a1[1, :-1]
        else:
            d = a1[1, :].real
            # Upper form stores the superdiagonal; conjugate to get the
            # subdiagonal expected by the driver.
            e = a1[0, 1:].conj()
        d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
                              overwrite_b)
    else:
        # General banded positive definite case: Cholesky-based ?pbsv.
        pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
        c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
                          overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%dth leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal '
                         'pbsv' % -info)
    return x
+
+
def solve_toeplitz(c_or_cr, b, check_finite=True):
    """Solve a Toeplitz system using Levinson Recursion

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
        of ``r``, it will be converted to a 1-D array.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (result entirely NaNs) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``T x = b``. Shape of return matches shape
        of `b`.

    See Also
    --------
    toeplitz : Toeplitz matrix

    Notes
    -----
    The solution is computed using Levinson-Durbin recursion, which is faster
    than generic least-squares methods, but can be less numerically stable.

    Examples
    --------
    Solve the Toeplitz system T x = b, where::

            [ 1 -1 -2 -3]       [1]
        T = [ 3  1 -1 -2]   b = [2]
            [ 6  3  1 -1]       [2]
            [10  6  3  1]       [5]

    To specify the Toeplitz matrix, only the first column and the first
    row are needed.

    >>> c = np.array([1, 3, 6, 10])    # First column of T
    >>> r = np.array([1, -1, -2, -3])  # First row of T
    >>> b = np.array([1, 2, 2, 5])

    >>> from scipy.linalg import solve_toeplitz, toeplitz
    >>> x = solve_toeplitz((c, r), b)
    >>> x
    array([ 1.66666667, -1.        , -2.66666667,  2.33333333])

    Check the result by creating the full Toeplitz matrix and
    multiplying it by `x`. We should get `b`.

    >>> T = toeplitz(c, r)
    >>> T.dot(x)
    array([ 1.,  2.,  2.,  5.])

    """
    # If numerical stability of this algorithm is a problem, a future
    # developer might consider implementing other O(N^2) Toeplitz solvers,
    # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.

    # NOTE(review): _validate_args_for_toeplitz_ops is not in this module's
    # import block; presumably it is defined later in this file — verify.
    r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, b, check_finite, keep_b_shape=True)

    # Form a 1-D array of values to be used in the matrix, containing a
    # reversed copy of r[1:], followed by c.
    vals = np.concatenate((r[-1:0:-1], c))
    if b is None:
        raise ValueError('illegal value, `b` is a required argument')

    if b.ndim == 1:
        x, _ = levinson(vals, np.ascontiguousarray(b))
    else:
        # Solve column by column; Levinson handles one RHS vector at a time.
        x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
                             for i in range(b.shape[1])])
        x = x.reshape(*b_shape)

    return x
+
+
+def _get_axis_len(aname, a, axis):
+ ax = axis
+ if ax < 0:
+ ax += a.ndim
+ if 0 <= ax < a.ndim:
+ return a.shape[ax]
+ raise ValueError("'%saxis' entry is out of bounds" % (aname,))
+
+
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve C x = b for x, where C is a circulant matrix.

    `C` is the circulant matrix associated with the vector `c`.

    The system is solved by doing division in Fourier space. The
    calculation is::

        x = ifft(fft(b) / fft(c))

    where `fft` and `ifft` are the fast Fourier transform and its inverse,
    respectively. For a large vector `c`, this is *much* faster than
    solving the system with the full circulant matrix.

    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        This argument controls how a near singular circulant matrix is
        handled.  If `singular` is "raise" and the circulant matrix is
        near singular, a `LinAlgError` is raised.  If `singular` is
        "lstsq", the least squares solution is returned.  Default is "raise".
    tol : float, optional
        If any eigenvalue of the circulant matrix has an absolute value
        that is less than or equal to `tol`, the matrix is considered to be
        near singular.  If not given, `tol` is set to::

            tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps

        where `abs_eigs` is the array of absolute values of the eigenvalues
        of the circulant matrix.
    caxis : int
        When `c` has dimension greater than 1, it is viewed as a collection
        of circulant vectors.  In this case, `caxis` is the axis of `c` that
        holds the vectors of circulant coefficients.
    baxis : int
        When `b` has dimension greater than 1, it is viewed as a collection
        of vectors.  In this case, `baxis` is the axis of `b` that holds the
        right-hand side vectors.
    outaxis : int
        When `c` or `b` are multidimensional, the value returned by
        `solve_circulant` is multidimensional.  In this case, `outaxis` is
        the axis of the result that holds the solution vectors.

    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.

    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular.

    See Also
    --------
    circulant : circulant matrix

    Notes
    -----
    For a 1-D vector `c` with length `m`, and an array `b`
    with shape ``(m, ...)``,

        solve_circulant(c, b)

    returns the same result as

        solve(circulant(c), b)

    where `solve` and `circulant` are from `scipy.linalg`.

    .. versionadded:: 0.16.0

    Examples
    --------
    >>> from scipy.linalg import solve_circulant, solve, circulant, lstsq

    >>> c = np.array([2, 2, 4])
    >>> b = np.array([1, 2, 3])
    >>> solve_circulant(c, b)
    array([ 0.75, -0.25,  0.25])

    Compare that result to solving the system with `scipy.linalg.solve`:

    >>> solve(circulant(c), b)
    array([ 0.75, -0.25,  0.25])

    A singular example:

    >>> c = np.array([1, 1, 0, 0])
    >>> b = np.array([1, 2, 3, 4])

    Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`.  For the
    least square solution, use the option ``singular='lstsq'``:

    >>> solve_circulant(c, b, singular='lstsq')
    array([ 0.25,  1.25,  2.25,  1.25])

    Compare to `scipy.linalg.lstsq`:

    >>> x, resid, rnk, s = lstsq(circulant(c), b)
    >>> x
    array([ 0.25,  1.25,  2.25,  1.25])

    A broadcasting example:

    Suppose we have the vectors of two circulant matrices stored in an array
    with shape (2, 5), and three `b` vectors stored in an array with shape
    (3, 5).  For example,

    >>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
    >>> b = np.arange(15).reshape(-1, 5)

    We want to solve all combinations of circulant matrices and `b` vectors,
    with the result stored in an array with shape (2, 3, 5).  When we
    disregard the axes of `c` and `b` that hold the vectors of coefficients,
    the shapes of the collections are (2,) and (3,), respectively, which are
    not compatible for broadcasting.  To have a broadcast result with shape
    (2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
    shape (2, 1, 5).  The last dimension holds the coefficients of the
    circulant matrices, so when we call `solve_circulant`, we can use the
    default ``caxis=-1``.  The coefficients of the `b` vectors are in the last
    dimension of the array `b`, so we use ``baxis=-1``.  If we use the
    default `outaxis`, the result will have shape (5, 2, 3), so we'll use
    ``outaxis=-1`` to put the solution vectors in the last dimension.

    >>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
    >>> x.shape
    (2, 3, 5)
    >>> np.set_printoptions(precision=3)  # For compact output of numbers.
    >>> x
    array([[[-0.118,  0.22 ,  1.277, -0.142,  0.302],
            [ 0.651,  0.989,  2.046,  0.627,  1.072],
            [ 1.42 ,  1.758,  2.816,  1.396,  1.841]],
           [[ 0.401,  0.304,  0.694, -0.867,  0.377],
            [ 0.856,  0.758,  1.149, -0.412,  0.831],
            [ 1.31 ,  1.213,  1.603,  0.042,  1.286]]])

    Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):

    >>> solve_circulant(c[1], b[1, :])
    array([ 0.856,  0.758,  1.149, -0.412,  0.831])

    """
    c = np.atleast_1d(c)
    nc = _get_axis_len("c", c, caxis)
    b = np.atleast_1d(b)
    nb = _get_axis_len("b", b, baxis)
    if nc != nb:
        raise ValueError('Shapes of c {} and b {} are incompatible'
                         .format(c.shape, b.shape))

    # Move the coefficient axis last before transforming.
    # NOTE(review): np.rollaxis is the legacy API; np.moveaxis is the
    # modern equivalent, kept here as-is for behavior parity.
    fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
    abs_fc = np.abs(fc)
    if tol is None:
        # This is the same tolerance as used in np.linalg.matrix_rank.
        tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
        if tol.shape != ():
            # Keep a trailing length-1 axis so tol broadcasts against abs_fc.
            tol.shape = tol.shape + (1,)
        else:
            tol = np.atleast_1d(tol)

    near_zeros = abs_fc <= tol
    is_near_singular = np.any(near_zeros)
    if is_near_singular:
        if singular == 'raise':
            raise LinAlgError("near singular circulant matrix.")
        else:
            # Replace the small values with 1 to avoid errors in the
            # division fb/fc below.
            fc[near_zeros] = 1

    fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)

    q = fb / fc

    if is_near_singular:
        # `near_zeros` is a boolean array, same shape as `c`, that is
        # True where `fc` is (near) zero.  `q` is the broadcasted result
        # of fb / fc, so to set the values of `q` to 0 where `fc` is near
        # zero, we use a mask that is the broadcast result of an array
        # of True values shaped like `b` with `near_zeros`.
        mask = np.ones_like(b, dtype=bool) & near_zeros
        q[mask] = 0

    x = np.fft.ifft(q, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        # Real inputs -> the exact answer is real; drop roundoff imaginaries.
        x = x.real
    if outaxis != -1:
        x = np.rollaxis(x, -1, outaxis)
    return x
+
+
+# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular.
    ValueError
        If `a` is not square, or not 2D.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    # XXX: I found no advantage or disadvantage of using finv.
#     finv, = get_flinalg_funcs(('inv',),(a1,))
#     if finv is not None:
#         a_inv,info = finv(a1,overwrite_a=overwrite_a)
#         if info==0:
#             return a_inv
#         if info>0: raise LinAlgError, "singular matrix"
#         if info<0: raise ValueError('illegal value in %d-th argument of '
#                                     'internal inv.getrf|getri'%(-info))
    # LU-factorize, then invert from the factors: ?getrf followed by ?getri.
    getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
                                                  'getri_lwork'),
                                                 (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info == 0:
        lwork = _compute_lwork(getri_lwork, a1.shape[0])

        # XXX: the following line fixes curious SEGFAULT when
        # benchmarking 500x500 matrix inverse. This seems to
        # be a bug in LAPACK ?getri routine because if lwork is
        # minimal (when using lwork[0] instead of lwork[1]) then
        # all tests pass. Further investigation is required if
        # more such SEGFAULTs occur.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
+
+
+# Determinant
+
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a matrix

    The determinant of a square matrix is a value derived arithmetically
    from the coefficients of the matrix.

    The determinant for a 3x3 matrix, for example, is computed as follows::

        a    b    c
        d    e    f = A
        g    h    i

        det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h

    Parameters
    ----------
    a : (M, M) array_like
        A square matrix.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    det : float or complex
        Determinant of `a`.

    Notes
    -----
    The determinant is computed via LU factorization, LAPACK routine z/dgetrf.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    0.0
    >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    3.0

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    # The Fortran-wrapped 'det' helper performs the LU factorization and
    # multiplies the diagonal of U (with the pivot sign) internally.
    fdet, = get_flinalg_funcs(('det',), (a1,))
    a_det, info = fdet(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det
+
+
+# Linear Least Squares
+def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
+ check_finite=True, lapack_driver=None):
+ """
+ Compute least-squares solution to equation Ax = b.
+
+ Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
+
+ Parameters
+ ----------
+ a : (M, N) array_like
+ Left-hand side array
+ b : (M,) or (M, K) array_like
+ Right hand side array
+ cond : float, optional
+ Cutoff for 'small' singular values; used to determine effective
+ rank of a. Singular values smaller than
+ ``rcond * largest_singular_value`` are considered zero.
+ overwrite_a : bool, optional
+ Discard data in `a` (may enhance performance). Default is False.
+ overwrite_b : bool, optional
+ Discard data in `b` (may enhance performance). Default is False.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ lapack_driver : str, optional
+ Which LAPACK driver is used to solve the least-squares problem.
+ Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
+ (``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
+ faster on many problems. ``'gelss'`` was used historically. It is
+ generally slow but uses less memory.
+
+ .. versionadded:: 0.17.0
+
+ Returns
+ -------
+ x : (N,) or (N, K) ndarray
+ Least-squares solution. Return shape matches shape of `b`.
+ residues : (K,) ndarray or float
+ Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
+ ``ndim(A) == n`` (returns a scalar if b is 1-D). Otherwise a
+ (0,)-shaped array is returned.
+ rank : int
+ Effective rank of `a`.
+ s : (min(M, N),) ndarray or None
+ Singular values of `a`. The condition number of a is
+ ``abs(s[0] / s[-1])``.
+
+ Raises
+ ------
+ LinAlgError
+ If computation does not converge.
+
+ ValueError
+ When parameters are not compatible.
+
+ See Also
+ --------
+ scipy.optimize.nnls : linear least squares with non-negativity constraint
+
+ Notes
+ -----
+ When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
+ array and `s` is always ``None``.
+
+ Examples
+ --------
+ >>> from scipy.linalg import lstsq
+ >>> import matplotlib.pyplot as plt
+
+ Suppose we have the following data:
+
+ >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
+ >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
+
+ We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
+ to this data. We first form the "design matrix" M, with a constant
+ column of 1s and a column containing ``x**2``:
+
+ >>> M = x[:, np.newaxis]**[0, 2]
+ >>> M
+ array([[ 1. , 1. ],
+ [ 1. , 6.25],
+ [ 1. , 12.25],
+ [ 1. , 16. ],
+ [ 1. , 25. ],
+ [ 1. , 49. ],
+ [ 1. , 72.25]])
+
+ We want to find the least-squares solution to ``M.dot(p) = y``,
+ where ``p`` is a vector with length 2 that holds the parameters
+ ``a`` and ``b``.
+
+ >>> p, res, rnk, s = lstsq(M, y)
+ >>> p
+ array([ 0.20925829, 0.12013861])
+
+ Plot the data and the fitted curve.
+
+ >>> plt.plot(x, y, 'o', label='data')
+ >>> xx = np.linspace(0, 9, 101)
+ >>> yy = p[0] + p[1]*xx**2
+ >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
+ >>> plt.xlabel('x')
+ >>> plt.ylabel('y')
+ >>> plt.legend(framealpha=1, shadow=True)
+ >>> plt.grid(alpha=0.25)
+ >>> plt.show()
+
+ """
+ a1 = _asarray_validated(a, check_finite=check_finite)
+ b1 = _asarray_validated(b, check_finite=check_finite)
+ if len(a1.shape) != 2:
+ raise ValueError('Input array a should be 2D')
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+ if m != b1.shape[0]:
+ raise ValueError('Shape mismatch: a and b should have the same number'
+ ' of rows ({} != {}).'.format(m, b1.shape[0]))
+ if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK
+ x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
+ if n == 0:
+ residues = np.linalg.norm(b1, axis=0)**2
+ else:
+ residues = np.empty((0,))
+ return x, residues, 0, np.empty((0,))
+
+ driver = lapack_driver
+ if driver is None:
+ driver = lstsq.default_lapack_driver
+ if driver not in ('gelsd', 'gelsy', 'gelss'):
+ raise ValueError('LAPACK driver "%s" is not found' % driver)
+
+ lapack_func, lapack_lwork = get_lapack_funcs((driver,
+ '%s_lwork' % driver),
+ (a1, b1))
+ real_data = True if (lapack_func.dtype.kind == 'f') else False
+
+ if m < n:
+ # need to extend b matrix as it will be filled with
+ # a larger solution matrix
+ if len(b1.shape) == 2:
+ b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
+ b2[:m, :] = b1
+ else:
+ b2 = np.zeros(n, dtype=lapack_func.dtype)
+ b2[:m] = b1
+ b1 = b2
+
+ overwrite_a = overwrite_a or _datacopied(a1, a)
+ overwrite_b = overwrite_b or _datacopied(b1, b)
+
+ if cond is None:
+ cond = np.finfo(lapack_func.dtype).eps
+
+ if driver in ('gelss', 'gelsd'):
+ if driver == 'gelss':
+ lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
+ v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
+ overwrite_a=overwrite_a,
+ overwrite_b=overwrite_b)
+
+ elif driver == 'gelsd':
+ if real_data:
+ lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
+ x, s, rank, info = lapack_func(a1, b1, lwork,
+ iwork, cond, False, False)
+ else: # complex data
+ lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
+ nrhs, cond)
+ x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
+ cond, False, False)
+ if info > 0:
+ raise LinAlgError("SVD did not converge in Linear Least Squares")
+ if info < 0:
+ raise ValueError('illegal value in %d-th argument of internal %s'
+ % (-info, lapack_driver))
+ resids = np.asarray([], dtype=x.dtype)
+ if m > n:
+ x1 = x[:n]
+ if rank == n:
+ resids = np.sum(np.abs(x[n:])**2, axis=0)
+ x = x1
+ return x, resids, rank, s
+
+ elif driver == 'gelsy':
+ lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
+ jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
+ v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
+ lwork, False, False)
+ if info < 0:
+ raise ValueError("illegal value in %d-th argument of internal "
+ "gelsy" % -info)
+ if m > n:
+ x1 = x[:n]
+ x = x1
+ return x, np.array([], x.dtype), rank, None
+
+
+lstsq.default_lapack_driver = 'gelsd'
+
+
+def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
+ """
+ Compute the (Moore-Penrose) pseudo-inverse of a matrix.
+
+ Calculate a generalized inverse of a matrix using a least-squares
+ solver.
+
+ Parameters
+ ----------
+ a : (M, N) array_like
+ Matrix to be pseudo-inverted.
+ cond, rcond : float, optional
+ Cutoff factor for 'small' singular values. In `lstsq`,
+ singular values less than ``cond*largest_singular_value`` will be
+ considered as zero. If both are omitted, the default value
+ ``max(M, N) * eps`` is passed to `lstsq` where ``eps`` is the
+ corresponding machine precision value of the datatype of ``a``.
+
+ .. versionchanged:: 1.3.0
+ Previously the default cutoff value was just `eps` without the
+ factor ``max(M, N)``.
+
+ return_rank : bool, optional
+ if True, return the effective rank of the matrix
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ B : (N, M) ndarray
+ The pseudo-inverse of matrix `a`.
+ rank : int
+ The effective rank of the matrix. Returned if return_rank == True
+
+ Raises
+ ------
+ LinAlgError
+ If computation does not converge.
+
+ Examples
+ --------
+ >>> from scipy import linalg
+ >>> a = np.random.randn(9, 6)
+ >>> B = linalg.pinv(a)
+ >>> np.allclose(a, np.dot(a, np.dot(B, a)))
+ True
+ >>> np.allclose(B, np.dot(B, np.dot(a, B)))
+ True
+
+ """
+ a = _asarray_validated(a, check_finite=check_finite)
+ # If a is sufficiently tall it is cheaper to compute using the transpose
+ trans = a.shape[0] / a.shape[1] >= 1.1
+ b = np.eye(a.shape[1] if trans else a.shape[0], dtype=a.dtype)
+
+ if rcond is not None:
+ cond = rcond
+
+ if cond is None:
+ cond = max(a.shape) * np.spacing(a.real.dtype.type(1))
+
+ x, resids, rank, s = lstsq(a.T if trans else a, b,
+ cond=cond, check_finite=False)
+
+ if return_rank:
+ return (x.T if trans else x), rank
+ else:
+ return x.T if trans else x
+
+
+def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
+    """
+    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
+
+    Calculate a generalized inverse of a matrix using its
+    singular-value decomposition and including all 'large' singular
+    values.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be pseudo-inverted.
+    cond, rcond : float or None
+        Cutoff for 'small' singular values; singular values smaller than this
+        value are considered as zero. If both are omitted, the default value
+        ``max(M,N)*largest_singular_value*eps`` is used where ``eps`` is the
+        machine precision value of the datatype of ``a``.
+
+        .. versionchanged:: 1.3.0
+            Previously the default cutoff value was just ``eps*f`` where ``f``
+            was ``1e3`` for single precision and ``1e6`` for double precision.
+
+    return_rank : bool, optional
+        If True, return the effective rank of the matrix.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    B : (N, M) ndarray
+        The pseudo-inverse of matrix `a`.
+    rank : int
+        The effective rank of the matrix. Returned if `return_rank` is True.
+
+    Raises
+    ------
+    LinAlgError
+        If SVD computation does not converge.
+
+    Examples
+    --------
+    >>> from scipy import linalg
+    >>> a = np.random.randn(9, 6)
+    >>> B = linalg.pinv2(a)
+    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
+    True
+    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
+    True
+
+    """
+    a = _asarray_validated(a, check_finite=check_finite)
+    # Thin SVD: only min(M, N) singular values/vectors are needed.
+    u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
+
+    # `rcond` is an alias that takes precedence over `cond` when supplied.
+    if rcond is not None:
+        cond = rcond
+    if cond in [None, -1]:
+        # Default cutoff: max(M, N) * largest singular value * machine eps.
+        t = u.dtype.char.lower()
+        cond = np.max(s) * max(a.shape) * np.finfo(t).eps
+
+    # Effective rank: number of singular values above the cutoff.
+    rank = np.sum(s > cond)
+
+    # B = V[:rank].conj().T @ diag(1/s[:rank]) @ U[:, :rank].conj().T,
+    # formed by scaling the kept columns of U in place.
+    u = u[:, :rank]
+    u /= s[:rank]
+    B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
+
+    if return_rank:
+        return B, rank
+    else:
+        return B
+
+
+def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
+ check_finite=True):
+ """
+ Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
+
+ Calculate a generalized inverse of a Hermitian or real symmetric matrix
+ using its eigenvalue decomposition and including all eigenvalues with
+ 'large' absolute value.
+
+ Parameters
+ ----------
+ a : (N, N) array_like
+ Real symmetric or complex hermetian matrix to be pseudo-inverted
+ cond, rcond : float or None
+ Cutoff for 'small' singular values; singular values smaller than this
+ value are considered as zero. If both are omitted, the default
+ ``max(M,N)*largest_eigenvalue*eps`` is used where ``eps`` is the
+ machine precision value of the datatype of ``a``.
+
+ .. versionchanged:: 1.3.0
+ Previously the default cutoff value was just ``eps*f`` where ``f``
+ was ``1e3`` for single precision and ``1e6`` for double precision.
+
+ lower : bool, optional
+ Whether the pertinent array data is taken from the lower or upper
+ triangle of `a`. (Default: lower)
+ return_rank : bool, optional
+ If True, return the effective rank of the matrix.
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ B : (N, N) ndarray
+ The pseudo-inverse of matrix `a`.
+ rank : int
+ The effective rank of the matrix. Returned if `return_rank` is True.
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue does not converge
+
+ Examples
+ --------
+ >>> from scipy.linalg import pinvh
+ >>> a = np.random.randn(9, 6)
+ >>> a = np.dot(a, a.T)
+ >>> B = pinvh(a)
+ >>> np.allclose(a, np.dot(a, np.dot(B, a)))
+ True
+ >>> np.allclose(B, np.dot(B, np.dot(a, B)))
+ True
+
+ """
+ a = _asarray_validated(a, check_finite=check_finite)
+ s, u = decomp.eigh(a, lower=lower, check_finite=False)
+
+ if rcond is not None:
+ cond = rcond
+ if cond in [None, -1]:
+ t = u.dtype.char.lower()
+ cond = np.max(np.abs(s)) * max(a.shape) * np.finfo(t).eps
+
+ # For Hermitian matrices, singular values equal abs(eigenvalues)
+ above_cutoff = (abs(s) > cond)
+ psigma_diag = 1.0 / s[above_cutoff]
+ u = u[:, above_cutoff]
+
+ B = np.dot(u * psigma_diag, np.conjugate(u).T)
+
+ if return_rank:
+ return B, len(psigma_diag)
+ else:
+ return B
+
+
+def matrix_balance(A, permute=True, scale=True, separate=False,
+                   overwrite_a=False):
+    """
+    Compute a diagonal similarity transformation for row/column balancing.
+
+    The balancing tries to equalize the row and column 1-norms by applying
+    a similarity transformation such that the magnitude variation of the
+    matrix entries is reflected to the scaling matrices.
+
+    Moreover, if enabled, the matrix is first permuted to isolate the upper
+    triangular parts of the matrix and, again if scaling is also enabled,
+    only the remaining subblocks are subjected to scaling.
+
+    The balanced matrix satisfies the following equality
+
+    .. math::
+
+        B = T^{-1} A T
+
+    The scaling coefficients are approximated to the nearest power of 2
+    to avoid round-off errors.
+
+    Parameters
+    ----------
+    A : (n, n) array_like
+        Square data matrix for the balancing.
+    permute : bool, optional
+        The selector to define whether permutation of A is also performed
+        prior to scaling.
+    scale : bool, optional
+        The selector to turn on and off the scaling. If False, the matrix
+        will not be scaled.
+    separate : bool, optional
+        This switches from returning a full matrix of the transformation
+        to a tuple of two separate 1-D permutation and scaling arrays.
+    overwrite_a : bool, optional
+        This is passed to xGEBAL directly. Essentially, overwrites the result
+        to the data. It might increase the space efficiency. See LAPACK manual
+        for details. This is False by default.
+
+    Returns
+    -------
+    B : (n, n) ndarray
+        Balanced matrix
+    T : (n, n) ndarray
+        A possibly permuted diagonal matrix whose nonzero entries are
+        integer powers of 2 to avoid numerical truncation errors.
+    scale, perm : (n,) ndarray
+        If ``separate`` keyword is set to True then instead of the array
+        ``T`` above, the scaling and the permutation vectors are given
+        separately as a tuple without allocating the full array ``T``.
+
+    Notes
+    -----
+
+    This algorithm is particularly useful for eigenvalue and matrix
+    decompositions and in many cases it is already called by various
+    LAPACK routines.
+
+    The algorithm is based on the well-known technique of [1]_ and has
+    been modified to account for special cases. See [2]_ for details
+    which have been implemented since LAPACK v3.5.0. Before this version
+    there are corner cases where balancing can actually worsen the
+    conditioning. See [3]_ for such examples.
+
+    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
+    balancing.
+
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+    >>> from scipy import linalg
+    >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
+
+    >>> y, permscale = linalg.matrix_balance(x)
+    >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
+    array([ 3.66666667,  0.4995005 ,  0.91312162])
+
+    >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
+    array([ 1.2       ,  1.27041742,  0.92658316])  # may vary
+
+    >>> permscale  # only powers of 2 (0.5 == 2^(-1))
+    array([[  0.5,   0. ,  0. ],  # may vary
+           [  0. ,   1. ,  0. ],
+           [  0. ,   0. ,  1. ]])
+
+    References
+    ----------
+    .. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
+       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
+       Vol.13(4), 1969, :doi:`10.1007/BF02165404`
+
+    .. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
+       eigenvector computation", 2014, :arxiv:`1401.5766`
+
+    .. [3] :  D.S. Watkins. A case where balancing is harmful.
+       Electron. Trans. Numer. Anal, Vol.23, 2006.
+
+    """
+
+    A = np.atleast_2d(_asarray_validated(A, check_finite=True))
+
+    if not np.equal(*A.shape):
+        raise ValueError('The data matrix for balancing should be square.')
+
+    # ?GEBAL returns the balanced matrix B, the index range [lo, hi] of the
+    # scaled subblock, and a combined permutation/scaling vector ps.
+    gebal = get_lapack_funcs(('gebal'), (A,))
+    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
+                                overwrite_a=overwrite_a)
+
+    if info < 0:
+        raise ValueError('xGEBAL exited with the internal error '
+                         '"illegal value in argument number {}.". See '
+                         'LAPACK documentation for the xGEBAL error codes.'
+                         ''.format(-info))
+
+    # Separate the permutations from the scalings and then convert to int
+    # (outside [lo, hi] the entries of ps record permutations, not scales).
+    scaling = np.ones_like(ps, dtype=float)
+    scaling[lo:hi+1] = ps[lo:hi+1]
+
+    # gebal uses 1-indexing
+    ps = ps.astype(int, copy=False) - 1
+    n = A.shape[0]
+    perm = np.arange(n)
+
+    # LAPACK permutes with the ordering n --> hi, then 0--> lo
+    if hi < n:
+        for ind, x in enumerate(ps[hi+1:][::-1], 1):
+            if n-ind == x:
+                continue
+            # Replay each recorded row/column swap to recover the permutation.
+            perm[[x, n-ind]] = perm[[n-ind, x]]
+
+    if lo > 0:
+        for ind, x in enumerate(ps[:lo]):
+            if ind == x:
+                continue
+            perm[[x, ind]] = perm[[ind, x]]
+
+    if separate:
+        return B, (scaling, perm)
+
+    # get the inverse permutation
+    iperm = np.empty_like(perm)
+    iperm[perm] = np.arange(n)
+
+    # Full transformation matrix T: the diagonal scaling with permuted rows.
+    return B, np.diag(scaling)[iperm, :]
+
+
+def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
+                                    enforce_square=True):
+    """Validate arguments and format inputs for toeplitz functions
+
+    Parameters
+    ----------
+    c_or_cr : array_like or tuple of (array_like, array_like)
+        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
+        actual shape of ``c``, it will be converted to a 1-D array. If not
+        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
+        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
+        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
+        of ``r``, it will be converted to a 1-D array.
+    b : (M,) or (M, K) array_like
+        Right-hand side in ``T x = b``.
+    check_finite : bool
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (result entirely NaNs) if the inputs do contain infinities or NaNs.
+    keep_b_shape: bool
+        Whether to convert a (M,) dimensional b into a (M, 1) dimensional
+        matrix.
+    enforce_square: bool, optional
+        If True (default), this verifies that the Toeplitz matrix is square.
+
+    Returns
+    -------
+    r : array
+        1d array corresponding to the first row of the Toeplitz matrix.
+    c: array
+        1d array corresponding to the first column of the Toeplitz matrix.
+    b: array
+        (M,), (M, 1) or (M, K) dimensional array, post validation,
+        corresponding to ``b``.
+    dtype: numpy datatype
+        ``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
+        ``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
+        otherwise, it is ``np.float``.
+    b_shape: tuple
+        Shape of ``b`` after passing it through ``_asarray_validated``.
+
+    """
+
+    if isinstance(c_or_cr, tuple):
+        c, r = c_or_cr
+        c = _asarray_validated(c, check_finite=check_finite).ravel()
+        r = _asarray_validated(r, check_finite=check_finite).ravel()
+    else:
+        # Only c was given: use the Hermitian convention r = conjugate(c).
+        c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
+        r = c.conjugate()
+
+    if b is None:
+        raise ValueError('`b` must be an array, not None.')
+
+    b = _asarray_validated(b, check_finite=check_finite)
+    # Record the validated shape so callers can restore 1-D results later.
+    b_shape = b.shape
+
+    is_not_square = r.shape[0] != c.shape[0]
+    if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
+        raise ValueError('Incompatible dimensions.')
+
+    # Promote all inputs to a common dtype: complex128 if any input is
+    # complex, float64 otherwise.
+    is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
+    dtype = np.complex128 if is_cmplx else np.double
+    r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
+
+    if b.ndim == 1 and not keep_b_shape:
+        # Promote (M,) to (M, 1) unless the caller asked to keep 1-D shape.
+        b = b.reshape(-1, 1)
+    elif b.ndim != 1:
+        # Collapse any trailing dimensions into a single RHS axis.
+        b = b.reshape(b.shape[0], -1)
+
+    return r, c, b, dtype, b_shape
+
+
+def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
+    """Efficient Toeplitz Matrix-Matrix Multiplication using FFT
+
+    This function returns the matrix multiplication between a Toeplitz
+    matrix and a dense matrix.
+
+    The Toeplitz matrix has constant diagonals, with c as its first column
+    and r as its first row. If r is not given, ``r == conjugate(c)`` is
+    assumed.
+
+    Parameters
+    ----------
+    c_or_cr : array_like or tuple of (array_like, array_like)
+        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
+        actual shape of ``c``, it will be converted to a 1-D array. If not
+        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
+        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
+        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
+        of ``r``, it will be converted to a 1-D array.
+    x : (M,) or (M, K) array_like
+        Matrix with which to multiply.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (result entirely NaNs) if the inputs do contain infinities or NaNs.
+    workers : int, optional
+        To pass to scipy.fft.fft and ifft. Maximum number of workers to use
+        for parallel computation. If negative, the value wraps around from
+        ``os.cpu_count()``. See scipy.fft.fft for more details.
+
+    Returns
+    -------
+    T @ x : (M,) or (M, K) ndarray
+        The result of the matrix multiplication ``T @ x``. Shape of return
+        matches shape of `x`.
+
+    See Also
+    --------
+    toeplitz : Toeplitz matrix
+    solve_toeplitz : Solve a Toeplitz system using Levinson Recursion
+
+    Notes
+    -----
+    The Toeplitz matrix is embedded in a circulant matrix and the FFT is used
+    to efficiently calculate the matrix-matrix product.
+
+    Because the computation is based on the FFT, integer inputs will
+    result in floating point outputs.  This is unlike NumPy's `matmul`,
+    which preserves the data type of the input.
+
+    This is partly based on the implementation that can be found in [1]_,
+    licensed under the MIT license. More information about the method can be
+    found in reference [2]_. References [3]_ and [4]_ have more reference
+    implementations in Python.
+
+    .. versionadded:: 1.6.0
+
+    References
+    ----------
+    .. [1] Jacob R Gardner, Geoff Pleiss, David Bindel, Kilian
+       Q Weinberger, Andrew Gordon Wilson, "GPyTorch: Blackbox Matrix-Matrix
+       Gaussian Process Inference with GPU Acceleration" with contributions
+       from Max Balandat and Ruihan Wu. Available online:
+       https://github.com/cornellius-gp/gpytorch
+
+    .. [2] J. Demmel, P. Koev, and X. Li, "A Brief Survey of Direct Linear
+       Solvers". In Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der
+       Vorst, editors. Templates for the Solution of Algebraic Eigenvalue
+       Problems: A Practical Guide. SIAM, Philadelphia, 2000. Available at:
+       http://www.netlib.org/utk/people/JackDongarra/etemplates/node384.html
+
+    .. [3] R. Scheibler, E. Bezzam, I. Dokmanic, Pyroomacoustics: A Python
+       package for audio room simulations and array processing algorithms,
+       Proc. IEEE ICASSP, Calgary, CA, 2018.
+       https://github.com/LCAV/pyroomacoustics/blob/pypi-release/
+       pyroomacoustics/adaptive/util.py
+
+    .. [4] Marano S, Edwards B, Ferrari G and Fah D (2017), "Fitting
+       Earthquake Spectra: Colored Noise and Incomplete Data", Bulletin of
+       the Seismological Society of America., January, 2017. Vol. 107(1),
+       pp. 276-291.
+
+    Examples
+    --------
+    Multiply the Toeplitz matrix T with matrix x::
+
+            [ 1 -1 -2 -3]       [1 10]
+        T = [ 3  1 -1 -2]   x = [2 11]
+            [ 6  3  1 -1]       [2 11]
+            [10  6  3  1]       [5 19]
+
+    To specify the Toeplitz matrix, only the first column and the first
+    row are needed.
+
+    >>> c = np.array([1, 3, 6, 10])    # First column of T
+    >>> r = np.array([1, -1, -2, -3])  # First row of T
+    >>> x = np.array([[1, 10], [2, 11], [2, 11], [5, 19]])
+
+    >>> from scipy.linalg import toeplitz, matmul_toeplitz
+    >>> matmul_toeplitz((c, r), x)
+    array([[-20., -80.],
+           [ -7.,  -8.],
+           [  9.,  85.],
+           [ 33., 218.]])
+
+    Check the result by creating the full Toeplitz matrix and
+    multiplying it by ``x``.
+
+    >>> toeplitz(c, r) @ x
+    array([[-20, -80],
+           [ -7,  -8],
+           [  9,  85],
+           [ 33, 218]])
+
+    The full matrix is never formed explicitly, so this routine
+    is suitable for very large Toeplitz matrices.
+
+    >>> n = 1000000
+    >>> matmul_toeplitz([1] + [0]*(n-1), np.ones(n))
+    array([1., 1., 1., ..., 1., 1., 1.])
+
+    """
+
+    from ..fft import fft, ifft, rfft, irfft
+
+    r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
+        c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
+    n, m = x.shape
+
+    T_nrows = len(c)
+    T_ncols = len(r)
+    # Length of the first column of the circulant embedding of T.
+    p = T_nrows + T_ncols - 1  # equivalent to len(embedded_col)
+
+    # Embed T in a circulant matrix: first column is c followed by the
+    # reversed tail of r (r[0] is dropped since it duplicates c[0]).
+    embedded_col = np.concatenate((c, r[-1:0:-1]))
+
+    if np.iscomplexobj(embedded_col) or np.iscomplexobj(x):
+        fft_mat = fft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
+        fft_x = fft(x, n=p, axis=0, workers=workers)
+
+        # Pointwise product in Fourier space == circular convolution; the
+        # first T_nrows rows of the result are exactly T @ x.
+        mat_times_x = ifft(fft_mat*fft_x, axis=0,
+                           workers=workers)[:T_nrows, :]
+    else:
+        # Real inputs; using rfft is faster
+        fft_mat = rfft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
+        fft_x = rfft(x, n=p, axis=0, workers=workers)
+
+        mat_times_x = irfft(fft_mat*fft_x, axis=0,
+                            workers=workers, n=p)[:T_nrows, :]
+
+    # Restore a 1-D result when the supplied x was 1-D.
+    return_shape = (T_nrows,) if len(x_shape) == 1 else (T_nrows, m)
+    return mat_times_x.reshape(*return_shape)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/blas.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/blas.py
new file mode 100644
index 0000000..11f1b94
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/blas.py
@@ -0,0 +1,480 @@
+"""
+Low-level BLAS functions (:mod:`scipy.linalg.blas`)
+===================================================
+
+This module contains low-level functions from the BLAS library.
+
+.. versionadded:: 0.12.0
+
+.. note::
+
+   The common ``overwrite_<>`` option in many routines allows the
+ input arrays to be overwritten to avoid extra memory allocation.
+ However this requires the array to satisfy two conditions
+ which are memory order and the data type to match exactly the
+ order and the type expected by the routine.
+
+ As an example, if you pass a double precision float array to any
+ ``S....`` routine which expects single precision arguments, f2py
+ will create an intermediate array to match the argument types and
+ overwriting will be performed on that intermediate array.
+
+ Similarly, if a C-contiguous array is passed, f2py will pass a
+ FORTRAN-contiguous array internally. Please make sure that these
+ details are satisfied. More information can be found in the f2py
+ documentation.
+
+.. warning::
+
+ These functions do little to no error checking.
+ It is possible to cause crashes by mis-using them,
+ so prefer using the higher-level routines in `scipy.linalg`.
+
+Finding functions
+-----------------
+
+.. autosummary::
+ :toctree: generated/
+
+ get_blas_funcs
+ find_best_blas_type
+
+BLAS Level 1 functions
+----------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ caxpy
+ ccopy
+ cdotc
+ cdotu
+ crotg
+ cscal
+ csrot
+ csscal
+ cswap
+ dasum
+ daxpy
+ dcopy
+ ddot
+ dnrm2
+ drot
+ drotg
+ drotm
+ drotmg
+ dscal
+ dswap
+ dzasum
+ dznrm2
+ icamax
+ idamax
+ isamax
+ izamax
+ sasum
+ saxpy
+ scasum
+ scnrm2
+ scopy
+ sdot
+ snrm2
+ srot
+ srotg
+ srotm
+ srotmg
+ sscal
+ sswap
+ zaxpy
+ zcopy
+ zdotc
+ zdotu
+ zdrot
+ zdscal
+ zrotg
+ zscal
+ zswap
+
+BLAS Level 2 functions
+----------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ sgbmv
+ sgemv
+ sger
+ ssbmv
+ sspr
+ sspr2
+ ssymv
+ ssyr
+ ssyr2
+ stbmv
+ stpsv
+ strmv
+ strsv
+ dgbmv
+ dgemv
+ dger
+ dsbmv
+ dspr
+ dspr2
+ dsymv
+ dsyr
+ dsyr2
+ dtbmv
+ dtpsv
+ dtrmv
+ dtrsv
+ cgbmv
+ cgemv
+ cgerc
+ cgeru
+ chbmv
+ chemv
+ cher
+ cher2
+ chpmv
+ chpr
+ chpr2
+ ctbmv
+ ctbsv
+ ctpmv
+ ctpsv
+ ctrmv
+ ctrsv
+ csyr
+ zgbmv
+ zgemv
+ zgerc
+ zgeru
+ zhbmv
+ zhemv
+ zher
+ zher2
+ zhpmv
+ zhpr
+ zhpr2
+ ztbmv
+ ztbsv
+ ztpmv
+ ztrmv
+ ztrsv
+ zsyr
+
+BLAS Level 3 functions
+----------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ sgemm
+ ssymm
+ ssyr2k
+ ssyrk
+ strmm
+ strsm
+ dgemm
+ dsymm
+ dsyr2k
+ dsyrk
+ dtrmm
+ dtrsm
+ cgemm
+ chemm
+ cher2k
+ cherk
+ csymm
+ csyr2k
+ csyrk
+ ctrmm
+ ctrsm
+ zgemm
+ zhemm
+ zher2k
+ zherk
+ zsymm
+ zsyr2k
+ zsyrk
+ ztrmm
+ ztrsm
+
+"""
+#
+# Author: Pearu Peterson, March 2002
+# refactoring by Fabian Pedregosa, March 2010
+#
+
+__all__ = ['get_blas_funcs', 'find_best_blas_type']
+
+import numpy as _np
+import functools
+
+from scipy.linalg import _fblas
+try:
+ from scipy.linalg import _cblas
+except ImportError:
+ _cblas = None
+
+try:
+ from scipy.linalg import _fblas_64
+ HAS_ILP64 = True
+except ImportError:
+ HAS_ILP64 = False
+ _fblas_64 = None
+
+# Expose all functions (only fblas --- cblas is an implementation detail)
+empty_module = None
+from scipy.linalg._fblas import *
+del empty_module
+
+# All numeric dtype chars '?bBhHiIlLqQefdgFDGO' that are safe to convert to
+# each BLAS computation type; a '!' below marks a char that would require an
+# unsafe cast for that target precision:
+
+# single precision float   : '?bBhH!!!!!!ef!!!!!!'
+# double precision float   : '?bBhHiIlLqQefdg!!!!'
+# single precision complex : '?bBhH!!!!!!ef!!F!!!'
+# double precision complex : '?bBhHiIlLqQefdgFDG!'
+
+# Scores map through _type_conv below: 1 -> 's', 2 -> 'd', 3 -> 'c', 4 -> 'z'.
+_type_score = {x: 1 for x in '?bBhHef'}
+_type_score.update({x: 2 for x in 'iIlLqQd'})
+
+# Handle float128(g) and complex256(G) separately for non-Windows systems.
+# On Windows, the values will be rewritten to the same key with the same value.
+_type_score.update({'F': 3, 'D': 4, 'g': 2, 'G': 4})
+
+# Final mapping to the actual prefixes and dtypes
+_type_conv = {1: ('s', _np.dtype('float32')),
+              2: ('d', _np.dtype('float64')),
+              3: ('c', _np.dtype('complex64')),
+              4: ('z', _np.dtype('complex128'))}
+
+# Some convenience aliases for complex functions
+_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
+               'cdot': 'cdotc', 'zdot': 'zdotc',
+               'cger': 'cgerc', 'zger': 'zgerc',
+               'sdotc': 'sdot', 'sdotu': 'sdot',
+               'ddotc': 'ddot', 'ddotu': 'ddot'}
+
+
+def find_best_blas_type(arrays=(), dtype=None):
+    """Find best-matching BLAS/LAPACK type.
+
+    Arrays are used to determine the optimal prefix of BLAS routines.
+
+    Parameters
+    ----------
+    arrays : sequence of ndarrays, optional
+        Arrays can be given to determine optimal prefix of BLAS
+        routines. If not given, double-precision routines will be
+        used, otherwise the most generic type in arrays will be used.
+    dtype : str or dtype, optional
+        Data-type specifier. Not used if `arrays` is non-empty.
+
+    Returns
+    -------
+    prefix : str
+        BLAS/LAPACK prefix character.
+    dtype : dtype
+        Inferred Numpy data type.
+    prefer_fortran : bool
+        Whether to prefer Fortran order routines over C order.
+
+    Examples
+    --------
+    >>> import scipy.linalg.blas as bla
+    >>> a = np.random.rand(10,15)
+    >>> b = np.asfortranarray(a)  # Change the memory layout order
+    >>> bla.find_best_blas_type((a,))
+    ('d', dtype('float64'), False)
+    >>> bla.find_best_blas_type((a*1j,))
+    ('z', dtype('complex128'), False)
+    >>> bla.find_best_blas_type((b,))
+    ('d', dtype('float64'), True)
+
+    """
+    dtype = _np.dtype(dtype)
+    # Sentinel score 5 is deliberately absent from _type_conv, so unknown
+    # dtype characters fall through to the double-precision default below.
+    max_score = _type_score.get(dtype.char, 5)
+    prefer_fortran = False
+
+    if arrays:
+        # In most cases, a single element is passed through; quicker route
+        if len(arrays) == 1:
+            max_score = _type_score.get(arrays[0].dtype.char, 5)
+            prefer_fortran = arrays[0].flags['FORTRAN']
+        else:
+            # use the most generic type in arrays
+            scores = [_type_score.get(x.dtype.char, 5) for x in arrays]
+            max_score = max(scores)
+            ind_max_score = scores.index(max_score)
+            # safe upcasting for mix of float64 and complex64 --> prefix 'z'
+            if max_score == 3 and (2 in scores):
+                max_score = 4
+
+            if arrays[ind_max_score].flags['FORTRAN']:
+                # prefer Fortran for leading array with column major order
+                prefer_fortran = True
+
+    # Get the LAPACK prefix and the corresponding dtype; otherwise fall back
+    # to 'd' and double precision float.
+    prefix, dtype = _type_conv.get(max_score, ('d', _np.dtype('float64')))
+
+    return prefix, dtype, prefer_fortran
+
+
+def _get_funcs(names, arrays, dtype,
+               lib_name, fmodule, cmodule,
+               fmodule_name, cmodule_name, alias,
+               ilp64=False):
+    """
+    Return available BLAS/LAPACK functions.
+
+    Used also in lapack.py. See get_blas_funcs for docstring.
+
+    `lib_name` is only used in the error message ("BLAS"/"LAPACK");
+    `fmodule`/`cmodule` are the Fortran- and C-interface extension modules
+    (cmodule may be None), with `fmodule_name`/`cmodule_name` their display
+    names; `alias` maps generic prefixed names to concrete routine names.
+    """
+
+    funcs = []
+    # When `names` is a single string, remember to unwrap the result list.
+    unpack = False
+    dtype = _np.dtype(dtype)
+    # Lookup order: module1 is tried first, module2 is the fallback.
+    module1 = (cmodule, cmodule_name)
+    module2 = (fmodule, fmodule_name)
+
+    if isinstance(names, str):
+        names = (names,)
+        unpack = True
+
+    prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
+
+    if prefer_fortran:
+        # Column-major input: try the Fortran-interface module first.
+        module1, module2 = module2, module1
+
+    for name in names:
+        func_name = prefix + name
+        # Resolve generic names (e.g. 'cdot') to concrete ones ('cdotc').
+        func_name = alias.get(func_name, func_name)
+        func = getattr(module1[0], func_name, None)
+        module_name = module1[1]
+        if func is None:
+            func = getattr(module2[0], func_name, None)
+            module_name = module2[1]
+        if func is None:
+            raise ValueError(
+                '%s function %s could not be found' % (lib_name, func_name))
+        # Attach metadata so callers can inspect what was selected.
+        func.module_name, func.typecode = module_name, prefix
+        func.dtype = dtype
+        if not ilp64:
+            func.int_dtype = _np.dtype(_np.intc)
+        else:
+            func.int_dtype = _np.dtype(_np.int64)
+        func.prefix = prefix  # Backward compatibility
+        funcs.append(func)
+
+    if unpack:
+        return funcs[0]
+    else:
+        return funcs
+
+
+def _memoize_get_funcs(func):
+    """
+    Memoized fast path for _get_funcs instances.
+
+    Caches results keyed on (names, dtype, ilp64) plus each array's dtype
+    character and Fortran-order flag; the cache dict is exposed as
+    ``func.memo`` on the wrapped function.
+    """
+    memo = {}
+    func.memo = memo
+
+    @functools.wraps(func)
+    def getter(names, arrays=(), dtype=None, ilp64=False):
+        key = (names, dtype, ilp64)
+        for array in arrays:
+            # cf. find_best_blas_type: these two properties fully determine
+            # the prefix selection, so they suffice as cache key components.
+            key += (array.dtype.char, array.flags.fortran)
+
+        try:
+            value = memo.get(key)
+        except TypeError:
+            # unhashable key (e.g. names passed as a list) -- skip caching
+            key = None
+            value = None
+
+        if value is not None:
+            return value
+
+        value = func(names, arrays, dtype, ilp64)
+
+        if key is not None:
+            memo[key] = value
+
+        return value
+
+    return getter
+
+
+@_memoize_get_funcs
+def get_blas_funcs(names, arrays=(), dtype=None, ilp64=False):
+    """Return available BLAS function objects from names.
+
+    Arrays are used to determine the optimal prefix of BLAS routines.
+
+    Parameters
+    ----------
+    names : str or sequence of str
+        Name(s) of BLAS functions without type prefix.
+
+    arrays : sequence of ndarrays, optional
+        Arrays can be given to determine optimal prefix of BLAS
+        routines. If not given, double-precision routines will be
+        used, otherwise the most generic type in arrays will be used.
+
+    dtype : str or dtype, optional
+        Data-type specifier. Not used if `arrays` is non-empty.
+
+    ilp64 : {True, False, 'preferred'}, optional
+        Whether to return ILP64 routine variant.
+        Choosing 'preferred' returns ILP64 routine if available,
+        and otherwise the 32-bit routine. Default: False
+
+    Returns
+    -------
+    funcs : list
+        List containing the found function(s).  If `names` is a single
+        string, the function object itself is returned instead of a
+        one-element list.
+
+
+    Notes
+    -----
+    This routine automatically chooses between Fortran/C
+    interfaces. Fortran code is used whenever possible for arrays with
+    column major order. In all other cases, C code is preferred.
+
+    In BLAS, the naming convention is that all functions start with a
+    type prefix, which depends on the type of the principal
+    matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
+    types {float32, float64, complex64, complex128} respectively.
+    The code and the dtype are stored in attributes `typecode` and `dtype`
+    of the returned functions.
+
+    Examples
+    --------
+    >>> import scipy.linalg as LA
+    >>> a = np.random.rand(3,2)
+    >>> x_gemv = LA.get_blas_funcs('gemv', (a,))
+    >>> x_gemv.typecode
+    'd'
+    >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,))
+    >>> x_gemv.typecode
+    'z'
+
+    """
+    if isinstance(ilp64, str):
+        if ilp64 == 'preferred':
+            # Silently degrade to 32-bit routines when ILP64 is unavailable.
+            ilp64 = HAS_ILP64
+        else:
+            raise ValueError("Invalid value for 'ilp64'")
+
+    if not ilp64:
+        # 32-bit integer interface: both Fortran and C modules available.
+        return _get_funcs(names, arrays, dtype,
+                          "BLAS", _fblas, _cblas, "fblas", "cblas",
+                          _blas_alias, ilp64=False)
+    else:
+        if not HAS_ILP64:
+            raise RuntimeError("BLAS ILP64 routine requested, but Scipy "
+                               "compiled only with 32-bit BLAS")
+        # ILP64 build ships only a Fortran-interface module.
+        return _get_funcs(names, arrays, dtype,
+                          "BLAS", _fblas_64, None, "fblas_64", None,
+                          _blas_alias, ilp64=True)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_blas.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_blas.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..82578d2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_blas.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_blas.pxd b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_blas.pxd
new file mode 100644
index 0000000..5ddaa0b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_blas.pxd
@@ -0,0 +1,314 @@
+# This file was generated by _generate_pyx.py.
+# Do not edit this file directly.
+
+# Within SciPy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_blas
+# from scipy.linalg cimport cython_blas
+# cimport scipy.linalg.cython_blas as cython_blas
+# cimport ..linalg.cython_blas as cython_blas
+
+# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
+# these wrappers should not be used.
+# The original libraries should be linked directly.
+
+ctypedef float s
+ctypedef double d
+ctypedef float complex c
+ctypedef double complex z
+
+cdef void caxpy(int *n, c *ca, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef void ccopy(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef c cdotc(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef c cdotu(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef void cgbmv(char *trans, int *m, int *n, int *kl, int *ku, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cgemm(char *transa, char *transb, int *m, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void cgemv(char *trans, int *m, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cgerc(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil
+
+cdef void cgeru(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil
+
+cdef void chbmv(char *uplo, int *n, int *k, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void chemm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void chemv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cher(char *uplo, int *n, s *alpha, c *x, int *incx, c *a, int *lda) nogil
+
+cdef void cher2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil
+
+cdef void cher2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, s *beta, c *c, int *ldc) nogil
+
+cdef void cherk(char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c, int *ldc) nogil
+
+cdef void chpmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void chpr(char *uplo, int *n, s *alpha, c *x, int *incx, c *ap) nogil
+
+cdef void chpr2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *ap) nogil
+
+cdef void crotg(c *ca, c *cb, s *c, c *s) nogil
+
+cdef void cscal(int *n, c *ca, c *cx, int *incx) nogil
+
+cdef void csrot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, s *s) nogil
+
+cdef void csscal(int *n, s *sa, c *cx, int *incx) nogil
+
+cdef void cswap(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef void csymm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void csyr2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void csyrk(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *beta, c *c, int *ldc) nogil
+
+cdef void ctbmv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil
+
+cdef void ctbsv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil
+
+cdef void ctpmv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil
+
+cdef void ctpsv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil
+
+cdef void ctrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil
+
+cdef void ctrmv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil
+
+cdef void ctrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil
+
+cdef void ctrsv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil
+
+cdef d dasum(int *n, d *dx, int *incx) nogil
+
+cdef void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef d dcabs1(z *z) nogil
+
+cdef void dcopy(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef d ddot(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef void dgbmv(char *trans, int *m, int *n, int *kl, int *ku, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dgemm(char *transa, char *transb, int *m, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+
+cdef void dgemv(char *trans, int *m, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dger(int *m, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil
+
+cdef d dnrm2(int *n, d *x, int *incx) nogil
+
+cdef void drot(int *n, d *dx, int *incx, d *dy, int *incy, d *c, d *s) nogil
+
+cdef void drotg(d *da, d *db, d *c, d *s) nogil
+
+cdef void drotm(int *n, d *dx, int *incx, d *dy, int *incy, d *dparam) nogil
+
+cdef void drotmg(d *dd1, d *dd2, d *dx1, d *dy1, d *dparam) nogil
+
+cdef void dsbmv(char *uplo, int *n, int *k, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dscal(int *n, d *da, d *dx, int *incx) nogil
+
+cdef d dsdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef void dspmv(char *uplo, int *n, d *alpha, d *ap, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dspr(char *uplo, int *n, d *alpha, d *x, int *incx, d *ap) nogil
+
+cdef void dspr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *ap) nogil
+
+cdef void dswap(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef void dsymm(char *side, char *uplo, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+
+cdef void dsymv(char *uplo, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dsyr(char *uplo, int *n, d *alpha, d *x, int *incx, d *a, int *lda) nogil
+
+cdef void dsyr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil
+
+cdef void dsyr2k(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+
+cdef void dsyrk(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c, int *ldc) nogil
+
+cdef void dtbmv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil
+
+cdef void dtbsv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil
+
+cdef void dtpmv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil
+
+cdef void dtpsv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil
+
+cdef void dtrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil
+
+cdef void dtrmv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil
+
+cdef void dtrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil
+
+cdef void dtrsv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil
+
+cdef d dzasum(int *n, z *zx, int *incx) nogil
+
+cdef d dznrm2(int *n, z *x, int *incx) nogil
+
+cdef int icamax(int *n, c *cx, int *incx) nogil
+
+cdef int idamax(int *n, d *dx, int *incx) nogil
+
+cdef int isamax(int *n, s *sx, int *incx) nogil
+
+cdef int izamax(int *n, z *zx, int *incx) nogil
+
+cdef bint lsame(char *ca, char *cb) nogil
+
+cdef s sasum(int *n, s *sx, int *incx) nogil
+
+cdef void saxpy(int *n, s *sa, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef s scasum(int *n, c *cx, int *incx) nogil
+
+cdef s scnrm2(int *n, c *x, int *incx) nogil
+
+cdef void scopy(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef s sdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef s sdsdot(int *n, s *sb, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef void sgbmv(char *trans, int *m, int *n, int *kl, int *ku, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sgemm(char *transa, char *transb, int *m, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+
+cdef void sgemv(char *trans, int *m, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sger(int *m, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil
+
+cdef s snrm2(int *n, s *x, int *incx) nogil
+
+cdef void srot(int *n, s *sx, int *incx, s *sy, int *incy, s *c, s *s) nogil
+
+cdef void srotg(s *sa, s *sb, s *c, s *s) nogil
+
+cdef void srotm(int *n, s *sx, int *incx, s *sy, int *incy, s *sparam) nogil
+
+cdef void srotmg(s *sd1, s *sd2, s *sx1, s *sy1, s *sparam) nogil
+
+cdef void ssbmv(char *uplo, int *n, int *k, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sscal(int *n, s *sa, s *sx, int *incx) nogil
+
+cdef void sspmv(char *uplo, int *n, s *alpha, s *ap, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sspr(char *uplo, int *n, s *alpha, s *x, int *incx, s *ap) nogil
+
+cdef void sspr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *ap) nogil
+
+cdef void sswap(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef void ssymm(char *side, char *uplo, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+
+cdef void ssymv(char *uplo, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void ssyr(char *uplo, int *n, s *alpha, s *x, int *incx, s *a, int *lda) nogil
+
+cdef void ssyr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil
+
+cdef void ssyr2k(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+
+cdef void ssyrk(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c, int *ldc) nogil
+
+cdef void stbmv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void stbsv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void stpmv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil
+
+cdef void stpsv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil
+
+cdef void strmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil
+
+cdef void strmv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void strsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil
+
+cdef void strsv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void zaxpy(int *n, z *za, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef void zcopy(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef z zdotc(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef z zdotu(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef void zdrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, d *s) nogil
+
+cdef void zdscal(int *n, d *da, z *zx, int *incx) nogil
+
+cdef void zgbmv(char *trans, int *m, int *n, int *kl, int *ku, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zgemm(char *transa, char *transb, int *m, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zgemv(char *trans, int *m, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zgerc(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil
+
+cdef void zgeru(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil
+
+cdef void zhbmv(char *uplo, int *n, int *k, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zhemm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zhemv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zher(char *uplo, int *n, d *alpha, z *x, int *incx, z *a, int *lda) nogil
+
+cdef void zher2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil
+
+cdef void zher2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, d *beta, z *c, int *ldc) nogil
+
+cdef void zherk(char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c, int *ldc) nogil
+
+cdef void zhpmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zhpr(char *uplo, int *n, d *alpha, z *x, int *incx, z *ap) nogil
+
+cdef void zhpr2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *ap) nogil
+
+cdef void zrotg(z *ca, z *cb, d *c, z *s) nogil
+
+cdef void zscal(int *n, z *za, z *zx, int *incx) nogil
+
+cdef void zswap(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef void zsymm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zsyr2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zsyrk(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *beta, z *c, int *ldc) nogil
+
+cdef void ztbmv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil
+
+cdef void ztbsv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil
+
+cdef void ztpmv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil
+
+cdef void ztpsv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil
+
+cdef void ztrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil
+
+cdef void ztrmv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil
+
+cdef void ztrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil
+
+cdef void ztrsv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_lapack.cpython-39-x86_64-cygwin.dll b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_lapack.cpython-39-x86_64-cygwin.dll
new file mode 100644
index 0000000..5ba3aff
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_lapack.cpython-39-x86_64-cygwin.dll differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_lapack.pxd b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_lapack.pxd
new file mode 100644
index 0000000..7c36189
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/cython_lapack.pxd
@@ -0,0 +1,3021 @@
+# This file was generated by _generate_pyx.py.
+# Do not edit this file directly.
+
+# Within SciPy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_lapack
+# from scipy.linalg cimport cython_lapack
+# cimport scipy.linalg.cython_lapack as cython_lapack
+# cimport ..linalg.cython_lapack as cython_lapack
+
+# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
+# these wrappers should not be used.
+# The original libraries should be linked directly.
+
+ctypedef float s
+ctypedef double d
+ctypedef float complex c
+ctypedef double complex z
+
+# Function pointer type declarations for
+# gees and gges families of functions.
+ctypedef bint cselect1(c*)
+ctypedef bint cselect2(c*, c*)
+ctypedef bint dselect2(d*, d*)
+ctypedef bint dselect3(d*, d*, d*)
+ctypedef bint sselect2(s*, s*)
+ctypedef bint sselect3(s*, s*, s*)
+ctypedef bint zselect1(z*)
+ctypedef bint zselect2(z*, z*)
+
+cdef void cbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil
+
+cdef void cbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, c *vt, int *ldvt, c *u, int *ldu, c *c, int *ldc, s *rwork, int *info) nogil
+
+cdef void cgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *pt, int *ldpt, c *c, int *ldc, c *work, s *rwork, int *info) nogil
+
+cdef void cgbcon(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cgbequ(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgbequb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgbsv(int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgbtf2(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void cgbtrf(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void cgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, c *v, int *ldv, int *info) nogil
+
+cdef void cgebal(char *job, int *n, c *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil
+
+cdef void cgebd2(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *info) nogil
+
+cdef void cgebrd(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *lwork, int *info) nogil
+
+cdef void cgecon(char *norm, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cgeequ(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgeequb(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+
+cdef void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+
+cdef void cgeev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgehd2(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgehrd(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgelq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgelqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgels(char *trans, int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *work, int *lwork, int *info) nogil
+
+cdef void cgelsd(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil
+
+cdef void cgelss(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgelsy(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *jpvt, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cgeql2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgeqlf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgeqp3(int *m, int *n, c *a, int *lda, int *jpvt, c *tau, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgeqr2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgeqr2p(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgeqrf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgeqrfp(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgeqrt(int *m, int *n, int *nb, c *a, int *lda, c *t, int *ldt, c *work, int *info) nogil
+
+cdef void cgeqrt2(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil
+
+cdef void cgeqrt3(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil
+
+cdef void cgerfs(char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgerq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgerqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgesc2(int *n, c *a, int *lda, c *rhs, int *ipiv, int *jpiv, s *scale) nogil
+
+cdef void cgesdd(char *jobz, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil
+
+cdef void cgesv(int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgesvd(char *jobu, char *jobvt, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgesvx(char *fact, char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgetc2(int *n, c *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void cgetf2(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void cgetrf(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void cgetri(int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void cgetrs(char *trans, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, c *v, int *ldv, int *info) nogil
+
+cdef void cggbal(char *job, int *n, c *a, int *lda, c *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil
+
+cdef void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+
+cdef void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void cggev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void cggglm(int *n, int *m, int *p, c *a, int *lda, c *b, int *ldb, c *d, c *x, c *y, c *work, int *lwork, int *info) nogil
+
+cdef void cgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *info) nogil
+
+cdef void cgglse(int *m, int *n, int *p, c *a, int *lda, c *b, int *ldb, c *c, c *d, c *x, c *work, int *lwork, int *info) nogil
+
+cdef void cggqrf(int *n, int *m, int *p, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil
+
+cdef void cggrqf(int *m, int *p, int *n, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil
+
+cdef void cgtcon(char *norm, int *n, c *dl, c *d, c *du, c *du2, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void cgtrfs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgtsv(int *n, int *nrhs, c *dl, c *d, c *du, c *b, int *ldb, int *info) nogil
+
+cdef void cgtsvx(char *fact, char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgttrf(int *n, c *dl, c *d, c *du, c *du2, int *ipiv, int *info) nogil
+
+cdef void cgttrs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgtts2(int *itrans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb) nogil
+
+cdef void chbev(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chbevd(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chbevx(char *jobz, char *range, char *uplo, int *n, int *kd, c *ab, int *ldab, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chbgst(char *vect, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *x, int *ldx, c *work, s *rwork, int *info) nogil
+
+cdef void chbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chbtrd(char *vect, char *uplo, int *n, int *kd, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *work, int *info) nogil
+
+cdef void checon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void cheequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil
+
+cdef void cheev(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cheevd(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cheevr(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cheevx(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chegs2(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void chegst(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void chegv(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void chegvd(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chegvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void cherfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void chesv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil
+
+cdef void chesvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cheswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil
+
+cdef void chetd2(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, int *info) nogil
+
+cdef void chetf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void chetrd(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void chetrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void chetri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil
+
+cdef void chetri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void chetri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil
+
+cdef void chetrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void chetrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil
+
+cdef void chfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c) nogil
+
+cdef void chgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *t, int *ldt, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef char chla_transtype(int *trans) nogil
+
+cdef void chpcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void chpev(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chpevd(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chpevx(char *jobz, char *range, char *uplo, int *n, c *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chpgst(int *itype, char *uplo, int *n, c *ap, c *bp, int *info) nogil
+
+cdef void chpgv(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chpgvd(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *ap, c *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void chpsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void chpsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void chptrd(char *uplo, int *n, c *ap, s *d, s *e, c *tau, int *info) nogil
+
+cdef void chptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil
+
+cdef void chptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil
+
+cdef void chptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void chsein(char *side, char *eigsrc, char *initv, bint *select, int *n, c *h, int *ldh, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void chseqr(char *job, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, c *z, int *ldz, c *work, int *lwork, int *info) nogil
+
+cdef void clabrd(int *m, int *n, int *nb, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *x, int *ldx, c *y, int *ldy) nogil
+
+cdef void clacgv(int *n, c *x, int *incx) nogil
+
+cdef void clacn2(int *n, c *v, c *x, s *est, int *kase, int *isave) nogil
+
+cdef void clacon(int *n, c *v, c *x, s *est, int *kase) nogil
+
+cdef void clacp2(char *uplo, int *m, int *n, s *a, int *lda, c *b, int *ldb) nogil
+
+cdef void clacpy(char *uplo, int *m, int *n, c *a, int *lda, c *b, int *ldb) nogil
+
+cdef void clacrm(int *m, int *n, c *a, int *lda, s *b, int *ldb, c *c, int *ldc, s *rwork) nogil
+
+cdef void clacrt(int *n, c *cx, int *incx, c *cy, int *incy, c *c, c *s) nogil
+
+cdef c cladiv(c *x, c *y) nogil
+
+cdef void claed0(int *qsiz, int *n, s *d, s *e, c *q, int *ldq, c *qstore, int *ldqs, s *rwork, int *iwork, int *info) nogil
+
+cdef void claed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, c *q, int *ldq, s *rho, int *indxq, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, c *work, s *rwork, int *iwork, int *info) nogil
+
+cdef void claed8(int *k, int *n, int *qsiz, c *q, int *ldq, s *d, s *rho, int *cutpnt, s *z, s *dlamda, c *q2, int *ldq2, s *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, s *givnum, int *info) nogil
+
+cdef void claein(bint *rightv, bint *noinit, int *n, c *h, int *ldh, c *w, c *v, c *b, int *ldb, s *rwork, s *eps3, s *smlnum, int *info) nogil
+
+cdef void claesy(c *a, c *b, c *c, c *rt1, c *rt2, c *evscal, c *cs1, c *sn1) nogil
+
+cdef void claev2(c *a, c *b, c *c, s *rt1, s *rt2, s *cs1, c *sn1) nogil
+
+cdef void clag2z(int *m, int *n, c *sa, int *ldsa, z *a, int *lda, int *info) nogil
+
+cdef void clags2(bint *upper, s *a1, c *a2, s *a3, s *b1, c *b2, s *b3, s *csu, c *snu, s *csv, c *snv, s *csq, c *snq) nogil
+
+cdef void clagtm(char *trans, int *n, int *nrhs, s *alpha, c *dl, c *d, c *du, c *x, int *ldx, s *beta, c *b, int *ldb) nogil
+
+cdef void clahef(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil
+
+cdef void clahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, int *info) nogil
+
+cdef void clahr2(int *n, int *k, int *nb, c *a, int *lda, c *tau, c *t, int *ldt, c *y, int *ldy) nogil
+
+cdef void claic1(int *job, int *j, c *x, s *sest, c *w, c *gamma, s *sestpr, c *s, c *c) nogil
+
+cdef void clals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *rwork, int *info) nogil
+
+cdef void clalsa(int *icompq, int *smlsiz, int *n, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *rwork, int *iwork, int *info) nogil
+
+cdef void clalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, c *b, int *ldb, s *rcond, int *rank, c *work, s *rwork, int *iwork, int *info) nogil
+
+cdef s clangb(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, s *work) nogil
+
+cdef s clange(char *norm, int *m, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clangt(char *norm, int *n, c *dl, c *d, c *du) nogil
+
+cdef s clanhb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil
+
+cdef s clanhe(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clanhf(char *norm, char *transr, char *uplo, int *n, c *a, s *work) nogil
+
+cdef s clanhp(char *norm, char *uplo, int *n, c *ap, s *work) nogil
+
+cdef s clanhs(char *norm, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clanht(char *norm, int *n, s *d, c *e) nogil
+
+cdef s clansb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil
+
+cdef s clansp(char *norm, char *uplo, int *n, c *ap, s *work) nogil
+
+cdef s clansy(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clantb(char *norm, char *uplo, char *diag, int *n, int *k, c *ab, int *ldab, s *work) nogil
+
+cdef s clantp(char *norm, char *uplo, char *diag, int *n, c *ap, s *work) nogil
+
+cdef s clantr(char *norm, char *uplo, char *diag, int *m, int *n, c *a, int *lda, s *work) nogil
+
+cdef void clapll(int *n, c *x, int *incx, c *y, int *incy, s *ssmin) nogil
+
+cdef void clapmr(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil
+
+cdef void clapmt(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil
+
+cdef void claqgb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void claqge(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void claqhb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqhe(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqhp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqp2(int *m, int *n, int *offset, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *work) nogil
+
+cdef void claqps(int *m, int *n, int *offset, int *nb, int *kb, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *auxv, c *f, int *ldf) nogil
+
+cdef void claqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil
+
+cdef void claqr1(int *n, c *h, int *ldh, c *s1, c *s2, c *v) nogil
+
+cdef void claqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil
+
+cdef void claqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil
+
+cdef void claqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil
+
+cdef void claqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, c *s, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, c *v, int *ldv, c *u, int *ldu, int *nv, c *wv, int *ldwv, int *nh, c *wh, int *ldwh) nogil
+
+cdef void claqsb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqsp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqsy(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void clar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, c *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil
+
+cdef void clar2v(int *n, c *x, c *y, c *z, int *incx, s *c, c *s, int *incc) nogil
+
+cdef void clarcm(int *m, int *n, s *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *rwork) nogil
+
+cdef void clarf(char *side, int *m, int *n, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil
+
+cdef void clarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil
+
+cdef void clarfg(int *n, c *alpha, c *x, int *incx, c *tau) nogil
+
+cdef void clarfgp(int *n, c *alpha, c *x, int *incx, c *tau) nogil
+
+cdef void clarft(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil
+
+cdef void clarfx(char *side, int *m, int *n, c *v, c *tau, c *c, int *ldc, c *work) nogil
+
+cdef void clargv(int *n, c *x, int *incx, c *y, int *incy, s *c, int *incc) nogil
+
+cdef void clarnv(int *idist, int *iseed, int *n, c *x) nogil
+
+cdef void clarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, c *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil
+
+cdef void clartg(c *f, c *g, s *cs, c *sn, c *r) nogil
+
+cdef void clartv(int *n, c *x, int *incx, c *y, int *incy, s *c, c *s, int *incc) nogil
+
+cdef void clarz(char *side, int *m, int *n, int *l, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil
+
+cdef void clarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil
+
+cdef void clarzt(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil
+
+cdef void clascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, c *a, int *lda, int *info) nogil
+
+cdef void claset(char *uplo, int *m, int *n, c *alpha, c *beta, c *a, int *lda) nogil
+
+cdef void clasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, c *a, int *lda) nogil
+
+cdef void classq(int *n, c *x, int *incx, s *scale, s *sumsq) nogil
+
+cdef void claswp(int *n, c *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void clasyf(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil
+
+cdef void clatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, c *ab, int *ldab, c *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void clatdf(int *ijob, int *n, c *z, int *ldz, c *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void clatps(char *uplo, char *trans, char *diag, char *normin, int *n, c *ap, c *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void clatrd(char *uplo, int *n, int *nb, c *a, int *lda, s *e, c *tau, c *w, int *ldw) nogil
+
+cdef void clatrs(char *uplo, char *trans, char *diag, char *normin, int *n, c *a, int *lda, c *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void clatrz(int *m, int *n, int *l, c *a, int *lda, c *tau, c *work) nogil
+
+cdef void clauu2(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void clauum(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpbcon(char *uplo, int *n, int *kd, c *ab, int *ldab, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cpbequ(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cpbrfs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpbstf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil
+
+cdef void cpbsv(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil
+
+cdef void cpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpbtf2(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil
+
+cdef void cpbtrf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil
+
+cdef void cpbtrs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil
+
+cdef void cpftrf(char *transr, char *uplo, int *n, c *a, int *info) nogil
+
+cdef void cpftri(char *transr, char *uplo, int *n, c *a, int *info) nogil
+
+cdef void cpftrs(char *transr, char *uplo, int *n, int *nrhs, c *a, c *b, int *ldb, int *info) nogil
+
+cdef void cpocon(char *uplo, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cpoequ(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cpoequb(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cporfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cposv(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void cposvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpotf2(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpotrf(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpotri(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpotrs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void cppcon(char *uplo, int *n, c *ap, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cppequ(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cpprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cppsv(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil
+
+cdef void cppsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpptrf(char *uplo, int *n, c *ap, int *info) nogil
+
+cdef void cpptri(char *uplo, int *n, c *ap, int *info) nogil
+
+cdef void cpptrs(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil
+
+cdef void cpstf2(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void cpstrf(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void cptcon(int *n, s *d, c *e, s *anorm, s *rcond, s *rwork, int *info) nogil
+
+cdef void cpteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil
+
+cdef void cptrfs(char *uplo, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cptsv(int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil
+
+cdef void cptsvx(char *fact, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpttrf(int *n, s *d, c *e, int *info) nogil
+
+cdef void cpttrs(char *uplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil
+
+cdef void cptts2(int *iuplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb) nogil
+
+cdef void crot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, c *s) nogil
+
+cdef void cspcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void cspmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cspr(char *uplo, int *n, c *alpha, c *x, int *incx, c *ap) nogil
+
+cdef void csprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cspsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cspsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void csptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil
+
+cdef void csptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil
+
+cdef void csptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void csrscl(int *n, s *sa, c *sx, int *incx) nogil
+
+cdef void cstedc(char *compz, int *n, s *d, s *e, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, c *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void cstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, c *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void csteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil
+
+cdef void csycon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void csyconv(char *uplo, char *way, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil
+
+cdef void csyequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil
+
+cdef void csymv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void csyr(char *uplo, int *n, c *alpha, c *x, int *incx, c *a, int *lda) nogil
+
+cdef void csyrfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void csysv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil
+
+cdef void csysvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void csyswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil
+
+cdef void csytf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void csytrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void csytri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil
+
+cdef void csytri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void csytri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil
+
+cdef void csytrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void csytrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil
+
+cdef void ctbcon(char *norm, char *uplo, char *diag, int *n, int *kd, c *ab, int *ldab, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void ctbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void ctbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil
+
+cdef void ctfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, c *alpha, c *a, c *b, int *ldb) nogil
+
+cdef void ctftri(char *transr, char *uplo, char *diag, int *n, c *a, int *info) nogil
+
+cdef void ctfttp(char *transr, char *uplo, int *n, c *arf, c *ap, int *info) nogil
+
+cdef void ctfttr(char *transr, char *uplo, int *n, c *arf, c *a, int *lda, int *info) nogil
+
+cdef void ctgevc(char *side, char *howmny, bint *select, int *n, c *s, int *lds, c *p, int *ldp, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil
+
+cdef void ctgex2(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *j1, int *info) nogil
+
+cdef void ctgexc(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *ifst, int *ilst, int *info) nogil
+
+cdef void ctgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, int *m, s *pl, s *pr, s *dif, c *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ctgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, c *a, int *lda, c *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, c *u, int *ldu, c *v, int *ldv, c *q, int *ldq, c *work, int *ncycle, int *info) nogil
+
+cdef void ctgsna(char *job, char *howmny, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *dif, int *mm, int *m, c *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ctgsy2(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *info) nogil
+
+cdef void ctgsyl(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *dif, c *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ctpcon(char *norm, char *uplo, char *diag, int *n, c *ap, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void ctpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *info) nogil
+
+cdef void ctpqrt(int *m, int *n, int *l, int *nb, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, c *work, int *info) nogil
+
+cdef void ctpqrt2(int *m, int *n, int *l, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, int *info) nogil
+
+cdef void ctprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *ldwork) nogil
+
+cdef void ctprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void ctptri(char *uplo, char *diag, int *n, c *ap, int *info) nogil
+
+cdef void ctptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil
+
+cdef void ctpttf(char *transr, char *uplo, int *n, c *ap, c *arf, int *info) nogil
+
+cdef void ctpttr(char *uplo, int *n, c *ap, c *a, int *lda, int *info) nogil
+
+cdef void ctrcon(char *norm, char *uplo, char *diag, int *n, c *a, int *lda, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void ctrevc(char *side, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil
+
+cdef void ctrexc(char *compq, int *n, c *t, int *ldt, c *q, int *ldq, int *ifst, int *ilst, int *info) nogil
+
+cdef void ctrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void ctrsen(char *job, char *compq, bint *select, int *n, c *t, int *ldt, c *q, int *ldq, c *w, int *m, s *s, s *sep, c *work, int *lwork, int *info) nogil
+
+cdef void ctrsna(char *job, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *sep, int *mm, int *m, c *work, int *ldwork, s *rwork, int *info) nogil
+
+cdef void ctrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *scale, int *info) nogil
+
+cdef void ctrti2(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil
+
+cdef void ctrtri(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil
+
+cdef void ctrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void ctrttf(char *transr, char *uplo, int *n, c *a, int *lda, c *arf, int *info) nogil
+
+cdef void ctrttp(char *uplo, int *n, c *a, int *lda, c *ap, int *info) nogil
+
+cdef void ctzrzf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cunbdb(char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, s *phi, c *taup1, c *taup2, c *tauq1, c *tauq2, c *work, int *lwork, int *info) nogil
+
+cdef void cuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *info) nogil
+
+cdef void cung2l(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cung2r(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cungbr(char *vect, int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cunghr(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungl2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cunglq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungql(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungqr(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungr2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cungrq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungtr(char *uplo, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cunm2l(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunm2r(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunml2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmlq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmql(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmqr(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmr2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmrq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmtr(char *side, char *uplo, char *trans, int *m, int *n, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cupgtr(char *uplo, int *n, c *ap, c *tau, c *q, int *ldq, c *work, int *info) nogil
+
+cdef void cupmtr(char *side, char *uplo, char *trans, int *m, int *n, c *ap, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+# --- Start of the double-precision real (d-prefixed) declarations; the
+# single-precision complex (c-prefixed) section ends just above.  `d` is a
+# typedef declared earlier in this file (presumably C double — confirm).
+# All routines below keep the raw LAPACK calling convention: every argument
+# is passed by pointer, and results/status are written through out-pointers
+# such as `info`.
+cdef void dbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *work, int *lwork, int *info) nogil
+
+cdef void dbdsdc(char *uplo, char *compq, int *n, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, d *q, int *iq, d *work, int *iwork, int *info) nogil
+
+cdef void dbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void ddisna(char *job, int *m, int *n, d *d, d *sep, int *info) nogil
+
+cdef void dgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *pt, int *ldpt, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dgbcon(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dgbequ(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgbequb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgbsv(int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgbtf2(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void dgbtrf(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void dgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, d *v, int *ldv, int *info) nogil
+
+cdef void dgebal(char *job, int *n, d *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil
+
+cdef void dgebd2(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *info) nogil
+
+cdef void dgebrd(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *lwork, int *info) nogil
+
+cdef void dgecon(char *norm, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dgeequ(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgeequb(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void dgeev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil
+
+cdef void dgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgehd2(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgehrd(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, d *a, int *lda, d *sva, d *u, int *ldu, d *v, int *ldv, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgelq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgelqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgels(char *trans, int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *work, int *lwork, int *info) nogil
+
+cdef void dgelsd(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgelss(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *info) nogil
+
+cdef void dgelsy(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *jpvt, d *rcond, int *rank, d *work, int *lwork, int *info) nogil
+
+cdef void dgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dgeql2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgeqlf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqp3(int *m, int *n, d *a, int *lda, int *jpvt, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqr2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgeqr2p(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgeqrf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqrfp(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqrt(int *m, int *n, int *nb, d *a, int *lda, d *t, int *ldt, d *work, int *info) nogil
+
+cdef void dgeqrt2(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil
+
+cdef void dgeqrt3(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil
+
+cdef void dgerfs(char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgerq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgerqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgesc2(int *n, d *a, int *lda, d *rhs, int *ipiv, int *jpiv, d *scale) nogil
+
+cdef void dgesdd(char *jobz, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgesvd(char *jobu, char *jobvt, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *info) nogil
+
+cdef void dgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, d *a, int *lda, d *sva, int *mv, d *v, int *ldv, d *work, int *lwork, int *info) nogil
+
+cdef void dgesvx(char *fact, char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgetc2(int *n, d *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void dgetf2(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void dgetrf(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void dgetri(int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+
+cdef void dgetrs(char *trans, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, d *v, int *ldv, int *info) nogil
+
+cdef void dggbal(char *job, int *n, d *a, int *lda, d *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil
+
+cdef void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void dggev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil
+
+cdef void dggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void dggglm(int *n, int *m, int *p, d *a, int *lda, d *b, int *ldb, d *d, d *x, d *y, d *work, int *lwork, int *info) nogil
+
+cdef void dgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *info) nogil
+
+cdef void dgglse(int *m, int *n, int *p, d *a, int *lda, d *b, int *ldb, d *c, d *d, d *x, d *work, int *lwork, int *info) nogil
+
+cdef void dggqrf(int *n, int *m, int *p, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil
+
+cdef void dggrqf(int *m, int *p, int *n, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil
+
+cdef void dgsvj0(char *jobv, int *m, int *n, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil
+
+cdef void dgsvj1(char *jobv, int *m, int *n, int *n1, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil
+
+cdef void dgtcon(char *norm, int *n, d *dl, d *d, d *du, d *du2, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dgtrfs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgtsv(int *n, int *nrhs, d *dl, d *d, d *du, d *b, int *ldb, int *info) nogil
+
+cdef void dgtsvx(char *fact, char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgttrf(int *n, d *dl, d *d, d *du, d *du2, int *ipiv, int *info) nogil
+
+cdef void dgttrs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgtts2(int *itrans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb) nogil
+
+cdef void dhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *t, int *ldt, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef void dhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, d *h, int *ldh, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void dhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+# `disnan` is one of the few declarations here that returns a value (bint)
+# instead of reporting through an `info` out-parameter.
+cdef bint disnan(d *din) nogil
+
+# NOTE(review): dlabad is deprecated in recent LAPACK releases — presumably
+# kept here so the binding set matches the LAPACK version this file targets;
+# verify before regenerating.
+cdef void dlabad(d *small, d *large) nogil
+
+cdef void dlabrd(int *m, int *n, int *nb, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *x, int *ldx, d *y, int *ldy) nogil
+
+cdef void dlacn2(int *n, d *v, d *x, int *isgn, d *est, int *kase, int *isave) nogil
+
+cdef void dlacon(int *n, d *v, d *x, int *isgn, d *est, int *kase) nogil
+
+cdef void dlacpy(char *uplo, int *m, int *n, d *a, int *lda, d *b, int *ldb) nogil
+
+cdef void dladiv(d *a, d *b, d *c, d *d, d *p, d *q) nogil
+
+cdef void dlae2(d *a, d *b, d *c, d *rt1, d *rt2) nogil
+
+cdef void dlaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, d *abstol, d *reltol, d *pivmin, d *d, d *e, d *e2, int *nval, d *ab, d *c, int *mout, int *nab, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed0(int *icompq, int *qsiz, int *n, d *d, d *e, d *q, int *ldq, d *qstore, int *ldqs, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed1(int *n, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed2(int *k, int *n, int *n1, d *d, d *q, int *ldq, int *indxq, d *rho, d *z, d *dlamda, d *w, d *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil
+
+cdef void dlaed3(int *k, int *n, int *n1, d *d, d *q, int *ldq, d *rho, d *dlamda, d *q2, int *indx, int *ctot, d *w, d *s, int *info) nogil
+
+cdef void dlaed4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *dlam, int *info) nogil
+
+cdef void dlaed5(int *i, d *d, d *z, d *delta, d *rho, d *dlam) nogil
+
+cdef void dlaed6(int *kniter, bint *orgati, d *rho, d *d, d *z, d *finit, d *tau, int *info) nogil
+
+cdef void dlaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed8(int *icompq, int *k, int *n, int *qsiz, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *z, d *dlamda, d *q2, int *ldq2, d *w, int *perm, int *givptr, int *givcol, d *givnum, int *indxp, int *indx, int *info) nogil
+
+cdef void dlaed9(int *k, int *kstart, int *kstop, int *n, d *d, d *q, int *ldq, d *rho, d *dlamda, d *w, d *s, int *lds, int *info) nogil
+
+cdef void dlaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *q, int *qptr, d *z, d *ztemp, int *info) nogil
+
+cdef void dlaein(bint *rightv, bint *noinit, int *n, d *h, int *ldh, d *wr, d *wi, d *vr, d *vi, d *b, int *ldb, d *work, d *eps3, d *smlnum, d *bignum, int *info) nogil
+
+cdef void dlaev2(d *a, d *b, d *c, d *rt1, d *rt2, d *cs1, d *sn1) nogil
+
+cdef void dlaexc(bint *wantq, int *n, d *t, int *ldt, d *q, int *ldq, int *j1, int *n1, int *n2, d *work, int *info) nogil
+
+cdef void dlag2(d *a, int *lda, d *b, int *ldb, d *safmin, d *scale1, d *scale2, d *wr1, d *wr2, d *wi) nogil
+
+cdef void dlag2s(int *m, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil
+
+cdef void dlags2(bint *upper, d *a1, d *a2, d *a3, d *b1, d *b2, d *b3, d *csu, d *snu, d *csv, d *snv, d *csq, d *snq) nogil
+
+cdef void dlagtf(int *n, d *a, d *lambda_, d *b, d *c, d *tol, d *d, int *in_, int *info) nogil
+
+cdef void dlagtm(char *trans, int *n, int *nrhs, d *alpha, d *dl, d *d, d *du, d *x, int *ldx, d *beta, d *b, int *ldb) nogil
+
+cdef void dlagts(int *job, int *n, d *a, d *b, d *c, d *d, int *in_, d *y, d *tol, int *info) nogil
+
+cdef void dlagv2(d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *csl, d *snl, d *csr, d *snr) nogil
+
+cdef void dlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, int *info) nogil
+
+cdef void dlahr2(int *n, int *k, int *nb, d *a, int *lda, d *tau, d *t, int *ldt, d *y, int *ldy) nogil
+
+cdef void dlaic1(int *job, int *j, d *x, d *sest, d *w, d *gamma, d *sestpr, d *s, d *c) nogil
+
+cdef void dlaln2(bint *ltrans, int *na, int *nw, d *smin, d *ca, d *a, int *lda, d *d1, d *d2, d *b, int *ldb, d *wr, d *wi, d *x, int *ldx, d *scale, d *xnorm, int *info) nogil
+
+cdef void dlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *info) nogil
+
+cdef void dlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil
+
+cdef void dlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, d *b, int *ldb, d *rcond, int *rank, d *work, int *iwork, int *info) nogil
+
+# Value-returning query/helper routines: unlike the surrounding `void`
+# declarations these return their result directly (`d` for dlamch, `int`
+# for dlaneg) rather than writing through an out-pointer.
+cdef d dlamch(char *cmach) nogil
+
+cdef void dlamrg(int *n1, int *n2, d *a, int *dtrd1, int *dtrd2, int *index_bn) nogil
+
+cdef int dlaneg(int *n, d *d, d *lld, d *sigma, d *pivmin, int *r) nogil
+
+cdef d dlangb(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, d *work) nogil
+
+cdef d dlange(char *norm, int *m, int *n, d *a, int *lda, d *work) nogil
+
+cdef d dlangt(char *norm, int *n, d *dl, d *d, d *du) nogil
+
+cdef d dlanhs(char *norm, int *n, d *a, int *lda, d *work) nogil
+
+cdef d dlansb(char *norm, char *uplo, int *n, int *k, d *ab, int *ldab, d *work) nogil
+
+cdef d dlansf(char *norm, char *transr, char *uplo, int *n, d *a, d *work) nogil
+
+cdef d dlansp(char *norm, char *uplo, int *n, d *ap, d *work) nogil
+
+cdef d dlanst(char *norm, int *n, d *d, d *e) nogil
+
+cdef d dlansy(char *norm, char *uplo, int *n, d *a, int *lda, d *work) nogil
+
+cdef d dlantb(char *norm, char *uplo, char *diag, int *n, int *k, d *ab, int *ldab, d *work) nogil
+
+cdef d dlantp(char *norm, char *uplo, char *diag, int *n, d *ap, d *work) nogil
+
+cdef d dlantr(char *norm, char *uplo, char *diag, int *m, int *n, d *a, int *lda, d *work) nogil
+
+cdef void dlanv2(d *a, d *b, d *c, d *d, d *rt1r, d *rt1i, d *rt2r, d *rt2i, d *cs, d *sn) nogil
+
+cdef void dlapll(int *n, d *x, int *incx, d *y, int *incy, d *ssmin) nogil
+
+cdef void dlapmr(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil
+
+cdef void dlapmt(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil
+
+cdef d dlapy2(d *x, d *y) nogil
+
+cdef d dlapy3(d *x, d *y, d *z) nogil
+
+cdef void dlaqgb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void dlaqge(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void dlaqp2(int *m, int *n, int *offset, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *work) nogil
+
+cdef void dlaqps(int *m, int *n, int *offset, int *nb, int *kb, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *auxv, d *f, int *ldf) nogil
+
+cdef void dlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef void dlaqr1(int *n, d *h, int *ldh, d *sr1, d *si1, d *sr2, d *si2, d *v) nogil
+
+cdef void dlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil
+
+cdef void dlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil
+
+cdef void dlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef void dlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, d *sr, d *si, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, d *v, int *ldv, d *u, int *ldu, int *nv, d *wv, int *ldwv, int *nh, d *wh, int *ldwh) nogil
+
+cdef void dlaqsb(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void dlaqsp(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void dlaqsy(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void dlaqtr(bint *ltran, bint *lreal, int *n, d *t, int *ldt, d *b, d *w, d *scale, d *x, d *work, int *info) nogil
+
+cdef void dlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, d *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil
+
+cdef void dlar2v(int *n, d *x, d *y, d *z, int *incx, d *c, d *s, int *incc) nogil
+
+cdef void dlarf(char *side, int *m, int *n, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil
+
+cdef void dlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil
+
+cdef void dlarfg(int *n, d *alpha, d *x, int *incx, d *tau) nogil
+
+cdef void dlarfgp(int *n, d *alpha, d *x, int *incx, d *tau) nogil
+
+cdef void dlarft(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil
+
+cdef void dlarfx(char *side, int *m, int *n, d *v, d *tau, d *c, int *ldc, d *work) nogil
+
+cdef void dlargv(int *n, d *x, int *incx, d *y, int *incy, d *c, int *incc) nogil
+
+cdef void dlarnv(int *idist, int *iseed, int *n, d *x) nogil
+
+cdef void dlarra(int *n, d *d, d *e, d *e2, d *spltol, d *tnrm, int *nsplit, int *isplit, int *info) nogil
+
+cdef void dlarrb(int *n, d *d, d *lld, int *ifirst, int *ilast, d *rtol1, d *rtol2, int *offset, d *w, d *wgap, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *twist, int *info) nogil
+
+cdef void dlarrc(char *jobt, int *n, d *vl, d *vu, d *d, d *e, d *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil
+
+cdef void dlarrd(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *gers, d *reltol, d *d, d *e, d *e2, d *pivmin, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wl, d *wu, int *iblock, int *indexw, d *work, int *iwork, int *info) nogil
+
+cdef void dlarre(char *range, int *n, d *vl, d *vu, int *il, int *iu, d *d, d *e, d *e2, d *rtol1, d *rtol2, d *spltol, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *pivmin, d *work, int *iwork, int *info) nogil
+
+cdef void dlarrf(int *n, d *d, d *l, d *ld, int *clstrt, int *clend, d *w, d *wgap, d *werr, d *spdiam, d *clgapl, d *clgapr, d *pivmin, d *sigma, d *dplus, d *lplus, d *work, int *info) nogil
+
+cdef void dlarrj(int *n, d *d, d *e2, int *ifirst, int *ilast, d *rtol, int *offset, d *w, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *info) nogil
+
+cdef void dlarrk(int *n, int *iw, d *gl, d *gu, d *d, d *e2, d *pivmin, d *reltol, d *w, d *werr, int *info) nogil
+
+cdef void dlarrr(int *n, d *d, d *e, int *info) nogil
+
+cdef void dlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil
+
+cdef void dlartg(d *f, d *g, d *cs, d *sn, d *r) nogil
+
+cdef void dlartgp(d *f, d *g, d *cs, d *sn, d *r) nogil
+
+cdef void dlartgs(d *x, d *y, d *sigma, d *cs, d *sn) nogil
+
+cdef void dlartv(int *n, d *x, int *incx, d *y, int *incy, d *c, d *s, int *incc) nogil
+
+cdef void dlaruv(int *iseed, int *n, d *x) nogil
+
+cdef void dlarz(char *side, int *m, int *n, int *l, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil
+
+cdef void dlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil
+
+cdef void dlarzt(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil
+
+cdef void dlas2(d *f, d *g, d *h, d *ssmin, d *ssmax) nogil
+
+cdef void dlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dlasd0(int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, int *smlsiz, int *iwork, d *work, int *info) nogil
+
+cdef void dlasd1(int *nl, int *nr, int *sqre, d *d, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, int *idxq, int *iwork, d *work, int *info) nogil
+
+cdef void dlasd2(int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, d *dsigma, d *u2, int *ldu2, d *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil
+
+cdef void dlasd3(int *nl, int *nr, int *sqre, int *k, d *d, d *q, int *ldq, d *dsigma, d *u, int *ldu, d *u2, int *ldu2, d *vt, int *ldvt, d *vt2, int *ldvt2, int *idxc, int *ctot, d *z, int *info) nogil
+
+cdef void dlasd4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *sigma, d *work, int *info) nogil
+
+cdef void dlasd5(int *i, d *d, d *z, d *delta, d *rho, d *dsigma, d *work) nogil
+
+cdef void dlasd6(int *icompq, int *nl, int *nr, int *sqre, d *d, d *vf, d *vl, d *alpha, d *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *iwork, int *info) nogil
+
+cdef void dlasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *zw, d *vf, d *vfw, d *vl, d *vlw, d *alpha, d *beta, d *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *c, d *s, int *info) nogil
+
+cdef void dlasd8(int *icompq, int *k, d *d, d *z, d *vf, d *vl, d *difl, d *difr, int *lddifr, d *dsigma, d *work, int *info) nogil
+
+cdef void dlasda(int *icompq, int *smlsiz, int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil
+
+cdef void dlasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dlasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil
+
+cdef void dlaset(char *uplo, int *m, int *n, d *alpha, d *beta, d *a, int *lda) nogil
+
+cdef void dlasq1(int *n, d *d, d *e, d *work, int *info) nogil
+
+cdef void dlasq2(int *n, d *z, int *info) nogil
+
+cdef void dlasq3(int *i0, int *n0, d *z, int *pp, d *dmin, d *sigma, d *desig, d *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *g, d *tau) nogil
+
+cdef void dlasq4(int *i0, int *n0, d *z, int *pp, int *n0in, d *dmin, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *tau, int *ttype, d *g) nogil
+
+cdef void dlasq6(int *i0, int *n0, d *z, int *pp, d *dmin, d *dmin1, d *dmin2, d *dn, d *dnm1, d *dnm2) nogil
+
+cdef void dlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, d *a, int *lda) nogil
+
+cdef void dlasrt(char *id, int *n, d *d, int *info) nogil
+
+cdef void dlassq(int *n, d *x, int *incx, d *scale, d *sumsq) nogil
+
+cdef void dlasv2(d *f, d *g, d *h, d *ssmin, d *ssmax, d *snr, d *csr, d *snl, d *csl) nogil
+
+cdef void dlaswp(int *n, d *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void dlasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, d *tl, int *ldtl, d *tr, int *ldtr, d *b, int *ldb, d *scale, d *x, int *ldx, d *xnorm, int *info) nogil
+
+cdef void dlasyf(char *uplo, int *n, int *nb, int *kb, d *a, int *lda, int *ipiv, d *w, int *ldw, int *info) nogil
+
+cdef void dlat2s(char *uplo, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil
+
+cdef void dlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, d *ab, int *ldab, d *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void dlatdf(int *ijob, int *n, d *z, int *ldz, d *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void dlatps(char *uplo, char *trans, char *diag, char *normin, int *n, d *ap, d *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void dlatrd(char *uplo, int *n, int *nb, d *a, int *lda, d *e, d *tau, d *w, int *ldw) nogil
+
+cdef void dlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, d *a, int *lda, d *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void dlatrz(int *m, int *n, int *l, d *a, int *lda, d *tau, d *work) nogil
+
+cdef void dlauu2(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dlauum(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dopgtr(char *uplo, int *n, d *ap, d *tau, d *q, int *ldq, d *work, int *info) nogil
+
+cdef void dopmtr(char *side, char *uplo, char *trans, int *m, int *n, d *ap, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dorbdb(char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *phi, d *taup1, d *taup2, d *tauq1, d *tauq2, d *work, int *lwork, int *info) nogil
+
+cdef void dorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dorg2l(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorg2r(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorgbr(char *vect, int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorghr(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgl2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorglq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgql(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgqr(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgr2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorgrq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgtr(char *uplo, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorm2l(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dorm2r(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dorml2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormlq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormql(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormqr(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormr2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormr3(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormrq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormrz(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormtr(char *side, char *uplo, char *trans, int *m, int *n, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dpbcon(char *uplo, int *n, int *kd, d *ab, int *ldab, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dpbequ(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dpbrfs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpbstf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+
+cdef void dpbsv(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+
+cdef void dpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpbtf2(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+
+cdef void dpbtrf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+
+cdef void dpbtrs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+
+cdef void dpftrf(char *transr, char *uplo, int *n, d *a, int *info) nogil
+
+cdef void dpftri(char *transr, char *uplo, int *n, d *a, int *info) nogil
+
+cdef void dpftrs(char *transr, char *uplo, int *n, int *nrhs, d *a, d *b, int *ldb, int *info) nogil
+
+cdef void dpocon(char *uplo, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dpoequ(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dpoequb(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dporfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dposvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpotf2(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dpotrf(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dpotri(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dpotrs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dppcon(char *uplo, int *n, d *ap, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dppequ(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dpprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dppsv(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+
+cdef void dppsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpptrf(char *uplo, int *n, d *ap, int *info) nogil
+
+cdef void dpptri(char *uplo, int *n, d *ap, int *info) nogil
+
+cdef void dpptrs(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+
+cdef void dpstf2(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void dpstrf(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void dptcon(int *n, d *d, d *e, d *anorm, d *rcond, d *work, int *info) nogil
+
+cdef void dpteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dptrfs(int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *info) nogil
+
+cdef void dptsv(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil
+
+cdef void dptsvx(char *fact, int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *info) nogil
+
+cdef void dpttrf(int *n, d *d, d *e, int *info) nogil
+
+cdef void dpttrs(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil
+
+cdef void dptts2(int *n, int *nrhs, d *d, d *e, d *b, int *ldb) nogil
+
+cdef void drscl(int *n, d *sa, d *sx, int *incx) nogil
+
+cdef void dsbev(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dsbevd(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsbevx(char *jobz, char *range, char *uplo, int *n, int *kd, d *ab, int *ldab, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsbgst(char *vect, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *x, int *ldx, d *work, int *info) nogil
+
+cdef void dsbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dsbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsbtrd(char *vect, char *uplo, int *n, int *kd, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *work, int *info) nogil
+
+cdef void dsfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c) nogil
+
+cdef void dsgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil
+
+cdef void dspcon(char *uplo, int *n, d *ap, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dspev(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dspevd(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dspevx(char *jobz, char *range, char *uplo, int *n, d *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dspgst(int *itype, char *uplo, int *n, d *ap, d *bp, int *info) nogil
+
+cdef void dspgv(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dspgvd(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *ap, d *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil
+
+cdef void dsprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dspsv(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dspsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dsptrd(char *uplo, int *n, d *ap, d *d, d *e, d *tau, int *info) nogil
+
+cdef void dsptrf(char *uplo, int *n, d *ap, int *ipiv, int *info) nogil
+
+cdef void dsptri(char *uplo, int *n, d *ap, int *ipiv, d *work, int *info) nogil
+
+cdef void dsptrs(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dstebz(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *abstol, d *d, d *e, int *m, int *nsplit, d *w, int *iblock, int *isplit, d *work, int *iwork, int *info) nogil
+
+cdef void dstedc(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, d *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dsterf(int *n, d *d, d *e, int *info) nogil
+
+cdef void dstev(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dstevd(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstevr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstevx(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsycon(char *uplo, int *n, d *a, int *lda, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dsyconv(char *uplo, char *way, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil
+
+cdef void dsyequb(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, d *work, int *info) nogil
+
+cdef void dsyev(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *info) nogil
+
+cdef void dsyevd(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsyevr(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsyevx(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsygs2(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dsygst(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dsygv(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *info) nogil
+
+cdef void dsygvd(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsygvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsyrfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dsysv(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *lwork, int *info) nogil
+
+cdef void dsysvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dsyswapr(char *uplo, int *n, d *a, int *lda, int *i1, int *i2) nogil
+
+cdef void dsytd2(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, int *info) nogil
+
+cdef void dsytf2(char *uplo, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void dsytrd(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dsytrf(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+
+cdef void dsytri(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil
+
+cdef void dsytri2(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+
+cdef void dsytri2x(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *nb, int *info) nogil
+
+cdef void dsytrs(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dsytrs2(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *info) nogil
+
+cdef void dtbcon(char *norm, char *uplo, char *diag, int *n, int *kd, d *ab, int *ldab, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dtbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dtbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+
+cdef void dtfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, d *alpha, d *a, d *b, int *ldb) nogil
+
+cdef void dtftri(char *transr, char *uplo, char *diag, int *n, d *a, int *info) nogil
+
+cdef void dtfttp(char *transr, char *uplo, int *n, d *arf, d *ap, int *info) nogil
+
+cdef void dtfttr(char *transr, char *uplo, int *n, d *arf, d *a, int *lda, int *info) nogil
+
+cdef void dtgevc(char *side, char *howmny, bint *select, int *n, d *s, int *lds, d *p, int *ldp, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil
+
+cdef void dtgex2(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *j1, int *n1, int *n2, d *work, int *lwork, int *info) nogil
+
+cdef void dtgexc(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *ifst, int *ilst, d *work, int *lwork, int *info) nogil
+
+cdef void dtgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, int *m, d *pl, d *pr, d *dif, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dtgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, d *a, int *lda, d *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, d *u, int *ldu, d *v, int *ldv, d *q, int *ldq, d *work, int *ncycle, int *info) nogil
+
+cdef void dtgsna(char *job, char *howmny, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *dif, int *mm, int *m, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dtgsy2(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *iwork, int *pq, int *info) nogil
+
+cdef void dtgsyl(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *dif, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dtpcon(char *norm, char *uplo, char *diag, int *n, d *ap, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dtpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *info) nogil
+
+cdef void dtpqrt(int *m, int *n, int *l, int *nb, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, d *work, int *info) nogil
+
+cdef void dtpqrt2(int *m, int *n, int *l, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, int *info) nogil
+
+cdef void dtprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *ldwork) nogil
+
+cdef void dtprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dtptri(char *uplo, char *diag, int *n, d *ap, int *info) nogil
+
+cdef void dtptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+
+cdef void dtpttf(char *transr, char *uplo, int *n, d *ap, d *arf, int *info) nogil
+
+cdef void dtpttr(char *uplo, int *n, d *ap, d *a, int *lda, int *info) nogil
+
+cdef void dtrcon(char *norm, char *uplo, char *diag, int *n, d *a, int *lda, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dtrevc(char *side, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil
+
+cdef void dtrexc(char *compq, int *n, d *t, int *ldt, d *q, int *ldq, int *ifst, int *ilst, d *work, int *info) nogil
+
+cdef void dtrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dtrsen(char *job, char *compq, bint *select, int *n, d *t, int *ldt, d *q, int *ldq, d *wr, d *wi, int *m, d *s, d *sep, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dtrsna(char *job, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *sep, int *mm, int *m, d *work, int *ldwork, int *iwork, int *info) nogil
+
+cdef void dtrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *scale, int *info) nogil
+
+cdef void dtrti2(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dtrtri(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dtrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dtrttf(char *transr, char *uplo, int *n, d *a, int *lda, d *arf, int *info) nogil
+
+cdef void dtrttp(char *uplo, int *n, d *a, int *lda, d *ap, int *info) nogil
+
+cdef void dtzrzf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef d dzsum1(int *n, z *cx, int *incx) nogil
+
+cdef int icmax1(int *n, c *cx, int *incx) nogil
+
+cdef int ieeeck(int *ispec, s *zero, s *one) nogil
+
+cdef int ilaclc(int *m, int *n, c *a, int *lda) nogil
+
+cdef int ilaclr(int *m, int *n, c *a, int *lda) nogil
+
+cdef int iladiag(char *diag) nogil
+
+cdef int iladlc(int *m, int *n, d *a, int *lda) nogil
+
+cdef int iladlr(int *m, int *n, d *a, int *lda) nogil
+
+cdef int ilaprec(char *prec) nogil
+
+cdef int ilaslc(int *m, int *n, s *a, int *lda) nogil
+
+cdef int ilaslr(int *m, int *n, s *a, int *lda) nogil
+
+cdef int ilatrans(char *trans) nogil
+
+cdef int ilauplo(char *uplo) nogil
+
+cdef void ilaver(int *vers_major, int *vers_minor, int *vers_patch) nogil
+
+cdef int ilazlc(int *m, int *n, z *a, int *lda) nogil
+
+cdef int ilazlr(int *m, int *n, z *a, int *lda) nogil
+
+cdef int izmax1(int *n, z *cx, int *incx) nogil
+
+cdef void sbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *work, int *lwork, int *info) nogil
+
+cdef void sbdsdc(char *uplo, char *compq, int *n, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, s *q, int *iq, s *work, int *iwork, int *info) nogil
+
+cdef void sbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil
+
+cdef s scsum1(int *n, c *cx, int *incx) nogil
+
+cdef void sdisna(char *job, int *m, int *n, s *d, s *sep, int *info) nogil
+
+cdef void sgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *pt, int *ldpt, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sgbcon(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sgbequ(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgbequb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgbsv(int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgbtf2(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void sgbtrf(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void sgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, s *v, int *ldv, int *info) nogil
+
+cdef void sgebal(char *job, int *n, s *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil
+
+cdef void sgebd2(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *info) nogil
+
+cdef void sgebrd(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *lwork, int *info) nogil
+
+cdef void sgecon(char *norm, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sgeequ(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgeequb(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void sgeev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil
+
+cdef void sgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgehd2(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgehrd(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, s *a, int *lda, s *sva, s *u, int *ldu, s *v, int *ldv, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgelq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgelqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgels(char *trans, int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *work, int *lwork, int *info) nogil
+
+cdef void sgelsd(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgelss(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *info) nogil
+
+cdef void sgelsy(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *jpvt, s *rcond, int *rank, s *work, int *lwork, int *info) nogil
+
+cdef void sgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sgeql2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgeqlf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqp3(int *m, int *n, s *a, int *lda, int *jpvt, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqr2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgeqr2p(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgeqrf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqrfp(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqrt(int *m, int *n, int *nb, s *a, int *lda, s *t, int *ldt, s *work, int *info) nogil
+
+cdef void sgeqrt2(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil
+
+cdef void sgeqrt3(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil
+
+cdef void sgerfs(char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgerq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgerqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgesc2(int *n, s *a, int *lda, s *rhs, int *ipiv, int *jpiv, s *scale) nogil
+
+cdef void sgesdd(char *jobz, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgesv(int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgesvd(char *jobu, char *jobvt, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *info) nogil
+
+cdef void sgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, s *a, int *lda, s *sva, int *mv, s *v, int *ldv, s *work, int *lwork, int *info) nogil
+
+cdef void sgesvx(char *fact, char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgetc2(int *n, s *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void sgetf2(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void sgetrf(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void sgetri(int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+
+cdef void sgetrs(char *trans, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, s *v, int *ldv, int *info) nogil
+
+cdef void sggbal(char *job, int *n, s *a, int *lda, s *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil
+
+cdef void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void sggev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil
+
+cdef void sggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void sggglm(int *n, int *m, int *p, s *a, int *lda, s *b, int *ldb, s *d, s *x, s *y, s *work, int *lwork, int *info) nogil
+
+cdef void sgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *info) nogil
+
+cdef void sgglse(int *m, int *n, int *p, s *a, int *lda, s *b, int *ldb, s *c, s *d, s *x, s *work, int *lwork, int *info) nogil
+
+cdef void sggqrf(int *n, int *m, int *p, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil
+
+cdef void sggrqf(int *m, int *p, int *n, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil
+
+cdef void sgsvj0(char *jobv, int *m, int *n, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil
+
+cdef void sgsvj1(char *jobv, int *m, int *n, int *n1, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil
+
+cdef void sgtcon(char *norm, int *n, s *dl, s *d, s *du, s *du2, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sgtrfs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgtsv(int *n, int *nrhs, s *dl, s *d, s *du, s *b, int *ldb, int *info) nogil
+
+cdef void sgtsvx(char *fact, char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgttrf(int *n, s *dl, s *d, s *du, s *du2, int *ipiv, int *info) nogil
+
+cdef void sgttrs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgtts2(int *itrans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb) nogil
+
+cdef void shgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *t, int *ldt, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void shsein(char *side, char *eigsrc, char *initv, bint *select, int *n, s *h, int *ldh, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void shseqr(char *job, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void slabad(s *small, s *large) nogil
+
+cdef void slabrd(int *m, int *n, int *nb, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *x, int *ldx, s *y, int *ldy) nogil
+
+cdef void slacn2(int *n, s *v, s *x, int *isgn, s *est, int *kase, int *isave) nogil
+
+cdef void slacon(int *n, s *v, s *x, int *isgn, s *est, int *kase) nogil
+
+cdef void slacpy(char *uplo, int *m, int *n, s *a, int *lda, s *b, int *ldb) nogil
+
+cdef void sladiv(s *a, s *b, s *c, s *d, s *p, s *q) nogil
+
+cdef void slae2(s *a, s *b, s *c, s *rt1, s *rt2) nogil
+
+cdef void slaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, s *abstol, s *reltol, s *pivmin, s *d, s *e, s *e2, int *nval, s *ab, s *c, int *mout, int *nab, s *work, int *iwork, int *info) nogil
+
+cdef void slaed0(int *icompq, int *qsiz, int *n, s *d, s *e, s *q, int *ldq, s *qstore, int *ldqs, s *work, int *iwork, int *info) nogil
+
+cdef void slaed1(int *n, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *work, int *iwork, int *info) nogil
+
+cdef void slaed2(int *k, int *n, int *n1, s *d, s *q, int *ldq, int *indxq, s *rho, s *z, s *dlamda, s *w, s *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil
+
+cdef void slaed3(int *k, int *n, int *n1, s *d, s *q, int *ldq, s *rho, s *dlamda, s *q2, int *indx, int *ctot, s *w, s *s, int *info) nogil
+
+cdef void slaed4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *dlam, int *info) nogil
+
+cdef void slaed5(int *i, s *d, s *z, s *delta, s *rho, s *dlam) nogil
+
+cdef void slaed6(int *kniter, bint *orgati, s *rho, s *d, s *z, s *finit, s *tau, int *info) nogil
+
+cdef void slaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *work, int *iwork, int *info) nogil
+
+cdef void slaed8(int *icompq, int *k, int *n, int *qsiz, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *z, s *dlamda, s *q2, int *ldq2, s *w, int *perm, int *givptr, int *givcol, s *givnum, int *indxp, int *indx, int *info) nogil
+
+cdef void slaed9(int *k, int *kstart, int *kstop, int *n, s *d, s *q, int *ldq, s *rho, s *dlamda, s *w, s *s, int *lds, int *info) nogil
+
+cdef void slaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *q, int *qptr, s *z, s *ztemp, int *info) nogil
+
+cdef void slaein(bint *rightv, bint *noinit, int *n, s *h, int *ldh, s *wr, s *wi, s *vr, s *vi, s *b, int *ldb, s *work, s *eps3, s *smlnum, s *bignum, int *info) nogil
+
+cdef void slaev2(s *a, s *b, s *c, s *rt1, s *rt2, s *cs1, s *sn1) nogil
+
+cdef void slaexc(bint *wantq, int *n, s *t, int *ldt, s *q, int *ldq, int *j1, int *n1, int *n2, s *work, int *info) nogil
+
+cdef void slag2(s *a, int *lda, s *b, int *ldb, s *safmin, s *scale1, s *scale2, s *wr1, s *wr2, s *wi) nogil
+
+cdef void slag2d(int *m, int *n, s *sa, int *ldsa, d *a, int *lda, int *info) nogil
+
+cdef void slags2(bint *upper, s *a1, s *a2, s *a3, s *b1, s *b2, s *b3, s *csu, s *snu, s *csv, s *snv, s *csq, s *snq) nogil
+
+cdef void slagtf(int *n, s *a, s *lambda_, s *b, s *c, s *tol, s *d, int *in_, int *info) nogil
+
+cdef void slagtm(char *trans, int *n, int *nrhs, s *alpha, s *dl, s *d, s *du, s *x, int *ldx, s *beta, s *b, int *ldb) nogil
+
+cdef void slagts(int *job, int *n, s *a, s *b, s *c, s *d, int *in_, s *y, s *tol, int *info) nogil
+
+cdef void slagv2(s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *csl, s *snl, s *csr, s *snr) nogil
+
+cdef void slahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, int *info) nogil
+
+cdef void slahr2(int *n, int *k, int *nb, s *a, int *lda, s *tau, s *t, int *ldt, s *y, int *ldy) nogil
+
+cdef void slaic1(int *job, int *j, s *x, s *sest, s *w, s *gamma, s *sestpr, s *s, s *c) nogil
+
+cdef void slaln2(bint *ltrans, int *na, int *nw, s *smin, s *ca, s *a, int *lda, s *d1, s *d2, s *b, int *ldb, s *wr, s *wi, s *x, int *ldx, s *scale, s *xnorm, int *info) nogil
+
+cdef void slals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *info) nogil
+
+cdef void slalsa(int *icompq, int *smlsiz, int *n, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil
+
+cdef void slalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, s *b, int *ldb, s *rcond, int *rank, s *work, int *iwork, int *info) nogil
+
+cdef s slamch(char *cmach) nogil
+
+cdef void slamrg(int *n1, int *n2, s *a, int *strd1, int *strd2, int *index_bn) nogil
+
+cdef s slangb(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, s *work) nogil
+
+cdef s slange(char *norm, int *m, int *n, s *a, int *lda, s *work) nogil
+
+cdef s slangt(char *norm, int *n, s *dl, s *d, s *du) nogil
+
+cdef s slanhs(char *norm, int *n, s *a, int *lda, s *work) nogil
+
+cdef s slansb(char *norm, char *uplo, int *n, int *k, s *ab, int *ldab, s *work) nogil
+
+cdef s slansf(char *norm, char *transr, char *uplo, int *n, s *a, s *work) nogil
+
+cdef s slansp(char *norm, char *uplo, int *n, s *ap, s *work) nogil
+
+cdef s slanst(char *norm, int *n, s *d, s *e) nogil
+
+cdef s slansy(char *norm, char *uplo, int *n, s *a, int *lda, s *work) nogil
+
+cdef s slantb(char *norm, char *uplo, char *diag, int *n, int *k, s *ab, int *ldab, s *work) nogil
+
+cdef s slantp(char *norm, char *uplo, char *diag, int *n, s *ap, s *work) nogil
+
+cdef s slantr(char *norm, char *uplo, char *diag, int *m, int *n, s *a, int *lda, s *work) nogil
+
+cdef void slanv2(s *a, s *b, s *c, s *d, s *rt1r, s *rt1i, s *rt2r, s *rt2i, s *cs, s *sn) nogil
+
+cdef void slapll(int *n, s *x, int *incx, s *y, int *incy, s *ssmin) nogil
+
+cdef void slapmr(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil
+
+cdef void slapmt(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil
+
+cdef s slapy2(s *x, s *y) nogil
+
+cdef s slapy3(s *x, s *y, s *z) nogil
+
+cdef void slaqgb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void slaqge(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void slaqp2(int *m, int *n, int *offset, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *work) nogil
+
+cdef void slaqps(int *m, int *n, int *offset, int *nb, int *kb, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *auxv, s *f, int *ldf) nogil
+
+cdef void slaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void slaqr1(int *n, s *h, int *ldh, s *sr1, s *si1, s *sr2, s *si2, s *v) nogil
+
+cdef void slaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil
+
+cdef void slaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil
+
+cdef void slaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void slaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, s *sr, s *si, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, s *v, int *ldv, s *u, int *ldu, int *nv, s *wv, int *ldwv, int *nh, s *wh, int *ldwh) nogil
+
+cdef void slaqsb(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void slaqsp(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void slaqsy(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void slaqtr(bint *ltran, bint *lreal, int *n, s *t, int *ldt, s *b, s *w, s *scale, s *x, s *work, int *info) nogil
+
+cdef void slar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, s *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil
+
+cdef void slar2v(int *n, s *x, s *y, s *z, int *incx, s *c, s *s, int *incc) nogil
+
+cdef void slarf(char *side, int *m, int *n, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil
+
+cdef void slarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil
+
+cdef void slarfg(int *n, s *alpha, s *x, int *incx, s *tau) nogil
+
+cdef void slarfgp(int *n, s *alpha, s *x, int *incx, s *tau) nogil
+
+cdef void slarft(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil
+
+cdef void slarfx(char *side, int *m, int *n, s *v, s *tau, s *c, int *ldc, s *work) nogil
+
+cdef void slargv(int *n, s *x, int *incx, s *y, int *incy, s *c, int *incc) nogil
+
+cdef void slarnv(int *idist, int *iseed, int *n, s *x) nogil
+
+cdef void slarra(int *n, s *d, s *e, s *e2, s *spltol, s *tnrm, int *nsplit, int *isplit, int *info) nogil
+
+cdef void slarrb(int *n, s *d, s *lld, int *ifirst, int *ilast, s *rtol1, s *rtol2, int *offset, s *w, s *wgap, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *twist, int *info) nogil
+
+cdef void slarrc(char *jobt, int *n, s *vl, s *vu, s *d, s *e, s *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil
+
+cdef void slarrd(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *gers, s *reltol, s *d, s *e, s *e2, s *pivmin, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wl, s *wu, int *iblock, int *indexw, s *work, int *iwork, int *info) nogil
+
+cdef void slarre(char *range, int *n, s *vl, s *vu, int *il, int *iu, s *d, s *e, s *e2, s *rtol1, s *rtol2, s *spltol, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *pivmin, s *work, int *iwork, int *info) nogil
+
+cdef void slarrf(int *n, s *d, s *l, s *ld, int *clstrt, int *clend, s *w, s *wgap, s *werr, s *spdiam, s *clgapl, s *clgapr, s *pivmin, s *sigma, s *dplus, s *lplus, s *work, int *info) nogil
+
+cdef void slarrj(int *n, s *d, s *e2, int *ifirst, int *ilast, s *rtol, int *offset, s *w, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *info) nogil
+
+cdef void slarrk(int *n, int *iw, s *gl, s *gu, s *d, s *e2, s *pivmin, s *reltol, s *w, s *werr, int *info) nogil
+
+cdef void slarrr(int *n, s *d, s *e, int *info) nogil
+
+cdef void slarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil
+
+cdef void slartg(s *f, s *g, s *cs, s *sn, s *r) nogil
+
+cdef void slartgp(s *f, s *g, s *cs, s *sn, s *r) nogil
+
+cdef void slartgs(s *x, s *y, s *sigma, s *cs, s *sn) nogil
+
+cdef void slartv(int *n, s *x, int *incx, s *y, int *incy, s *c, s *s, int *incc) nogil
+
+cdef void slaruv(int *iseed, int *n, s *x) nogil
+
+cdef void slarz(char *side, int *m, int *n, int *l, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil
+
+cdef void slarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil
+
+cdef void slarzt(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil
+
+cdef void slas2(s *f, s *g, s *h, s *ssmin, s *ssmax) nogil
+
+cdef void slascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, s *a, int *lda, int *info) nogil
+
+cdef void slasd0(int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, int *smlsiz, int *iwork, s *work, int *info) nogil
+
+cdef void slasd1(int *nl, int *nr, int *sqre, s *d, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, int *idxq, int *iwork, s *work, int *info) nogil
+
+cdef void slasd2(int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, s *dsigma, s *u2, int *ldu2, s *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil
+
+cdef void slasd3(int *nl, int *nr, int *sqre, int *k, s *d, s *q, int *ldq, s *dsigma, s *u, int *ldu, s *u2, int *ldu2, s *vt, int *ldvt, s *vt2, int *ldvt2, int *idxc, int *ctot, s *z, int *info) nogil
+
+cdef void slasd4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *sigma, s *work, int *info) nogil
+
+cdef void slasd5(int *i, s *d, s *z, s *delta, s *rho, s *dsigma, s *work) nogil
+
+cdef void slasd6(int *icompq, int *nl, int *nr, int *sqre, s *d, s *vf, s *vl, s *alpha, s *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *iwork, int *info) nogil
+
+cdef void slasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *zw, s *vf, s *vfw, s *vl, s *vlw, s *alpha, s *beta, s *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *c, s *s, int *info) nogil
+
+cdef void slasd8(int *icompq, int *k, s *d, s *z, s *vf, s *vl, s *difl, s *difr, int *lddifr, s *dsigma, s *work, int *info) nogil
+
+cdef void slasda(int *icompq, int *smlsiz, int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil
+
+cdef void slasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void slasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil
+
+cdef void slaset(char *uplo, int *m, int *n, s *alpha, s *beta, s *a, int *lda) nogil
+
+cdef void slasq1(int *n, s *d, s *e, s *work, int *info) nogil
+
+cdef void slasq2(int *n, s *z, int *info) nogil
+
+cdef void slasq3(int *i0, int *n0, s *z, int *pp, s *dmin, s *sigma, s *desig, s *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *g, s *tau) nogil
+
+cdef void slasq4(int *i0, int *n0, s *z, int *pp, int *n0in, s *dmin, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *tau, int *ttype, s *g) nogil
+
+cdef void slasq6(int *i0, int *n0, s *z, int *pp, s *dmin, s *dmin1, s *dmin2, s *dn, s *dnm1, s *dnm2) nogil
+
+cdef void slasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, s *a, int *lda) nogil
+
+cdef void slasrt(char *id, int *n, s *d, int *info) nogil
+
+cdef void slassq(int *n, s *x, int *incx, s *scale, s *sumsq) nogil
+
+cdef void slasv2(s *f, s *g, s *h, s *ssmin, s *ssmax, s *snr, s *csr, s *snl, s *csl) nogil
+
+cdef void slaswp(int *n, s *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void slasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, s *tl, int *ldtl, s *tr, int *ldtr, s *b, int *ldb, s *scale, s *x, int *ldx, s *xnorm, int *info) nogil
+
+cdef void slasyf(char *uplo, int *n, int *nb, int *kb, s *a, int *lda, int *ipiv, s *w, int *ldw, int *info) nogil
+
+cdef void slatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, s *ab, int *ldab, s *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void slatdf(int *ijob, int *n, s *z, int *ldz, s *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void slatps(char *uplo, char *trans, char *diag, char *normin, int *n, s *ap, s *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void slatrd(char *uplo, int *n, int *nb, s *a, int *lda, s *e, s *tau, s *w, int *ldw) nogil
+
+cdef void slatrs(char *uplo, char *trans, char *diag, char *normin, int *n, s *a, int *lda, s *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void slatrz(int *m, int *n, int *l, s *a, int *lda, s *tau, s *work) nogil
+
+cdef void slauu2(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void slauum(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void sopgtr(char *uplo, int *n, s *ap, s *tau, s *q, int *ldq, s *work, int *info) nogil
+
+cdef void sopmtr(char *side, char *uplo, char *trans, int *m, int *n, s *ap, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sorbdb(char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *phi, s *taup1, s *taup2, s *tauq1, s *tauq2, s *work, int *lwork, int *info) nogil
+
+cdef void sorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sorg2l(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorg2r(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorgbr(char *vect, int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorghr(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgl2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorglq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgql(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgqr(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgr2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorgrq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgtr(char *uplo, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorm2l(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sorm2r(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sorml2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormlq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormql(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormqr(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormr2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormr3(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormrq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormrz(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormtr(char *side, char *uplo, char *trans, int *m, int *n, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void spbcon(char *uplo, int *n, int *kd, s *ab, int *ldab, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void spbequ(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void spbrfs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spbstf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+
+cdef void spbsv(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+
+cdef void spbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spbtf2(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+
+cdef void spbtrf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+
+cdef void spbtrs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+
+cdef void spftrf(char *transr, char *uplo, int *n, s *a, int *info) nogil
+
+cdef void spftri(char *transr, char *uplo, int *n, s *a, int *info) nogil
+
+cdef void spftrs(char *transr, char *uplo, int *n, int *nrhs, s *a, s *b, int *ldb, int *info) nogil
+
+cdef void spocon(char *uplo, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void spoequ(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void spoequb(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void sporfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sposv(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void sposvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spotf2(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void spotrf(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void spotri(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void spotrs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void sppcon(char *uplo, int *n, s *ap, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sppequ(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void spprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sppsv(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+
+cdef void sppsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spptrf(char *uplo, int *n, s *ap, int *info) nogil
+
+cdef void spptri(char *uplo, int *n, s *ap, int *info) nogil
+
+cdef void spptrs(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+
+cdef void spstf2(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void spstrf(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void sptcon(int *n, s *d, s *e, s *anorm, s *rcond, s *work, int *info) nogil
+
+cdef void spteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sptrfs(int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *info) nogil
+
+cdef void sptsv(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil
+
+cdef void sptsvx(char *fact, int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *info) nogil
+
+cdef void spttrf(int *n, s *d, s *e, int *info) nogil
+
+cdef void spttrs(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil
+
+cdef void sptts2(int *n, int *nrhs, s *d, s *e, s *b, int *ldb) nogil
+
+cdef void srscl(int *n, s *sa, s *sx, int *incx) nogil
+
+cdef void ssbev(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void ssbevd(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssbevx(char *jobz, char *range, char *uplo, int *n, int *kd, s *ab, int *ldab, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssbgst(char *vect, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *x, int *ldx, s *work, int *info) nogil
+
+cdef void ssbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void ssbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssbtrd(char *vect, char *uplo, int *n, int *kd, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *work, int *info) nogil
+
+cdef void ssfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c) nogil
+
+cdef void sspcon(char *uplo, int *n, s *ap, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sspev(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sspevd(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sspevx(char *jobz, char *range, char *uplo, int *n, s *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void sspgst(int *itype, char *uplo, int *n, s *ap, s *bp, int *info) nogil
+
+cdef void sspgv(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sspgvd(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *ap, s *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sspsv(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sspsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void ssptrd(char *uplo, int *n, s *ap, s *d, s *e, s *tau, int *info) nogil
+
+cdef void ssptrf(char *uplo, int *n, s *ap, int *ipiv, int *info) nogil
+
+cdef void ssptri(char *uplo, int *n, s *ap, int *ipiv, s *work, int *info) nogil
+
+cdef void ssptrs(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sstebz(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *abstol, s *d, s *e, int *m, int *nsplit, s *w, int *iblock, int *isplit, s *work, int *iwork, int *info) nogil
+
+cdef void sstedc(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void sstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, s *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void ssterf(int *n, s *d, s *e, int *info) nogil
+
+cdef void sstev(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sstevd(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstevr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstevx(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssycon(char *uplo, int *n, s *a, int *lda, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void ssyconv(char *uplo, char *way, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil
+
+cdef void ssyequb(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, s *work, int *info) nogil
+
+cdef void ssyev(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *info) nogil
+
+cdef void ssyevd(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssyevr(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssyevx(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssygs2(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void ssygst(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void ssygv(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *info) nogil
+
+cdef void ssygvd(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssygvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssyrfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void ssysv(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *lwork, int *info) nogil
+
+cdef void ssysvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ssyswapr(char *uplo, int *n, s *a, int *lda, int *i1, int *i2) nogil
+
+cdef void ssytd2(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, int *info) nogil
+
+cdef void ssytf2(char *uplo, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void ssytrd(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void ssytrf(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+
+cdef void ssytri(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil
+
+cdef void ssytri2(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+
+cdef void ssytri2x(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *nb, int *info) nogil
+
+cdef void ssytrs(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void ssytrs2(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *info) nogil
+
+cdef void stbcon(char *norm, char *uplo, char *diag, int *n, int *kd, s *ab, int *ldab, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void stbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void stbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+
+cdef void stfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, s *alpha, s *a, s *b, int *ldb) nogil
+
+cdef void stftri(char *transr, char *uplo, char *diag, int *n, s *a, int *info) nogil
+
+cdef void stfttp(char *transr, char *uplo, int *n, s *arf, s *ap, int *info) nogil
+
+cdef void stfttr(char *transr, char *uplo, int *n, s *arf, s *a, int *lda, int *info) nogil
+
+cdef void stgevc(char *side, char *howmny, bint *select, int *n, s *s, int *lds, s *p, int *ldp, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil
+
+cdef void stgex2(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *j1, int *n1, int *n2, s *work, int *lwork, int *info) nogil
+
+cdef void stgexc(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *ifst, int *ilst, s *work, int *lwork, int *info) nogil
+
+cdef void stgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, int *m, s *pl, s *pr, s *dif, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void stgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, s *a, int *lda, s *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, s *u, int *ldu, s *v, int *ldv, s *q, int *ldq, s *work, int *ncycle, int *info) nogil
+
+cdef void stgsna(char *job, char *howmny, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *dif, int *mm, int *m, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void stgsy2(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *iwork, int *pq, int *info) nogil
+
+cdef void stgsyl(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *dif, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void stpcon(char *norm, char *uplo, char *diag, int *n, s *ap, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void stpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *info) nogil
+
+cdef void stpqrt(int *m, int *n, int *l, int *nb, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, s *work, int *info) nogil
+
+cdef void stpqrt2(int *m, int *n, int *l, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, int *info) nogil
+
+cdef void stprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *ldwork) nogil
+
+cdef void stprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void stptri(char *uplo, char *diag, int *n, s *ap, int *info) nogil
+
+cdef void stptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+
+cdef void stpttf(char *transr, char *uplo, int *n, s *ap, s *arf, int *info) nogil
+
+cdef void stpttr(char *uplo, int *n, s *ap, s *a, int *lda, int *info) nogil
+
+cdef void strcon(char *norm, char *uplo, char *diag, int *n, s *a, int *lda, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void strevc(char *side, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil
+
+cdef void strexc(char *compq, int *n, s *t, int *ldt, s *q, int *ldq, int *ifst, int *ilst, s *work, int *info) nogil
+
+cdef void strrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void strsen(char *job, char *compq, bint *select, int *n, s *t, int *ldt, s *q, int *ldq, s *wr, s *wi, int *m, s *s, s *sep, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void strsna(char *job, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *sep, int *mm, int *m, s *work, int *ldwork, int *iwork, int *info) nogil
+
+cdef void strsyl(char *trana, char *tranb, int *isgn, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *scale, int *info) nogil
+
+cdef void strti2(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil
+
+cdef void strtri(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil
+
+cdef void strtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void strttf(char *transr, char *uplo, int *n, s *a, int *lda, s *arf, int *info) nogil
+
+cdef void strttp(char *uplo, int *n, s *a, int *lda, s *ap, int *info) nogil
+
+cdef void stzrzf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void xerbla_array(char *srname_array, int *srname_len, int *info) nogil
+
+cdef void zbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *rwork, int *lrwork, int *info) nogil
+
+cdef void zbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, z *vt, int *ldvt, z *u, int *ldu, z *c, int *ldc, d *rwork, int *info) nogil
+
+cdef void zcgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil
+
+cdef void zcposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil
+
+cdef void zdrscl(int *n, d *sa, z *sx, int *incx) nogil
+
+cdef void zgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *pt, int *ldpt, z *c, int *ldc, z *work, d *rwork, int *info) nogil
+
+cdef void zgbcon(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zgbequ(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgbequb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgbsv(int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgbtf2(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void zgbtrf(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void zgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, z *v, int *ldv, int *info) nogil
+
+cdef void zgebal(char *job, int *n, z *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil
+
+cdef void zgebd2(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *info) nogil
+
+cdef void zgebrd(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *lwork, int *info) nogil
+
+cdef void zgecon(char *norm, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zgeequ(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgeequb(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+
+cdef void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+
+cdef void zgeev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgehd2(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgehrd(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgelq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgelqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgels(char *trans, int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *work, int *lwork, int *info) nogil
+
+cdef void zgelsd(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil
+
+cdef void zgelss(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgelsy(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *jpvt, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zgeql2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgeqlf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgeqp3(int *m, int *n, z *a, int *lda, int *jpvt, z *tau, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgeqr2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgeqr2p(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgeqrf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgeqrfp(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgeqrt(int *m, int *n, int *nb, z *a, int *lda, z *t, int *ldt, z *work, int *info) nogil
+
+cdef void zgeqrt2(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil
+
+cdef void zgeqrt3(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil
+
+cdef void zgerfs(char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgerq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgerqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgesc2(int *n, z *a, int *lda, z *rhs, int *ipiv, int *jpiv, d *scale) nogil
+
+cdef void zgesdd(char *jobz, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil
+
+cdef void zgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgesvd(char *jobu, char *jobvt, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgesvx(char *fact, char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgetc2(int *n, z *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void zgetf2(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zgetrf(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zgetri(int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zgetrs(char *trans, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, z *v, int *ldv, int *info) nogil
+
+cdef void zggbal(char *job, int *n, z *a, int *lda, z *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil
+
+cdef void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+
+cdef void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void zggev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void zggglm(int *n, int *m, int *p, z *a, int *lda, z *b, int *ldb, z *d, z *x, z *y, z *work, int *lwork, int *info) nogil
+
+cdef void zgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *info) nogil
+
+cdef void zgglse(int *m, int *n, int *p, z *a, int *lda, z *b, int *ldb, z *c, z *d, z *x, z *work, int *lwork, int *info) nogil
+
+cdef void zggqrf(int *n, int *m, int *p, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil
+
+cdef void zggrqf(int *m, int *p, int *n, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil
+
+cdef void zgtcon(char *norm, int *n, z *dl, z *d, z *du, z *du2, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zgtrfs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgtsv(int *n, int *nrhs, z *dl, z *d, z *du, z *b, int *ldb, int *info) nogil
+
+cdef void zgtsvx(char *fact, char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgttrf(int *n, z *dl, z *d, z *du, z *du2, int *ipiv, int *info) nogil
+
+cdef void zgttrs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgtts2(int *itrans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb) nogil
+
+cdef void zhbev(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhbevd(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhbevx(char *jobz, char *range, char *uplo, int *n, int *kd, z *ab, int *ldab, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhbgst(char *vect, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *x, int *ldx, z *work, d *rwork, int *info) nogil
+
+cdef void zhbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhbtrd(char *vect, char *uplo, int *n, int *kd, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *work, int *info) nogil
+
+cdef void zhecon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zheequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil
+
+cdef void zheev(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zheevd(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zheevr(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zheevx(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhegs2(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zhegst(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zhegv(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zhegvd(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhegvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zherfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zhesv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil
+
+cdef void zhesvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zheswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil
+
+cdef void zhetd2(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, int *info) nogil
+
+cdef void zhetf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zhetrd(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zhetrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zhetri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil
+
+cdef void zhetri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zhetri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil
+
+cdef void zhetrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zhetrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil
+
+cdef void zhfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c) nogil
+
+cdef void zhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *t, int *ldt, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zhpcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zhpev(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhpevd(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhpevx(char *jobz, char *range, char *uplo, int *n, z *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhpgst(int *itype, char *uplo, int *n, z *ap, z *bp, int *info) nogil
+
+cdef void zhpgv(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhpgvd(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *ap, z *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zhpsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zhpsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zhptrd(char *uplo, int *n, z *ap, d *d, d *e, z *tau, int *info) nogil
+
+cdef void zhptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil
+
+cdef void zhptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil
+
+cdef void zhptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, z *h, int *ldh, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void zhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, z *z, int *ldz, z *work, int *lwork, int *info) nogil
+
+cdef void zlabrd(int *m, int *n, int *nb, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *x, int *ldx, z *y, int *ldy) nogil
+
+cdef void zlacgv(int *n, z *x, int *incx) nogil
+
+cdef void zlacn2(int *n, z *v, z *x, d *est, int *kase, int *isave) nogil
+
+cdef void zlacon(int *n, z *v, z *x, d *est, int *kase) nogil
+
+cdef void zlacp2(char *uplo, int *m, int *n, d *a, int *lda, z *b, int *ldb) nogil
+
+cdef void zlacpy(char *uplo, int *m, int *n, z *a, int *lda, z *b, int *ldb) nogil
+
+cdef void zlacrm(int *m, int *n, z *a, int *lda, d *b, int *ldb, z *c, int *ldc, d *rwork) nogil
+
+cdef void zlacrt(int *n, z *cx, int *incx, z *cy, int *incy, z *c, z *s) nogil
+
+cdef z zladiv(z *x, z *y) nogil
+
+cdef void zlaed0(int *qsiz, int *n, d *d, d *e, z *q, int *ldq, z *qstore, int *ldqs, d *rwork, int *iwork, int *info) nogil
+
+cdef void zlaed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, z *q, int *ldq, d *rho, int *indxq, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, z *work, d *rwork, int *iwork, int *info) nogil
+
+cdef void zlaed8(int *k, int *n, int *qsiz, z *q, int *ldq, d *d, d *rho, int *cutpnt, d *z, d *dlamda, z *q2, int *ldq2, d *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, d *givnum, int *info) nogil
+
+cdef void zlaein(bint *rightv, bint *noinit, int *n, z *h, int *ldh, z *w, z *v, z *b, int *ldb, d *rwork, d *eps3, d *smlnum, int *info) nogil
+
+cdef void zlaesy(z *a, z *b, z *c, z *rt1, z *rt2, z *evscal, z *cs1, z *sn1) nogil
+
+cdef void zlaev2(z *a, z *b, z *c, d *rt1, d *rt2, d *cs1, z *sn1) nogil
+
+cdef void zlag2c(int *m, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil
+
+cdef void zlags2(bint *upper, d *a1, z *a2, d *a3, d *b1, z *b2, d *b3, d *csu, z *snu, d *csv, z *snv, d *csq, z *snq) nogil
+
+cdef void zlagtm(char *trans, int *n, int *nrhs, d *alpha, z *dl, z *d, z *du, z *x, int *ldx, d *beta, z *b, int *ldb) nogil
+
+cdef void zlahef(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil
+
+cdef void zlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, int *info) nogil
+
+cdef void zlahr2(int *n, int *k, int *nb, z *a, int *lda, z *tau, z *t, int *ldt, z *y, int *ldy) nogil
+
+cdef void zlaic1(int *job, int *j, z *x, d *sest, z *w, z *gamma, d *sestpr, z *s, z *c) nogil
+
+cdef void zlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *rwork, int *info) nogil
+
+cdef void zlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *rwork, int *iwork, int *info) nogil
+
+cdef void zlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, z *b, int *ldb, d *rcond, int *rank, z *work, d *rwork, int *iwork, int *info) nogil
+
+cdef d zlangb(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, d *work) nogil
+
+cdef d zlange(char *norm, int *m, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlangt(char *norm, int *n, z *dl, z *d, z *du) nogil
+
+cdef d zlanhb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil
+
+cdef d zlanhe(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlanhf(char *norm, char *transr, char *uplo, int *n, z *a, d *work) nogil
+
+cdef d zlanhp(char *norm, char *uplo, int *n, z *ap, d *work) nogil
+
+cdef d zlanhs(char *norm, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlanht(char *norm, int *n, d *d, z *e) nogil
+
+cdef d zlansb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil
+
+cdef d zlansp(char *norm, char *uplo, int *n, z *ap, d *work) nogil
+
+cdef d zlansy(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlantb(char *norm, char *uplo, char *diag, int *n, int *k, z *ab, int *ldab, d *work) nogil
+
+cdef d zlantp(char *norm, char *uplo, char *diag, int *n, z *ap, d *work) nogil
+
+cdef d zlantr(char *norm, char *uplo, char *diag, int *m, int *n, z *a, int *lda, d *work) nogil
+
+cdef void zlapll(int *n, z *x, int *incx, z *y, int *incy, d *ssmin) nogil
+
+cdef void zlapmr(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil
+
+cdef void zlapmt(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil
+
+cdef void zlaqgb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void zlaqge(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void zlaqhb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqhe(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqhp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqp2(int *m, int *n, int *offset, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *work) nogil
+
+cdef void zlaqps(int *m, int *n, int *offset, int *nb, int *kb, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *auxv, z *f, int *ldf) nogil
+
+cdef void zlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil
+
+cdef void zlaqr1(int *n, z *h, int *ldh, z *s1, z *s2, z *v) nogil
+
+cdef void zlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil
+
+cdef void zlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil
+
+cdef void zlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil
+
+cdef void zlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, z *s, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, z *v, int *ldv, z *u, int *ldu, int *nv, z *wv, int *ldwv, int *nh, z *wh, int *ldwh) nogil
+
+cdef void zlaqsb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqsp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqsy(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, z *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil
+
+cdef void zlar2v(int *n, z *x, z *y, z *z, int *incx, d *c, z *s, int *incc) nogil
+
+cdef void zlarcm(int *m, int *n, d *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *rwork) nogil
+
+cdef void zlarf(char *side, int *m, int *n, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil
+
+cdef void zlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil
+
+cdef void zlarfg(int *n, z *alpha, z *x, int *incx, z *tau) nogil
+
+cdef void zlarfgp(int *n, z *alpha, z *x, int *incx, z *tau) nogil
+
+cdef void zlarft(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil
+
+cdef void zlarfx(char *side, int *m, int *n, z *v, z *tau, z *c, int *ldc, z *work) nogil
+
+cdef void zlargv(int *n, z *x, int *incx, z *y, int *incy, d *c, int *incc) nogil
+
+cdef void zlarnv(int *idist, int *iseed, int *n, z *x) nogil
+
+cdef void zlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, z *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil
+
+cdef void zlartg(z *f, z *g, d *cs, z *sn, z *r) nogil
+
+cdef void zlartv(int *n, z *x, int *incx, z *y, int *incy, d *c, z *s, int *incc) nogil
+
+cdef void zlarz(char *side, int *m, int *n, int *l, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil
+
+cdef void zlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil
+
+cdef void zlarzt(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil
+
+cdef void zlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zlaset(char *uplo, int *m, int *n, z *alpha, z *beta, z *a, int *lda) nogil
+
+cdef void zlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, z *a, int *lda) nogil
+
+cdef void zlassq(int *n, z *x, int *incx, d *scale, d *sumsq) nogil
+
+cdef void zlaswp(int *n, z *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void zlasyf(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil
+
+cdef void zlat2c(char *uplo, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil
+
+cdef void zlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, z *ab, int *ldab, z *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void zlatdf(int *ijob, int *n, z *z, int *ldz, z *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void zlatps(char *uplo, char *trans, char *diag, char *normin, int *n, z *ap, z *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void zlatrd(char *uplo, int *n, int *nb, z *a, int *lda, d *e, z *tau, z *w, int *ldw) nogil
+
+cdef void zlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, z *a, int *lda, z *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void zlatrz(int *m, int *n, int *l, z *a, int *lda, z *tau, z *work) nogil
+
+cdef void zlauu2(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zlauum(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpbcon(char *uplo, int *n, int *kd, z *ab, int *ldab, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zpbequ(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zpbrfs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpbstf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil
+
+cdef void zpbsv(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil
+
+cdef void zpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpbtf2(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil
+
+cdef void zpbtrf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil
+
+cdef void zpbtrs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil
+
+cdef void zpftrf(char *transr, char *uplo, int *n, z *a, int *info) nogil
+
+cdef void zpftri(char *transr, char *uplo, int *n, z *a, int *info) nogil
+
+cdef void zpftrs(char *transr, char *uplo, int *n, int *nrhs, z *a, z *b, int *ldb, int *info) nogil
+
+cdef void zpocon(char *uplo, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zpoequ(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zpoequb(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zporfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zposvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpotf2(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpotrf(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpotri(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpotrs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zppcon(char *uplo, int *n, z *ap, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zppequ(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zpprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zppsv(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil
+
+cdef void zppsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpptrf(char *uplo, int *n, z *ap, int *info) nogil
+
+cdef void zpptri(char *uplo, int *n, z *ap, int *info) nogil
+
+cdef void zpptrs(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil
+
+cdef void zpstf2(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void zpstrf(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void zptcon(int *n, d *d, z *e, d *anorm, d *rcond, d *rwork, int *info) nogil
+
+cdef void zpteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil
+
+cdef void zptrfs(char *uplo, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zptsv(int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil
+
+cdef void zptsvx(char *fact, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpttrf(int *n, d *d, z *e, int *info) nogil
+
+cdef void zpttrs(char *uplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil
+
+cdef void zptts2(int *iuplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb) nogil
+
+cdef void zrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, z *s) nogil
+
+cdef void zspcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zspmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zspr(char *uplo, int *n, z *alpha, z *x, int *incx, z *ap) nogil
+
+cdef void zsprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zspsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zspsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zsptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil
+
+cdef void zsptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil
+
+cdef void zsptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zstedc(char *compz, int *n, d *d, d *e, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, z *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void zstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, z *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zsteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil
+
+cdef void zsycon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zsyconv(char *uplo, char *way, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil
+
+cdef void zsyequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil
+
+cdef void zsymv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zsyr(char *uplo, int *n, z *alpha, z *x, int *incx, z *a, int *lda) nogil
+
+cdef void zsyrfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zsysv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil
+
+cdef void zsysvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zsyswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil
+
+cdef void zsytf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zsytrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zsytri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil
+
+cdef void zsytri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zsytri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil
+
+cdef void zsytrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zsytrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil
+
+cdef void ztbcon(char *norm, char *uplo, char *diag, int *n, int *kd, z *ab, int *ldab, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void ztbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void ztbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil
+
+cdef void ztfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, z *alpha, z *a, z *b, int *ldb) nogil
+
+cdef void ztftri(char *transr, char *uplo, char *diag, int *n, z *a, int *info) nogil
+
+cdef void ztfttp(char *transr, char *uplo, int *n, z *arf, z *ap, int *info) nogil
+
+cdef void ztfttr(char *transr, char *uplo, int *n, z *arf, z *a, int *lda, int *info) nogil
+
+cdef void ztgevc(char *side, char *howmny, bint *select, int *n, z *s, int *lds, z *p, int *ldp, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil
+
+cdef void ztgex2(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *j1, int *info) nogil
+
+cdef void ztgexc(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *ifst, int *ilst, int *info) nogil
+
+cdef void ztgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, int *m, d *pl, d *pr, d *dif, z *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ztgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, z *a, int *lda, z *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, z *u, int *ldu, z *v, int *ldv, z *q, int *ldq, z *work, int *ncycle, int *info) nogil
+
+cdef void ztgsna(char *job, char *howmny, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *dif, int *mm, int *m, z *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ztgsy2(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *info) nogil
+
+cdef void ztgsyl(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *dif, z *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ztpcon(char *norm, char *uplo, char *diag, int *n, z *ap, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void ztpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *info) nogil
+
+cdef void ztpqrt(int *m, int *n, int *l, int *nb, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, z *work, int *info) nogil
+
+cdef void ztpqrt2(int *m, int *n, int *l, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, int *info) nogil
+
+cdef void ztprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *ldwork) nogil
+
+cdef void ztprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void ztptri(char *uplo, char *diag, int *n, z *ap, int *info) nogil
+
+cdef void ztptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil
+
+cdef void ztpttf(char *transr, char *uplo, int *n, z *ap, z *arf, int *info) nogil
+
+cdef void ztpttr(char *uplo, int *n, z *ap, z *a, int *lda, int *info) nogil
+
+cdef void ztrcon(char *norm, char *uplo, char *diag, int *n, z *a, int *lda, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void ztrevc(char *side, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil
+
+cdef void ztrexc(char *compq, int *n, z *t, int *ldt, z *q, int *ldq, int *ifst, int *ilst, int *info) nogil
+
+cdef void ztrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void ztrsen(char *job, char *compq, bint *select, int *n, z *t, int *ldt, z *q, int *ldq, z *w, int *m, d *s, d *sep, z *work, int *lwork, int *info) nogil
+
+cdef void ztrsna(char *job, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *sep, int *mm, int *m, z *work, int *ldwork, d *rwork, int *info) nogil
+
+cdef void ztrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *scale, int *info) nogil
+
+cdef void ztrti2(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil
+
+cdef void ztrtri(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil
+
+cdef void ztrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void ztrttf(char *transr, char *uplo, int *n, z *a, int *lda, z *arf, int *info) nogil
+
+cdef void ztrttp(char *uplo, int *n, z *a, int *lda, z *ap, int *info) nogil
+
+cdef void ztzrzf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zunbdb(char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, d *phi, z *taup1, z *taup2, z *tauq1, z *tauq2, z *work, int *lwork, int *info) nogil
+
+cdef void zuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *info) nogil
+
+cdef void zung2l(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zung2r(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zungbr(char *vect, int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zunghr(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungl2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zunglq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungql(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungqr(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungr2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zungrq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungtr(char *uplo, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zunm2l(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunm2r(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunml2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmlq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmql(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmqr(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmr2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmrq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmtr(char *side, char *uplo, char *trans, int *m, int *n, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zupgtr(char *uplo, int *n, z *ap, z *tau, z *q, int *ldq, z *work, int *info) nogil
+
+cdef void zupmtr(char *side, char *uplo, char *trans, int *m, int *n, z *ap, z *tau, z *c, int *ldc, z *work, int *info) nogil
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp.py
new file mode 100644
index 0000000..e473842
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp.py
@@ -0,0 +1,1585 @@
+# -*- coding: utf-8 -*-
+#
+# Author: Pearu Peterson, March 2002
+#
+# additions by Travis Oliphant, March 2002
+# additions by Eric Jones, June 2002
+# additions by Johannes Loehnert, June 2006
+# additions by Bart Vandereycken, June 2006
+# additions by Andrew D Straw, May 2007
+# additions by Tiziano Zito, November 2008
+#
+# April 2010: Functions for LU, QR, SVD, Schur, and Cholesky decompositions
+# were moved to their own files. Still in this file are functions for
+# eigenstuff and for the Hessenberg form.
+
+__all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh',
+ 'eig_banded', 'eigvals_banded',
+ 'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf']
+
+import numpy
+from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast,
+ flatnonzero, conj, asarray, argsort, empty,
+ iscomplex, zeros, einsum, eye, inf)
+# Local imports
+from scipy._lib._util import _asarray_validated
+from .misc import LinAlgError, _datacopied, norm
+from .lapack import get_lapack_funcs, _compute_lwork
+
+
+_I = cast['F'](1j)
+
+
+def _make_complex_eigvecs(w, vin, dtype):
+ """
+ Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output
+ """
+ # - see LAPACK man page DGGEV at ALPHAI
+ v = numpy.array(vin, dtype=dtype)
+ m = (w.imag > 0)
+ m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709
+ for i in flatnonzero(m):
+ v.imag[:, i] = vin[:, i+1]
+ conj(v[:, i], v[:, i+1])
+ return v
+
+
+def _make_eigvals(alpha, beta, homogeneous_eigvals):
+ if homogeneous_eigvals:
+ if beta is None:
+ return numpy.vstack((alpha, numpy.ones_like(alpha)))
+ else:
+ return numpy.vstack((alpha, beta))
+ else:
+ if beta is None:
+ return alpha
+ else:
+ w = numpy.empty_like(alpha)
+ alpha_zero = (alpha == 0)
+ beta_zero = (beta == 0)
+ beta_nonzero = ~beta_zero
+ w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero]
+ # Use numpy.inf for complex values too since
+ # 1/numpy.inf = 0, i.e., it correctly behaves as projective
+ # infinity.
+ w[~alpha_zero & beta_zero] = numpy.inf
+ if numpy.all(alpha.imag == 0):
+ w[alpha_zero & beta_zero] = numpy.nan
+ else:
+ w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan)
+ return w
+
+
+def _geneig(a1, b1, left, right, overwrite_a, overwrite_b,
+ homogeneous_eigvals):
+ ggev, = get_lapack_funcs(('ggev',), (a1, b1))
+ cvl, cvr = left, right
+ res = ggev(a1, b1, lwork=-1)
+ lwork = res[-2][0].real.astype(numpy.int_)
+ if ggev.typecode in 'cz':
+ alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork,
+ overwrite_a, overwrite_b)
+ w = _make_eigvals(alpha, beta, homogeneous_eigvals)
+ else:
+ alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr,
+ lwork, overwrite_a,
+ overwrite_b)
+ alpha = alphar + _I * alphai
+ w = _make_eigvals(alpha, beta, homogeneous_eigvals)
+ _check_info(info, 'generalized eig algorithm (ggev)')
+
+ only_real = numpy.all(w.imag == 0.0)
+ if not (ggev.typecode in 'cz' or only_real):
+ t = w.dtype.char
+ if left:
+ vl = _make_complex_eigvecs(w, vl, t)
+ if right:
+ vr = _make_complex_eigvecs(w, vr, t)
+
+ # the eigenvectors returned by the lapack function are NOT normalized
+ for i in range(vr.shape[0]):
+ if right:
+ vr[:, i] /= norm(vr[:, i])
+ if left:
+ vl[:, i] /= norm(vl[:, i])
+
+ if not (left or right):
+ return w
+ if left:
+ if right:
+ return w, vl, vr
+ return w, vl
+ return w, vr
+
+
+def eig(a, b=None, left=False, right=True, overwrite_a=False,
+ overwrite_b=False, check_finite=True, homogeneous_eigvals=False):
+ """
+ Solve an ordinary or generalized eigenvalue problem of a square matrix.
+
+ Find eigenvalues w and right or left eigenvectors of a general matrix::
+
+ a vr[:,i] = w[i] b vr[:,i]
+ a.H vl[:,i] = w[i].conj() b.H vl[:,i]
+
+ where ``.H`` is the Hermitian conjugation.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ A complex or real matrix whose eigenvalues and eigenvectors
+ will be computed.
+ b : (M, M) array_like, optional
+ Right-hand side matrix in a generalized eigenvalue problem.
+ Default is None, identity matrix is assumed.
+ left : bool, optional
+ Whether to calculate and return left eigenvectors. Default is False.
+ right : bool, optional
+ Whether to calculate and return right eigenvectors. Default is True.
+ overwrite_a : bool, optional
+ Whether to overwrite `a`; may improve performance. Default is False.
+ overwrite_b : bool, optional
+ Whether to overwrite `b`; may improve performance. Default is False.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ homogeneous_eigvals : bool, optional
+ If True, return the eigenvalues in homogeneous coordinates.
+ In this case ``w`` is a (2, M) array so that::
+
+ w[1,i] a vr[:,i] = w[0,i] b vr[:,i]
+
+ Default is False.
+
+ Returns
+ -------
+ w : (M,) or (2, M) double or complex ndarray
+ The eigenvalues, each repeated according to its
+ multiplicity. The shape is (M,) unless
+ ``homogeneous_eigvals=True``.
+ vl : (M, M) double or complex ndarray
+ The normalized left eigenvector corresponding to the eigenvalue
+ ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``.
+ vr : (M, M) double or complex ndarray
+ The normalized right eigenvector corresponding to the eigenvalue
+ ``w[i]`` is the column ``vr[:,i]``. Only returned if ``right=True``.
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eigvals : eigenvalues of general arrays
+ eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays.
+ eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+ band matrices
+ eigh_tridiagonal : eigenvalues and right eigenvectors for
+ symmetric/Hermitian tridiagonal matrices
+
+ Examples
+ --------
+ >>> from scipy import linalg
+ >>> a = np.array([[0., -1.], [1., 0.]])
+ >>> linalg.eigvals(a)
+ array([0.+1.j, 0.-1.j])
+
+ >>> b = np.array([[0., 1.], [1., 1.]])
+ >>> linalg.eigvals(a, b)
+ array([ 1.+0.j, -1.+0.j])
+
+ >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])
+ >>> linalg.eigvals(a, homogeneous_eigvals=True)
+ array([[3.+0.j, 8.+0.j, 7.+0.j],
+ [1.+0.j, 1.+0.j, 1.+0.j]])
+
+ >>> a = np.array([[0., -1.], [1., 0.]])
+ >>> linalg.eigvals(a) == linalg.eig(a)[0]
+ array([ True, True])
+ >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector
+ array([[-0.70710678+0.j , -0.70710678-0.j ],
+ [-0. +0.70710678j, -0. -0.70710678j]])
+ >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector
+ array([[0.70710678+0.j , 0.70710678-0.j ],
+ [0. -0.70710678j, 0. +0.70710678j]])
+
+
+
+ """
+ a1 = _asarray_validated(a, check_finite=check_finite)
+ if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+ raise ValueError('expected square matrix')
+ overwrite_a = overwrite_a or (_datacopied(a1, a))
+ if b is not None:
+ b1 = _asarray_validated(b, check_finite=check_finite)
+ overwrite_b = overwrite_b or _datacopied(b1, b)
+ if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
+ raise ValueError('expected square matrix')
+ if b1.shape != a1.shape:
+ raise ValueError('a and b must have the same shape')
+ return _geneig(a1, b1, left, right, overwrite_a, overwrite_b,
+ homogeneous_eigvals)
+
+ geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,))
+ compute_vl, compute_vr = left, right
+
+ lwork = _compute_lwork(geev_lwork, a1.shape[0],
+ compute_vl=compute_vl,
+ compute_vr=compute_vr)
+
+ if geev.typecode in 'cz':
+ w, vl, vr, info = geev(a1, lwork=lwork,
+ compute_vl=compute_vl,
+ compute_vr=compute_vr,
+ overwrite_a=overwrite_a)
+ w = _make_eigvals(w, None, homogeneous_eigvals)
+ else:
+ wr, wi, vl, vr, info = geev(a1, lwork=lwork,
+ compute_vl=compute_vl,
+ compute_vr=compute_vr,
+ overwrite_a=overwrite_a)
+ t = {'f': 'F', 'd': 'D'}[wr.dtype.char]
+ w = wr + _I * wi
+ w = _make_eigvals(w, None, homogeneous_eigvals)
+
+ _check_info(info, 'eig algorithm (geev)',
+ positive='did not converge (only eigenvalues '
+ 'with order >= %d have converged)')
+
+ only_real = numpy.all(w.imag == 0.0)
+ if not (geev.typecode in 'cz' or only_real):
+ t = w.dtype.char
+ if left:
+ vl = _make_complex_eigvecs(w, vl, t)
+ if right:
+ vr = _make_complex_eigvecs(w, vr, t)
+ if not (left or right):
+ return w
+ if left:
+ if right:
+ return w, vl, vr
+ return w, vl
+ return w, vr
+
+
+def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,
+ overwrite_b=False, turbo=True, eigvals=None, type=1,
+ check_finite=True, subset_by_index=None, subset_by_value=None,
+ driver=None):
+ """
+ Solve a standard or generalized eigenvalue problem for a complex
+ Hermitian or real symmetric matrix.
+
+ Find eigenvalues array ``w`` and optionally eigenvectors array ``v`` of
+ array ``a``, where ``b`` is positive definite such that for every
+ eigenvalue λ (i-th entry of w) and its eigenvector ``vi`` (i-th column of
+ ``v``) satisfies::
+
+ a @ vi = λ * b @ vi
+ vi.conj().T @ a @ vi = λ
+ vi.conj().T @ b @ vi = 1
+
+ In the standard problem, ``b`` is assumed to be the identity matrix.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ A complex Hermitian or real symmetric matrix whose eigenvalues and
+ eigenvectors will be computed.
+ b : (M, M) array_like, optional
+ A complex Hermitian or real symmetric positive definite matrix.
+ If omitted, identity matrix is assumed.
+ lower : bool, optional
+ Whether the pertinent array data is taken from the lower or upper
+ triangle of ``a`` and, if applicable, ``b``. (Default: lower)
+ eigvals_only : bool, optional
+ Whether to calculate only eigenvalues and no eigenvectors.
+ (Default: both are calculated)
+ subset_by_index : iterable, optional
+ If provided, this two-element iterable defines the start and the end
+ indices of the desired eigenvalues (ascending order and 0-indexed).
+ To return only the second smallest to fifth smallest eigenvalues,
+ ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only
+ available with "evr", "evx", and "gvx" drivers. The entries are
+ directly converted to integers via ``int()``.
+ subset_by_value : iterable, optional
+ If provided, this two-element iterable defines the half-open interval
+ ``(a, b]`` that, if any, only the eigenvalues between these values
+ are returned. Only available with "evr", "evx", and "gvx" drivers. Use
+ ``np.inf`` for the unconstrained ends.
+ driver: str, optional
+ Defines which LAPACK driver should be used. Valid options are "ev",
+ "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for
+ generalized (where b is not None) problems. See the Notes section.
+ type : int, optional
+ For the generalized problems, this keyword specifies the problem type
+ to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible
+ inputs)::
+
+ 1 => a @ v = w @ b @ v
+ 2 => a @ b @ v = w @ v
+ 3 => b @ a @ v = w @ v
+
+ This keyword is ignored for standard problems.
+ overwrite_a : bool, optional
+ Whether to overwrite data in ``a`` (may improve performance). Default
+ is False.
+ overwrite_b : bool, optional
+ Whether to overwrite data in ``b`` (may improve performance). Default
+ is False.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ turbo : bool, optional
+ *Deprecated since v1.5.0, use ``driver=gvd`` keyword instead*.
+ Use divide and conquer algorithm (faster but expensive in memory, only
+ for generalized eigenvalue problem and if full set of eigenvalues are
+ requested.). Has no significant effect if eigenvectors are not
+ requested.
+ eigvals : tuple (lo, hi), optional
+ *Deprecated since v1.5.0, use ``subset_by_index`` keyword instead*.
+ Indexes of the smallest and largest (in ascending order) eigenvalues
+ and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1.
+ If omitted, all eigenvalues and eigenvectors are returned.
+
+ Returns
+ -------
+ w : (N,) ndarray
+ The N (1<=N<=M) selected eigenvalues, in ascending order, each
+ repeated according to its multiplicity.
+ v : (M, N) ndarray
+ (if ``eigvals_only == False``)
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue computation does not converge, an error occurred, or
+ b matrix is not definite positive. Note that if input matrices are
+ not symmetric or Hermitian, no error will be reported but results will
+ be wrong.
+
+ See Also
+ --------
+ eigvalsh : eigenvalues of symmetric or Hermitian arrays
+ eig : eigenvalues and right eigenvectors for non-symmetric arrays
+ eigh_tridiagonal : eigenvalues and right eigenvectors for
+ symmetric/Hermitian tridiagonal matrices
+
+ Notes
+ -----
+ This function does not check the input array for being Hermitian/symmetric
+ in order to allow for representing arrays with only their upper/lower
+ triangular parts. Also, note that even though not taken into account,
+ finiteness check applies to the whole array and unaffected by "lower"
+ keyword.
+
+ This function uses LAPACK drivers for computations in all possible keyword
+ combinations, prefixed with ``sy`` if arrays are real and ``he`` if
+ complex, e.g., a float array with "evr" driver is solved via
+ "syevr", complex arrays with "gvx" driver problem is solved via "hegvx"
+ etc.
+
+ As a brief summary, the slowest and the most robust driver is the
+ classical ``ev`` which uses symmetric QR. ``evr`` is seen as
+ the optimal choice for the most general cases. However, there are certain
+ occasions that ``evd`` computes faster at the expense of more
+ memory usage. ``evx``, while still being faster than ``ev``,
+ often performs worse than the rest except when very few eigenvalues are
+ requested for large arrays though there is still no performance guarantee.
+
+
+ For the generalized problem, normalization with respect to the given
+ type argument::
+
+ type 1 and 3 : v.conj().T @ a @ v = w
+ type 2 : inv(v).conj().T @ a @ inv(v) = w
+
+ type 1 or 2 : v.conj().T @ b @ v = I
+ type 3 : v.conj().T @ inv(b) @ v = I
+
+
+ Examples
+ --------
+ >>> from scipy.linalg import eigh
+ >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
+ >>> w, v = eigh(A)
+ >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+ True
+
+ Request only the eigenvalues
+
+ >>> w = eigh(A, eigvals_only=True)
+
+ Request eigenvalues that are less than 10.
+
+ >>> A = np.array([[34, -4, -10, -7, 2],
+ ... [-4, 7, 2, 12, 0],
+ ... [-10, 2, 44, 2, -19],
+ ... [-7, 12, 2, 79, -34],
+ ... [2, 0, -19, -34, 29]])
+ >>> eigh(A, eigvals_only=True, subset_by_value=[-np.inf, 10])
+ array([6.69199443e-07, 9.11938152e+00])
+
+ Request the second smallest eigenvalue and its eigenvector
+
+ >>> w, v = eigh(A, subset_by_index=[1, 1])
+ >>> w
+ array([9.11938152])
+ >>> v.shape # only a single column is returned
+ (5, 1)
+
+ """
+ # set lower
+ uplo = 'L' if lower else 'U'
+ # Set job for Fortran routines
+ _job = 'N' if eigvals_only else 'V'
+
+ drv_str = [None, "ev", "evd", "evr", "evx", "gv", "gvd", "gvx"]
+ if driver not in drv_str:
+ raise ValueError('"{}" is unknown. Possible values are "None", "{}".'
+ ''.format(driver, '", "'.join(drv_str[1:])))
+
+ a1 = _asarray_validated(a, check_finite=check_finite)
+ if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+ raise ValueError('expected square "a" matrix')
+ overwrite_a = overwrite_a or (_datacopied(a1, a))
+ cplx = True if iscomplexobj(a1) else False
+ n = a1.shape[0]
+ drv_args = {'overwrite_a': overwrite_a}
+
+ if b is not None:
+ b1 = _asarray_validated(b, check_finite=check_finite)
+ overwrite_b = overwrite_b or _datacopied(b1, b)
+ if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
+ raise ValueError('expected square "b" matrix')
+
+ if b1.shape != a1.shape:
+ raise ValueError("wrong b dimensions {}, should "
+ "be {}".format(b1.shape, a1.shape))
+
+ if type not in [1, 2, 3]:
+ raise ValueError('"type" keyword only accepts 1, 2, and 3.')
+
+ cplx = True if iscomplexobj(b1) else (cplx or False)
+ drv_args.update({'overwrite_b': overwrite_b, 'itype': type})
+
+ # backwards-compatibility handling
+ subset_by_index = subset_by_index if (eigvals is None) else eigvals
+
+ subset = (subset_by_index is not None) or (subset_by_value is not None)
+
+ # Both subsets can't be given
+ if subset_by_index and subset_by_value:
+ raise ValueError('Either index or value subset can be requested.')
+
+ # Take turbo into account if all conditions are met otherwise ignore
+ if turbo and b is not None:
+ driver = 'gvx' if subset else 'gvd'
+
+ # Check indices if given
+ if subset_by_index:
+ lo, hi = [int(x) for x in subset_by_index]
+ if not (0 <= lo <= hi < n):
+ raise ValueError('Requested eigenvalue indices are not valid. '
+ 'Valid range is [0, {}] and start <= end, but '
+ 'start={}, end={} is given'.format(n-1, lo, hi))
+ # fortran is 1-indexed
+ drv_args.update({'range': 'I', 'il': lo + 1, 'iu': hi + 1})
+
+ if subset_by_value:
+ lo, hi = subset_by_value
+ if not (-inf <= lo < hi <= inf):
+ raise ValueError('Requested eigenvalue bounds are not valid. '
+ 'Valid range is (-inf, inf) and low < high, but '
+ 'low={}, high={} is given'.format(lo, hi))
+
+ drv_args.update({'range': 'V', 'vl': lo, 'vu': hi})
+
+ # fix prefix for lapack routines
+ pfx = 'he' if cplx else 'sy'
+
+ # decide on the driver if not given
+ # first early exit on incompatible choice
+ if driver:
+ if b is None and (driver in ["gv", "gvd", "gvx"]):
+ raise ValueError('{} requires input b array to be supplied '
+ 'for generalized eigenvalue problems.'
+ ''.format(driver))
+ if (b is not None) and (driver in ['ev', 'evd', 'evr', 'evx']):
+ raise ValueError('"{}" does not accept input b array '
+ 'for standard eigenvalue problems.'
+ ''.format(driver))
+ if subset and (driver in ["ev", "evd", "gv", "gvd"]):
+ raise ValueError('"{}" cannot compute subsets of eigenvalues'
+ ''.format(driver))
+
+ # Default driver is evr and gvd
+ else:
+ driver = "evr" if b is None else ("gvx" if subset else "gvd")
+
+ lwork_spec = {
+ 'syevd': ['lwork', 'liwork'],
+ 'syevr': ['lwork', 'liwork'],
+ 'heevd': ['lwork', 'liwork', 'lrwork'],
+ 'heevr': ['lwork', 'lrwork', 'liwork'],
+ }
+
+ if b is None: # Standard problem
+ drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),
+ [a1])
+ clw_args = {'n': n, 'lower': lower}
+ if driver == 'evd':
+ clw_args.update({'compute_v': 0 if _job == "N" else 1})
+
+ lw = _compute_lwork(drvlw, **clw_args)
+ # Multiple lwork vars
+ if isinstance(lw, tuple):
+ lwork_args = dict(zip(lwork_spec[pfx+driver], lw))
+ else:
+ lwork_args = {'lwork': lw}
+
+ drv_args.update({'lower': lower, 'compute_v': 0 if _job == "N" else 1})
+ w, v, *other_args, info = drv(a=a1, **drv_args, **lwork_args)
+
+ else: # Generalized problem
+ # 'gvd' doesn't have lwork query
+ if driver == "gvd":
+ drv = get_lapack_funcs(pfx + "gvd", [a1, b1])
+ lwork_args = {}
+ else:
+ drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),
+ [a1, b1])
+ # generalized drivers use uplo instead of lower
+ lw = _compute_lwork(drvlw, n, uplo=uplo)
+ lwork_args = {'lwork': lw}
+
+ drv_args.update({'uplo': uplo, 'jobz': _job})
+
+ w, v, *other_args, info = drv(a=a1, b=b1, **drv_args, **lwork_args)
+
+ # m is always the first extra argument
+ w = w[:other_args[0]] if subset else w
+ v = v[:, :other_args[0]] if (subset and not eigvals_only) else v
+
+ # Check if we had a successful exit
+ if info == 0:
+ if eigvals_only:
+ return w
+ else:
+ return w, v
+ else:
+ if info < -1:
+ raise LinAlgError('Illegal value in argument {} of internal {}'
+ ''.format(-info, drv.typecode + pfx + driver))
+ elif info > n:
+ raise LinAlgError('The leading minor of order {} of B is not '
+ 'positive definite. The factorization of B '
+ 'could not be completed and no eigenvalues '
+ 'or eigenvectors were computed.'.format(info-n))
+ else:
+ drv_err = {'ev': 'The algorithm failed to converge; {} '
+ 'off-diagonal elements of an intermediate '
+ 'tridiagonal form did not converge to zero.',
+ 'evx': '{} eigenvectors failed to converge.',
+ 'evd': 'The algorithm failed to compute an eigenvalue '
+ 'while working on the submatrix lying in rows '
+ 'and columns {0}/{1} through mod({0},{1}).',
+ 'evr': 'Internal Error.'
+ }
+ if driver in ['ev', 'gv']:
+ msg = drv_err['ev'].format(info)
+ elif driver in ['evx', 'gvx']:
+ msg = drv_err['evx'].format(info)
+ elif driver in ['evd', 'gvd']:
+ if eigvals_only:
+ msg = drv_err['ev'].format(info)
+ else:
+ msg = drv_err['evd'].format(info, n+1)
+ else:
+ msg = drv_err['evr']
+
+ raise LinAlgError(msg)
+
+
+_conv_dict = {0: 0, 1: 1, 2: 2,
+ 'all': 0, 'value': 1, 'index': 2,
+ 'a': 0, 'v': 1, 'i': 2}
+
+
+def _check_select(select, select_range, max_ev, max_len):
+ """Check that select is valid, convert to Fortran style."""
+ if isinstance(select, str):
+ select = select.lower()
+ try:
+ select = _conv_dict[select]
+ except KeyError as e:
+ raise ValueError('invalid argument for select') from e
+ vl, vu = 0., 1.
+ il = iu = 1
+ if select != 0: # (non-all)
+ sr = asarray(select_range)
+ if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]:
+ raise ValueError('select_range must be a 2-element array-like '
+ 'in nondecreasing order')
+ if select == 1: # (value)
+ vl, vu = sr
+ if max_ev == 0:
+ max_ev = max_len
+ else: # 2 (index)
+ if sr.dtype.char.lower() not in 'hilqp':
+ raise ValueError('when using select="i", select_range must '
+ 'contain integers, got dtype %s (%s)'
+ % (sr.dtype, sr.dtype.char))
+ # translate Python (0 ... N-1) into Fortran (1 ... N) with + 1
+ il, iu = sr + 1
+ if min(il, iu) < 1 or max(il, iu) > max_len:
+ raise ValueError('select_range out of bounds')
+ max_ev = iu - il + 1
+ return select, vl, vu, il, iu, max_ev
+
+
+def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,
+ select='a', select_range=None, max_ev=0, check_finite=True):
+ """
+ Solve real symmetric or complex Hermitian band matrix eigenvalue problem.
+
+ Find eigenvalues w and optionally right eigenvectors v of a::
+
+ a v[:,i] = w[i] v[:,i]
+ v.H v = identity
+
+ The matrix a is stored in a_band either in lower diagonal or upper
+ diagonal ordered form:
+
+ a_band[u + i - j, j] == a[i,j] (if upper form; i <= j)
+ a_band[ i - j, j] == a[i,j] (if lower form; i >= j)
+
+ where u is the number of bands above the diagonal.
+
+ Example of a_band (shape of a is (6,6), u=2)::
+
+ upper form:
+ * * a02 a13 a24 a35
+ * a01 a12 a23 a34 a45
+ a00 a11 a22 a33 a44 a55
+
+ lower form:
+ a00 a11 a22 a33 a44 a55
+ a10 a21 a32 a43 a54 *
+ a20 a31 a42 a53 * *
+
+ Cells marked with * are not used.
+
+ Parameters
+ ----------
+ a_band : (u+1, M) array_like
+ The bands of the M by M matrix a.
+ lower : bool, optional
+ Is the matrix in the lower form. (Default is upper form)
+ eigvals_only : bool, optional
+ Compute only the eigenvalues and no eigenvectors.
+ (Default: calculate also eigenvectors)
+ overwrite_a_band : bool, optional
+ Discard data in a_band (may enhance performance)
+ select : {'a', 'v', 'i'}, optional
+ Which eigenvalues to calculate
+
+ ====== ========================================
+ select calculated
+ ====== ========================================
+ 'a' All eigenvalues
+ 'v' Eigenvalues in the interval (min, max]
+ 'i' Eigenvalues with indices min <= i <= max
+ ====== ========================================
+ select_range : (min, max), optional
+ Range of selected eigenvalues
+ max_ev : int, optional
+ For select=='v', maximum number of eigenvalues expected.
+ For other values of select, has no meaning.
+
+ In doubt, leave this parameter untouched.
+
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ w : (M,) ndarray
+ The eigenvalues, in ascending order, each repeated according to its
+ multiplicity.
+ v : (M, M) float or complex ndarray
+ The normalized eigenvector corresponding to the eigenvalue w[i] is
+ the column v[:,i].
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
+ eig : eigenvalues and right eigenvectors of general arrays.
+ eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+ eigh_tridiagonal : eigenvalues and right eigenvectors for
+ symmetric/Hermitian tridiagonal matrices
+
+ Examples
+ --------
+ >>> from scipy.linalg import eig_banded
+ >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
+ >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
+ >>> w, v = eig_banded(Ab, lower=True)
+ >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+ True
+ >>> w = eig_banded(Ab, lower=True, eigvals_only=True)
+ >>> w
+ array([-4.26200532, -2.22987175, 3.95222349, 12.53965359])
+
+ Request only the eigenvalues between ``[-3, 4]``
+
+ >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4])
+ >>> w
+ array([-2.22987175, 3.95222349])
+
+ """
+ if eigvals_only or overwrite_a_band:
+ a1 = _asarray_validated(a_band, check_finite=check_finite)
+ overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))
+ else:
+ a1 = array(a_band)
+ if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():
+ raise ValueError("array must not contain infs or NaNs")
+ overwrite_a_band = 1
+
+ if len(a1.shape) != 2:
+ raise ValueError('expected a 2-D array')
+ select, vl, vu, il, iu, max_ev = _check_select(
+ select, select_range, max_ev, a1.shape[1])
+ del select_range
+ if select == 0:
+ if a1.dtype.char in 'GFD':
+ # FIXME: implement this sometime, for now go with builtin values
+ # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)
+ # or by using calc_lwork.f ???
+ # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)
+ internal_name = 'hbevd'
+ else: # a1.dtype.char in 'fd':
+ # FIXME: implement this sometime, for now go with builtin values
+ # see above
+ # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)
+ internal_name = 'sbevd'
+ bevd, = get_lapack_funcs((internal_name,), (a1,))
+ w, v, info = bevd(a1, compute_v=not eigvals_only,
+ lower=lower, overwrite_ab=overwrite_a_band)
+ else: # select in [1, 2]
+ if eigvals_only:
+ max_ev = 1
+ # calculate optimal abstol for dsbevx (see manpage)
+ if a1.dtype.char in 'fF': # single precision
+ lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))
+ else:
+ lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))
+ abstol = 2 * lamch('s')
+ if a1.dtype.char in 'GFD':
+ internal_name = 'hbevx'
+ else: # a1.dtype.char in 'gfd'
+ internal_name = 'sbevx'
+ bevx, = get_lapack_funcs((internal_name,), (a1,))
+ w, v, m, ifail, info = bevx(
+ a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev,
+ range=select, lower=lower, overwrite_ab=overwrite_a_band,
+ abstol=abstol)
+ # crop off w and v
+ w = w[:m]
+ if not eigvals_only:
+ v = v[:, :m]
+ _check_info(info, internal_name)
+
+ if eigvals_only:
+ return w
+ return w, v
+
+
+def eigvals(a, b=None, overwrite_a=False, check_finite=True,
+ homogeneous_eigvals=False):
+ """
+ Compute eigenvalues from an ordinary or generalized eigenvalue problem.
+
+ Find eigenvalues of a general matrix::
+
+ a vr[:,i] = w[i] b vr[:,i]
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ A complex or real matrix whose eigenvalues and eigenvectors
+ will be computed.
+ b : (M, M) array_like, optional
+ Right-hand side matrix in a generalized eigenvalue problem.
+ If omitted, identity matrix is assumed.
+ overwrite_a : bool, optional
+ Whether to overwrite data in a (may improve performance)
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities
+ or NaNs.
+ homogeneous_eigvals : bool, optional
+ If True, return the eigenvalues in homogeneous coordinates.
+ In this case ``w`` is a (2, M) array so that::
+
+ w[1,i] a vr[:,i] = w[0,i] b vr[:,i]
+
+ Default is False.
+
+ Returns
+ -------
+ w : (M,) or (2, M) double or complex ndarray
+ The eigenvalues, each repeated according to its multiplicity
+ but not in any specific order. The shape is (M,) unless
+ ``homogeneous_eigvals=True``.
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue computation does not converge
+
+ See Also
+ --------
+ eig : eigenvalues and right eigenvectors of general arrays.
+ eigvalsh : eigenvalues of symmetric or Hermitian arrays
+ eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
+ eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+ matrices
+
+ Examples
+ --------
+ >>> from scipy import linalg
+ >>> a = np.array([[0., -1.], [1., 0.]])
+ >>> linalg.eigvals(a)
+ array([0.+1.j, 0.-1.j])
+
+ >>> b = np.array([[0., 1.], [1., 1.]])
+ >>> linalg.eigvals(a, b)
+ array([ 1.+0.j, -1.+0.j])
+
+ >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])
+ >>> linalg.eigvals(a, homogeneous_eigvals=True)
+ array([[3.+0.j, 8.+0.j, 7.+0.j],
+ [1.+0.j, 1.+0.j, 1.+0.j]])
+
+ """
+ return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a,
+ check_finite=check_finite,
+ homogeneous_eigvals=homogeneous_eigvals)
+
+
+def eigvalsh(a, b=None, lower=True, overwrite_a=False,
+ overwrite_b=False, turbo=True, eigvals=None, type=1,
+ check_finite=True, subset_by_index=None, subset_by_value=None,
+ driver=None):
+ """
+ Solves a standard or generalized eigenvalue problem for a complex
+ Hermitian or real symmetric matrix.
+
+ Find eigenvalues array ``w`` of array ``a``, where ``b`` is positive
+ definite such that for every eigenvalue λ (i-th entry of w) and its
+ eigenvector vi (i-th column of v) satisfies::
+
+ a @ vi = λ * b @ vi
+ vi.conj().T @ a @ vi = λ
+ vi.conj().T @ b @ vi = 1
+
+ In the standard problem, b is assumed to be the identity matrix.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ A complex Hermitian or real symmetric matrix whose eigenvalues will
+ be computed.
+ b : (M, M) array_like, optional
+ A complex Hermitian or real symmetric positive definite matrix.
+ If omitted, identity matrix is assumed.
+ lower : bool, optional
+ Whether the pertinent array data is taken from the lower or upper
+ triangle of ``a`` and, if applicable, ``b``. (Default: lower)
+ eigvals_only : bool, optional
+ Whether to calculate only eigenvalues and no eigenvectors.
+ (Default: both are calculated)
+ subset_by_index : iterable, optional
+ If provided, this two-element iterable defines the start and the end
+ indices of the desired eigenvalues (ascending order and 0-indexed).
+ To return only the second smallest to fifth smallest eigenvalues,
+ ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only
+ available with "evr", "evx", and "gvx" drivers. The entries are
+ directly converted to integers via ``int()``.
+ subset_by_value : iterable, optional
+ If provided, this two-element iterable defines the half-open interval
+ ``(a, b]`` that, if any, only the eigenvalues between these values
+ are returned. Only available with "evr", "evx", and "gvx" drivers. Use
+ ``np.inf`` for the unconstrained ends.
+ driver: str, optional
+ Defines which LAPACK driver should be used. Valid options are "ev",
+ "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for
+ generalized (where b is not None) problems. See the Notes section of
+ `scipy.linalg.eigh`.
+ type : int, optional
+ For the generalized problems, this keyword specifies the problem type
+ to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible
+ inputs)::
+
+ 1 => a @ v = w @ b @ v
+ 2 => a @ b @ v = w @ v
+ 3 => b @ a @ v = w @ v
+
+ This keyword is ignored for standard problems.
+ overwrite_a : bool, optional
+ Whether to overwrite data in ``a`` (may improve performance). Default
+ is False.
+ overwrite_b : bool, optional
+ Whether to overwrite data in ``b`` (may improve performance). Default
+ is False.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ turbo : bool, optional
+ *Deprecated by ``driver=gvd`` option*. Has no significant effect for
+ eigenvalue computations since no eigenvectors are requested.
+
+        .. Deprecated in v1.5.0
+ eigvals : tuple (lo, hi), optional
+ *Deprecated by ``subset_by_index`` keyword*. Indexes of the smallest
+ and largest (in ascending order) eigenvalues and corresponding
+ eigenvectors to be returned: 0 <= lo <= hi <= M-1. If omitted, all
+ eigenvalues and eigenvectors are returned.
+
+ .. Deprecated in v1.5.0
+
+ Returns
+ -------
+ w : (N,) ndarray
+ The ``N`` (``1<=N<=M``) selected eigenvalues, in ascending order, each
+ repeated according to its multiplicity.
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue computation does not converge, an error occurred, or
+ b matrix is not definite positive. Note that if input matrices are
+ not symmetric or Hermitian, no error will be reported but results will
+ be wrong.
+
+ See Also
+ --------
+ eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+ eigvals : eigenvalues of general arrays
+ eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
+ eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+ matrices
+
+ Notes
+ -----
+ This function does not check the input array for being Hermitian/symmetric
+ in order to allow for representing arrays with only their upper/lower
+ triangular parts.
+
+ This function serves as a one-liner shorthand for `scipy.linalg.eigh` with
+ the option ``eigvals_only=True`` to get the eigenvalues and not the
+ eigenvectors. Here it is kept as a legacy convenience. It might be
+ beneficial to use the main function to have full control and to be a bit
+ more pythonic.
+
+ Examples
+ --------
+ For more examples see `scipy.linalg.eigh`.
+
+ >>> from scipy.linalg import eigvalsh
+ >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
+ >>> w = eigvalsh(A)
+ >>> w
+ array([-3.74637491, -0.76263923, 6.08502336, 12.42399079])
+
+ """
+ return eigh(a, b=b, lower=lower, eigvals_only=True,
+ overwrite_a=overwrite_a, overwrite_b=overwrite_b,
+ turbo=turbo, eigvals=eigvals, type=type,
+ check_finite=check_finite, subset_by_index=subset_by_index,
+ subset_by_value=subset_by_value, driver=driver)
+
+
+def eigvals_banded(a_band, lower=False, overwrite_a_band=False,
+ select='a', select_range=None, check_finite=True):
+ """
+ Solve real symmetric or complex Hermitian band matrix eigenvalue problem.
+
+ Find eigenvalues w of a::
+
+ a v[:,i] = w[i] v[:,i]
+ v.H v = identity
+
+ The matrix a is stored in a_band either in lower diagonal or upper
+ diagonal ordered form:
+
+ a_band[u + i - j, j] == a[i,j] (if upper form; i <= j)
+ a_band[ i - j, j] == a[i,j] (if lower form; i >= j)
+
+ where u is the number of bands above the diagonal.
+
+ Example of a_band (shape of a is (6,6), u=2)::
+
+ upper form:
+ * * a02 a13 a24 a35
+ * a01 a12 a23 a34 a45
+ a00 a11 a22 a33 a44 a55
+
+ lower form:
+ a00 a11 a22 a33 a44 a55
+ a10 a21 a32 a43 a54 *
+ a20 a31 a42 a53 * *
+
+ Cells marked with * are not used.
+
+ Parameters
+ ----------
+ a_band : (u+1, M) array_like
+ The bands of the M by M matrix a.
+ lower : bool, optional
+ Is the matrix in the lower form. (Default is upper form)
+ overwrite_a_band : bool, optional
+ Discard data in a_band (may enhance performance)
+ select : {'a', 'v', 'i'}, optional
+ Which eigenvalues to calculate
+
+ ====== ========================================
+ select calculated
+ ====== ========================================
+ 'a' All eigenvalues
+ 'v' Eigenvalues in the interval (min, max]
+ 'i' Eigenvalues with indices min <= i <= max
+ ====== ========================================
+ select_range : (min, max), optional
+ Range of selected eigenvalues
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ w : (M,) ndarray
+ The eigenvalues, in ascending order, each repeated according to its
+ multiplicity.
+
+ Raises
+ ------
+ LinAlgError
+ If eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+ band matrices
+ eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+ matrices
+ eigvals : eigenvalues of general arrays
+ eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+ eig : eigenvalues and right eigenvectors for non-symmetric arrays
+
+ Examples
+ --------
+ >>> from scipy.linalg import eigvals_banded
+ >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
+ >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
+ >>> w = eigvals_banded(Ab, lower=True)
+ >>> w
+ array([-4.26200532, -2.22987175, 3.95222349, 12.53965359])
+ """
+ return eig_banded(a_band, lower=lower, eigvals_only=1,
+ overwrite_a_band=overwrite_a_band, select=select,
+ select_range=select_range, check_finite=check_finite)
+
+
+def eigvalsh_tridiagonal(d, e, select='a', select_range=None,
+                         check_finite=True, tol=0., lapack_driver='auto'):
+    """
+    Solve eigenvalue problem for a real symmetric tridiagonal matrix.
+
+    Find eigenvalues `w` of ``a``::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v    = identity
+
+    For a real symmetric matrix ``a`` with diagonal elements `d` and
+    off-diagonal elements `e`.
+
+    Parameters
+    ----------
+    d : ndarray, shape (ndim,)
+        The diagonal elements of the array.
+    e : ndarray, shape (ndim-1,)
+        The off-diagonal elements of the array.
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    tol : float
+        The absolute tolerance to which each eigenvalue is required
+        (only used when ``lapack_driver='stebz'``).
+        An eigenvalue (or cluster) is considered to have converged if it
+        lies in an interval of this width. If <= 0. (default),
+        the value ``eps*|a|`` is used where eps is the machine precision,
+        and ``|a|`` is the 1-norm of the matrix ``a``.
+    lapack_driver : str
+        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
+        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
+        and 'stebz' otherwise. 'sterf' and 'stev' can only be used when
+        ``select='a'``.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigh_tridiagonal : eigenvalues and right eigenvectors for
+        symmetric/Hermitian tridiagonal matrices
+
+    Examples
+    --------
+    >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh
+    >>> d = 3*np.ones(4)
+    >>> e = -1*np.ones(3)
+    >>> w = eigvalsh_tridiagonal(d, e)
+    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
+    >>> w2 = eigvalsh(A)  # Verify with other eigenvalue routines
+    >>> np.allclose(w - w2, np.zeros(4))
+    True
+    """
+    # Thin convenience wrapper: delegate to eigh_tridiagonal with
+    # eigvals_only=True so only the eigenvalues are computed.
+    return eigh_tridiagonal(
+        d, e, eigvals_only=True, select=select, select_range=select_range,
+        check_finite=check_finite, tol=tol, lapack_driver=lapack_driver)
+
+
+def eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None,
+                     check_finite=True, tol=0., lapack_driver='auto'):
+    """
+    Solve eigenvalue problem for a real symmetric tridiagonal matrix.
+
+    Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v    = identity
+
+    For a real symmetric matrix ``a`` with diagonal elements `d` and
+    off-diagonal elements `e`.
+
+    Parameters
+    ----------
+    d : ndarray, shape (ndim,)
+        The diagonal elements of the array.
+    e : ndarray, shape (ndim-1,)
+        The off-diagonal elements of the array.
+    eigvals_only : bool, optional
+        Compute only the eigenvalues and no eigenvectors.
+        (Default: calculate also eigenvectors)
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    tol : float
+        The absolute tolerance to which each eigenvalue is required
+        (only used when 'stebz' is the `lapack_driver`).
+        An eigenvalue (or cluster) is considered to have converged if it
+        lies in an interval of this width. If <= 0. (default),
+        the value ``eps*|a|`` is used where eps is the machine precision,
+        and ``|a|`` is the 1-norm of the matrix ``a``.
+    lapack_driver : str
+        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
+        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
+        and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and
+        ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is
+        used to find the corresponding eigenvectors. 'sterf' can only be
+        used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only
+        be used when ``select='a'``.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+    v : (M, M) ndarray
+        The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is
+        the column ``v[:,i]``. Only returned when ``eigvals_only=False``.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+        matrices
+    eig : eigenvalues and right eigenvectors for non-symmetric arrays
+    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+        band matrices
+
+    Notes
+    -----
+    This function makes use of LAPACK ``S/DSTEMR`` routines by default;
+    depending on `lapack_driver` it may instead call ``S/DSTEBZ`` (optionally
+    followed by ``S/DSTEIN``), ``S/DSTERF``, or ``S/DSTEV``.
+
+    Examples
+    --------
+    >>> from scipy.linalg import eigh_tridiagonal
+    >>> d = 3*np.ones(4)
+    >>> e = -1*np.ones(3)
+    >>> w, v = eigh_tridiagonal(d, e)
+    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
+    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+    True
+    """
+    d = _asarray_validated(d, check_finite=check_finite)
+    e = _asarray_validated(e, check_finite=check_finite)
+    for check in (d, e):
+        if check.ndim != 1:
+            raise ValueError('expected a 1-D array')
+        if check.dtype.char in 'GFD':  # complex
+            raise TypeError('Only real arrays currently supported')
+    if d.size != e.size + 1:
+        raise ValueError('d (%s) must have one more element than e (%s)'
+                         % (d.size, e.size))
+    # Normalize `select`/`select_range` into the LAPACK-style parameters
+    # (vl, vu) value bounds and (il, iu) index bounds; select == 0 means
+    # "all eigenvalues".
+    select, vl, vu, il, iu, _ = _check_select(
+        select, select_range, 0, d.size)
+    if not isinstance(lapack_driver, str):
+        raise TypeError('lapack_driver must be str')
+    drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev')
+    if lapack_driver not in drivers:
+        raise ValueError('lapack_driver must be one of %s, got %s'
+                         % (drivers, lapack_driver))
+    if lapack_driver == 'auto':
+        # stemr handles the all-eigenvalues case; stebz supports subsets.
+        lapack_driver = 'stemr' if select == 0 else 'stebz'
+    func, = get_lapack_funcs((lapack_driver,), (d, e))
+    compute_v = not eigvals_only
+    if lapack_driver == 'sterf':
+        # sterf: eigenvalues only, and only for the full spectrum.
+        if select != 0:
+            raise ValueError('sterf can only be used when select == "a"')
+        if not eigvals_only:
+            raise ValueError('sterf can only be used when eigvals_only is '
+                             'True')
+        w, info = func(d, e)
+        m = len(w)
+    elif lapack_driver == 'stev':
+        # stev: full spectrum, with or without eigenvectors.
+        if select != 0:
+            raise ValueError('stev can only be used when select == "a"')
+        w, v, info = func(d, e, compute_v=compute_v)
+        m = len(w)
+    elif lapack_driver == 'stebz':
+        tol = float(tol)
+        internal_name = 'stebz'
+        stebz, = get_lapack_funcs((internal_name,), (d, e))
+        # If getting eigenvectors, needs to be block-ordered (B) instead of
+        # matrix-ordered (E), and we will reorder later
+        order = 'E' if eigvals_only else 'B'
+        m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol,
+                                           order)
+    else:   # 'stemr'
+        # ?STEMR annoyingly requires size N instead of N-1
+        e_ = empty(e.size+1, e.dtype)
+        e_[:-1] = e
+        # Workspace query first, then the actual computation.
+        stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e))
+        lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu,
+                                          compute_v=compute_v)
+        _check_info(info, 'stemr_lwork')
+        m, w, v, info = func(d, e_, select, vl, vu, il, iu,
+                             compute_v=compute_v, lwork=lwork, liwork=liwork)
+    _check_info(info, lapack_driver + ' (eigh_tridiagonal)')
+    # Only the first m entries of w are valid eigenvalues.
+    w = w[:m]
+    if eigvals_only:
+        return w
+    else:
+        # Do we still need to compute the eigenvalues?
+        if lapack_driver == 'stebz':
+            # stebz returned only eigenvalues; get eigenvectors via stein.
+            func, = get_lapack_funcs(('stein',), (d, e))
+            v, info = func(d, e, w, iblock, isplit)
+            _check_info(info, 'stein (eigh_tridiagonal)',
+                        positive='%d eigenvectors failed to converge')
+            # Convert block-order to matrix-order
+            order = argsort(w)
+            w, v = w[order], v[:, order]
+        else:
+            v = v[:, :m]
+    return w, v
+
+
+def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'):
+ """Check info return value."""
+ if info < 0:
+ raise ValueError('illegal value in argument %d of internal %s'
+ % (-info, driver))
+ if info > 0 and positive:
+ raise LinAlgError(("%s " + positive) % (driver, info,))
+
+
+def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):
+    """
+    Compute Hessenberg form of a matrix.
+
+    The Hessenberg decomposition is::
+
+        A = Q H Q^H
+
+    where `Q` is unitary/orthogonal and `H` has only zero elements below
+    the first sub-diagonal.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to bring into Hessenberg form.
+    calc_q : bool, optional
+        Whether to compute the transformation matrix. Default is False.
+    overwrite_a : bool, optional
+        Whether to overwrite `a`; may improve performance.
+        Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    H : (M, M) ndarray
+        Hessenberg form of `a`.
+    Q : (M, M) ndarray
+        Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``.
+        Only returned if ``calc_q=True``.
+
+    Examples
+    --------
+    >>> from scipy.linalg import hessenberg
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> H, Q = hessenberg(A, calc_q=True)
+    >>> H
+    array([[  2.        , -11.65843866,   1.42005301,   0.25349066],
+           [ -9.94987437,  14.53535354,  -5.31022304,   2.43081618],
+           [  0.        ,  -1.83299243,   0.38969961,  -0.51527034],
+           [  0.        ,   0.        ,  -3.83189513,   1.07494686]])
+    >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4)))
+    True
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
+        raise ValueError('expected square matrix')
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    # if 2x2 or smaller: already in Hessenberg
+    if a1.shape[0] <= 2:
+        if calc_q:
+            return a1, eye(a1.shape[0])
+        return a1
+
+    gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal',
+                                                  'gehrd_lwork'), (a1,))
+    # Balance the matrix first; permute=0 requests scaling only (no row or
+    # column permutations), and (lo, hi) delimit the active window.
+    ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a)
+    _check_info(info, 'gebal (hessenberg)', positive=False)
+    n = len(a1)
+
+    # Query the optimal workspace size before the reduction.
+    lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi)
+
+    # gehrd returns H in the upper triangle plus first subdiagonal of hq;
+    # the Householder reflectors are stored below, with scales in tau.
+    hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
+    _check_info(info, 'gehrd (hessenberg)', positive=False)
+    # Zero everything below the first subdiagonal to extract H itself.
+    h = numpy.triu(hq, -1)
+    if not calc_q:
+        return h
+
+    # use orghr/unghr to compute q
+    orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,))
+    lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi)
+
+    # Accumulate the stored Householder reflectors into the explicit Q.
+    q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
+    _check_info(info, 'orghr (hessenberg)', positive=False)
+    return h, q
+
+
+def cdf2rdf(w, v):
+    """
+    Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real
+    eigenvalues in a block diagonal form ``wr`` and the associated real
+    eigenvectors ``vr``, such that::
+
+        vr @ wr = X @ vr
+
+    continues to hold, where ``X`` is the original array for which ``w`` and
+    ``v`` are the eigenvalues and eigenvectors.
+
+    .. versionadded:: 1.1.0
+
+    Parameters
+    ----------
+    w : (..., M) array_like
+        Complex or real eigenvalues, an array or stack of arrays
+
+        Conjugate pairs must not be interleaved, else the wrong result
+        will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result,
+        but ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not.
+
+    v : (..., M, M) array_like
+        Complex or real eigenvectors, a square array or stack of square arrays.
+
+    Returns
+    -------
+    wr : (..., M, M) ndarray
+        Real diagonal block form of eigenvalues
+    vr : (..., M, M) ndarray
+        Real eigenvectors associated with ``wr``
+
+    See Also
+    --------
+    eig : Eigenvalues and right eigenvectors for non-symmetric arrays
+    rsf2csf : Convert real Schur form to complex Schur form
+
+    Notes
+    -----
+    ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``.
+    For example, obtained by ``w, v = scipy.linalg.eig(X)`` or
+    ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent
+    stacked arrays.
+
+    .. versionadded:: 1.1.0
+
+    Examples
+    --------
+    >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
+    >>> X
+    array([[ 1,  2,  3],
+           [ 0,  4,  5],
+           [ 0, -5,  4]])
+
+    >>> from scipy import linalg
+    >>> w, v = linalg.eig(X)
+    >>> w
+    array([ 1.+0.j,  4.+5.j,  4.-5.j])
+    >>> v
+    array([[ 1.00000+0.j     , -0.01906-0.40016j, -0.01906+0.40016j],
+           [ 0.00000+0.j     ,  0.00000-0.64788j,  0.00000+0.64788j],
+           [ 0.00000+0.j     ,  0.64788+0.j     ,  0.64788-0.j     ]])
+
+    >>> wr, vr = linalg.cdf2rdf(w, v)
+    >>> wr
+    array([[ 1.,  0.,  0.],
+           [ 0.,  4.,  5.],
+           [ 0., -5.,  4.]])
+    >>> vr
+    array([[ 1.     ,  0.40016, -0.01906],
+           [ 0.     ,  0.64788,  0.     ],
+           [ 0.     ,  0.     ,  0.64788]])
+
+    >>> vr @ wr
+    array([[ 1.     ,  1.69593,  1.9246 ],
+           [ 0.     ,  2.59153,  3.23942],
+           [ 0.     , -3.23942,  2.59153]])
+    >>> X @ vr
+    array([[ 1.     ,  1.69593,  1.9246 ],
+           [ 0.     ,  2.59153,  3.23942],
+           [ 0.     , -3.23942,  2.59153]])
+    """
+    w, v = _asarray_validated(w), _asarray_validated(v)
+
+    # check dimensions
+    if w.ndim < 1:
+        raise ValueError('expected w to be at least 1D')
+    if v.ndim < 2:
+        raise ValueError('expected v to be at least 2D')
+    if v.ndim != w.ndim + 1:
+        raise ValueError('expected eigenvectors array to have exactly one '
+                         'dimension more than eigenvalues array')
+
+    # check shapes
+    n = w.shape[-1]
+    M = w.shape[:-1]  # leading (stack) dimensions; () for a single matrix
+    if v.shape[-2] != v.shape[-1]:
+        raise ValueError('expected v to be a square matrix or stacked square '
+                         'matrices: v.shape[-2] = v.shape[-1]')
+    if v.shape[-1] != n:
+        raise ValueError('expected the same number of eigenvalues as '
+                         'eigenvectors')
+
+    # get indices for each first pair of complex eigenvalues
+    complex_mask = iscomplex(w)
+    n_complex = complex_mask.sum(axis=-1)
+
+    # check if all complex eigenvalues have conjugate pairs
+    if not (n_complex % 2 == 0).all():
+        raise ValueError('expected complex-conjugate pairs of eigenvalues')
+
+    # find complex indices
+    idx = nonzero(complex_mask)
+    idx_stack = idx[:-1]  # indices into the leading stack dimensions
+    idx_elem = idx[-1]    # positions of complex eigenvalues in each row
+
+    # filter them to conjugate indices, assuming pairs are not interleaved
+    j = idx_elem[0::2]  # first member of each conjugate pair
+    k = idx_elem[1::2]  # second member of each conjugate pair
+    stack_ind = ()
+    for i in idx_stack:
+        # should never happen, assuming nonzero orders by the last axis
+        assert (i[0::2] == i[1::2]).all(),\
+                "Conjugate pair spanned different arrays!"
+        stack_ind += (i[0::2],)
+
+    # all eigenvalues to diagonal form
+    wr = zeros(M + (n, n), dtype=w.real.dtype)
+    di = range(n)  # diagonal index helper
+    wr[..., di, di] = w.real
+
+    # complex eigenvalues to real block diagonal form
+    wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag
+    wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag
+
+    # compute real eigenvectors associated with real block diagonal eigenvalues
+    # u combines each conjugate eigenvector pair into real vectors (built from
+    # the pair's real and imaginary parts) and is the identity elsewhere.
+    u = zeros(M + (n, n), dtype=numpy.cdouble)
+    u[..., di, di] = 1.0
+    u[stack_ind + (j, j)] = 0.5j
+    u[stack_ind + (j, k)] = 0.5
+    u[stack_ind + (k, j)] = -0.5j
+    u[stack_ind + (k, k)] = 0.5
+
+    # multiply matrices v and u (equivalent to v @ u)
+    vr = einsum('...ij,...jk->...ik', v, u).real
+
+    return wr, vr
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_cholesky.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_cholesky.py
new file mode 100644
index 0000000..796f36f
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_cholesky.py
@@ -0,0 +1,352 @@
+"""Cholesky decomposition functions."""
+
+from numpy import asarray_chkfinite, asarray, atleast_2d
+
+# Local imports
+from .misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs
+
+__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
+ 'cho_solve_banded']
+
+
+def _cholesky(a, lower=False, overwrite_a=False, clean=True,
+ check_finite=True):
+ """Common code for cholesky() and cho_factor()."""
+
+ a1 = asarray_chkfinite(a) if check_finite else asarray(a)
+ a1 = atleast_2d(a1)
+
+ # Dimension check
+ if a1.ndim != 2:
+ raise ValueError('Input array needs to be 2D but received '
+ 'a {}d-array.'.format(a1.ndim))
+ # Squareness check
+ if a1.shape[0] != a1.shape[1]:
+ raise ValueError('Input array is expected to be square but has '
+ 'the shape: {}.'.format(a1.shape))
+
+ # Quick return for square empty array
+ if a1.size == 0:
+ return a1.copy(), lower
+
+ overwrite_a = overwrite_a or _datacopied(a1, a)
+ potrf, = get_lapack_funcs(('potrf',), (a1,))
+ c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)
+ if info > 0:
+ raise LinAlgError("%d-th leading minor of the array is not positive "
+ "definite" % info)
+ if info < 0:
+ raise ValueError('LAPACK reported an illegal value in {}-th argument'
+ 'on entry to "POTRF".'.format(-info))
+ return c, lower
+
+
+def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
+ """
+ Compute the Cholesky decomposition of a matrix.
+
+ Returns the Cholesky decomposition, :math:`A = L L^*` or
+ :math:`A = U^* U` of a Hermitian positive-definite matrix A.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ Matrix to be decomposed
+ lower : bool, optional
+ Whether to compute the upper- or lower-triangular Cholesky
+ factorization. Default is upper-triangular.
+ overwrite_a : bool, optional
+ Whether to overwrite data in `a` (may improve performance).
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ c : (M, M) ndarray
+ Upper- or lower-triangular Cholesky factor of `a`.
+
+ Raises
+ ------
+ LinAlgError : if decomposition fails.
+
+ Examples
+ --------
+ >>> from scipy.linalg import cholesky
+ >>> a = np.array([[1,-2j],[2j,5]])
+ >>> L = cholesky(a, lower=True)
+ >>> L
+ array([[ 1.+0.j, 0.+0.j],
+ [ 0.+2.j, 1.+0.j]])
+ >>> L @ L.T.conj()
+ array([[ 1.+0.j, 0.-2.j],
+ [ 0.+2.j, 5.+0.j]])
+
+ """
+ c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True,
+ check_finite=check_finite)
+ return c
+
+
+def cho_factor(a, lower=False, overwrite_a=False, check_finite=True):
+ """
+ Compute the Cholesky decomposition of a matrix, to use in cho_solve
+
+ Returns a matrix containing the Cholesky decomposition,
+ ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.
+ The return value can be directly used as the first parameter to cho_solve.
+
+ .. warning::
+ The returned matrix also contains random data in the entries not
+ used by the Cholesky decomposition. If you need to zero these
+ entries, use the function `cholesky` instead.
+
+ Parameters
+ ----------
+ a : (M, M) array_like
+ Matrix to be decomposed
+ lower : bool, optional
+ Whether to compute the upper or lower triangular Cholesky factorization
+ (Default: upper-triangular)
+ overwrite_a : bool, optional
+ Whether to overwrite data in a (may improve performance)
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ c : (M, M) ndarray
+ Matrix whose upper or lower triangle contains the Cholesky factor
+ of `a`. Other parts of the matrix contain random data.
+ lower : bool
+ Flag indicating whether the factor is in the lower or upper triangle
+
+ Raises
+ ------
+ LinAlgError
+ Raised if decomposition fails.
+
+ See also
+ --------
+ cho_solve : Solve a linear set equations using the Cholesky factorization
+ of a matrix.
+
+ Examples
+ --------
+ >>> from scipy.linalg import cho_factor
+ >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])
+ >>> c, low = cho_factor(A)
+ >>> c
+ array([[3. , 1. , 0.33333333, 1.66666667],
+ [3. , 2.44948974, 1.90515869, -0.27216553],
+ [1. , 5. , 2.29330749, 0.8559528 ],
+ [5. , 1. , 2. , 1.55418563]])
+ >>> np.allclose(np.triu(c).T @ np. triu(c) - A, np.zeros((4, 4)))
+ True
+
+ """
+ c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False,
+ check_finite=check_finite)
+ return c, lower
+
+
+def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):
+ """Solve the linear equations A x = b, given the Cholesky factorization of A.
+
+ Parameters
+ ----------
+ (c, lower) : tuple, (array, bool)
+ Cholesky factorization of a, as given by cho_factor
+ b : array
+ Right-hand side
+ overwrite_b : bool, optional
+ Whether to overwrite data in b (may improve performance)
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ x : array
+ The solution to the system A x = b
+
+ See also
+ --------
+ cho_factor : Cholesky factorization of a matrix
+
+ Examples
+ --------
+ >>> from scipy.linalg import cho_factor, cho_solve
+ >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])
+ >>> c, low = cho_factor(A)
+ >>> x = cho_solve((c, low), [1, 1, 1, 1])
+ >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4))
+ True
+
+ """
+ (c, lower) = c_and_lower
+ if check_finite:
+ b1 = asarray_chkfinite(b)
+ c = asarray_chkfinite(c)
+ else:
+ b1 = asarray(b)
+ c = asarray(c)
+ if c.ndim != 2 or c.shape[0] != c.shape[1]:
+ raise ValueError("The factored matrix c is not square.")
+ if c.shape[1] != b1.shape[0]:
+ raise ValueError("incompatible dimensions ({} and {})"
+ .format(c.shape, b1.shape))
+
+ overwrite_b = overwrite_b or _datacopied(b1, b)
+
+ potrs, = get_lapack_funcs(('potrs',), (c, b1))
+ x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)
+ if info != 0:
+ raise ValueError('illegal value in %dth argument of internal potrs'
+ % -info)
+ return x
+
+
+def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True):
+ """
+ Cholesky decompose a banded Hermitian positive-definite matrix
+
+ The matrix a is stored in ab either in lower-diagonal or upper-
+ diagonal ordered form::
+
+ ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
+ ab[ i - j, j] == a[i,j] (if lower form; i >= j)
+
+ Example of ab (shape of a is (6,6), u=2)::
+
+ upper form:
+ * * a02 a13 a24 a35
+ * a01 a12 a23 a34 a45
+ a00 a11 a22 a33 a44 a55
+
+ lower form:
+ a00 a11 a22 a33 a44 a55
+ a10 a21 a32 a43 a54 *
+ a20 a31 a42 a53 * *
+
+ Parameters
+ ----------
+ ab : (u + 1, M) array_like
+ Banded matrix
+ overwrite_ab : bool, optional
+ Discard data in ab (may enhance performance)
+ lower : bool, optional
+ Is the matrix in the lower form. (Default is upper form)
+ check_finite : bool, optional
+ Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ c : (u + 1, M) ndarray
+ Cholesky factorization of a, in the same banded format as ab
+
+ See also
+ --------
+ cho_solve_banded : Solve a linear set equations, given the Cholesky factorization
+ of a banded Hermitian.
+
+ Examples
+ --------
+ >>> from scipy.linalg import cholesky_banded
+ >>> from numpy import allclose, zeros, diag
+ >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])
+ >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)
+ >>> A = A + A.conj().T + np.diag(Ab[2, :])
+ >>> c = cholesky_banded(Ab)
+ >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :])
+ >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5)))
+ True
+
+ """
+ if check_finite:
+ ab = asarray_chkfinite(ab)
+ else:
+ ab = asarray(ab)
+
+ pbtrf, = get_lapack_funcs(('pbtrf',), (ab,))
+ c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab)
+ if info > 0:
+ raise LinAlgError("%d-th leading minor not positive definite" % info)
+ if info < 0:
+ raise ValueError('illegal value in %d-th argument of internal pbtrf'
+ % -info)
+ return c
+
+
+def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True):
+ """
+ Solve the linear equations ``A x = b``, given the Cholesky factorization of
+ the banded Hermitian ``A``.
+
+ Parameters
+ ----------
+ (cb, lower) : tuple, (ndarray, bool)
+ `cb` is the Cholesky factorization of A, as given by cholesky_banded.
+ `lower` must be the same value that was given to cholesky_banded.
+ b : array_like
+ Right-hand side
+ overwrite_b : bool, optional
+ If True, the function will overwrite the values in `b`.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+ Returns
+ -------
+ x : array
+ The solution to the system A x = b
+
+ See also
+ --------
+ cholesky_banded : Cholesky factorization of a banded matrix
+
+ Notes
+ -----
+
+ .. versionadded:: 0.8.0
+
+ Examples
+ --------
+ >>> from scipy.linalg import cholesky_banded, cho_solve_banded
+ >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])
+ >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)
+ >>> A = A + A.conj().T + np.diag(Ab[2, :])
+ >>> c = cholesky_banded(Ab)
+ >>> x = cho_solve_banded((c, False), np.ones(5))
+ >>> np.allclose(A @ x - np.ones(5), np.zeros(5))
+ True
+
+ """
+ (cb, lower) = cb_and_lower
+ if check_finite:
+ cb = asarray_chkfinite(cb)
+ b = asarray_chkfinite(b)
+ else:
+ cb = asarray(cb)
+ b = asarray(b)
+
+ # Validate shapes.
+ if cb.shape[-1] != b.shape[0]:
+ raise ValueError("shapes of cb and b are not compatible.")
+
+ pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b))
+ x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b)
+ if info > 0:
+ raise LinAlgError("%dth leading minor not positive definite" % info)
+ if info < 0:
+ raise ValueError('illegal value in %dth argument of internal pbtrs'
+ % -info)
+ return x
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_lu.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_lu.py
new file mode 100644
index 0000000..ab88649
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_lu.py
@@ -0,0 +1,222 @@
+"""LU decomposition functions."""
+
+from warnings import warn
+
+from numpy import asarray, asarray_chkfinite
+
+# Local imports
+from .misc import _datacopied, LinAlgWarning
+from .lapack import get_lapack_funcs
+from .flinalg import get_flinalg_funcs
+
+__all__ = ['lu', 'lu_solve', 'lu_factor']
+
+
+def lu_factor(a, overwrite_a=False, check_finite=True):
+    """
+    Compute pivoted LU decomposition of a matrix.
+
+    The decomposition is::
+
+        A = P L U
+
+    where P is a permutation matrix, L lower triangular with unit
+    diagonal elements, and U upper triangular.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to decompose
+    overwrite_a : bool, optional
+        Whether to overwrite data in A (may increase performance)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    lu : (N, N) ndarray
+        Matrix containing U in its upper triangle, and L in its lower triangle.
+        The unit diagonal elements of L are not stored.
+    piv : (N,) ndarray
+        Pivot indices representing the permutation matrix P:
+        row i of matrix was interchanged with row piv[i].
+
+    See also
+    --------
+    lu_solve : solve an equation system using the LU factorization of a matrix
+
+    Notes
+    -----
+    This is a wrapper to the ``*GETRF`` routines from LAPACK.
+
+    Examples
+    --------
+    >>> from scipy.linalg import lu_factor
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> lu, piv = lu_factor(A)
+    >>> piv
+    array([2, 2, 3, 3], dtype=int32)
+
+    Convert LAPACK's ``piv`` array to NumPy index and test the permutation
+
+    >>> piv_py = [2, 0, 3, 1]
+    >>> L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu)
+    >>> np.allclose(A[piv_py] - L @ U, np.zeros((4, 4)))
+    True
+    """
+    if check_finite:
+        a1 = asarray_chkfinite(a)  # raises ValueError on NaN/Inf entries
+    else:
+        a1 = asarray(a)
+    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
+        raise ValueError('expected square matrix')
+    # If asarray already made a fresh copy, LAPACK may safely overwrite it.
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    getrf, = get_lapack_funcs(('getrf',), (a1,))  # dtype-matched s/d/c/z getrf
+    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
+    if info < 0:  # negative info flags an invalid argument (internal bug)
+        raise ValueError('illegal value in %dth argument of '
+                         'internal getrf (lu_factor)' % -info)
+    if info > 0:
+        # info > 0: U[info-1, info-1] is exactly zero. The factorization is
+        # complete, but solving with it would divide by zero, so warn.
+        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
+             LinAlgWarning, stacklevel=2)
+    return lu, piv
+
+
+def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
+    """Solve an equation system, a x = b, given the LU factorization of a
+
+    Parameters
+    ----------
+    (lu, piv)
+        Factorization of the coefficient matrix a, as given by lu_factor
+    b : array
+        Right-hand side
+    trans : {0, 1, 2}, optional
+        Type of system to solve:
+
+        =====  =========
+        trans  system
+        =====  =========
+        0      a x   = b
+        1      a^T x = b
+        2      a^H x = b
+        =====  =========
+    overwrite_b : bool, optional
+        Whether to overwrite data in b (may increase performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : array
+        Solution to the system
+
+    See also
+    --------
+    lu_factor : LU factorize a matrix
+
+    Examples
+    --------
+    >>> from scipy.linalg import lu_factor, lu_solve
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> b = np.array([1, 1, 1, 1])
+    >>> lu, piv = lu_factor(A)
+    >>> x = lu_solve((lu, piv), b)
+    >>> np.allclose(A @ x - b, np.zeros((4,)))
+    True
+
+    """
+    (lu, piv) = lu_and_piv
+    if check_finite:
+        b1 = asarray_chkfinite(b)  # raises ValueError on NaN/Inf entries
+    else:
+        b1 = asarray(b)
+    # If asarray already made a fresh copy, LAPACK may safely overwrite it.
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+    if lu.shape[0] != b1.shape[0]:
+        raise ValueError("Shapes of lu {} and b {} are incompatible"
+                         .format(lu.shape, b1.shape))
+
+    getrs, = get_lapack_funcs(('getrs',), (lu, b1))  # dtype-matched solver
+    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
+    if info == 0:
+        return x
+    # NOTE(review): the message below says gesv|posv, but the routine actually
+    # called here is getrs -- confirm before relying on the message text.
+    raise ValueError('illegal value in %dth argument of internal gesv|posv'
+                     % -info)
+
+
+def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
+    """
+    Compute pivoted LU decomposition of a matrix.
+
+    The decomposition is::
+
+        A = P L U
+
+    where P is a permutation matrix, L lower triangular with unit
+    diagonal elements, and U upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Array to decompose
+    permute_l : bool, optional
+        Perform the multiplication P*L (Default: do not permute)
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    **(If permute_l == False)**
+
+    p : (M, M) ndarray
+        Permutation matrix
+    l : (M, K) ndarray
+        Lower triangular or trapezoidal matrix with unit diagonal.
+        K = min(M, N)
+    u : (K, N) ndarray
+        Upper triangular or trapezoidal matrix
+
+    **(If permute_l == True)**
+
+    pl : (M, K) ndarray
+        Permuted L matrix.
+        K = min(M, N)
+    u : (K, N) ndarray
+        Upper triangular or trapezoidal matrix
+
+    Notes
+    -----
+    This is a LU factorization routine written for SciPy.
+
+    Examples
+    --------
+    >>> from scipy.linalg import lu
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> p, l, u = lu(A)
+    >>> np.allclose(A - p @ l @ u, np.zeros((4, 4)))
+    True
+
+    """
+    if check_finite:
+        a1 = asarray_chkfinite(a)  # raises ValueError on NaN/Inf entries
+    else:
+        a1 = asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError('expected matrix')
+    # If asarray already made a fresh copy, the routine may safely overwrite it.
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    # Fortran-level helper: unlike getrf, it expands P, L, U explicitly.
+    flu, = get_flinalg_funcs(('lu',), (a1,))
+    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
+    if info < 0:  # negative info flags an invalid argument (internal bug)
+        raise ValueError('illegal value in %dth argument of '
+                         'internal lu.getrf' % -info)
+    if permute_l:
+        return l, u  # here l is already the product P @ L
+    return p, l, u
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_qr.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_qr.py
new file mode 100644
index 0000000..53be60a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_qr.py
@@ -0,0 +1,424 @@
+"""QR decomposition functions."""
+import numpy
+
+# Local imports
+from .lapack import get_lapack_funcs
+from .misc import _datacopied
+
+__all__ = ['qr', 'qr_multiply', 'rq']
+
+
+def safecall(f, name, *args, **kwargs):
+    """Call a LAPACK routine, determining lwork automatically and handling
+    error return values"""
+    lwork = kwargs.get("lwork", None)
+    if lwork in (None, -1):
+        # lwork=-1 is LAPACK's workspace-size query: the routine does no real
+        # work and returns the optimal lwork in the first work-array slot.
+        kwargs['lwork'] = -1
+        ret = f(*args, **kwargs)
+        kwargs['lwork'] = ret[-2][0].real.astype(numpy.int_)
+    ret = f(*args, **kwargs)
+    if ret[-1] < 0:  # ret[-1] is LAPACK's info; negative flags a bad argument
+        raise ValueError("illegal value in %dth argument of internal %s"
+                         % (-ret[-1], name))
+    return ret[:-2]  # strip the trailing (work, info) pair, keep the results
+
+
+def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
+       check_finite=True):
+    """
+    Compute QR decomposition of a matrix.
+
+    Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
+    and R upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be decomposed
+    overwrite_a : bool, optional
+        Whether data in `a` is overwritten (may improve performance if
+        `overwrite_a` is set to True by reusing the existing input data
+        structure rather than creating a new one.)
+    lwork : int, optional
+        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
+        is computed.
+    mode : {'full', 'r', 'economic', 'raw'}, optional
+        Determines what information is to be returned: either both Q and R
+        ('full', default), only R ('r') or both Q and R but computed in
+        economy-size ('economic', see Notes). The final option 'raw'
+        (added in SciPy 0.11) makes the function return two matrices
+        (Q, TAU) in the internal format used by LAPACK.
+    pivoting : bool, optional
+        Whether or not factorization should include pivoting for rank-revealing
+        qr decomposition. If pivoting, compute the decomposition
+        ``A P = Q R`` as above, but where P is chosen such that the diagonal
+        of R is non-increasing.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    Q : float or complex ndarray
+        Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
+        if ``mode='r'``.
+    R : float or complex ndarray
+        Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
+    P : int ndarray
+        Of shape (N,) for ``pivoting=True``. Not returned if
+        ``pivoting=False``.
+
+    Raises
+    ------
+    LinAlgError
+        Raised if decomposition fails
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
+    dorgqr, zungqr, dgeqp3, and zgeqp3.
+
+    If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
+    of (M,M) and (M,N), with ``K=min(M,N)``.
+
+    Examples
+    --------
+    >>> from scipy import linalg
+    >>> a = np.random.randn(9, 6)
+
+    >>> q, r = linalg.qr(a)
+    >>> np.allclose(a, np.dot(q, r))
+    True
+    >>> q.shape, r.shape
+    ((9, 9), (9, 6))
+
+    >>> r2 = linalg.qr(a, mode='r')
+    >>> np.allclose(r, r2)
+    True
+
+    >>> q3, r3 = linalg.qr(a, mode='economic')
+    >>> q3.shape, r3.shape
+    ((9, 6), (6, 6))
+
+    >>> q4, r4, p4 = linalg.qr(a, pivoting=True)
+    >>> d = np.abs(np.diag(r4))
+    >>> np.all(d[1:] <= d[:-1])
+    True
+    >>> np.allclose(a[:, p4], np.dot(q4, r4))
+    True
+    >>> q4.shape, r4.shape, p4.shape
+    ((9, 9), (9, 6), (6,))
+
+    >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
+    >>> q5.shape, r5.shape, p5.shape
+    ((9, 6), (6, 6), (6,))
+
+    """
+    # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
+    # 'qr' are used below.
+    # 'raw' is used internally by qr_multiply
+    if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
+        raise ValueError("Mode argument should be one of ['full', 'r',"
+                         "'economic', 'raw']")
+
+    if check_finite:
+        a1 = numpy.asarray_chkfinite(a)  # raises ValueError on NaN/Inf entries
+    else:
+        a1 = numpy.asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError("expected a 2-D array")
+    M, N = a1.shape
+    # If asarray already made a fresh copy, LAPACK may safely overwrite it.
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    if pivoting:
+        # geqp3: QR with column pivoting (rank-revealing).
+        geqp3, = get_lapack_funcs(('geqp3',), (a1,))
+        qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
+        jpvt -= 1  # geqp3 returns a 1-based index array, so subtract 1
+    else:
+        geqrf, = get_lapack_funcs(('geqrf',), (a1,))
+        qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
+                           overwrite_a=overwrite_a)
+
+    # `qr` holds R in its upper triangle and the Householder reflectors below.
+    if mode not in ['economic', 'raw'] or M < N:
+        R = numpy.triu(qr)          # full (M, N) upper-trapezoidal R
+    else:
+        R = numpy.triu(qr[:N, :])   # economic: keep only the first N rows
+
+    if pivoting:
+        Rj = R, jpvt  # pivot vector rides along with R in the return tuple
+    else:
+        Rj = R,
+
+    if mode == 'r':
+        return Rj
+    elif mode == 'raw':
+        # Internal LAPACK format: reflectors + tau, used by qr_multiply.
+        return ((qr, tau),) + Rj
+
+    # orgqr/ungqr expand the stored reflectors into an explicit Q.
+    gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
+
+    if M < N:
+        # Wide matrix: Q is (M, M), built from the first M reflector columns.
+        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
+                      lwork=lwork, overwrite_a=1)
+    elif mode == 'economic':
+        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
+                      overwrite_a=1)
+    else:
+        # Full mode with M >= N: pad the reflector array out to (M, M)
+        # so orgqr can produce the square Q.
+        t = qr.dtype.char
+        qqr = numpy.empty((M, M), dtype=t)
+        qqr[:, :N] = qr
+        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
+                      overwrite_a=1)
+
+    return (Q,) + Rj
+
+
+def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
+                overwrite_a=False, overwrite_c=False):
+    """
+    Calculate the QR decomposition and multiply Q with a matrix.
+
+    Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
+    and R upper triangular. Multiply Q with a vector or a matrix c.
+
+    Parameters
+    ----------
+    a : (M, N), array_like
+        Input array
+    c : array_like
+        Input array to be multiplied by ``q``.
+    mode : {'left', 'right'}, optional
+        ``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if
+        mode is 'right'.
+        The shape of c must be appropriate for the matrix multiplications,
+        if mode is 'left', ``min(a.shape) == c.shape[0]``,
+        if mode is 'right', ``a.shape[0] == c.shape[1]``.
+    pivoting : bool, optional
+        Whether or not factorization should include pivoting for rank-revealing
+        qr decomposition, see the documentation of qr.
+    conjugate : bool, optional
+        Whether Q should be complex-conjugated. This might be faster
+        than explicit conjugation.
+    overwrite_a : bool, optional
+        Whether data in a is overwritten (may improve performance)
+    overwrite_c : bool, optional
+        Whether data in c is overwritten (may improve performance).
+        If this is used, c must be big enough to keep the result,
+        i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'.
+
+    Returns
+    -------
+    CQ : ndarray
+        The product of ``Q`` and ``c``.
+    R : (K, N), ndarray
+        R array of the resulting QR factorization where ``K = min(M, N)``.
+    P : (N,) ndarray
+        Integer pivot array. Only returned when ``pivoting=True``.
+
+    Raises
+    ------
+    LinAlgError
+        Raised if QR decomposition fails.
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``,
+    ``?UNMQR``, and ``?GEQP3``.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> from scipy.linalg import qr_multiply, qr
+    >>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]])
+    >>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1)
+    >>> qc
+    array([[-1.,  1., -1.],
+           [-1., -1.,  1.],
+           [-1., -1., -1.],
+           [-1.,  1.,  1.]])
+    >>> r1
+    array([[-6., -3., -5.            ],
+           [ 0., -1., -1.11022302e-16],
+           [ 0.,  0., -1.            ]])
+    >>> piv1
+    array([1, 0, 2], dtype=int32)
+    >>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1)
+    >>> np.allclose(2*q2 - qc, np.zeros((4, 3)))
+    True
+
+    """
+    if mode not in ['left', 'right']:
+        raise ValueError("Mode argument can only be 'left' or 'right' but "
+                         "not '{}'".format(mode))
+    c = numpy.asarray_chkfinite(c)
+    if c.ndim < 2:
+        # Promote a vector c to 2-D for the LAPACK call; remember to
+        # flatten the result back at the end.
+        onedim = True
+        c = numpy.atleast_2d(c)
+        if mode == "left":
+            c = c.T
+    else:
+        onedim = False
+
+    a = numpy.atleast_2d(numpy.asarray(a))  # chkfinite done in qr
+    M, N = a.shape
+
+    # Validate conformability before doing any factorization work.
+    if mode == 'left':
+        if c.shape[0] != min(M, N + overwrite_c*(M-N)):
+            raise ValueError('Array shapes are not compatible for Q @ c'
+                             ' operation: {} vs {}'.format(a.shape, c.shape))
+    else:
+        if M != c.shape[1]:
+            raise ValueError('Array shapes are not compatible for c @ Q'
+                             ' operation: {} vs {}'.format(c.shape, a.shape))
+
+    # 'raw' mode returns ((reflectors, tau), R[, pivots]) without forming Q.
+    raw = qr(a, overwrite_a, None, "raw", pivoting)
+    Q, tau = raw[0]
+
+    # ormqr/unmqr apply Q (as stored reflectors) to another matrix.
+    gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
+    if gor_un_mqr.typecode in ('s', 'd'):
+        trans = "T"  # real dtypes: transpose
+    else:
+        trans = "C"  # complex dtypes: conjugate-transpose
+
+    Q = Q[:, :min(M, N)]
+    if M > N and mode == "left" and not overwrite_c:
+        # Tall matrix, left multiply: the product Q @ c has M rows but c only
+        # has N, so zero-pad c into a Fortran-ordered buffer of the right size.
+        if conjugate:
+            cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
+            cc[:, :N] = c.T
+        else:
+            cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
+            cc[:N, :] = c
+            trans = "N"
+        if conjugate:
+            lr = "R"
+        else:
+            lr = "L"
+        overwrite_c = True  # buffer is ours, always safe to overwrite
+    elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
+        # Work on c.T (a zero-copy view) so LAPACK sees Fortran order;
+        # the side flag is flipped to compensate.
+        cc = c.T
+        if mode == "left":
+            lr = "R"
+        else:
+            lr = "L"
+    else:
+        trans = "N"
+        cc = c
+        if mode == "left":
+            lr = "L"
+        else:
+            lr = "R"
+    cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
+                   overwrite_c=overwrite_c)
+    if trans != "N":
+        cQ = cQ.T  # undo the transposed layout used for the LAPACK call
+    if mode == "right":
+        cQ = cQ[:, :min(M, N)]  # economic-size product
+    if onedim:
+        cQ = cQ.ravel()  # restore the 1-D shape of the original c
+
+    return (cQ,) + raw[1:]
+
+
+def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True):
+    """
+    Compute RQ decomposition of a matrix.
+
+    Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal
+    and R upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be decomposed
+    overwrite_a : bool, optional
+        Whether data in a is overwritten (may improve performance)
+    lwork : int, optional
+        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
+        is computed.
+    mode : {'full', 'r', 'economic'}, optional
+        Determines what information is to be returned: either both Q and R
+        ('full', default), only R ('r') or both Q and R but computed in
+        economy-size ('economic', see Notes).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    R : float or complex ndarray
+        Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``.
+    Q : float or complex ndarray
+        Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned
+        if ``mode='r'``.
+
+    Raises
+    ------
+    LinAlgError
+        If decomposition fails.
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf,
+    sorgrq, dorgrq, cungrq and zungrq.
+
+    If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead
+    of (N,N) and (M,N), with ``K=min(M,N)``.
+
+    Examples
+    --------
+    >>> from scipy import linalg
+    >>> a = np.random.randn(6, 9)
+    >>> r, q = linalg.rq(a)
+    >>> np.allclose(a, r @ q)
+    True
+    >>> r.shape, q.shape
+    ((6, 9), (9, 9))
+    >>> r2 = linalg.rq(a, mode='r')
+    >>> np.allclose(r, r2)
+    True
+    >>> r3, q3 = linalg.rq(a, mode='economic')
+    >>> r3.shape, q3.shape
+    ((6, 6), (6, 9))
+
+    """
+    if mode not in ['full', 'r', 'economic']:
+        raise ValueError(
+                 "Mode argument should be one of ['full', 'r', 'economic']")
+
+    if check_finite:
+        a1 = numpy.asarray_chkfinite(a)  # raises ValueError on NaN/Inf entries
+    else:
+        a1 = numpy.asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError('expected matrix')
+    M, N = a1.shape
+    # If asarray already made a fresh copy, LAPACK may safely overwrite it.
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    gerqf, = get_lapack_funcs(('gerqf',), (a1,))
+    rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork,
+                       overwrite_a=overwrite_a)
+    # gerqf stores R in the upper triangle of the LAST M columns; the
+    # diagonal offset N-M selects exactly that trapezoid.
+    if not mode == 'economic' or N < M:
+        R = numpy.triu(rq, N-M)
+    else:
+        R = numpy.triu(rq[-M:, -M:])  # economic: square (M, M) trailing block
+
+    if mode == 'r':
+        return R
+
+    # orgrq/ungrq expand the stored reflectors into an explicit Q.
+    gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
+
+    if N < M:
+        # Tall matrix: Q is (N, N), built from the last N reflector rows.
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork,
+                      overwrite_a=1)
+    elif mode == 'economic':
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork,
+                      overwrite_a=1)
+    else:
+        # Full mode with N >= M: pad the reflector array out to (N, N) so
+        # orgrq can produce the square Q; reflectors occupy the last M rows.
+        rq1 = numpy.empty((N, N), dtype=rq.dtype)
+        rq1[-M:] = rq
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork,
+                      overwrite_a=1)
+
+    return R, Q
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_schur.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_schur.py
new file mode 100644
index 0000000..edb70fc
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_schur.py
@@ -0,0 +1,292 @@
+"""Schur decomposition functions."""
+import numpy
+from numpy import asarray_chkfinite, single, asarray, array
+from numpy.linalg import norm
+
+
+# Local imports.
+from .misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs
+from .decomp import eigvals
+
+__all__ = ['schur', 'rsf2csf']
+
+_double_precision = ['i', 'l', 'd']
+
+
+def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
+          check_finite=True):
+    """
+    Compute Schur decomposition of a matrix.
+
+    The Schur decomposition is::
+
+        A = Z T Z^H
+
+    where Z is unitary and T is either upper-triangular, or for real
+    Schur decomposition (output='real'), quasi-upper triangular. In
+    the quasi-triangular form, 2x2 blocks describing complex-valued
+    eigenvalue pairs may extrude from the diagonal.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to decompose
+    output : {'real', 'complex'}, optional
+        Construct the real or complex Schur decomposition (for real matrices).
+    lwork : int, optional
+        Work array size. If None or -1, it is automatically computed.
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance).
+    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
+        Specifies whether the upper eigenvalues should be sorted. A callable
+        may be passed that, given a eigenvalue, returns a boolean denoting
+        whether the eigenvalue should be sorted to the top-left (True).
+        Alternatively, string parameters may be used::
+
+            'lhp'   Left-hand plane (x.real < 0.0)
+            'rhp'   Right-hand plane (x.real > 0.0)
+            'iuc'   Inside the unit circle (x*x.conjugate() <= 1.0)
+            'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)
+
+        Defaults to None (no sorting).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    T : (M, M) ndarray
+        Schur form of A. It is real-valued for the real Schur decomposition.
+    Z : (M, M) ndarray
+        An unitary Schur transformation matrix for A.
+        It is real-valued for the real Schur decomposition.
+    sdim : int
+        If and only if sorting was requested, a third return value will
+        contain the number of eigenvalues satisfying the sort condition.
+
+    Raises
+    ------
+    LinAlgError
+        Error raised under three conditions:
+
+        1. The algorithm failed due to a failure of the QR algorithm to
+           compute all eigenvalues.
+        2. If eigenvalue sorting was requested, the eigenvalues could not be
+           reordered due to a failure to separate eigenvalues, usually because
+           of poor conditioning.
+        3. If eigenvalue sorting was requested, roundoff errors caused the
+           leading eigenvalues to no longer satisfy the sorting condition.
+
+    See also
+    --------
+    rsf2csf : Convert real Schur form to complex Schur form
+
+    Examples
+    --------
+    >>> from scipy.linalg import schur, eigvals
+    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
+    >>> T, Z = schur(A)
+    >>> T
+    array([[ 2.65896708,  1.42440458, -1.92933439],
+           [ 0.        , -0.32948354, -0.49063704],
+           [ 0.        ,  1.31178921, -0.32948354]])
+    >>> Z
+    array([[0.72711591, -0.60156188, 0.33079564],
+           [0.52839428, 0.79801892, 0.28976765],
+           [0.43829436, 0.03590414, -0.89811411]])
+
+    >>> T2, Z2 = schur(A, output='complex')
+    >>> T2
+    array([[ 2.65896708, -1.22839825+1.32378589j,  0.42590089+1.51937378j],
+           [ 0.        , -0.32948354+0.80225456j, -0.59877807+0.56192146j],
+           [ 0.        ,  0.                    , -0.32948354-0.80225456j]])
+    >>> eigvals(T2)
+    array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j])
+
+    An arbitrary custom eig-sorting condition, having positive imaginary part,
+    which is satisfied by only one eigenvalue
+
+    >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0)
+    >>> sdim
+    1
+
+    """
+    if output not in ['real', 'complex', 'r', 'c']:
+        raise ValueError("argument must be 'real', or 'complex'")
+    if check_finite:
+        a1 = asarray_chkfinite(a)  # raises ValueError on NaN/Inf entries
+    else:
+        a1 = asarray(a)
+    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
+        raise ValueError('expected square matrix')
+    typ = a1.dtype.char
+    if output in ['complex', 'c'] and typ not in ['F', 'D']:
+        # Complex output from a real input: promote to the complex dtype of
+        # matching precision so gees produces a triangular (not quasi-
+        # triangular) T.
+        if typ in _double_precision:
+            a1 = a1.astype('D')
+            typ = 'D'
+        else:
+            a1 = a1.astype('F')
+            typ = 'F'
+    # astype above always copies, so overwriting is then safe.
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    gees, = get_lapack_funcs(('gees',), (a1,))
+    if lwork is None or lwork == -1:
+        # get optimal work array (lwork=-1 is the LAPACK workspace query;
+        # the dummy select callable is ignored for the query)
+        result = gees(lambda x: None, a1, lwork=-1)
+        lwork = result[-2][0].real.astype(numpy.int_)
+
+    if sort is None:
+        sort_t = 0
+        sfunction = lambda x: None  # never called when sorting is disabled
+    else:
+        sort_t = 1
+        if callable(sort):
+            sfunction = sort
+        elif sort == 'lhp':
+            sfunction = lambda x: (x.real < 0.0)
+        elif sort == 'rhp':
+            # NOTE(review): docstring says 'rhp' is x.real > 0.0, but the
+            # comparison here is >= 0.0 -- confirm which is intended.
+            sfunction = lambda x: (x.real >= 0.0)
+        elif sort == 'iuc':
+            sfunction = lambda x: (abs(x) <= 1.0)
+        elif sort == 'ouc':
+            sfunction = lambda x: (abs(x) > 1.0)
+        else:
+            raise ValueError("'sort' parameter must either be 'None', or a "
+                             "callable, or one of ('lhp','rhp','iuc','ouc')")
+
+    result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
+                  sort_t=sort_t)
+
+    # Decode LAPACK's info (last element of the result tuple).
+    info = result[-1]
+    if info < 0:
+        raise ValueError('illegal value in {}-th argument of internal gees'
+                         ''.format(-info))
+    elif info == a1.shape[0] + 1:
+        raise LinAlgError('Eigenvalues could not be separated for reordering.')
+    elif info == a1.shape[0] + 2:
+        raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
+    elif info > 0:
+        raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
+
+    if sort_t == 0:
+        return result[0], result[-3]             # T, Z
+    else:
+        return result[0], result[-3], result[1]  # T, Z, sdim
+
+
+eps = numpy.finfo(float).eps
+feps = numpy.finfo(single).eps
+
+_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0,
+ 'f': 0, 'd': 0, 'F': 1, 'D': 1}
+_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
+_array_type = [['f', 'd'], ['F', 'D']]
+
+
+def _commonType(*arrays):
+    """Return the smallest typecode ('f'/'d'/'F'/'D') covering all *arrays*."""
+    kind = 0       # 0: real, 1: complex (see _array_kind)
+    precision = 0  # 0: single, 1: double (see _array_precision)
+    for a in arrays:
+        t = a.dtype.char
+        kind = max(kind, _array_kind[t])
+        precision = max(precision, _array_precision[t])
+    return _array_type[kind][precision]
+
+
+def _castCopy(type, *arrays):
+    """Cast each array to typecode *type*, always returning fresh copies."""
+    cast_arrays = ()
+    for a in arrays:
+        if a.dtype.char == type:
+            cast_arrays = cast_arrays + (a.copy(),)  # copy even without a cast
+        else:
+            cast_arrays = cast_arrays + (a.astype(type),)  # astype copies
+    if len(cast_arrays) == 1:
+        return cast_arrays[0]  # single input: return the array, not a 1-tuple
+    else:
+        return cast_arrays
+
+
+def rsf2csf(T, Z, check_finite=True):
+    """
+    Convert real Schur form to complex Schur form.
+
+    Convert a quasi-diagonal real-valued Schur form to the upper-triangular
+    complex-valued Schur form.
+
+    Parameters
+    ----------
+    T : (M, M) array_like
+        Real Schur form of the original array
+    Z : (M, M) array_like
+        Schur transformation matrix
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    T : (M, M) ndarray
+        Complex Schur form of the original array
+    Z : (M, M) ndarray
+        Schur transformation matrix corresponding to the complex form
+
+    See Also
+    --------
+    schur : Schur decomposition of an array
+
+    Examples
+    --------
+    >>> from scipy.linalg import schur, rsf2csf
+    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
+    >>> T, Z = schur(A)
+    >>> T
+    array([[ 2.65896708,  1.42440458, -1.92933439],
+           [ 0.        , -0.32948354, -0.49063704],
+           [ 0.        ,  1.31178921, -0.32948354]])
+    >>> Z
+    array([[0.72711591, -0.60156188, 0.33079564],
+           [0.52839428, 0.79801892, 0.28976765],
+           [0.43829436, 0.03590414, -0.89811411]])
+    >>> T2 , Z2 = rsf2csf(T, Z)
+    >>> T2
+    array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j],
+           [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j],
+           [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]])
+    >>> Z2
+    array([[0.72711591+0.j,  0.28220393-0.31385693j,  0.51319638-0.17258824j],
+           [0.52839428+0.j,  0.24720268+0.41635578j, -0.68079517-0.15118243j],
+           [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]])
+
+    """
+    if check_finite:
+        Z, T = map(asarray_chkfinite, (Z, T))  # raises ValueError on NaN/Inf
+    else:
+        Z, T = map(asarray, (Z, T))
+
+    for ind, X in enumerate([Z, T]):
+        if X.ndim != 2 or X.shape[0] != X.shape[1]:
+            raise ValueError("Input '{}' must be square.".format('ZT'[ind]))
+
+    if T.shape[0] != Z.shape[0]:
+        raise ValueError("Input array shapes must match: Z: {} vs. T: {}"
+                         "".format(Z.shape, T.shape))
+    N = T.shape[0]
+    # Promote both arrays to a common complex type (the 'F' literal forces
+    # at least single-precision complex); _castCopy avoids mutating inputs.
+    t = _commonType(Z, T, array([3.0], 'F'))
+    Z, T = _castCopy(t, Z, T)
+
+    # Sweep the subdiagonal bottom-up, annihilating each 2x2 quasi-triangular
+    # block with a complex Givens rotation.
+    for m in range(N-1, 0, -1):
+        # Only treat the subdiagonal entry as nonzero if it exceeds roundoff
+        # relative to its diagonal neighbours.
+        if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])):
+            # Shifted eigenvalue of the 2x2 block; defines the rotation that
+            # zeroes T[m, m-1].
+            mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m]
+            r = norm([mu[0], T[m, m-1]])
+            c = mu[0] / r
+            s = T[m, m-1] / r
+            G = array([[c.conj(), s], [-s, c]], dtype=t)
+
+            # Apply G to the affected rows of T, then G^H to the affected
+            # columns of T and Z, preserving the similarity transform.
+            T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:])
+            T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T)
+            Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T)
+
+        T[m, m-1] = 0.0  # force exact zero so T is strictly upper-triangular
+    return T, Z
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_svd.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_svd.py
new file mode 100644
index 0000000..c919a38
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/decomp_svd.py
@@ -0,0 +1,493 @@
+"""SVD decomposition functions."""
+import numpy
+from numpy import zeros, r_, diag, dot, arccos, arcsin, where, clip
+
+# Local imports.
+from .misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs, _compute_lwork
+from .decomp import _asarray_validated
+
+__all__ = ['svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space']
+
+
def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False,
        check_finite=True, lapack_driver='gesdd'):
    """
    Singular Value Decomposition.

    Factorizes the matrix `a` into two unitary matrices ``U`` and ``Vh``, and
    a 1-D array ``s`` of singular values (real, non-negative) such that
    ``a == U @ S @ Vh``, where ``S`` is a suitably shaped matrix of zeros with
    main diagonal ``s``.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to decompose.
    full_matrices : bool, optional
        If True (default), `U` and `Vh` are of shape ``(M, M)``, ``(N, N)``.
        If False, the shapes are ``(M, K)`` and ``(K, N)``, where
        ``K = min(M, N)``.
    compute_uv : bool, optional
        Whether to compute also ``U`` and ``Vh`` in addition to ``s``.
        Default is True.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    lapack_driver : {'gesdd', 'gesvd'}, optional
        Whether to use the more efficient divide-and-conquer approach
        (``'gesdd'``) or general rectangular approach (``'gesvd'``)
        to compute the SVD. MATLAB and Octave use the ``'gesvd'`` approach.
        Default is ``'gesdd'``.

        .. versionadded:: 0.18

    Returns
    -------
    U : ndarray
        Unitary matrix having left singular vectors as columns.
        Of shape ``(M, M)`` or ``(M, K)``, depending on `full_matrices`.
    s : ndarray
        The singular values, sorted in non-increasing order.
        Of shape (K,), with ``K = min(M, N)``.
    Vh : ndarray
        Unitary matrix having right singular vectors as rows.
        Of shape ``(N, N)`` or ``(K, N)`` depending on `full_matrices`.

    For ``compute_uv=False``, only ``s`` is returned.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    See also
    --------
    svdvals : Compute singular values of a matrix.
    diagsvd : Construct the Sigma matrix, given the vector s.

    Examples
    --------
    >>> from scipy import linalg
    >>> m, n = 9, 6
    >>> a = np.random.randn(m, n) + 1.j*np.random.randn(m, n)
    >>> U, s, Vh = linalg.svd(a)
    >>> U.shape, s.shape, Vh.shape
    ((9, 9), (6,), (6, 6))

    Reconstruct the original matrix from the decomposition:

    >>> sigma = np.zeros((m, n))
    >>> for i in range(min(m, n)):
    ...     sigma[i, i] = s[i]
    >>> a1 = np.dot(U, np.dot(sigma, Vh))
    >>> np.allclose(a, a1)
    True

    Alternatively, use ``full_matrices=False`` (notice that the shape of
    ``U`` is then ``(m, n)`` instead of ``(m, m)``):

    >>> U, s, Vh = linalg.svd(a, full_matrices=False)
    >>> U.shape, s.shape, Vh.shape
    ((9, 6), (6,), (6, 6))
    >>> S = np.diag(s)
    >>> np.allclose(a, np.dot(U, np.dot(S, Vh)))
    True

    >>> s2 = linalg.svd(a, compute_uv=False)
    >>> np.allclose(s, s2)
    True

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    m, n = a1.shape
    overwrite_a = overwrite_a or (_datacopied(a1, a))

    if not isinstance(lapack_driver, str):
        raise TypeError('lapack_driver must be a string')
    if lapack_driver not in ('gesdd', 'gesvd'):
        raise ValueError('lapack_driver must be "gesdd" or "gesvd", not "%s"'
                         % (lapack_driver,))
    funcs = (lapack_driver, lapack_driver + '_lwork')
    gesXd, gesXd_lwork = get_lapack_funcs(funcs, (a1,), ilp64='preferred')

    # compute optimal lwork
    lwork = _compute_lwork(gesXd_lwork, a1.shape[0], a1.shape[1],
                           compute_uv=compute_uv, full_matrices=full_matrices)

    # perform decomposition
    u, s, v, info = gesXd(a1, compute_uv=compute_uv, lwork=lwork,
                          full_matrices=full_matrices, overwrite_a=overwrite_a)

    if info > 0:
        raise LinAlgError("SVD did not converge")
    if info < 0:
        # Report the driver that was actually used; the original message
        # hard-coded 'gesdd' even when lapack_driver='gesvd' was selected.
        raise ValueError('illegal value in %dth argument of internal %s'
                         % (-info, lapack_driver))
    if compute_uv:
        return u, s, v
    else:
        return s
+
+
def svdvals(a, overwrite_a=False, check_finite=True):
    """
    Compute singular values of a matrix.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to decompose.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    s : (min(M, N),) ndarray
        The singular values, sorted in decreasing order.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Notes
    -----
    Unlike ``svd(a, compute_uv=False)``, this routine also handles an
    empty input matrix, for which it returns an empty array of
    singular values:

    >>> a = np.empty((0, 2))
    >>> from scipy.linalg import svdvals
    >>> svdvals(a)
    array([], dtype=float64)

    See Also
    --------
    svd : Compute the full singular value decomposition of a matrix.
    diagsvd : Construct the Sigma matrix, given the vector s.
    """
    a = _asarray_validated(a, check_finite=check_finite)
    if len(a.shape) != 2:
        # Same error svd() itself would raise for non-2-D input.
        raise ValueError('expected matrix')
    if not a.size:
        # The LAPACK drivers cannot handle 0-sized arrays; an empty
        # matrix simply has no singular values.
        return numpy.empty(0)
    return svd(a, compute_uv=0, overwrite_a=overwrite_a,
               check_finite=False)
+
+
def diagsvd(s, M, N):
    """
    Construct the sigma matrix in SVD from singular values and size M, N.

    Parameters
    ----------
    s : (M,) or (N,) array_like
        Singular values
    M : int
        Size of the matrix whose singular values are `s`.
    N : int
        Size of the matrix whose singular values are `s`.

    Returns
    -------
    S : (M, N) ndarray
        The S-matrix in the singular value decomposition

    See Also
    --------
    svd : Singular value decomposition of a matrix
    svdvals : Compute singular values of a matrix.

    Examples
    --------
    >>> from scipy.linalg import diagsvd
    >>> vals = np.array([1, 2, 3])  # The array representing the computed svd
    >>> diagsvd(vals, 3, 4)
    array([[1, 0, 0, 0],
           [0, 2, 0, 0],
           [0, 0, 3, 0]])
    >>> diagsvd(vals, 4, 3)
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3],
           [0, 0, 0]])

    """
    core = diag(s)
    typecode = core.dtype.char
    k = len(s)
    if k == M:
        # Wide (or square) result: pad zero columns on the right.
        return r_['-1', core, zeros((M, N - M), typecode)]
    if k == N:
        # Tall result: pad zero rows at the bottom.
        return r_[core, zeros((M - N, N), typecode)]
    raise ValueError("Length of s must be M or N.")
+
+
+# Orthonormal decomposition
+
def orth(A, rcond=None):
    """
    Construct an orthonormal basis for the range of A using SVD

    Parameters
    ----------
    A : (M, N) array_like
        Input array
    rcond : float, optional
        Relative condition number. Singular values ``s`` smaller than
        ``rcond * max(s)`` are considered zero.
        Default: floating point eps * max(M,N).

    Returns
    -------
    Q : (M, K) ndarray
        Orthonormal basis for the range of A.
        K = effective rank of A, as determined by rcond

    See also
    --------
    svd : Singular value decomposition of a matrix
    null_space : Matrix null space

    Examples
    --------
    >>> from scipy.linalg import orth
    >>> A = np.array([[2, 0, 0], [0, 5, 0]])  # rank 2 array
    >>> orth(A)
    array([[0., 1.],
           [1., 0.]])
    >>> orth(A.T)
    array([[0., 1.],
           [1., 0.],
           [0., 0.]])

    """
    left, sing, right_h = svd(A, full_matrices=False)
    nrows, ncols = left.shape[0], right_h.shape[1]
    if rcond is None:
        rcond = numpy.finfo(sing.dtype).eps * max(nrows, ncols)
    # Singular values at or below the cutoff do not contribute to the range.
    cutoff = numpy.amax(sing) * rcond
    rank = numpy.sum(sing > cutoff, dtype=int)
    return left[:, :rank]
+
+
def null_space(A, rcond=None):
    """
    Construct an orthonormal basis for the null space of A using SVD

    Parameters
    ----------
    A : (M, N) array_like
        Input array
    rcond : float, optional
        Relative condition number. Singular values ``s`` smaller than
        ``rcond * max(s)`` are considered zero.
        Default: floating point eps * max(M,N).

    Returns
    -------
    Z : (N, K) ndarray
        Orthonormal basis for the null space of A.
        K = dimension of effective null space, as determined by rcond

    See also
    --------
    svd : Singular value decomposition of a matrix
    orth : Matrix range

    Examples
    --------
    1-D null space:

    >>> from scipy.linalg import null_space
    >>> A = np.array([[1, 1], [1, 1]])
    >>> ns = null_space(A)
    >>> ns * np.sign(ns[0,0])  # Remove the sign ambiguity of the vector
    array([[ 0.70710678],
           [-0.70710678]])

    2-D null space:

    >>> B = np.random.rand(3, 5)
    >>> Z = null_space(B)
    >>> Z.shape
    (5, 2)
    >>> np.allclose(B.dot(Z), 0)
    True

    The basis vectors are orthonormal (up to rounding error):

    >>> Z.T.dot(Z)
    array([[ 1.00000000e+00,  6.92087741e-17],
           [ 6.92087741e-17,  1.00000000e+00]])

    """
    left, sing, right_h = svd(A, full_matrices=True)
    nrows, ncols = left.shape[0], right_h.shape[1]
    if rcond is None:
        rcond = numpy.finfo(sing.dtype).eps * max(nrows, ncols)
    cutoff = numpy.amax(sing) * rcond
    rank = numpy.sum(sing > cutoff, dtype=int)
    # Rows of Vh beyond the numerical rank span the null space; return
    # them transposed (and conjugated) as columns.
    return right_h[rank:, :].conj().T
+
+
def subspace_angles(A, B):
    r"""
    Compute the subspace angles between two matrices.

    Parameters
    ----------
    A : (M, N) array_like
        The first input array.
    B : (M, K) array_like
        The second input array.

    Returns
    -------
    angles : ndarray, shape (min(N, K),)
        The subspace angles between the column spaces of `A` and `B` in
        descending order.

    See Also
    --------
    orth
    svd

    Notes
    -----
    This computes the subspace angles according to the formula
    provided in [1]_. For equivalence with MATLAB and Octave behavior,
    use ``angles[0]``.

    .. versionadded:: 1.0

    References
    ----------
    .. [1] Knyazev A, Argentati M (2002) Principal Angles between Subspaces
           in an A-Based Scalar Product: Algorithms and Perturbation
           Estimates. SIAM J. Sci. Comput. 23:2008-2040.

    Examples
    --------
    An Hadamard matrix, which has orthogonal columns, so we expect that
    the suspace angle to be :math:`\frac{\pi}{2}`:

    >>> from scipy.linalg import hadamard, subspace_angles
    >>> H = hadamard(4)
    >>> print(H)
    [[ 1  1  1  1]
     [ 1 -1  1 -1]
     [ 1  1 -1 -1]
     [ 1 -1 -1  1]]
    >>> np.rad2deg(subspace_angles(H[:, :2], H[:, 2:]))
    array([ 90.,  90.])

    And the subspace angle of a matrix to itself should be zero:

    >>> subspace_angles(H[:, :2], H[:, :2]) <= 2 * np.finfo(float).eps
    array([ True,  True], dtype=bool)

    The angles between non-orthogonal subspaces are in between these extremes:

    >>> x = np.random.RandomState(0).randn(4, 3)
    >>> np.rad2deg(subspace_angles(x[:, :2], x[:, [2]]))
    array([ 55.832])
    """
    # Steps here omit the U and V calculation steps from the paper

    # 1. Compute orthonormal bases of column-spaces
    A = _asarray_validated(A, check_finite=True)
    if len(A.shape) != 2:
        raise ValueError('expected 2D array, got shape %s' % (A.shape,))
    QA = orth(A)
    del A  # free the original; only the orthonormal basis is needed below

    B = _asarray_validated(B, check_finite=True)
    if len(B.shape) != 2:
        raise ValueError('expected 2D array, got shape %s' % (B.shape,))
    if len(B) != len(QA):
        raise ValueError('A and B must have the same number of rows, got '
                         '%s and %s' % (QA.shape[0], B.shape[0]))
    QB = orth(B)
    del B

    # 2. Compute SVD for cosine
    # Singular values of QA^H QB are the cosines of the principal angles
    # (ascending-angle order, since svdvals sorts sigma descending).
    QA_H_QB = dot(QA.T.conj(), QB)
    sigma = svdvals(QA_H_QB)

    # 3. Compute matrix B
    # Project the smaller basis onto the orthogonal complement of the other;
    # its singular values are the sines of the principal angles.
    if QA.shape[1] >= QB.shape[1]:
        B = QB - dot(QA, QA_H_QB)
    else:
        B = QA - dot(QB, QA_H_QB.T.conj())
    del QA, QB, QA_H_QB

    # 4. Compute SVD for sine
    # For angles below ~45 degrees (cos^2 >= 1/2) the arcsin formulation is
    # numerically more accurate than arccos of a near-1 cosine, so use the
    # sine-based values wherever the mask is True.
    mask = sigma ** 2 >= 0.5
    if mask.any():
        mu_arcsin = arcsin(clip(svdvals(B, overwrite_a=True), -1., 1.))
    else:
        mu_arcsin = 0.

    # 5. Compute the principal angles
    # with reverse ordering of sigma because smallest sigma belongs to largest
    # angle theta
    theta = where(mask, mu_arcsin, arccos(clip(sigma[::-1], -1., 1.)))
    return theta
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/flinalg.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/flinalg.py
new file mode 100644
index 0000000..98cd03d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/flinalg.py
@@ -0,0 +1,56 @@
+#
+# Author: Pearu Peterson, March 2002
+#
+
+__all__ = ['get_flinalg_funcs']
+
+# The following ensures that possibly missing flavor (C or Fortran) is
+# replaced with the available one. If none is available, exception
+# is raised at the first attempt to use the resources.
+try:
+ from . import _flinalg
+except ImportError:
+ _flinalg = None
+# from numpy.distutils.misc_util import PostponedException
+# _flinalg = PostponedException()
+# print _flinalg.__doc__
+ has_column_major_storage = lambda a:0
+
+
def has_column_major_storage(arr):
    """Return True if ``arr`` is stored in Fortran (column-major) order."""
    # flags['FORTRAN'] is an alias for the F_CONTIGUOUS flag.
    return arr.flags.f_contiguous
+
+
+_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',..
+
+
def get_flinalg_funcs(names, arrays=(), debug=0):
    """Return optimal available _flinalg function objects with
    names. Arrays are used to determine optimal prefix."""
    # Rank the input arrays by dtype character; codes outside the
    # conversion table fall back to double precision ('d').
    ranked = sorted(
        (arr.dtype.char if arr.dtype.char in _type_conv else 'd', pos)
        for pos, arr in enumerate(arrays)
    )
    if ranked:
        required_prefix = _type_conv[ranked[0][0]]
    else:
        required_prefix = 'd'
    # Some routines may require special treatment.
    # Handle them here before the default lookup.

    # Default lookup: prefer the column-major ('_c') variant when the
    # deciding array is Fortran-ordered, else the row-major ('_r') one;
    # the other suffix serves as fallback.
    if ranked and has_column_major_storage(arrays[ranked[0][1]]):
        preferred, fallback = '_c', '_r'
    else:
        preferred, fallback = '_r', '_c'

    resolved = []
    for name in names:
        base = required_prefix + name
        # None is returned for routines absent from the extension module.
        func = getattr(_flinalg, base + preferred,
                       getattr(_flinalg, base + fallback, None))
        resolved.append(func)
    return tuple(resolved)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/interpolative.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/interpolative.py
new file mode 100644
index 0000000..12fbafb
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/interpolative.py
@@ -0,0 +1,970 @@
+#******************************************************************************
+# Copyright (C) 2013 Kenneth L. Ho
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer. Redistributions in binary
+# form must reproduce the above copyright notice, this list of conditions and
+# the following disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# None of the names of the copyright holders may be used to endorse or
+# promote products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+# Python module for interfacing with `id_dist`.
+
+r"""
+======================================================================
+Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
+======================================================================
+
+.. moduleauthor:: Kenneth L. Ho
+
+.. versionadded:: 0.13
+
+.. currentmodule:: scipy.linalg.interpolative
+
+An interpolative decomposition (ID) of a matrix :math:`A \in
+\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
+factorization
+
+.. math::
+ A \Pi =
+ \begin{bmatrix}
+ A \Pi_{1} & A \Pi_{2}
+ \end{bmatrix} =
+ A \Pi_{1}
+ \begin{bmatrix}
+ I & T
+ \end{bmatrix},
+
+where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
+:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
+A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
+where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
+are the *skeleton* and *interpolation matrices*, respectively.
+
+If :math:`A` does not have exact rank :math:`k`, then there exists an
+approximation in the form of an ID such that :math:`A = BP + E`, where
+:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
+1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
++ 1}` is the best possible error for a rank-:math:`k` approximation
+and, in fact, is achieved by the singular value decomposition (SVD)
+:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
+k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
+and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
+\times k}` is diagonal with nonnegative entries. The principal
+advantages of using an ID over an SVD are that:
+
+- it is cheaper to construct;
+- it preserves the structure of :math:`A`; and
+- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
+
+Routines
+========
+
+Main functionality:
+
+.. autosummary::
+ :toctree: generated/
+
+ interp_decomp
+ reconstruct_matrix_from_id
+ reconstruct_interp_matrix
+ reconstruct_skel_matrix
+ id_to_svd
+ svd
+ estimate_spectral_norm
+ estimate_spectral_norm_diff
+ estimate_rank
+
+Support functions:
+
+.. autosummary::
+ :toctree: generated/
+
+ seed
+ rand
+
+
+References
+==========
+
+This module uses the ID software package [1]_ by Martinsson, Rokhlin,
+Shkolnisky, and Tygert, which is a Fortran library for computing IDs
+using various algorithms, including the rank-revealing QR approach of
+[2]_ and the more recent randomized methods described in [3]_, [4]_,
+and [5]_. This module exposes its functionality in a way convenient
+for Python users. Note that this module does not add any functionality
+beyond that of organizing a simpler and more consistent interface.
+
+We advise the user to consult also the `documentation for the ID package
+`_.
+
+.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
+ software package for low-rank approximation of matrices via interpolative
+ decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
+
+.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
+ compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
+ 2005. :doi:`10.1137/030602678`.
+
+.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
+ Tygert. "Randomized algorithms for the low-rank approximation of matrices."
+ *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
+ :doi:`10.1073/pnas.0709640104`.
+
+.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
+ algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
+ (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
+
+.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
+ randomized algorithm for the approximation of matrices." *Appl. Comput.
+ Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
+
+
+Tutorial
+========
+
+Initializing
+------------
+
+The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
+command:
+
+>>> import scipy.linalg.interpolative as sli
+
+Now let's build a matrix. For this, we consider a Hilbert matrix, which is well
+know to have low rank:
+
+>>> from scipy.linalg import hilbert
+>>> n = 1000
+>>> A = hilbert(n)
+
+We can also do this explicitly via:
+
+>>> import numpy as np
+>>> n = 1000
+>>> A = np.empty((n, n), order='F')
>>> for j in range(n):
>>>     for i in range(n):
>>>         A[i,j] = 1. / (i + j + 1)
+
+Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
+instantiates the matrix in Fortran-contiguous order and is important for
+avoiding data copying when passing to the backend.
+
+We then define multiplication routines for the matrix by regarding it as a
+:class:`scipy.sparse.linalg.LinearOperator`:
+
+>>> from scipy.sparse.linalg import aslinearoperator
+>>> L = aslinearoperator(A)
+
+This automatically sets up methods describing the action of the matrix and its
+adjoint on a vector.
+
+Computing an ID
+---------------
+
+We have several choices of algorithm to compute an ID. These fall largely
+according to two dichotomies:
+
+1. how the matrix is represented, i.e., via its entries or via its action on a
+ vector; and
+2. whether to approximate it to a fixed relative precision or to a fixed rank.
+
+We step through each choice in turn below.
+
+In all cases, the ID is represented by three parameters:
+
+1. a rank ``k``;
+2. an index array ``idx``; and
+3. interpolation coefficients ``proj``.
+
+The ID is specified by the relation
+``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
+
+From matrix entries
+...................
+
+We first consider a matrix given in terms of its entries.
+
+To compute an ID to a fixed precision, type:
+
+>>> k, idx, proj = sli.interp_decomp(A, eps)
+
+where ``eps < 1`` is the desired precision.
+
+To compute an ID to a fixed rank, use:
+
+>>> idx, proj = sli.interp_decomp(A, k)
+
+where ``k >= 1`` is the desired rank.
+
+Both algorithms use random sampling and are usually faster than the
+corresponding older, deterministic algorithms, which can be accessed via the
+commands:
+
+>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
+
+and:
+
+>>> idx, proj = sli.interp_decomp(A, k, rand=False)
+
+respectively.
+
+From matrix action
+..................
+
+Now consider a matrix given in terms of its action on a vector as a
+:class:`scipy.sparse.linalg.LinearOperator`.
+
+To compute an ID to a fixed precision, type:
+
+>>> k, idx, proj = sli.interp_decomp(L, eps)
+
+To compute an ID to a fixed rank, use:
+
+>>> idx, proj = sli.interp_decomp(L, k)
+
+These algorithms are randomized.
+
+Reconstructing an ID
+--------------------
+
+The ID routines above do not output the skeleton and interpolation matrices
+explicitly but instead return the relevant information in a more compact (and
+sometimes more useful) form. To build these matrices, write:
+
+>>> B = sli.reconstruct_skel_matrix(A, k, idx)
+
+for the skeleton matrix and:
+
+>>> P = sli.reconstruct_interp_matrix(idx, proj)
+
+for the interpolation matrix. The ID approximation can then be computed as:
+
+>>> C = np.dot(B, P)
+
+This can also be constructed directly using:
+
+>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
+
+without having to first compute ``P``.
+
+Alternatively, this can be done explicitly as well using:
+
+>>> B = A[:,idx[:k]]
+>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
+>>> C = np.dot(B, P)
+
+Computing an SVD
+----------------
+
+An ID can be converted to an SVD via the command:
+
+>>> U, S, V = sli.id_to_svd(B, idx, proj)
+
+The SVD approximation is then:
+
>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
+
+The SVD can also be computed "fresh" by combining both the ID and conversion
+steps into one command. Following the various ID algorithms above, there are
+correspondingly various SVD algorithms that one can employ.
+
+From matrix entries
+...................
+
+We consider first SVD algorithms for a matrix given in terms of its entries.
+
+To compute an SVD to a fixed precision, type:
+
+>>> U, S, V = sli.svd(A, eps)
+
+To compute an SVD to a fixed rank, use:
+
+>>> U, S, V = sli.svd(A, k)
+
+Both algorithms use random sampling; for the determinstic versions, issue the
+keyword ``rand=False`` as above.
+
+From matrix action
+..................
+
+Now consider a matrix given in terms of its action on a vector.
+
+To compute an SVD to a fixed precision, type:
+
+>>> U, S, V = sli.svd(L, eps)
+
+To compute an SVD to a fixed rank, use:
+
+>>> U, S, V = sli.svd(L, k)
+
+Utility routines
+----------------
+
+Several utility routines are also available.
+
+To estimate the spectral norm of a matrix, use:
+
+>>> snorm = sli.estimate_spectral_norm(A)
+
+This algorithm is based on the randomized power method and thus requires only
+matrix-vector products. The number of iterations to take can be set using the
+keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
+:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
+as a :class:`numpy.ndarray`, in which case it is trivially converted using
+:func:`scipy.sparse.linalg.aslinearoperator`.
+
+The same algorithm can also estimate the spectral norm of the difference of two
+matrices ``A1`` and ``A2`` as follows:
+
+>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
+
+This is often useful for checking the accuracy of a matrix approximation.
+
+Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
+of a matrix as well. This can be done with either:
+
+>>> k = sli.estimate_rank(A, eps)
+
+or:
+
+>>> k = sli.estimate_rank(L, eps)
+
+depending on the representation. The parameter ``eps`` controls the definition
+of the numerical rank.
+
+Finally, the random number generation required for all randomized routines can
+be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
+values to their original values, use:
+
+>>> sli.seed('default')
+
+To specify the seed values, use:
+
+>>> sli.seed(s)
+
+where ``s`` must be an integer or array of 55 floats. If an integer, the array
+of floats is obtained by using ``numpy.random.rand`` with the given integer
+seed.
+
+To simply generate some random numbers, type:
+
+>>> sli.rand(n)
+
+where ``n`` is the number of random numbers to generate.
+
+Remarks
+-------
+
+The above functions all automatically detect the appropriate interface and work
+with both real and complex data types, passing input arguments to the proper
+backend routine.
+
+"""
+
+import scipy.linalg._interpolative_backend as backend
+import numpy as np
+
+_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
+_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
+
+
+def _is_real(A):
+ try:
+ if A.dtype == np.complex128:
+ return False
+ elif A.dtype == np.float64:
+ return True
+ else:
+ raise _DTYPE_ERROR
+ except AttributeError as e:
+ raise _TYPE_ERROR from e
+
+
def seed(seed=None):
    """
    Seed the internal random number generator used in this ID package.

    The generator is a lagged Fibonacci method with 55-element internal state.

    Parameters
    ----------
    seed : int, sequence, 'default', optional
        If 'default', the random seed is reset to a default value.

        If `seed` is a sequence containing 55 floating-point numbers
        in range [0,1], these are used to set the internal state of
        the generator.

        If the value is an integer, the internal state is obtained
        from `numpy.random.RandomState` (MT19937) with the integer
        used as the initial seed.

        If `seed` is omitted (None), ``numpy.random.rand`` is used to
        initialize the generator.

    """
    # For details, see :func:`backend.id_srand`, :func:`backend.id_srandi`,
    # and :func:`backend.id_srando`.

    if isinstance(seed, str) and seed == 'default':
        # Reset the backend generator to its built-in default state.
        backend.id_srando()
    elif hasattr(seed, '__len__'):
        # An explicit 55-element state vector; validate before handing off.
        vec = np.asfortranarray(seed, dtype=float)
        if vec.shape != (55,):
            raise ValueError("invalid input size")
        elif vec.min() < 0 or vec.max() > 1:
            raise ValueError("values not in range [0,1]")
        backend.id_srandi(vec)
    elif seed is None:
        backend.id_srandi(np.random.rand(55))
    else:
        # Integer seed: derive a reproducible 55-float state via MT19937.
        backend.id_srandi(np.random.RandomState(seed).rand(55))
+
+
def rand(*shape):
    """
    Generate standard uniform pseudorandom numbers via a very efficient lagged
    Fibonacci method.

    This routine is used for all random number generation in this package and
    can affect ID and SVD results.

    Parameters
    ----------
    shape
        Shape of output array

    """
    # For details, see :func:`backend.id_srand`, and :func:`backend.id_srando`.
    total = np.prod(shape)
    # The backend produces a flat vector; reshape it as requested.
    return backend.id_srand(total).reshape(shape)
+
+
def interp_decomp(A, eps_or_k, rand=True):
    """
    Compute ID of a matrix.

    An ID of a matrix `A` is a factorization defined by a rank `k`, a column
    index array `idx`, and interpolation coefficients `proj` such that::

        numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]

    The original matrix can then be reconstructed as::

        numpy.hstack([A[:,idx[:k]],
                      numpy.dot(A[:,idx[:k]], proj)]
                     )[:,numpy.argsort(idx)]

    or via the routine :func:`reconstruct_matrix_from_id`. This can
    equivalently be written as::

        numpy.dot(A[:,idx[:k]],
                  numpy.hstack([numpy.eye(k), proj])
                  )[:,np.argsort(idx)]

    in terms of the skeleton and interpolation matrices::

        B = A[:,idx[:k]]

    and::

        P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]

    respectively. See also :func:`reconstruct_interp_matrix` and
    :func:`reconstruct_skel_matrix`.

    The ID can be computed to any relative precision or rank (depending on the
    value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
    this function has the output signature::

        k, idx, proj = interp_decomp(A, eps_or_k)

    Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
    signature is::

        idx, proj = interp_decomp(A, eps_or_k)

    ..  This function automatically detects the form of the input parameters
        and passes them to the appropriate backend. For details, see
        :func:`backend.iddp_id`, :func:`backend.iddp_aid`,
        :func:`backend.iddp_rid`, :func:`backend.iddr_id`,
        :func:`backend.iddr_aid`, :func:`backend.iddr_rid`,
        :func:`backend.idzp_id`, :func:`backend.idzp_aid`,
        :func:`backend.idzp_rid`, :func:`backend.idzr_id`,
        :func:`backend.idzr_aid`, and :func:`backend.idzr_rid`.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
        Matrix to be factored
    eps_or_k : float or int
        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
        approximation.
    rand : bool, optional
        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
        (randomized algorithms are always used if `A` is of type
        :class:`scipy.sparse.linalg.LinearOperator`).

    Returns
    -------
    k : int
        Rank required to achieve specified relative precision if
        `eps_or_k < 1`.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.
    """
    # Imported lazily to avoid a circular import at module load time.
    from scipy.sparse.linalg import LinearOperator

    # float64 routes to the 'idd*' backend routines, complex128 to 'idz*';
    # any other input raises from _is_real.
    real = _is_real(A)

    if isinstance(A, np.ndarray):
        # Dense matrix given by its entries. Backend naming (as used by the
        # dispatch below): 'p' = fixed precision, 'r' = fixed rank; 'aid' is
        # the randomized entry-based algorithm, plain 'id' the deterministic
        # one.
        if eps_or_k < 1:
            eps = eps_or_k
            if rand:
                if real:
                    k, idx, proj = backend.iddp_aid(eps, A)
                else:
                    k, idx, proj = backend.idzp_aid(eps, A)
            else:
                if real:
                    k, idx, proj = backend.iddp_id(eps, A)
                else:
                    k, idx, proj = backend.idzp_id(eps, A)
            # Backend indices appear to be Fortran-style 1-based; shift to
            # 0-based for Python callers.
            return k, idx - 1, proj
        else:
            k = int(eps_or_k)
            if rand:
                if real:
                    idx, proj = backend.iddr_aid(A, k)
                else:
                    idx, proj = backend.idzr_aid(A, k)
            else:
                if real:
                    idx, proj = backend.iddr_id(A, k)
                else:
                    idx, proj = backend.idzr_id(A, k)
            return idx - 1, proj
    elif isinstance(A, LinearOperator):
        # Matrix given by its action; only the randomized 'rid' routines
        # apply, driven by the adjoint matvec.
        m, n = A.shape
        matveca = A.rmatvec
        if eps_or_k < 1:
            eps = eps_or_k
            if real:
                k, idx, proj = backend.iddp_rid(eps, m, n, matveca)
            else:
                k, idx, proj = backend.idzp_rid(eps, m, n, matveca)
            return k, idx - 1, proj
        else:
            k = int(eps_or_k)
            if real:
                idx, proj = backend.iddr_rid(m, n, matveca, k)
            else:
                idx, proj = backend.idzr_rid(m, n, matveca, k)
            return idx - 1, proj
    else:
        raise _TYPE_ERROR
+
+
+def reconstruct_matrix_from_id(B, idx, proj):
+ """
+ Reconstruct matrix from its ID.
+
+ A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
+ and `proj`, respectively, can be reconstructed as::
+
+ numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
+
+ See also :func:`reconstruct_interp_matrix` and
+ :func:`reconstruct_skel_matrix`.
+
+ .. This function automatically detects the matrix data type and calls the
+ appropriate backend. For details, see :func:`backend.idd_reconid` and
+ :func:`backend.idz_reconid`.
+
+ Parameters
+ ----------
+ B : :class:`numpy.ndarray`
+ Skeleton matrix.
+ idx : :class:`numpy.ndarray`
+ Column index array.
+ proj : :class:`numpy.ndarray`
+ Interpolation coefficients.
+
+ Returns
+ -------
+ :class:`numpy.ndarray`
+ Reconstructed matrix.
+ """
+ if _is_real(B):
+ return backend.idd_reconid(B, idx + 1, proj)
+ else:
+ return backend.idz_reconid(B, idx + 1, proj)
+
+
+def reconstruct_interp_matrix(idx, proj):
+ """
+ Reconstruct interpolation matrix from ID.
+
+ The interpolation matrix can be reconstructed from the ID indices and
+ coefficients `idx` and `proj`, respectively, as::
+
+ P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]
+
+ The original matrix can then be reconstructed from its skeleton matrix `B`
+ via::
+
+ numpy.dot(B, P)
+
+ See also :func:`reconstruct_matrix_from_id` and
+ :func:`reconstruct_skel_matrix`.
+
+ .. This function automatically detects the matrix data type and calls the
+ appropriate backend. For details, see :func:`backend.idd_reconint` and
+ :func:`backend.idz_reconint`.
+
+ Parameters
+ ----------
+ idx : :class:`numpy.ndarray`
+ Column index array.
+ proj : :class:`numpy.ndarray`
+ Interpolation coefficients.
+
+ Returns
+ -------
+ :class:`numpy.ndarray`
+ Interpolation matrix.
+ """
+ if _is_real(proj):
+ return backend.idd_reconint(idx + 1, proj)
+ else:
+ return backend.idz_reconint(idx + 1, proj)
+
+
+def reconstruct_skel_matrix(A, k, idx):
+ """
+ Reconstruct skeleton matrix from ID.
+
+ The skeleton matrix can be reconstructed from the original matrix `A` and its
+ ID rank and indices `k` and `idx`, respectively, as::
+
+ B = A[:,idx[:k]]
+
+ The original matrix can then be reconstructed via::
+
+ numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
+
+ See also :func:`reconstruct_matrix_from_id` and
+ :func:`reconstruct_interp_matrix`.
+
+ .. This function automatically detects the matrix data type and calls the
+ appropriate backend. For details, see :func:`backend.idd_copycols` and
+ :func:`backend.idz_copycols`.
+
+ Parameters
+ ----------
+ A : :class:`numpy.ndarray`
+ Original matrix.
+ k : int
+ Rank of ID.
+ idx : :class:`numpy.ndarray`
+ Column index array.
+
+ Returns
+ -------
+ :class:`numpy.ndarray`
+ Skeleton matrix.
+ """
+ if _is_real(A):
+ return backend.idd_copycols(A, k, idx + 1)
+ else:
+ return backend.idz_copycols(A, k, idx + 1)
+
+
+def id_to_svd(B, idx, proj):
+ """
+ Convert ID to SVD.
+
+ The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
+ coefficients `idx` and `proj`, respectively, is::
+
+ U, S, V = id_to_svd(B, idx, proj)
+ A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
+
+ See also :func:`svd`.
+
+ .. This function automatically detects the matrix data type and calls the
+ appropriate backend. For details, see :func:`backend.idd_id2svd` and
+ :func:`backend.idz_id2svd`.
+
+ Parameters
+ ----------
+ B : :class:`numpy.ndarray`
+ Skeleton matrix.
+ idx : :class:`numpy.ndarray`
+ Column index array.
+ proj : :class:`numpy.ndarray`
+ Interpolation coefficients.
+
+ Returns
+ -------
+ U : :class:`numpy.ndarray`
+ Left singular vectors.
+ S : :class:`numpy.ndarray`
+ Singular values.
+ V : :class:`numpy.ndarray`
+ Right singular vectors.
+ """
+ if _is_real(B):
+ U, V, S = backend.idd_id2svd(B, idx + 1, proj)
+ else:
+ U, V, S = backend.idz_id2svd(B, idx + 1, proj)
+ return U, S, V
+
+
+def estimate_spectral_norm(A, its=20):
+ """
+ Estimate spectral norm of a matrix by the randomized power method.
+
+ .. This function automatically detects the matrix data type and calls the
+ appropriate backend. For details, see :func:`backend.idd_snorm` and
+ :func:`backend.idz_snorm`.
+
+ Parameters
+ ----------
+ A : :class:`scipy.sparse.linalg.LinearOperator`
+ Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
+ `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
+ its : int, optional
+ Number of power method iterations.
+
+ Returns
+ -------
+ float
+ Spectral norm estimate.
+ """
+ from scipy.sparse.linalg import aslinearoperator
+ A = aslinearoperator(A)
+ m, n = A.shape
+ matvec = lambda x: A. matvec(x)
+ matveca = lambda x: A.rmatvec(x)
+ if _is_real(A):
+ return backend.idd_snorm(m, n, matveca, matvec, its=its)
+ else:
+ return backend.idz_snorm(m, n, matveca, matvec, its=its)
+
+
+def estimate_spectral_norm_diff(A, B, its=20):
+ """
+ Estimate spectral norm of the difference of two matrices by the randomized
+ power method.
+
+ .. This function automatically detects the matrix data type and calls the
+ appropriate backend. For details, see :func:`backend.idd_diffsnorm` and
+ :func:`backend.idz_diffsnorm`.
+
+ Parameters
+ ----------
+ A : :class:`scipy.sparse.linalg.LinearOperator`
+ First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
+ `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
+ B : :class:`scipy.sparse.linalg.LinearOperator`
+ Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
+ the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
+ its : int, optional
+ Number of power method iterations.
+
+ Returns
+ -------
+ float
+ Spectral norm estimate of matrix difference.
+ """
+ from scipy.sparse.linalg import aslinearoperator
+ A = aslinearoperator(A)
+ B = aslinearoperator(B)
+ m, n = A.shape
+ matvec1 = lambda x: A. matvec(x)
+ matveca1 = lambda x: A.rmatvec(x)
+ matvec2 = lambda x: B. matvec(x)
+ matveca2 = lambda x: B.rmatvec(x)
+ if _is_real(A):
+ return backend.idd_diffsnorm(
+ m, n, matveca1, matveca2, matvec1, matvec2, its=its)
+ else:
+ return backend.idz_diffsnorm(
+ m, n, matveca1, matveca2, matvec1, matvec2, its=its)
+
+
+def svd(A, eps_or_k, rand=True):
+ """
+ Compute SVD of a matrix via an ID.
+
+ An SVD of a matrix `A` is a factorization::
+
+ A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
+
+ where `U` and `V` have orthonormal columns and `S` is nonnegative.
+
+ The SVD can be computed to any relative precision or rank (depending on the
+ value of `eps_or_k`).
+
+ See also :func:`interp_decomp` and :func:`id_to_svd`.
+
+ .. This function automatically detects the form of the input parameters and
+ passes them to the appropriate backend. For details, see
+ :func:`backend.iddp_svd`, :func:`backend.iddp_asvd`,
+ :func:`backend.iddp_rsvd`, :func:`backend.iddr_svd`,
+ :func:`backend.iddr_asvd`, :func:`backend.iddr_rsvd`,
+ :func:`backend.idzp_svd`, :func:`backend.idzp_asvd`,
+ :func:`backend.idzp_rsvd`, :func:`backend.idzr_svd`,
+ :func:`backend.idzr_asvd`, and :func:`backend.idzr_rsvd`.
+
+ Parameters
+ ----------
+ A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
+ Matrix to be factored, given as either a :class:`numpy.ndarray` or a
+ :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
+ `rmatvec` methods (to apply the matrix and its adjoint).
+ eps_or_k : float or int
+ Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
+ approximation.
+ rand : bool, optional
+ Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
+ (randomized algorithms are always used if `A` is of type
+ :class:`scipy.sparse.linalg.LinearOperator`).
+
+ Returns
+ -------
+ U : :class:`numpy.ndarray`
+ Left singular vectors.
+ S : :class:`numpy.ndarray`
+ Singular values.
+ V : :class:`numpy.ndarray`
+ Right singular vectors.
+ """
+ from scipy.sparse.linalg import LinearOperator
+
+ real = _is_real(A)
+
+ if isinstance(A, np.ndarray):
+ if eps_or_k < 1:
+ eps = eps_or_k
+ if rand:
+ if real:
+ U, V, S = backend.iddp_asvd(eps, A)
+ else:
+ U, V, S = backend.idzp_asvd(eps, A)
+ else:
+ if real:
+ U, V, S = backend.iddp_svd(eps, A)
+ else:
+ U, V, S = backend.idzp_svd(eps, A)
+ else:
+ k = int(eps_or_k)
+ if k > min(A.shape):
+ raise ValueError("Approximation rank %s exceeds min(A.shape) = "
+ " %s " % (k, min(A.shape)))
+ if rand:
+ if real:
+ U, V, S = backend.iddr_asvd(A, k)
+ else:
+ U, V, S = backend.idzr_asvd(A, k)
+ else:
+ if real:
+ U, V, S = backend.iddr_svd(A, k)
+ else:
+ U, V, S = backend.idzr_svd(A, k)
+ elif isinstance(A, LinearOperator):
+ m, n = A.shape
+ matvec = lambda x: A.matvec(x)
+ matveca = lambda x: A.rmatvec(x)
+ if eps_or_k < 1:
+ eps = eps_or_k
+ if real:
+ U, V, S = backend.iddp_rsvd(eps, m, n, matveca, matvec)
+ else:
+ U, V, S = backend.idzp_rsvd(eps, m, n, matveca, matvec)
+ else:
+ k = int(eps_or_k)
+ if real:
+ U, V, S = backend.iddr_rsvd(m, n, matveca, matvec, k)
+ else:
+ U, V, S = backend.idzr_rsvd(m, n, matveca, matvec, k)
+ else:
+ raise _TYPE_ERROR
+ return U, S, V
+
+
+def estimate_rank(A, eps):
+ """
+ Estimate matrix rank to a specified relative precision using randomized
+ methods.
+
+ The matrix `A` can be given as either a :class:`numpy.ndarray` or a
+ :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
+ for each case. If `A` is of type :class:`numpy.ndarray`, then the output
+ rank is typically about 8 higher than the actual numerical rank.
+
+ .. This function automatically detects the form of the input parameters and
+ passes them to the appropriate backend. For details,
+ see :func:`backend.idd_estrank`, :func:`backend.idd_findrank`,
+ :func:`backend.idz_estrank`, and :func:`backend.idz_findrank`.
+
+ Parameters
+ ----------
+ A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
+ Matrix whose rank is to be estimated, given as either a
+ :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
+ with the `rmatvec` method (to apply the matrix adjoint).
+ eps : float
+ Relative error for numerical rank definition.
+
+ Returns
+ -------
+ int
+ Estimated matrix rank.
+ """
+ from scipy.sparse.linalg import LinearOperator
+
+ real = _is_real(A)
+
+ if isinstance(A, np.ndarray):
+ if real:
+ rank = backend.idd_estrank(eps, A)
+ else:
+ rank = backend.idz_estrank(eps, A)
+ if rank == 0:
+ # special return value for nearly full rank
+ rank = min(A.shape)
+ return rank
+ elif isinstance(A, LinearOperator):
+ m, n = A.shape
+ matveca = A.rmatvec
+ if real:
+ return backend.idd_findrank(eps, m, n, matveca)
+ else:
+ return backend.idz_findrank(eps, m, n, matveca)
+ else:
+ raise _TYPE_ERROR
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/lapack.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/lapack.py
new file mode 100644
index 0000000..7cf46eb
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/lapack.py
@@ -0,0 +1,1039 @@
+"""
+Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
+=======================================================
+
+This module contains low-level functions from the LAPACK library.
+
+The `*gegv` family of routines has been removed from LAPACK 3.6.0
+and deprecated in SciPy 0.17.0. The corresponding wrappers will
+be removed in a future release.
+
+.. versionadded:: 0.12.0
+
+.. note::
+
+   The common ``overwrite_<>`` option in many routines allows the
+   input arrays to be overwritten to avoid extra memory allocation.
+   However, this requires the array to satisfy two conditions:
+   its memory order and its data type must match exactly the
+   order and the type expected by the routine.
+
+ As an example, if you pass a double precision float array to any
+ ``S....`` routine which expects single precision arguments, f2py
+ will create an intermediate array to match the argument types and
+ overwriting will be performed on that intermediate array.
+
+ Similarly, if a C-contiguous array is passed, f2py will pass a
+ FORTRAN-contiguous array internally. Please make sure that these
+ details are satisfied. More information can be found in the f2py
+ documentation.
+
+.. warning::
+
+ These functions do little to no error checking.
+ It is possible to cause crashes by mis-using them,
+ so prefer using the higher-level routines in `scipy.linalg`.
+
+Finding functions
+-----------------
+
+.. autosummary::
+ :toctree: generated/
+
+ get_lapack_funcs
+
+All functions
+-------------
+
+.. autosummary::
+ :toctree: generated/
+
+ sgbsv
+ dgbsv
+ cgbsv
+ zgbsv
+
+ sgbtrf
+ dgbtrf
+ cgbtrf
+ zgbtrf
+
+ sgbtrs
+ dgbtrs
+ cgbtrs
+ zgbtrs
+
+ sgebal
+ dgebal
+ cgebal
+ zgebal
+
+ sgecon
+ dgecon
+ cgecon
+ zgecon
+
+ sgeequ
+ dgeequ
+ cgeequ
+ zgeequ
+
+ sgeequb
+ dgeequb
+ cgeequb
+ zgeequb
+
+ sgees
+ dgees
+ cgees
+ zgees
+
+ sgeev
+ dgeev
+ cgeev
+ zgeev
+
+ sgeev_lwork
+ dgeev_lwork
+ cgeev_lwork
+ zgeev_lwork
+
+ sgegv
+ dgegv
+ cgegv
+ zgegv
+
+ sgehrd
+ dgehrd
+ cgehrd
+ zgehrd
+
+ sgehrd_lwork
+ dgehrd_lwork
+ cgehrd_lwork
+ zgehrd_lwork
+
+ sgejsv
+ dgejsv
+
+ sgels
+ dgels
+ cgels
+ zgels
+
+ sgels_lwork
+ dgels_lwork
+ cgels_lwork
+ zgels_lwork
+
+ sgelsd
+ dgelsd
+ cgelsd
+ zgelsd
+
+ sgelsd_lwork
+ dgelsd_lwork
+ cgelsd_lwork
+ zgelsd_lwork
+
+ sgelss
+ dgelss
+ cgelss
+ zgelss
+
+ sgelss_lwork
+ dgelss_lwork
+ cgelss_lwork
+ zgelss_lwork
+
+ sgelsy
+ dgelsy
+ cgelsy
+ zgelsy
+
+ sgelsy_lwork
+ dgelsy_lwork
+ cgelsy_lwork
+ zgelsy_lwork
+
+ sgeqp3
+ dgeqp3
+ cgeqp3
+ zgeqp3
+
+ sgeqrf
+ dgeqrf
+ cgeqrf
+ zgeqrf
+
+ sgeqrf_lwork
+ dgeqrf_lwork
+ cgeqrf_lwork
+ zgeqrf_lwork
+
+ sgeqrfp
+ dgeqrfp
+ cgeqrfp
+ zgeqrfp
+
+ sgeqrfp_lwork
+ dgeqrfp_lwork
+ cgeqrfp_lwork
+ zgeqrfp_lwork
+
+ sgerqf
+ dgerqf
+ cgerqf
+ zgerqf
+
+ sgesdd
+ dgesdd
+ cgesdd
+ zgesdd
+
+ sgesdd_lwork
+ dgesdd_lwork
+ cgesdd_lwork
+ zgesdd_lwork
+
+ sgesv
+ dgesv
+ cgesv
+ zgesv
+
+ sgesvd
+ dgesvd
+ cgesvd
+ zgesvd
+
+ sgesvd_lwork
+ dgesvd_lwork
+ cgesvd_lwork
+ zgesvd_lwork
+
+ sgesvx
+ dgesvx
+ cgesvx
+ zgesvx
+
+ sgetrf
+ dgetrf
+ cgetrf
+ zgetrf
+
+ sgetc2
+ dgetc2
+ cgetc2
+ zgetc2
+
+ sgetri
+ dgetri
+ cgetri
+ zgetri
+
+ sgetri_lwork
+ dgetri_lwork
+ cgetri_lwork
+ zgetri_lwork
+
+ sgetrs
+ dgetrs
+ cgetrs
+ zgetrs
+
+ sgesc2
+ dgesc2
+ cgesc2
+ zgesc2
+
+ sgges
+ dgges
+ cgges
+ zgges
+
+ sggev
+ dggev
+ cggev
+ zggev
+
+ sgglse
+ dgglse
+ cgglse
+ zgglse
+
+ sgglse_lwork
+ dgglse_lwork
+ cgglse_lwork
+ zgglse_lwork
+
+ sgtsv
+ dgtsv
+ cgtsv
+ zgtsv
+
+ sgtsvx
+ dgtsvx
+ cgtsvx
+ zgtsvx
+
+ chbevd
+ zhbevd
+
+ chbevx
+ zhbevx
+
+ checon
+ zhecon
+
+ cheequb
+ zheequb
+
+ cheev
+ zheev
+
+ cheev_lwork
+ zheev_lwork
+
+ cheevd
+ zheevd
+
+ cheevd_lwork
+ zheevd_lwork
+
+ cheevr
+ zheevr
+
+ cheevr_lwork
+ zheevr_lwork
+
+ cheevx
+ zheevx
+
+ cheevx_lwork
+ zheevx_lwork
+
+ chegst
+ zhegst
+
+ chegv
+ zhegv
+
+ chegv_lwork
+ zhegv_lwork
+
+ chegvd
+ zhegvd
+
+ chegvx
+ zhegvx
+
+ chegvx_lwork
+ zhegvx_lwork
+
+ chesv
+ zhesv
+
+ chesv_lwork
+ zhesv_lwork
+
+ chesvx
+ zhesvx
+
+ chesvx_lwork
+ zhesvx_lwork
+
+ chetrd
+ zhetrd
+
+ chetrd_lwork
+ zhetrd_lwork
+
+ chetrf
+ zhetrf
+
+ chetrf_lwork
+ zhetrf_lwork
+
+ chfrk
+ zhfrk
+
+ slamch
+ dlamch
+
+ slange
+ dlange
+ clange
+ zlange
+
+ slarf
+ dlarf
+ clarf
+ zlarf
+
+ slarfg
+ dlarfg
+ clarfg
+ zlarfg
+
+ slartg
+ dlartg
+ clartg
+ zlartg
+
+ slasd4
+ dlasd4
+
+ slaswp
+ dlaswp
+ claswp
+ zlaswp
+
+ slauum
+ dlauum
+ clauum
+ zlauum
+
+ sorcsd
+ dorcsd
+ sorcsd_lwork
+ dorcsd_lwork
+
+ sorghr
+ dorghr
+ sorghr_lwork
+ dorghr_lwork
+
+ sorgqr
+ dorgqr
+
+ sorgrq
+ dorgrq
+
+ sormqr
+ dormqr
+
+ sormrz
+ dormrz
+
+ sormrz_lwork
+ dormrz_lwork
+
+ spbsv
+ dpbsv
+ cpbsv
+ zpbsv
+
+ spbtrf
+ dpbtrf
+ cpbtrf
+ zpbtrf
+
+ spbtrs
+ dpbtrs
+ cpbtrs
+ zpbtrs
+
+ spftrf
+ dpftrf
+ cpftrf
+ zpftrf
+
+ spftri
+ dpftri
+ cpftri
+ zpftri
+
+ spftrs
+ dpftrs
+ cpftrs
+ zpftrs
+
+ spocon
+ dpocon
+ cpocon
+ zpocon
+
+ spstrf
+ dpstrf
+ cpstrf
+ zpstrf
+
+ spstf2
+ dpstf2
+ cpstf2
+ zpstf2
+
+ sposv
+ dposv
+ cposv
+ zposv
+
+ sposvx
+ dposvx
+ cposvx
+ zposvx
+
+ spotrf
+ dpotrf
+ cpotrf
+ zpotrf
+
+ spotri
+ dpotri
+ cpotri
+ zpotri
+
+ spotrs
+ dpotrs
+ cpotrs
+ zpotrs
+
+ sppcon
+ dppcon
+ cppcon
+ zppcon
+
+ sppsv
+ dppsv
+ cppsv
+ zppsv
+
+ spptrf
+ dpptrf
+ cpptrf
+ zpptrf
+
+ spptri
+ dpptri
+ cpptri
+ zpptri
+
+ spptrs
+ dpptrs
+ cpptrs
+ zpptrs
+
+ sptsv
+ dptsv
+ cptsv
+ zptsv
+
+ sptsvx
+ dptsvx
+ cptsvx
+ zptsvx
+
+ spttrf
+ dpttrf
+ cpttrf
+ zpttrf
+
+ spttrs
+ dpttrs
+ cpttrs
+ zpttrs
+
+ spteqr
+ dpteqr
+ cpteqr
+ zpteqr
+
+ crot
+ zrot
+
+ ssbev
+ dsbev
+
+ ssbevd
+ dsbevd
+
+ ssbevx
+ dsbevx
+
+ ssfrk
+ dsfrk
+
+ sstebz
+ dstebz
+
+ sstein
+ dstein
+
+ sstemr
+ dstemr
+
+ sstemr_lwork
+ dstemr_lwork
+
+ ssterf
+ dsterf
+
+ sstev
+ dstev
+
+ ssycon
+ dsycon
+ csycon
+ zsycon
+
+ ssyconv
+ dsyconv
+ csyconv
+ zsyconv
+
+ ssyequb
+ dsyequb
+ csyequb
+ zsyequb
+
+ ssyev
+ dsyev
+
+ ssyev_lwork
+ dsyev_lwork
+
+ ssyevd
+ dsyevd
+
+ ssyevd_lwork
+ dsyevd_lwork
+
+ ssyevr
+ dsyevr
+
+ ssyevr_lwork
+ dsyevr_lwork
+
+ ssyevx
+ dsyevx
+
+ ssyevx_lwork
+ dsyevx_lwork
+
+ ssygst
+ dsygst
+
+ ssygv
+ dsygv
+
+ ssygv_lwork
+ dsygv_lwork
+
+ ssygvd
+ dsygvd
+
+ ssygvx
+ dsygvx
+
+ ssygvx_lwork
+ dsygvx_lwork
+
+ ssysv
+ dsysv
+ csysv
+ zsysv
+
+ ssysv_lwork
+ dsysv_lwork
+ csysv_lwork
+ zsysv_lwork
+
+ ssysvx
+ dsysvx
+ csysvx
+ zsysvx
+
+ ssysvx_lwork
+ dsysvx_lwork
+ csysvx_lwork
+ zsysvx_lwork
+
+ ssytf2
+ dsytf2
+ csytf2
+ zsytf2
+
+ ssytrd
+ dsytrd
+
+ ssytrd_lwork
+ dsytrd_lwork
+
+ ssytrf
+ dsytrf
+ csytrf
+ zsytrf
+
+ ssytrf_lwork
+ dsytrf_lwork
+ csytrf_lwork
+ zsytrf_lwork
+
+ stbtrs
+ dtbtrs
+ ctbtrs
+ ztbtrs
+
+ stfsm
+ dtfsm
+ ctfsm
+ ztfsm
+
+ stfttp
+ dtfttp
+ ctfttp
+ ztfttp
+
+ stfttr
+ dtfttr
+ ctfttr
+ ztfttr
+
+ stgsen
+ dtgsen
+ ctgsen
+ ztgsen
+
+ stpttf
+ dtpttf
+ ctpttf
+ ztpttf
+
+ stpttr
+ dtpttr
+ ctpttr
+ ztpttr
+
+ strsyl
+ dtrsyl
+ ctrsyl
+ ztrsyl
+
+ strtri
+ dtrtri
+ ctrtri
+ ztrtri
+
+ strtrs
+ dtrtrs
+ ctrtrs
+ ztrtrs
+
+ strttf
+ dtrttf
+ ctrttf
+ ztrttf
+
+ strttp
+ dtrttp
+ ctrttp
+ ztrttp
+
+ stzrzf
+ dtzrzf
+ ctzrzf
+ ztzrzf
+
+ stzrzf_lwork
+ dtzrzf_lwork
+ ctzrzf_lwork
+ ztzrzf_lwork
+
+ cunghr
+ zunghr
+
+ cunghr_lwork
+ zunghr_lwork
+
+ cungqr
+ zungqr
+
+ cungrq
+ zungrq
+
+ cunmqr
+ zunmqr
+
+ sgeqrt
+ dgeqrt
+ cgeqrt
+ zgeqrt
+
+ sgemqrt
+ dgemqrt
+ cgemqrt
+ zgemqrt
+
+ sgttrf
+ dgttrf
+ cgttrf
+ zgttrf
+
+ sgttrs
+ dgttrs
+ cgttrs
+ zgttrs
+
+ stpqrt
+ dtpqrt
+ ctpqrt
+ ztpqrt
+
+ stpmqrt
+ dtpmqrt
+ ctpmqrt
+ ztpmqrt
+
+ cuncsd
+ zuncsd
+
+ cuncsd_lwork
+ zuncsd_lwork
+
+ cunmrz
+ zunmrz
+
+ cunmrz_lwork
+ zunmrz_lwork
+
+ ilaver
+
+"""
+#
+# Author: Pearu Peterson, March 2002
+#
+
+import numpy as _np
+from .blas import _get_funcs, _memoize_get_funcs
+from scipy.linalg import _flapack
+from re import compile as regex_compile
+try:
+ from scipy.linalg import _clapack
+except ImportError:
+ _clapack = None
+
+try:
+ from scipy.linalg import _flapack_64
+ HAS_ILP64 = True
+except ImportError:
+ HAS_ILP64 = False
+ _flapack_64 = None
+
+# Backward compatibility
+from scipy._lib._util import DeprecatedImport as _DeprecatedImport
+clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack")
+flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack")
+
+# Expose all functions (only flapack --- clapack is an implementation detail)
+empty_module = None
+from scipy.linalg._flapack import *
+del empty_module
+
+__all__ = ['get_lapack_funcs']
+
+_dep_message = """The `*gegv` family of routines has been deprecated in
+LAPACK 3.6.0 in favor of the `*ggev` family of routines.
+The corresponding wrappers will be removed from SciPy in
+a future release."""
+
+cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)
+dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)
+sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)
+zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)
+
+# Modify _flapack in this scope so the deprecation warnings apply to
+# functions returned by get_lapack_funcs.
+_flapack.cgegv = cgegv
+_flapack.dgegv = dgegv
+_flapack.sgegv = sgegv
+_flapack.zgegv = zgegv
+
+# some convenience alias for complex functions
+_lapack_alias = {
+ 'corghr': 'cunghr', 'zorghr': 'zunghr',
+ 'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
+ 'corgqr': 'cungqr', 'zorgqr': 'zungqr',
+ 'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
+ 'corgrq': 'cungrq', 'zorgrq': 'zungrq',
+}
+
+
+# Place guards against docstring rendering issues with special characters
+p1 = regex_compile(r'with bounds (?P.*?)( and (?P.*?) storage){0,1}\n')
+p2 = regex_compile(r'Default: (?P.*?)\n')
+
+
+def backtickrepl(m):
+ if m.group('s'):
+ return ('with bounds ``{}`` with ``{}`` storage\n'
+ ''.format(m.group('b'), m.group('s')))
+ else:
+ return 'with bounds ``{}``\n'.format(m.group('b'))
+
+
+for routine in [ssyevr, dsyevr, cheevr, zheevr,
+ ssyevx, dsyevx, cheevx, zheevx,
+ ssygvd, dsygvd, chegvd, zhegvd]:
+ if routine.__doc__:
+ routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)
+ routine.__doc__ = p2.sub('Default ``\\1``\n', routine.__doc__)
+ else:
+ continue
+
+del regex_compile, p1, p2, backtickrepl
+
+
+@_memoize_get_funcs
+def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
+ """Return available LAPACK function objects from names.
+
+ Arrays are used to determine the optimal prefix of LAPACK routines.
+
+ Parameters
+ ----------
+ names : str or sequence of str
+ Name(s) of LAPACK functions without type prefix.
+
+ arrays : sequence of ndarrays, optional
+ Arrays can be given to determine optimal prefix of LAPACK
+ routines. If not given, double-precision routines will be
+ used, otherwise the most generic type in arrays will be used.
+
+ dtype : str or dtype, optional
+ Data-type specifier. Not used if `arrays` is non-empty.
+
+ ilp64 : {True, False, 'preferred'}, optional
+ Whether to return ILP64 routine variant.
+ Choosing 'preferred' returns ILP64 routine if available, and
+ otherwise the 32-bit routine. Default: False
+
+ Returns
+ -------
+ funcs : list
+ List containing the found function(s).
+
+ Notes
+ -----
+ This routine automatically chooses between Fortran/C
+ interfaces. Fortran code is used whenever possible for arrays with
+ column major order. In all other cases, C code is preferred.
+
+ In LAPACK, the naming convention is that all functions start with a
+ type prefix, which depends on the type of the principal
+ matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
+ types {float32, float64, complex64, complex128} respectively, and
+ are stored in attribute ``typecode`` of the returned functions.
+
+ Examples
+ --------
+    Suppose we would like to use the '?lange' routine, which computes the selected
+ norm of an array. We pass our array in order to get the correct 'lange'
+ flavor.
+
+ >>> import scipy.linalg as LA
+ >>> a = np.random.rand(3,2)
+ >>> x_lange = LA.get_lapack_funcs('lange', (a,))
+ >>> x_lange.typecode
+ 'd'
+ >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))
+ >>> x_lange.typecode
+ 'z'
+
+    Several LAPACK routines work best when their internal WORK array has
+ the optimal size (big enough for fast computation and small enough to
+ avoid waste of memory). This size is determined also by a dedicated query
+ to the function which is often wrapped as a standalone function and
+ commonly denoted as ``###_lwork``. Below is an example for ``?sysv``
+
+ >>> import scipy.linalg as LA
+ >>> a = np.random.rand(1000,1000)
+ >>> b = np.random.rand(1000,1)*1j
+ >>> # We pick up zsysv and zsysv_lwork due to b array
+ ... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b))
+ >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix
+ >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))
+
+ """
+ if isinstance(ilp64, str):
+ if ilp64 == 'preferred':
+ ilp64 = HAS_ILP64
+ else:
+ raise ValueError("Invalid value for 'ilp64'")
+
+ if not ilp64:
+ return _get_funcs(names, arrays, dtype,
+ "LAPACK", _flapack, _clapack,
+ "flapack", "clapack", _lapack_alias,
+ ilp64=False)
+ else:
+ if not HAS_ILP64:
+ raise RuntimeError("LAPACK ILP64 routine requested, but Scipy "
+ "compiled only with 32-bit BLAS")
+ return _get_funcs(names, arrays, dtype,
+ "LAPACK", _flapack_64, None,
+ "flapack_64", None, _lapack_alias,
+ ilp64=True)
+
+
+_int32_max = _np.iinfo(_np.int32).max
+_int64_max = _np.iinfo(_np.int64).max
+
+
+def _compute_lwork(routine, *args, **kwargs):
+ """
+ Round floating-point lwork returned by lapack to integer.
+
+ Several LAPACK routines compute optimal values for LWORK, which
+ they return in a floating-point variable. However, for large
+ values of LWORK, single-precision floating point is not sufficient
+ to hold the exact value --- some LAPACK versions (<= 3.5.0 at
+ least) truncate the returned integer to single precision and in
+ some cases this can be smaller than the required value.
+
+ Examples
+ --------
+ >>> from scipy.linalg import lapack
+ >>> n = 5000
+ >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))
+ >>> lwork = lapack._compute_lwork(s_lw, n)
+ >>> lwork
+ 32000
+
+ """
+ dtype = getattr(routine, 'dtype', None)
+ int_dtype = getattr(routine, 'int_dtype', None)
+ ret = routine(*args, **kwargs)
+ if ret[-1] != 0:
+ raise ValueError("Internal work array size computation failed: "
+ "%d" % (ret[-1],))
+
+ if len(ret) == 2:
+ return _check_work_float(ret[0].real, dtype, int_dtype)
+ else:
+ return tuple(_check_work_float(x.real, dtype, int_dtype)
+ for x in ret[:-1])
+
+
+def _check_work_float(value, dtype, int_dtype):
+ """
+ Convert LAPACK-returned work array size float to integer,
+ carefully for single-precision types.
+ """
+
+ if dtype == _np.float32 or dtype == _np.complex64:
+ # Single-precision routine -- take next fp value to work
+ # around possible truncation in LAPACK code
+ value = _np.nextafter(value, _np.inf, dtype=_np.float32)
+
+ value = int(value)
+ if int_dtype.itemsize == 4:
+ if value < 0 or value > _int32_max:
+ raise ValueError("Too large work array required -- computation "
+ "cannot be performed with standard 32-bit"
+ " LAPACK.")
+ elif int_dtype.itemsize == 8:
+ if value < 0 or value > _int64_max:
+ raise ValueError("Too large work array required -- computation"
+ " cannot be performed with standard 64-bit"
+ " LAPACK.")
+ return value
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/matfuncs.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/matfuncs.py
new file mode 100644
index 0000000..fb88471
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/matfuncs.py
@@ -0,0 +1,732 @@
+#
+# Author: Travis Oliphant, March 2002
+#
+
+__all__ = ['expm','cosm','sinm','tanm','coshm','sinhm',
+ 'tanhm','logm','funm','signm','sqrtm',
+ 'expm_frechet', 'expm_cond', 'fractional_matrix_power',
+ 'khatri_rao']
+
+from numpy import (Inf, dot, diag, prod, logical_not, ravel,
+ transpose, conjugate, absolute, amax, sign, isfinite, single)
+import numpy as np
+
+# Local imports
+from .misc import norm
+from .basic import solve, inv
+from .special_matrices import triu
+from .decomp_svd import svd
+from .decomp_schur import schur, rsf2csf
+from ._expm_frechet import expm_frechet, expm_cond
+from ._matfuncs_sqrtm import sqrtm
+
# Machine epsilon for double- and single-precision floating point;
# used to choose tolerances in _maybe_real and in the error estimates
# of funm/signm below.
eps = np.finfo(float).eps
feps = np.finfo(single).eps

# Map a dtype character code to a precision index: 0 for single
# precision ('f', 'F'), 1 for double precision and integer types.
# Used to index {0: single-tol, 1: double-tol} tolerance tables.
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
+
+
+###############################################################################
+# Utility functions.
+
+
+def _asarray_square(A):
+ """
+ Wraps asarray with the extra requirement that the input be a square matrix.
+
+ The motivation is that the matfuncs module has real functions that have
+ been lifted to square matrix functions.
+
+ Parameters
+ ----------
+ A : array_like
+ A square matrix.
+
+ Returns
+ -------
+ out : ndarray
+ An ndarray copy or view or other representation of A.
+
+ """
+ A = np.asarray(A)
+ if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+ raise ValueError('expected square array_like input')
+ return A
+
+
def _maybe_real(A, B, tol=None):
    """
    Return B, or only the real part of B, depending on A and B.

    B is assumed to have been computed as a complicated function of A
    and may carry negligible imaginary components that are purely
    numerical artifacts. When A is real and B's imaginary part is
    within `tol` of zero everywhere, a real copy of B is returned.

    Parameters
    ----------
    A : ndarray
        Array whose real-vs-complex type decides whether to strip.
    B : ndarray
        Array to be returned, possibly without its imaginary part.
    tol : float, optional
        Absolute tolerance; defaults to a precision-dependent value.

    Returns
    -------
    out : real or complex array
        Either B itself or B.real.
    """
    # Booleans and integers register as real here.
    if np.isrealobj(A) and np.iscomplexobj(B):
        if tol is None:
            # Index 0 -> single precision, 1 -> double precision.
            tol = (feps * 1e3, eps * 1e6)[_array_precision[B.dtype.char]]
        if np.allclose(B.imag, 0.0, atol=tol):
            B = B.real
    return B
+
+
+###############################################################################
+# Matrix functions.
+
+
def fractional_matrix_power(A, t):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> np.dot(b, b)  # Verify square root
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = _asarray_square(A)
    # Deferred import: _matfuncs_inv_ssq pulls in scipy.sparse
    # (onenormest), which would otherwise create an import cycle.
    from scipy.linalg import _matfuncs_inv_ssq
    return _matfuncs_inv_ssq._fractional_matrix_power(A, t)
+
+
def logm(A, disp=True):
    """
    Compute the matrix logarithm.

    The matrix logarithm is the inverse of expm: expm(logm(`A`)) == `A`.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    Examples
    --------
    >>> from scipy.linalg import logm, expm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = logm(a)
    >>> expm(b)  # Verify expm(logm(a)) returns a
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = _asarray_square(A)
    # Deferred import avoids a circular dependency through scipy.sparse.
    from scipy.linalg import _matfuncs_inv_ssq
    F = _maybe_real(A, _matfuncs_inv_ssq._logm(A))
    # Crude error estimate: relative 1-norm residual of expm(F) against A.
    # TODO use a better error approximation
    errest = norm(expm(F) - A, 1) / norm(A, 1)
    errtol = 1000 * eps
    if not disp:
        return F, errest
    if not isfinite(errest) or errest >= errtol:
        print("logm result may be inaccurate, approximate err =", errest)
    return F
+
+
def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (N, N) array_like or sparse matrix
        Matrix to be exponentiated.

    Returns
    -------
    expm : (N, N) ndarray
        Matrix exponential of `A`.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    Matrix version of the formula exp(0) = 1:

    >>> from scipy.linalg import expm
    >>> expm(np.zeros((2,2)))
    array([[ 1.,  0.],
           [ 0.,  1.]])

    """
    # Deferred import: the sparse subpackage depends on linalg, so a
    # top-level import would be circular. sparse.linalg.expm also does
    # the input checking and conversion for us.
    import scipy.sparse.linalg
    return scipy.sparse.linalg.expm(A)
+
+
def cosm(A):
    """
    Compute the matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A

    Examples
    --------
    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> from scipy.linalg import expm, sinm, cosm
    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> np.allclose(expm(1j*a), cosm(a) + 1j*sinm(a))
    True

    """
    A = _asarray_square(A)
    if not np.iscomplexobj(A):
        # Real input: cos(A) is the real part of exp(iA).
        return expm(1j * A).real
    return 0.5 * (expm(1j * A) + expm(-1j * A))
+
+
def sinm(A):
    """
    Compute the matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`

    Examples
    --------
    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> from scipy.linalg import expm, sinm, cosm
    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> np.allclose(expm(1j*a), cosm(a) + 1j*sinm(a))
    True

    """
    A = _asarray_square(A)
    if not np.iscomplexobj(A):
        # Real input: sin(A) is the imaginary part of exp(iA).
        return expm(1j * A).imag
    return -0.5j * (expm(1j * A) - expm(-1j * A))
+
+
def tanm(A):
    """
    Compute the matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`

    Examples
    --------
    Verify tanm(a) = sinm(a).dot(inv(cosm(a))):

    >>> from scipy.linalg import tanm, sinm, cosm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> np.allclose(tanm(a), sinm(a).dot(np.linalg.inv(cosm(a))))
    True

    """
    A = _asarray_square(A)
    # tan(A) = cos(A)^-1 sin(A), computed via a linear solve rather
    # than an explicit inverse.
    cosine = cosm(A)
    sine = sinm(A)
    return _maybe_real(A, solve(cosine, sine))
+
+
def coshm(A):
    """
    Compute the hyperbolic matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`

    Examples
    --------
    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))):

    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> np.allclose(tanhm(a), sinhm(a).dot(np.linalg.inv(coshm(a))))
    True

    """
    A = _asarray_square(A)
    # cosh(A) = (exp(A) + exp(-A)) / 2
    pos = expm(A)
    neg = expm(-A)
    return _maybe_real(A, 0.5 * (pos + neg))
+
+
def sinhm(A):
    """
    Compute the hyperbolic matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`

    Examples
    --------
    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))):

    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> np.allclose(tanhm(a), sinhm(a).dot(np.linalg.inv(coshm(a))))
    True

    """
    A = _asarray_square(A)
    # sinh(A) = (exp(A) - exp(-A)) / 2
    pos = expm(A)
    neg = expm(-A)
    return _maybe_real(A, 0.5 * (pos - neg))
+
+
def tanhm(A):
    """
    Compute the hyperbolic matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`

    Examples
    --------
    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))):

    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> np.allclose(tanhm(a), sinhm(a).dot(np.linalg.inv(coshm(a))))
    True

    """
    A = _asarray_square(A)
    # tanh(A) = cosh(A)^-1 sinh(A), via a linear solve rather than an
    # explicit inverse.
    hyp_cos = coshm(A)
    hyp_sin = sinhm(A)
    return _maybe_real(A, solve(hyp_cos, hyp_sin))
+
+
def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.

    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (eg. using vectorize).
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])

    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).

    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do

    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)

    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.

    """
    A = _asarray_square(A)
    # Perform Shur decomposition (lapack ?gees), then rotate the real
    # Schur form to a complex upper-triangular T so the recurrence below
    # only deals with triangular matrices.
    T, Z = schur(A)
    T, Z = rsf2csf(T, Z)
    n, n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex

    # Track the smallest divisor |T[j,j] - T[i,i]| encountered; nearly
    # equal eigenvalues make the recurrence ill-conditioned, and minden
    # feeds the error estimate below.
    minden = abs(T[0, 0])

    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "matrix Computations." (Parlett recurrence: fill in F one
    # superdiagonal at a time; p is the diagonal offset.)
    for p in range(1, n):
        for i in range(1, n - p + 1):
            j = i + p
            s = T[i-1, j-1] * (F[j-1, j-1] - F[i-1, i-1])
            # Strictly-between indices i..j-2 (0-based slice i:j-1).
            ksl = slice(i, j-1)
            val = dot(T[i-1, ksl], F[ksl, j-1]) - dot(F[i-1, ksl], T[ksl, j-1])
            s = s + val
            den = T[j-1, j-1] - T[i-1, i-1]
            if den != 0.0:
                s = s / den
            F[i-1, j-1] = s
            minden = min(minden, abs(den))

    # Transform back from Schur coordinates: f(A) = Z f(T) Z^H.
    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)

    tol = {0: feps, 1: eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    # Error estimate grows as eigenvalues cluster (tol/minden) and with
    # the off-diagonal mass of T; clamped to [tol, 1].
    err = min(1, max(tol, (tol/minden)*norm(triu(T, 1), 1)))
    # NOTE(review): prod over the "not finite" mask is nonzero only when
    # *every* entry of F is non-finite; a single stray NaN among finite
    # entries does not trip this check.
    if prod(ravel(logical_not(isfinite(F))), axis=0):
        err = Inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err
+
+
def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])

    """
    A = _asarray_square(A)

    def rounded_sign(x):
        # Zero out entries that are negligibly small relative to the
        # largest entry before taking the sign, so numerical noise does
        # not produce spurious +/-1 values.
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    # First attempt: lift the (rounded) scalar sign through funm.
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0: 1e3*feps, 1: 1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:

    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=0)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    # Newton iteration S <- (S + S^-1)/2 converges towards sign(S);
    # stop when the projector residual ||P^2 - P|| is small enough or
    # stops changing (capped at 100 iterations).
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0, S0)+S0)
        errest = norm(dot(Pp, Pp)-Pp, 1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest
+
+
def khatri_rao(a, b):
    r"""
    Khatri-Rao product.

    A column-wise Kronecker product of two matrices.

    Parameters
    ----------
    a : (n, k) array_like
        Input array
    b : (m, k) array_like
        Input array

    Returns
    -------
    c : (n*m, k) ndarray
        Khatri-Rao product of `a` and `b`.

    Raises
    ------
    ValueError
        If either input is not 2-D, or the column counts differ.

    Notes
    -----
    The mathematical definition of the Khatri-Rao product is:

    .. math::

        (A_{ij}  \bigotimes B_{ij})_{ij}

    which is the Kronecker product of every column of A and B, e.g.::

        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T

    See Also
    --------
    kron : Kronecker product

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]])
    >>> linalg.khatri_rao(a, b)
    array([[ 3,  8, 15],
           [ 6, 14, 24],
           [ 2,  6, 27],
           [12, 20, 30],
           [24, 35, 48],
           [ 8, 15, 54]])

    """
    a = np.asarray(a)
    b = np.asarray(b)

    # Fix: the previous error message read "The both arrays should be
    # 2-dimensional." which is ungrammatical user-facing text.
    if not (a.ndim == 2 and b.ndim == 2):
        raise ValueError("Both arrays should be 2-dimensional.")

    if not a.shape[1] == b.shape[1]:
        raise ValueError("The number of columns for both arrays "
                         "should be equal.")

    # Broadcasting trick equivalent to
    # c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
    # but vectorized: outer product along a new axis, then collapse the
    # two row axes into one of length n*m.
    c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
    return c.reshape((-1,) + c.shape[2:])
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/misc.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/misc.py
new file mode 100644
index 0000000..d317a1b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/misc.py
@@ -0,0 +1,192 @@
+import numpy as np
+from numpy.linalg import LinAlgError
+from .blas import get_blas_funcs
+from .lapack import get_lapack_funcs
+
+__all__ = ['LinAlgError', 'LinAlgWarning', 'norm']
+
+
class LinAlgWarning(RuntimeWarning):
    """
    Warning emitted when a linear algebra related operation is close to
    the failure conditions of the algorithm, or when loss of accuracy
    is expected.
    """
+
+
def norm(a, ord=None, axis=None, keepdims=False, check_finite=True):
    """
    Matrix or vector norm.

    This function is able to return one of seven different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.

    Parameters
    ----------
    a : (M,) or (M, N) array_like
        Input array. If `axis` is None, `a` must be 1D or 2D.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means NumPy's
        `inf` object
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `a` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `a`
        is 1-D) or a matrix norm (when `a` is 2-D) is returned.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one. With this option the result will
        broadcast correctly against the original `a`.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).

    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.

    The following norms can be calculated:

    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    The ``axis`` and ``keepdims`` arguments are passed directly to
    ``numpy.linalg.norm`` and are only usable if they are supported
    by the version of numpy in use.

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.linalg import norm
    >>> a = np.arange(9) - 4.0
    >>> a
    array([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4., -3., -2.],
           [-1.,  0.,  1.],
           [ 2.,  3.,  4.]])

    >>> norm(a)
    7.745966692414834
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(a, np.inf)
    4
    >>> norm(b, np.inf)
    9
    >>> norm(a, -np.inf)
    0
    >>> norm(b, -np.inf)
    2

    >>> norm(a, 1)
    20
    >>> norm(b, 1)
    7
    >>> norm(a, -1)
    -4.6566128774142013e-010
    >>> norm(b, -1)
    6
    >>> norm(a, 2)
    7.745966692414834
    >>> norm(b, 2)
    7.3484692283495345

    >>> norm(a, -2)
    0
    >>> norm(b, -2)
    1.8570331885190563e-016
    >>> norm(a, 3)
    5.8480354764257312
    >>> norm(a, -3)
    0

    """
    # Differs from numpy only in non-finite handling and the use of blas.
    if check_finite:
        a = np.asarray_chkfinite(a)
    else:
        a = np.asarray(a)

    # Only use optimized norms if axis and keepdims are not specified.
    # (Restricted to BLAS-supported float/complex dtypes: f, d, F, D.)
    if a.dtype.char in 'fdFD' and axis is None and not keepdims:

        if ord in (None, 2) and (a.ndim == 1):
            # use blas for fast and stable euclidean norm
            nrm2 = get_blas_funcs('nrm2', dtype=a.dtype, ilp64='preferred')
            return nrm2(a)

        # NOTE(review): the `axis is None and not keepdims` test below is
        # redundant -- it is already guaranteed by the enclosing branch.
        if a.ndim == 2 and axis is None and not keepdims:
            # Use lapack for a couple fast matrix norms.
            # For some reason the *lange frobenius norm is slow.
            lange_args = None
            # Make sure this works if the user uses the axis keywords
            # to apply the norm to the transpose. *lange needs
            # Fortran-ordered data; the 1-norm of `a` equals the
            # inf-norm of `a.T`, so swap the norm code when falling
            # back to the transpose.
            if ord == 1:
                if np.isfortran(a):
                    lange_args = '1', a
                elif np.isfortran(a.T):
                    lange_args = 'i', a.T
            elif ord == np.inf:
                if np.isfortran(a):
                    lange_args = 'i', a
                elif np.isfortran(a.T):
                    lange_args = '1', a.T
            if lange_args:
                lange = get_lapack_funcs('lange', dtype=a.dtype, ilp64='preferred')
                return lange(*lange_args)

    # Filter out the axis and keepdims arguments if they aren't used so they
    # are never inadvertently passed to a version of numpy that doesn't
    # support them.
    if axis is not None:
        if keepdims:
            return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims)
        return np.linalg.norm(a, ord=ord, axis=axis)
    return np.linalg.norm(a, ord=ord)
+
+
+def _datacopied(arr, original):
+ """
+ Strict check for `arr` not sharing any data with `original`,
+ under the assumption that arr = asarray(original)
+
+ """
+ if arr is original:
+ return False
+ if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
+ return False
+ return arr.base is None
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/setup.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/setup.py
new file mode 100644
index 0000000..0cbd5ff
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/setup.py
@@ -0,0 +1,153 @@
+from os.path import join
+
+
def configuration(parent_package='', top_path=None):
    """
    Build the numpy.distutils configuration for the scipy.linalg
    subpackage: f2py-wrapped BLAS/LAPACK extensions, Cython BLAS/LAPACK,
    and assorted helper extension modules.
    """
    from distutils.sysconfig import get_python_inc
    from scipy._build_utils.system_info import get_info, numpy_info
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    from scipy._build_utils import (get_g77_abi_wrappers, gfortran_legacy_flag_hook,
                                    blas_ilp64_pre_build_hook, get_f2py_int64_options,
                                    uses_blas64)

    config = Configuration('linalg', parent_package, top_path)

    lapack_opt = get_info('lapack_opt')

    # Extract the ATLAS version from the define_macros, if present;
    # the _cblas/_clapack extensions below are only built for ATLAS.
    atlas_version = ([v[3:-3] for k, v in lapack_opt.get('define_macros', [])
                      if k == 'ATLAS_INFO']+[None])[0]
    if atlas_version:
        print(('ATLAS version: %s' % atlas_version))

    if uses_blas64():
        lapack_ilp64_opt = get_info('lapack_ilp64_opt', 2)

    # fblas:
    sources = ['fblas.pyf.src']
    sources += get_g77_abi_wrappers(lapack_opt)
    depends = ['fblas_l?.pyf.src']

    config.add_extension('_fblas',
                         sources=sources,
                         depends=depends,
                         extra_info=lapack_opt
                         )

    if uses_blas64():
        # ILP64 (64-bit integer) variant reuses the same wrapper sources
        # with a different .pyf and f2py int64 options.
        sources = ['fblas_64.pyf.src'] + sources[1:]
        ext = config.add_extension('_fblas_64',
                                   sources=sources,
                                   depends=depends,
                                   f2py_options=get_f2py_int64_options(),
                                   extra_info=lapack_ilp64_opt)
        ext._pre_build_hook = blas_ilp64_pre_build_hook(lapack_ilp64_opt)

    # flapack:
    sources = ['flapack.pyf.src']
    sources += get_g77_abi_wrappers(lapack_opt)
    # Routines removed from LAPACK upstream but still wrapped here.
    dep_pfx = join('src', 'lapack_deprecations')
    deprecated_lapack_routines = [join(dep_pfx, c + 'gegv.f') for c in 'cdsz']
    sources += deprecated_lapack_routines
    depends = ['flapack_gen.pyf.src',
               'flapack_gen_banded.pyf.src',
               'flapack_gen_tri.pyf.src',
               'flapack_pos_def.pyf.src',
               'flapack_pos_def_tri.pyf.src',
               'flapack_sym_herm.pyf.src',
               'flapack_other.pyf.src',
               'flapack_user.pyf.src']

    config.add_extension('_flapack',
                         sources=sources,
                         depends=depends,
                         extra_info=lapack_opt
                         )

    if uses_blas64():
        sources = ['flapack_64.pyf.src'] + sources[1:]
        ext = config.add_extension('_flapack_64',
                                   sources=sources,
                                   depends=depends,
                                   f2py_options=get_f2py_int64_options(),
                                   extra_info=lapack_ilp64_opt)
        ext._pre_build_hook = blas_ilp64_pre_build_hook(lapack_ilp64_opt)

    if atlas_version is not None:
        # cblas:
        config.add_extension('_cblas',
                             sources=['cblas.pyf.src'],
                             depends=['cblas.pyf.src', 'cblas_l1.pyf.src'],
                             extra_info=lapack_opt
                             )

        # clapack:
        config.add_extension('_clapack',
                             sources=['clapack.pyf.src'],
                             depends=['clapack.pyf.src'],
                             extra_info=lapack_opt
                             )

    # _flinalg:
    config.add_extension('_flinalg',
                         sources=[join('src', 'det.f'), join('src', 'lu.f')],
                         extra_info=lapack_opt
                         )

    # _interpolative:
    ext = config.add_extension('_interpolative',
                               sources=[join('src', 'id_dist', 'src', '*.f'),
                                        "interpolative.pyf"],
                               extra_info=lapack_opt
                               )
    # Legacy Fortran in id_dist needs relaxed gfortran flags.
    ext._pre_build_hook = gfortran_legacy_flag_hook

    # _solve_toeplitz:
    config.add_extension('_solve_toeplitz',
                         sources=[('_solve_toeplitz.c')],
                         include_dirs=[get_numpy_include_dirs()])

    # _matfuncs_sqrtm_triu:
    config.add_extension('_matfuncs_sqrtm_triu',
                         sources=[('_matfuncs_sqrtm_triu.c')],
                         include_dirs=[get_numpy_include_dirs()])

    config.add_data_dir('tests')

    # Cython BLAS/LAPACK
    config.add_data_files('cython_blas.pxd')
    config.add_data_files('cython_lapack.pxd')

    # Fortran wrapper library shared by the cython_blas/cython_lapack
    # extensions below.
    sources = ['_blas_subroutine_wrappers.f', '_lapack_subroutine_wrappers.f']
    sources += get_g77_abi_wrappers(lapack_opt)
    includes = numpy_info().get_include_dirs() + [get_python_inc()]
    config.add_library('fwrappers', sources=sources, include_dirs=includes)

    config.add_extension('cython_blas',
                         sources=['cython_blas.c'],
                         depends=['cython_blas.pyx', 'cython_blas.pxd',
                                  'fortran_defs.h', '_blas_subroutines.h'],
                         include_dirs=['.'],
                         libraries=['fwrappers'],
                         extra_info=lapack_opt)

    config.add_extension('cython_lapack',
                         sources=['cython_lapack.c'],
                         depends=['cython_lapack.pyx', 'cython_lapack.pxd',
                                  'fortran_defs.h', '_lapack_subroutines.h'],
                         include_dirs=['.'],
                         libraries=['fwrappers'],
                         extra_info=lapack_opt)

    config.add_extension('_decomp_update',
                         sources=['_decomp_update.c'])

    # Add any license files
    config.add_data_files('src/id_dist/doc/doc.tex')
    config.add_data_files('src/lapack_deprecations/LICENSE')

    return config
+
+
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup

    setup(**configuration(top_path='').todict())
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/special_matrices.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/special_matrices.py
new file mode 100644
index 0000000..c862b58
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/special_matrices.py
@@ -0,0 +1,1374 @@
+import math
+import numpy as np
+from numpy.lib.stride_tricks import as_strided
+
+__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
+ 'hadamard', 'leslie', 'kron', 'block_diag', 'companion',
+ 'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft',
+ 'fiedler', 'fiedler_companion', 'convolution_matrix']
+
+
+# -----------------------------------------------------------------------------
+# matrix construction functions
+# -----------------------------------------------------------------------------
+
+#
+# *Note*: tri{,u,l} is implemented in NumPy, but an important bug was fixed in
+# 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards
+# compatibility.
+
+def tri(N, M=None, k=0, dtype=None):
+ """
+ Construct (N, M) matrix filled with ones at and below the kth diagonal.
+
+ The matrix has A[i,j] == 1 for i <= j + k
+
+ Parameters
+ ----------
+ N : int
+ The size of the first dimension of the matrix.
+ M : int or None, optional
+ The size of the second dimension of the matrix. If `M` is None,
+ `M = N` is assumed.
+ k : int, optional
+ Number of subdiagonal below which matrix is filled with ones.
+ `k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0
+ superdiagonal.
+ dtype : dtype, optional
+ Data type of the matrix.
+
+ Returns
+ -------
+ tri : (N, M) ndarray
+ Tri matrix.
+
+ Examples
+ --------
+ >>> from scipy.linalg import tri
+ >>> tri(3, 5, 2, dtype=int)
+ array([[1, 1, 1, 0, 0],
+ [1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1]])
+ >>> tri(3, 5, -1, dtype=int)
+ array([[0, 0, 0, 0, 0],
+ [1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0]])
+
+ """
+ if M is None:
+ M = N
+ if isinstance(M, str):
+ # pearu: any objections to remove this feature?
+ # As tri(N,'d') is equivalent to tri(N,dtype='d')
+ dtype = M
+ M = N
+ m = np.greater_equal.outer(np.arange(k, N+k), np.arange(M))
+ if dtype is None:
+ return m
+ else:
+ return m.astype(dtype)
+
+
+def tril(m, k=0):
+ """
+ Make a copy of a matrix with elements above the kth diagonal zeroed.
+
+ Parameters
+ ----------
+ m : array_like
+ Matrix whose elements to return
+ k : int, optional
+ Diagonal above which to zero elements.
+ `k` == 0 is the main diagonal, `k` < 0 subdiagonal and
+ `k` > 0 superdiagonal.
+
+ Returns
+ -------
+ tril : ndarray
+ Return is the same shape and type as `m`.
+
+ Examples
+ --------
+ >>> from scipy.linalg import tril
+ >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+ array([[ 0, 0, 0],
+ [ 4, 0, 0],
+ [ 7, 8, 0],
+ [10, 11, 12]])
+
+ """
+ m = np.asarray(m)
+ out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m
+ return out
+
+
+def triu(m, k=0):
+ """
+ Make a copy of a matrix with elements below the kth diagonal zeroed.
+
+ Parameters
+ ----------
+ m : array_like
+ Matrix whose elements to return
+ k : int, optional
+ Diagonal below which to zero elements.
+ `k` == 0 is the main diagonal, `k` < 0 subdiagonal and
+ `k` > 0 superdiagonal.
+
+ Returns
+ -------
+ triu : ndarray
+ Return matrix with zeroed elements below the kth diagonal and has
+ same shape and type as `m`.
+
+ Examples
+ --------
+ >>> from scipy.linalg import triu
+ >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+ array([[ 1, 2, 3],
+ [ 4, 5, 6],
+ [ 0, 8, 9],
+ [ 0, 0, 12]])
+
+ """
+ m = np.asarray(m)
+ out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m
+ return out
+
+
+def toeplitz(c, r=None):
+ """
+ Construct a Toeplitz matrix.
+
+ The Toeplitz matrix has constant diagonals, with c as its first column
+ and r as its first row. If r is not given, ``r == conjugate(c)`` is
+ assumed.
+
+ Parameters
+ ----------
+ c : array_like
+ First column of the matrix. Whatever the actual shape of `c`, it
+ will be converted to a 1-D array.
+ r : array_like, optional
+ First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
+ in this case, if c[0] is real, the result is a Hermitian matrix.
+ r[0] is ignored; the first row of the returned matrix is
+ ``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be
+ converted to a 1-D array.
+
+ Returns
+ -------
+ A : (len(c), len(r)) ndarray
+ The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
+
+ See Also
+ --------
+ circulant : circulant matrix
+ hankel : Hankel matrix
+ solve_toeplitz : Solve a Toeplitz system.
+
+ Notes
+ -----
+ The behavior when `c` or `r` is a scalar, or when `c` is complex and
+ `r` is None, was changed in version 0.8.0. The behavior in previous
+ versions was undocumented and is no longer supported.
+
+ Examples
+ --------
+ >>> from scipy.linalg import toeplitz
+ >>> toeplitz([1,2,3], [1,4,5,6])
+ array([[1, 4, 5, 6],
+ [2, 1, 4, 5],
+ [3, 2, 1, 4]])
+ >>> toeplitz([1.0, 2+3j, 4-1j])
+ array([[ 1.+0.j, 2.-3.j, 4.+1.j],
+ [ 2.+3.j, 1.+0.j, 2.-3.j],
+ [ 4.-1.j, 2.+3.j, 1.+0.j]])
+
+ """
+ c = np.asarray(c).ravel()
+ if r is None:
+ r = c.conjugate()
+ else:
+ r = np.asarray(r).ravel()
+ # Form a 1-D array containing a reversed c followed by r[1:] that could be
+ # strided to give us toeplitz matrix.
+ vals = np.concatenate((c[::-1], r[1:]))
+ out_shp = len(c), len(r)
+ n = vals.strides[0]
+ return as_strided(vals[len(c)-1:], shape=out_shp, strides=(-n, n)).copy()
+
+
+def circulant(c):
+ """
+ Construct a circulant matrix.
+
+ Parameters
+ ----------
+ c : (N,) array_like
+ 1-D array, the first column of the matrix.
+
+ Returns
+ -------
+ A : (N, N) ndarray
+ A circulant matrix whose first column is `c`.
+
+ See Also
+ --------
+ toeplitz : Toeplitz matrix
+ hankel : Hankel matrix
+ solve_circulant : Solve a circulant system.
+
+ Notes
+ -----
+ .. versionadded:: 0.8.0
+
+ Examples
+ --------
+ >>> from scipy.linalg import circulant
+ >>> circulant([1, 2, 3])
+ array([[1, 3, 2],
+ [2, 1, 3],
+ [3, 2, 1]])
+
+ """
+ c = np.asarray(c).ravel()
+ # Form an extended array that could be strided to give circulant version
+ c_ext = np.concatenate((c[::-1], c[:0:-1]))
+ L = len(c)
+ n = c_ext.strides[0]
+ return as_strided(c_ext[L-1:], shape=(L, L), strides=(-n, n)).copy()
+
+
+def hankel(c, r=None):
+ """
+ Construct a Hankel matrix.
+
+ The Hankel matrix has constant anti-diagonals, with `c` as its
+ first column and `r` as its last row. If `r` is not given, then
+ `r = zeros_like(c)` is assumed.
+
+ Parameters
+ ----------
+ c : array_like
+ First column of the matrix. Whatever the actual shape of `c`, it
+ will be converted to a 1-D array.
+ r : array_like, optional
+ Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed.
+ r[0] is ignored; the last row of the returned matrix is
+ ``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be
+ converted to a 1-D array.
+
+ Returns
+ -------
+ A : (len(c), len(r)) ndarray
+ The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
+
+ See Also
+ --------
+ toeplitz : Toeplitz matrix
+ circulant : circulant matrix
+
+ Examples
+ --------
+ >>> from scipy.linalg import hankel
+ >>> hankel([1, 17, 99])
+ array([[ 1, 17, 99],
+ [17, 99, 0],
+ [99, 0, 0]])
+ >>> hankel([1,2,3,4], [4,7,7,8,9])
+ array([[1, 2, 3, 4, 7],
+ [2, 3, 4, 7, 7],
+ [3, 4, 7, 7, 8],
+ [4, 7, 7, 8, 9]])
+
+ """
+ c = np.asarray(c).ravel()
+ if r is None:
+ r = np.zeros_like(c)
+ else:
+ r = np.asarray(r).ravel()
+ # Form a 1-D array of values to be used in the matrix, containing `c`
+ # followed by r[1:].
+ vals = np.concatenate((c, r[1:]))
+ # Stride on concatenated array to get hankel matrix
+ out_shp = len(c), len(r)
+ n = vals.strides[0]
+ return as_strided(vals, shape=out_shp, strides=(n, n)).copy()
+
+
+def hadamard(n, dtype=int):
+ """
+ Construct an Hadamard matrix.
+
+ Constructs an n-by-n Hadamard matrix, using Sylvester's
+ construction. `n` must be a power of 2.
+
+ Parameters
+ ----------
+ n : int
+ The order of the matrix. `n` must be a power of 2.
+ dtype : dtype, optional
+ The data type of the array to be constructed.
+
+ Returns
+ -------
+ H : (n, n) ndarray
+ The Hadamard matrix.
+
+ Notes
+ -----
+ .. versionadded:: 0.8.0
+
+ Examples
+ --------
+ >>> from scipy.linalg import hadamard
+ >>> hadamard(2, dtype=complex)
+ array([[ 1.+0.j, 1.+0.j],
+ [ 1.+0.j, -1.-0.j]])
+ >>> hadamard(4)
+ array([[ 1, 1, 1, 1],
+ [ 1, -1, 1, -1],
+ [ 1, 1, -1, -1],
+ [ 1, -1, -1, 1]])
+
+ """
+
+ # This function is a slightly modified version of the
+ # function contributed by Ivo in ticket #675.
+
+ if n < 1:
+ lg2 = 0
+ else:
+ lg2 = int(math.log(n, 2))
+ if 2 ** lg2 != n:
+ raise ValueError("n must be an positive integer, and n must be "
+ "a power of 2")
+
+ H = np.array([[1]], dtype=dtype)
+
+ # Sylvester's construction
+ for i in range(0, lg2):
+ H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
+
+ return H
+
+
+def leslie(f, s):
+ """
+ Create a Leslie matrix.
+
+ Given the length n array of fecundity coefficients `f` and the length
+ n-1 array of survival coefficients `s`, return the associated Leslie
+ matrix.
+
+ Parameters
+ ----------
+ f : (N,) array_like
+ The "fecundity" coefficients.
+ s : (N-1,) array_like
+ The "survival" coefficients, has to be 1-D. The length of `s`
+ must be one less than the length of `f`, and it must be at least 1.
+
+ Returns
+ -------
+ L : (N, N) ndarray
+ The array is zero except for the first row,
+ which is `f`, and the first sub-diagonal, which is `s`.
+ The data-type of the array will be the data-type of ``f[0]+s[0]``.
+
+ Notes
+ -----
+ .. versionadded:: 0.8.0
+
+ The Leslie matrix is used to model discrete-time, age-structured
+ population growth [1]_ [2]_. In a population with `n` age classes, two sets
+ of parameters define a Leslie matrix: the `n` "fecundity coefficients",
+ which give the number of offspring per-capita produced by each age
+ class, and the `n` - 1 "survival coefficients", which give the
+ per-capita survival rate of each age class.
+
+ References
+ ----------
+ .. [1] P. H. Leslie, On the use of matrices in certain population
+ mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945)
+ .. [2] P. H. Leslie, Some further notes on the use of matrices in
+ population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245
+ (Dec. 1948)
+
+ Examples
+ --------
+ >>> from scipy.linalg import leslie
+ >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7])
+ array([[ 0.1, 2. , 1. , 0.1],
+ [ 0.2, 0. , 0. , 0. ],
+ [ 0. , 0.8, 0. , 0. ],
+ [ 0. , 0. , 0.7, 0. ]])
+
+ """
+ f = np.atleast_1d(f)
+ s = np.atleast_1d(s)
+ if f.ndim != 1:
+ raise ValueError("Incorrect shape for f. f must be 1D")
+ if s.ndim != 1:
+ raise ValueError("Incorrect shape for s. s must be 1D")
+ if f.size != s.size + 1:
+ raise ValueError("Incorrect lengths for f and s. The length"
+ " of s must be one less than the length of f.")
+ if s.size == 0:
+ raise ValueError("The length of s must be at least 1.")
+
+ tmp = f[0] + s[0]
+ n = f.size
+ a = np.zeros((n, n), dtype=tmp.dtype)
+ a[0] = f
+ a[list(range(1, n)), list(range(0, n - 1))] = s
+ return a
+
+
+def kron(a, b):
+ """
+ Kronecker product.
+
+ The result is the block matrix::
+
+ a[0,0]*b a[0,1]*b ... a[0,-1]*b
+ a[1,0]*b a[1,1]*b ... a[1,-1]*b
+ ...
+ a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b
+
+ Parameters
+ ----------
+ a : (M, N) ndarray
+ Input array
+ b : (P, Q) ndarray
+ Input array
+
+ Returns
+ -------
+ A : (M*P, N*Q) ndarray
+ Kronecker product of `a` and `b`.
+
+ Examples
+ --------
+ >>> from numpy import array
+ >>> from scipy.linalg import kron
+ >>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
+ array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]])
+
+ """
+ if not a.flags['CONTIGUOUS']:
+ a = np.reshape(a, a.shape)
+ if not b.flags['CONTIGUOUS']:
+ b = np.reshape(b, b.shape)
+ o = np.outer(a, b)
+ o = o.reshape(a.shape + b.shape)
+ return np.concatenate(np.concatenate(o, axis=1), axis=1)
+
+
+def block_diag(*arrs):
+ """
+ Create a block diagonal matrix from provided arrays.
+
+ Given the inputs `A`, `B` and `C`, the output will have these
+ arrays arranged on the diagonal::
+
+ [[A, 0, 0],
+ [0, B, 0],
+ [0, 0, C]]
+
+ Parameters
+ ----------
+ A, B, C, ... : array_like, up to 2-D
+ Input arrays. A 1-D array or array_like sequence of length `n` is
+ treated as a 2-D array with shape ``(1,n)``.
+
+ Returns
+ -------
+ D : ndarray
+ Array with `A`, `B`, `C`, ... on the diagonal. `D` has the
+ same dtype as `A`.
+
+ Notes
+ -----
+ If all the input arrays are square, the output is known as a
+ block diagonal matrix.
+
+ Empty sequences (i.e., array-likes of zero size) will not be ignored.
+ Noteworthy, both [] and [[]] are treated as matrices with shape ``(1,0)``.
+
+ Examples
+ --------
+ >>> from scipy.linalg import block_diag
+ >>> A = [[1, 0],
+ ... [0, 1]]
+ >>> B = [[3, 4, 5],
+ ... [6, 7, 8]]
+ >>> C = [[7]]
+ >>> P = np.zeros((2, 0), dtype='int32')
+ >>> block_diag(A, B, C)
+ array([[1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0],
+ [0, 0, 3, 4, 5, 0],
+ [0, 0, 6, 7, 8, 0],
+ [0, 0, 0, 0, 0, 7]])
+ >>> block_diag(A, P, B, C)
+ array([[1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 3, 4, 5, 0],
+ [0, 0, 6, 7, 8, 0],
+ [0, 0, 0, 0, 0, 7]])
+ >>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]])
+ array([[ 1., 0., 0., 0., 0.],
+ [ 0., 2., 3., 0., 0.],
+ [ 0., 0., 0., 4., 5.],
+ [ 0., 0., 0., 6., 7.]])
+
+ """
+ if arrs == ():
+ arrs = ([],)
+ arrs = [np.atleast_2d(a) for a in arrs]
+
+ bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
+ if bad_args:
+ raise ValueError("arguments in the following positions have dimension "
+ "greater than 2: %s" % bad_args)
+
+ shapes = np.array([a.shape for a in arrs])
+ out_dtype = np.find_common_type([arr.dtype for arr in arrs], [])
+ out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype)
+
+ r, c = 0, 0
+ for i, (rr, cc) in enumerate(shapes):
+ out[r:r + rr, c:c + cc] = arrs[i]
+ r += rr
+ c += cc
+ return out
+
+
+def companion(a):
+ """
+ Create a companion matrix.
+
+ Create the companion matrix [1]_ associated with the polynomial whose
+ coefficients are given in `a`.
+
+ Parameters
+ ----------
+ a : (N,) array_like
+ 1-D array of polynomial coefficients. The length of `a` must be
+ at least two, and ``a[0]`` must not be zero.
+
+ Returns
+ -------
+ c : (N-1, N-1) ndarray
+ The first row of `c` is ``-a[1:]/a[0]``, and the first
+ sub-diagonal is all ones. The data-type of the array is the same
+ as the data-type of ``1.0*a[0]``.
+
+ Raises
+ ------
+ ValueError
+ If any of the following are true: a) ``a.ndim != 1``;
+ b) ``a.size < 2``; c) ``a[0] == 0``.
+
+ Notes
+ -----
+ .. versionadded:: 0.8.0
+
+ References
+ ----------
+ .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
+ Cambridge University Press, 1999, pp. 146-7.
+
+ Examples
+ --------
+ >>> from scipy.linalg import companion
+ >>> companion([1, -10, 31, -30])
+ array([[ 10., -31., 30.],
+ [ 1., 0., 0.],
+ [ 0., 1., 0.]])
+
+ """
+ a = np.atleast_1d(a)
+
+ if a.ndim != 1:
+ raise ValueError("Incorrect shape for `a`. `a` must be "
+ "one-dimensional.")
+
+ if a.size < 2:
+ raise ValueError("The length of `a` must be at least 2.")
+
+ if a[0] == 0:
+ raise ValueError("The first coefficient in `a` must not be zero.")
+
+ first_row = -a[1:] / (1.0 * a[0])
+ n = a.size
+ c = np.zeros((n - 1, n - 1), dtype=first_row.dtype)
+ c[0] = first_row
+ c[list(range(1, n - 1)), list(range(0, n - 2))] = 1
+ return c
+
+
+def helmert(n, full=False):
+ """
+ Create an Helmert matrix of order `n`.
+
+ This has applications in statistics, compositional or simplicial analysis,
+ and in Aitchison geometry.
+
+ Parameters
+ ----------
+ n : int
+ The size of the array to create.
+ full : bool, optional
+ If True the (n, n) ndarray will be returned.
+ Otherwise the submatrix that does not include the first
+ row will be returned.
+ Default: False.
+
+ Returns
+ -------
+ M : ndarray
+ The Helmert matrix.
+ The shape is (n, n) or (n-1, n) depending on the `full` argument.
+
+ Examples
+ --------
+ >>> from scipy.linalg import helmert
+ >>> helmert(5, full=True)
+ array([[ 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 ],
+ [ 0.70710678, -0.70710678, 0. , 0. , 0. ],
+ [ 0.40824829, 0.40824829, -0.81649658, 0. , 0. ],
+ [ 0.28867513, 0.28867513, 0.28867513, -0.8660254 , 0. ],
+ [ 0.2236068 , 0.2236068 , 0.2236068 , 0.2236068 , -0.89442719]])
+
+ """
+ H = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n))
+ d = np.arange(n) * np.arange(1, n+1)
+ H[0] = 1
+ d[0] = n
+ H_full = H / np.sqrt(d)[:, np.newaxis]
+ if full:
+ return H_full
+ else:
+ return H_full[1:]
+
+
+def hilbert(n):
+ """
+ Create a Hilbert matrix of order `n`.
+
+ Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
+
+ Parameters
+ ----------
+ n : int
+ The size of the array to create.
+
+ Returns
+ -------
+ h : (n, n) ndarray
+ The Hilbert matrix.
+
+ See Also
+ --------
+ invhilbert : Compute the inverse of a Hilbert matrix.
+
+ Notes
+ -----
+ .. versionadded:: 0.10.0
+
+ Examples
+ --------
+ >>> from scipy.linalg import hilbert
+ >>> hilbert(3)
+ array([[ 1. , 0.5 , 0.33333333],
+ [ 0.5 , 0.33333333, 0.25 ],
+ [ 0.33333333, 0.25 , 0.2 ]])
+
+ """
+ values = 1.0 / (1.0 + np.arange(2 * n - 1))
+ h = hankel(values[:n], r=values[n - 1:])
+ return h
+
+
+def invhilbert(n, exact=False):
+ """
+ Compute the inverse of the Hilbert matrix of order `n`.
+
+ The entries in the inverse of a Hilbert matrix are integers. When `n`
+ is greater than 14, some entries in the inverse exceed the upper limit
+ of 64 bit integers. The `exact` argument provides two options for
+ dealing with these large integers.
+
+ Parameters
+ ----------
+ n : int
+ The order of the Hilbert matrix.
+ exact : bool, optional
+ If False, the data type of the array that is returned is np.float64,
+ and the array is an approximation of the inverse.
+ If True, the array is the exact integer inverse array. To represent
+ the exact inverse when n > 14, the returned array is an object array
+ of long integers. For n <= 14, the exact inverse is returned as an
+ array with data type np.int64.
+
+ Returns
+ -------
+ invh : (n, n) ndarray
+ The data type of the array is np.float64 if `exact` is False.
+ If `exact` is True, the data type is either np.int64 (for n <= 14)
+ or object (for n > 14). In the latter case, the objects in the
+ array will be long integers.
+
+ See Also
+ --------
+ hilbert : Create a Hilbert matrix.
+
+ Notes
+ -----
+ .. versionadded:: 0.10.0
+
+ Examples
+ --------
+ >>> from scipy.linalg import invhilbert
+ >>> invhilbert(4)
+ array([[ 16., -120., 240., -140.],
+ [ -120., 1200., -2700., 1680.],
+ [ 240., -2700., 6480., -4200.],
+ [ -140., 1680., -4200., 2800.]])
+ >>> invhilbert(4, exact=True)
+ array([[ 16, -120, 240, -140],
+ [ -120, 1200, -2700, 1680],
+ [ 240, -2700, 6480, -4200],
+ [ -140, 1680, -4200, 2800]], dtype=int64)
+ >>> invhilbert(16)[7,7]
+ 4.2475099528537506e+19
+ >>> invhilbert(16, exact=True)[7,7]
+ 42475099528537378560
+
+ """
+ from scipy.special import comb
+ if exact:
+ if n > 14:
+ dtype = object
+ else:
+ dtype = np.int64
+ else:
+ dtype = np.float64
+ invh = np.empty((n, n), dtype=dtype)
+ for i in range(n):
+ for j in range(0, i + 1):
+ s = i + j
+ invh[i, j] = ((-1) ** s * (s + 1) *
+ comb(n + i, n - j - 1, exact) *
+ comb(n + j, n - i - 1, exact) *
+ comb(s, i, exact) ** 2)
+ if i != j:
+ invh[j, i] = invh[i, j]
+ return invh
+
+
+def pascal(n, kind='symmetric', exact=True):
+ """
+ Returns the n x n Pascal matrix.
+
+ The Pascal matrix is a matrix containing the binomial coefficients as
+ its elements.
+
+ Parameters
+ ----------
+ n : int
+ The size of the matrix to create; that is, the result is an n x n
+ matrix.
+ kind : str, optional
+ Must be one of 'symmetric', 'lower', or 'upper'.
+ Default is 'symmetric'.
+ exact : bool, optional
+ If `exact` is True, the result is either an array of type
+ numpy.uint64 (if n < 35) or an object array of Python long integers.
+ If `exact` is False, the coefficients in the matrix are computed using
+ `scipy.special.comb` with `exact=False`. The result will be a floating
+ point array, and the values in the array will not be the exact
+ coefficients, but this version is much faster than `exact=True`.
+
+ Returns
+ -------
+ p : (n, n) ndarray
+ The Pascal matrix.
+
+ See Also
+ --------
+ invpascal
+
+ Notes
+ -----
+ See https://en.wikipedia.org/wiki/Pascal_matrix for more information
+ about Pascal matrices.
+
+ .. versionadded:: 0.11.0
+
+ Examples
+ --------
+ >>> from scipy.linalg import pascal
+ >>> pascal(4)
+ array([[ 1, 1, 1, 1],
+ [ 1, 2, 3, 4],
+ [ 1, 3, 6, 10],
+ [ 1, 4, 10, 20]], dtype=uint64)
+ >>> pascal(4, kind='lower')
+ array([[1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 2, 1, 0],
+ [1, 3, 3, 1]], dtype=uint64)
+ >>> pascal(50)[-1, -1]
+ 25477612258980856902730428600
+ >>> from scipy.special import comb
+ >>> comb(98, 49, exact=True)
+ 25477612258980856902730428600
+
+ """
+
+ from scipy.special import comb
+ if kind not in ['symmetric', 'lower', 'upper']:
+ raise ValueError("kind must be 'symmetric', 'lower', or 'upper'")
+
+ if exact:
+ if n >= 35:
+ L_n = np.empty((n, n), dtype=object)
+ L_n.fill(0)
+ else:
+ L_n = np.zeros((n, n), dtype=np.uint64)
+ for i in range(n):
+ for j in range(i + 1):
+ L_n[i, j] = comb(i, j, exact=True)
+ else:
+ L_n = comb(*np.ogrid[:n, :n])
+
+ if kind == 'lower':
+ p = L_n
+ elif kind == 'upper':
+ p = L_n.T
+ else:
+ p = np.dot(L_n, L_n.T)
+
+ return p
+
+
+def invpascal(n, kind='symmetric', exact=True):
+ """
+ Returns the inverse of the n x n Pascal matrix.
+
+ The Pascal matrix is a matrix containing the binomial coefficients as
+ its elements.
+
+ Parameters
+ ----------
+ n : int
+ The size of the matrix to create; that is, the result is an n x n
+ matrix.
+ kind : str, optional
+ Must be one of 'symmetric', 'lower', or 'upper'.
+ Default is 'symmetric'.
+ exact : bool, optional
+ If `exact` is True, the result is either an array of type
+ ``numpy.int64`` (if `n` <= 35) or an object array of Python integers.
+ If `exact` is False, the coefficients in the matrix are computed using
+ `scipy.special.comb` with `exact=False`. The result will be a floating
+ point array, and for large `n`, the values in the array will not be the
+ exact coefficients.
+
+ Returns
+ -------
+ invp : (n, n) ndarray
+ The inverse of the Pascal matrix.
+
+ See Also
+ --------
+ pascal
+
+ Notes
+ -----
+
+ .. versionadded:: 0.16.0
+
+ References
+ ----------
+ .. [1] "Pascal matrix", https://en.wikipedia.org/wiki/Pascal_matrix
+ .. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical
+ Gazette, 59(408), pp. 111-112, 1975.
+
+ Examples
+ --------
+ >>> from scipy.linalg import invpascal, pascal
+ >>> invp = invpascal(5)
+ >>> invp
+ array([[ 5, -10, 10, -5, 1],
+ [-10, 30, -35, 19, -4],
+ [ 10, -35, 46, -27, 6],
+ [ -5, 19, -27, 17, -4],
+ [ 1, -4, 6, -4, 1]])
+
+ >>> p = pascal(5)
+ >>> p.dot(invp)
+ array([[ 1., 0., 0., 0., 0.],
+ [ 0., 1., 0., 0., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 0., 0., 1., 0.],
+ [ 0., 0., 0., 0., 1.]])
+
+ An example of the use of `kind` and `exact`:
+
+ >>> invpascal(5, kind='lower', exact=False)
+ array([[ 1., -0., 0., -0., 0.],
+ [-1., 1., -0., 0., -0.],
+ [ 1., -2., 1., -0., 0.],
+ [-1., 3., -3., 1., -0.],
+ [ 1., -4., 6., -4., 1.]])
+
+ """
+ from scipy.special import comb
+
+ if kind not in ['symmetric', 'lower', 'upper']:
+ raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.")
+
+ if kind == 'symmetric':
+ if exact:
+ if n > 34:
+ dt = object
+ else:
+ dt = np.int64
+ else:
+ dt = np.float64
+ invp = np.empty((n, n), dtype=dt)
+ for i in range(n):
+ for j in range(0, i + 1):
+ v = 0
+ for k in range(n - i):
+ v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j,
+ exact=exact)
+ invp[i, j] = (-1)**(i - j) * v
+ if i != j:
+ invp[j, i] = invp[i, j]
+ else:
+        # For the 'lower' and 'upper' cases, we compute the inverse by
+        # changing the sign of every other diagonal of the pascal matrix.
+ invp = pascal(n, kind=kind, exact=exact)
+ if invp.dtype == np.uint64:
+            # This cast from np.uint64 to np.int64 is OK, because if `kind` is
+            # not "symmetric", the values in invp are all much less than 2**63.
+ invp = invp.view(np.int64)
+
+ # The toeplitz matrix has alternating bands of 1 and -1.
+ invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype)
+
+ return invp
+
+
+def dft(n, scale=None):
+ """
+ Discrete Fourier transform matrix.
+
+ Create the matrix that computes the discrete Fourier transform of a
+ sequence [1]_. The nth primitive root of unity used to generate the
+ matrix is exp(-2*pi*i/n), where i = sqrt(-1).
+
+ Parameters
+ ----------
+ n : int
+ Size the matrix to create.
+ scale : str, optional
+ Must be None, 'sqrtn', or 'n'.
+ If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`.
+ If `scale` is 'n', the matrix is divided by `n`.
+ If `scale` is None (the default), the matrix is not normalized, and the
+ return value is simply the Vandermonde matrix of the roots of unity.
+
+ Returns
+ -------
+ m : (n, n) ndarray
+ The DFT matrix.
+
+ Notes
+ -----
+ When `scale` is None, multiplying a vector by the matrix returned by
+ `dft` is mathematically equivalent to (but much less efficient than)
+ the calculation performed by `scipy.fft.fft`.
+
+ .. versionadded:: 0.14.0
+
+ References
+ ----------
+ .. [1] "DFT matrix", https://en.wikipedia.org/wiki/DFT_matrix
+
+ Examples
+ --------
+ >>> from scipy.linalg import dft
+ >>> np.set_printoptions(precision=2, suppress=True) # for compact output
+ >>> m = dft(5)
+ >>> m
+ array([[ 1. +0.j , 1. +0.j , 1. +0.j , 1. +0.j , 1. +0.j ],
+ [ 1. +0.j , 0.31-0.95j, -0.81-0.59j, -0.81+0.59j, 0.31+0.95j],
+ [ 1. +0.j , -0.81-0.59j, 0.31+0.95j, 0.31-0.95j, -0.81+0.59j],
+ [ 1. +0.j , -0.81+0.59j, 0.31-0.95j, 0.31+0.95j, -0.81-0.59j],
+ [ 1. +0.j , 0.31+0.95j, -0.81+0.59j, -0.81-0.59j, 0.31-0.95j]])
+ >>> x = np.array([1, 2, 3, 0, 3])
+ >>> m @ x # Compute the DFT of x
+ array([ 9. +0.j , 0.12-0.81j, -2.12+3.44j, -2.12-3.44j, 0.12+0.81j])
+
+ Verify that ``m @ x`` is the same as ``fft(x)``.
+
+ >>> from scipy.fft import fft
+ >>> fft(x) # Same result as m @ x
+ array([ 9. +0.j , 0.12-0.81j, -2.12+3.44j, -2.12-3.44j, 0.12+0.81j])
+ """
+ if scale not in [None, 'sqrtn', 'n']:
+ raise ValueError("scale must be None, 'sqrtn', or 'n'; "
+ "%r is not valid." % (scale,))
+
+ omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1)
+ m = omegas ** np.arange(n)
+ if scale == 'sqrtn':
+ m /= math.sqrt(n)
+ elif scale == 'n':
+ m /= n
+ return m
+
+
+def fiedler(a):
+ """Returns a symmetric Fiedler matrix
+
+    Given a sequence of numbers `a`, Fiedler matrices have the structure
+ ``F[i, j] = np.abs(a[i] - a[j])``, and hence zero diagonals and nonnegative
+ entries. A Fiedler matrix has a dominant positive eigenvalue and other
+ eigenvalues are negative. Although not valid generally, for certain inputs,
+ the inverse and the determinant can be derived explicitly as given in [1]_.
+
+ Parameters
+ ----------
+ a : (n,) array_like
+ coefficient array
+
+ Returns
+ -------
+ F : (n, n) ndarray
+
+ See Also
+ --------
+ circulant, toeplitz
+
+ Notes
+ -----
+
+ .. versionadded:: 1.3.0
+
+ References
+ ----------
+ .. [1] J. Todd, "Basic Numerical Mathematics: Vol.2 : Numerical Algebra",
+ 1977, Birkhauser, :doi:`10.1007/978-3-0348-7286-7`
+
+ Examples
+ --------
+ >>> from scipy.linalg import det, inv, fiedler
+ >>> a = [1, 4, 12, 45, 77]
+ >>> n = len(a)
+ >>> A = fiedler(a)
+ >>> A
+ array([[ 0, 3, 11, 44, 76],
+ [ 3, 0, 8, 41, 73],
+ [11, 8, 0, 33, 65],
+ [44, 41, 33, 0, 32],
+ [76, 73, 65, 32, 0]])
+
+ The explicit formulas for determinant and inverse seem to hold only for
+ monotonically increasing/decreasing arrays. Note the tridiagonal structure
+ and the corners.
+
+ >>> Ai = inv(A)
+ >>> Ai[np.abs(Ai) < 1e-12] = 0. # cleanup the numerical noise for display
+ >>> Ai
+ array([[-0.16008772, 0.16666667, 0. , 0. , 0.00657895],
+ [ 0.16666667, -0.22916667, 0.0625 , 0. , 0. ],
+ [ 0. , 0.0625 , -0.07765152, 0.01515152, 0. ],
+ [ 0. , 0. , 0.01515152, -0.03077652, 0.015625 ],
+ [ 0.00657895, 0. , 0. , 0.015625 , -0.00904605]])
+ >>> det(A)
+ 15409151.999999998
+ >>> (-1)**(n-1) * 2**(n-2) * np.diff(a).prod() * (a[-1] - a[0])
+ 15409152
+
+ """
+ a = np.atleast_1d(a)
+
+ if a.ndim != 1:
+ raise ValueError("Input 'a' must be a 1D array.")
+
+ if a.size == 0:
+ return np.array([], dtype=float)
+ elif a.size == 1:
+ return np.array([[0.]])
+ else:
+ return np.abs(a[:, None] - a)
+
+
+def fiedler_companion(a):
+ """ Returns a Fiedler companion matrix
+
+ Given a polynomial coefficient array ``a``, this function forms a
+    pentadiagonal matrix with a special structure whose eigenvalues coincide
+ with the roots of ``a``.
+
+ Parameters
+ ----------
+ a : (N,) array_like
+ 1-D array of polynomial coefficients in descending order with a nonzero
+ leading coefficient. For ``N < 2``, an empty array is returned.
+
+ Returns
+ -------
+ c : (N-1, N-1) ndarray
+ Resulting companion matrix
+
+ Notes
+ -----
+    Similar to `companion`, the leading coefficient should be nonzero. In the case
+ the leading coefficient is not 1, other coefficients are rescaled before
+ the array generation. To avoid numerical issues, it is best to provide a
+ monic polynomial.
+
+ .. versionadded:: 1.3.0
+
+ See Also
+ --------
+ companion
+
+ References
+ ----------
+ .. [1] M. Fiedler, " A note on companion matrices", Linear Algebra and its
+ Applications, 2003, :doi:`10.1016/S0024-3795(03)00548-2`
+
+ Examples
+ --------
+ >>> from scipy.linalg import fiedler_companion, eigvals
+ >>> p = np.poly(np.arange(1, 9, 2)) # [1., -16., 86., -176., 105.]
+ >>> fc = fiedler_companion(p)
+ >>> fc
+ array([[ 16., -86., 1., 0.],
+ [ 1., 0., 0., 0.],
+ [ 0., 176., 0., -105.],
+ [ 0., 1., 0., 0.]])
+ >>> eigvals(fc)
+ array([7.+0.j, 5.+0.j, 3.+0.j, 1.+0.j])
+
+ """
+ a = np.atleast_1d(a)
+
+ if a.ndim != 1:
+ raise ValueError("Input 'a' must be a 1-D array.")
+
+ if a.size <= 2:
+ if a.size == 2:
+ return np.array([[-(a/a[0])[-1]]])
+ return np.array([], dtype=a.dtype)
+
+ if a[0] == 0.:
+ raise ValueError('Leading coefficient is zero.')
+
+ a = a/a[0]
+ n = a.size - 1
+ c = np.zeros((n, n), dtype=a.dtype)
+ # subdiagonals
+ c[range(3, n, 2), range(1, n-2, 2)] = 1.
+ c[range(2, n, 2), range(1, n-1, 2)] = -a[3::2]
+ # superdiagonals
+ c[range(0, n-2, 2), range(2, n, 2)] = 1.
+ c[range(0, n-1, 2), range(1, n, 2)] = -a[2::2]
+ c[[0, 1], 0] = [-a[1], 1]
+
+ return c
+
+
+def convolution_matrix(a, n, mode='full'):
+ """
+ Construct a convolution matrix.
+
+ Constructs the Toeplitz matrix representing one-dimensional
+ convolution [1]_. See the notes below for details.
+
+ Parameters
+ ----------
+ a : (m,) array_like
+ The 1-D array to convolve.
+ n : int
+ The number of columns in the resulting matrix. It gives the length
+ of the input to be convolved with `a`. This is analogous to the
+ length of `v` in ``numpy.convolve(a, v)``.
+ mode : str
+ This is analogous to `mode` in ``numpy.convolve(v, a, mode)``.
+ It must be one of ('full', 'valid', 'same').
+ See below for how `mode` determines the shape of the result.
+
+ Returns
+ -------
+ A : (k, n) ndarray
+ The convolution matrix whose row count `k` depends on `mode`::
+
+ ======= =========================
+ mode k
+ ======= =========================
+ 'full' m + n -1
+ 'same' max(m, n)
+ 'valid' max(m, n) - min(m, n) + 1
+ ======= =========================
+
+ See Also
+ --------
+ toeplitz : Toeplitz matrix
+
+ Notes
+ -----
+ The code::
+
+ A = convolution_matrix(a, n, mode)
+
+ creates a Toeplitz matrix `A` such that ``A @ v`` is equivalent to
+ using ``convolve(a, v, mode)``. The returned array always has `n`
+ columns. The number of rows depends on the specified `mode`, as
+ explained above.
+
+ In the default 'full' mode, the entries of `A` are given by::
+
+ A[i, j] == (a[i-j] if (0 <= (i-j) < m) else 0)
+
+ where ``m = len(a)``. Suppose, for example, the input array is
+ ``[x, y, z]``. The convolution matrix has the form::
+
+ [x, 0, 0, ..., 0, 0]
+ [y, x, 0, ..., 0, 0]
+ [z, y, x, ..., 0, 0]
+ ...
+ [0, 0, 0, ..., x, 0]
+ [0, 0, 0, ..., y, x]
+ [0, 0, 0, ..., z, y]
+ [0, 0, 0, ..., 0, z]
+
+ In 'valid' mode, the entries of `A` are given by::
+
+ A[i, j] == (a[i-j+m-1] if (0 <= (i-j+m-1) < m) else 0)
+
+ This corresponds to a matrix whose rows are the subset of those from
+ the 'full' case where all the coefficients in `a` are contained in the
+ row. For input ``[x, y, z]``, this array looks like::
+
+ [z, y, x, 0, 0, ..., 0, 0, 0]
+ [0, z, y, x, 0, ..., 0, 0, 0]
+ [0, 0, z, y, x, ..., 0, 0, 0]
+ ...
+ [0, 0, 0, 0, 0, ..., x, 0, 0]
+ [0, 0, 0, 0, 0, ..., y, x, 0]
+ [0, 0, 0, 0, 0, ..., z, y, x]
+
+ In the 'same' mode, the entries of `A` are given by::
+
+ d = (m - 1) // 2
+ A[i, j] == (a[i-j+d] if (0 <= (i-j+d) < m) else 0)
+
+ The typical application of the 'same' mode is when one has a signal of
+ length `n` (with `n` greater than ``len(a)``), and the desired output
+ is a filtered signal that is still of length `n`.
+
+ For input ``[x, y, z]``, this array looks like::
+
+ [y, x, 0, 0, ..., 0, 0, 0]
+ [z, y, x, 0, ..., 0, 0, 0]
+ [0, z, y, x, ..., 0, 0, 0]
+ [0, 0, z, y, ..., 0, 0, 0]
+ ...
+ [0, 0, 0, 0, ..., y, x, 0]
+ [0, 0, 0, 0, ..., z, y, x]
+ [0, 0, 0, 0, ..., 0, z, y]
+
+ .. versionadded:: 1.5.0
+
+ References
+ ----------
+ .. [1] "Convolution", https://en.wikipedia.org/wiki/Convolution
+
+ Examples
+ --------
+ >>> from scipy.linalg import convolution_matrix
+ >>> A = convolution_matrix([-1, 4, -2], 5, mode='same')
+ >>> A
+ array([[ 4, -1, 0, 0, 0],
+ [-2, 4, -1, 0, 0],
+ [ 0, -2, 4, -1, 0],
+ [ 0, 0, -2, 4, -1],
+ [ 0, 0, 0, -2, 4]])
+
+ Compare multiplication by `A` with the use of `numpy.convolve`.
+
+ >>> x = np.array([1, 2, 0, -3, 0.5])
+ >>> A @ x
+ array([ 2. , 6. , -1. , -12.5, 8. ])
+
+ Verify that ``A @ x`` produced the same result as applying the
+ convolution function.
+
+ >>> np.convolve([-1, 4, -2], x, mode='same')
+ array([ 2. , 6. , -1. , -12.5, 8. ])
+
+ For comparison to the case ``mode='same'`` shown above, here are the
+ matrices produced by ``mode='full'`` and ``mode='valid'`` for the
+ same coefficients and size.
+
+ >>> convolution_matrix([-1, 4, -2], 5, mode='full')
+ array([[-1, 0, 0, 0, 0],
+ [ 4, -1, 0, 0, 0],
+ [-2, 4, -1, 0, 0],
+ [ 0, -2, 4, -1, 0],
+ [ 0, 0, -2, 4, -1],
+ [ 0, 0, 0, -2, 4],
+ [ 0, 0, 0, 0, -2]])
+
+ >>> convolution_matrix([-1, 4, -2], 5, mode='valid')
+ array([[-2, 4, -1, 0, 0],
+ [ 0, -2, 4, -1, 0],
+ [ 0, 0, -2, 4, -1]])
+ """
+ if n <= 0:
+ raise ValueError('n must be a positive integer.')
+
+ a = np.asarray(a)
+ if a.ndim != 1:
+ raise ValueError('convolution_matrix expects a one-dimensional '
+ 'array as input')
+ if a.size == 0:
+ raise ValueError('len(a) must be at least 1.')
+
+ if mode not in ('full', 'valid', 'same'):
+ raise ValueError(
+ "'mode' argument must be one of ('full', 'valid', 'same')")
+
+ # create zero padded versions of the array
+ az = np.pad(a, (0, n-1), 'constant')
+ raz = np.pad(a[::-1], (0, n-1), 'constant')
+
+ if mode == 'same':
+ trim = min(n, len(a)) - 1
+ tb = trim//2
+ te = trim - tb
+ col0 = az[tb:len(az)-te]
+ row0 = raz[-n-tb:len(raz)-tb]
+ elif mode == 'valid':
+ tb = min(n, len(a)) - 1
+ te = tb
+ col0 = az[tb:len(az)-te]
+ row0 = raz[-n-tb:len(raz)-tb]
+ else: # 'full'
+ col0 = az
+ row0 = raz[-n:]
+ return toeplitz(col0, row0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/src/id_dist/doc/doc.tex b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/src/id_dist/doc/doc.tex
new file mode 100644
index 0000000..8bcece8
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/src/id_dist/doc/doc.tex
@@ -0,0 +1,977 @@
+\documentclass[letterpaper,12pt]{article}
+\usepackage[margin=1in]{geometry}
+\usepackage{verbatim}
+\usepackage{amsmath}
+\usepackage{supertabular}
+\usepackage{array}
+
+\def\T{{\hbox{\scriptsize{\rm T}}}}
+\def\epsilon{\varepsilon}
+\def\bigoh{\mathcal{O}}
+\def\phi{\varphi}
+\def\st{{\hbox{\scriptsize{\rm st}}}}
+\def\th{{\hbox{\scriptsize{\rm th}}}}
+\def\x{\mathbf{x}}
+
+
+\title{ID: A software package for low-rank approximation
+ of matrices via interpolative decompositions, Version 0.4}
+\author{Per-Gunnar Martinsson, Vladimir Rokhlin,\\
+ Yoel Shkolnisky, and Mark Tygert}
+
+
+\begin{document}
+
+\maketitle
+
+\newpage
+
+{\parindent=0pt
+
+The present document and all of the software
+in the accompanying distribution (which is contained in the directory
+{\tt id\_dist} and its subdirectories, or in the file
+{\tt id\_dist.tar.gz}) are
+
+\bigskip
+
+Copyright \copyright\ 2014 by P.-G. Martinsson, V. Rokhlin,
+Y. Shkolnisky, and M. Tygert.
+
+\bigskip
+
+All rights reserved.
+
+\bigskip
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+\begin{enumerate}
+\item Redistributions of source code must retain the above copyright
+notice, this list of conditions, and the following disclaimer.
+\item Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions, and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+\item None of the names of the copyright holders may be used to endorse
+or promote products derived from this software without specific prior
+written permission.
+\end{enumerate}
+
+\bigskip
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNERS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+}
+
+\newpage
+
+\tableofcontents
+
+\newpage
+
+
+
+\hrule
+
+\medskip
+
+\centerline{\Large \bf IMPORTANT}
+
+\medskip
+
+\hrule
+
+\medskip
+
+\noindent At the minimum, please read Subsection~\ref{warning}
+and Section~\ref{naming} below, and beware that the {\it N.B.}'s
+in the source code comments highlight key information about the routines;
+{\it N.B.} stands for {\it nota bene} (Latin for ``note well'').
+
+\medskip
+
+\hrule
+
+\bigskip
+
+
+
+\section{Introduction}
+
+This software distribution provides Fortran routines
+for computing low-rank approximations to matrices,
+in the forms of interpolative decompositions (IDs)
+and singular value decompositions (SVDs).
+The routines use algorithms based on the ID.
+The ID is also commonly known as
+the approximation obtained via skeletonization,
+the approximation obtained via subsampling,
+and the approximation obtained via subset selection.
+The ID provides many advantages in many applications,
+and we suspect that it will become increasingly popular
+once tools for its computation become more widely available.
+This software distribution includes some such tools,
+as well as tools for computing low-rank approximations
+in the form of SVDs.
+Section~\ref{defs} below defines IDs and SVDs,
+and provides references to detailed discussions of the algorithms
+used in this software package.
+
+Please beware that normalized power iterations are better suited than
+the software in this distribution
+for computing principal component analyses
+in the typical case when the square of the signal-to-noise ratio
+is not orders of magnitude greater than both dimensions
+of the data matrix; see~\cite{halko-martinsson-tropp}.
+
+The algorithms used in this distribution have been optimized
+for accuracy, efficiency, and reliability;
+as a somewhat counterintuitive consequence, many must be randomized.
+All randomized codes in this software package succeed
+with overwhelmingly high probability (see, for example,
+\cite{halko-martinsson-tropp}).
+The truly paranoid are welcome to use the routines {\tt idd\_diffsnorm}
+and {\tt idz\_diffsnorm} to evaluate rapidly the quality
+of the approximations produced by the randomized algorithms
+(as done, for example, in the files
+{\tt idd\_a\_test.f}, {\tt idd\_r\_test.f}, {\tt idz\_a\_test.f},
+and {\tt idz\_r\_test.f} in the {\tt test} subdirectory
+of the main directory {\tt id\_dist}).
+In most circumstances, evaluating the quality of an approximation
+via routines {\tt idd\_diffsnorm} or {\tt idz\_diffsnorm} is much faster
+than forming the approximation to be evaluated. Still, we are unaware
+of any instance in which a properly-compiled routine failed to produce
+an accurate approximation.
+To facilitate successful compilation, we encourage the user
+to read the instructions in the next section,
+and to read Section~\ref{naming}, too.
+
+
+
+\section{Compilation instructions}
+
+
+Followed in numerical order, the subsections of this section
+provide step-by-step instructions for compiling the software
+under a Unix-compatible operating system.
+
+
+\subsection{Beware that default command-line flags may not be
+ sufficient for compiling the source codes!}
+\label{warning}
+
+The Fortran source codes in this distribution pass {\tt real*8}
+variables as integer variables, integers as {\tt real*8}'s,
+{\tt real*8}'s as {\tt complex*16}'s, and so on.
+This is common practice in numerical codes, and is not an error;
+be sure to provide the relevant command-line flags to the compiler
+(for example, run {\tt fort77} and {\tt f2c} with the flag {\tt -!P}).
+When following the compilation instructions
+in Subsection~\ref{makefile_edit} below,
+be sure to set {\tt FFLAGS} appropriately.
+
+
+\subsection{Install LAPACK}
+
+The SVD routines in this distribution depend on LAPACK.
+Before compiling the present distribution,
+create the LAPACK and BLAS archive (library) {\tt .a} files;
+information about installing LAPACK is available
+at {\tt http://www.netlib.org/lapack/} (and several other web sites).
+
+
+\subsection{Decompress and untar the file {\tt id\_dist.tar.gz}}
+
+At the command line, decompress and untar the file
+{\tt id\_dist.tar.gz} by issuing a command such as
+{\tt tar -xvvzf id\_dist.tar.gz}.
+This will create a directory named {\tt id\_dist}.
+
+
+\subsection{Edit the Makefile}
+\label{makefile_edit}
+
+The directory {\tt id\_dist} contains a file named {\tt Makefile}.
+In {\tt Makefile}, set the following:
+%
+\begin{itemize}
+\item {\tt FC} is the Fortran compiler.
+\item {\tt FFLAGS} is the set of command-line flags
+ (specifying optimization settings, for example)
+ for the Fortran compiler specified by {\tt FC};
+ please heed the warning in Subsection~\ref{warning} above!
+\item {\tt BLAS\_LIB} is the file-system path to the BLAS archive
+ (library) {\tt .a} file.
+\item {\tt LAPACK\_LIB} is the file-system path to the LAPACK archive
+ (library) {\tt .a} file.
+\item {\tt ARCH} is the archiver utility (usually {\tt ar}).
+\item {\tt ARCHFLAGS} is the set of command-line flags
+ for the archiver specified by {\tt ARCH} needed
+ to create an archive (usually {\tt cr}).
+\item {\tt RANLIB} is to be set to {\tt ranlib}
+ when {\tt ranlib} is available, and is to be set to {\tt echo}
+ when {\tt ranlib} is not available.
+\end{itemize}
+
+
+\subsection{Make and test the libraries}
+
+At the command line in a shell that adheres
+to the Bourne shell conventions for redirection, issue the command
+``{\tt make clean; make}'' to both create the archive (library)
+{\tt id\_lib.a} and test it.
+(In most modern Unix distributions, {\tt sh} is the Bourne shell,
+or else is fully compatible with the Bourne shell;
+the Korn shell {\tt ksh} and the Bourne-again shell {\tt bash}
+also use the Bourne shell conventions for redirection.)
+{\tt make} places the file {\tt id\_lib.a}
+in the directory {\tt id\_dist}; the archive (library) file
+{\tt id\_lib.a} contains machine code for all user-callable routines
+in this distribution.
+
+
+
+\section{Naming conventions}
+\label{naming}
+
+The names of routines and files in this distribution
+start with prefixes, followed by an underscore (``\_'').
+The prefixes are two to four characters in length,
+and have the following meanings:
+%
+\begin{itemize}
+\item The first two letters are always ``{\tt id}'',
+ the name of this distribution.
+\item The third letter (when present) is either ``{\tt d}''
+ or ``{\tt z}'';
+ ``{\tt d}'' stands for double precision ({\tt real*8}),
+ and ``{\tt z}'' stands for double complex ({\tt complex*16}).
+\item The fourth letter (when present) is either ``{\tt r}''
+ or ``{\tt p}'';
+ ``{\tt r}'' stands for specified rank,
+ and ``{\tt p}'' stands for specified precision.
+ The specified rank routines require the user to provide
+ the rank of the approximation to be constructed,
+ while the specified precision routines adjust the rank adaptively
+ to attain the desired precision.
+\end{itemize}
+
+For example, {\tt iddr\_aid} is a {\tt real*8} routine which computes
+an approximation of specified rank.
+{\tt idz\_snorm} is a {\tt complex*16} routine.
+{\tt id\_randperm} is yet another routine in this distribution.
+
+
+
+\section{Example programs}
+
+For examples of how to use the user-callable routines
+in this distribution, see the source codes in subdirectory {\tt test}
+of the main directory {\tt id\_dist}.
+
+
+
+\section{Directory structure}
+
+The main {\tt id\_dist} directory contains a Makefile,
+the auxiliary text files {\tt README.txt} and {\tt size.txt},
+and the following subdirectories, described in the subsections below:
+%
+\begin{enumerate}
+\item {\tt bin}
+\item {\tt development}
+\item {\tt doc}
+\item {\tt src}
+\item {\tt test}
+\item {\tt tmp}
+\end{enumerate}
+%
+If a ``{\tt make all}'' command has completed successfully,
+then the main {\tt id\_dist} directory will also contain
+an archive (library) file {\tt id\_lib.a} containing machine code
+for all of the user-callable routines.
+
+
+\subsection{Subdirectory {\tt bin}}
+
+Once all of the libraries have been made via the Makefile
+in the main {\tt id\_dist} directory,
+the subdirectory {\tt bin} will contain object files (machine code),
+each compiled from the corresponding file of source code
+in the subdirectory {\tt src} of {\tt id\_dist}.
+
+
+\subsection{Subdirectory {\tt development}}
+
+Each Fortran file in the subdirectory {\tt development}
+(except for {\tt dfft.f} and {\tt prini.f})
+specifies its dependencies at the top, then provides a main program
+for testing and debugging, and finally provides source code
+for a library of user-callable subroutines.
+The Fortran file {\tt dfft.f} is a copy of P. N. Swarztrauber's FFTPACK library
+for computing fast Fourier transforms.
+The Fortran file {\tt prini.f} is a copy of V. Rokhlin's library
+of formatted printing routines.
+Both {\tt dfft.f} (version 4) and {\tt prini.f} are in the public domain.
+The shell script {\tt RUNME.sh} runs shell scripts {\tt make\_src.sh}
+and {\tt make\_test.sh}, which fill the subdirectories {\tt src}
+and {\tt test} of the main directory {\tt id\_dist}
+with source codes for user-callable routines
+and with the main program testing codes.
+
+
+\subsection{Subdirectory {\tt doc}}
+
+Subdirectory {\tt doc} contains this documentation,
+supplementing comments in the source codes.
+
+
+\subsection{Subdirectory {\tt src}}
+
+The files in the subdirectory {\tt src} provide source code
+for software libraries. Each file in the subdirectory {\tt src}
+(except for {\tt dfft.f} and {\tt prini.f}) is
+the bottom part of the corresponding file
+in the subdirectory {\tt development} of {\tt id\_dist}.
+The file {\tt dfft.f} is just a copy
+of P. N. Swarztrauber's FFTPACK library
+for computing fast Fourier transforms.
+The file {\tt prini.f} is a copy of V. Rokhlin's library
+of formatted printing routines.
+Both {\tt dfft.f} (version 4) and {\tt prini.f} are in the public domain.
+
+
+\subsection{Subdirectory {\tt test}}
+
+The files in subdirectory {\tt test} provide source code
+for testing and debugging. Each file in subdirectory {\tt test} is
+the top part of the corresponding file
+in subdirectory {\tt development} of {\tt id\_dist},
+and provides a main program and a list of its dependencies.
+These codes provide examples of how to call the user-callable routines.
+
+
+
+\section{Catalog of the routines}
+
+The main routines for decomposing {\tt real*8} matrices are:
+%
+\begin{enumerate}
+%
+\item IDs of arbitrary (generally dense) matrices:
+{\tt iddp\_id}, {\tt iddr\_id}, {\tt iddp\_aid}, {\tt iddr\_aid}
+%
+\item IDs of matrices that may be rapidly applied to arbitrary vectors
+(as may the matrices' transposes):
+{\tt iddp\_rid}, {\tt iddr\_rid}
+%
+\item SVDs of arbitrary (generally dense) matrices:
+{\tt iddp\_svd}, {\tt iddr\_svd}, {\tt iddp\_asvd},\\{\tt iddr\_asvd}
+%
+\item SVDs of matrices that may be rapidly applied to arbitrary vectors
+(as may the matrices' transposes):
+{\tt iddp\_rsvd}, {\tt iddr\_rsvd}
+%
+\end{enumerate}
+
+Similarly, the main routines for decomposing {\tt complex*16} matrices
+are:
+%
+\begin{enumerate}
+%
+\item IDs of arbitrary (generally dense) matrices:
+{\tt idzp\_id}, {\tt idzr\_id}, {\tt idzp\_aid}, {\tt idzr\_aid}
+%
+\item IDs of matrices that may be rapidly applied to arbitrary vectors
+(as may the matrices' adjoints):
+{\tt idzp\_rid}, {\tt idzr\_rid}
+%
+\item SVDs of arbitrary (generally dense) matrices:
+{\tt idzp\_svd}, {\tt idzr\_svd}, {\tt idzp\_asvd},\\{\tt idzr\_asvd}
+%
+\item SVDs of matrices that may be rapidly applied to arbitrary vectors
+(as may the matrices' adjoints):
+{\tt idzp\_rsvd}, {\tt idzr\_rsvd}
+%
+\end{enumerate}
+
+This distribution also includes routines for constructing pivoted $QR$
+decompositions (in {\tt idd\_qrpiv.f} and {\tt idz\_qrpiv.f}), for
+estimating the spectral norms of matrices that may be applied rapidly
+to arbitrary vectors as may their adjoints (in {\tt idd\_snorm.f}
+and {\tt idz\_snorm.f}), for converting IDs to SVDs (in
+{\tt idd\_id2svd.f} and {\tt idz\_id2svd.f}), and for computing rapidly
+arbitrary subsets of the entries of the discrete Fourier transforms
+of vectors (in {\tt idd\_sfft.f} and {\tt idz\_sfft.f}).
+
+
+\subsection{List of the routines}
+
+The following is an alphabetical list of the routines
+in this distribution, together with brief descriptions
+of their functionality and the names of the files containing
+the routines' source code:
+
+\begin{center}
+%
+\tablehead{\bf Routine & \bf Description & \bf Source file \\}
+\tabletail{\hline}
+%
+\begin{supertabular}{>{\raggedright}p{1.2in} p{.53\textwidth} l}
+%
+\hline
+{\tt id\_frand} & generates pseudorandom numbers drawn uniformly from
+the interval $[0,1]$; this routine is more efficient than routine
+{\tt id\_srand}, but cannot generate fewer than 55 pseudorandom numbers
+per call & {\tt id\_rand.f} \\\hline
+%
+{\tt id\_frandi} & initializes the seed values for routine
+{\tt id\_frand} to specified values & {\tt id\_rand.f} \\\hline
+%
+{\tt id\_frando} & initializes the seed values for routine
+{\tt id\_frand} to their original, default values & {\tt id\_rand.f}
+\\\hline
+%
+{\tt id\_randperm} & generates a uniformly random permutation &
+{\tt id\_rand.f} \\\hline
+%
+{\tt id\_srand} & generates pseudorandom numbers drawn uniformly from
+the interval $[0,1]$; this routine is less efficient than routine
+{\tt id\_frand}, but can generate fewer than 55 pseudorandom numbers
+per call & {\tt id\_rand.f} \\\hline
+%
+{\tt id\_srandi} & initializes the seed values for routine
+{\tt id\_srand} to specified values & {\tt id\_rand.f} \\\hline
+%
+{\tt id\_srando} & initializes the seed values for routine
+{\tt id\_srand} to their original, default values & {\tt id\_rand.f}
+\\\hline
+%
+{\tt idd\_copycols} & collects together selected columns of a matrix &
+{\tt idd\_id.f} \\\hline
+%
+{\tt idd\_diffsnorm} & estimates the spectral norm of the difference
+between two matrices specified by routines for applying the matrices
+and their transposes to arbitrary vectors; this routine uses the power
+method with a random starting vector & {\tt idd\_snorm.f} \\\hline
+%
+{\tt idd\_enorm} & calculates the Euclidean norm of a vector &
+{\tt idd\_snorm.f} \\\hline
+%
+{\tt idd\_estrank} & estimates the numerical rank of an arbitrary
+(generally dense) matrix to a specified precision; this routine is
+randomized, and must be initialized with routine {\tt idd\_frmi} &
+{\tt iddp\_aid.f} \\\hline
+%
+{\tt idd\_frm} & transforms a vector into a vector which is
+sufficiently scrambled to be subsampled, via a composition of Rokhlin's
+random transform, random subselection, and a fast Fourier transform &
+{\tt idd\_frm.f} \\\hline
+%
+{\tt idd\_frmi} & initializes routine {\tt idd\_frm} & {\tt idd\_frm.f}
+\\\hline
+%
+{\tt idd\_getcols} & collects together selected columns of a matrix
+specified by a routine for applying the matrix to arbitrary vectors &
+{\tt idd\_id.f} \\\hline
+%
+{\tt idd\_house} & calculates the vector and scalar needed to apply the
+Householder transformation reflecting a given vector into its first
+entry & {\tt idd\_house.f} \\\hline
+%
+{\tt idd\_houseapp} & applies a Householder matrix to a vector &
+{\tt idd\_house.f} \\\hline
+%
+{\tt idd\_id2svd} & converts an approximation to a matrix in the form
+of an ID into an approximation in the form of an SVD &
+{\tt idd\_id2svd.f} \\\hline
+%
+{\tt idd\_ldiv} & finds the greatest integer less than or equal to a
+specified integer, that is divisible by another (larger) specified
+integer & {\tt idd\_sfft.f} \\\hline
+%
+{\tt idd\_pairsamps} & calculates the indices of the pairs of integers
+that the individual integers in a specified set belong to &
+{\tt idd\_frm.f} \\\hline
+%
+{\tt idd\_permmult} & multiplies together a bunch of permutations &
+{\tt idd\_qrpiv.f} \\\hline
+%
+{\tt idd\_qinqr} & reconstructs the $Q$ matrix in a $QR$ decomposition
+from the output of routines {\tt iddp\_qrpiv} or {\tt iddr\_qrpiv} &
+{\tt idd\_qrpiv.f} \\\hline
+%
+{\tt idd\_qrmatmat} & applies to multiple vectors collected together as
+a matrix the $Q$ matrix (or its transpose) in the $QR$ decomposition of
+a matrix, as described by the output of routines {\tt iddp\_qrpiv} or
+{\tt iddr\_qrpiv}; to apply $Q$ (or its transpose) to a single vector
+without having to provide a work array, use routine {\tt idd\_qrmatvec}
+instead & {\tt idd\_qrpiv.f} \\\hline
+%
+{\tt idd\_qrmatvec} & applies to a single vector the $Q$ matrix (or its
+transpose) in the $QR$ decomposition of a matrix, as described by the
+output of routines {\tt iddp\_qrpiv} or {\tt iddr\_qrpiv}; to apply $Q$
+(or its transpose) to several vectors efficiently, use routine
+{\tt idd\_qrmatmat} instead & {\tt idd\_qrpiv.f} \\\hline
+%
+{\tt idd\_random\_} {\tt transf} & applies rapidly a
+random orthogonal matrix to a user-supplied vector & {\tt id\_rtrans.f}
+\\\hline
+%
+{\tt idd\_random\_ transf\_init} & \raggedright initializes routines
+{\tt idd\_random\_transf} and {\tt idd\_random\_transf\_inverse} &
+{\tt id\_rtrans.f} \\\hline
+%
+{\tt idd\_random\_} {\tt transf\_inverse} & applies
+rapidly the inverse of the operator applied by routine
+{\tt idd\_random\_transf} & {\tt id\_rtrans.f} \\\hline
+%
+{\tt idd\_reconid} & reconstructs a matrix from its ID &
+{\tt idd\_id.f} \\\hline
+%
+{\tt idd\_reconint} & constructs $P$ in the ID $A = B \, P$, where the
+columns of $B$ are a subset of the columns of $A$, and $P$ is the
+projection coefficient matrix, given {\tt list}, {\tt krank}, and
+{\tt proj} output by routines {\tt iddr\_id}, {\tt iddp\_id},
+{\tt iddr\_aid}, {\tt iddp\_aid}, {\tt iddr\_rid}, or {\tt iddp\_rid} &
+{\tt idd\_id.f} \\\hline
+%
+{\tt idd\_sfft} & rapidly computes a subset of the entries of the
+discrete Fourier transform of a vector, composed with permutation
+matrices both on input and on output & {\tt idd\_sfft.f} \\\hline
+%
+{\tt idd\_sffti} & initializes routine {\tt idd\_sfft} &
+{\tt idd\_sfft.f} \\\hline
+%
+{\tt idd\_sfrm} & transforms a vector into a scrambled vector of
+specified length, via a composition of Rokhlin's random transform,
+random subselection, and a fast Fourier transform & {\tt idd\_frm.f}
+\\\hline
+%
+{\tt idd\_sfrmi} & initializes routine {\tt idd\_sfrm} &
+{\tt idd\_frm.f} \\\hline
+%
+{\tt idd\_snorm} & estimates the spectral norm of a matrix specified by
+routines for applying the matrix and its transpose to arbitrary
+vectors; this routine uses the power method with a random starting
+vector & {\tt idd\_snorm.f} \\\hline
+%
+{\tt iddp\_aid} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is randomized, and must
+be initialized with routine {\tt idd\_frmi} & {\tt iddp\_aid.f}
+\\\hline
+%
+{\tt iddp\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is randomized, and must
+be initialized with routine {\tt idd\_frmi} & {\tt iddp\_asvd.f}
+\\\hline
+%
+{\tt iddp\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt iddp\_aid} & {\tt idd\_id.f} \\\hline
+%
+{\tt iddp\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified precision of the decomposition &
+{\tt idd\_qrpiv.f} \\\hline
+%
+{\tt iddp\_rid} & computes the ID, to a specified precision, of a
+matrix specified by a routine for applying its transpose to arbitrary
+vectors; this routine is randomized & {\tt iddp\_rid.f} \\\hline
+%
+{\tt iddp\_rsvd} & computes the SVD, to a specified precision, of a
+matrix specified by routines for applying the matrix and its transpose
+to arbitrary vectors; this routine is randomized & {\tt iddp\_rsvd.f}
+\\\hline
+%
+{\tt iddp\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt iddp\_asvd} & {\tt idd\_svd.f} \\\hline
+%
+{\tt iddr\_aid} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized by routine {\tt iddr\_aidi} & {\tt iddr\_aid.f} \\\hline
+%
+{\tt iddr\_aidi} & initializes routine {\tt iddr\_aid} &
+{\tt iddr\_aid.f} \\\hline
+%
+{\tt iddr\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized with routine {\tt iddr\_aidi} & {\tt iddr\_asvd.f}
+\\\hline
+%
+{\tt iddr\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt iddr\_aid} & {\tt idd\_id.f} \\\hline
+%
+{\tt iddr\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified rank of the decomposition & {\tt idd\_qrpiv.f}
+\\\hline
+%
+{\tt iddr\_rid} & computes the ID, to a specified rank, of a matrix
+specified by a routine for applying its transpose to arbitrary vectors;
+this routine is randomized & {\tt iddr\_rid.f} \\\hline
+%
+{\tt iddr\_rsvd} & computes the SVD, to a specified rank, of a matrix
+specified by routines for applying the matrix and its transpose to
+arbitrary vectors; this routine is randomized & {\tt iddr\_rsvd.f}
+\\\hline
+%
+{\tt iddr\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt iddr\_asvd} & {\tt idd\_svd.f} \\\hline
+%
+{\tt idz\_copycols} & collects together selected columns of a matrix &
+{\tt idz\_id.f} \\\hline
+%
+{\tt idz\_diffsnorm} & estimates the spectral norm of the difference
+between two matrices specified by routines for applying the matrices
+and their adjoints to arbitrary vectors; this routine uses the power
+method with a random starting vector & {\tt idz\_snorm.f} \\\hline
+%
+{\tt idz\_enorm} & calculates the Euclidean norm of a vector &
+{\tt idz\_snorm.f} \\\hline
+%
+{\tt idz\_estrank} & estimates the numerical rank of an arbitrary
+(generally dense) matrix to a specified precision; this routine is
+randomized, and must be initialized with routine {\tt idz\_frmi} &
+{\tt idzp\_aid.f} \\\hline
+%
+{\tt idz\_frm} & transforms a vector into a vector which is
+sufficiently scrambled to be subsampled, via a composition of Rokhlin's
+random transform, random subselection, and a fast Fourier transform &
+{\tt idz\_frm.f} \\\hline
+%
+{\tt idz\_frmi} & initializes routine {\tt idz\_frm} & {\tt idz\_frm.f}
+\\\hline
+%
+{\tt idz\_getcols} & collects together selected columns of a matrix
+specified by a routine for applying the matrix to arbitrary vectors &
+{\tt idz\_id.f} \\\hline
+%
+{\tt idz\_house} & calculates the vector and scalar needed to apply the
+Householder transformation reflecting a given vector into its first
+entry & {\tt idz\_house.f} \\\hline
+%
+{\tt idz\_houseapp} & applies a Householder matrix to a vector &
+{\tt idz\_house.f} \\\hline
+%
+{\tt idz\_id2svd} & converts an approximation to a matrix in the form
+of an ID into an approximation in the form of an SVD &
+{\tt idz\_id2svd.f} \\\hline
+%
+{\tt idz\_ldiv} & finds the greatest integer less than or equal to a
+specified integer, that is divisible by another (larger) specified
+integer & {\tt idz\_sfft.f} \\\hline
+%
+{\tt idz\_permmult} & multiplies together a bunch of permutations &
+{\tt idz\_qrpiv.f} \\\hline
+%
+{\tt idz\_qinqr} & reconstructs the $Q$ matrix in a $QR$ decomposition
+from the output of routines {\tt idzp\_qrpiv} or {\tt idzr\_qrpiv} &
+{\tt idz\_qrpiv.f} \\\hline
+%
+{\tt idz\_qrmatmat} & applies to multiple vectors collected together as
+a matrix the $Q$ matrix (or its adjoint) in the $QR$ decomposition of
+a matrix, as described by the output of routines {\tt idzp\_qrpiv} or
+{\tt idzr\_qrpiv}; to apply $Q$ (or its adjoint) to a single vector
+without having to provide a work array, use routine {\tt idz\_qrmatvec}
+instead & {\tt idz\_qrpiv.f} \\\hline
+%
+{\tt idz\_qrmatvec} & applies to a single vector the $Q$ matrix (or its
+adjoint) in the $QR$ decomposition of a matrix, as described by the
+output of routines {\tt idzp\_qrpiv} or {\tt idzr\_qrpiv}; to apply $Q$
+(or its adjoint) to several vectors efficiently, use routine
+{\tt idz\_qrmatmat} instead & {\tt idz\_qrpiv.f} \\\hline
+%
+{\tt idz\_random\_ transf} & applies rapidly a random unitary matrix to
+a user-supplied vector & {\tt id\_rtrans.f} \\\hline
+%
+{\tt idz\_random\_ transf\_init} & \raggedright initializes routines
+{\tt idz\_random\_transf} and {\tt idz\_random\_transf\_inverse} &
+{\tt id\_rtrans.f} \\\hline
+%
+{\tt idz\_random\_ transf\_inverse} & applies rapidly the inverse of
+the operator applied by routine {\tt idz\_random\_transf} &
+{\tt id\_rtrans.f} \\\hline
+%
+{\tt idz\_reconid} & reconstructs a matrix from its ID &
+{\tt idz\_id.f} \\\hline
+%
+{\tt idz\_reconint} & constructs $P$ in the ID $A = B \, P$, where the
+columns of $B$ are a subset of the columns of $A$, and $P$ is the
+projection coefficient matrix, given {\tt list}, {\tt krank}, and
+{\tt proj} output by routines {\tt idzr\_id}, {\tt idzp\_id},
+{\tt idzr\_aid}, {\tt idzp\_aid}, {\tt idzr\_rid}, or {\tt idzp\_rid} &
+{\tt idz\_id.f} \\\hline
+%
+{\tt idz\_sfft} & rapidly computes a subset of the entries of the
+discrete Fourier transform of a vector, composed with permutation
+matrices both on input and on output & {\tt idz\_sfft.f} \\\hline
+%
+{\tt idz\_sffti} & initializes routine {\tt idz\_sfft} &
+{\tt idz\_sfft.f} \\\hline
+%
+{\tt idz\_sfrm} & transforms a vector into a scrambled vector of
+specified length, via a composition of Rokhlin's random transform,
+random subselection, and a fast Fourier transform & {\tt idz\_frm.f}
+\\\hline
+%
+{\tt idz\_sfrmi} & initializes routine {\tt idz\_sfrm} &
+{\tt idz\_frm.f} \\\hline
+%
+{\tt idz\_snorm} & estimates the spectral norm of a matrix specified by
+routines for applying the matrix and its adjoint to arbitrary
+vectors; this routine uses the power method with a random starting
+vector & {\tt idz\_snorm.f} \\\hline
+%
+{\tt idzp\_aid} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is randomized, and must
+be initialized with routine {\tt idz\_frmi} & {\tt idzp\_aid.f}
+\\\hline
+%
+{\tt idzp\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is randomized, and must
+be initialized with routine {\tt idz\_frmi} & {\tt idzp\_asvd.f}
+\\\hline
+%
+{\tt idzp\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt idzp\_aid} & {\tt idz\_id.f} \\\hline
+%
+{\tt idzp\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified precision of the decomposition &
+{\tt idz\_qrpiv.f} \\\hline
+%
+{\tt idzp\_rid} & computes the ID, to a specified precision, of a
+matrix specified by a routine for applying its adjoint to arbitrary
+vectors; this routine is randomized & {\tt idzp\_rid.f} \\\hline
+%
+{\tt idzp\_rsvd} & computes the SVD, to a specified precision, of a
+matrix specified by routines for applying the matrix and its adjoint
+to arbitrary vectors; this routine is randomized & {\tt idzp\_rsvd.f}
+\\\hline
+%
+{\tt idzp\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt idzp\_asvd} & {\tt idz\_svd.f} \\\hline
+%
+{\tt idzr\_aid} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized by routine {\tt idzr\_aidi} & {\tt idzr\_aid.f} \\\hline
+%
+{\tt idzr\_aidi} & initializes routine {\tt idzr\_aid} &
+{\tt idzr\_aid.f} \\\hline
+%
+{\tt idzr\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized with routine {\tt idzr\_aidi} & {\tt idzr\_asvd.f}
+\\\hline
+%
+{\tt idzr\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt idzr\_aid} & {\tt idz\_id.f} \\\hline
+%
+{\tt idzr\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified rank of the decomposition & {\tt idz\_qrpiv.f}
+\\\hline
+%
+{\tt idzr\_rid} & computes the ID, to a specified rank, of a matrix
+specified by a routine for applying its adjoint to arbitrary vectors;
+this routine is randomized & {\tt idzr\_rid.f} \\\hline
+%
+{\tt idzr\_rsvd} & computes the SVD, to a specified rank, of a matrix
+specified by routines for applying the matrix and its adjoint to
+arbitrary vectors; this routine is randomized & {\tt idzr\_rsvd.f}
+\\\hline
+%
+{\tt idzr\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt idzr\_asvd} & {\tt idz\_svd.f} \\
+%
+\end{supertabular}
+\end{center}
+
+
+
+\section{Documentation in the source codes}
+
+Each routine in the source codes includes documentation
+in the comments immediately following the declaration
+of the subroutine's calling sequence.
+This documentation describes the purpose of the routine,
+the input and output variables, and the required work arrays (if any).
+This documentation also cites relevant references.
+Please pay attention to the {\it N.B.}'s;
+{\it N.B.} stands for {\it nota bene} (Latin for ``note well'')
+and highlights important information about the routines.
+
+
+
+\section{Notation and decompositions}
+\label{defs}
+
+This section sets notational conventions employed
+in this documentation and the associated software,
+and defines both the singular value decomposition (SVD)
+and the interpolative decomposition (ID).
+For information concerning other mathematical objects
+used in the code (such as Householder transformations,
+pivoted $QR$ decompositions, and discrete and fast Fourier transforms
+--- DFTs and FFTs), see, for example,~\cite{golub-van_loan}.
+For detailed descriptions and proofs of the mathematical facts
+discussed in the present section, see, for example,
+\cite{golub-van_loan} and the references
+in~\cite{halko-martinsson-tropp}.
+
+Throughout this document and the accompanying software distribution,
+$\| \x \|$ always denotes the Euclidean norm of the vector $\x$,
+and $\| A \|$ always denotes the spectral norm of the matrix $A$.
+Subsection~\ref{Euclidean} below defines the Euclidean norm;
+Subsection~\ref{spectral} below defines the spectral norm.
+We use $A^*$ to denote the adjoint of the matrix $A$.
+
+
+\subsection{Euclidean norm}
+\label{Euclidean}
+
+For any positive integer $n$, and vector $\x$ of length $n$,
+the Euclidean ($l^2$) norm $\| \x \|$ is
+%
+\begin{equation}
+\| \x \| = \sqrt{ \sum_{k=1}^n |x_k|^2 },
+\end{equation}
+%
+where $x_1$,~$x_2$, \dots, $x_{n-1}$,~$x_n$ are the entries of $\x$.
+
+
+\subsection{Spectral norm}
+\label{spectral}
+
+For any positive integers $m$ and $n$, and $m \times n$ matrix $A$,
+the spectral ($l^2$ operator) norm $\| A \|$ is
+%
+\begin{equation}
+\| A_{m \times n} \|
+= \max \frac{\| A_{m \times n} \, \x_{n \times 1} \|}
+ {\| \x_{n \times 1} \|},
+\end{equation}
+%
+where the $\max$ is taken over all $n \times 1$ column vectors $\x$
+such that $\| \x \| \ne 0$.
+
+
+\subsection{Singular value decomposition (SVD)}
+
+For any positive real number $\epsilon$,
+positive integers $k$, $m$, and $n$ with $k \le m$ and $k \le n$,
+and any $m \times n$ matrix $A$,
+a rank-$k$ approximation to $A$ in the form of an SVD
+(to precision $\epsilon$) consists of an $m \times k$ matrix $U$
+whose columns are orthonormal, an $n \times k$ matrix $V$
+whose columns are orthonormal, and a diagonal $k \times k$ matrix
+$\Sigma$ with diagonal entries
+$\Sigma_{1,1} \ge \Sigma_{2,2} \ge \dots \ge \Sigma_{k-1,k-1}
+ \ge \Sigma_{k,k} \ge 0$,
+such that
+%
+\begin{equation}
+\| A_{m \times n} - U_{m \times k} \, \Sigma_{k \times k}
+ \, (V^*)_{k \times n} \| \le \epsilon.
+\end{equation}
+%
+The product $U \, \Sigma \, V^*$ is known as an SVD.
+The columns of $U$ are known as left singular vectors;
+the columns of $V$ are known as right singular vectors.
+The diagonal entries of $\Sigma$ are known as singular values.
+
+When $k = m$ or $k = n$, and $A = U \, \Sigma \, V^*$,
+then $U \, \Sigma \, V^*$ is known as the SVD
+of $A$; the columns of $U$ are the left singular vectors of $A$,
+the columns of $V$ are the right singular vectors of $A$,
+and the diagonal entries of $\Sigma$ are the singular values of $A$.
+For any positive integer $k$ with $k < m$ and $k < n$,
+there exists a rank-$k$ approximation to $A$ in the form of an SVD,
+to precision $\sigma_{k+1}$, where $\sigma_{k+1}$ is the $(k+1)^\st$
+greatest singular value of $A$.
+
+
+\subsection{Interpolative decomposition (ID)}
+
+For any positive real number $\epsilon$,
+positive integers $k$, $m$, and $n$ with $k \le m$ and $k \le n$,
+and any $m \times n$ matrix $A$,
+a rank-$k$ approximation to $A$ in the form of an ID
+(to precision $\epsilon$) consists of a $k \times n$ matrix $P$,
+and an $m \times k$ matrix $B$ whose columns constitute a subset
+of the columns of $A$, such that
+%
+\begin{enumerate}
+\item $\| A_{m \times n} - B_{m \times k} \, P_{k \times n} \|
+ \le \epsilon$,
+\item some subset of the columns of $P$ makes up the $k \times k$
+ identity matrix, and
+\item every entry of $P$ has an absolute value less than or equal
+ to a reasonably small positive real number, say 2.
+\end{enumerate}
+%
+The product $B \, P$ is known as an ID.
+The matrix $P$ is known as the projection or interpolation matrix
+of the ID. Property~1 above approximates each column of $A$
+via a linear combination of the columns of $B$
+(which are themselves columns of $A$), with the coefficients
+in the linear combination given by the entries of $P$.
+
+The interpolative decomposition is ``interpolative''
+due to Property~2 above. The ID is numerically stable
+due to Property~3 above.
+It follows from Property~2 that the least ($k^\th$ greatest) singular value
+of $P$ is at least 1. Combining Properties~2 and~3 yields that
+%
+\begin{equation}
+\| P_{k \times n} \| \le \sqrt{4k(n-k)+1}.
+\end{equation}
+
+When $k = m$ or $k = n$, and $A = B \, P$,
+then $B \, P$ is known as the ID of $A$.
+For any positive integer $k$ with $k < m$ and $k < n$,
+there exists a rank-$k$ approximation to $A$ in the form of an ID,
+to precision $\sqrt{k(n-k)+1} \; \sigma_{k+1}$,
+where $\sigma_{k+1}$ is the $(k+1)^\st$ greatest singular value of $A$
+(in fact, there exists an ID in which every entry
+of the projection matrix $P$ has an absolute value less than or equal
+to 1).
+
+
+
+\section{Bug reports, feedback, and support}
+
+Please let us know about errors in the software or in the documentation
+via e-mail to {\tt tygert@aya.yale.edu}.
+We would also appreciate hearing about particular applications of the codes,
+especially in the form of journal articles
+e-mailed to {\tt tygert@aya.yale.edu}.
+Mathematical and technical support may also be available via e-mail. Enjoy!
+
+
+
+\bibliographystyle{siam}
+\bibliography{doc}
+
+
+\end{document}
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/src/lapack_deprecations/LICENSE b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/src/lapack_deprecations/LICENSE
new file mode 100644
index 0000000..8d713b6
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/src/lapack_deprecations/LICENSE
@@ -0,0 +1,48 @@
+Copyright (c) 1992-2015 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+Copyright (c) 2000-2015 The University of California Berkeley. All
+ rights reserved.
+Copyright (c) 2006-2015 The University of Colorado Denver. All rights
+ reserved.
+
+$COPYRIGHT$
+
+Additional copyrights may follow
+
+$HEADER$
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+- Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+The copyright holders provide no reassurances that the source code
+provided does not infringe any patent, copyright, or any other
+intellectual property rights of third parties. The copyright holders
+disclaim any liability to any recipient for claims brought against
+recipient by any third party for infringement of that parties
+intellectual property rights.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_15_data.npz b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_15_data.npz
new file mode 100644
index 0000000..31a7dc6
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_15_data.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_18_data.npz b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_18_data.npz
new file mode 100644
index 0000000..6bd78dc
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_18_data.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_19_data.npz b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_19_data.npz
new file mode 100644
index 0000000..3564000
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_19_data.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_20_data.npz b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_20_data.npz
new file mode 100644
index 0000000..e68e5a2
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_20_data.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_6_data.npz b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_6_data.npz
new file mode 100644
index 0000000..e70ff73
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/carex_6_data.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/gendare_20170120_data.npz b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/gendare_20170120_data.npz
new file mode 100644
index 0000000..22cb129
Binary files /dev/null and b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/data/gendare_20170120_data.npz differ
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_basic.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_basic.py
new file mode 100644
index 0000000..fe44665
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_basic.py
@@ -0,0 +1,1615 @@
+import warnings
+import itertools
+import numpy as np
+from numpy import (arange, array, dot, zeros, identity, conjugate, transpose,
+ float32)
+import numpy.linalg as linalg
+from numpy.random import random
+
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+ assert_array_almost_equal, assert_allclose,
+ assert_array_equal, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.linalg import (solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,
+ solve_banded, solveh_banded, solve_triangular,
+ solve_circulant, circulant, LinAlgError, block_diag,
+ matrix_balance, LinAlgWarning)
+
+from scipy.linalg._testutils import assert_no_overwrite
+from scipy._lib._testutils import check_free_memory
+from scipy.linalg.blas import HAS_ILP64
+
+REAL_DTYPES = [np.float32, np.float64, np.longdouble]
+COMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def _eps_cast(dtyp):
+ """Get the epsilon for dtype, possibly downcast to BLAS types."""
+ dt = dtyp
+ if dt == np.longdouble:
+ dt = np.float64
+ elif dt == np.clongdouble:
+ dt = np.complex128
+ return np.finfo(dt).eps
+
+
+class TestSolveBanded(object):
+
+ def test_real(self):
+ a = array([[1.0, 20, 0, 0],
+ [-30, 4, 6, 0],
+ [2, 1, 20, 2],
+ [0, -1, 7, 14]])
+ ab = array([[0.0, 20, 6, 2],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0],
+ [2, -1, 0, 0]])
+ l, u = 2, 1
+ b4 = array([10.0, 0.0, 2.0, 14.0])
+ b4by1 = b4.reshape(-1, 1)
+ b4by2 = array([[2, 1],
+ [-30, 4],
+ [2, 3],
+ [1, 3]])
+ b4by4 = array([[1, 0, 0, 0],
+ [0, 0, 0, 1],
+ [0, 1, 0, 0],
+ [0, 1, 0, 0]])
+ for b in [b4, b4by1, b4by2, b4by4]:
+ x = solve_banded((l, u), ab, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_complex(self):
+ a = array([[1.0, 20, 0, 0],
+ [-30, 4, 6, 0],
+ [2j, 1, 20, 2j],
+ [0, -1, 7, 14]])
+ ab = array([[0.0, 20, 6, 2j],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0],
+ [2j, -1, 0, 0]])
+ l, u = 2, 1
+ b4 = array([10.0, 0.0, 2.0, 14.0j])
+ b4by1 = b4.reshape(-1, 1)
+ b4by2 = array([[2, 1],
+ [-30, 4],
+ [2, 3],
+ [1, 3]])
+ b4by4 = array([[1, 0, 0, 0],
+ [0, 0, 0, 1j],
+ [0, 1, 0, 0],
+ [0, 1, 0, 0]])
+ for b in [b4, b4by1, b4by2, b4by4]:
+ x = solve_banded((l, u), ab, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_tridiag_real(self):
+ ab = array([[0.0, 20, 6, 2],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0]])
+ a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(
+ ab[2, :-1], -1)
+ b4 = array([10.0, 0.0, 2.0, 14.0])
+ b4by1 = b4.reshape(-1, 1)
+ b4by2 = array([[2, 1],
+ [-30, 4],
+ [2, 3],
+ [1, 3]])
+ b4by4 = array([[1, 0, 0, 0],
+ [0, 0, 0, 1],
+ [0, 1, 0, 0],
+ [0, 1, 0, 0]])
+ for b in [b4, b4by1, b4by2, b4by4]:
+ x = solve_banded((1, 1), ab, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_tridiag_complex(self):
+ ab = array([[0.0, 20, 6, 2j],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0]])
+ a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(
+ ab[2, :-1], -1)
+ b4 = array([10.0, 0.0, 2.0, 14.0j])
+ b4by1 = b4.reshape(-1, 1)
+ b4by2 = array([[2, 1],
+ [-30, 4],
+ [2, 3],
+ [1, 3]])
+ b4by4 = array([[1, 0, 0, 0],
+ [0, 0, 0, 1],
+ [0, 1, 0, 0],
+ [0, 1, 0, 0]])
+ for b in [b4, b4by1, b4by2, b4by4]:
+ x = solve_banded((1, 1), ab, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_check_finite(self):
+ a = array([[1.0, 20, 0, 0],
+ [-30, 4, 6, 0],
+ [2, 1, 20, 2],
+ [0, -1, 7, 14]])
+ ab = array([[0.0, 20, 6, 2],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0],
+ [2, -1, 0, 0]])
+ l, u = 2, 1
+ b4 = array([10.0, 0.0, 2.0, 14.0])
+ x = solve_banded((l, u), ab, b4, check_finite=False)
+ assert_array_almost_equal(dot(a, x), b4)
+
+ def test_bad_shape(self):
+ ab = array([[0.0, 20, 6, 2],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0],
+ [2, -1, 0, 0]])
+ l, u = 2, 1
+ bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4)
+ assert_raises(ValueError, solve_banded, (l, u), ab, bad)
+ assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
+
+ # Values of (l,u) are not compatible with ab.
+ assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
+
+ def test_1x1(self):
+ b = array([[1., 2., 3.]])
+ x = solve_banded((1, 1), [[0], [2], [0]], b)
+ assert_array_equal(x, [[0.5, 1.0, 1.5]])
+ assert_equal(x.dtype, np.dtype('f8'))
+ assert_array_equal(b, [[1.0, 2.0, 3.0]])
+
+ def test_native_list_arguments(self):
+ a = [[1.0, 20, 0, 0],
+ [-30, 4, 6, 0],
+ [2, 1, 20, 2],
+ [0, -1, 7, 14]]
+ ab = [[0.0, 20, 6, 2],
+ [1, 4, 20, 14],
+ [-30, 1, 7, 0],
+ [2, -1, 0, 0]]
+ l, u = 2, 1
+ b = [10.0, 0.0, 2.0, 14.0]
+ x = solve_banded((l, u), ab, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+
+class TestSolveHBanded(object):
+
+ def test_01_upper(self):
+ # Solve
+ # [ 4 1 2 0] [1]
+ # [ 1 4 1 2] X = [4]
+ # [ 2 1 4 1] [1]
+ # [ 0 2 1 4] [2]
+ # with the RHS as a 1D array.
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, 1.0, 1.0, 1.0],
+ [4.0, 4.0, 4.0, 4.0]])
+ b = array([1.0, 4.0, 1.0, 2.0])
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+ def test_02_upper(self):
+ # Solve
+ # [ 4 1 2 0] [1 6]
+ # [ 1 4 1 2] X = [4 2]
+ # [ 2 1 4 1] [1 6]
+ # [ 0 2 1 4] [2 1]
+ #
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, 1.0, 1.0, 1.0],
+ [4.0, 4.0, 4.0, 4.0]])
+ b = array([[1.0, 6.0],
+ [4.0, 2.0],
+ [1.0, 6.0],
+ [2.0, 1.0]])
+ x = solveh_banded(ab, b)
+ expected = array([[0.0, 1.0],
+ [1.0, 0.0],
+ [0.0, 1.0],
+ [0.0, 0.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_03_upper(self):
+ # Solve
+ # [ 4 1 2 0] [1]
+ # [ 1 4 1 2] X = [4]
+ # [ 2 1 4 1] [1]
+ # [ 0 2 1 4] [2]
+        # with the RHS as a 2D array with shape (4,1).
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, 1.0, 1.0, 1.0],
+ [4.0, 4.0, 4.0, 4.0]])
+ b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1)
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1))
+
+ def test_01_lower(self):
+ # Solve
+ # [ 4 1 2 0] [1]
+ # [ 1 4 1 2] X = [4]
+ # [ 2 1 4 1] [1]
+ # [ 0 2 1 4] [2]
+ #
+ ab = array([[4.0, 4.0, 4.0, 4.0],
+ [1.0, 1.0, 1.0, -99],
+ [2.0, 2.0, 0.0, 0.0]])
+ b = array([1.0, 4.0, 1.0, 2.0])
+ x = solveh_banded(ab, b, lower=True)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+ def test_02_lower(self):
+ # Solve
+ # [ 4 1 2 0] [1 6]
+ # [ 1 4 1 2] X = [4 2]
+ # [ 2 1 4 1] [1 6]
+ # [ 0 2 1 4] [2 1]
+ #
+ ab = array([[4.0, 4.0, 4.0, 4.0],
+ [1.0, 1.0, 1.0, -99],
+ [2.0, 2.0, 0.0, 0.0]])
+ b = array([[1.0, 6.0],
+ [4.0, 2.0],
+ [1.0, 6.0],
+ [2.0, 1.0]])
+ x = solveh_banded(ab, b, lower=True)
+ expected = array([[0.0, 1.0],
+ [1.0, 0.0],
+ [0.0, 1.0],
+ [0.0, 0.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_01_float32(self):
+ # Solve
+ # [ 4 1 2 0] [1]
+ # [ 1 4 1 2] X = [4]
+ # [ 2 1 4 1] [1]
+ # [ 0 2 1 4] [2]
+ #
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, 1.0, 1.0, 1.0],
+ [4.0, 4.0, 4.0, 4.0]], dtype=float32)
+ b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+ def test_02_float32(self):
+ # Solve
+ # [ 4 1 2 0] [1 6]
+ # [ 1 4 1 2] X = [4 2]
+ # [ 2 1 4 1] [1 6]
+ # [ 0 2 1 4] [2 1]
+ #
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, 1.0, 1.0, 1.0],
+ [4.0, 4.0, 4.0, 4.0]], dtype=float32)
+ b = array([[1.0, 6.0],
+ [4.0, 2.0],
+ [1.0, 6.0],
+ [2.0, 1.0]], dtype=float32)
+ x = solveh_banded(ab, b)
+ expected = array([[0.0, 1.0],
+ [1.0, 0.0],
+ [0.0, 1.0],
+ [0.0, 0.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_01_complex(self):
+ # Solve
+ # [ 4 -j 2 0] [2-j]
+ # [ j 4 -j 2] X = [4-j]
+ # [ 2 j 4 -j] [4+j]
+ # [ 0 2 j 4] [2+j]
+ #
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, -1.0j, -1.0j, -1.0j],
+ [4.0, 4.0, 4.0, 4.0]])
+ b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
+
+ def test_02_complex(self):
+ # Solve
+ # [ 4 -j 2 0] [2-j 2+4j]
+ # [ j 4 -j 2] X = [4-j -1-j]
+ # [ 2 j 4 -j] [4+j 4+2j]
+ # [ 0 2 j 4] [2+j j]
+ #
+ ab = array([[0.0, 0.0, 2.0, 2.0],
+ [-99, -1.0j, -1.0j, -1.0j],
+ [4.0, 4.0, 4.0, 4.0]])
+ b = array([[2-1j, 2+4j],
+ [4.0-1j, -1-1j],
+ [4.0+1j, 4+2j],
+ [2+1j, 1j]])
+ x = solveh_banded(ab, b)
+ expected = array([[0.0, 1.0j],
+ [1.0, 0.0],
+ [1.0, 1.0],
+ [0.0, 0.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_tridiag_01_upper(self):
+ # Solve
+ # [ 4 1 0] [1]
+ # [ 1 4 1] X = [4]
+ # [ 0 1 4] [1]
+ # with the RHS as a 1D array.
+ ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
+ b = array([1.0, 4.0, 1.0])
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+ def test_tridiag_02_upper(self):
+ # Solve
+ # [ 4 1 0] [1 4]
+ # [ 1 4 1] X = [4 2]
+ # [ 0 1 4] [1 4]
+ #
+ ab = array([[-99, 1.0, 1.0],
+ [4.0, 4.0, 4.0]])
+ b = array([[1.0, 4.0],
+ [4.0, 2.0],
+ [1.0, 4.0]])
+ x = solveh_banded(ab, b)
+ expected = array([[0.0, 1.0],
+ [1.0, 0.0],
+ [0.0, 1.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_tridiag_03_upper(self):
+ # Solve
+ # [ 4 1 0] [1]
+ # [ 1 4 1] X = [4]
+ # [ 0 1 4] [1]
+ # with the RHS as a 2D array with shape (3,1).
+ ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
+ b = array([1.0, 4.0, 1.0]).reshape(-1, 1)
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))
+
+ def test_tridiag_01_lower(self):
+ # Solve
+ # [ 4 1 0] [1]
+ # [ 1 4 1] X = [4]
+ # [ 0 1 4] [1]
+ #
+ ab = array([[4.0, 4.0, 4.0],
+ [1.0, 1.0, -99]])
+ b = array([1.0, 4.0, 1.0])
+ x = solveh_banded(ab, b, lower=True)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+ def test_tridiag_02_lower(self):
+ # Solve
+ # [ 4 1 0] [1 4]
+ # [ 1 4 1] X = [4 2]
+ # [ 0 1 4] [1 4]
+ #
+ ab = array([[4.0, 4.0, 4.0],
+ [1.0, 1.0, -99]])
+ b = array([[1.0, 4.0],
+ [4.0, 2.0],
+ [1.0, 4.0]])
+ x = solveh_banded(ab, b, lower=True)
+ expected = array([[0.0, 1.0],
+ [1.0, 0.0],
+ [0.0, 1.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_tridiag_01_float32(self):
+ # Solve
+ # [ 4 1 0] [1]
+ # [ 1 4 1] X = [4]
+ # [ 0 1 4] [1]
+ #
+ ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
+ b = array([1.0, 4.0, 1.0], dtype=float32)
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+ def test_tridiag_02_float32(self):
+ # Solve
+ # [ 4 1 0] [1 4]
+ # [ 1 4 1] X = [4 2]
+ # [ 0 1 4] [1 4]
+ #
+ ab = array([[-99, 1.0, 1.0],
+ [4.0, 4.0, 4.0]], dtype=float32)
+ b = array([[1.0, 4.0],
+ [4.0, 2.0],
+ [1.0, 4.0]], dtype=float32)
+ x = solveh_banded(ab, b)
+ expected = array([[0.0, 1.0],
+ [1.0, 0.0],
+ [0.0, 1.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_tridiag_01_complex(self):
+ # Solve
+ # [ 4 -j 0] [ -j]
+ # [ j 4 -j] X = [4-j]
+ # [ 0 j 4] [4+j]
+ #
+ ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
+ b = array([-1.0j, 4.0-1j, 4+1j])
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 1.0])
+
+ def test_tridiag_02_complex(self):
+ # Solve
+ # [ 4 -j 0] [ -j 4j]
+ # [ j 4 -j] X = [4-j -1-j]
+ # [ 0 j 4] [4+j 4 ]
+ #
+ ab = array([[-99, -1.0j, -1.0j],
+ [4.0, 4.0, 4.0]])
+ b = array([[-1j, 4.0j],
+ [4.0-1j, -1.0-1j],
+ [4.0+1j, 4.0]])
+ x = solveh_banded(ab, b)
+ expected = array([[0.0, 1.0j],
+ [1.0, 0.0],
+ [1.0, 1.0]])
+ assert_array_almost_equal(x, expected)
+
+ def test_check_finite(self):
+ # Solve
+ # [ 4 1 0] [1]
+ # [ 1 4 1] X = [4]
+ # [ 0 1 4] [1]
+ # with the RHS as a 1D array.
+ ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
+ b = array([1.0, 4.0, 1.0])
+ x = solveh_banded(ab, b, check_finite=False)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+ def test_bad_shapes(self):
+ ab = array([[-99, 1.0, 1.0],
+ [4.0, 4.0, 4.0]])
+ b = array([[1.0, 4.0],
+ [4.0, 2.0]])
+ assert_raises(ValueError, solveh_banded, ab, b)
+ assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
+ assert_raises(ValueError, solveh_banded, ab, [1.0])
+
+ def test_1x1(self):
+ x = solveh_banded([[1]], [[1, 2, 3]])
+ assert_array_equal(x, [[1.0, 2.0, 3.0]])
+ assert_equal(x.dtype, np.dtype('f8'))
+
+ def test_native_list_arguments(self):
+ # Same as test_01_upper, using python's native list.
+ ab = [[0.0, 0.0, 2.0, 2.0],
+ [-99, 1.0, 1.0, 1.0],
+ [4.0, 4.0, 4.0, 4.0]]
+ b = [1.0, 4.0, 1.0, 2.0]
+ x = solveh_banded(ab, b)
+ assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+
+class TestSolve(object):
+ def setup_method(self):
+ np.random.seed(1234)
+
+ def test_20Feb04_bug(self):
+ a = [[1, 1], [1.0, 0]] # ok
+ x0 = solve(a, [1, 0j])
+ assert_array_almost_equal(dot(a, x0), [1, 0])
+
+ # gives failure with clapack.zgesv(..,rowmajor=0)
+ a = [[1, 1], [1.2, 0]]
+ b = [1, 0j]
+ x0 = solve(a, b)
+ assert_array_almost_equal(dot(a, x0), [1, 0])
+
+ def test_simple(self):
+ a = [[1, 20], [-30, 4]]
+ for b in ([[1, 0], [0, 1]], [1, 0],
+ [[2, 1], [-30, 4]]):
+ x = solve(a, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_simple_sym(self):
+ a = [[2, 3], [3, 5]]
+ for lower in [0, 1]:
+ for b in ([[1, 0], [0, 1]], [1, 0]):
+ x = solve(a, b, sym_pos=1, lower=lower)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_simple_sym_complex(self):
+ a = [[5, 2], [2, 4]]
+ for b in [[1j, 0],
+ [[1j, 1j],
+ [0, 2]],
+ ]:
+ x = solve(a, b, sym_pos=1)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_simple_complex(self):
+ a = array([[5, 2], [2j, 4]], 'D')
+ for b in [[1j, 0],
+ [[1j, 1j],
+ [0, 2]],
+ [1, 0j],
+ array([1, 0], 'D'),
+ ]:
+ x = solve(a, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_nils_20Feb04(self):
+ n = 2
+ A = random([n, n])+random([n, n])*1j
+ X = zeros((n, n), 'D')
+ Ainv = inv(A)
+ R = identity(n)+identity(n)*0j
+ for i in arange(0, n):
+ r = R[:, i]
+ X[:, i] = solve(A, r)
+ assert_array_almost_equal(X, Ainv)
+
+ def test_random(self):
+
+ n = 20
+ a = random([n, n])
+ for i in range(n):
+ a[i, i] = 20*(.1+a[i, i])
+ for i in range(4):
+ b = random([n, 3])
+ x = solve(a, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_random_complex(self):
+ n = 20
+ a = random([n, n]) + 1j * random([n, n])
+ for i in range(n):
+ a[i, i] = 20*(.1+a[i, i])
+ for i in range(2):
+ b = random([n, 3])
+ x = solve(a, b)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_random_sym(self):
+ n = 20
+ a = random([n, n])
+ for i in range(n):
+ a[i, i] = abs(20*(.1+a[i, i]))
+ for j in range(i):
+ a[i, j] = a[j, i]
+ for i in range(4):
+ b = random([n])
+ x = solve(a, b, sym_pos=1)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_random_sym_complex(self):
+ n = 20
+ a = random([n, n])
+ a = a + 1j*random([n, n])
+ for i in range(n):
+ a[i, i] = abs(20*(.1+a[i, i]))
+ for j in range(i):
+ a[i, j] = conjugate(a[j, i])
+ b = random([n])+2j*random([n])
+ for i in range(2):
+ x = solve(a, b, sym_pos=1)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_check_finite(self):
+ a = [[1, 20], [-30, 4]]
+ for b in ([[1, 0], [0, 1]], [1, 0],
+ [[2, 1], [-30, 4]]):
+ x = solve(a, b, check_finite=False)
+ assert_array_almost_equal(dot(a, x), b)
+
+ def test_scalar_a_and_1D_b(self):
+ a = 1
+ b = [1, 2, 3]
+ x = solve(a, b)
+ assert_array_almost_equal(x.ravel(), b)
+ assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape')
+
+ def test_simple2(self):
+ a = np.array([[1.80, 2.88, 2.05, -0.89],
+ [525.00, -295.00, -95.00, -380.00],
+ [1.58, -2.69, -2.90, -1.04],
+ [-1.11, -0.66, -0.59, 0.80]])
+
+ b = np.array([[9.52, 18.47],
+ [2435.00, 225.00],
+ [0.77, -13.28],
+ [-6.22, -6.21]])
+
+ x = solve(a, b)
+ assert_array_almost_equal(x, np.array([[1., -1, 3, -5],
+ [3, 2, 4, 1]]).T)
+
+ def test_simple_complex2(self):
+ a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j],
+ [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j],
+ [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j],
+ [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]])
+
+ b = np.array([[26.26+51.78j, 31.32-6.70j],
+ [64.30-86.80j, 158.60-14.20j],
+ [-5.75+25.31j, -2.15+30.19j],
+ [1.16+2.57j, -2.56+7.55j]])
+
+ x = solve(a, b)
+ assert_array_almost_equal(x, np. array([[1+1.j, -1-2.j],
+ [2-3.j, 5+1.j],
+ [-4-5.j, -3+4.j],
+ [6.j, 2-3.j]]))
+
+ def test_hermitian(self):
+ # An upper triangular matrix will be used for hermitian matrix a
+ a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j],
+ [0, -4.63, -1.84+0.03j, 2.21+0.21j],
+ [0, 0, -8.87, 1.58-0.90j],
+ [0, 0, 0, -1.36]])
+ b = np.array([[2.98-10.18j, 28.68-39.89j],
+ [-9.58+3.88j, -24.79-8.40j],
+ [-0.77-16.05j, 4.23-70.02j],
+ [7.79+5.48j, -35.39+18.01j]])
+ res = np.array([[2.+1j, -8+6j],
+ [3.-2j, 7-2j],
+ [-1+2j, -1+5j],
+ [1.-1j, 3-4j]])
+ x = solve(a, b, assume_a='her')
+ assert_array_almost_equal(x, res)
+ # Also conjugate a and test for lower triangular data
+ x = solve(a.conj().T, b, assume_a='her', lower=True)
+ assert_array_almost_equal(x, res)
+
+ def test_pos_and_sym(self):
+ A = np.arange(1, 10).reshape(3, 3)
+ x = solve(np.tril(A)/9, np.ones(3), assume_a='pos')
+ assert_array_almost_equal(x, [9., 1.8, 1.])
+ x = solve(np.tril(A)/9, np.ones(3), assume_a='sym')
+ assert_array_almost_equal(x, [9., 1.8, 1.])
+
+ def test_singularity(self):
+ a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1],
+ [1, 1, 1, 0, 0, 0, 1, 0, 1],
+ [0, 1, 1, 0, 0, 0, 1, 0, 1],
+ [1, 0, 1, 1, 1, 1, 0, 0, 0],
+ [1, 0, 1, 1, 1, 1, 0, 0, 0],
+ [1, 0, 1, 1, 1, 1, 0, 0, 0],
+ [1, 0, 1, 1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1]])
+ b = np.arange(9)[:, None]
+ assert_raises(LinAlgError, solve, a, b)
+
+ def test_ill_condition_warning(self):
+ a = np.array([[1, 1], [1+1e-16, 1-1e-16]])
+ b = np.ones(2)
+ with warnings.catch_warnings():
+ warnings.simplefilter('error')
+ assert_raises(LinAlgWarning, solve, a, b)
+
+ def test_empty_rhs(self):
+ a = np.eye(2)
+ b = [[], []]
+ x = solve(a, b)
+ assert_(x.size == 0, 'Returned array is not empty')
+ assert_(x.shape == (2, 0), 'Returned empty array shape is wrong')
+
+ def test_multiple_rhs(self):
+ a = np.eye(2)
+ b = np.random.rand(2, 3, 4)
+ x = solve(a, b)
+ assert_array_almost_equal(x, b)
+
+ def test_transposed_keyword(self):
+ A = np.arange(9).reshape(3, 3) + 1
+ x = solve(np.tril(A)/9, np.ones(3), transposed=True)
+ assert_array_almost_equal(x, [1.2, 0.2, 1])
+ x = solve(np.tril(A)/9, np.ones(3), transposed=False)
+ assert_array_almost_equal(x, [9, -5.4, -1.2])
+
+ def test_transposed_notimplemented(self):
+ a = np.eye(3).astype(complex)
+ with assert_raises(NotImplementedError):
+ solve(a, a, transposed=True)
+
+ def test_nonsquare_a(self):
+ assert_raises(ValueError, solve, [1, 2], 1)
+
+ def test_size_mismatch_with_1D_b(self):
+ assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3))
+ assert_raises(ValueError, solve, np.eye(3), np.ones(4))
+
+ def test_assume_a_keyword(self):
+ assert_raises(ValueError, solve, 1, 1, assume_a='zxcv')
+
+ @pytest.mark.skip(reason="Failure on OS X (gh-7500), "
+ "crash on Windows (gh-8064)")
+ def test_all_type_size_routine_combinations(self):
+ sizes = [10, 100]
+ assume_as = ['gen', 'sym', 'pos', 'her']
+ dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+ for size, assume_a, dtype in itertools.product(sizes, assume_as,
+ dtypes):
+ is_complex = dtype in (np.complex64, np.complex128)
+ if assume_a == 'her' and not is_complex:
+ continue
+
+ err_msg = ("Failed for size: {}, assume_a: {},"
+ "dtype: {}".format(size, assume_a, dtype))
+
+ a = np.random.randn(size, size).astype(dtype)
+ b = np.random.randn(size).astype(dtype)
+ if is_complex:
+ a = a + (1j*np.random.randn(size, size)).astype(dtype)
+
+ if assume_a == 'sym': # Can still be complex but only symmetric
+ a = a + a.T
+ elif assume_a == 'her': # Handle hermitian matrices here instead
+ a = a + a.T.conj()
+ elif assume_a == 'pos':
+ a = a.conj().T.dot(a) + 0.1*np.eye(size)
+
+ tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6
+
+ if assume_a in ['gen', 'sym', 'her']:
+ # We revert the tolerance from before
+ # 4b4a6e7c34fa4060533db38f9a819b98fa81476c
+ if dtype in (np.float32, np.complex64):
+ tol *= 10
+
+ x = solve(a, b, assume_a=assume_a)
+ assert_allclose(a.dot(x), b,
+ atol=tol * size,
+ rtol=tol * size,
+ err_msg=err_msg)
+
+ if assume_a == 'sym' and dtype not in (np.complex64,
+ np.complex128):
+ x = solve(a, b, assume_a=assume_a, transposed=True)
+ assert_allclose(a.dot(x), b,
+ atol=tol * size,
+ rtol=tol * size,
+ err_msg=err_msg)
+
+
+class TestSolveTriangular(object):
+    # Tests for scipy.linalg.solve_triangular: lower/upper systems,
+    # transpose flags (trans=0/1/2), complex matrices and check_finite.
+
+    def test_simple(self):
+        """
+        solve_triangular on a simple 2x2 matrix.
+        """
+        A = array([[1, 0], [1, 2]])
+        b = [1, 1]
+        sol = solve_triangular(A, b, lower=True)
+        assert_array_almost_equal(sol, [1, 0])
+
+        # check that it works also for non-contiguous matrices
+        sol = solve_triangular(A.T, b, lower=False)
+        assert_array_almost_equal(sol, [.5, .5])
+
+        # and that it gives the same result as trans=1
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [.5, .5])
+
+        # matrix right-hand side: solve A^T X = I
+        b = identity(2)
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
+
+    def test_simple_complex(self):
+        """
+        solve_triangular on a simple 2x2 complex matrix
+        """
+        A = array([[1+1j, 0], [1j, 2]])
+        b = identity(2)
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])
+
+        # check other option combinations with complex rhs
+        b = np.diag([1+1j, 1+2j])
+        sol = solve_triangular(A, b, lower=True, trans=0)
+        assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
+
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
+
+        # trans=2 means conjugate transpose
+        sol = solve_triangular(A, b, lower=True, trans=2)
+        assert_array_almost_equal(sol, [[1j, -0.75-0.25j], [0, 0.5+1j]])
+
+        sol = solve_triangular(A.T, b, lower=False, trans=0)
+        assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
+
+        sol = solve_triangular(A.T, b, lower=False, trans=1)
+        assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
+
+        sol = solve_triangular(A.T, b, lower=False, trans=2)
+        assert_array_almost_equal(sol, [[1j, 0], [-0.5, 0.5+1j]])
+
+    def test_check_finite(self):
+        """
+        solve_triangular on a simple 2x2 matrix.
+        """
+        # same system as test_simple but skipping the finiteness check
+        A = array([[1, 0], [1, 2]])
+        b = [1, 1]
+        sol = solve_triangular(A, b, lower=True, check_finite=False)
+        assert_array_almost_equal(sol, [1, 0])
+
+
+class TestInv(object):
+    # Tests for scipy.linalg.inv on real/complex, fixed and random inputs.
+
+    def setup_method(self):
+        # fixed seed so the random tests are reproducible
+        np.random.seed(1234)
+
+    def test_simple(self):
+        a = [[1, 2], [3, 4]]
+        a_inv = inv(a)
+        assert_array_almost_equal(dot(a, a_inv), np.eye(2))
+        a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]]
+        a_inv = inv(a)
+        assert_array_almost_equal(dot(a, a_inv), np.eye(3))
+
+    def test_random(self):
+        n = 20
+        for i in range(4):
+            a = random([n, n])
+            # boost the diagonal to keep the matrix well-conditioned
+            for i in range(n):
+                a[i, i] = 20*(.1+a[i, i])
+            a_inv = inv(a)
+            assert_array_almost_equal(dot(a, a_inv),
+                                      identity(n))
+
+    def test_simple_complex(self):
+        a = [[1, 2], [3, 4j]]
+        a_inv = inv(a)
+        assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
+
+    def test_random_complex(self):
+        n = 20
+        for i in range(4):
+            a = random([n, n])+2j*random([n, n])
+            # boost the diagonal to keep the matrix well-conditioned
+            for i in range(n):
+                a[i, i] = 20*(.1+a[i, i])
+            a_inv = inv(a)
+            assert_array_almost_equal(dot(a, a_inv),
+                                      identity(n))
+
+    def test_check_finite(self):
+        # inv with the finiteness check disabled
+        a = [[1, 2], [3, 4]]
+        a_inv = inv(a, check_finite=False)
+        assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
+
+
+class TestDet(object):
+    # Tests for scipy.linalg.det, cross-checked against numpy.linalg.det.
+
+    def setup_method(self):
+        # fixed seed so the random tests are reproducible
+        np.random.seed(1234)
+
+    def test_simple(self):
+        a = [[1, 2], [3, 4]]
+        a_det = det(a)
+        assert_almost_equal(a_det, -2.0)
+
+    def test_simple_complex(self):
+        a = [[1, 2], [3, 4j]]
+        a_det = det(a)
+        assert_almost_equal(a_det, -6+4j)
+
+    def test_random(self):
+        # compare against numpy's determinant on random real matrices
+        basic_det = linalg.det
+        n = 20
+        for i in range(4):
+            a = random([n, n])
+            d1 = det(a)
+            d2 = basic_det(a)
+            assert_almost_equal(d1, d2)
+
+    def test_random_complex(self):
+        # compare against numpy's determinant on random complex matrices
+        basic_det = linalg.det
+        n = 20
+        for i in range(4):
+            a = random([n, n]) + 2j*random([n, n])
+            d1 = det(a)
+            d2 = basic_det(a)
+            assert_allclose(d1, d2, rtol=1e-13)
+
+    def test_check_finite(self):
+        # det with the finiteness check disabled
+        a = [[1, 2], [3, 4]]
+        a_det = det(a, check_finite=False)
+        assert_almost_equal(a_det, -2.0)
+
+
+def direct_lstsq(a, b, cmplx=0):
+    """Reference least-squares solution via the normal equations.
+
+    Solves (a^H a) x = a^H b (conjugating when ``cmplx`` is truthy),
+    used as an independent check against ``lstsq`` results.
+    """
+    at = transpose(a)
+    if cmplx:
+        at = conjugate(at)
+    a1 = dot(at, a)
+    b1 = dot(at, b)
+    return solve(a1, b1)
+
+
+class TestLstsq(object):
+    # Tests for scipy.linalg.lstsq across all LAPACK drivers,
+    # real/complex dtypes, overwrite flags, and exact / overdetermined /
+    # underdetermined / zero-size systems.
+
+    # None lets lstsq pick its default driver
+    lapack_drivers = ('gelsd', 'gelss', 'gelsy', None)
+
+    def setup_method(self):
+        # fixed seed so the random tests are reproducible
+        np.random.seed(1234)
+
+    def test_simple_exact(self):
+        for dtype in REAL_DTYPES:
+            a = np.array([[1, 20], [-30, 4]], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # rhs as matrix, vector, and a different matrix
+                    for bt in (((1, 0), (0, 1)), (1, 0),
+                               ((2, 1), (-30, 4))):
+                        # Store values in case they are overwritten
+                        # later
+                        a1 = a.copy()
+                        b = np.array(bt, dtype=dtype)
+                        b1 = b.copy()
+                        out = lstsq(a1, b1,
+                                    lapack_driver=lapack_driver,
+                                    overwrite_a=overwrite,
+                                    overwrite_b=overwrite)
+                        x = out[0]
+                        r = out[2]
+                        assert_(r == 2,
+                                'expected efficient rank 2, got %s' % r)
+                        assert_allclose(dot(a, x), b,
+                                        atol=25 * _eps_cast(a1.dtype),
+                                        rtol=25 * _eps_cast(a1.dtype),
+                                        err_msg="driver: %s" % lapack_driver)
+
+    def test_simple_overdet(self):
+        for dtype in REAL_DTYPES:
+            a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype)
+            b = np.array([1, 2, 3], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # Store values in case they are overwritten later
+                    a1 = a.copy()
+                    b1 = b.copy()
+                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                overwrite_a=overwrite,
+                                overwrite_b=overwrite)
+                    x = out[0]
+                    # gelsy does not return residuals; recompute them
+                    if lapack_driver == 'gelsy':
+                        residuals = np.sum((b - a.dot(x))**2)
+                    else:
+                        residuals = out[1]
+                    r = out[2]
+                    assert_(r == 2, 'expected efficient rank 2, got %s' % r)
+                    assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
+                                    residuals,
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+                    assert_allclose(x, (-0.428571428571429, 0.85714285714285),
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+
+    def test_simple_overdet_complex(self):
+        for dtype in COMPLEX_DTYPES:
+            a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype)
+            b = np.array([1, 2+4j, 3], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # Store values in case they are overwritten later
+                    a1 = a.copy()
+                    b1 = b.copy()
+                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                overwrite_a=overwrite,
+                                overwrite_b=overwrite)
+
+                    x = out[0]
+                    # gelsy does not return residuals; recompute them
+                    if lapack_driver == 'gelsy':
+                        res = b - a.dot(x)
+                        residuals = np.sum(res * res.conj())
+                    else:
+                        residuals = out[1]
+                    r = out[2]
+                    assert_(r == 2, 'expected efficient rank 2, got %s' % r)
+                    assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
+                                    residuals,
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+                    assert_allclose(
+                        x, (-0.4831460674157303 + 0.258426966292135j,
+                            0.921348314606741 + 0.292134831460674j),
+                        rtol=25 * _eps_cast(a1.dtype),
+                        atol=25 * _eps_cast(a1.dtype),
+                        err_msg="driver: %s" % lapack_driver)
+
+    def test_simple_underdet(self):
+        for dtype in REAL_DTYPES:
+            a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
+            b = np.array([1, 2], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # Store values in case they are overwritten later
+                    a1 = a.copy()
+                    b1 = b.copy()
+                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                overwrite_a=overwrite,
+                                overwrite_b=overwrite)
+
+                    x = out[0]
+                    r = out[2]
+                    assert_(r == 2, 'expected efficient rank 2, got %s' % r)
+                    # minimum-norm solution of the underdetermined system
+                    assert_allclose(x, (-0.055555555555555, 0.111111111111111,
+                                        0.277777777777777),
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+
+    def test_random_exact(self):
+        for dtype in REAL_DTYPES:
+            for n in (20, 200):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, n]), dtype=dtype)
+                        # boost the diagonal for conditioning
+                        for i in range(n):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(4):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1,
+                                        lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == n, 'expected efficient rank %s, '
+                                    'got %s' % (n, r))
+                            # looser tolerance for single precision
+                            if dtype is np.float32:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=500 * _eps_cast(a1.dtype),
+                                          atol=500 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+                            else:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=1000 * _eps_cast(a1.dtype),
+                                          atol=1000 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+
+    def test_random_complex_exact(self):
+        for dtype in COMPLEX_DTYPES:
+            for n in (20, 200):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, n]) + 1j*random([n, n]),
+                                       dtype=dtype)
+                        # boost the diagonal for conditioning
+                        for i in range(n):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(2):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == n, 'expected efficient rank %s, '
+                                    'got %s' % (n, r))
+                            # looser tolerance for single precision
+                            if dtype is np.complex64:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=400 * _eps_cast(a1.dtype),
+                                          atol=400 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+                            else:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=1000 * _eps_cast(a1.dtype),
+                                          atol=1000 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+
+    def test_random_overdet(self):
+        for dtype in REAL_DTYPES:
+            for (n, m) in ((20, 15), (200, 2)):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, m]), dtype=dtype)
+                        # boost the diagonal for conditioning
+                        for i in range(m):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(4):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1,
+                                        lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == m, 'expected efficient rank %s, '
+                                    'got %s' % (m, r))
+                            # compare against the normal-equations solution
+                            assert_allclose(
+                                          x, direct_lstsq(a, b, cmplx=0),
+                                          rtol=25 * _eps_cast(a1.dtype),
+                                          atol=25 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+
+    def test_random_complex_overdet(self):
+        for dtype in COMPLEX_DTYPES:
+            for (n, m) in ((20, 15), (200, 2)):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, m]) + 1j*random([n, m]),
+                                       dtype=dtype)
+                        # boost the diagonal for conditioning
+                        for i in range(m):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(2):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten
+                            # later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1,
+                                        lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == m, 'expected efficient rank %s, '
+                                    'got %s' % (m, r))
+                            # compare against the normal-equations solution
+                            assert_allclose(
+                                      x, direct_lstsq(a, b, cmplx=1),
+                                      rtol=25 * _eps_cast(a1.dtype),
+                                      atol=25 * _eps_cast(a1.dtype),
+                                      err_msg="driver: %s" % lapack_driver)
+
+    def test_check_finite(self):
+        with suppress_warnings() as sup:
+            # On (some) OSX this tests triggers a warning (gh-7538)
+            sup.filter(RuntimeWarning,
+                       "internal gelsd driver lwork query error,.*"
+                       "Falling back to 'gelss' driver.")
+
+            at = np.array(((1, 20), (-30, 4)))
+            for dtype, bt, lapack_driver, overwrite, check_finite in \
+                itertools.product(REAL_DTYPES,
+                                  (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))),
+                                  TestLstsq.lapack_drivers,
+                                  (True, False),
+                                  (True, False)):
+
+                a = at.astype(dtype)
+                b = np.array(bt, dtype=dtype)
+                # Store values in case they are overwritten
+                # later
+                a1 = a.copy()
+                b1 = b.copy()
+                out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                            check_finite=check_finite, overwrite_a=overwrite,
+                            overwrite_b=overwrite)
+                x = out[0]
+                r = out[2]
+                assert_(r == 2, 'expected efficient rank 2, got %s' % r)
+                assert_allclose(dot(a, x), b,
+                                rtol=25 * _eps_cast(a.dtype),
+                                atol=25 * _eps_cast(a.dtype),
+                                err_msg="driver: %s" % lapack_driver)
+
+    def test_zero_size(self):
+        # empty a and/or b must yield empty solutions with rank 0
+        for a_shape, b_shape in (((0, 2), (0,)),
+                                 ((0, 4), (0, 2)),
+                                 ((4, 0), (4,)),
+                                 ((4, 0), (4, 2))):
+            b = np.ones(b_shape)
+            x, residues, rank, s = lstsq(np.zeros(a_shape), b)
+            assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:]))
+            residues_should_be = (np.empty((0,)) if a_shape[1]
+                                  else np.linalg.norm(b, axis=0)**2)
+            assert_equal(residues, residues_should_be)
+            assert_(rank == 0, 'expected rank 0')
+            assert_equal(s, np.empty((0,)))
+
+
+class TestPinv(object):
+    # Tests for scipy.linalg.pinv (least-squares based) and pinv2
+    # (SVD based); both should agree on these inputs.
+
+    def test_simple_real(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+        a_pinv = pinv(a)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+        a_pinv = pinv2(a)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+
+    def test_simple_complex(self):
+        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
+                   dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
+                                             dtype=float))
+        a_pinv = pinv(a)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+        a_pinv = pinv2(a)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+
+    def test_simple_singular(self):
+        # rank-deficient matrix: pinv and pinv2 must agree
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
+        a_pinv = pinv(a)
+        a_pinv2 = pinv2(a)
+        assert_array_almost_equal(a_pinv, a_pinv2)
+
+    def test_simple_cols(self):
+        # wide matrix (more columns than rows)
+        a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
+        a_pinv = pinv(a)
+        a_pinv2 = pinv2(a)
+        assert_array_almost_equal(a_pinv, a_pinv2)
+
+    def test_simple_rows(self):
+        # tall matrix (more rows than columns)
+        a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
+        a_pinv = pinv(a)
+        a_pinv2 = pinv2(a)
+        assert_array_almost_equal(a_pinv, a_pinv2)
+
+    def test_check_finite(self):
+        a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
+        a_pinv = pinv(a, check_finite=False)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+        a_pinv = pinv2(a, check_finite=False)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+
+    def test_native_list_argument(self):
+        # plain nested lists are accepted as input
+        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+        a_pinv = pinv(a)
+        a_pinv2 = pinv2(a)
+        assert_array_almost_equal(a_pinv, a_pinv2)
+
+    def test_tall_transposed(self):
+        a = random([10, 2])
+        a_pinv = pinv(a)
+        # The result will be transposed internally hence will be a C-layout
+        # instead of the typical LAPACK output with Fortran-layout
+        assert a_pinv.flags['C_CONTIGUOUS']
+
+
+class TestPinvSymmetric(object):
+    # Tests for scipy.linalg.pinvh on symmetric/hermitian matrices.
+
+    def test_simple_real(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+        # a @ a.T is symmetric positive semi-definite
+        a = np.dot(a, a.T)
+        a_pinv = pinvh(a)
+        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
+
+    def test_nonpositive(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
+        a = np.dot(a, a.T)
+        u, s, vt = np.linalg.svd(a)
+        # flip a singular value to make the matrix indefinite
+        s[0] *= -1
+        a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
+        a_pinv = pinv2(a)
+        a_pinvh = pinvh(a)
+        assert_array_almost_equal(a_pinv, a_pinvh)
+
+    def test_simple_complex(self):
+        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
+                   dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
+                                             dtype=float))
+        # a @ a.conj().T is hermitian
+        a = np.dot(a, a.conj().T)
+        a_pinv = pinvh(a)
+        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
+
+    def test_native_list_argument(self):
+        # plain nested lists are accepted as input
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+        a = np.dot(a, a.T)
+        a_pinv = pinvh(a.tolist())
+        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
+
+
+def test_pinv_pinv2_comparison():  # As reported in gh-8861
+    # Build a structured test matrix (Kronecker sum of tridiagonal blocks)
+    # and check that pinv and pinv2 satisfy the Moore-Penrose identities
+    # A @ A+ @ A == A and A+ @ A @ A+ == A+ to the same accuracy.
+    I_6 = np.eye(6)
+    Ts = np.diag([-1] * 4 + [-2], k=-1) + np.diag([-2] + [-1] * 4, k=1)
+    T = I_6 + Ts
+    A = 25 * (np.kron(I_6, T) + np.kron(Ts, I_6))
+
+    Ap, Ap2 = pinv(A), pinv2(A)
+
+    tol = 1e-11
+    assert_allclose(A @ Ap @ A - A, A @ Ap2 @ A - A, rtol=0., atol=tol)
+    assert_allclose(Ap @ A @ Ap - Ap, Ap2 @ A @ Ap2 - Ap2, rtol=0., atol=tol)
+
+
+@pytest.mark.parametrize('scale', (1e-20, 1., 1e20))
+@pytest.mark.parametrize('pinv_', (pinv, pinvh, pinv2))
+def test_auto_rcond(scale, pinv_):
+    # The automatic rcond must scale with the matrix magnitude, so a
+    # small-but-significant singular value (1e-10 relative) is kept at
+    # every overall scale.
+    x = np.array([[1, 0], [0, 1e-10]]) * scale
+    expected = np.diag(1. / np.diag(x))
+    x_inv = pinv_(x)
+    assert_allclose(x_inv, expected)
+
+
+class TestVectorNorms(object):
+    # Tests for scipy.linalg.norm on 1-D inputs: dtypes, overflow
+    # safety, numerical stability, ord=0, and axis/keepdims handling.
+
+    def test_types(self):
+        for dtype in np.typecodes['AllFloat']:
+            x = np.array([1, 2, 3], dtype=dtype)
+            # tolerance scaled to the dtype's precision
+            tol = max(1e-15, np.finfo(dtype).eps.real * 20)
+            assert_allclose(norm(x), np.sqrt(14), rtol=tol)
+            assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
+
+        for dtype in np.typecodes['Complex']:
+            x = np.array([1j, 2j, 3j], dtype=dtype)
+            tol = max(1e-15, np.finfo(dtype).eps.real * 20)
+            assert_allclose(norm(x), np.sqrt(14), rtol=tol)
+            assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
+
+    def test_overflow(self):
+        # unlike numpy's norm, this one is
+        # safer on overflow
+        a = array([1e20], dtype=float32)
+        assert_almost_equal(norm(a), a)
+
+    def test_stable(self):
+        # more stable than numpy's norm
+        a = array([1e4] + [1]*10000, dtype=float32)
+        try:
+            # snrm in double precision; we obtain the same as for float64
+            # -- large atol needed due to varying blas implementations
+            assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)
+        except AssertionError:
+            # snrm implemented in single precision, == np.linalg.norm result
+            msg = ": Result should equal either 0.0 or 0.5 (depending on " \
+                  "implementation of snrm2)."
+            assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)
+
+    def test_zero_norm(self):
+        # ord=0 counts the nonzero entries
+        assert_equal(norm([1, 0, 3], 0), 2)
+        assert_equal(norm([1, 2, 3], 0), 3)
+
+    def test_axis_kwd(self):
+        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
+        assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2)
+        assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2)
+
+    def test_keepdims_kwd(self):
+        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
+        b = norm(a, axis=1, keepdims=True)
+        assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2)
+        # reduced axis is kept with length 1
+        assert_(b.shape == (2, 1, 2))
+        assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2)
+
+    @pytest.mark.skipif(not HAS_ILP64, reason="64-bit BLAS required")
+    def test_large_vector(self):
+        # vector longer than 2**31-1 entries requires 64-bit BLAS indices
+        check_free_memory(free_mb=17000)
+        x = np.zeros([2**31], dtype=np.float64)
+        x[-1] = 1
+        res = norm(x)
+        del x
+        assert_allclose(res, 1.0)
+
+
+class TestMatrixNorms(object):
+    # Tests for scipy.linalg.norm on 2-D inputs, compared against
+    # numpy.linalg.norm, plus multi-axis and keepdims behavior.
+
+    def test_matrix_norms(self):
+        # Not all of these are matrix norms in the most technical sense.
+        np.random.seed(1234)
+        for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4):
+            for t in np.single, np.double, np.csingle, np.cdouble, np.int64:
+                A = 10 * np.random.randn(n, m).astype(t)
+                if np.issubdtype(A.dtype, np.complexfloating):
+                    A = (A + 10j * np.random.randn(n, m)).astype(t)
+                    t_high = np.cdouble
+                else:
+                    t_high = np.double
+                for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf):
+                    actual = norm(A, ord=order)
+                    desired = np.linalg.norm(A, ord=order)
+                    # SciPy may return higher precision matrix norms.
+                    # This is a consequence of using LAPACK.
+                    if not np.allclose(actual, desired):
+                        desired = np.linalg.norm(A.astype(t_high), ord=order)
+                        assert_allclose(actual, desired)
+
+    def test_axis_kwd(self):
+        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
+        # swapping the axis pair is equivalent to transposing the input,
+        # and ord=inf over (1, 0) matches ord=1 over (0, 1)
+        b = norm(a, ord=np.inf, axis=(1, 0))
+        c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1))
+        d = norm(a, ord=1, axis=(0, 1))
+        assert_allclose(b, c)
+        assert_allclose(c, d)
+        assert_allclose(b, d)
+        assert_(b.shape == c.shape == d.shape)
+        b = norm(a, ord=1, axis=(1, 0))
+        c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1))
+        d = norm(a, ord=np.inf, axis=(0, 1))
+        assert_allclose(b, c)
+        assert_allclose(c, d)
+        assert_allclose(b, d)
+        assert_(b.shape == c.shape == d.shape)
+
+    def test_keepdims_kwd(self):
+        a = np.arange(120, dtype='d').reshape(2, 3, 4, 5)
+        b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True)
+        c = norm(a, ord=1, axis=(0, 1), keepdims=True)
+        assert_allclose(b, c)
+        assert_(b.shape == c.shape)
+
+
+class TestOverwrite(object):
+    # Verify (via the shared assert_no_overwrite helper) that the solvers
+    # do not modify their input arrays when overwrite flags are not set.
+
+    def test_solve(self):
+        assert_no_overwrite(solve, [(3, 3), (3,)])
+
+    def test_solve_triangular(self):
+        assert_no_overwrite(solve_triangular, [(3, 3), (3,)])
+
+    def test_solve_banded(self):
+        # (2, 1) = number of lower/upper diagonals in the banded storage
+        assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b),
+                            [(4, 6), (6,)])
+
+    def test_solveh_banded(self):
+        assert_no_overwrite(solveh_banded, [(2, 6), (6,)])
+
+    def test_inv(self):
+        assert_no_overwrite(inv, [(3, 3)])
+
+    def test_det(self):
+        assert_no_overwrite(det, [(3, 3)])
+
+    def test_lstsq(self):
+        assert_no_overwrite(lstsq, [(3, 2), (3,)])
+
+    def test_pinv(self):
+        assert_no_overwrite(pinv, [(3, 3)])
+
+    def test_pinv2(self):
+        assert_no_overwrite(pinv2, [(3, 3)])
+
+    def test_pinvh(self):
+        assert_no_overwrite(pinvh, [(3, 3)])
+
+
+class TestSolveCirculant(object):
+    # Tests for scipy.linalg.solve_circulant, cross-checked against
+    # solving the explicitly constructed circulant matrix.
+
+    def test_basic1(self):
+        c = np.array([1, 2, 3, 5])
+        b = np.array([1, -1, 1, 0])
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_basic2(self):
+        # b is a 2-d matrix.
+        c = np.array([1, 2, -3, -5])
+        b = np.arange(12).reshape(4, 3)
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_basic3(self):
+        # b is a 3-d matrix.
+        c = np.array([1, 2, -3, -5])
+        b = np.arange(24).reshape(4, 3, 2)
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_complex(self):
+        # Complex b and c
+        c = np.array([1+2j, -3, 4j, 5])
+        b = np.arange(8).reshape(4, 2) + 0.5j
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_random_b_and_c(self):
+        # Random b and c
+        np.random.seed(54321)
+        c = np.random.randn(50)
+        b = np.random.randn(50)
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_singular(self):
+        # c gives a singular circulant matrix.
+        c = np.array([1, 1, 0, 0])
+        b = np.array([1, 2, 3, 4])
+        # singular='lstsq' falls back to a least-squares solution
+        x = solve_circulant(c, b, singular='lstsq')
+        y, res, rnk, s = lstsq(circulant(c), b)
+        assert_allclose(x, y)
+        # without the fallback, a singular system must raise
+        assert_raises(LinAlgError, solve_circulant, x, y)
+
+    def test_axis_args(self):
+        # Test use of caxis, baxis and outaxis.
+
+        # c has shape (2, 1, 4)
+        c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]])
+
+        # b has shape (3, 4)
+        b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]])
+
+        x = solve_circulant(c, b, baxis=1)
+        assert_equal(x.shape, (4, 2, 3))
+        expected = np.empty_like(x)
+        expected[:, 0, :] = solve(circulant(c[0]), b.T)
+        expected[:, 1, :] = solve(circulant(c[1]), b.T)
+        assert_allclose(x, expected)
+
+        x = solve_circulant(c, b, baxis=1, outaxis=-1)
+        assert_equal(x.shape, (2, 3, 4))
+        assert_allclose(np.rollaxis(x, -1), expected)
+
+        # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3).
+        x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1)
+        assert_equal(x.shape, (4, 2, 3))
+        assert_allclose(x, expected)
+
+    def test_native_list_arguments(self):
+        # Same as test_basic1 using python's native list.
+        c = [1, 2, 3, 5]
+        b = [1, -1, 1, 0]
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+
+class TestMatrix_Balance(object):
+    # Tests for scipy.linalg.matrix_balance: input validation, scaling,
+    # permutation, and the separate=1 (scale, permutation) output form.
+
+    def test_string_arg(self):
+        assert_raises(ValueError, matrix_balance, 'Some string for fail')
+
+    def test_infnan_arg(self):
+        # non-finite entries are rejected
+        assert_raises(ValueError, matrix_balance,
+                      np.array([[1, 2], [3, np.inf]]))
+        assert_raises(ValueError, matrix_balance,
+                      np.array([[1, 2], [3, np.nan]]))
+
+    def test_scaling(self):
+        _, y = matrix_balance(np.array([[1000, 1], [1000, 0]]))
+        # Pre/post LAPACK 3.5.0 gives the same result up to an offset
+        # since in each case col norm is x1000 greater and
+        # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5.
+        assert_allclose(int(np.diff(np.log2(np.diag(y)))), 5)
+
+    def test_scaling_order(self):
+        # balanced matrix satisfies x = inv(y) @ A @ y
+        A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]])
+        x, y = matrix_balance(A)
+        assert_allclose(solve(y, A).dot(y), x)
+
+    def test_separate(self):
+        # separate=1 returns the scale vector and permutation separately
+        _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]),
+                                   separate=1)
+        assert_equal(int(np.diff(np.log2(y))), 5)
+        assert_allclose(z, np.arange(2))
+
+    def test_permutation(self):
+        # block-diagonal matrix is only permuted, not scaled
+        A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))),
+                       np.ones((3, 3)))
+        x, (y, z) = matrix_balance(A, separate=1)
+        assert_allclose(y, np.ones_like(y))
+        assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2]))
+
+    def test_perm_and_scaling(self):
+        # Matrix with its diagonal removed
+        cases = (  # Case 0
+                 np.array([[0., 0., 0., 0., 0.000002],
+                           [0., 0., 0., 0., 0.],
+                           [2., 2., 0., 0., 0.],
+                           [2., 2., 0., 0., 0.],
+                           [0., 0., 0.000002, 0., 0.]]),
+                 #  Case 1 user reported GH-7258
+                 np.array([[-0.5, 0., 0., 0.],
+                           [0., -1., 0., 0.],
+                           [1., 0., -0.5, 0.],
+                           [0., 1., 0., -1.]]),
+                 #  Case 2 user reported GH-7258
+                 np.array([[-3., 0., 1., 0.],
+                           [-1., -1., -0., 1.],
+                           [-3., -0., -0., 0.],
+                           [-1., -0., 1., -1.]])
+                 )
+
+        for A in cases:
+            x, y = matrix_balance(A)
+            x, (s, p) = matrix_balance(A, separate=1)
+            # invert the permutation and check the combined transform
+            ip = np.empty_like(p)
+            ip[p] = np.arange(A.shape[0])
+            assert_allclose(y, np.diag(s)[ip, :])
+            assert_allclose(solve(y, A).dot(y), x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_blas.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_blas.py
new file mode 100644
index 0000000..bb4d2f3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_blas.py
@@ -0,0 +1,1096 @@
+#
+# Created by: Pearu Peterson, April 2002
+#
+
+__usage__ = """
+Build linalg:
+ python setup.py build
+Run tests if scipy is installed:
+ python -c 'import scipy;scipy.linalg.test()'
+"""
+
+import math
+import pytest
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+ assert_array_almost_equal, assert_allclose)
+from pytest import raises as assert_raises
+
+from numpy import float32, float64, complex64, complex128, arange, triu, \
+ tril, zeros, tril_indices, ones, mod, diag, append, eye, \
+ nonzero
+
+from numpy.random import rand, seed
+from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve
+
+try:
+ from scipy.linalg import _cblas as cblas
+except ImportError:
+ cblas = None
+
+REAL_DTYPES = [float32, float64]
+COMPLEX_DTYPES = [complex64, complex128]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def test_get_blas_funcs():
+    # get_blas_funcs: dtype/ordering based dispatch, defaults, and the
+    # dtype keyword interface.
+
+    # check that it returns Fortran code for arrays that are
+    # fortran-ordered
+    f1, f2, f3 = get_blas_funcs(
+        ('axpy', 'axpy', 'axpy'),
+        (np.empty((2, 2), dtype=np.complex64, order='F'),
+         np.empty((2, 2), dtype=np.complex128, order='C'))
+        )
+
+    # get_blas_funcs will choose libraries depending on most generic
+    # array
+    assert_equal(f1.typecode, 'z')
+    assert_equal(f2.typecode, 'z')
+    if cblas is not None:
+        assert_equal(f1.module_name, 'cblas')
+        assert_equal(f2.module_name, 'cblas')
+
+    # check defaults.
+    f1 = get_blas_funcs('rotg')
+    assert_equal(f1.typecode, 'd')
+
+    # check also dtype interface
+    f1 = get_blas_funcs('gemm', dtype=np.complex64)
+    assert_equal(f1.typecode, 'c')
+    f1 = get_blas_funcs('gemm', dtype='F')
+    assert_equal(f1.typecode, 'c')
+
+    # extended precision complex
+    f1 = get_blas_funcs('gemm', dtype=np.longcomplex)
+    assert_equal(f1.typecode, 'z')
+
+    # check safe complex upcasting
+    f1 = get_blas_funcs('axpy',
+                        (np.empty((2, 2), dtype=np.float64),
+                         np.empty((2, 2), dtype=np.complex64))
+                        )
+    assert_equal(f1.typecode, 'z')
+
+
+def test_get_blas_funcs_alias():
+    # check alias for get_blas_funcs
+    # complex: nrm2/dot resolve to the c-prefixed routines
+    f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64)
+    assert f.typecode == 'c'
+    assert g.typecode == 'c'
+
+    # real: dot, dotc and dotu are all aliases of the same routine
+    f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64)
+    assert f is g
+    assert f is h
+
+
+class TestCBLAS1Simple(object):
+    # Level-1 CBLAS smoke tests; routines missing from the local CBLAS
+    # build (getattr returns None) are skipped.
+
+    def test_axpy(self):
+        # real: single and double precision
+        for p in 'sd':
+            f = getattr(cblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
+                                      [7, 9, 18])
+        # complex: single and double precision
+        for p in 'cz':
+            f = getattr(cblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
+                                      [7, 10j-1, 18])
+
+
+class TestFBLAS1Simple(object):
+    # Level-1 Fortran BLAS smoke tests. Prefix letters select precision:
+    # s/d real single/double, c/z complex single/double; mixed prefixes
+    # (sc, dz, cs, zd) are the real-valued variants of complex routines.
+    # Routines missing from the build (getattr returns None) are skipped.
+
+    def test_axpy(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'axpy', None)
+            if f is None:
+                continue
+            # y := a*x + y
+            assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
+                                      [7, 9, 18])
+        for p in 'cz':
+            f = getattr(fblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
+                                      [7, 10j-1, 18])
+
+    def test_copy(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'copy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5])
+        for p in 'cz':
+            f = getattr(fblas, p+'copy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j])
+
+    def test_asum(self):
+        # sum of absolute values (abs of real and imag parts for complex)
+        for p in 'sd':
+            f = getattr(fblas, p+'asum', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3, -4, 5]), 12)
+        for p in ['sc', 'dz']:
+            f = getattr(fblas, p+'asum', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j]), 14)
+
+    def test_dot(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'dot', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9)
+
+    def test_complex_dotu(self):
+        # unconjugated complex dot product
+        for p in 'cz':
+            f = getattr(fblas, p+'dotu', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j)
+
+    def test_complex_dotc(self):
+        # conjugated complex dot product (first argument conjugated)
+        for p in 'cz':
+            f = getattr(fblas, p+'dotc', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j)
+
+    def test_nrm2(self):
+        # Euclidean norm
+        for p in 'sd':
+            f = getattr(fblas, p+'nrm2', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3, -4, 5]), math.sqrt(50))
+        for p in ['c', 'z', 'sc', 'dz']:
+            f = getattr(fblas, p+'nrm2', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50))
+
+    def test_scal(self):
+        # x := a*x, including real scalar applied to complex vector
+        for p in 'sd':
+            f = getattr(fblas, p+'scal', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10])
+        for p in 'cz':
+            f = getattr(fblas, p+'scal', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j])
+        for p in ['cs', 'zd']:
+            f = getattr(fblas, p+'scal', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j])
+
+    def test_swap(self):
+        # exchange the contents of x and y
+        for p in 'sd':
+            f = getattr(fblas, p+'swap', None)
+            if f is None:
+                continue
+            x, y = [2, 3, 1], [-2, 3, 7]
+            x1, y1 = f(x, y)
+            assert_array_almost_equal(x1, y)
+            assert_array_almost_equal(y1, x)
+        for p in 'cz':
+            f = getattr(fblas, p+'swap', None)
+            if f is None:
+                continue
+            x, y = [2, 3j, 1], [-2, 3, 7-3j]
+            x1, y1 = f(x, y)
+            assert_array_almost_equal(x1, y)
+            assert_array_almost_equal(y1, x)
+
+    def test_amax(self):
+        # index of the entry with largest absolute value
+        for p in 'sd':
+            f = getattr(fblas, 'i'+p+'amax')
+            assert_equal(f([-2, 4, 3]), 1)
+        for p in 'cz':
+            f = getattr(fblas, 'i'+p+'amax')
+            assert_equal(f([-5, 4+3j, 6]), 1)
+    # XXX: need tests for rot,rotm,rotg,rotmg
+
+
+class TestFBLAS2Simple(object):
+
+ def test_gemv(self):
+ for p in 'sd':
+ f = getattr(fblas, p+'gemv', None)
+ if f is None:
+ continue
+ assert_array_almost_equal(f(3, [[3]], [-4]), [-36])
+ assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21])
+ for p in 'cz':
+ f = getattr(fblas, p+'gemv', None)
+ if f is None:
+ continue
+ assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j])
+ assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]),
+ [-48-21j])
+
+ def test_ger(self):
+
+ for p in 'sd':
+ f = getattr(fblas, p+'ger', None)
+ if f is None:
+ continue
+ assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]])
+ assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]),
+ [[6, 8], [12, 16], [18, 24]])
+
+ assert_array_almost_equal(f(1, [1, 2], [3, 4],
+ a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]])
+
+ for p in 'cz':
+ f = getattr(fblas, p+'geru', None)
+ if f is None:
+ continue
+ assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
+ [[3j, 4j], [6, 8]])
+ assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]),
+ [[6, 8], [12, 16], [18, 24]])
+
+ for p in 'cz':
+ for name in ('ger', 'gerc'):
+ f = getattr(fblas, p+name, None)
+ if f is None:
+ continue
+ assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
+ [[3j, 4j], [6, 8]])
+ assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]),
+ [[6, 8], [12, 16], [18, 24]])
+
    def test_syr_her(self):
        """Symmetric (?syr) and Hermitian (?her) rank-1 updates, including
        increment/offset handling, overwrite semantics and argument
        validation."""
        x = np.arange(1, 5, dtype='d')
        resx = np.triu(x[:, np.newaxis] * x)               # expected triu(x x^T)
        resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1])

        # Strided container so incx/offx can select x out of it.
        y = np.linspace(0, 8.5, 17, endpoint=False)

        z = np.arange(1, 9, dtype='d').view('D')           # complex [1+2j, ...]
        resz = np.triu(z[:, np.newaxis] * z)               # symmetric product
        resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1])
        rehz = np.triu(z[:, np.newaxis] * z.conj())        # Hermitian product
        rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj())

        # z interleaved with zeros so incx=3, offx=1 recovers it.
        w = np.c_[np.zeros(4), z, np.zeros(4)].ravel()

        # Real symmetric rank-1 update.
        for p, rtol in zip('sd', [1e-7, 1e-14]):
            f = getattr(fblas, p+'syr', None)
            if f is None:
                continue
            assert_allclose(f(1.0, x), resx, rtol=rtol)
            assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol)
            assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, y, incx=-2, offx=2, n=4),
                            resx_reverse, rtol=rtol)

            # overwrite_a=True must update the supplied Fortran-ordered array
            # in place; without it a fresh array is returned.
            a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F')
            b = f(1.0, x, a=a, overwrite_a=True)
            assert_allclose(a, resx, rtol=rtol)

            b = f(2.0, x, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resx, rtol=rtol)

            # Invalid increments, offsets, sizes and flags must raise.
            assert_raises(Exception, f, 1.0, x, incx=0)
            assert_raises(Exception, f, 1.0, x, offx=5)
            assert_raises(Exception, f, 1.0, x, offx=-2)
            assert_raises(Exception, f, 1.0, x, n=-2)
            assert_raises(Exception, f, 1.0, x, n=5)
            assert_raises(Exception, f, 1.0, x, lower=2)
            assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F'))

        # Complex *symmetric* (not Hermitian) rank-1 update.
        for p, rtol in zip('cz', [1e-7, 1e-14]):
            f = getattr(fblas, p+'syr', None)
            if f is None:
                continue
            assert_allclose(f(1.0, z), resz, rtol=rtol)
            assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol)
            assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, w, incx=-3, offx=1, n=4),
                            resz_reverse, rtol=rtol)

            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
            b = f(1.0, z, a=a, overwrite_a=True)
            assert_allclose(a, resz, rtol=rtol)

            b = f(2.0, z, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resz, rtol=rtol)

            assert_raises(Exception, f, 1.0, x, incx=0)
            assert_raises(Exception, f, 1.0, x, offx=5)
            assert_raises(Exception, f, 1.0, x, offx=-2)
            assert_raises(Exception, f, 1.0, x, n=-2)
            assert_raises(Exception, f, 1.0, x, n=5)
            assert_raises(Exception, f, 1.0, x, lower=2)
            assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F'))

        # Complex Hermitian rank-1 update (uses x^H, so conjugated results).
        for p, rtol in zip('cz', [1e-7, 1e-14]):
            f = getattr(fblas, p+'her', None)
            if f is None:
                continue
            assert_allclose(f(1.0, z), rehz, rtol=rtol)
            assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol)
            assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, w, incx=-3, offx=1, n=4),
                            rehz_reverse, rtol=rtol)

            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
            b = f(1.0, z, a=a, overwrite_a=True)
            assert_allclose(a, rehz, rtol=rtol)

            b = f(2.0, z, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*rehz, rtol=rtol)

            assert_raises(Exception, f, 1.0, x, incx=0)
            assert_raises(Exception, f, 1.0, x, offx=5)
            assert_raises(Exception, f, 1.0, x, offx=-2)
            assert_raises(Exception, f, 1.0, x, n=-2)
            assert_raises(Exception, f, 1.0, x, n=5)
            assert_raises(Exception, f, 1.0, x, lower=2)
            assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F'))
+
    def test_syr2(self):
        """Real symmetric rank-2 update ?syr2:
        A <- alpha*(x*y^T + y*x^T) + A."""
        x = np.arange(1, 5, dtype='d')
        y = np.arange(5, 9, dtype='d')
        resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x)
        resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1]
                                + y[::-1, np.newaxis] * x[::-1])

        # Strided container; incx/incy + offsets pick x and y out of it.
        q = np.linspace(0, 8.5, 17, endpoint=False)

        for p, rtol in zip('sd', [1e-7, 1e-14]):
            f = getattr(fblas, p+'syr2', None)
            if f is None:
                continue
            assert_allclose(f(1.0, x, y), resxy, rtol=rtol)
            assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol)
            assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol)

            assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10),
                            resxy, rtol=rtol)
            assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10,
                              n=3),
                            resxy[:3, :3], rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10),
                            resxy_reverse, rtol=rtol)

            # In-place update of a Fortran-ordered output array.
            a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F')
            b = f(1.0, x, y, a=a, overwrite_a=True)
            assert_allclose(a, resxy, rtol=rtol)

            b = f(2.0, x, y, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resxy, rtol=rtol)

            # Invalid increments, offsets, sizes and flags must raise.
            assert_raises(Exception, f, 1.0, x, y, incx=0)
            assert_raises(Exception, f, 1.0, x, y, offx=5)
            assert_raises(Exception, f, 1.0, x, y, offx=-2)
            assert_raises(Exception, f, 1.0, x, y, incy=0)
            assert_raises(Exception, f, 1.0, x, y, offy=5)
            assert_raises(Exception, f, 1.0, x, y, offy=-2)
            assert_raises(Exception, f, 1.0, x, y, n=-2)
            assert_raises(Exception, f, 1.0, x, y, n=5)
            assert_raises(Exception, f, 1.0, x, y, lower=2)
            assert_raises(Exception, f, 1.0, x, y,
                          a=np.zeros((2, 2), 'd', 'F'))
+
    def test_her2(self):
        """Complex Hermitian rank-2 update ?her2:
        A <- alpha*x*y^H + conj(alpha)*y*x^H + A."""
        x = np.arange(1, 9, dtype='d').view('D')
        y = np.arange(9, 17, dtype='d').view('D')
        resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj()
        resxy = np.triu(resxy)

        resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj()
        resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj()
        resxy_reverse = np.triu(resxy_reverse)

        # x and y interleaved with zeros so incx=3, offx=1 recovers them.
        u = np.c_[np.zeros(4), x, np.zeros(4)].ravel()
        v = np.c_[np.zeros(4), y, np.zeros(4)].ravel()

        for p, rtol in zip('cz', [1e-7, 1e-14]):
            f = getattr(fblas, p+'her2', None)
            if f is None:
                continue
            assert_allclose(f(1.0, x, y), resxy, rtol=rtol)
            assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol)
            assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(),
                            rtol=rtol)

            assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1),
                            resxy, rtol=rtol)
            assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1,
                              n=3),
                            resxy[:3, :3], rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1),
                            resxy_reverse, rtol=rtol)

            # In-place update of a Fortran-ordered output array.
            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
            b = f(1.0, x, y, a=a, overwrite_a=True)
            assert_allclose(a, resxy, rtol=rtol)

            b = f(2.0, x, y, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resxy, rtol=rtol)

            # Invalid increments, offsets, sizes and flags must raise.
            assert_raises(Exception, f, 1.0, x, y, incx=0)
            assert_raises(Exception, f, 1.0, x, y, offx=5)
            assert_raises(Exception, f, 1.0, x, y, offx=-2)
            assert_raises(Exception, f, 1.0, x, y, incy=0)
            assert_raises(Exception, f, 1.0, x, y, offy=5)
            assert_raises(Exception, f, 1.0, x, y, offy=-2)
            assert_raises(Exception, f, 1.0, x, y, n=-2)
            assert_raises(Exception, f, 1.0, x, y, n=5)
            assert_raises(Exception, f, 1.0, x, y, lower=2)
            assert_raises(Exception, f, 1.0, x, y,
                          a=np.zeros((2, 2), 'd', 'F'))
+
    def test_gbmv(self):
        """General banded matrix-vector product ?gbmv:
        y <- alpha*A*x + beta*y with A stored in LAPACK band format."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 7
            m = 5
            kl = 1                       # sub-diagonals
            ku = 2                       # super-diagonals
            # fake a banded matrix via toeplitz
            A = toeplitz(append(rand(kl+1), zeros(m-kl-1)),
                         append(rand(ku+1), zeros(n-ku-1)))
            A = A.astype(dtype)
            Ab = zeros((kl+ku+1, n), dtype=dtype)

            # Form the banded storage (Toeplitz => each diagonal is constant)
            Ab[2, :5] = A[0, 0]  # diag
            Ab[1, 1:6] = A[0, 1]  # sup1
            Ab[0, 2:7] = A[0, 2]  # sup2
            Ab[3, :4] = A[1, 0]  # sub1

            x = rand(n).astype(dtype)
            y = rand(m).astype(dtype)
            alpha, beta = dtype(3), dtype(-5)

            # Banded result must match the dense reference computation.
            func, = get_blas_funcs(('gbmv',), dtype=dtype)
            y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab,
                      x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)
+
    def test_sbmv_hbmv(self):
        """Symmetric (?sbmv) / Hermitian (?hbmv) banded matrix-vector
        product against a dense reference."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 6
            k = 2                        # number of super-diagonals stored
            A = zeros((n, n), dtype=dtype)
            Ab = zeros((k+1, n), dtype=dtype)

            # Form the array and its packed banded storage
            A[arange(n), arange(n)] = rand(n)
            for ind2 in range(1, k+1):
                temp = rand(n-ind2)
                A[arange(n-ind2), arange(ind2, n)] = temp
                Ab[-1-ind2, ind2:] = temp
            A = A.astype(dtype)
            # Symmetrize for s/d, hermitize for c/z (DTYPES order: s,d,c,z).
            A = A + A.T if ind < 2 else A + A.conj().T
            Ab[-1, :] = diag(A)
            x = rand(n).astype(dtype)
            y = rand(n).astype(dtype)
            alpha, beta = dtype(1.25), dtype(3)

            # hbmv only exists for complex prefixes, sbmv for real ones.
            if ind > 1:
                func, = get_blas_funcs(('hbmv',), dtype=dtype)
            else:
                func, = get_blas_funcs(('sbmv',), dtype=dtype)
            y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)
+
    def test_spmv_hpmv(self):
        """Symmetric (?spmv) / Hermitian (?hpmv) packed matrix-vector
        product, including increment/offset handling."""
        seed(1234)
        # DTYPES+COMPLEX_DTYPES => s, d, c, z, c, z; the trailing complex
        # pair exercises hpmv (ind > 3) vs. complex spmv (ind 2, 3).
        for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
            n = 3
            A = rand(n, n).astype(dtype)
            if ind > 1:
                A += rand(n, n)*1j
            A = A.astype(dtype)
            # Symmetrize for spmv cases, hermitize for hpmv cases.
            A = A + A.T if ind < 4 else A + A.conj().T
            # Packed (lower-triangle, column-major) storage.
            c, r = tril_indices(n)
            Ap = A[r, c]
            x = rand(n).astype(dtype)
            y = rand(n).astype(dtype)
            xlong = arange(2*n).astype(dtype)
            ylong = ones(2*n).astype(dtype)
            alpha, beta = dtype(1.25), dtype(2)

            if ind > 3:
                func, = get_blas_funcs(('hpmv',), dtype=dtype)
            else:
                func, = get_blas_funcs(('spmv',), dtype=dtype)
            y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)

            # Test inc and offsets
            y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap,
                      incx=2, incy=2, offx=n, offy=n)
            y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2]
            assert_array_almost_equal(y1[3::2], y2)
            # Entries outside the strided slice must be untouched.
            assert_almost_equal(y1[4], ylong[4])
+
    def test_spr_hpr(self):
        """Symmetric (?spr) / Hermitian (?hpr) packed rank-1 update."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
            n = 3
            A = rand(n, n).astype(dtype)
            if ind > 1:
                A += rand(n, n)*1j
            A = A.astype(dtype)
            A = A + A.T if ind < 4 else A + A.conj().T
            # Packed lower-triangle storage of A.
            c, r = tril_indices(n)
            Ap = A[r, c]
            x = rand(n).astype(dtype)
            # hpr takes a *real* alpha even for complex A; cycle dtypes so
            # the alpha dtype matches what the routine expects.
            alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5)

            if ind > 3:
                func, = get_blas_funcs(('hpr',), dtype=dtype)
                y2 = alpha * x[:, None].dot(x[None, :].conj()) + A
            else:
                func, = get_blas_funcs(('spr',), dtype=dtype)
                y2 = alpha * x[:, None].dot(x[None, :]) + A

            y1 = func(n=n, alpha=alpha, ap=Ap, x=x)
            # Unpack the packed result back into a dense matrix to compare.
            y1f = zeros((3, 3), dtype=dtype)
            y1f[r, c] = y1
            y1f[c, r] = y1.conj() if ind > 3 else y1
            assert_array_almost_equal(y1f, y2)
+
    def test_spr2_hpr2(self):
        """Symmetric (?spr2) / Hermitian (?hpr2) packed rank-2 update."""
        seed(1234)
        # Only DTYPES here: s,d exercise spr2 and c,z exercise hpr2.
        for ind, dtype in enumerate(DTYPES):
            n = 3
            A = rand(n, n).astype(dtype)
            if ind > 1:
                A += rand(n, n)*1j
            A = A.astype(dtype)
            A = A + A.T if ind < 2 else A + A.conj().T
            # Packed lower-triangle storage of A.
            c, r = tril_indices(n)
            Ap = A[r, c]
            x = rand(n).astype(dtype)
            y = rand(n).astype(dtype)
            alpha = dtype(2)

            if ind > 1:
                func, = get_blas_funcs(('hpr2',), dtype=dtype)
            else:
                func, = get_blas_funcs(('spr2',), dtype=dtype)

            # Reference: A + conj(alpha)*x*y^H + its conjugate transpose.
            u = alpha.conj() * x[:, None].dot(y[None, :].conj())
            y2 = A + u + u.conj().T
            y1 = func(n=n, alpha=alpha, x=x, y=y, ap=Ap)
            # Unpack the packed result; mirror the strict lower triangle.
            y1f = zeros((3, 3), dtype=dtype)
            y1f[r, c] = y1
            y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj()
            assert_array_almost_equal(y1f, y2)
+
    def test_tbmv(self):
        """Triangular banded matrix-vector product ?tbmv with diag/trans
        options, against dense references."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            k = 3                        # number of super-diagonals
            x = rand(n).astype(dtype)
            A = zeros((n, n), dtype=dtype)
            # Banded upper triangular array
            for sup in range(k+1):
                A[arange(n-sup), arange(sup, n)] = rand(n-sup)

            # Add complex parts for c,z
            if ind > 1:
                A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype)

            # Form the banded storage
            Ab = zeros((k+1, n), dtype=dtype)
            for row in range(k+1):
                Ab[-row-1, row:] = diag(A, k=row)
            func, = get_blas_funcs(('tbmv',), dtype=dtype)

            y1 = func(k=k, a=Ab, x=x)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)

            # diag=1: unit diagonal assumed, stored diagonal ignored.
            y1 = func(k=k, a=Ab, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)

            # trans=1: multiply by A^T.
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=1)
            y2 = A.T.dot(x)
            assert_array_almost_equal(y1, y2)

            # trans=2: multiply by A^H.
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=2)
            y2 = A.conj().T.dot(x)
            assert_array_almost_equal(y1, y2)
+
    def test_tbsv(self):
        """Triangular banded solve ?tbsv with diag/trans options, against
        dense ``solve`` references."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 6
            k = 3                        # number of super-diagonals
            x = rand(n).astype(dtype)
            A = zeros((n, n), dtype=dtype)
            # Banded upper triangular array
            for sup in range(k+1):
                A[arange(n-sup), arange(sup, n)] = rand(n-sup)

            # Add complex parts for c,z
            if ind > 1:
                A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype)

            # Form the banded storage
            Ab = zeros((k+1, n), dtype=dtype)
            for row in range(k+1):
                Ab[-row-1, row:] = diag(A, k=row)
            func, = get_blas_funcs(('tbsv',), dtype=dtype)

            y1 = func(k=k, a=Ab, x=x)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)

            # diag=1: unit diagonal assumed, stored diagonal ignored.
            y1 = func(k=k, a=Ab, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)

            # trans=1: solve with A^T; trans=2: solve with A^H.
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=1)
            y2 = solve(A.T, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(k=k, a=Ab, x=x, diag=1, trans=2)
            y2 = solve(A.conj().T, x)
            assert_array_almost_equal(y1, y2)
+
    def test_tpmv(self):
        """Triangular packed matrix-vector product ?tpmv with diag/trans
        options."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            x = rand(n).astype(dtype)
            # Upper triangular array
            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
            # Form the packed storage
            c, r = tril_indices(n)
            Ap = A[r, c]
            func, = get_blas_funcs(('tpmv',), dtype=dtype)

            y1 = func(n=n, ap=Ap, x=x)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)

            # diag=1: unit diagonal assumed, stored diagonal ignored.
            y1 = func(n=n, ap=Ap, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)

            # trans=1: A^T; trans=2: A^H.
            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
            y2 = A.T.dot(x)
            assert_array_almost_equal(y1, y2)

            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
            y2 = A.conj().T.dot(x)
            assert_array_almost_equal(y1, y2)
+
    def test_tpsv(self):
        """Triangular packed solve ?tpsv with diag/trans options."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            x = rand(n).astype(dtype)
            # Upper triangular array
            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
            # Make the system well conditioned before solving.
            A += eye(n)
            # Form the packed storage
            c, r = tril_indices(n)
            Ap = A[r, c]
            func, = get_blas_funcs(('tpsv',), dtype=dtype)

            y1 = func(n=n, ap=Ap, x=x)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)

            # diag=1: unit diagonal assumed, stored diagonal ignored.
            y1 = func(n=n, ap=Ap, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)

            # trans=1: solve with A^T; trans=2: solve with A^H.
            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
            y2 = solve(A.T, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
            y2 = solve(A.conj().T, x)
            assert_array_almost_equal(y1, y2)
+
    def test_trmv(self):
        """Triangular (dense) matrix-vector product ?trmv; only the upper
        triangle of ``a`` is referenced by default."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 3
            A = (rand(n, n)+eye(n)).astype(dtype)
            x = rand(3).astype(dtype)
            func, = get_blas_funcs(('trmv',), dtype=dtype)

            y1 = func(a=A, x=x)
            y2 = triu(A).dot(x)
            assert_array_almost_equal(y1, y2)

            # diag=1: unit diagonal assumed, stored diagonal ignored.
            y1 = func(a=A, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = triu(A).dot(x)
            assert_array_almost_equal(y1, y2)

            # trans=1: A^T; trans=2: A^H.
            y1 = func(a=A, x=x, diag=1, trans=1)
            y2 = triu(A).T.dot(x)
            assert_array_almost_equal(y1, y2)

            y1 = func(a=A, x=x, diag=1, trans=2)
            y2 = triu(A).conj().T.dot(x)
            assert_array_almost_equal(y1, y2)
+
    def test_trsv(self):
        """Triangular (dense) solve ?trsv with lower/diag/trans options."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 15
            # eye(n) keeps the triangular systems well conditioned.
            A = (rand(n, n)+eye(n)).astype(dtype)
            x = rand(n).astype(dtype)
            func, = get_blas_funcs(('trsv',), dtype=dtype)

            y1 = func(a=A, x=x)
            y2 = solve(triu(A), x)
            assert_array_almost_equal(y1, y2)

            # lower=1: reference the lower triangle instead.
            y1 = func(a=A, x=x, lower=1)
            y2 = solve(tril(A), x)
            assert_array_almost_equal(y1, y2)

            # diag=1: unit diagonal assumed, stored diagonal ignored.
            y1 = func(a=A, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(triu(A), x)
            assert_array_almost_equal(y1, y2)

            # trans=1: solve with A^T; trans=2: solve with A^H.
            y1 = func(a=A, x=x, diag=1, trans=1)
            y2 = solve(triu(A).T, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(a=A, x=x, diag=1, trans=2)
            y2 = solve(triu(A).conj().T, x)
            assert_array_almost_equal(y1, y2)
+
+
class TestFBLAS3Simple(object):
    """Smoke tests for the level-3 ?gemm wrappers on 1x1 operands."""

    def test_gemm(self):
        # Real gemm: C <- alpha*A*B (+ beta*C when supplied).
        for p in 'sd':
            f = getattr(fblas, p+'gemm', None)
            if f is None:
                continue
            assert_array_almost_equal(f(3, [3], [-4]), [[-36]])
            assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21])
        # Complex gemm with complex alpha/beta.
        for p in 'cz':
            f = getattr(fblas, p+'gemm', None)
            if f is None:
                continue
            assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]])
            assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j])
+
+
+def _get_func(func, ps='sdzc'):
+ """Just a helper: return a specified BLAS function w/typecode."""
+ for p in ps:
+ f = getattr(fblas, p+func, None)
+ if f is None:
+ continue
+ yield f
+
+
class TestBLAS3Symm(object):
    """Tests for the level-3 symmetric multiply ?symm:
    C <- alpha*A*B + beta*C (side=0) or alpha*B*A + beta*C (side=1)."""

    def setup_method(self):
        # a is given by its upper triangle; the symmetric matrix it
        # represents is [[1, 2], [2, 1]].
        self.a = np.array([[1., 2.],
                           [0., 1.]])
        self.b = np.array([[1., 0., 3.],
                           [0., -1., 2.]])
        self.c = np.ones((2, 3))
        # Expected result of symm(A) @ b + c.
        self.t = np.array([[2., -1., 8.],
                           [3., 0., 9.]])

    def test_symm(self):
        for f in _get_func('symm'):
            res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
            assert_array_almost_equal(res, self.t)

            # Transposed a with lower=1 represents the same symmetric matrix.
            res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
            assert_array_almost_equal(res, self.t)

            # side=1 computes B*A, so transposed shapes are needed.
            res = f(a=self.a, b=self.b.T, side=1, c=self.c.T,
                    alpha=1., beta=1.)
            assert_array_almost_equal(res, self.t.T)

    def test_summ_wrong_side(self):
        f = getattr(fblas, 'dsymm', None)
        if f is not None:
            assert_raises(Exception, f, **{'a': self.a, 'b': self.b,
                                           'alpha': 1, 'side': 1})
            # `side=1` means C <- B*A, hence shapes of A and B are to be
            # compatible. Otherwise, f2py exception is raised

    def test_symm_wrong_uplo(self):
        """SYMM only considers the upper/lower part of A. Hence setting
        wrong value for `lower` (default is lower=0, meaning upper triangle)
        gives a wrong result.
        """
        f = getattr(fblas, 'dsymm', None)
        if f is not None:
            res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
            assert np.allclose(res, self.t)

            res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
            assert not np.allclose(res, self.t)
+
+
class TestBLAS3Syrk(object):
    """Tests for the symmetric rank-k update ?syrk:
    C <- alpha*A*A^T + beta*C (or A^T*A with trans=1)."""

    def setup_method(self):
        self.a = np.array([[1., 0.],
                           [0., -2.],
                           [2., 3.]])
        # t = a @ a.T, tt = a.T @ a.
        self.t = np.array([[1., 0., 2.],
                           [0., 4., -6.],
                           [2., -6., 13.]])
        self.tt = np.array([[5., 6.],
                            [6., 13.]])

    def test_syrk(self):
        for f in _get_func('syrk'):
            # Only the requested triangle of the result is defined.
            c = f(a=self.a, alpha=1.)
            assert_array_almost_equal(np.triu(c), np.triu(self.t))

            c = f(a=self.a, alpha=1., lower=1)
            assert_array_almost_equal(np.tril(c), np.tril(self.t))

            c0 = np.ones(self.t.shape)
            c = f(a=self.a, alpha=1., beta=1., c=c0)
            assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))

            # trans=1 computes A^T*A instead of A*A^T.
            c = f(a=self.a, alpha=1., trans=1)
            assert_array_almost_equal(np.triu(c), np.triu(self.tt))

    # prints '0-th dimension must be fixed to 3 but got 5',
    # FIXME: suppress?
    # FIXME: how to catch the _fblas.error?
    def test_syrk_wrong_c(self):
        f = getattr(fblas, 'dsyrk', None)
        if f is not None:
            assert_raises(Exception, f, **{'a': self.a, 'alpha': 1.,
                                           'c': np.ones((5, 8))})
            # if C is supplied, it must have compatible dimensions
+ # if C is supplied, it must have compatible dimensions
+
+
class TestBLAS3Syr2k(object):
    """Tests for the symmetric rank-2k update ?syr2k:
    C <- alpha*(A*B^T + B*A^T) + beta*C."""

    def setup_method(self):
        self.a = np.array([[1., 0.],
                           [0., -2.],
                           [2., 3.]])
        self.b = np.array([[0., 1.],
                           [1., 0.],
                           [0, 1.]])
        # t = a @ b.T + b @ a.T, tt = a.T @ b + b.T @ a.
        self.t = np.array([[0., -1., 3.],
                           [-1., 0., 0.],
                           [3., 0., 6.]])
        self.tt = np.array([[0., 1.],
                            [1., 6]])

    def test_syr2k(self):
        for f in _get_func('syr2k'):
            # Only the requested triangle of the result is defined.
            c = f(a=self.a, b=self.b, alpha=1.)
            assert_array_almost_equal(np.triu(c), np.triu(self.t))

            c = f(a=self.a, b=self.b, alpha=1., lower=1)
            assert_array_almost_equal(np.tril(c), np.tril(self.t))

            c0 = np.ones(self.t.shape)
            c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0)
            assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))

            # trans=1 swaps to A^T*B + B^T*A.
            c = f(a=self.a, b=self.b, alpha=1., trans=1)
            assert_array_almost_equal(np.triu(c), np.triu(self.tt))

    # prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress?
    def test_syr2k_wrong_c(self):
        f = getattr(fblas, 'dsyr2k', None)
        if f is not None:
            assert_raises(Exception, f, **{'a': self.a,
                                           'b': self.b,
                                           'alpha': 1.,
                                           'c': np.zeros((15, 8))})
            # if C is supplied, it must have compatible dimensions
+ # if C is supplied, it must have compatible dimensions
+
+
class TestSyHe(object):
    """Quick and simple tests for (zc)-symm, syrk, syr2k.

    Uses the Pauli matrix sigma_y, for which the symmetric and Hermitian
    products differ in sign, distinguishing sy* from he* routines."""

    def setup_method(self):
        self.sigma_y = np.array([[0., -1.j],
                                 [1.j, 0.]])

    def test_symm_zc(self):
        for f in _get_func('symm', 'zc'):
            # NB: only the upper triangle of `a` is referenced by symm
            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
            assert_array_almost_equal(np.triu(res), np.diag([1, -1]))

    def test_hemm_zc(self):
        for f in _get_func('hemm', 'zc'):
            # NB: only the upper triangle of the Hermitian `a` is referenced
            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
            assert_array_almost_equal(np.triu(res), np.diag([1, 1]))

    def test_syrk_zr(self):
        # sigma_y @ sigma_y.T = -I (no conjugation).
        for f in _get_func('syrk', 'zc'):
            res = f(a=self.sigma_y, alpha=1.)
            assert_array_almost_equal(np.triu(res), np.diag([-1, -1]))

    def test_herk_zr(self):
        # sigma_y @ sigma_y^H = +I (conjugated).
        for f in _get_func('herk', 'zc'):
            res = f(a=self.sigma_y, alpha=1.)
            assert_array_almost_equal(np.triu(res), np.diag([1, 1]))

    def test_syr2k_zr(self):
        for f in _get_func('syr2k', 'zc'):
            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
            assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1]))

    def test_her2k_zr(self):
        for f in _get_func('her2k', 'zc'):
            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
            assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1]))
+
+
class TestTRMM(object):
    """Quick and simple tests for dtrmm (triangular matrix multiply)."""

    def setup_method(self):
        self.a = np.array([[1., 2., ],
                           [-2., 1.]])
        self.b = np.array([[3., 4., -1.],
                           [5., 6., -2.]])

        # Larger Fortran-ordered operands for the side= tests.
        self.a2 = np.array([[1, 1, 2, 3],
                            [0, 1, 4, 5],
                            [0, 0, 1, 6],
                            [0, 0, 0, 1]], order="f")
        self.b2 = np.array([[1, 4], [2, 5], [3, 6], [7, 8], [9, 10]],
                           order="f")

    @pytest.mark.parametrize("dtype_", DTYPES)
    def test_side(self, dtype_):
        trmm = get_blas_funcs("trmm", dtype=dtype_)
        # Provide large A array that works for side=1 but not 0 (see gh-10841)
        assert_raises(Exception, trmm, 1.0, self.a2, self.b2)
        res = trmm(1.0, self.a2.astype(dtype_), self.b2.astype(dtype_),
                   side=1)
        # side=1 computes B*A using only the leading k-by-k block of A.
        k = self.b2.shape[1]
        assert_allclose(res, self.b2 @ self.a2[:k, :k], rtol=0.,
                        atol=100*np.finfo(dtype_).eps)

    def test_ab(self):
        f = getattr(fblas, 'dtrmm', None)
        if f is not None:
            result = f(1., self.a, self.b)
            # default a is upper triangular
            expected = np.array([[13., 16., -5.],
                                 [5., 6., -2.]])
            assert_array_almost_equal(result, expected)

    def test_ab_lower(self):
        f = getattr(fblas, 'dtrmm', None)
        if f is not None:
            result = f(1., self.a, self.b, lower=True)
            expected = np.array([[3., 4., -1.],
                                 [-1., -2., 0.]])  # now a is lower triangular
            assert_array_almost_equal(result, expected)

    def test_b_overwrites(self):
        # BLAS dtrmm modifies B argument in-place.
        # Here the default is to copy, but this can be overridden
        f = getattr(fblas, 'dtrmm', None)
        if f is not None:
            for overwr in [True, False]:
                bcopy = self.b.copy()
                result = f(1., self.a, bcopy, overwrite_b=overwr)
                # C-contiguous arrays are copied
                assert_(bcopy.flags.f_contiguous is False and
                        np.may_share_memory(bcopy, result) is False)
                assert_equal(bcopy, self.b)

            # A Fortran-ordered B can genuinely be overwritten in place.
            bcopy = np.asfortranarray(self.b.copy())  # or just transpose it
            result = f(1., self.a, bcopy, overwrite_b=True)
            assert_(bcopy.flags.f_contiguous is True and
                    np.may_share_memory(bcopy, result) is True)
            assert_array_almost_equal(bcopy, result)
+
+
def test_trsm():
    """Triangular solve with multiple right-hand sides ?trsm:
    op(A) X = alpha*B (side=0) or X op(A) = alpha*B (side=1)."""
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        tol = np.finfo(dtype).eps*1000
        func, = get_blas_funcs(('trsm',), dtype=dtype)

        # Test protection against size mismatches
        A = rand(4, 5).astype(dtype)
        B = rand(4, 4).astype(dtype)
        alpha = dtype(1)
        assert_raises(Exception, func, alpha, A, B)
        assert_raises(Exception, func, alpha, A.T, B)

        n = 8
        m = 7
        alpha = dtype(-2.5)
        # eye(m) keeps the triangular factors well conditioned.
        A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m)
        A = A.astype(dtype)
        Au = triu(A)
        Al = tril(A)
        B1 = rand(m, n).astype(dtype)
        B2 = rand(n, m).astype(dtype)

        # Default: solve Au X = alpha*B1 (upper triangle referenced).
        x1 = func(alpha=alpha, a=A, b=B1)
        assert_equal(B1.shape, x1.shape)
        x2 = solve(Au, alpha*B1)
        assert_allclose(x1, x2, atol=tol)

        # trans_a=1: A^T; trans_a=2: A^H.
        x1 = func(alpha=alpha, a=A, b=B1, trans_a=1)
        x2 = solve(Au.T, alpha*B1)
        assert_allclose(x1, x2, atol=tol)

        x1 = func(alpha=alpha, a=A, b=B1, trans_a=2)
        x2 = solve(Au.conj().T, alpha*B1)
        assert_allclose(x1, x2, atol=tol)

        # diag=1: unit diagonal assumed.
        x1 = func(alpha=alpha, a=A, b=B1, diag=1)
        Au[arange(m), arange(m)] = dtype(1)
        x2 = solve(Au, alpha*B1)
        assert_allclose(x1, x2, atol=tol)

        # side=1 solves X*A = alpha*B, i.e. A^H X^H = alpha*B^H.
        x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1)
        x2 = solve(Au.conj().T, alpha*B2.conj().T)
        assert_allclose(x1, x2.conj().T, atol=tol)

        x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1)
        Al[arange(m), arange(m)] = dtype(1)
        x2 = solve(Al.conj().T, alpha*B2.conj().T)
        assert_allclose(x1, x2.conj().T, atol=tol)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_build.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_build.py
new file mode 100644
index 0000000..f8936fb
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_build.py
@@ -0,0 +1,55 @@
+from subprocess import call, PIPE, Popen
+import sys
+import re
+
+import pytest
+from numpy.testing import assert_
+from numpy.compat import asbytes
+
+from scipy.linalg import _flapack as flapack
+
+# XXX: this is copied from numpy trunk. Can be removed when we will depend on
+# numpy 1.3
+
+
class FindDependenciesLdd:
    """Query the shared-library dependencies of a binary via ``ldd``."""

    def __init__(self):
        self.cmd = ['ldd']

        # Probe once so a missing ldd fails loudly at construction time.
        try:
            call(self.cmd, stdout=PIPE, stderr=PIPE)
        except OSError as e:
            raise RuntimeError("command %s cannot be run" % self.cmd) from e

    def get_dependencies(self, file):
        """Return the raw ``ldd`` output (bytes) for *file*."""
        proc = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError("Failed to check dependencies for %s" % file)

        return stdout

    def grep_dependencies(self, file, deps):
        """Return the members of *deps* that appear in *file*'s ldd output."""
        output = self.get_dependencies(file)

        patterns = {asbytes(dep): re.compile(asbytes(dep)) for dep in deps}
        matches = []
        for line in output.splitlines():
            for name, pattern in patterns.items():
                if pattern.search(line):
                    matches.append(name)

        return matches
+
+
class TestF77Mismatch(object):
    """Guard against scipy.linalg.flapack being linked to two different
    Fortran runtimes at once (g77 + gfortran), which causes crashes."""

    @pytest.mark.skipif(not(sys.platform[:5] == 'linux'),
                        reason="Skipping fortran compiler mismatch on non Linux platform")
    def test_lapack(self):
        f = FindDependenciesLdd()
        deps = f.grep_dependencies(flapack.__file__,
                                   ['libg2c', 'libgfortran'])
        # At most one Fortran runtime may be linked in.
        assert_(not (len(deps) > 1),
"""Both g77 and gfortran runtimes linked in scipy.linalg.flapack ! This is
likely to cause random crashes and wrong results. See numpy INSTALL.rst.txt for
more information.""")
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_cython_blas.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_cython_blas.py
new file mode 100644
index 0000000..67a4159
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_cython_blas.py
@@ -0,0 +1,120 @@
+import numpy as np
+from numpy.testing import (assert_allclose,
+ assert_equal)
+import scipy.linalg.cython_blas as blas
+
class TestDGEMM(object):
    """Tests for the cython_blas dgemm test wrapper, covering every
    combination of transposed/non-transposed, contiguous/strided operands."""

    def test_transposes(self):
        # Slices of larger arrays => non-contiguous operands.
        a = np.arange(12, dtype='d').reshape((3, 4))[:2, :2]
        b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2, :2]
        c = np.empty((2, 4))[:2, :2]

        blas._test_dgemm(1., a, b, 0., c)
        assert_allclose(c, a.dot(b))

        blas._test_dgemm(1., a.T, b, 0., c)
        assert_allclose(c, a.T.dot(b))

        blas._test_dgemm(1., a, b.T, 0., c)
        assert_allclose(c, a.dot(b.T))

        blas._test_dgemm(1., a.T, b.T, 0., c)
        assert_allclose(c, a.T.dot(b.T))

        # Writing through a transposed output view.
        blas._test_dgemm(1., a, b, 0., c.T)
        assert_allclose(c, a.dot(b).T)

        blas._test_dgemm(1., a.T, b, 0., c.T)
        assert_allclose(c, a.T.dot(b).T)

        blas._test_dgemm(1., a, b.T, 0., c.T)
        assert_allclose(c, a.dot(b.T).T)

        blas._test_dgemm(1., a.T, b.T, 0., c.T)
        assert_allclose(c, a.T.dot(b.T).T)

    def test_shapes(self):
        # Non-square operands to catch leading-dimension mix-ups.
        a = np.arange(6, dtype='d').reshape((3, 2))
        b = np.arange(-6, 2, dtype='d').reshape((2, 4))
        c = np.empty((3, 4))

        blas._test_dgemm(1., a, b, 0., c)
        assert_allclose(c, a.dot(b))

        blas._test_dgemm(1., b.T, a.T, 0., c.T)
        assert_allclose(c, b.T.dot(a.T).T)
+
+class TestWfuncPointers(object):
+ """ Test the function pointers that are expected to fail on
+ Mac OS X without the additional entry statement in their definitions
+ in fblas_l1.pyf.src. """
+
+ def test_complex_args(self):
+
+ cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64)
+ cy = np.array([.8 + 2.j, .875 - .625j, -1. + 2.j], np.complex64)
+
+ assert_allclose(blas._test_cdotc(cx, cy),
+ -17.6468753815+21.3718757629j, 5)
+ assert_allclose(blas._test_cdotu(cx, cy),
+ -6.11562538147+30.3156242371j, 5)
+
+ assert_equal(blas._test_icamax(cx), 3)
+
+ assert_allclose(blas._test_scasum(cx), 18.625, 5)
+ assert_allclose(blas._test_scnrm2(cx), 13.1796483994, 5)
+
+ assert_allclose(blas._test_cdotc(cx[::2], cy[::2]),
+ -18.1000003815+21.2000007629j, 5)
+ assert_allclose(blas._test_cdotu(cx[::2], cy[::2]),
+ -6.10000038147+30.7999992371j, 5)
+ assert_allclose(blas._test_scasum(cx[::2]), 18., 5)
+ assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499, 5)
+
+ def test_double_args(self):
+
+ x = np.array([5., -3, -.5], np.float64)
+ y = np.array([2, 1, .5], np.float64)
+
+ assert_allclose(blas._test_dasum(x), 8.5, 10)
+ assert_allclose(blas._test_ddot(x, y), 6.75, 10)
+ assert_allclose(blas._test_dnrm2(x), 5.85234975815, 10)
+
+ assert_allclose(blas._test_dasum(x[::2]), 5.5, 10)
+ assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75, 10)
+ assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297, 10)
+
+ assert_equal(blas._test_idamax(x), 1)
+
+ def test_float_args(self):
+
+ x = np.array([5., -3, -.5], np.float32)
+ y = np.array([2, 1, .5], np.float32)
+
+ assert_equal(blas._test_isamax(x), 1)
+
+ assert_allclose(blas._test_sasum(x), 8.5, 5)
+ assert_allclose(blas._test_sdot(x, y), 6.75, 5)
+ assert_allclose(blas._test_snrm2(x), 5.85234975815, 5)
+
+ assert_allclose(blas._test_sasum(x[::2]), 5.5, 5)
+ assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75, 5)
+ assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297, 5)
+
+ def test_double_complex_args(self):
+
+ cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128)
+ cy = np.array([.875 + 2.j, .875 - .625j, -1. + 2.j], np.complex128)
+
+ assert_equal(blas._test_izamax(cx), 3)
+
+ assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j, 10)
+ assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j, 10)
+
+ assert_allclose(blas._test_zdotc(cx[::2], cy[::2]),
+ -18.5625+22.125j, 10)
+ assert_allclose(blas._test_zdotu(cx[::2], cy[::2]),
+ -6.5625+31.875j, 10)
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_cython_lapack.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_cython_lapack.py
new file mode 100644
index 0000000..905ba76
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_cython_lapack.py
@@ -0,0 +1,17 @@
+from numpy.testing import assert_allclose
+from scipy.linalg import cython_lapack as cython_lapack
+from scipy.linalg import lapack
+
+
class TestLamch(object):
    """Check the cython_lapack ?lamch wrappers against scipy.linalg.lapack."""

    def test_slamch(self):
        # Query every documented CMACH code for single precision.
        for code in (b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l',
                     b'o'):
            assert_allclose(cython_lapack._test_slamch(code),
                            lapack.slamch(code))

    def test_dlamch(self):
        # Query every documented CMACH code for double precision.
        for code in (b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l',
                     b'o'):
            assert_allclose(cython_lapack._test_dlamch(code),
                            lapack.dlamch(code))
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp.py
new file mode 100644
index 0000000..78def78
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp.py
@@ -0,0 +1,2901 @@
+""" Test functions for linalg.decomp module
+
+"""
+__usage__ = """
+Build linalg:
+ python setup_linalg.py build
+Run tests if scipy is installed:
+ python -c 'import scipy;scipy.linalg.test()'
+"""
+
+import itertools
+import platform
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_array_equal,
+ assert_, assert_allclose)
+
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
+ schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd,
+ hessenberg, rq, eig_banded, eigvals_banded, eigh,
+ eigvalsh, qr_multiply, qz, orth, ordqz,
+ subspace_angles, hadamard, eigvalsh_tridiagonal,
+ eigh_tridiagonal, null_space, cdf2rdf, LinAlgError)
+
+from scipy.linalg.lapack import (dgbtrf, dgbtrs, zgbtrf, zgbtrs, dsbev,
+ dsbevd, dsbevx, zhbevd, zhbevx)
+
+from scipy.linalg.misc import norm
+from scipy.linalg._decomp_qz import _select_function
+from scipy.stats import ortho_group
+
+from numpy import (array, diag, ones, full, linalg, argsort, zeros, arange,
+ float32, complex64, ravel, sqrt, iscomplex, shape, sort,
+ sign, asarray, isfinite, ndarray, eye, dtype, triu, tril)
+
+from numpy.random import seed, random
+
+from scipy.linalg._testutils import assert_no_overwrite
+from scipy.sparse.sputils import matrix
+
+from scipy._lib._testutils import check_free_memory
+from scipy.linalg.blas import HAS_ILP64
+
+
+def _random_hermitian_matrix(n, posdef=False, dtype=float):
+ "Generate random sym/hermitian array of the given size n"
+ if dtype in COMPLEX_DTYPES:
+ A = np.random.rand(n, n) + np.random.rand(n, n)*1.0j
+ A = (A + A.conj().T)/2
+ else:
+ A = np.random.rand(n, n)
+ A = (A + A.T)/2
+
+ if posdef:
+ A += sqrt(2*n)*np.eye(n)
+
+ return A.astype(dtype)
+
+
# Dtype lists used throughout this module to parametrize tests over the
# real and complex single/double precision LAPACK flavors.
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
def clear_fuss(ar, fuss_binary_bits=7):
    """Clears trailing `fuss_binary_bits` of mantissa of a floating number"""
    arr = np.asanyarray(ar)
    # Complex input: clean real and imaginary parts independently.
    if np.iscomplexobj(arr):
        return clear_fuss(arr.real) + 1j * clear_fuss(arr.imag)

    # Round the mantissa so that only the leading
    # (nmant - fuss_binary_bits) binary digits survive.
    keep_bits = np.finfo(arr.dtype).nmant - fuss_binary_bits
    mant, exp = np.frexp(arr)
    scale = 2.0 ** keep_bits
    mant = np.rint(mant * scale) / scale

    return np.ldexp(mant, exp)
+
+
+# XXX: This function should be available through numpy.testing
def assert_dtype_equal(act, des):
    """Assert that two dtype-like objects (or arrays) share the same dtype."""
    # Normalize both operands to an np.dtype before comparing.
    act = act.dtype if isinstance(act, ndarray) else dtype(act)
    des = des.dtype if isinstance(des, ndarray) else dtype(des)

    assert_(act == des,
            'dtype mismatch: "{}" (should be "{}")'.format(act, des))
+
+
+# XXX: This function should not be defined here, but somewhere in
+# scipy.linalg namespace
def symrand(dim_or_eigv):
    """Return a random symmetric (Hermitian) matrix.

    If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
    uniformly distributed on (-1,1).

    If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose
    eigenvalues are 'a'.
    """
    if isinstance(dim_or_eigv, int):
        # Draw the spectrum uniformly from (-1, 1).
        n = dim_or_eigv
        eigs = random(n)*2 - 1
    elif isinstance(dim_or_eigv, ndarray) and dim_or_eigv.ndim == 1:
        # The caller prescribed the spectrum directly.
        n = dim_or_eigv.shape[0]
        eigs = dim_or_eigv
    else:
        raise TypeError("input type not supported.")

    # Conjugate the diagonal spectrum by a random orthogonal matrix.
    q = ortho_group.rvs(n)
    h = q.T.conj() @ diag(eigs) @ q
    # to avoid roundoff errors, symmetrize the matrix (again)
    return 0.5*(h.T + h)
+
+
def _complex_symrand(dim, dtype):
    """Return a random Hermitian matrix of size `dim`, cast to `dtype`."""
    re_part = symrand(dim)
    im_part = symrand(dim)
    # triu - tril of a symmetric matrix is antisymmetric, so the sum of the
    # real symmetric part and 1j times it is Hermitian.
    a = re_part + 1j*(triu(im_part) - tril(im_part))
    return a.astype(dtype)
+
+
class TestEigVals(object):
    """Tests for ``eigvals`` on small matrices with known spectra."""

    def test_simple(self):
        mat = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        # Analytic eigenvalues of this (singular) matrix.
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat), expected)

    def test_simple_tr(self):
        # Exercise a transposed (Fortran-ordered view) input.
        mat = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]], 'd').T
        mat = mat.copy()
        mat = mat.T
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat), expected)

    def test_simple_complex(self):
        mat = [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]
        expected = [(9+1j+sqrt(92+6j))/2,
                    0,
                    (9+1j-sqrt(92+6j))/2]
        assert_array_almost_equal(eigvals(mat), expected)

    def test_finite(self):
        mat = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat, check_finite=False), expected)
+
+
class TestEig(object):
    """Tests for ``eig``/``eigvals``, including generalized problems.

    NOTE(review): ``_check_gen_eig`` relies on the module-level helpers
    ``clear_fuss`` and ``symrand`` defined elsewhere in this file.
    """

    def test_simple(self):
        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
        w, v = eig(a)
        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        v0 = array([1, 1, (1+sqrt(93)/3)/2])
        v1 = array([3., 0, -1])
        v2 = array([1, 1, (1-sqrt(93)/3)/2])
        v0 = v0 / norm(v0)
        v1 = v1 / norm(v1)
        v2 = v2 / norm(v2)
        assert_array_almost_equal(w, exact_w)
        # Eigenvectors are defined only up to sign; normalize via sign(v[0, i]).
        assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
        assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
        assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
        for i in range(3):
            assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
        # Left eigenvectors of a real matrix satisfy a.T @ v = w * v.
        w, v = eig(a, left=1, right=0)
        for i in range(3):
            assert_array_almost_equal(a.T @ v[:, i], w[i]*v[:, i])

    def test_simple_complex_eig(self):
        a = array([[1, 2], [-2, 1]])
        w, vl, vr = eig(a, left=1, right=1)
        assert_array_almost_equal(w, array([1+2j, 1-2j]))
        for i in range(2):
            assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
        for i in range(2):
            assert_array_almost_equal(a.conj().T @ vl[:, i],
                                      w[i].conj()*vl[:, i])

    def test_simple_complex(self):
        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
        w, vl, vr = eig(a, left=1, right=1)
        for i in range(3):
            assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
        for i in range(3):
            assert_array_almost_equal(a.conj().T @ vl[:, i],
                                      w[i].conj()*vl[:, i])

    def test_gh_3054(self):
        # b == 0 makes the generalized eigenvalue infinite; in homogeneous
        # coordinates this shows up as beta (w[1]) == 0.
        a = [[1]]
        b = [[0]]
        w, vr = eig(a, b, homogeneous_eigvals=True)
        assert_allclose(w[1, 0], 0)
        assert_(w[0, 0] != 0)
        assert_allclose(vr, 1)

        w, vr = eig(a, b)
        assert_equal(w, np.inf)
        assert_allclose(vr, 1)

    def _check_gen_eig(self, A, B):
        # Cross-check eig/eigvals on the pencil (A, B), both in homogeneous
        # and in standard coordinates; B=None means the standard problem.
        if B is not None:
            A, B = asarray(A), asarray(B)
            B0 = B
        else:
            A = asarray(A)
            B0 = B
            B = np.eye(*A.shape)
        msg = "\n%r\n%r" % (A, B)

        # Eigenvalues in homogeneous coordinates
        w, vr = eig(A, B0, homogeneous_eigvals=True)
        wt = eigvals(A, B0, homogeneous_eigvals=True)
        # A @ vr * beta == B @ vr * alpha, column by column.
        val1 = A @ vr * w[1, :]
        val2 = B @ vr * w[0, :]
        for i in range(val1.shape[1]):
            assert_allclose(val1[:, i], val2[:, i],
                            rtol=1e-13, atol=1e-13, err_msg=msg)

        if B0 is None:
            assert_allclose(w[1, :], 1)
            assert_allclose(wt[1, :], 1)

        perm = np.lexsort(w)
        permt = np.lexsort(wt)
        assert_allclose(w[:, perm], wt[:, permt], atol=1e-7, rtol=1e-7,
                        err_msg=msg)

        # Returned eigenvectors should be unit-norm.
        length = np.empty(len(vr))

        for i in range(len(vr)):
            length[i] = norm(vr[:, i])

        assert_allclose(length, np.ones(length.size), err_msg=msg,
                        atol=1e-7, rtol=1e-7)

        # Convert homogeneous coordinates
        beta_nonzero = (w[1, :] != 0)
        wh = w[0, beta_nonzero] / w[1, beta_nonzero]

        # Eigenvalues in standard coordinates
        w, vr = eig(A, B0)
        wt = eigvals(A, B0)
        val1 = A @ vr
        val2 = B @ vr * w
        res = val1 - val2
        for i in range(res.shape[1]):
            if np.all(isfinite(res[:, i])):
                assert_allclose(res[:, i], 0,
                                rtol=1e-13, atol=1e-13, err_msg=msg)

        # clear_fuss rounds away low-order mantissa bits before sorting so
        # that nearly-equal eigenvalues sort consistently across both calls.
        w_fin = w[isfinite(w)]
        wt_fin = wt[isfinite(wt)]
        perm = argsort(clear_fuss(w_fin))
        permt = argsort(clear_fuss(wt_fin))
        assert_allclose(w[perm], wt[permt],
                        atol=1e-7, rtol=1e-7, err_msg=msg)

        length = np.empty(len(vr))
        for i in range(len(vr)):
            length[i] = norm(vr[:, i])
        assert_allclose(length, np.ones(length.size), err_msg=msg)

        # Compare homogeneous and nonhomogeneous versions
        assert_allclose(sort(wh), sort(w[np.isfinite(w)]))

    @pytest.mark.xfail(reason="See gh-2254")
    def test_singular(self):
        # Example taken from
        # https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
        A = array([[22, 34, 31, 31, 17],
                   [45, 45, 42, 19, 29],
                   [39, 47, 49, 26, 34],
                   [27, 31, 26, 21, 15],
                   [38, 44, 44, 24, 30]])
        B = array([[13, 26, 25, 17, 24],
                   [31, 46, 40, 26, 37],
                   [26, 40, 19, 25, 25],
                   [16, 25, 27, 14, 23],
                   [24, 35, 18, 21, 22]])

        with np.errstate(all='ignore'):
            self._check_gen_eig(A, B)

    def test_falker(self):
        # Test matrices giving some Nan generalized eigenvalues.
        M = diag(array(([1, 0, 3])))
        K = array(([2, -1, -1], [-1, 2, -1], [-1, -1, 2]))
        D = array(([1, -1, 0], [-1, 1, 0], [0, 0, 0]))
        Z = zeros((3, 3))
        I3 = eye(3)
        A = np.block([[I3, Z], [Z, -K]])
        B = np.block([[Z, I3], [M, D]])

        with np.errstate(all='ignore'):
            self._check_gen_eig(A, B)

    def test_bad_geneig(self):
        # Ticket #709 (strange return values from DGGEV)

        def matrices(omega):
            c1 = -9 + omega**2
            c2 = 2*omega
            A = [[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, c1, 0],
                 [0, 0, 0, c1]]
            B = [[0, 0, 1, 0],
                 [0, 0, 0, 1],
                 [1, 0, 0, -c2],
                 [0, 1, c2, 0]]
            return A, B

        # With a buggy LAPACK, this can fail for different omega on different
        # machines -- so we need to test several values
        with np.errstate(all='ignore'):
            for k in range(100):
                A, B = matrices(omega=k*5./100)
                self._check_gen_eig(A, B)

    def test_make_eigvals(self):
        # Step through all paths in _make_eigvals
        seed(1234)
        # Real eigenvalues
        A = symrand(3)
        self._check_gen_eig(A, None)
        B = symrand(3)
        self._check_gen_eig(A, B)
        # Complex eigenvalues
        A = random((3, 3)) + 1j*random((3, 3))
        self._check_gen_eig(A, None)
        B = random((3, 3)) + 1j*random((3, 3))
        self._check_gen_eig(A, B)

    def test_check_finite(self):
        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        w, v = eig(a, check_finite=False)
        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        v0 = array([1, 1, (1+sqrt(93)/3)/2])
        v1 = array([3., 0, -1])
        v2 = array([1, 1, (1-sqrt(93)/3)/2])
        v0 = v0 / norm(v0)
        v1 = v1 / norm(v1)
        v2 = v2 / norm(v2)
        assert_array_almost_equal(w, exact_w)
        assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
        assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
        assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
        for i in range(3):
            assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])

    def test_not_square_error(self):
        """Check that passing a non-square array raises a ValueError."""
        A = np.arange(6).reshape(3, 2)
        assert_raises(ValueError, eig, A)

    def test_shape_mismatch(self):
        """Check that passing arrays of with different shapes
        raises a ValueError."""
        A = eye(2)
        B = np.arange(9.0).reshape(3, 3)
        assert_raises(ValueError, eig, A, B)
        assert_raises(ValueError, eig, B, A)
+
+
class TestEigBanded(object):
    """Tests for banded eigensolvers and banded LU routines
    (?sbev*/?hbev*, ?gbtrf/?gbtrs), validated against dense
    numpy.linalg results on equivalent full matrices."""

    def setup_method(self):
        self.create_bandmat()

    def create_bandmat(self):
        """Create the full matrix `self.fullmat` and
        the corresponding band matrix `self.bandmat`."""
        N = 10
        self.KL = 2  # number of subdiagonals (below the diagonal)
        self.KU = 2  # number of superdiagonals (above the diagonal)

        # symmetric band matrix
        self.sym_mat = (diag(full(N, 1.0))
                        + diag(full(N-1, -1.0), -1) + diag(full(N-1, -1.0), 1)
                        + diag(full(N-2, -2.0), -2) + diag(full(N-2, -2.0), 2))

        # hermitian band matrix
        self.herm_mat = (diag(full(N, -1.0))
                         + 1j*diag(full(N-1, 1.0), -1)
                         - 1j*diag(full(N-1, 1.0), 1)
                         + diag(full(N-2, -2.0), -2)
                         + diag(full(N-2, -2.0), 2))

        # general real band matrix
        self.real_mat = (diag(full(N, 1.0))
                         + diag(full(N-1, -1.0), -1) + diag(full(N-1, -3.0), 1)
                         + diag(full(N-2, 2.0), -2) + diag(full(N-2, -2.0), 2))

        # general complex band matrix
        self.comp_mat = (1j*diag(full(N, 1.0))
                         + diag(full(N-1, -1.0), -1)
                         + 1j*diag(full(N-1, -3.0), 1)
                         + diag(full(N-2, 2.0), -2)
                         + diag(full(N-2, -2.0), 2))

        # Eigenvalues and -vectors from linalg.eig
        ew, ev = linalg.eig(self.sym_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_sym_lin = ew[args]
        self.evec_sym_lin = ev[:, args]

        ew, ev = linalg.eig(self.herm_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_herm_lin = ew[args]
        self.evec_herm_lin = ev[:, args]

        # Extract upper bands from symmetric and hermitian band matrices
        # (for use in dsbevd, dsbevx, zhbevd, zhbevx
        # and their single precision versions)
        LDAB = self.KU + 1
        self.bandmat_sym = zeros((LDAB, N), dtype=float)
        self.bandmat_herm = zeros((LDAB, N), dtype=complex)
        for i in range(LDAB):
            self.bandmat_sym[LDAB-i-1, i:N] = diag(self.sym_mat, i)
            self.bandmat_herm[LDAB-i-1, i:N] = diag(self.herm_mat, i)

        # Extract bands from general real and complex band matrix
        # (for use in dgbtrf, dgbtrs and their single precision versions)
        LDAB = 2*self.KL + self.KU + 1
        self.bandmat_real = zeros((LDAB, N), dtype=float)
        self.bandmat_real[2*self.KL, :] = diag(self.real_mat)  # diagonal
        for i in range(self.KL):
            # superdiagonals
            self.bandmat_real[2*self.KL-1-i, i+1:N] = diag(self.real_mat, i+1)
            # subdiagonals
            self.bandmat_real[2*self.KL+1+i, 0:N-1-i] = diag(self.real_mat,
                                                             -i-1)

        self.bandmat_comp = zeros((LDAB, N), dtype=complex)
        self.bandmat_comp[2*self.KL, :] = diag(self.comp_mat)  # diagonal
        for i in range(self.KL):
            # superdiagonals
            self.bandmat_comp[2*self.KL-1-i, i+1:N] = diag(self.comp_mat, i+1)
            # subdiagonals
            self.bandmat_comp[2*self.KL+1+i, 0:N-1-i] = diag(self.comp_mat,
                                                             -i-1)

        # absolute value for linear equation system A*x = b
        self.b = 1.0*arange(N)
        self.bc = self.b * (1 + 1j)

    #####################################################################

    def test_dsbev(self):
        """Compare dsbev eigenvalues and eigenvectors with
        the result of linalg.eig."""
        w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_dsbevd(self):
        """Compare dsbevd eigenvalues and eigenvectors with
        the result of linalg.eig."""
        w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_dsbevx(self):
        """Compare dsbevx eigenvalues and eigenvectors
        with the result of linalg.eig."""
        N, N = shape(self.sym_mat)
        # NOTE(review, translated from German): are the arguments
        # 0.0, 0.0 and range=2 correct here? (range=2 selects by index.)
        w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_zhbevd(self):
        """Compare zhbevd eigenvalues and eigenvectors
        with the result of linalg.eig."""
        w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_zhbevx(self):
        """Compare zhbevx eigenvalues and eigenvectors
        with the result of linalg.eig."""
        N, N = shape(self.herm_mat)
        # NOTE(review, translated from German): are the arguments
        # 0.0, 0.0 and range=2 correct here? (range=2 selects by index.)
        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_eigvals_banded(self):
        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
        w_sym = eigvals_banded(self.bandmat_sym)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

        w_herm = eigvals_banded(self.bandmat_herm)
        w_herm = w_herm.real
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)

        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = np.longlong(6)
        w_sym_ind = eigvals_banded(self.bandmat_sym,
                                   select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        w_herm_ind = eigvals_banded(self.bandmat_herm,
                                    select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])

        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val = eigvals_banded(self.bandmat_sym,
                                   select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])

        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val = eigvals_banded(self.bandmat_herm,
                                    select='v',
                                    select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])

        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

    def test_eig_banded(self):
        """Compare eigenvalues and eigenvectors of eig_banded
        with those of linalg.eig. """
        w_sym, evec_sym = eig_banded(self.bandmat_sym)
        evec_sym_ = evec_sym[:, argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))

        w_herm, evec_herm = eig_banded(self.bandmat_herm)
        evec_herm_ = evec_herm[:, argsort(w_herm.real)]
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))

        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
                                             select='i',
                                             select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_ind),
                                  abs(self.evec_sym_lin[:, ind1:ind2+1]))

        w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
                                               select='i',
                                               select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_ind),
                                  abs(self.evec_herm_lin[:, ind1:ind2+1]))

        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
                                             select='v',
                                             select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_val),
                                  abs(self.evec_sym_lin[:, ind1:ind2+1]))

        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
                                               select='v',
                                               select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_val),
                                  abs(self.evec_herm_lin[:, ind1:ind2+1]))

        w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
        evec_sym_ = evec_sym[:, argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))

    def test_dgbtrf(self):
        """Compare dgbtrf LU factorisation with the LU factorisation result
        of linalg.lu."""
        M, N = shape(self.real_mat)
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)

        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL, :])
        for i in range(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1)

        p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_zgbtrf(self):
        """Compare zgbtrf LU factorisation with the LU factorisation result
        of linalg.lu."""
        M, N = shape(self.comp_mat)
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)

        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL, :])
        for i in range(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1)

        p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_dgbtrs(self):
        """Compare dgbtrs solutions for linear equation system A*x = b
        with solutions of linalg.solve."""

        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)

        y_lin = linalg.solve(self.real_mat, self.b)
        assert_array_almost_equal(y, y_lin)

    def test_zgbtrs(self):
        """Compare zgbtrs solutions for linear equation system A*x = b
        with solutions of linalg.solve."""

        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)

        y_lin = linalg.solve(self.comp_mat, self.bc)
        assert_array_almost_equal(y, y_lin)
+
+
class TestEigTridiagonal(object):
    """Tests for eigvalsh_tridiagonal/eigh_tridiagonal across all LAPACK
    drivers, validated against dense linalg.eig on the full matrix."""

    def setup_method(self):
        self.create_trimat()

    def create_trimat(self):
        """Create the full matrix `self.fullmat`, `self.d`, and `self.e`."""
        N = 10

        # symmetric band matrix
        self.d = full(N, 1.0)
        self.e = full(N-1, -1.0)
        self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1))

        # Reference eigenvalues/-vectors, sorted ascending.
        ew, ev = linalg.eig(self.full_mat)
        ew = ew.real
        args = argsort(ew)
        self.w = ew[args]
        self.evec = ev[:, args]

    def test_degenerate(self):
        """Test error conditions."""
        # Wrong sizes
        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1])
        # Must be real
        assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j)
        # Bad driver
        assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e,
                      lapack_driver=1.)
        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
                      lapack_driver='foo')
        # Bad bounds
        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
                      select='i', select_range=(0, -1))

    def test_eigvalsh_tridiagonal(self):
        """Compare eigenvalues of eigvalsh_tridiagonal with those of eig."""
        # can't use ?STERF with subselection
        for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'):
            w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w)

        for driver in ('sterf', 'stev'):
            assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
                          lapack_driver='stev', select='i',
                          select_range=(0, 1))
        for driver in ('stebz', 'stemr', 'auto'):
            # extracting eigenvalues with respect to the full index range
            w_ind = eigvalsh_tridiagonal(
                self.d, self.e, select='i', select_range=(0, len(self.d)-1),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w_ind), self.w)

            # extracting eigenvalues with respect to an index range
            ind1 = 2
            ind2 = 6
            w_ind = eigvalsh_tridiagonal(
                self.d, self.e, select='i', select_range=(ind1, ind2),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1])

            # extracting eigenvalues with respect to a value range
            v_lower = self.w[ind1] - 1.0e-5
            v_upper = self.w[ind2] + 1.0e-5
            w_val = eigvalsh_tridiagonal(
                self.d, self.e, select='v', select_range=(v_lower, v_upper),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1])

    def test_eigh_tridiagonal(self):
        """Compare eigenvalues and eigenvectors of eigh_tridiagonal
        with those of eig. """
        # can't use ?STERF when eigenvectors are requested
        assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
                      lapack_driver='sterf')
        for driver in ('stebz', 'stev', 'stemr', 'auto'):
            w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver)
            evec_ = evec[:, argsort(w)]
            assert_array_almost_equal(sort(w), self.w)
            # Eigenvectors compared up to sign via abs().
            assert_array_almost_equal(abs(evec_), abs(self.evec))

        assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
                      lapack_driver='stev', select='i', select_range=(0, 1))
        for driver in ('stebz', 'stemr', 'auto'):
            # extracting eigenvalues with respect to an index range
            ind1 = 0
            ind2 = len(self.d)-1
            w, evec = eigh_tridiagonal(
                self.d, self.e, select='i', select_range=(ind1, ind2),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w)
            assert_array_almost_equal(abs(evec), abs(self.evec))
            ind1 = 2
            ind2 = 6
            w, evec = eigh_tridiagonal(
                self.d, self.e, select='i', select_range=(ind1, ind2),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
            assert_array_almost_equal(abs(evec),
                                      abs(self.evec[:, ind1:ind2+1]))

            # extracting eigenvalues with respect to a value range
            v_lower = self.w[ind1] - 1.0e-5
            v_upper = self.w[ind2] + 1.0e-5
            w, evec = eigh_tridiagonal(
                self.d, self.e, select='v', select_range=(v_lower, v_upper),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
            assert_array_almost_equal(abs(evec),
                                      abs(self.evec[:, ind1:ind2+1]))
+
+
class TestEigh:
    """Tests for eigh/eigvalsh: argument validation, subset selection,
    all LAPACK drivers, and the legacy (turbo/eigvals) keyword API.

    Uses the module-level helper ``_random_hermitian_matrix``.
    """

    # NOTE(review): pytest passes the class object to setup_class; the
    # parameter name `self` is misleading but harmless here.
    def setup_class(self):
        seed(1234)

    def test_wrong_inputs(self):
        # Nonsquare a
        assert_raises(ValueError, eigh, np.ones([1, 2]))
        # Nonsquare b
        assert_raises(ValueError, eigh, np.ones([2, 2]), np.ones([2, 1]))
        # Incompatible a, b sizes
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([2, 2]))
        # Wrong type parameter for generalized problem
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      type=4)
        # Both value and index subsets requested
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_value=[1, 2], eigvals=[2, 4])
        # Invalid upper index spec
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      eigvals=[0, 4])
        # Invalid lower index
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      eigvals=[-2, 2])
        # Invalid index spec #2
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      eigvals=[2, 0])
        # Invalid value spec
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_value=[2, 0])
        # Invalid driver name
        assert_raises(ValueError, eigh, np.ones([2, 2]), driver='wrong')
        # Generalized driver selection without b
        assert_raises(ValueError, eigh, np.ones([3, 3]), None, driver='gvx')
        # Standard driver with b
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      driver='evr', turbo=False)
        # Subset request from invalid driver
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      driver='gvd', eigvals=[1, 2], turbo=False)

    def test_nonpositive_b(self):
        # The all-ones b is singular, hence not positive definite.
        assert_raises(LinAlgError, eigh, np.ones([3, 3]), np.ones([3, 3]))

    # index based subsets are done in the legacy test_eigh()
    def test_value_subsets(self):
        for ind, dt in enumerate(DTYPES):

            a = _random_hermitian_matrix(20, dtype=dt)
            w, v = eigh(a, subset_by_value=[-2, 2])
            assert_equal(v.shape[1], len(w))
            assert all((w > -2) & (w < 2))

            b = _random_hermitian_matrix(20, posdef=True, dtype=dt)
            w, v = eigh(a, b, subset_by_value=[-2, 2])
            assert_equal(v.shape[1], len(w))
            assert all((w > -2) & (w < 2))

    def test_eigh_integer(self):
        # Integer input must be accepted (internally upcast).
        a = array([[1, 2], [2, 7]])
        b = array([[3, 1], [1, 5]])
        w, z = eigh(a)
        w, z = eigh(a, b)

    def test_eigh_of_sparse(self):
        # This tests the rejection of inputs that eigh cannot currently handle.
        import scipy.sparse
        a = scipy.sparse.identity(2).tocsc()
        b = np.atleast_2d(a)
        assert_raises(ValueError, eigh, a)
        assert_raises(ValueError, eigh, b)

    @pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx"))
    def test_various_drivers_standard(self, driver):
        a = _random_hermitian_matrix(20)
        w, v = eigh(a, driver=driver)
        # Residual of the eigen-decomposition: a @ v == v * w.
        assert_allclose(a @ v - (v * w), 0., atol=1000*np.spacing(1.), rtol=0.)

    @pytest.mark.parametrize('type', (1, 2, 3))
    @pytest.mark.parametrize('driver', ("gv", "gvd", "gvx"))
    def test_various_drivers_generalized(self, driver, type):
        atol = np.spacing(5000.)
        a = _random_hermitian_matrix(20)
        b = _random_hermitian_matrix(20, posdef=True)
        w, v = eigh(a=a, b=b, driver=driver, type=type)
        # Each LAPACK problem type solves a different pencil equation.
        if type == 1:
            assert_allclose(a @ v - w*(b @ v), 0., atol=atol, rtol=0.)
        elif type == 2:
            assert_allclose(a @ b @ v - v * w, 0., atol=atol, rtol=0.)
        else:
            assert_allclose(b @ a @ v - v * w, 0., atol=atol, rtol=0.)

    # Old eigh tests kept for backwards compatibility
    @pytest.mark.parametrize('eigvals', (None, (2, 4)))
    @pytest.mark.parametrize('turbo', (True, False))
    @pytest.mark.parametrize('lower', (True, False))
    @pytest.mark.parametrize('overwrite', (True, False))
    @pytest.mark.parametrize('dtype_', ('f', 'd', 'F', 'D'))
    @pytest.mark.parametrize('dim', (6,))
    def test_eigh(self, dim, dtype_, overwrite, lower, turbo, eigvals):
        atol = 1e-11 if dtype_ in ('dD') else 1e-4
        a = _random_hermitian_matrix(n=dim, dtype=dtype_)
        w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigvals)
        assert_dtype_equal(z.dtype, dtype_)
        w = w.astype(dtype_)
        # z.T.conj() @ a @ z should be diag(w) for orthonormal eigenvectors.
        diag_ = diag(z.T.conj() @ a @ z).real
        assert_allclose(diag_, w, rtol=0., atol=atol)

        a = _random_hermitian_matrix(n=dim, dtype=dtype_)
        b = _random_hermitian_matrix(n=dim, dtype=dtype_, posdef=True)
        w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,
                    overwrite_b=overwrite, turbo=turbo, eigvals=eigvals)
        assert_dtype_equal(z.dtype, dtype_)
        w = w.astype(dtype_)
        diag1_ = diag(z.T.conj() @ a @ z).real
        assert_allclose(diag1_, w, rtol=0., atol=atol)
        # Generalized eigenvectors are b-orthonormal: z.H @ b @ z == I.
        diag2_ = diag(z.T.conj() @ b @ z).real
        assert_allclose(diag2_, ones(diag2_.shape[0]), rtol=0., atol=atol)

    def test_eigvalsh_new_args(self):
        a = _random_hermitian_matrix(5)
        w = eigvalsh(a, eigvals=[1, 2])
        assert_equal(len(w), 2)

        # subset_by_index is the new spelling of the legacy eigvals kwarg.
        w2 = eigvalsh(a, subset_by_index=[1, 2])
        assert_equal(len(w2), 2)
        assert_allclose(w, w2)

        b = np.diag([1, 1.2, 1.3, 1.5, 2])
        w3 = eigvalsh(b, subset_by_value=[1, 1.4])
        assert_equal(len(w3), 2)
        assert_allclose(w3, np.array([1.2, 1.3]))
+
+
class TestLU(object):
    """Round-trip tests for ``lu``: reconstruct the input from its factors."""

    def setup_method(self):
        self.a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
        self.ca = array([[1, 2, 3], [1, 2, 3], [2, 5j, 6]])
        # These matrices are more sensitive to mistakes in the permutation
        # matrix than the ones above.
        self.b = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        self.cb = array([[1j, 2j, 3j], [4j, 5j, 6j], [7j, 8j, 9j]])

        # Rectangular matrices
        self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.chrect = 1.j * array([[1, 2, 3, 4],
                                   [5, 6, 7, 8],
                                   [9, 10, 12, 12]])

        self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        self.cvrect = 1.j * array([[1, 2, 3],
                                   [4, 5, 6],
                                   [7, 8, 9],
                                   [10, 12, 12]])

        # Medium sized matrices
        self.med = random((30, 40))
        self.cmed = random((30, 40)) + 1.j * random((30, 40))

    def _test_common(self, data):
        # Full factorization: p @ l @ u must reproduce the input ...
        perm, lower, upper = lu(data)
        assert_array_almost_equal(perm @ lower @ upper, data)
        # ... and so must the pre-permuted variant.
        perm_lower, upper = lu(data, permute_l=1)
        assert_array_almost_equal(perm_lower @ upper, data)

    # Simple tests
    def test_simple(self):
        self._test_common(self.a)

    def test_simple_complex(self):
        self._test_common(self.ca)

    def test_simple2(self):
        self._test_common(self.b)

    def test_simple2_complex(self):
        self._test_common(self.cb)

    # rectangular matrices tests
    def test_hrectangular(self):
        self._test_common(self.hrect)

    def test_vrectangular(self):
        self._test_common(self.vrect)

    def test_hrectangular_complex(self):
        self._test_common(self.chrect)

    def test_vrectangular_complex(self):
        self._test_common(self.cvrect)

    # Bigger matrices
    def test_medium1(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.med)

    def test_medium1_complex(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.cmed)

    def test_check_finite(self):
        perm, lower, upper = lu(self.a, check_finite=False)
        assert_array_almost_equal(perm @ lower @ upper, self.a)

    def test_simple_known(self):
        # Ticket #1458: the result must not depend on the memory layout.
        for order in ['C', 'F']:
            A = np.array([[2, 1], [0, 1.]], order=order)
            LU, P = lu_factor(A)
            assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
            assert_array_equal(P, np.array([0, 1]))
+
+
class TestLUSingle(TestLU):
    """LU testers for single precision, real and double"""

    def setup_method(self):
        TestLU.setup_method(self)

        # Downcast every matrix prepared by the parent class so the same
        # tests run in single precision.
        self.a = self.a.astype(float32)
        self.ca = self.ca.astype(complex64)
        self.b = self.b.astype(float32)
        self.cb = self.cb.astype(complex64)

        self.hrect = self.hrect.astype(float32)
        # BUG FIX: these were cast from the *real* matrices (hrect/vrect),
        # discarding the parent's imaginary-valued test data.
        self.chrect = self.chrect.astype(complex64)

        self.vrect = self.vrect.astype(float32)
        self.cvrect = self.cvrect.astype(complex64)

        # BUG FIX: med/cmed were overwritten with vrect (a 4x3 matrix), so
        # test_medium1/test_medium1_complex no longer exercised a medium
        # 30x40 matrix as their docstrings claim.
        self.med = self.med.astype(float32)
        self.cmed = self.cmed.astype(complex64)
+
+
+class TestLUSolve(object):
+ def setup_method(self):
+ seed(1234)
+
+ def test_lu(self):
+ a0 = random((10, 10))
+ b = random((10,))
+
+ for order in ['C', 'F']:
+ a = np.array(a0, order=order)
+ x1 = solve(a, b)
+ lu_a = lu_factor(a)
+ x2 = lu_solve(lu_a, b)
+ assert_array_almost_equal(x1, x2)
+
+ def test_check_finite(self):
+ a = random((10, 10))
+ b = random((10,))
+ x1 = solve(a, b)
+ lu_a = lu_factor(a, check_finite=False)
+ x2 = lu_solve(lu_a, b, check_finite=False)
+ assert_array_almost_equal(x1, x2)
+
+
+class TestSVD_GESDD(object):
+ def setup_method(self):
+ self.lapack_driver = 'gesdd'
+ seed(1234)
+
+ def test_degenerate(self):
+ assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
+ assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')
+
+ def test_simple(self):
+ a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]]
+ for full_matrices in (True, False):
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.T @ u, eye(3))
+ assert_array_almost_equal(vh.T @ vh, eye(3))
+ sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_simple_singular(self):
+ a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+ for full_matrices in (True, False):
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.T @ u, eye(3))
+ assert_array_almost_equal(vh.T @ vh, eye(3))
+ sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_simple_underdet(self):
+ a = [[1, 2, 3], [4, 5, 6]]
+ for full_matrices in (True, False):
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.T @ u, eye(u.shape[0]))
+ sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_simple_overdet(self):
+ a = [[1, 2], [4, 5], [3, 4]]
+ for full_matrices in (True, False):
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.T @ u, eye(u.shape[1]))
+ assert_array_almost_equal(vh.T @ vh, eye(2))
+ sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_random(self):
+ n = 20
+ m = 15
+ for i in range(3):
+ for a in [random([n, m]), random([m, n])]:
+ for full_matrices in (True, False):
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.T @ u, eye(u.shape[1]))
+ assert_array_almost_equal(vh @ vh.T, eye(vh.shape[0]))
+ sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_simple_complex(self):
+ a = [[1, 2, 3], [1, 2j, 3], [2, 5, 6]]
+ for full_matrices in (True, False):
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.conj().T @ u, eye(u.shape[1]))
+ assert_array_almost_equal(vh.conj().T @ vh, eye(vh.shape[0]))
+ sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_random_complex(self):
+ n = 20
+ m = 15
+ for i in range(3):
+ for full_matrices in (True, False):
+ for a in [random([n, m]), random([m, n])]:
+ a = a + 1j*random(list(a.shape))
+ u, s, vh = svd(a, full_matrices=full_matrices,
+ lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.conj().T @ u,
+ eye(u.shape[1]))
+ # This fails when [m,n]
+ # assert_array_almost_equal(vh.conj().T @ vh,
+ # eye(len(vh),dtype=vh.dtype.char))
+ sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_crash_1580(self):
+ sizes = [(13, 23), (30, 50), (60, 100)]
+ np.random.seed(1234)
+ for sz in sizes:
+ for dt in [np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.random.rand(*sz).astype(dt)
+ # should not crash
+ svd(a, lapack_driver=self.lapack_driver)
+
+ def test_check_finite(self):
+ a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]]
+ u, s, vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
+ assert_array_almost_equal(u.T @ u, eye(3))
+ assert_array_almost_equal(vh.T @ vh, eye(3))
+ sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+ for i in range(len(s)):
+ sigma[i, i] = s[i]
+ assert_array_almost_equal(u @ sigma @ vh, a)
+
+ def test_gh_5039(self):
+ # This is a smoke test for https://github.com/scipy/scipy/issues/5039
+ #
+ # The following is reported to raise "ValueError: On entry to DGESDD
+ # parameter number 12 had an illegal value".
+ # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`
+ # This is reported to only show up on LAPACK 3.0.3.
+ #
+ # The matrix below is taken from the call to
+ # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest
+ b = np.array(
+ [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
+ [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
+ [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
+ [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
+ svd(b, lapack_driver=self.lapack_driver)
+
+ @pytest.mark.skipif(not HAS_ILP64, reason="64-bit LAPACK required")
+ @pytest.mark.slow
+ def test_large_matrix(self):
+ check_free_memory(free_mb=17000)
+ A = np.zeros([1, 2**31], dtype=np.float32)
+ A[0, -1] = 1
+ u, s, vh = svd(A, full_matrices=False)
+ assert_allclose(s[0], 1.0)
+ assert_allclose(u[0, 0] * vh[0, -1], 1.0)
+
+
+class TestSVD_GESVD(TestSVD_GESDD):
+ def setup_method(self):
+ self.lapack_driver = 'gesvd'
+ seed(1234)
+
+
+class TestSVDVals(object):
+
+ def test_empty(self):
+ for a in [[]], np.empty((2, 0)), np.ones((0, 3)):
+ s = svdvals(a)
+ assert_equal(s, np.empty(0))
+
+ def test_simple(self):
+ a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+ s = svdvals(a)
+ assert_(len(s) == 3)
+ assert_(s[0] >= s[1] >= s[2])
+
+ def test_simple_underdet(self):
+ a = [[1, 2, 3], [4, 5, 6]]
+ s = svdvals(a)
+ assert_(len(s) == 2)
+ assert_(s[0] >= s[1])
+
+ def test_simple_overdet(self):
+ a = [[1, 2], [4, 5], [3, 4]]
+ s = svdvals(a)
+ assert_(len(s) == 2)
+ assert_(s[0] >= s[1])
+
+ def test_simple_complex(self):
+ a = [[1, 2, 3], [1, 20, 3j], [2, 5, 6]]
+ s = svdvals(a)
+ assert_(len(s) == 3)
+ assert_(s[0] >= s[1] >= s[2])
+
+ def test_simple_underdet_complex(self):
+ a = [[1, 2, 3], [4, 5j, 6]]
+ s = svdvals(a)
+ assert_(len(s) == 2)
+ assert_(s[0] >= s[1])
+
+ def test_simple_overdet_complex(self):
+ a = [[1, 2], [4, 5], [3j, 4]]
+ s = svdvals(a)
+ assert_(len(s) == 2)
+ assert_(s[0] >= s[1])
+
+ def test_check_finite(self):
+ a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+ s = svdvals(a, check_finite=False)
+ assert_(len(s) == 3)
+ assert_(s[0] >= s[1] >= s[2])
+
+ @pytest.mark.slow
+ def test_crash_2609(self):
+ np.random.seed(1234)
+ a = np.random.rand(1500, 2800)
+ # Shouldn't crash:
+ svdvals(a)
+
+
+class TestDiagSVD(object):
+
+ def test_simple(self):
+ assert_array_almost_equal(diagsvd([1, 0, 0], 3, 3),
+ [[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+
+class TestQR(object):
+
+ def setup_method(self):
+ seed(1234)
+
+ def test_simple(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_simple_left(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ q, r = qr(a)
+ c = [1, 2, 3]
+ qc, r2 = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ assert_array_almost_equal(r, r2)
+ qc, r2 = qr_multiply(a, eye(3), "left")
+ assert_array_almost_equal(q, qc)
+
+ def test_simple_right(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ q, r = qr(a)
+ c = [1, 2, 3]
+ qc, r2 = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, qc)
+ assert_array_almost_equal(r, r2)
+ qc, r = qr_multiply(a, eye(3))
+ assert_array_almost_equal(q, qc)
+
+ def test_simple_pivoting(self):
+ a = np.asarray([[8, 2, 3], [2, 9, 3], [5, 3, 6]])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_left_pivoting(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ q, r, jpvt = qr(a, pivoting=True)
+ c = [1, 2, 3]
+ qc, r, jpvt = qr_multiply(a, c, "left", True)
+ assert_array_almost_equal(q @ c, qc)
+
+ def test_simple_right_pivoting(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ q, r, jpvt = qr(a, pivoting=True)
+ c = [1, 2, 3]
+ qc, r, jpvt = qr_multiply(a, c, pivoting=True)
+ assert_array_almost_equal(c @ q, qc)
+
+ def test_simple_trap(self):
+ a = [[8, 2, 3], [2, 9, 3]]
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_simple_trap_pivoting(self):
+ a = np.asarray([[8, 2, 3], [2, 9, 3]])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_tall(self):
+ # full version
+ a = [[8, 2], [2, 9], [5, 3]]
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_simple_tall_pivoting(self):
+ # full version pivoting
+ a = np.asarray([[8, 2], [2, 9], [5, 3]])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_tall_e(self):
+ # economy version
+ a = [[8, 2], [2, 9], [5, 3]]
+ q, r = qr(a, mode='economic')
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a)
+ assert_equal(q.shape, (3, 2))
+ assert_equal(r.shape, (2, 2))
+
+ def test_simple_tall_e_pivoting(self):
+ # economy version pivoting
+ a = np.asarray([[8, 2], [2, 9], [5, 3]])
+ q, r, p = qr(a, pivoting=True, mode='economic')
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p], mode='economic')
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_tall_left(self):
+ a = [[8, 2], [2, 9], [5, 3]]
+ q, r = qr(a, mode="economic")
+ c = [1, 2]
+ qc, r2 = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ assert_array_almost_equal(r, r2)
+ c = array([1, 2, 0])
+ qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
+ assert_array_almost_equal(q @ c[:2], qc)
+ qc, r = qr_multiply(a, eye(2), "left")
+ assert_array_almost_equal(qc, q)
+
+ def test_simple_tall_left_pivoting(self):
+ a = [[8, 2], [2, 9], [5, 3]]
+ q, r, jpvt = qr(a, mode="economic", pivoting=True)
+ c = [1, 2]
+ qc, r, kpvt = qr_multiply(a, c, "left", True)
+ assert_array_equal(jpvt, kpvt)
+ assert_array_almost_equal(q @ c, qc)
+ qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
+ assert_array_almost_equal(qc, q)
+
+ def test_simple_tall_right(self):
+ a = [[8, 2], [2, 9], [5, 3]]
+ q, r = qr(a, mode="economic")
+ c = [1, 2, 3]
+ cq, r2 = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, cq)
+ assert_array_almost_equal(r, r2)
+ cq, r = qr_multiply(a, eye(3))
+ assert_array_almost_equal(cq, q)
+
+ def test_simple_tall_right_pivoting(self):
+ a = [[8, 2], [2, 9], [5, 3]]
+ q, r, jpvt = qr(a, pivoting=True, mode="economic")
+ c = [1, 2, 3]
+ cq, r, jpvt = qr_multiply(a, c, pivoting=True)
+ assert_array_almost_equal(c @ q, cq)
+ cq, r, jpvt = qr_multiply(a, eye(3), pivoting=True)
+ assert_array_almost_equal(cq, q)
+
+ def test_simple_fat(self):
+ # full version
+ a = [[8, 2, 5], [2, 9, 3]]
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a)
+ assert_equal(q.shape, (2, 2))
+ assert_equal(r.shape, (2, 3))
+
+ def test_simple_fat_pivoting(self):
+ # full version pivoting
+ a = np.asarray([[8, 2, 5], [2, 9, 3]])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a[:, p])
+ assert_equal(q.shape, (2, 2))
+ assert_equal(r.shape, (2, 3))
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_fat_e(self):
+ # economy version
+ a = [[8, 2, 3], [2, 9, 5]]
+ q, r = qr(a, mode='economic')
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a)
+ assert_equal(q.shape, (2, 2))
+ assert_equal(r.shape, (2, 3))
+
+ def test_simple_fat_e_pivoting(self):
+ # economy version pivoting
+ a = np.asarray([[8, 2, 3], [2, 9, 5]])
+ q, r, p = qr(a, pivoting=True, mode='economic')
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(q @ r, a[:, p])
+ assert_equal(q.shape, (2, 2))
+ assert_equal(r.shape, (2, 3))
+ q2, r2 = qr(a[:, p], mode='economic')
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_fat_left(self):
+ a = [[8, 2, 3], [2, 9, 5]]
+ q, r = qr(a, mode="economic")
+ c = [1, 2]
+ qc, r2 = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ assert_array_almost_equal(r, r2)
+ qc, r = qr_multiply(a, eye(2), "left")
+ assert_array_almost_equal(qc, q)
+
+ def test_simple_fat_left_pivoting(self):
+ a = [[8, 2, 3], [2, 9, 5]]
+ q, r, jpvt = qr(a, mode="economic", pivoting=True)
+ c = [1, 2]
+ qc, r, jpvt = qr_multiply(a, c, "left", True)
+ assert_array_almost_equal(q @ c, qc)
+ qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
+ assert_array_almost_equal(qc, q)
+
+ def test_simple_fat_right(self):
+ a = [[8, 2, 3], [2, 9, 5]]
+ q, r = qr(a, mode="economic")
+ c = [1, 2]
+ cq, r2 = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, cq)
+ assert_array_almost_equal(r, r2)
+ cq, r = qr_multiply(a, eye(2))
+ assert_array_almost_equal(cq, q)
+
+ def test_simple_fat_right_pivoting(self):
+ a = [[8, 2, 3], [2, 9, 5]]
+ q, r, jpvt = qr(a, pivoting=True, mode="economic")
+ c = [1, 2]
+ cq, r, jpvt = qr_multiply(a, c, pivoting=True)
+ assert_array_almost_equal(c @ q, cq)
+ cq, r, jpvt = qr_multiply(a, eye(2), pivoting=True)
+ assert_array_almost_equal(cq, q)
+
+ def test_simple_complex(self):
+ a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+ q, r = qr(a)
+ assert_array_almost_equal(q.conj().T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_simple_complex_left(self):
+ a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+ q, r = qr(a)
+ c = [1, 2, 3+4j]
+ qc, r = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ qc, r = qr_multiply(a, eye(3), "left")
+ assert_array_almost_equal(q, qc)
+
+ def test_simple_complex_right(self):
+ a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+ q, r = qr(a)
+ c = [1, 2, 3+4j]
+ qc, r = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, qc)
+ qc, r = qr_multiply(a, eye(3))
+ assert_array_almost_equal(q, qc)
+
+ def test_simple_tall_complex_left(self):
+ a = [[8, 2+3j], [2, 9], [5+7j, 3]]
+ q, r = qr(a, mode="economic")
+ c = [1, 2+2j]
+ qc, r2 = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ assert_array_almost_equal(r, r2)
+ c = array([1, 2, 0])
+ qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
+ assert_array_almost_equal(q @ c[:2], qc)
+ qc, r = qr_multiply(a, eye(2), "left")
+ assert_array_almost_equal(qc, q)
+
+ def test_simple_complex_left_conjugate(self):
+ a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+ q, r = qr(a)
+ c = [1, 2, 3+4j]
+ qc, r = qr_multiply(a, c, "left", conjugate=True)
+ assert_array_almost_equal(q.conj() @ c, qc)
+
+ def test_simple_complex_tall_left_conjugate(self):
+ a = [[3, 3+4j], [5, 2+2j], [3, 2]]
+ q, r = qr(a, mode='economic')
+ c = [1, 3+4j]
+ qc, r = qr_multiply(a, c, "left", conjugate=True)
+ assert_array_almost_equal(q.conj() @ c, qc)
+
+ def test_simple_complex_right_conjugate(self):
+ a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+ q, r = qr(a)
+ c = np.array([1, 2, 3+4j])
+ qc, r = qr_multiply(a, c, conjugate=True)
+ assert_array_almost_equal(c @ q.conj(), qc)
+
+ def test_simple_complex_pivoting(self):
+ a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.conj().T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_simple_complex_left_pivoting(self):
+ a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
+ q, r, jpvt = qr(a, pivoting=True)
+ c = [1, 2, 3+4j]
+ qc, r, jpvt = qr_multiply(a, c, "left", True)
+ assert_array_almost_equal(q @ c, qc)
+
+ def test_simple_complex_right_pivoting(self):
+ a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
+ q, r, jpvt = qr(a, pivoting=True)
+ c = [1, 2, 3+4j]
+ qc, r, jpvt = qr_multiply(a, c, pivoting=True)
+ assert_array_almost_equal(c @ q, qc)
+
+ def test_random(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(n))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_random_left(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])
+ q, r = qr(a)
+ c = random([n])
+ qc, r = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ qc, r = qr_multiply(a, eye(n), "left")
+ assert_array_almost_equal(q, qc)
+
+ def test_random_right(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])
+ q, r = qr(a)
+ c = random([n])
+ cq, r = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, cq)
+ cq, r = qr_multiply(a, eye(n))
+ assert_array_almost_equal(q, cq)
+
+ def test_random_pivoting(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(n))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_random_tall(self):
+ # full version
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(m))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_random_tall_left(self):
+ # full version
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ q, r = qr(a, mode="economic")
+ c = random([n])
+ qc, r = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ qc, r = qr_multiply(a, eye(n), "left")
+ assert_array_almost_equal(qc, q)
+
+ def test_random_tall_right(self):
+ # full version
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ q, r = qr(a, mode="economic")
+ c = random([m])
+ cq, r = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, cq)
+ cq, r = qr_multiply(a, eye(m))
+ assert_array_almost_equal(cq, q)
+
+ def test_random_tall_pivoting(self):
+ # full version pivoting
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(m))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_random_tall_e(self):
+ # economy version
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ q, r = qr(a, mode='economic')
+ assert_array_almost_equal(q.T @ q, eye(n))
+ assert_array_almost_equal(q @ r, a)
+ assert_equal(q.shape, (m, n))
+ assert_equal(r.shape, (n, n))
+
+ def test_random_tall_e_pivoting(self):
+ # economy version pivoting
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ q, r, p = qr(a, pivoting=True, mode='economic')
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(n))
+ assert_array_almost_equal(q @ r, a[:, p])
+ assert_equal(q.shape, (m, n))
+ assert_equal(r.shape, (n, n))
+ q2, r2 = qr(a[:, p], mode='economic')
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_random_trap(self):
+ m = 100
+ n = 200
+ for k in range(2):
+ a = random([m, n])
+ q, r = qr(a)
+ assert_array_almost_equal(q.T @ q, eye(m))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_random_trap_pivoting(self):
+ m = 100
+ n = 200
+ for k in range(2):
+ a = random([m, n])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.T @ q, eye(m))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_random_complex(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])+1j*random([n, n])
+ q, r = qr(a)
+ assert_array_almost_equal(q.conj().T @ q, eye(n))
+ assert_array_almost_equal(q @ r, a)
+
+ def test_random_complex_left(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])+1j*random([n, n])
+ q, r = qr(a)
+ c = random([n])+1j*random([n])
+ qc, r = qr_multiply(a, c, "left")
+ assert_array_almost_equal(q @ c, qc)
+ qc, r = qr_multiply(a, eye(n), "left")
+ assert_array_almost_equal(q, qc)
+
+ def test_random_complex_right(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])+1j*random([n, n])
+ q, r = qr(a)
+ c = random([n])+1j*random([n])
+ cq, r = qr_multiply(a, c)
+ assert_array_almost_equal(c @ q, cq)
+ cq, r = qr_multiply(a, eye(n))
+ assert_array_almost_equal(q, cq)
+
+ def test_random_complex_pivoting(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])+1j*random([n, n])
+ q, r, p = qr(a, pivoting=True)
+ d = abs(diag(r))
+ assert_(np.all(d[1:] <= d[:-1]))
+ assert_array_almost_equal(q.conj().T @ q, eye(n))
+ assert_array_almost_equal(q @ r, a[:, p])
+ q2, r2 = qr(a[:, p])
+ assert_array_almost_equal(q, q2)
+ assert_array_almost_equal(r, r2)
+
+ def test_check_finite(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ q, r = qr(a, check_finite=False)
+ assert_array_almost_equal(q.T @ q, eye(3))
+ assert_array_almost_equal(q @ r, a)
+
+    def test_lwork(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        # Get comparison values
+        q, r = qr(a, lwork=None)
+
+        # Test against minimum valid lwork
+        q2, r2 = qr(a, lwork=3)
+        assert_array_almost_equal(q2, q)
+        assert_array_almost_equal(r2, r)
+
+        # Test against larger lwork
+        q3, r3 = qr(a, lwork=10)
+        assert_array_almost_equal(q3, q)
+        assert_array_almost_equal(r3, r)
+
+        # Test against explicit lwork=-1
+        q4, r4 = qr(a, lwork=-1)
+        assert_array_almost_equal(q4, q)
+        assert_array_almost_equal(r4, r)
+
+        # Test against invalid lwork (too small for n=3 workspace)
+        assert_raises(Exception, qr, a, lwork=0)
+        assert_raises(Exception, qr, a, lwork=2)
+
+
+class TestRQ(object):
+
+ def setup_method(self):
+ seed(1234)
+
+ def test_simple(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.T, eye(3))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_r(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ r, q = rq(a)
+ r2 = rq(a, mode='r')
+ assert_array_almost_equal(r, r2)
+
+ def test_random(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.T, eye(n))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_simple_trap(self):
+ a = [[8, 2, 3], [2, 9, 3]]
+ r, q = rq(a)
+ assert_array_almost_equal(q.T @ q, eye(3))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_simple_tall(self):
+ a = [[8, 2], [2, 9], [5, 3]]
+ r, q = rq(a)
+ assert_array_almost_equal(q.T @ q, eye(2))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_simple_fat(self):
+ a = [[8, 2, 5], [2, 9, 3]]
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.T, eye(3))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_simple_complex(self):
+ a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.conj().T, eye(3))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_random_tall(self):
+ m = 200
+ n = 100
+ for k in range(2):
+ a = random([m, n])
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.T, eye(n))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_random_trap(self):
+ m = 100
+ n = 200
+ for k in range(2):
+ a = random([m, n])
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.T, eye(n))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_random_trap_economic(self):
+ m = 100
+ n = 200
+ for k in range(2):
+ a = random([m, n])
+ r, q = rq(a, mode='economic')
+ assert_array_almost_equal(q @ q.T, eye(m))
+ assert_array_almost_equal(r @ q, a)
+ assert_equal(q.shape, (m, n))
+ assert_equal(r.shape, (m, m))
+
+ def test_random_complex(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])+1j*random([n, n])
+ r, q = rq(a)
+ assert_array_almost_equal(q @ q.conj().T, eye(n))
+ assert_array_almost_equal(r @ q, a)
+
+ def test_random_complex_economic(self):
+ m = 100
+ n = 200
+ for k in range(2):
+ a = random([m, n])+1j*random([m, n])
+ r, q = rq(a, mode='economic')
+ assert_array_almost_equal(q @ q.conj().T, eye(m))
+ assert_array_almost_equal(r @ q, a)
+ assert_equal(q.shape, (m, n))
+ assert_equal(r.shape, (m, m))
+
+ def test_check_finite(self):
+ a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+ r, q = rq(a, check_finite=False)
+ assert_array_almost_equal(q @ q.T, eye(3))
+ assert_array_almost_equal(r @ q, a)
+
+
+class TestSchur(object):
+
+ def test_simple(self):
+ a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
+ t, z = schur(a)
+ assert_array_almost_equal(z @ t @ z.conj().T, a)
+ tc, zc = schur(a, 'complex')
+ assert_(np.any(ravel(iscomplex(zc))) and np.any(ravel(iscomplex(tc))))
+ assert_array_almost_equal(zc @ tc @ zc.conj().T, a)
+ tc2, zc2 = rsf2csf(tc, zc)
+ assert_array_almost_equal(zc2 @ tc2 @ zc2.conj().T, a)
+
+ def test_sort(self):
+ a = [[4., 3., 1., -1.],
+ [-4.5, -3.5, -1., 1.],
+ [9., 6., -4., 4.5],
+ [6., 4., -3., 3.5]]
+ s, u, sdim = schur(a, sort='lhp')
+ assert_array_almost_equal([[0.1134, 0.5436, 0.8316, 0.],
+ [-0.1134, -0.8245, 0.5544, 0.],
+ [-0.8213, 0.1308, 0.0265, -0.5547],
+ [-0.5475, 0.0872, 0.0177, 0.8321]],
+ u, 3)
+ assert_array_almost_equal([[-1.4142, 0.1456, -11.5816, -7.7174],
+ [0., -0.5000, 9.4472, -0.7184],
+ [0., 0., 1.4142, -0.1456],
+ [0., 0., 0., 0.5]],
+ s, 3)
+ assert_equal(2, sdim)
+
+ s, u, sdim = schur(a, sort='rhp')
+ assert_array_almost_equal([[0.4862, -0.4930, 0.1434, -0.7071],
+ [-0.4862, 0.4930, -0.1434, -0.7071],
+ [0.6042, 0.3944, -0.6924, 0.],
+ [0.4028, 0.5986, 0.6924, 0.]],
+ u, 3)
+ assert_array_almost_equal([[1.4142, -0.9270, 4.5368, -14.4130],
+ [0., 0.5, 6.5809, -3.1870],
+ [0., 0., -1.4142, 0.9270],
+ [0., 0., 0., -0.5]],
+ s, 3)
+ assert_equal(2, sdim)
+
+ s, u, sdim = schur(a, sort='iuc')
+ assert_array_almost_equal([[0.5547, 0., -0.5721, -0.6042],
+ [-0.8321, 0., -0.3814, -0.4028],
+ [0., 0.7071, -0.5134, 0.4862],
+ [0., 0.7071, 0.5134, -0.4862]],
+ u, 3)
+ assert_array_almost_equal([[-0.5000, 0.0000, -6.5809, -4.0974],
+ [0., 0.5000, -3.3191, -14.4130],
+ [0., 0., 1.4142, 2.1573],
+ [0., 0., 0., -1.4142]],
+ s, 3)
+ assert_equal(2, sdim)
+
+ s, u, sdim = schur(a, sort='ouc')
+ assert_array_almost_equal([[0.4862, -0.5134, 0.7071, 0.],
+ [-0.4862, 0.5134, 0.7071, 0.],
+ [0.6042, 0.5721, 0., -0.5547],
+ [0.4028, 0.3814, 0., 0.8321]],
+ u, 3)
+ assert_array_almost_equal([[1.4142, -2.1573, 14.4130, 4.0974],
+ [0., -1.4142, 3.3191, 6.5809],
+ [0., 0., -0.5000, 0.],
+ [0., 0., 0., 0.5000]],
+ s, 3)
+ assert_equal(2, sdim)
+
+ s, u, sdim = schur(a, sort=lambda x: x >= 0.0)
+ assert_array_almost_equal([[0.4862, -0.4930, 0.1434, -0.7071],
+ [-0.4862, 0.4930, -0.1434, -0.7071],
+ [0.6042, 0.3944, -0.6924, 0.],
+ [0.4028, 0.5986, 0.6924, 0.]],
+ u, 3)
+ assert_array_almost_equal([[1.4142, -0.9270, 4.5368, -14.4130],
+ [0., 0.5, 6.5809, -3.1870],
+ [0., 0., -1.4142, 0.9270],
+ [0., 0., 0., -0.5]],
+ s, 3)
+ assert_equal(2, sdim)
+
+ def test_sort_errors(self):
+ a = [[4., 3., 1., -1.],
+ [-4.5, -3.5, -1., 1.],
+ [9., 6., -4., 4.5],
+ [6., 4., -3., 3.5]]
+ assert_raises(ValueError, schur, a, sort='unsupported')
+ assert_raises(ValueError, schur, a, sort=1)
+
+ def test_check_finite(self):
+ a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
+ t, z = schur(a, check_finite=False)
+ assert_array_almost_equal(z @ t @ z.conj().T, a)
+
+
+class TestHessenberg(object):
+
+ def test_simple(self):
+ a = [[-149, -50, -154],
+ [537, 180, 546],
+ [-27, -9, -25]]
+ h1 = [[-149.0000, 42.2037, -156.3165],
+ [-537.6783, 152.5511, -554.9272],
+ [0, 0.0728, 2.4489]]
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q.T @ a @ q, h)
+ assert_array_almost_equal(h, h1, decimal=4)
+
+ def test_simple_complex(self):
+ a = [[-149, -50, -154],
+ [537, 180j, 546],
+ [-27j, -9, -25]]
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q.conj().T @ a @ q, h)
+
+ def test_simple2(self):
+ a = [[1, 2, 3, 4, 5, 6, 7],
+ [0, 2, 3, 4, 6, 7, 2],
+ [0, 2, 2, 3, 0, 3, 2],
+ [0, 0, 2, 8, 0, 0, 2],
+ [0, 3, 1, 2, 0, 1, 2],
+ [0, 1, 2, 3, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 2]]
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q.T @ a @ q, h)
+
+ def test_simple3(self):
+ a = np.eye(3)
+ a[-1, 0] = 2
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q.T @ a @ q, h)
+
+ def test_random(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q.T @ a @ q, h)
+
+ def test_random_complex(self):
+ n = 20
+ for k in range(2):
+ a = random([n, n])+1j*random([n, n])
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q.conj().T @ a @ q, h)
+
+ def test_check_finite(self):
+ a = [[-149, -50, -154],
+ [537, 180, 546],
+ [-27, -9, -25]]
+ h1 = [[-149.0000, 42.2037, -156.3165],
+ [-537.6783, 152.5511, -554.9272],
+ [0, 0.0728, 2.4489]]
+ h, q = hessenberg(a, calc_q=1, check_finite=False)
+ assert_array_almost_equal(q.T @ a @ q, h)
+ assert_array_almost_equal(h, h1, decimal=4)
+
+ def test_2x2(self):
+ a = [[2, 1], [7, 12]]
+
+ h, q = hessenberg(a, calc_q=1)
+ assert_array_almost_equal(q, np.eye(2))
+ assert_array_almost_equal(h, a)
+
+ b = [[2-7j, 1+2j], [7+3j, 12-2j]]
+ h2, q2 = hessenberg(b, calc_q=1)
+ assert_array_almost_equal(q2, np.eye(2))
+ assert_array_almost_equal(h2, b)
+
+
+class TestQZ(object):
+ def setup_method(self):
+ seed(12345)
+
+ def test_qz_single(self):
+ n = 5
+ A = random([n, n]).astype(float32)
+ B = random([n, n]).astype(float32)
+ AA, BB, Q, Z = qz(A, B)
+ assert_array_almost_equal(Q @ AA @ Z.T, A, decimal=5)
+ assert_array_almost_equal(Q @ BB @ Z.T, B, decimal=5)
+ assert_array_almost_equal(Q @ Q.T, eye(n), decimal=5)
+ assert_array_almost_equal(Z @ Z.T, eye(n), decimal=5)
+ assert_(np.all(diag(BB) >= 0))
+
+ def test_qz_double(self):
+ n = 5
+ A = random([n, n])
+ B = random([n, n])
+ AA, BB, Q, Z = qz(A, B)
+ assert_array_almost_equal(Q @ AA @ Z.T, A)
+ assert_array_almost_equal(Q @ BB @ Z.T, B)
+ assert_array_almost_equal(Q @ Q.T, eye(n))
+ assert_array_almost_equal(Z @ Z.T, eye(n))
+ assert_(np.all(diag(BB) >= 0))
+
+ def test_qz_complex(self):
+ n = 5
+ A = random([n, n]) + 1j*random([n, n])
+ B = random([n, n]) + 1j*random([n, n])
+ AA, BB, Q, Z = qz(A, B)
+ assert_array_almost_equal(Q @ AA @ Z.conj().T, A)
+ assert_array_almost_equal(Q @ BB @ Z.conj().T, B)
+ assert_array_almost_equal(Q @ Q.conj().T, eye(n))
+ assert_array_almost_equal(Z @ Z.conj().T, eye(n))
+ assert_(np.all(diag(BB) >= 0))
+ assert_(np.all(diag(BB).imag == 0))
+
+ def test_qz_complex64(self):
+ n = 5
+ A = (random([n, n]) + 1j*random([n, n])).astype(complex64)
+ B = (random([n, n]) + 1j*random([n, n])).astype(complex64)
+ AA, BB, Q, Z = qz(A, B)
+ assert_array_almost_equal(Q @ AA @ Z.conj().T, A, decimal=5)
+ assert_array_almost_equal(Q @ BB @ Z.conj().T, B, decimal=5)
+ assert_array_almost_equal(Q @ Q.conj().T, eye(n), decimal=5)
+ assert_array_almost_equal(Z @ Z.conj().T, eye(n), decimal=5)
+ assert_(np.all(diag(BB) >= 0))
+ assert_(np.all(diag(BB).imag == 0))
+
+ def test_qz_double_complex(self):
+ n = 5
+ A = random([n, n])
+ B = random([n, n])
+ AA, BB, Q, Z = qz(A, B, output='complex')
+ aa = Q @ AA @ Z.conj().T
+ assert_array_almost_equal(aa.real, A)
+ assert_array_almost_equal(aa.imag, 0)
+ bb = Q @ BB @ Z.conj().T
+ assert_array_almost_equal(bb.real, B)
+ assert_array_almost_equal(bb.imag, 0)
+ assert_array_almost_equal(Q @ Q.conj().T, eye(n))
+ assert_array_almost_equal(Z @ Z.conj().T, eye(n))
+ assert_(np.all(diag(BB) >= 0))
+
+ def test_qz_double_sort(self):
+ # from https://www.nag.com/lapack-ex/node119.html
+ # NOTE: These matrices may be ill-conditioned and lead to a
+ # seg fault on certain python versions when compiled with
+ # sse2 or sse3 older ATLAS/LAPACK binaries for windows
+ # A = np.array([[3.9, 12.5, -34.5, -0.5],
+ # [ 4.3, 21.5, -47.5, 7.5],
+ # [ 4.3, 21.5, -43.5, 3.5],
+ # [ 4.4, 26.0, -46.0, 6.0 ]])
+
+ # B = np.array([[ 1.0, 2.0, -3.0, 1.0],
+ # [1.0, 3.0, -5.0, 4.0],
+ # [1.0, 3.0, -4.0, 3.0],
+ # [1.0, 3.0, -4.0, 4.0]])
+ A = np.array([[3.9, 12.5, -34.5, 2.5],
+ [4.3, 21.5, -47.5, 7.5],
+ [4.3, 1.5, -43.5, 3.5],
+ [4.4, 6.0, -46.0, 6.0]])
+
+ B = np.array([[1.0, 1.0, -3.0, 1.0],
+ [1.0, 3.0, -5.0, 4.4],
+ [1.0, 2.0, -4.0, 1.0],
+ [1.2, 3.0, -4.0, 4.0]])
+
+ assert_raises(ValueError, qz, A, B, sort=lambda ar, ai, beta: ai == 0)
+ if False:
+ AA, BB, Q, Z, sdim = qz(A, B, sort=lambda ar, ai, beta: ai == 0)
+ # assert_(sdim == 2)
+ assert_(sdim == 4)
+ assert_array_almost_equal(Q @ AA @ Z.T, A)
+ assert_array_almost_equal(Q @ BB @ Z.T, B)
+
+ # test absolute values bc the sign is ambiguous and
+ # might be platform dependent
+ assert_array_almost_equal(np.abs(AA), np.abs(np.array(
+ [[35.7864, -80.9061, -12.0629, -9.498],
+ [0., 2.7638, -2.3505, 7.3256],
+ [0., 0., 0.6258, -0.0398],
+ [0., 0., 0., -12.8217]])), 4)
+ assert_array_almost_equal(np.abs(BB), np.abs(np.array(
+ [[4.5324, -8.7878, 3.2357, -3.5526],
+ [0., 1.4314, -2.1894, 0.9709],
+ [0., 0., 1.3126, -0.3468],
+ [0., 0., 0., 0.559]])), 4)
+ assert_array_almost_equal(np.abs(Q), np.abs(np.array(
+ [[-0.4193, -0.605, -0.1894, -0.6498],
+ [-0.5495, 0.6987, 0.2654, -0.3734],
+ [-0.4973, -0.3682, 0.6194, 0.4832],
+ [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
+ assert_array_almost_equal(np.abs(Z), np.abs(np.array(
+ [[-0.9471, -0.2971, -0.1217, 0.0055],
+ [-0.0367, 0.1209, 0.0358, 0.9913],
+ [0.3171, -0.9041, -0.2547, 0.1312],
+ [0.0346, 0.2824, -0.9587, 0.0014]])), 4)
+
+ # test absolute values bc the sign is ambiguous and might be platform
+ # dependent
+ # assert_array_almost_equal(abs(AA), abs(np.array([
+ # [3.8009, -69.4505, 50.3135, -43.2884],
+ # [0.0000, 9.2033, -0.2001, 5.9881],
+ # [0.0000, 0.0000, 1.4279, 4.4453],
+ # [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
+ # assert_array_almost_equal(abs(BB), abs(np.array([
+ # [1.9005, -10.2285, 0.8658, -5.2134],
+ # [0.0000, 2.3008, 0.7915, 0.4262],
+ # [0.0000, 0.0000, 0.8101, 0.0000],
+ # [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
+ # assert_array_almost_equal(abs(Q), abs(np.array([
+ # [0.4642, 0.7886, 0.2915, -0.2786],
+ # [0.5002, -0.5986, 0.5638, -0.2713],
+ # [0.5002, 0.0154, -0.0107, 0.8657],
+ # [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
+ # assert_array_almost_equal(dot(Q,Q.T), eye(4))
+ # assert_array_almost_equal(abs(Z), abs(np.array([
+ # [0.9961, -0.0014, 0.0887, -0.0026],
+ # [0.0057, -0.0404, -0.0938, -0.9948],
+ # [0.0626, 0.7194, -0.6908, 0.0363],
+ # [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
+ # assert_array_almost_equal(dot(Z,Z.T), eye(4))
+
+ # def test_qz_complex_sort(self):
+ # cA = np.array([
+ # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
+ # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
+ # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
+ # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
+
+ # cB = np.array([
+ # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
+ # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
+ # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
+ # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
+
+ # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
+
+ # eigenvalues = diag(AAS)/diag(BBS)
+ # assert_(np.all(np.real(eigenvalues[:sdim] < 0)))
+ # assert_(np.all(np.real(eigenvalues[sdim:] > 0)))
+
+ def test_check_finite(self):
+ n = 5
+ A = random([n, n])
+ B = random([n, n])
+ AA, BB, Q, Z = qz(A, B, check_finite=False)
+ assert_array_almost_equal(Q @ AA @ Z.T, A)
+ assert_array_almost_equal(Q @ BB @ Z.T, B)
+ assert_array_almost_equal(Q @ Q.T, eye(n))
+ assert_array_almost_equal(Z @ Z.T, eye(n))
+ assert_(np.all(diag(BB) >= 0))
+
+
+def _make_pos(X):
+ # the decompositions can have different signs than verified results
+ return np.sign(X)*X
+
+
+class TestOrdQZ(object):
+ @classmethod
+ def setup_class(cls):
+ # https://www.nag.com/lapack-ex/node119.html
+ A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
+ 7.5 + 0.5j],
+ [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
+ -10.5 - 1.5j],
+ [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
+ -7.5 - 3.5j],
+ [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
+ -19.0 - 32.5j]])
+
+ B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
+                       [0.8 - 0.6j, 3.0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
+ [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
+ [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
+
+ # https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
+ A2 = np.array([[3.9, 12.5, -34.5, -0.5],
+ [4.3, 21.5, -47.5, 7.5],
+ [4.3, 21.5, -43.5, 3.5],
+ [4.4, 26.0, -46.0, 6.0]])
+
+ B2 = np.array([[1, 2, -3, 1],
+ [1, 3, -5, 4],
+ [1, 3, -4, 3],
+ [1, 3, -4, 4]])
+
+ # example with the eigenvalues
+ # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
+ # 0.61244091
+ # thus featuring:
+ # * one complex conjugate eigenvalue pair,
+ # * one eigenvalue in the lhp
+ # * 2 eigenvalues in the unit circle
+ # * 2 non-real eigenvalues
+ A3 = np.array([[5., 1., 3., 3.],
+ [4., 4., 2., 7.],
+ [7., 4., 1., 3.],
+ [0., 4., 8., 7.]])
+ B3 = np.array([[8., 10., 6., 10.],
+ [7., 7., 2., 9.],
+ [9., 1., 6., 6.],
+ [5., 1., 4., 7.]])
+
+ # example with infinite eigenvalues
+ A4 = np.eye(2)
+ B4 = np.diag([0, 1])
+
+ # example with (alpha, beta) = (0, 0)
+ A5 = np.diag([1, 0])
+
+ cls.A = [A1, A2, A3, A4, A5]
+ cls.B = [B1, B2, B3, B4, A5]
+
+ def qz_decomp(self, sort):
+ with np.errstate(all='raise'):
+ ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)]
+ return tuple(ret)
+
+ def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
+ Id = np.eye(*A.shape)
+ # make sure Q and Z are orthogonal
+ assert_array_almost_equal(Q @ Q.T.conj(), Id)
+ assert_array_almost_equal(Z @ Z.T.conj(), Id)
+ # check factorization
+ assert_array_almost_equal(Q @ AA, A @ Z)
+ assert_array_almost_equal(Q @ BB, B @ Z)
+ # check shape of AA and BB
+ assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
+ assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
+ # check eigenvalues
+ for i in range(A.shape[0]):
+ # does the current diagonal element belong to a 2-by-2 block
+ # that was already checked?
+ if i > 0 and A[i, i - 1] != 0:
+ continue
+ # take care of 2-by-2 blocks
+ if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
+ evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
+ # make sure the pair of complex conjugate eigenvalues
+ # is ordered consistently (positive imaginary part first)
+ if evals[0].imag < 0:
+ evals = evals[[1, 0]]
+ tmp = alpha[i:i + 2]/beta[i:i + 2]
+ if tmp[0].imag < 0:
+ tmp = tmp[[1, 0]]
+ assert_array_almost_equal(evals, tmp)
+ else:
+ if alpha[i] == 0 and beta[i] == 0:
+ assert_equal(AA[i, i], 0)
+ assert_equal(BB[i, i], 0)
+ elif beta[i] == 0:
+ assert_equal(BB[i, i], 0)
+ else:
+ assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
+ sortfun = _select_function(sort)
+ lastsort = True
+ for i in range(A.shape[0]):
+ cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]]))
+ # once the sorting criterion was not matched all subsequent
+ # eigenvalues also shouldn't match
+ if not lastsort:
+ assert(not cursort)
+ lastsort = cursort
+
+ def check_all(self, sort):
+ ret = self.qz_decomp(sort)
+
+ for reti, Ai, Bi in zip(ret, self.A, self.B):
+ self.check(Ai, Bi, sort, *reti)
+
+ def test_lhp(self):
+ self.check_all('lhp')
+
+ def test_rhp(self):
+ self.check_all('rhp')
+
+ def test_iuc(self):
+ self.check_all('iuc')
+
+ def test_ouc(self):
+ self.check_all('ouc')
+
+ def test_ref(self):
+ # real eigenvalues first (top-left corner)
+ def sort(x, y):
+ out = np.empty_like(x, dtype=bool)
+ nonzero = (y != 0)
+ out[~nonzero] = False
+ out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0
+ return out
+
+ self.check_all(sort)
+
+ def test_cef(self):
+ # complex eigenvalues first (top-left corner)
+ def sort(x, y):
+ out = np.empty_like(x, dtype=bool)
+ nonzero = (y != 0)
+ out[~nonzero] = False
+ out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0
+ return out
+
+ self.check_all(sort)
+
+ def test_diff_input_types(self):
+ ret = ordqz(self.A[1], self.B[2], sort='lhp')
+ self.check(self.A[1], self.B[2], 'lhp', *ret)
+
+ ret = ordqz(self.B[2], self.A[1], sort='lhp')
+ self.check(self.B[2], self.A[1], 'lhp', *ret)
+
+ def test_sort_explicit(self):
+ # Test order of the eigenvalues in the 2 x 2 case where we can
+ # explicitly compute the solution
+ A1 = np.eye(2)
+ B1 = np.diag([-2, 0.5])
+ expected1 = [('lhp', [-0.5, 2]),
+ ('rhp', [2, -0.5]),
+ ('iuc', [-0.5, 2]),
+ ('ouc', [2, -0.5])]
+ A2 = np.eye(2)
+ B2 = np.diag([-2 + 1j, 0.5 + 0.5j])
+ expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
+ ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]),
+ ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
+ ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])]
+ # 'lhp' is ambiguous so don't test it
+ A3 = np.eye(2)
+ B3 = np.diag([2, 0])
+ expected3 = [('rhp', [0.5, np.inf]),
+ ('iuc', [0.5, np.inf]),
+ ('ouc', [np.inf, 0.5])]
+ # 'rhp' is ambiguous so don't test it
+ A4 = np.eye(2)
+ B4 = np.diag([-2, 0])
+ expected4 = [('lhp', [-0.5, np.inf]),
+ ('iuc', [-0.5, np.inf]),
+ ('ouc', [np.inf, -0.5])]
+ A5 = np.diag([0, 1])
+ B5 = np.diag([0, 0.5])
+ # 'lhp' and 'iuc' are ambiguous so don't test them
+ expected5 = [('rhp', [2, np.nan]),
+ ('ouc', [2, np.nan])]
+
+ A = [A1, A2, A3, A4, A5]
+ B = [B1, B2, B3, B4, B5]
+ expected = [expected1, expected2, expected3, expected4, expected5]
+ for Ai, Bi, expectedi in zip(A, B, expected):
+ for sortstr, expected_eigvals in expectedi:
+ _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr)
+ azero = (alpha == 0)
+ bzero = (beta == 0)
+ x = np.empty_like(alpha)
+ x[azero & bzero] = np.nan
+ x[~azero & bzero] = np.inf
+ x[~bzero] = alpha[~bzero]/beta[~bzero]
+ assert_allclose(expected_eigvals, x)
+
+
+class TestOrdQZWorkspaceSize(object):
+
+ def setup_method(self):
+ seed(12345)
+
+ def test_decompose(self):
+
+ N = 202
+
+ # raises error if lwork parameter to dtrsen is too small
+ for ddtype in [np.float32, np.float64]:
+ A = random((N, N)).astype(ddtype)
+ B = random((N, N)).astype(ddtype)
+ # sort = lambda ar, ai, b: ar**2 + ai**2 < b**2
+ _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta,
+ output='real')
+
+ for ddtype in [np.complex128, np.complex64]:
+ A = random((N, N)).astype(ddtype)
+ B = random((N, N)).astype(ddtype)
+ _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta,
+ output='complex')
+
+ @pytest.mark.slow
+ def test_decompose_ouc(self):
+
+ N = 202
+
+ # segfaults if lwork parameter to dtrsen is too small
+ for ddtype in [np.float32, np.float64, np.complex128, np.complex64]:
+ A = random((N, N)).astype(ddtype)
+ B = random((N, N)).astype(ddtype)
+ S, T, alpha, beta, U, V = ordqz(A, B, sort='ouc')
+
+
+class TestDatacopied(object):
+
+ def test_datacopied(self):
+ from scipy.linalg.decomp import _datacopied
+
+ M = matrix([[0, 1], [2, 3]])
+ A = asarray(M)
+ L = M.tolist()
+ M2 = M.copy()
+
+ class Fake1:
+ def __array__(self):
+ return A
+
+ class Fake2:
+ __array_interface__ = A.__array_interface__
+
+ F1 = Fake1()
+ F2 = Fake2()
+
+ for item, status in [(M, False), (A, False), (L, True),
+ (M2, False), (F1, False), (F2, False)]:
+ arr = asarray(item)
+ assert_equal(_datacopied(arr, item), status,
+ err_msg=repr(item))
+
+
+def test_aligned_mem_float():
+    """Check linalg works with non-aligned memory (float32)"""
+    # Allocate 402 bytes of memory (allocated on boundary)
+    a = arange(402, dtype=np.uint8)
+
+    # Offset of 2 bytes misaligns the float32 view from its 4-byte boundary
+    z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
+    z.shape = 10, 10
+
+    eig(z, overwrite_a=True)
+    eig(z.T, overwrite_a=True)
+
+
+@pytest.mark.skipif(platform.machine() == 'ppc64le',
+                    reason="crashes on ppc64le")
+def test_aligned_mem():
+    """Check linalg works with non-aligned memory (float64)"""
+    # Allocate 804 bytes of memory (allocated on boundary)
+    a = arange(804, dtype=np.uint8)
+
+    # Create an array with boundary offset 4
+    z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
+    z.shape = 10, 10
+
+    eig(z, overwrite_a=True)
+    eig(z.T, overwrite_a=True)
+
+
+def test_aligned_mem_complex():
+ """Check that complex objects don't need to be completely aligned"""
+ # Allocate 1608 bytes of memory (allocated on boundary)
+ a = zeros(1608, dtype=np.uint8)
+
+ # Create an array with boundary offset 8
+ z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
+ z.shape = 10, 10
+
+ eig(z, overwrite_a=True)
+ # This does not need special handling
+ eig(z.T, overwrite_a=True)
+
+
+def check_lapack_misaligned(func, args, kwargs):
+ args = list(args)
+ for i in range(len(args)):
+ a = args[:]
+ if isinstance(a[i], np.ndarray):
+ # Try misaligning a[i]
+ aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)
+ aa = np.frombuffer(aa.data, offset=4, count=a[i].size,
+ dtype=a[i].dtype)
+ aa.shape = a[i].shape
+ aa[...] = a[i]
+ a[i] = aa
+ func(*a, **kwargs)
+ if len(a[i].shape) > 1:
+ a[i] = a[i].T
+ func(*a, **kwargs)
+
+
+@pytest.mark.xfail(run=False,
+ reason="Ticket #1152, triggers a segfault in rare cases.")
+def test_lapack_misaligned():
+ M = np.eye(10, dtype=float)
+ R = np.arange(100)
+ R.shape = 10, 10
+ S = np.arange(20000, dtype=np.uint8)
+ S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
+ S.shape = 10, 10
+ b = np.ones(10)
+ LU, piv = lu_factor(S)
+ for (func, args, kwargs) in [
+ (eig, (S,), dict(overwrite_a=True)), # crash
+ (eigvals, (S,), dict(overwrite_a=True)), # no crash
+ (lu, (S,), dict(overwrite_a=True)), # no crash
+ (lu_factor, (S,), dict(overwrite_a=True)), # no crash
+ (lu_solve, ((LU, piv), b), dict(overwrite_b=True)),
+ (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)),
+ (svd, (M,), dict(overwrite_a=True)), # no crash
+ (svd, (R,), dict(overwrite_a=True)), # no crash
+ (svd, (S,), dict(overwrite_a=True)), # crash
+ (svdvals, (S,), dict()), # no crash
+ (svdvals, (S,), dict(overwrite_a=True)), # crash
+ (cholesky, (M,), dict(overwrite_a=True)), # no crash
+ (qr, (S,), dict(overwrite_a=True)), # crash
+ (rq, (S,), dict(overwrite_a=True)), # crash
+ (hessenberg, (S,), dict(overwrite_a=True)), # crash
+ (schur, (S,), dict(overwrite_a=True)), # crash
+ ]:
+ check_lapack_misaligned(func, args, kwargs)
+# not properly tested
+# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
+
+
+class TestOverwrite(object):
+ def test_eig(self):
+ assert_no_overwrite(eig, [(3, 3)])
+ assert_no_overwrite(eig, [(3, 3), (3, 3)])
+
+ def test_eigh(self):
+ assert_no_overwrite(eigh, [(3, 3)])
+ assert_no_overwrite(eigh, [(3, 3), (3, 3)])
+
+ def test_eig_banded(self):
+ assert_no_overwrite(eig_banded, [(3, 2)])
+
+ def test_eigvals(self):
+ assert_no_overwrite(eigvals, [(3, 3)])
+
+ def test_eigvalsh(self):
+ assert_no_overwrite(eigvalsh, [(3, 3)])
+
+ def test_eigvals_banded(self):
+ assert_no_overwrite(eigvals_banded, [(3, 2)])
+
+ def test_hessenberg(self):
+ assert_no_overwrite(hessenberg, [(3, 3)])
+
+ def test_lu_factor(self):
+ assert_no_overwrite(lu_factor, [(3, 3)])
+
+ def test_lu_solve(self):
+ x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]])
+ xlu = lu_factor(x)
+ assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])
+
+ def test_lu(self):
+ assert_no_overwrite(lu, [(3, 3)])
+
+ def test_qr(self):
+ assert_no_overwrite(qr, [(3, 3)])
+
+ def test_rq(self):
+ assert_no_overwrite(rq, [(3, 3)])
+
+ def test_schur(self):
+ assert_no_overwrite(schur, [(3, 3)])
+
+ def test_schur_complex(self):
+ assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)],
+ dtypes=[np.float32, np.float64])
+
+ def test_svd(self):
+ assert_no_overwrite(svd, [(3, 3)])
+ assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)])
+
+ def test_svdvals(self):
+ assert_no_overwrite(svdvals, [(3, 3)])
+
+
+def _check_orth(n, dtype, skip_big=False):
+ X = np.ones((n, 2), dtype=float).astype(dtype)
+
+ eps = np.finfo(dtype).eps
+ tol = 1000 * eps
+
+ Y = orth(X)
+ assert_equal(Y.shape, (n, 1))
+ assert_allclose(Y, Y.mean(), atol=tol)
+
+ Y = orth(X.T)
+ assert_equal(Y.shape, (2, 1))
+ assert_allclose(Y, Y.mean(), atol=tol)
+
+ if n > 5 and not skip_big:
+ np.random.seed(1)
+ X = np.random.rand(n, 5) @ np.random.rand(5, n)
+ X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n)
+ X = X.astype(dtype)
+
+ Y = orth(X, rcond=1e-3)
+ assert_equal(Y.shape, (n, 5))
+
+ Y = orth(X, rcond=1e-6)
+ assert_equal(Y.shape, (n, 5 + 1))
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
+ reason="test only on 64-bit, else too slow")
+def test_orth_memory_efficiency():
+ # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
+ # Keep in mind that @pytest.mark.slow tests are likely to be running
+ # under configurations that support 4Gb+ memory for tests related to
+ # 32 bit overflow.
+ n = 10*1000*1000
+ try:
+ _check_orth(n, np.float64, skip_big=True)
+ except MemoryError as e:
+ raise AssertionError(
+ 'memory error perhaps caused by orth regression'
+ ) from e
+
+
+def test_orth():
+ dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+ sizes = [1, 2, 3, 10, 100]
+ for dt, n in itertools.product(dtypes, sizes):
+ _check_orth(n, dt)
+
+
+def test_null_space():
+ np.random.seed(1)
+
+ dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+ sizes = [1, 2, 3, 10, 100]
+
+ for dt, n in itertools.product(dtypes, sizes):
+ X = np.ones((2, n), dtype=dt)
+
+ eps = np.finfo(dt).eps
+ tol = 1000 * eps
+
+ Y = null_space(X)
+ assert_equal(Y.shape, (n, n-1))
+ assert_allclose(X @ Y, 0, atol=tol)
+
+ Y = null_space(X.T)
+ assert_equal(Y.shape, (2, 1))
+ assert_allclose(X.T @ Y, 0, atol=tol)
+
+ X = np.random.randn(1 + n//2, n)
+ Y = null_space(X)
+ assert_equal(Y.shape, (n, n - 1 - n//2))
+ assert_allclose(X @ Y, 0, atol=tol)
+
+ if n > 5:
+ np.random.seed(1)
+ X = np.random.rand(n, 5) @ np.random.rand(5, n)
+ X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n)
+ X = X.astype(dt)
+
+ Y = null_space(X, rcond=1e-3)
+ assert_equal(Y.shape, (n, n - 5))
+
+ Y = null_space(X, rcond=1e-6)
+ assert_equal(Y.shape, (n, n - 6))
+
+
+def test_subspace_angles():
+ H = hadamard(8, float)
+ A = H[:, :3]
+ B = H[:, 3:]
+ assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14)
+ assert_allclose(subspace_angles(B, A), [np.pi / 2.] * 3, atol=1e-14)
+ for x in (A, B):
+ assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]),
+ atol=1e-14)
+ # From MATLAB function "subspace", which effectively only returns the
+ # last value that we calculate
+ x = np.array(
+ [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501
+ [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501
+ [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501
+ [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501
+ expected = 1.481454682101605
+ assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected,
+ rtol=1e-12)
+ assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected,
+ rtol=1e-12)
+ expected = 0.746361174247302
+ assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12)
+ assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12)
+ expected = 0.487163718534313
+ assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12)
+ assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12)
+ expected = 0.328950515907756
+ assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0],
+ atol=1e-12)
+ # Degenerate conditions
+ assert_raises(ValueError, subspace_angles, x[0], x)
+ assert_raises(ValueError, subspace_angles, x, x[0])
+ assert_raises(ValueError, subspace_angles, x[:-1], x)
+
+ # Test branch if mask.any is True:
+ A = np.array([[1, 0, 0],
+ [0, 1, 0],
+ [0, 0, 1],
+ [0, 0, 0],
+ [0, 0, 0]])
+ B = np.array([[1, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 1]])
+ expected = np.array([np.pi/2, 0, 0])
+ assert_allclose(subspace_angles(A, B), expected, rtol=1e-12)
+
+ # Complex
+ # second column in "b" does not affect result, just there so that
+ # b can have more cols than a, and vice-versa (both conditional code paths)
+ a = [[1 + 1j], [0]]
+ b = [[1 - 1j, 0], [0, 1]]
+ assert_allclose(subspace_angles(a, b), 0., atol=1e-14)
+ assert_allclose(subspace_angles(b, a), 0., atol=1e-14)
+
+
+class TestCDF2RDF(object):
+
+ def matmul(self, a, b):
+ return np.einsum('...ij,...jk->...ik', a, b)
+
+ def assert_eig_valid(self, w, v, x):
+ assert_array_almost_equal(
+ self.matmul(v, w),
+ self.matmul(x, v)
+ )
+
+ def test_single_array0x0real(self):
+ # eig doesn't support 0x0 in old versions of numpy
+ X = np.empty((0, 0))
+ w, v = np.empty(0), np.empty((0, 0))
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_single_array2x2_real(self):
+ X = np.array([[1, 2], [3, -1]])
+ w, v = np.linalg.eig(X)
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_single_array2x2_complex(self):
+ X = np.array([[1, 2], [-2, 1]])
+ w, v = np.linalg.eig(X)
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_single_array3x3_real(self):
+ X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
+ w, v = np.linalg.eig(X)
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_single_array3x3_complex(self):
+ X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
+ w, v = np.linalg.eig(X)
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_random_1d_stacked_arrays(self):
+ # cannot test M == 0 due to bug in old numpy
+ for M in range(1, 7):
+ np.random.seed(999999999)
+ X = np.random.rand(100, M, M)
+ w, v = np.linalg.eig(X)
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_random_2d_stacked_arrays(self):
+ # cannot test M == 0 due to bug in old numpy
+ for M in range(1, 7):
+ X = np.random.rand(10, 10, M, M)
+ w, v = np.linalg.eig(X)
+ wr, vr = cdf2rdf(w, v)
+ self.assert_eig_valid(wr, vr, X)
+
+ def test_low_dimensionality_error(self):
+ w, v = np.empty(()), np.array((2,))
+ assert_raises(ValueError, cdf2rdf, w, v)
+
+ def test_not_square_error(self):
+ # Check that passing a non-square array raises a ValueError.
+ w, v = np.arange(3), np.arange(6).reshape(3, 2)
+ assert_raises(ValueError, cdf2rdf, w, v)
+
+ def test_swapped_v_w_error(self):
+ # Check that exchanging places of w and v raises ValueError.
+ X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
+ w, v = np.linalg.eig(X)
+ assert_raises(ValueError, cdf2rdf, v, w)
+
+ def test_non_associated_error(self):
+ # Check that passing non-associated eigenvectors raises a ValueError.
+ w, v = np.arange(3), np.arange(16).reshape(4, 4)
+ assert_raises(ValueError, cdf2rdf, w, v)
+
+ def test_not_conjugate_pairs(self):
+ # Check that passing non-conjugate pairs raises a ValueError.
+ X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
+ w, v = np.linalg.eig(X)
+ assert_raises(ValueError, cdf2rdf, w, v)
+
+ # different arrays in the stack, so not conjugate
+ X = np.array([
+ [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]],
+ [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]],
+ ])
+ w, v = np.linalg.eig(X)
+ assert_raises(ValueError, cdf2rdf, w, v)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_cholesky.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_cholesky.py
new file mode 100644
index 0000000..21edd20
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_cholesky.py
@@ -0,0 +1,202 @@
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from pytest import raises as assert_raises
+
+from numpy import array, transpose, dot, conjugate, zeros_like, empty
+from numpy.random import random
+from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \
+ cho_factor, cho_solve
+
+from scipy.linalg._testutils import assert_no_overwrite
+
+
+class TestCholesky(object):
+
+ def test_simple(self):
+ a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
+ c = cholesky(a)
+ assert_array_almost_equal(dot(transpose(c), c), a)
+ c = transpose(c)
+ a = dot(c, transpose(c))
+ assert_array_almost_equal(cholesky(a, lower=1), c)
+
+ def test_check_finite(self):
+ a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
+ c = cholesky(a, check_finite=False)
+ assert_array_almost_equal(dot(transpose(c), c), a)
+ c = transpose(c)
+ a = dot(c, transpose(c))
+ assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c)
+
+ def test_simple_complex(self):
+ m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]])
+ a = dot(transpose(conjugate(m)), m)
+ c = cholesky(a)
+ a1 = dot(transpose(conjugate(c)), c)
+ assert_array_almost_equal(a, a1)
+ c = transpose(c)
+ a = dot(c, transpose(conjugate(c)))
+ assert_array_almost_equal(cholesky(a, lower=1), c)
+
+ def test_random(self):
+ n = 20
+ for k in range(2):
+ m = random([n, n])
+ for i in range(n):
+ m[i, i] = 20*(.1+m[i, i])
+ a = dot(transpose(m), m)
+ c = cholesky(a)
+ a1 = dot(transpose(c), c)
+ assert_array_almost_equal(a, a1)
+ c = transpose(c)
+ a = dot(c, transpose(c))
+ assert_array_almost_equal(cholesky(a, lower=1), c)
+
+ def test_random_complex(self):
+ n = 20
+ for k in range(2):
+ m = random([n, n])+1j*random([n, n])
+ for i in range(n):
+ m[i, i] = 20*(.1+abs(m[i, i]))
+ a = dot(transpose(conjugate(m)), m)
+ c = cholesky(a)
+ a1 = dot(transpose(conjugate(c)), c)
+ assert_array_almost_equal(a, a1)
+ c = transpose(c)
+ a = dot(c, transpose(conjugate(c)))
+ assert_array_almost_equal(cholesky(a, lower=1), c)
+
+
+class TestCholeskyBanded(object):
+ """Tests for cholesky_banded() and cho_solve_banded."""
+
+ def test_check_finite(self):
+ # Symmetric positive definite banded matrix `a`
+ a = array([[4.0, 1.0, 0.0, 0.0],
+ [1.0, 4.0, 0.5, 0.0],
+ [0.0, 0.5, 4.0, 0.2],
+ [0.0, 0.0, 0.2, 4.0]])
+ # Banded storage form of `a`.
+ ab = array([[-1.0, 1.0, 0.5, 0.2],
+ [4.0, 4.0, 4.0, 4.0]])
+ c = cholesky_banded(ab, lower=False, check_finite=False)
+ ufac = zeros_like(a)
+ ufac[list(range(4)), list(range(4))] = c[-1]
+ ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
+ assert_array_almost_equal(a, dot(ufac.T, ufac))
+
+ b = array([0.0, 0.5, 4.2, 4.2])
+ x = cho_solve_banded((c, False), b, check_finite=False)
+ assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+ def test_upper_real(self):
+ # Symmetric positive definite banded matrix `a`
+ a = array([[4.0, 1.0, 0.0, 0.0],
+ [1.0, 4.0, 0.5, 0.0],
+ [0.0, 0.5, 4.0, 0.2],
+ [0.0, 0.0, 0.2, 4.0]])
+ # Banded storage form of `a`.
+ ab = array([[-1.0, 1.0, 0.5, 0.2],
+ [4.0, 4.0, 4.0, 4.0]])
+ c = cholesky_banded(ab, lower=False)
+ ufac = zeros_like(a)
+ ufac[list(range(4)), list(range(4))] = c[-1]
+ ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
+ assert_array_almost_equal(a, dot(ufac.T, ufac))
+
+ b = array([0.0, 0.5, 4.2, 4.2])
+ x = cho_solve_banded((c, False), b)
+ assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+ def test_upper_complex(self):
+ # Hermitian positive definite banded matrix `a`
+ a = array([[4.0, 1.0, 0.0, 0.0],
+ [1.0, 4.0, 0.5, 0.0],
+ [0.0, 0.5, 4.0, -0.2j],
+ [0.0, 0.0, 0.2j, 4.0]])
+ # Banded storage form of `a`.
+ ab = array([[-1.0, 1.0, 0.5, -0.2j],
+ [4.0, 4.0, 4.0, 4.0]])
+ c = cholesky_banded(ab, lower=False)
+ ufac = zeros_like(a)
+ ufac[list(range(4)), list(range(4))] = c[-1]
+ ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
+ assert_array_almost_equal(a, dot(ufac.conj().T, ufac))
+
+ b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0])
+ x = cho_solve_banded((c, False), b)
+ assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+ def test_lower_real(self):
+ # Symmetric positive definite banded matrix `a`
+ a = array([[4.0, 1.0, 0.0, 0.0],
+ [1.0, 4.0, 0.5, 0.0],
+ [0.0, 0.5, 4.0, 0.2],
+ [0.0, 0.0, 0.2, 4.0]])
+ # Banded storage form of `a`.
+ ab = array([[4.0, 4.0, 4.0, 4.0],
+ [1.0, 0.5, 0.2, -1.0]])
+ c = cholesky_banded(ab, lower=True)
+ lfac = zeros_like(a)
+ lfac[list(range(4)), list(range(4))] = c[0]
+ lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
+ assert_array_almost_equal(a, dot(lfac, lfac.T))
+
+ b = array([0.0, 0.5, 4.2, 4.2])
+ x = cho_solve_banded((c, True), b)
+ assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+ def test_lower_complex(self):
+ # Hermitian positive definite banded matrix `a`
+ a = array([[4.0, 1.0, 0.0, 0.0],
+ [1.0, 4.0, 0.5, 0.0],
+ [0.0, 0.5, 4.0, -0.2j],
+ [0.0, 0.0, 0.2j, 4.0]])
+ # Banded storage form of `a`.
+ ab = array([[4.0, 4.0, 4.0, 4.0],
+ [1.0, 0.5, 0.2j, -1.0]])
+ c = cholesky_banded(ab, lower=True)
+ lfac = zeros_like(a)
+ lfac[list(range(4)), list(range(4))] = c[0]
+ lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
+ assert_array_almost_equal(a, dot(lfac, lfac.conj().T))
+
+ b = array([0.0, 0.5j, 3.8j, 3.8])
+ x = cho_solve_banded((c, True), b)
+ assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0])
+
+
+class TestOverwrite(object):
+ def test_cholesky(self):
+ assert_no_overwrite(cholesky, [(3, 3)])
+
+ def test_cho_factor(self):
+ assert_no_overwrite(cho_factor, [(3, 3)])
+
+ def test_cho_solve(self):
+ x = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])
+ xcho = cho_factor(x)
+ assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)])
+
+ def test_cholesky_banded(self):
+ assert_no_overwrite(cholesky_banded, [(2, 3)])
+
+ def test_cho_solve_banded(self):
+ x = array([[0, -1, -1], [2, 2, 2]])
+ xcho = cholesky_banded(x)
+ assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b),
+ [(3,)])
+
+
+class TestEmptyArray(object):
+ def test_cho_factor_empty_square(self):
+ a = empty((0, 0))
+ b = array([])
+ c = array([[]])
+ d = []
+ e = [[]]
+
+ x, _ = cho_factor(a)
+ assert_array_equal(x, a)
+
+ for x in ([b, c, d, e]):
+ assert_raises(ValueError, cho_factor, x)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_cossin.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_cossin.py
new file mode 100644
index 0000000..56a908a
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_cossin.py
@@ -0,0 +1,155 @@
+import pytest
+import numpy as np
+from numpy.random import seed
+from numpy.testing import assert_allclose
+
+from scipy.linalg.lapack import _compute_lwork
+from scipy.stats import ortho_group, unitary_group
+from scipy.linalg import cossin, get_lapack_funcs
+
+REAL_DTYPES = (np.float32, np.float64)
+COMPLEX_DTYPES = (np.complex64, np.complex128)
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+@pytest.mark.parametrize('dtype_', DTYPES)
+@pytest.mark.parametrize('m, p, q',
+ [
+ (2, 1, 1),
+ (3, 2, 1),
+ (3, 1, 2),
+ (4, 2, 2),
+ (4, 1, 2),
+ (40, 12, 20),
+ (40, 30, 1),
+ (40, 1, 30),
+ (100, 50, 1),
+ (100, 50, 50),
+ ])
+@pytest.mark.parametrize('swap_sign', [True, False])
+def test_cossin(dtype_, m, p, q, swap_sign):
+ seed(1234)
+ if dtype_ in COMPLEX_DTYPES:
+ x = np.array(unitary_group.rvs(m), dtype=dtype_)
+ else:
+ x = np.array(ortho_group.rvs(m), dtype=dtype_)
+
+ u, cs, vh = cossin(x, p, q,
+ swap_sign=swap_sign)
+ assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
+ assert u.dtype == dtype_
+    # Test for float32 or float64
+ assert cs.dtype == np.real(u).dtype
+ assert vh.dtype == dtype_
+
+ u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
+ swap_sign=swap_sign)
+ assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
+ assert u.dtype == dtype_
+ assert cs.dtype == np.real(u).dtype
+ assert vh.dtype == dtype_
+
+ _, cs2, vh2 = cossin(x, p, q,
+ compute_u=False,
+ swap_sign=swap_sign)
+ assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
+ assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)
+
+ u2, cs2, _ = cossin(x, p, q,
+ compute_vh=False,
+ swap_sign=swap_sign)
+ assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
+ assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
+
+ _, cs2, _ = cossin(x, p, q,
+ compute_u=False,
+ compute_vh=False,
+ swap_sign=swap_sign)
+ assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
+
+
+def test_cossin_mixed_types():
+ seed(1234)
+ x = np.array(ortho_group.rvs(4), dtype=np.float64)
+ u, cs, vh = cossin([x[:2, :2],
+ np.array(x[:2, 2:], dtype=np.complex128),
+ x[2:, :2],
+ x[2:, 2:]])
+
+ assert u.dtype == np.complex128
+ assert cs.dtype == np.float64
+ assert vh.dtype == np.complex128
+ assert_allclose(x, u @ cs @ vh, rtol=0.,
+ atol=1e4 * np.finfo(np.complex128).eps)
+
+
+def test_cossin_error_incorrect_subblocks():
+ with pytest.raises(ValueError, match="be due to missing p, q arguments."):
+ cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10]))
+
+
+def test_cossin_error_empty_subblocks():
+ with pytest.raises(ValueError, match="x11.*empty"):
+ cossin(([], [], [], []))
+ with pytest.raises(ValueError, match="x12.*empty"):
+ cossin(([1, 2], [], [6, 7], [8, 9, 10]))
+ with pytest.raises(ValueError, match="x21.*empty"):
+ cossin(([1, 2], [3, 4, 5], [], [8, 9, 10]))
+ with pytest.raises(ValueError, match="x22.*empty"):
+ cossin(([1, 2], [3, 4, 5], [2], []))
+
+
+def test_cossin_error_missing_partitioning():
+ with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"):
+ cossin(unitary_group.rvs(2))
+
+ with pytest.raises(ValueError, match=".*might be due to missing p, q"):
+ cossin(unitary_group.rvs(4))
+
+
+def test_cossin_error_non_iterable():
+ with pytest.raises(ValueError, match="containing the subblocks of X"):
+ cossin(12j)
+
+
+def test_cossin_error_non_square():
+ with pytest.raises(ValueError, match="only supports square"):
+ cossin(np.array([[1, 2]]), 1, 1)
+
+def test_cossin_error_partitioning():
+ x = np.array(ortho_group.rvs(4), dtype=np.float64)
+    with pytest.raises(ValueError, match="invalid p=0.*0<p<4.*"):
+    if m >= n:
+ assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
+ else:
+ assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
+ # p is Hermitian positive semidefinite.
+ assert_allclose(p.conj().T, p)
+ evals = eigh(p, eigvals_only=True)
+ nonzero_evals = evals[abs(evals) > 1e-14]
+ assert_((nonzero_evals >= 0).all())
+
+ u, p = polar(a, side='left')
+ assert_equal(u.shape, (m, n))
+ assert_equal(p.shape, (m, m))
+ # a = pu
+ assert_allclose(p.dot(u), a, atol=product_atol)
+ if m >= n:
+ assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
+ else:
+ assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
+ # p is Hermitian positive semidefinite.
+ assert_allclose(p.conj().T, p)
+ evals = eigh(p, eigvals_only=True)
+ nonzero_evals = evals[abs(evals) > 1e-14]
+ assert_((nonzero_evals >= 0).all())
+
+
+def test_precomputed_cases():
+ for a, side, expected_u, expected_p in precomputed_cases:
+ check_precomputed_polar(a, side, expected_u, expected_p)
+
+
+def test_verify_cases():
+ for a in verify_cases:
+ verify_polar(a)
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_update.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_update.py
new file mode 100644
index 0000000..a8bbf8b
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_decomp_update.py
@@ -0,0 +1,1697 @@
+import itertools
+
+import numpy as np
+from numpy.testing import assert_, assert_allclose, assert_equal
+from pytest import raises as assert_raises
+from scipy import linalg
+import scipy.linalg._decomp_update as _decomp_update
+from scipy.linalg._decomp_update import qr_delete, qr_update, qr_insert
+
+def assert_unitary(a, rtol=None, atol=None, assert_sqr=True):
+ if rtol is None:
+ rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)
+ if atol is None:
+ atol = 10*np.finfo(a.dtype).eps
+
+ if assert_sqr:
+ assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square')
+ aTa = np.dot(a.T.conj(), a)
+ assert_allclose(aTa, np.eye(a.shape[1]), rtol=rtol, atol=atol)
+
+def assert_upper_tri(a, rtol=None, atol=None):
+ if rtol is None:
+ rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)
+ if atol is None:
+ atol = 2*np.finfo(a.dtype).eps
+ mask = np.tri(a.shape[0], a.shape[1], -1, np.bool_)
+ assert_allclose(a[mask], 0.0, rtol=rtol, atol=atol)
+
+def check_qr(q, r, a, rtol, atol, assert_sqr=True):
+ assert_unitary(q, rtol, atol, assert_sqr)
+ assert_upper_tri(r, rtol, atol)
+ assert_allclose(q.dot(r), a, rtol=rtol, atol=atol)
+
+def make_strided(arrs):
+ strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)]
+ kmax = len(strides)
+ k = 0
+ ret = []
+ for a in arrs:
+ if a.ndim == 1:
+ s = strides[k % kmax]
+ k += 1
+ base = np.zeros(s[0]*a.shape[0]+s[1], a.dtype)
+ view = base[s[1]::s[0]]
+ view[...] = a
+ elif a.ndim == 2:
+ s = strides[k % kmax]
+ t = strides[(k+1) % kmax]
+ k += 2
+ base = np.zeros((s[0]*a.shape[0]+s[1], t[0]*a.shape[1]+t[1]),
+ a.dtype)
+ view = base[s[1]::s[0], t[1]::t[0]]
+ view[...] = a
+ else:
+ raise ValueError('make_strided only works for ndim = 1 or'
+ ' 2 arrays')
+ ret.append(view)
+ return ret
+
+def negate_strides(arrs):
+ ret = []
+ for a in arrs:
+ b = np.zeros_like(a)
+ if b.ndim == 2:
+ b = b[::-1, ::-1]
+ elif b.ndim == 1:
+ b = b[::-1]
+ else:
+ raise ValueError('negate_strides only works for ndim = 1 or'
+ ' 2 arrays')
+ b[...] = a
+ ret.append(b)
+ return ret
+
+def nonitemsize_strides(arrs):
+ out = []
+ for a in arrs:
+ a_dtype = a.dtype
+ b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')])
+ c = b.getfield(a_dtype)
+ c[...] = a
+ out.append(c)
+ return out
+
+
+def make_nonnative(arrs):
+ return [a.astype(a.dtype.newbyteorder()) for a in arrs]
+
+
+class BaseQRdeltas(object):
+ def setup_method(self):
+ self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2)
+ self.atol = 10 * np.finfo(self.dtype).eps
+
+ def generate(self, type, mode='full'):
+ np.random.seed(29382)
+ shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12),
+ 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type]
+ a = np.random.random(shape)
+ if np.iscomplexobj(self.dtype.type(1)):
+ b = np.random.random(shape)
+ a = a + 1j * b
+ a = a.astype(self.dtype)
+ q, r = linalg.qr(a, mode=mode)
+ return a, q, r
+
+class BaseQRdelete(BaseQRdeltas):
+ def test_sqr_1_row(self):
+ a, q, r = self.generate('sqr')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_row(self):
+ a, q, r = self.generate('sqr')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_1_col(self):
+ a, q, r = self.generate('sqr')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_col(self):
+ a, q, r = self.generate('sqr')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_row(self):
+ a, q, r = self.generate('tall')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_row(self):
+ a, q, r = self.generate('tall')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_col(self):
+ a, q, r = self.generate('tall')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_col(self):
+ a, q, r = self.generate('tall')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_1_row(self):
+ a, q, r = self.generate('fat')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_row(self):
+ a, q, r = self.generate('fat')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_1_col(self):
+ a, q, r = self.generate('fat')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_col(self):
+ a, q, r = self.generate('fat')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_economic_1_row(self):
+ # this test always starts and ends with an economic decomp.
+ a, q, r = self.generate('tall', 'economic')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ # for economic row deletes
+ # eco - prow = eco
+ # eco - prow = sqr
+ # eco - prow = fat
+ def base_economic_p_row_xxx(self, ndel):
+ a, q, r = self.generate('tall', 'economic')
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_p_row_economic(self):
+ # (12, 7) - (3, 7) = (9,7) --> stays economic
+ self.base_economic_p_row_xxx(3)
+
+ def test_economic_p_row_sqr(self):
+ # (12, 7) - (5, 7) = (7, 7) --> becomes square
+ self.base_economic_p_row_xxx(5)
+
+ def test_economic_p_row_fat(self):
+ # (12, 7) - (7,7) = (5, 7) --> becomes fat
+ self.base_economic_p_row_xxx(7)
+
+ def test_economic_1_col(self):
+ a, q, r = self.generate('tall', 'economic')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_p_col(self):
+ a, q, r = self.generate('tall', 'economic')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_1_row(self):
+ a, q, r = self.generate('Mx1')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_p_row(self):
+ a, q, r = self.generate('Mx1')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_1_col(self):
+ a, q, r = self.generate('1xN')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_p_col(self):
+ a, q, r = self.generate('1xN')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_economic_1_row(self):
+ a, q, r = self.generate('Mx1', 'economic')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_economic_p_row(self):
+ a, q, r = self.generate('Mx1', 'economic')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_delete_last_1_row(self):
+ # full and eco are the same for 1xN
+ a, q, r = self.generate('1xN')
+ q1, r1 = qr_delete(q, r, 0, 1, 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ def test_delete_last_p_row(self):
+ a, q, r = self.generate('tall', 'full')
+ q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ a, q, r = self.generate('tall', 'economic')
+ q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ def test_delete_last_1_col(self):
+ a, q, r = self.generate('Mx1', 'economic')
+ q1, r1 = qr_delete(q, r, 0, 1, 'col')
+ assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+ a, q, r = self.generate('Mx1', 'full')
+ q1, r1 = qr_delete(q, r, 0, 1, 'col')
+ assert_unitary(q1)
+ assert_(q1.dtype == q.dtype)
+ assert_(q1.shape == q.shape)
+ assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+ def test_delete_last_p_col(self):
+ a, q, r = self.generate('tall', 'full')
+ q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+ assert_unitary(q1)
+ assert_(q1.dtype == q.dtype)
+ assert_(q1.shape == q.shape)
+ assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+ a, q, r = self.generate('tall', 'economic')
+ q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+ assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+ def test_delete_1x1_row_col(self):
+ a, q, r = self.generate('1x1')
+ q1, r1 = qr_delete(q, r, 0, 1, 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ a, q, r = self.generate('1x1')
+ q1, r1 = qr_delete(q, r, 0, 1, 'col')
+ assert_unitary(q1)
+ assert_(q1.dtype == q.dtype)
+ assert_(q1.shape == q.shape)
+ assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+    # All full qr, row deletes and single column deletes should be able to
+    # handle any non-negative strides. (Only row and column vector
+    # operations are used.) p-column deletes require Fortran-ordered
+    # Q and R and will make a copy as necessary. Economic qr row deletes
+    # require a contiguous q.
+
+ def base_non_simple_strides(self, adjust_strides, ks, p, which,
+ overwriteable):
+ if which == 'row':
+ qind = (slice(p,None), slice(p,None))
+ rind = (slice(p,None), slice(None))
+ else:
+ qind = (slice(None), slice(None))
+ rind = (slice(None), slice(None,-p))
+
+ for type, k in itertools.product(['sqr', 'tall', 'fat'], ks):
+ a, q0, r0, = self.generate(type)
+ qs, rs = adjust_strides((q0, r0))
+ if p == 1:
+ a1 = np.delete(a, k, 0 if which == 'row' else 1)
+ else:
+ s = slice(k,k+p)
+ if k < 0:
+ s = slice(k, k + p +
+ (a.shape[0] if which == 'row' else a.shape[1]))
+ a1 = np.delete(a, s, 0 if which == 'row' else 1)
+
+ # for each variable, q, r we try with it strided and
+ # overwrite=False. Then we try with overwrite=True, and make
+ # sure that q and r are still overwritten.
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q1, r1 = qr_delete(qs, r, k, p, which, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+ q1o, r1o = qr_delete(qs, r, k, p, which, True)
+ check_qr(q1o, r1o, a1, self.rtol, self.atol)
+ if overwriteable:
+ assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q2, r2 = qr_delete(q, rs, k, p, which, False)
+ check_qr(q2, r2, a1, self.rtol, self.atol)
+ q2o, r2o = qr_delete(q, rs, k, p, which, True)
+ check_qr(q2o, r2o, a1, self.rtol, self.atol)
+ if overwriteable:
+ assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ # since some of these were consumed above
+ qs, rs = adjust_strides((q, r))
+ q3, r3 = qr_delete(qs, rs, k, p, which, False)
+ check_qr(q3, r3, a1, self.rtol, self.atol)
+ q3o, r3o = qr_delete(qs, rs, k, p, which, True)
+ check_qr(q3o, r3o, a1, self.rtol, self.atol)
+ if overwriteable:
+ assert_allclose(q2o, qs[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+ def test_non_unit_strides_1_row(self):
+ self.base_non_simple_strides(make_strided, [0], 1, 'row', True)
+
+ def test_non_unit_strides_p_row(self):
+ self.base_non_simple_strides(make_strided, [0], 3, 'row', True)
+
+ def test_non_unit_strides_1_col(self):
+ self.base_non_simple_strides(make_strided, [0], 1, 'col', True)
+
+ def test_non_unit_strides_p_col(self):
+ self.base_non_simple_strides(make_strided, [0], 3, 'col', False)
+
+ def test_neg_strides_1_row(self):
+ self.base_non_simple_strides(negate_strides, [0], 1, 'row', False)
+
+ def test_neg_strides_p_row(self):
+ self.base_non_simple_strides(negate_strides, [0], 3, 'row', False)
+
+ def test_neg_strides_1_col(self):
+ self.base_non_simple_strides(negate_strides, [0], 1, 'col', False)
+
+ def test_neg_strides_p_col(self):
+ self.base_non_simple_strides(negate_strides, [0], 3, 'col', False)
+
+ def test_non_itemize_strides_1_row(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False)
+
+ def test_non_itemize_strides_p_row(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False)
+
+ def test_non_itemize_strides_1_col(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False)
+
+ def test_non_itemize_strides_p_col(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False)
+
+ def test_non_native_byte_order_1_row(self):
+ self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False)
+
+ def test_non_native_byte_order_p_row(self):
+ self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False)
+
+ def test_non_native_byte_order_1_col(self):
+ self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False)
+
+ def test_non_native_byte_order_p_col(self):
+ self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False)
+
+ def test_neg_k(self):
+ a, q, r = self.generate('sqr')
+ for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']):
+ q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False)
+ if w == 'row':
+ a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0)
+ else:
+ a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[1]), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'):
+ assert_sqr = True if mode == 'full' else False
+ if which == 'row':
+ qind = (slice(p,None), slice(p,None))
+ rind = (slice(p,None), slice(None))
+ else:
+ qind = (slice(None), slice(None))
+ rind = (slice(None), slice(None,-p))
+ a, q0, r0 = self.generate('sqr', mode)
+ if p == 1:
+ a1 = np.delete(a, 3, 0 if which == 'row' else 1)
+ else:
+ a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1)
+
+ # don't overwrite
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q1, r1 = qr_delete(q, r, 3, p, which, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr)
+ check_qr(q, r, a, self.rtol, self.atol, assert_sqr)
+
+ if test_F:
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q2, r2 = qr_delete(q, r, 3, p, which, True)
+ check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr)
+ # verify the overwriting
+ assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol)
+
+ if test_C:
+ q = q0.copy('C')
+ r = r0.copy('C')
+ q3, r3 = qr_delete(q, r, 3, p, which, True)
+ check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr)
+ assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol)
+
+ def test_overwrite_qr_1_row(self):
+ # any positively strided q and r.
+ self.base_overwrite_qr('row', 1, True, True)
+
+ def test_overwrite_economic_qr_1_row(self):
+ # Any contiguous q and positively strided r.
+ self.base_overwrite_qr('row', 1, True, True, 'economic')
+
+ def test_overwrite_qr_1_col(self):
+ # any positively strided q and r.
+ # full and eco share code paths
+ self.base_overwrite_qr('col', 1, True, True)
+
+ def test_overwrite_qr_p_row(self):
+ # any positively strided q and r.
+ self.base_overwrite_qr('row', 3, True, True)
+
+ def test_overwrite_economic_qr_p_row(self):
+ # any contiguous q and positively strided r
+ self.base_overwrite_qr('row', 3, True, True, 'economic')
+
+ def test_overwrite_qr_p_col(self):
+        # only F-ordered q and r can be overwritten for cols
+ # full and eco share code paths
+ self.base_overwrite_qr('col', 3, False, True)
+
+ def test_bad_which(self):
+ a, q, r = self.generate('sqr')
+ assert_raises(ValueError, qr_delete, q, r, 0, which='foo')
+
+ def test_bad_k(self):
+ a, q, r = self.generate('tall')
+ assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1)
+ assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1)
+ assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col')
+
+ def test_bad_p(self):
+ a, q, r = self.generate('tall')
+ # p must be positive
+ assert_raises(ValueError, qr_delete, q, r, 0, -1)
+ assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col')
+
+ # and nonzero
+ assert_raises(ValueError, qr_delete, q, r, 0, 0)
+ assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col')
+
+ # must have at least k+p rows or cols, depending.
+ assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2)
+ assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col')
+
+ def test_empty_q(self):
+ a, q, r = self.generate('tall')
+ # same code path for 'row' and 'col'
+ assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1)
+
+ def test_empty_r(self):
+ a, q, r = self.generate('tall')
+ # same code path for 'row' and 'col'
+ assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1)
+
+ def test_mismatched_q_and_r(self):
+ a, q, r = self.generate('tall')
+ r = r[1:]
+ assert_raises(ValueError, qr_delete, q, r, 0, 1)
+
+ def test_unsupported_dtypes(self):
+ dts = ['int8', 'int16', 'int32', 'int64',
+ 'uint8', 'uint16', 'uint32', 'uint64',
+ 'float16', 'longdouble', 'longcomplex',
+ 'bool']
+ a, q0, r0 = self.generate('tall')
+ for dtype in dts:
+ q = q0.real.astype(dtype)
+ r = r0.real.astype(dtype)
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col')
+
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col')
+
+ def test_check_finite(self):
+ a0, q0, r0 = self.generate('tall')
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col')
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col')
+
+ def test_qr_scalar(self):
+ a, q, r = self.generate('1x1')
+ assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col')
+
+class TestQRdelete_f(BaseQRdelete):
+ dtype = np.dtype('f')
+
+class TestQRdelete_F(BaseQRdelete):
+ dtype = np.dtype('F')
+
+class TestQRdelete_d(BaseQRdelete):
+ dtype = np.dtype('d')
+
+class TestQRdelete_D(BaseQRdelete):
+ dtype = np.dtype('D')
+
+class BaseQRinsert(BaseQRdeltas):
+ def generate(self, type, mode='full', which='row', p=1):
+ a, q, r = super(BaseQRinsert, self).generate(type, mode)
+
+ assert_(p > 0)
+
+ # super call set the seed...
+ if which == 'row':
+ if p == 1:
+ u = np.random.random(a.shape[1])
+ else:
+ u = np.random.random((p, a.shape[1]))
+ elif which == 'col':
+ if p == 1:
+ u = np.random.random(a.shape[0])
+ else:
+ u = np.random.random((a.shape[0], p))
+ else:
+ ValueError('which should be either "row" or "col"')
+
+ if np.iscomplexobj(self.dtype.type(1)):
+ b = np.random.random(u.shape)
+ u = u + 1j * b
+
+ u = u.astype(self.dtype)
+ return a, q, r, u
+
+ def test_sqr_1_row(self):
+ a, q, r, u = self.generate('sqr', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_row(self):
+ # sqr + rows --> fat always
+ a, q, r, u = self.generate('sqr', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_1_col(self):
+ a, q, r, u = self.generate('sqr', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_col(self):
+ # sqr + cols --> fat always
+ a, q, r, u = self.generate('sqr', which='col', p=3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_row(self):
+ a, q, r, u = self.generate('tall', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_row(self):
+ # tall + rows --> tall always
+ a, q, r, u = self.generate('tall', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_col(self):
+ a, q, r, u = self.generate('tall', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ # for column adds to tall matrices there are three cases to test
+ # tall + pcol --> tall
+ # tall + pcol --> sqr
+ # tall + pcol --> fat
+ def base_tall_p_col_xxx(self, p):
+ a, q, r, u = self.generate('tall', which='col', p=p)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_col_tall(self):
+ # 12x7 + 12x3 = 12x10 --> stays tall
+ self.base_tall_p_col_xxx(3)
+
+ def test_tall_p_col_sqr(self):
+ # 12x7 + 12x5 = 12x12 --> becomes sqr
+ self.base_tall_p_col_xxx(5)
+
+ def test_tall_p_col_fat(self):
+ # 12x7 + 12x7 = 12x14 --> becomes fat
+ self.base_tall_p_col_xxx(7)
+
+ def test_fat_1_row(self):
+ a, q, r, u = self.generate('fat', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ # for row adds to fat matrices there are three cases to test
+ # fat + prow --> fat
+ # fat + prow --> sqr
+ # fat + prow --> tall
+ def base_fat_p_row_xxx(self, p):
+ a, q, r, u = self.generate('fat', which='row', p=p)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(p, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_row_fat(self):
+ # 7x12 + 3x12 = 10x12 --> stays fat
+ self.base_fat_p_row_xxx(3)
+
+ def test_fat_p_row_sqr(self):
+ # 7x12 + 5x12 = 12x12 --> becomes sqr
+ self.base_fat_p_row_xxx(5)
+
+ def test_fat_p_row_tall(self):
+ # 7x12 + 7x12 = 14x12 --> becomes tall
+ self.base_fat_p_row_xxx(7)
+
+    # Column inserts into a fat matrix: a fat matrix stays fat no matter how
+    # many columns are added, so a single rank-1 and rank-p case suffice.
+    def test_fat_1_col(self):
+        a, q, r, u = self.generate('fat', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_p_col(self):
+        # fat + cols --> fat always
+        a, q, r, u = self.generate('fat', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    # Economic (thin) decompositions: check_qr is called with
+    # assert_sqr=False since Q is rectangular.
+    def test_economic_1_row(self):
+        a, q, r, u = self.generate('tall', 'economic', 'row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_p_row(self):
+        # tall + rows --> tall always
+        a, q, r, u = self.generate('tall', 'economic', 'row', 3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_1_col(self):
+        a, q, r, u = self.generate('tall', 'economic', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_1_col_bad_update(self):
+        # When the column to be added lies in the span of Q, the update is
+        # not meaningful. This is detected, and a LinAlgError is issued.
+        q = np.eye(5, 3, dtype=self.dtype)
+        r = np.eye(3, dtype=self.dtype)
+        u = np.array([1, 0, 0, 0, 0], self.dtype)
+        assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col')
+
+    # for column adds to economic matrices there are three cases to test
+    # eco + pcol --> eco
+    # eco + pcol --> sqr
+    # eco + pcol --> fat
+    def base_economic_p_col_xxx(self, p):
+        # Insert p columns at every possible position into an economic-mode
+        # tall decomposition and validate against the rebuilt matrix.
+        a, q, r, u = self.generate('tall', 'economic', which='col', p=p)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_p_col_eco(self):
+        # 12x7 + 12x3 = 12x10 --> stays eco
+        self.base_economic_p_col_xxx(3)
+
+    def test_economic_p_col_sqr(self):
+        # 12x7 + 12x5 = 12x12 --> becomes sqr
+        self.base_economic_p_col_xxx(5)
+
+    def test_economic_p_col_fat(self):
+        # 12x7 + 12x7 = 12x14 --> becomes fat
+        self.base_economic_p_col_xxx(7)
+
+    # Degenerate-shape coverage: single-column (Mx1), single-row (1xN) and
+    # scalar-like (1x1) matrices, each with rank-1 and rank-p row/col inserts.
+    def test_Mx1_1_row(self):
+        a, q, r, u = self.generate('Mx1', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_p_row(self):
+        a, q, r, u = self.generate('Mx1', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_1_col(self):
+        a, q, r, u = self.generate('Mx1', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_p_col(self):
+        a, q, r, u = self.generate('Mx1', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    # Same degenerate shapes with economic decompositions.
+    def test_Mx1_economic_1_row(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_p_row(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'row', 3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_1_col(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_p_col(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'col', 3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_1xN_1_row(self):
+        a, q, r, u = self.generate('1xN', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_p_row(self):
+        a, q, r, u = self.generate('1xN', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_1_col(self):
+        a, q, r, u = self.generate('1xN', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_p_col(self):
+        a, q, r, u = self.generate('1xN', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_1_row(self):
+        a, q, r, u = self.generate('1x1', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_p_row(self):
+        a, q, r, u = self.generate('1x1', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_1_col(self):
+        a, q, r, u = self.generate('1x1', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_p_col(self):
+        a, q, r, u = self.generate('1x1', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_1_scalar(self):
+        # 0d (scalar) q, r or u arguments must be rejected with ValueError.
+        a, q, r, u = self.generate('1x1', which='row')
+        assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row')
+
+        assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col')
+
+ def base_non_simple_strides(self, adjust_strides, k, p, which):
+ for type in ['sqr', 'tall', 'fat']:
+ a, q0, r0, u0 = self.generate(type, which=which, p=p)
+ qs, rs, us = adjust_strides((q0, r0, u0))
+ if p == 1:
+ ai = np.insert(a, k, u0, 0 if which == 'row' else 1)
+ else:
+ ai = np.insert(a, np.full(p, k, np.intp),
+ u0 if which == 'row' else u0,
+ 0 if which == 'row' else 1)
+
+ # for each variable, q, r, u we try with it strided and
+ # overwrite=False. Then we try with overwrite=True. Nothing
+ # is checked to see if it can be overwritten, since only
+ # F ordered Q can be overwritten when adding columns.
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False)
+ check_qr(q1, r1, ai, self.rtol, self.atol)
+ q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True)
+ check_qr(q1o, r1o, ai, self.rtol, self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False)
+ check_qr(q2, r2, ai, self.rtol, self.atol)
+ q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True)
+ check_qr(q2o, r2o, ai, self.rtol, self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False)
+ check_qr(q3, r3, ai, self.rtol, self.atol)
+ q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True)
+ check_qr(q3o, r3o, ai, self.rtol, self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ # since some of these were consumed above
+ qs, rs, us = adjust_strides((q, r, u))
+ q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False)
+ check_qr(q5, r5, ai, self.rtol, self.atol)
+ q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True)
+ check_qr(q5o, r5o, ai, self.rtol, self.atol)
+
+    # Drivers for base_non_simple_strides: each stride-mangling helper
+    # (make_strided, negate_strides, nonitemsize_strides, make_nonnative --
+    # all defined earlier in this file) crossed with rank 1/3 and row/col.
+    def test_non_unit_strides_1_row(self):
+        self.base_non_simple_strides(make_strided, 0, 1, 'row')
+
+    def test_non_unit_strides_p_row(self):
+        self.base_non_simple_strides(make_strided, 0, 3, 'row')
+
+    def test_non_unit_strides_1_col(self):
+        self.base_non_simple_strides(make_strided, 0, 1, 'col')
+
+    def test_non_unit_strides_p_col(self):
+        self.base_non_simple_strides(make_strided, 0, 3, 'col')
+
+    def test_neg_strides_1_row(self):
+        self.base_non_simple_strides(negate_strides, 0, 1, 'row')
+
+    def test_neg_strides_p_row(self):
+        self.base_non_simple_strides(negate_strides, 0, 3, 'row')
+
+    def test_neg_strides_1_col(self):
+        self.base_non_simple_strides(negate_strides, 0, 1, 'col')
+
+    def test_neg_strides_p_col(self):
+        self.base_non_simple_strides(negate_strides, 0, 3, 'col')
+
+    def test_non_itemsize_strides_1_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row')
+
+    def test_non_itemsize_strides_p_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row')
+
+    def test_non_itemsize_strides_1_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col')
+
+    def test_non_itemsize_strides_p_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col')
+
+    def test_non_native_byte_order_1_row(self):
+        self.base_non_simple_strides(make_nonnative, 0, 1, 'row')
+
+    def test_non_native_byte_order_p_row(self):
+        self.base_non_simple_strides(make_nonnative, 0, 3, 'row')
+
+    def test_non_native_byte_order_1_col(self):
+        self.base_non_simple_strides(make_nonnative, 0, 1, 'col')
+
+    def test_non_native_byte_order_p_col(self):
+        self.base_non_simple_strides(make_nonnative, 0, 3, 'col')
+
+    def test_overwrite_qu_rank_1(self):
+        # when inserting rows, the size of both Q and R change, so only
+        # column inserts can overwrite q. Only complex column inserts
+        # with C ordered Q overwrite u. Any contiguous Q is overwritten
+        # when inserting 1 column
+        a, q0, r, u, = self.generate('sqr', which='col', p=1)
+        q = q0.copy('C')
+        u0 = u.copy()
+        # don't overwrite
+        q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
+        a1 = np.insert(a, 0, u0, 1)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        # try overwriting
+        q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        # verify the overwriting
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(u, u0.conj(), self.rtol, self.atol)
+
+        # now try with a fortran ordered Q
+        qF = q0.copy('F')
+        u1 = u0.copy()
+        q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False)
+        check_qr(q3, r3, a1, self.rtol, self.atol)
+        check_qr(qF, r, a, self.rtol, self.atol)
+
+        # try overwriting
+        q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True)
+        check_qr(q4, r4, a1, self.rtol, self.atol)
+        assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qu_rank_p(self):
+        # when inserting rows, the size of both Q and R change, so only
+        # column inserts can potentially overwrite Q. In practice, only
+        # F ordered Q are overwritten with a rank p update.
+        a, q0, r, u, = self.generate('sqr', which='col', p=3)
+        q = q0.copy('F')
+        a1 = np.insert(a, np.zeros(3, np.intp), u, 1)
+
+        # don't overwrite
+        q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        # try overwriting
+        q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+
+    # Error-path coverage: empty arrays, shape mismatches and unsupported
+    # dtypes must all raise ValueError for both row and col inserts.
+    def test_empty_inputs(self):
+        a, q, r, u = self.generate('sqr', which='row')
+        assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row')
+        assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col')
+
+    def test_mismatched_shapes(self):
+        a, q, r, u = self.generate('tall', which='row')
+        assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col')
+
+    def test_unsupported_dtypes(self):
+        dts = ['int8', 'int16', 'int32', 'int64',
+               'uint8', 'uint16', 'uint32', 'uint64',
+               'float16', 'longdouble', 'longcomplex',
+               'bool']
+        a, q0, r0, u0 = self.generate('sqr', which='row')
+        for dtype in dts:
+            # Cast one argument at a time to the unsupported dtype.
+            q = q0.real.astype(dtype)
+            r = r0.real.astype(dtype)
+            u = u0.real.astype(dtype)
+            assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
+            assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
+            assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
+            assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
+            assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
+            assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
+
+    def test_check_finite(self):
+        # A NaN anywhere in q, r or u must be rejected with ValueError,
+        # for both rank-1 (1d u) and rank-3 (2d u) inserts.
+        a0, q0, r0, u0 = self.generate('sqr', which='row', p=3)
+
+        q = q0.copy('F')
+        q[1,1] = np.nan
+        assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
+
+        r = r0.copy('F')
+        r[1,1] = np.nan
+        assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col')
+        assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
+
+        u = u0.copy('F')
+        u[0,0] = np.nan
+        assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col')
+        assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
+
+# Concrete dtype instantiations of the insert tests:
+# f/d = single/double real, F/D = single/double complex.
+class TestQRinsert_f(BaseQRinsert):
+    dtype = np.dtype('f')
+
+class TestQRinsert_F(BaseQRinsert):
+    dtype = np.dtype('F')
+
+class TestQRinsert_d(BaseQRinsert):
+    dtype = np.dtype('d')
+
+class TestQRinsert_D(BaseQRinsert):
+    dtype = np.dtype('D')
+
+class BaseQRupdate(BaseQRdeltas):
+    # Shared machinery for qr_update tests; subclasses below fix self.dtype.
+    def generate(self, type, mode='full', p=1):
+        """Generate a, its QR factors, and update vectors/matrices u, v.
+
+        For p == 1, u and v are 1d (rank-1 update); otherwise they are
+        (m, p) and (n, p) for a rank-p update a + u @ v.conj().T.
+        """
+        a, q, r = super(BaseQRupdate, self).generate(type, mode)
+
+        # super call set the seed...
+        if p == 1:
+            u = np.random.random(q.shape[0])
+            v = np.random.random(r.shape[1])
+        else:
+            u = np.random.random((q.shape[0], p))
+            v = np.random.random((r.shape[1], p))
+
+        # For complex dtypes, add random imaginary parts.
+        if np.iscomplexobj(self.dtype.type(1)):
+            b = np.random.random(u.shape)
+            u = u + 1j * b
+
+            c = np.random.random(v.shape)
+            v = v + 1j * c
+
+        u = u.astype(self.dtype)
+        v = v.astype(self.dtype)
+        return a, q, r, u, v
+
+    # Rank-1 and rank-p updates across all shapes.  Rank-1 checks
+    # a + outer(u, conj(v)); rank-p checks a + u @ conj(v).T, including
+    # the p == 1 case reshaped to 2d columns.
+    def test_sqr_rank_1(self):
+        a, q, r, u, v = self.generate('sqr')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_rank_p(self):
+        # test ndim = 2, rank 1 updates here too
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('sqr', p=p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_rank_1(self):
+        a, q, r, u, v = self.generate('tall')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_rank_p(self):
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('tall', p=p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_rank_1(self):
+        a, q, r, u, v = self.generate('fat')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_rank_p(self):
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('fat', p=p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_economic_rank_1(self):
+        a, q, r, u, v = self.generate('tall', 'economic')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_rank_p(self):
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('tall', 'economic', p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_rank_1(self):
+        a, q, r, u, v = self.generate('Mx1')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('Mx1', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_economic_rank_1(self):
+        a, q, r, u, v = self.generate('Mx1', 'economic')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('Mx1', 'economic', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_1xN_rank_1(self):
+        a, q, r, u, v = self.generate('1xN')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('1xN', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_rank_1(self):
+        a, q, r, u, v = self.generate('1x1')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('1x1', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_rank_1_scalar(self):
+        # 0d (scalar) arguments must be rejected with ValueError.
+        a, q, r, u, v = self.generate('1x1')
+        assert_raises(ValueError, qr_update, q[0, 0], r, u, v)
+        assert_raises(ValueError, qr_update, q, r[0, 0], u, v)
+        assert_raises(ValueError, qr_update, q, r, u[0], v)
+        assert_raises(ValueError, qr_update, q, r, u, v[0])
+
+    def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable):
+        # Exercise qr_update with non-contiguous inputs.  Each of q, r, u, v
+        # is tried strided individually, then all together, with overwrite
+        # both False and True.  When `overwriteable` is set, additionally
+        # check that r and v really were overwritten in place.
+        # NOTE: the statement order here (which copy is fed strided, which
+        # fresh) encodes what the implementation may and may not overwrite;
+        # do not reorder.
+        assert_sqr = False if mode == 'economic' else True
+        for type in ['sqr', 'tall', 'fat']:
+            a, q0, r0, u0, v0 = self.generate(type, mode, p)
+            qs, rs, us, vs = adjust_strides((q0, r0, u0, v0))
+            if p == 1:
+                aup = a + np.outer(u0, v0.conj())
+            else:
+                aup = a + np.dot(u0, v0.T.conj())
+
+            # for each variable, q, r, u, v we try with it strided and
+            # overwrite=False. Then we try with overwrite=True, and make
+            # sure that if p == 1, r and v are still overwritten.
+            # a strided q and u must always be copied.
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q1, r1 = qr_update(qs, r, u, v, False)
+            check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr)
+            q1o, r1o = qr_update(qs, r, u, v, True)
+            check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol)
+                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q2, r2 = qr_update(q, rs, u, v, False)
+            check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr)
+            q2o, r2o = qr_update(q, rs, u, v, True)
+            check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol)
+                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q3, r3 = qr_update(q, r, us, v, False)
+            check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr)
+            q3o, r3o = qr_update(q, r, us, v, True)
+            check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol)
+                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q4, r4 = qr_update(q, r, u, vs, False)
+            check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr)
+            q4o, r4o = qr_update(q, r, u, vs, True)
+            check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol)
+                assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            # since some of these were consumed above
+            qs, rs, us, vs = adjust_strides((q, r, u, v))
+            q5, r5 = qr_update(qs, rs, us, vs, False)
+            check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr)
+            q5o, r5o = qr_update(qs, rs, us, vs, True)
+            check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol)
+                assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+    # Drivers for base_non_simple_strides: stride-mangler x full/economic x
+    # rank 1/3.  Only simple non-unit strides keep r/v overwriteable.
+    def test_non_unit_strides_rank_1(self):
+        self.base_non_simple_strides(make_strided, 'full', 1, True)
+
+    def test_non_unit_strides_economic_rank_1(self):
+        self.base_non_simple_strides(make_strided, 'economic', 1, True)
+
+    def test_non_unit_strides_rank_p(self):
+        self.base_non_simple_strides(make_strided, 'full', 3, False)
+
+    def test_non_unit_strides_economic_rank_p(self):
+        self.base_non_simple_strides(make_strided, 'economic', 3, False)
+
+    def test_neg_strides_rank_1(self):
+        self.base_non_simple_strides(negate_strides, 'full', 1, False)
+
+    def test_neg_strides_economic_rank_1(self):
+        self.base_non_simple_strides(negate_strides, 'economic', 1, False)
+
+    def test_neg_strides_rank_p(self):
+        self.base_non_simple_strides(negate_strides, 'full', 3, False)
+
+    def test_neg_strides_economic_rank_p(self):
+        self.base_non_simple_strides(negate_strides, 'economic', 3, False)
+
+    def test_non_itemsize_strides_rank_1(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False)
+
+    def test_non_itemsize_strides_economic_rank_1(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False)
+
+    def test_non_itemsize_strides_rank_p(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False)
+
+    def test_non_itemsize_strides_economic_rank_p(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False)
+
+    def test_non_native_byte_order_rank_1(self):
+        self.base_non_simple_strides(make_nonnative, 'full', 1, False)
+
+    def test_non_native_byte_order_economic_rank_1(self):
+        self.base_non_simple_strides(make_nonnative, 'economic', 1, False)
+
+    def test_non_native_byte_order_rank_p(self):
+        self.base_non_simple_strides(make_nonnative, 'full', 3, False)
+
+    def test_non_native_byte_order_economic_rank_p(self):
+        self.base_non_simple_strides(make_nonnative, 'economic', 3, False)
+
+    def test_overwrite_qruv_rank_1(self):
+        # Any positive strided q, r, u, and v can be overwritten for a rank 1
+        # update, only checking C and F contiguous.
+        a, q0, r0, u0, v0 = self.generate('sqr')
+        a1 = a + np.outer(u0, v0.conj())
+        q = q0.copy('F')
+        r = r0.copy('F')
+        u = u0.copy('F')
+        v = v0.copy('F')
+
+        # don't overwrite
+        q1, r1 = qr_update(q, r, u, v, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        q2, r2 = qr_update(q, r, u, v, True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        # verify the overwriting, no good way to check u and v.
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+        q = q0.copy('C')
+        r = r0.copy('C')
+        u = u0.copy('C')
+        v = v0.copy('C')
+        q3, r3 = qr_update(q, r, u, v, True)
+        check_qr(q3, r3, a1, self.rtol, self.atol)
+        assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qruv_rank_1_economic(self):
+        # updating economic decompositions can overwrite any contiguous r,
+        # and positively strided r and u. V is only ever read.
+        # only checking C and F contiguous.
+        a, q0, r0, u0, v0 = self.generate('tall', 'economic')
+        a1 = a + np.outer(u0, v0.conj())
+        q = q0.copy('F')
+        r = r0.copy('F')
+        u = u0.copy('F')
+        v = v0.copy('F')
+
+        # don't overwrite
+        q1, r1 = qr_update(q, r, u, v, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+        check_qr(q, r, a, self.rtol, self.atol, False)
+
+        q2, r2 = qr_update(q, r, u, v, True)
+        check_qr(q2, r2, a1, self.rtol, self.atol, False)
+        # verify the overwriting, no good way to check u and v.
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+        q = q0.copy('C')
+        r = r0.copy('C')
+        u = u0.copy('C')
+        v = v0.copy('C')
+        q3, r3 = qr_update(q, r, u, v, True)
+        check_qr(q3, r3, a1, self.rtol, self.atol, False)
+        assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qruv_rank_p(self):
+        # for rank p updates, q r must be F contiguous, v must be C (v.T --> F)
+        # and u can be C or F, but is only overwritten if Q is C and complex
+        a, q0, r0, u0, v0 = self.generate('sqr', p=3)
+        a1 = a + np.dot(u0, v0.T.conj())
+        q = q0.copy('F')
+        r = r0.copy('F')
+        u = u0.copy('F')
+        v = v0.copy('C')
+
+        # don't overwrite
+        q1, r1 = qr_update(q, r, u, v, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        q2, r2 = qr_update(q, r, u, v, True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        # verify the overwriting, no good way to check u and v.
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+    # Error-path coverage for qr_update: empty arrays, shape mismatches,
+    # unsupported dtypes and plain integer inputs must all raise ValueError.
+    def test_empty_inputs(self):
+        a, q, r, u, v = self.generate('tall')
+        assert_raises(ValueError, qr_update, np.array([]), r, u, v)
+        assert_raises(ValueError, qr_update, q, np.array([]), u, v)
+        assert_raises(ValueError, qr_update, q, r, np.array([]), v)
+        assert_raises(ValueError, qr_update, q, r, u, np.array([]))
+
+    def test_mismatched_shapes(self):
+        a, q, r, u, v = self.generate('tall')
+        assert_raises(ValueError, qr_update, q, r[1:], u, v)
+        assert_raises(ValueError, qr_update, q[:-2], r, u, v)
+        assert_raises(ValueError, qr_update, q, r, u[1:], v)
+        assert_raises(ValueError, qr_update, q, r, u, v[1:])
+
+    def test_unsupported_dtypes(self):
+        dts = ['int8', 'int16', 'int32', 'int64',
+               'uint8', 'uint16', 'uint32', 'uint64',
+               'float16', 'longdouble', 'longcomplex',
+               'bool']
+        a, q0, r0, u0, v0 = self.generate('tall')
+        for dtype in dts:
+            # Cast one argument at a time to the unsupported dtype.
+            q = q0.real.astype(dtype)
+            r = r0.real.astype(dtype)
+            u = u0.real.astype(dtype)
+            v = v0.real.astype(dtype)
+            assert_raises(ValueError, qr_update, q, r0, u0, v0)
+            assert_raises(ValueError, qr_update, q0, r, u0, v0)
+            assert_raises(ValueError, qr_update, q0, r0, u, v0)
+            assert_raises(ValueError, qr_update, q0, r0, u0, v)
+
+    def test_integer_input(self):
+        q = np.arange(16).reshape(4, 4)
+        r = q.copy()  # doesn't matter
+        u = q[:, 0].copy()
+        v = r[0, :].copy()
+        assert_raises(ValueError, qr_update, q, r, u, v)
+
+ def test_check_finite(self):
+ a0, q0, r0, u0, v0 = self.generate('tall', p=3)
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q, r0, u0, v0)
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r, u0, v0)
+
+ u = u0.copy('F')
+ u[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v0)
+
+ v = v0.copy('F')
+ v[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v)
+
+ def test_economic_check_finite(self):
+ a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3)
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q, r0, u0, v0)
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r, u0, v0)
+
+ u = u0.copy('F')
+ u[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v0)
+
+ v = v0.copy('F')
+ v[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v)
+
+    def test_u_exactly_in_span_q(self):
+        # u lying exactly in the span of Q is a degenerate case for the
+        # economic update; the result must still satisfy Q1 R1 == A + u v^H.
+        q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype)
+        r = np.array([[1, 0], [0, 1]], self.dtype)
+        u = np.array([0, 0, 0, -1], self.dtype)
+        v = np.array([1, 2], self.dtype)
+        q1, r1 = qr_update(q, r, u, v)
+        a1 = np.dot(q, r) + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+# Concrete dtype instantiations of the update tests:
+# f/d = single/double real, F/D = single/double complex.
+class TestQRupdate_f(BaseQRupdate):
+    dtype = np.dtype('f')
+
+class TestQRupdate_F(BaseQRupdate):
+    dtype = np.dtype('F')
+
+class TestQRupdate_d(BaseQRupdate):
+    dtype = np.dtype('d')
+
+class TestQRupdate_D(BaseQRupdate):
+    dtype = np.dtype('D')
+
+def test_form_qTu():
+    # We want to ensure that all of the code paths through this function are
+    # tested. Most of them should be hit with the rest of test suite, but
+    # explicit tests make clear precisely what is being tested.
+    #
+    # This function expects that Q is either C or F contiguous and square.
+    # Economic mode decompositions (Q is (M, N), M != N) do not go through this
+    # function. U may have any positive strides.
+    #
+    # Some of these test are duplicates, since contiguous 1d arrays are both C
+    # and F.
+
+    q_order = ['F', 'C']
+    q_shape = [(8, 8), ]
+    u_order = ['F', 'C', 'A']  # here A means is not F not C
+    u_shape = [1, 3]
+    dtype = ['f', 'd', 'F', 'D']
+
+    # Cross product of every Q order/shape, u order/shape and dtype; width-1
+    # u is additionally tried both as a 1d vector and as an (n, 1) column.
+    for qo, qs, uo, us, d in \
+            itertools.product(q_order, q_shape, u_order, u_shape, dtype):
+        if us == 1:
+            check_form_qTu(qo, qs, uo, us, 1, d)
+            check_form_qTu(qo, qs, uo, us, 2, d)
+        else:
+            check_form_qTu(qo, qs, uo, us, 2, d)
+
+def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):
+ np.random.seed(47)
+ if u_shape == 1 and u_ndim == 1:
+ u_shape = (q_shape[0],)
+ else:
+ u_shape = (q_shape[0], u_shape)
+ dtype = np.dtype(dtype)
+
+ if dtype.char in 'fd':
+ q = np.random.random(q_shape)
+ u = np.random.random(u_shape)
+ elif dtype.char in 'FD':
+ q = np.random.random(q_shape) + 1j*np.random.random(q_shape)
+ u = np.random.random(u_shape) + 1j*np.random.random(u_shape)
+ else:
+ ValueError("form_qTu doesn't support this dtype")
+
+ q = np.require(q, dtype, q_order)
+ if u_order != 'A':
+ u = np.require(u, dtype, u_order)
+ else:
+ u, = make_strided((u.astype(dtype),))
+
+ rtol = 10.0 ** -(np.finfo(dtype).precision-2)
+ atol = 2*np.finfo(dtype).eps
+
+ expected = np.dot(q.T.conj(), u)
+ res = _decomp_update._form_qTu(q, u)
+ assert_allclose(res, expected, rtol=rtol, atol=atol)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_fblas.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_fblas.py
new file mode 100644
index 0000000..8479194
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_fblas.py
@@ -0,0 +1,607 @@
+# Test interfaces to fortran blas.
+#
+# The tests are more of interface than they are of the underlying blas.
+# Only very small matrices checked -- N=3 or so.
+#
+# !! Complex calculations really aren't checked that carefully.
+# !! Only real valued complex numbers are used in tests.
+
+from numpy import float32, float64, complex64, complex128, arange, array, \
+ zeros, shape, transpose, newaxis, common_type, conjugate
+
+from scipy.linalg import _fblas as fblas
+
+from numpy.testing import assert_array_equal, \
+ assert_allclose, assert_array_almost_equal, assert_
+
+import pytest
+
+# decimal accuracy to require between Python and LAPACK/BLAS calculations
+accuracy = 5
+
+# Since numpy.dot likely uses the same blas, use this routine
+# to check.
+
+
+def matrixmultiply(a, b):
+ if len(b.shape) == 1:
+ b_is_vector = True
+ b = b[:, newaxis]
+ else:
+ b_is_vector = False
+ assert_(a.shape[1] == b.shape[0])
+ c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
+ for i in range(a.shape[0]):
+ for j in range(b.shape[1]):
+ s = 0
+ for k in range(a.shape[1]):
+ s += a[i, k] * b[k, j]
+ c[i, j] = s
+ if b_is_vector:
+ c = c.reshape((a.shape[0],))
+ return c
+
+##################################################
+# Test blas ?axpy
+
+
+class BaseAxpy(object):
+ ''' Mixin class for axpy tests '''
+
+ def test_default_a(self):
+ x = arange(3., dtype=self.dtype)
+ y = arange(3., dtype=x.dtype)
+ real_y = x*1.+y
+ y = self.blas_func(x, y)
+ assert_array_equal(real_y, y)
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ y = arange(3., dtype=x.dtype)
+ real_y = x*3.+y
+ y = self.blas_func(x, y, a=3.)
+ assert_array_equal(real_y, y)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ y = zeros(3, x.dtype)
+ y = arange(3., dtype=x.dtype)
+ real_y = x[::2]*3.+y
+ y = self.blas_func(x, y, a=3., n=3, incx=2)
+ assert_array_equal(real_y, y)
+
+ def test_y_stride(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ real_y = x*3.+y[::2]
+ y = self.blas_func(x, y, a=3., n=3, incy=2)
+ assert_array_equal(real_y, y[::2])
+
+ def test_x_and_y_stride(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ real_y = x[::4]*3.+y[::2]
+ y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2)
+ assert_array_equal(real_y, y[::2])
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=4, incx=5)
+
+ def test_y_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=3, incy=5)
+
+
+try:
+ class TestSaxpy(BaseAxpy):
+ blas_func = fblas.saxpy
+ dtype = float32
+except AttributeError:
+ class TestSaxpy:
+ pass
+
+
+class TestDaxpy(BaseAxpy):
+ blas_func = fblas.daxpy
+ dtype = float64
+
+
+try:
+ class TestCaxpy(BaseAxpy):
+ blas_func = fblas.caxpy
+ dtype = complex64
+except AttributeError:
+ class TestCaxpy:
+ pass
+
+
+class TestZaxpy(BaseAxpy):
+ blas_func = fblas.zaxpy
+ dtype = complex128
+
+
+##################################################
+# Test blas ?scal
+
+class BaseScal(object):
+ ''' Mixin class for scal testing '''
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ real_x = x*3.
+ x = self.blas_func(3., x)
+ assert_array_equal(real_x, x)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ real_x = x.copy()
+ real_x[::2] = x[::2]*array(3., self.dtype)
+ x = self.blas_func(3., x, n=3, incx=2)
+ assert_array_equal(real_x, x)
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(2., x, n=4, incx=5)
+
+
+try:
+ class TestSscal(BaseScal):
+ blas_func = fblas.sscal
+ dtype = float32
+except AttributeError:
+ class TestSscal:
+ pass
+
+
+class TestDscal(BaseScal):
+ blas_func = fblas.dscal
+ dtype = float64
+
+
+try:
+ class TestCscal(BaseScal):
+ blas_func = fblas.cscal
+ dtype = complex64
+except AttributeError:
+ class TestCscal:
+ pass
+
+
+class TestZscal(BaseScal):
+ blas_func = fblas.zscal
+ dtype = complex128
+
+
+##################################################
+# Test blas ?copy
+
+class BaseCopy(object):
+ ''' Mixin class for copy testing '''
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(shape(x), x.dtype)
+ y = self.blas_func(x, y)
+ assert_array_equal(x, y)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ y = zeros(3, x.dtype)
+ y = self.blas_func(x, y, n=3, incx=2)
+ assert_array_equal(x[::2], y)
+
+ def test_y_stride(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ y = self.blas_func(x, y, n=3, incy=2)
+ assert_array_equal(x, y[::2])
+
+ def test_x_and_y_stride(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ y = self.blas_func(x, y, n=3, incx=4, incy=2)
+ assert_array_equal(x[::4], y[::2])
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=4, incx=5)
+
+ def test_y_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=3, incy=5)
+
+ # def test_y_bad_type(self):
+ ## Hmmm. Should this work? What should be the output.
+ # x = arange(3.,dtype=self.dtype)
+ # y = zeros(shape(x))
+ # self.blas_func(x,y)
+ # assert_array_equal(x,y)
+
+
+try:
+ class TestScopy(BaseCopy):
+ blas_func = fblas.scopy
+ dtype = float32
+except AttributeError:
+ class TestScopy:
+ pass
+
+
+class TestDcopy(BaseCopy):
+ blas_func = fblas.dcopy
+ dtype = float64
+
+
+try:
+ class TestCcopy(BaseCopy):
+ blas_func = fblas.ccopy
+ dtype = complex64
+except AttributeError:
+ class TestCcopy:
+ pass
+
+
+class TestZcopy(BaseCopy):
+ blas_func = fblas.zcopy
+ dtype = complex128
+
+
+##################################################
+# Test blas ?swap
+
+class BaseSwap(object):
+ ''' Mixin class for swap tests '''
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(shape(x), x.dtype)
+ desired_x = y.copy()
+ desired_y = x.copy()
+ x, y = self.blas_func(x, y)
+ assert_array_equal(desired_x, x)
+ assert_array_equal(desired_y, y)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ y = zeros(3, x.dtype)
+ desired_x = y.copy()
+ desired_y = x.copy()[::2]
+ x, y = self.blas_func(x, y, n=3, incx=2)
+ assert_array_equal(desired_x, x[::2])
+ assert_array_equal(desired_y, y)
+
+ def test_y_stride(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ desired_x = y.copy()[::2]
+ desired_y = x.copy()
+ x, y = self.blas_func(x, y, n=3, incy=2)
+ assert_array_equal(desired_x, x)
+ assert_array_equal(desired_y, y[::2])
+
+ def test_x_and_y_stride(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ desired_x = y.copy()[::2]
+ desired_y = x.copy()[::4]
+ x, y = self.blas_func(x, y, n=3, incx=4, incy=2)
+ assert_array_equal(desired_x, x[::4])
+ assert_array_equal(desired_y, y[::2])
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=4, incx=5)
+
+ def test_y_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=3, incy=5)
+
+
+try:
+ class TestSswap(BaseSwap):
+ blas_func = fblas.sswap
+ dtype = float32
+except AttributeError:
+ class TestSswap:
+ pass
+
+
+class TestDswap(BaseSwap):
+ blas_func = fblas.dswap
+ dtype = float64
+
+
+try:
+ class TestCswap(BaseSwap):
+ blas_func = fblas.cswap
+ dtype = complex64
+except AttributeError:
+ class TestCswap:
+ pass
+
+
+class TestZswap(BaseSwap):
+ blas_func = fblas.zswap
+ dtype = complex128
+
+##################################################
+# Test blas ?gemv
+# This will be a mess to test all cases.
+
+
+class BaseGemv(object):
+ ''' Mixin class for gemv tests '''
+
+ def get_data(self, x_stride=1, y_stride=1):
+ mult = array(1, dtype=self.dtype)
+ if self.dtype in [complex64, complex128]:
+ mult = array(1+1j, dtype=self.dtype)
+ from numpy.random import normal, seed
+ seed(1234)
+ alpha = array(1., dtype=self.dtype) * mult
+ beta = array(1., dtype=self.dtype) * mult
+ a = normal(0., 1., (3, 3)).astype(self.dtype) * mult
+ x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
+ y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
+ return alpha, beta, a, x, y
+
+ def test_simple(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = alpha*matrixmultiply(a, x)+beta*y
+ y = self.blas_func(alpha, a, x, beta, y)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_default_beta_y(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = matrixmultiply(a, x)
+ y = self.blas_func(1, a, x)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_simple_transpose(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, trans=1)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_simple_transpose_conj(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, trans=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_x_stride(self):
+ alpha, beta, a, x, y = self.get_data(x_stride=2)
+ desired_y = alpha*matrixmultiply(a, x[::2])+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, incx=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_x_stride_transpose(self):
+ alpha, beta, a, x, y = self.get_data(x_stride=2)
+ desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_x_stride_assert(self):
+ # What is the use of this test?
+ alpha, beta, a, x, y = self.get_data(x_stride=2)
+ with pytest.raises(Exception, match='failed for 3rd argument'):
+ y = self.blas_func(1, a, x, 1, y, trans=0, incx=3)
+ with pytest.raises(Exception, match='failed for 3rd argument'):
+ y = self.blas_func(1, a, x, 1, y, trans=1, incx=3)
+
+ def test_y_stride(self):
+ alpha, beta, a, x, y = self.get_data(y_stride=2)
+ desired_y = y.copy()
+ desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2]
+ y = self.blas_func(alpha, a, x, beta, y, incy=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_y_stride_transpose(self):
+ alpha, beta, a, x, y = self.get_data(y_stride=2)
+ desired_y = y.copy()
+ desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2]
+ y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_y_stride_assert(self):
+ # What is the use of this test?
+ alpha, beta, a, x, y = self.get_data(y_stride=2)
+ with pytest.raises(Exception, match='failed for 2nd keyword'):
+ y = self.blas_func(1, a, x, 1, y, trans=0, incy=3)
+ with pytest.raises(Exception, match='failed for 2nd keyword'):
+ y = self.blas_func(1, a, x, 1, y, trans=1, incy=3)
+
+
+try:
+ class TestSgemv(BaseGemv):
+ blas_func = fblas.sgemv
+ dtype = float32
+
+ def test_sgemv_on_osx(self):
+ from itertools import product
+ import sys
+ import numpy as np
+
+ if sys.platform != 'darwin':
+ return
+
+ def aligned_array(shape, align, dtype, order='C'):
+ # Make array shape `shape` with aligned at `align` bytes
+ d = dtype()
+ # Make array of correct size with `align` extra bytes
+ N = np.prod(shape)
+ tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
+ address = tmp.__array_interface__["data"][0]
+ # Find offset into array giving desired alignment
+ for offset in range(align):
+ if (address + offset) % align == 0:
+ break
+ tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
+ return tmp.reshape(shape, order=order)
+
+ def as_aligned(arr, align, dtype, order='C'):
+ # Copy `arr` into an aligned array with same shape
+ aligned = aligned_array(arr.shape, align, dtype, order)
+ aligned[:] = arr[:]
+ return aligned
+
+ def assert_dot_close(A, X, desired):
+ assert_allclose(self.blas_func(1.0, A, X), desired,
+ rtol=1e-5, atol=1e-7)
+
+ testdata = product((15, 32), (10000,), (200, 89), ('C', 'F'))
+ for align, m, n, a_order in testdata:
+ A_d = np.random.rand(m, n)
+ X_d = np.random.rand(n)
+ desired = np.dot(A_d, X_d)
+ # Calculation with aligned single precision
+ A_f = as_aligned(A_d, align, np.float32, order=a_order)
+ X_f = as_aligned(X_d, align, np.float32, order=a_order)
+ assert_dot_close(A_f, X_f, desired)
+
+except AttributeError:
+ class TestSgemv:
+ pass
+
+
+class TestDgemv(BaseGemv):
+ blas_func = fblas.dgemv
+ dtype = float64
+
+
+try:
+ class TestCgemv(BaseGemv):
+ blas_func = fblas.cgemv
+ dtype = complex64
+except AttributeError:
+ class TestCgemv:
+ pass
+
+
+class TestZgemv(BaseGemv):
+ blas_func = fblas.zgemv
+ dtype = complex128
+
+
+"""
+##################################################
+### Test blas ?ger
+### This will be a mess to test all cases.
+
+class BaseGer(object):
+ def get_data(self,x_stride=1,y_stride=1):
+ from numpy.random import normal, seed
+ seed(1234)
+ alpha = array(1., dtype = self.dtype)
+ a = normal(0.,1.,(3,3)).astype(self.dtype)
+ x = arange(shape(a)[0]*x_stride,dtype=self.dtype)
+ y = arange(shape(a)[1]*y_stride,dtype=self.dtype)
+ return alpha,a,x,y
+ def test_simple(self):
+ alpha,a,x,y = self.get_data()
+        # transpose takes care of Fortran vs. C(and Python) memory layout
+ desired_a = alpha*transpose(x[:,newaxis]*y) + a
+ self.blas_func(x,y,a)
+ assert_array_almost_equal(desired_a,a)
+ def test_x_stride(self):
+ alpha,a,x,y = self.get_data(x_stride=2)
+ desired_a = alpha*transpose(x[::2,newaxis]*y) + a
+ self.blas_func(x,y,a,incx=2)
+ assert_array_almost_equal(desired_a,a)
+ def test_x_stride_assert(self):
+ alpha,a,x,y = self.get_data(x_stride=2)
+ with pytest.raises(ValueError, match='foo'):
+ self.blas_func(x,y,a,incx=3)
+ def test_y_stride(self):
+ alpha,a,x,y = self.get_data(y_stride=2)
+ desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a
+ self.blas_func(x,y,a,incy=2)
+ assert_array_almost_equal(desired_a,a)
+
+ def test_y_stride_assert(self):
+ alpha,a,x,y = self.get_data(y_stride=2)
+ with pytest.raises(ValueError, match='foo'):
+ self.blas_func(a,x,y,incy=3)
+
+class TestSger(BaseGer):
+ blas_func = fblas.sger
+ dtype = float32
+class TestDger(BaseGer):
+ blas_func = fblas.dger
+ dtype = float64
+"""
+##################################################
+# Test blas ?gerc
+# This will be a mess to test all cases.
+
+"""
+class BaseGerComplex(BaseGer):
+ def get_data(self,x_stride=1,y_stride=1):
+ from numpy.random import normal, seed
+ seed(1234)
+ alpha = array(1+1j, dtype = self.dtype)
+ a = normal(0.,1.,(3,3)).astype(self.dtype)
+ a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype)
+ x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype)
+ x = x + x * array(1j, dtype = self.dtype)
+ y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype)
+ y = y + y * array(1j, dtype = self.dtype)
+ return alpha,a,x,y
+ def test_simple(self):
+ alpha,a,x,y = self.get_data()
+        # transpose takes care of Fortran vs. C(and Python) memory layout
+ a = a * array(0.,dtype = self.dtype)
+ #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a
+ desired_a = alpha*transpose(x[:,newaxis]*y) + a
+ #self.blas_func(x,y,a,alpha = alpha)
+ fblas.cgeru(x,y,a,alpha = alpha)
+ assert_array_almost_equal(desired_a,a)
+
+ #def test_x_stride(self):
+ # alpha,a,x,y = self.get_data(x_stride=2)
+ # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a
+ # self.blas_func(x,y,a,incx=2)
+ # assert_array_almost_equal(desired_a,a)
+ #def test_y_stride(self):
+ # alpha,a,x,y = self.get_data(y_stride=2)
+ # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a
+ # self.blas_func(x,y,a,incy=2)
+ # assert_array_almost_equal(desired_a,a)
+
+class TestCgeru(BaseGerComplex):
+ blas_func = fblas.cgeru
+ dtype = complex64
+ def transform(self,x):
+ return x
+class TestZgeru(BaseGerComplex):
+ blas_func = fblas.zgeru
+ dtype = complex128
+ def transform(self,x):
+ return x
+
+class TestCgerc(BaseGerComplex):
+ blas_func = fblas.cgerc
+ dtype = complex64
+ def transform(self,x):
+ return conjugate(x)
+
+class TestZgerc(BaseGerComplex):
+ blas_func = fblas.zgerc
+ dtype = complex128
+ def transform(self,x):
+ return conjugate(x)
+"""
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_interpolative.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_interpolative.py
new file mode 100644
index 0000000..efdc0f6
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_interpolative.py
@@ -0,0 +1,295 @@
+#******************************************************************************
+# Copyright (C) 2013 Kenneth L. Ho
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer. Redistributions in binary
+# form must reproduce the above copyright notice, this list of conditions and
+# the following disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# None of the names of the copyright holders may be used to endorse or
+# promote products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+import scipy.linalg.interpolative as pymatrixid
+import numpy as np
+from scipy.linalg import hilbert, svdvals, norm
+from scipy.sparse.linalg import aslinearoperator
+from scipy.linalg.interpolative import interp_decomp
+import time
+import itertools
+
+from numpy.testing import assert_, assert_allclose
+from pytest import raises as assert_raises
+
+
+def _debug_print(s):
+ if 0:
+ print(s)
+
+
+class TestInterpolativeDecomposition(object):
+ def test_id(self):
+ for dtype in [np.float64, np.complex128]:
+ self.check_id(dtype)
+
+ def check_id(self, dtype):
+ # Test ID routines on a Hilbert matrix.
+
+ # set parameters
+ n = 300
+ eps = 1e-12
+
+ # construct Hilbert matrix
+ A = hilbert(n).astype(dtype)
+ if np.issubdtype(dtype, np.complexfloating):
+ A = A * (1 + 1j)
+ L = aslinearoperator(A)
+
+ # find rank
+ S = np.linalg.svd(A, compute_uv=False)
+ try:
+ rank = np.nonzero(S < eps)[0][0]
+ except IndexError:
+ rank = n
+
+ # print input summary
+ _debug_print("Hilbert matrix dimension: %8i" % n)
+ _debug_print("Working precision: %8.2e" % eps)
+ _debug_print("Rank to working precision: %8i" % rank)
+
+ # set print format
+ fmt = "%8.2e (s) / %5s"
+
+ # test real ID routines
+ _debug_print("-----------------------------------------")
+ _debug_print("Real ID routines")
+ _debug_print("-----------------------------------------")
+
+ # fixed precision
+ _debug_print("Calling iddp_id / idzp_id ...",)
+ t0 = time.time()
+ k, idx, proj = pymatrixid.interp_decomp(A, eps, rand=False)
+ t = time.time() - t0
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddp_aid / idzp_aid ...",)
+ t0 = time.time()
+ k, idx, proj = pymatrixid.interp_decomp(A, eps)
+ t = time.time() - t0
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddp_rid / idzp_rid ...",)
+ t0 = time.time()
+ k, idx, proj = pymatrixid.interp_decomp(L, eps)
+ t = time.time() - t0
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ # fixed rank
+ k = rank
+
+ _debug_print("Calling iddr_id / idzr_id ...",)
+ t0 = time.time()
+ idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
+ t = time.time() - t0
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddr_aid / idzr_aid ...",)
+ t0 = time.time()
+ idx, proj = pymatrixid.interp_decomp(A, k)
+ t = time.time() - t0
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddr_rid / idzr_rid ...",)
+ t0 = time.time()
+ idx, proj = pymatrixid.interp_decomp(L, k)
+ t = time.time() - t0
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ # check skeleton and interpolation matrices
+ idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
+ P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+ B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+ assert_(np.allclose(B, A[:,idx[:k]], eps))
+ assert_(np.allclose(B.dot(P), A, eps))
+
+ # test SVD routines
+ _debug_print("-----------------------------------------")
+ _debug_print("SVD routines")
+ _debug_print("-----------------------------------------")
+
+ # fixed precision
+ _debug_print("Calling iddp_svd / idzp_svd ...",)
+ t0 = time.time()
+ U, S, V = pymatrixid.svd(A, eps, rand=False)
+ t = time.time() - t0
+ B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddp_asvd / idzp_asvd...",)
+ t0 = time.time()
+ U, S, V = pymatrixid.svd(A, eps)
+ t = time.time() - t0
+ B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddp_rsvd / idzp_rsvd...",)
+ t0 = time.time()
+ U, S, V = pymatrixid.svd(L, eps)
+ t = time.time() - t0
+ B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ # fixed rank
+ k = rank
+
+ _debug_print("Calling iddr_svd / idzr_svd ...",)
+ t0 = time.time()
+ U, S, V = pymatrixid.svd(A, k, rand=False)
+ t = time.time() - t0
+ B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddr_asvd / idzr_asvd ...",)
+ t0 = time.time()
+ U, S, V = pymatrixid.svd(A, k)
+ t = time.time() - t0
+ B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ _debug_print("Calling iddr_rsvd / idzr_rsvd ...",)
+ t0 = time.time()
+ U, S, V = pymatrixid.svd(L, k)
+ t = time.time() - t0
+ B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+ _debug_print(fmt % (t, np.allclose(A, B, eps)))
+ assert_(np.allclose(A, B, eps))
+
+ # ID to SVD
+ idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
+ Up, Sp, Vp = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
+ B = U.dot(np.diag(S).dot(V.T.conj()))
+ assert_(np.allclose(A, B, eps))
+
+ # Norm estimates
+ s = svdvals(A)
+ norm_2_est = pymatrixid.estimate_spectral_norm(A)
+ assert_(np.allclose(norm_2_est, s[0], 1e-6))
+
+ B = A.copy()
+ B[:,0] *= 1.2
+ s = svdvals(A - B)
+ norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B)
+ assert_(np.allclose(norm_2_est, s[0], 1e-6))
+
+ # Rank estimates
+ B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=dtype)
+ for M in [A, B]:
+ ML = aslinearoperator(M)
+
+ rank_tol = 1e-9
+ rank_np = np.linalg.matrix_rank(M, norm(M, 2)*rank_tol)
+ rank_est = pymatrixid.estimate_rank(M, rank_tol)
+ rank_est_2 = pymatrixid.estimate_rank(ML, rank_tol)
+
+ assert_(rank_est >= rank_np)
+ assert_(rank_est <= rank_np + 10)
+
+ assert_(rank_est_2 >= rank_np - 4)
+ assert_(rank_est_2 <= rank_np + 4)
+
+ def test_rand(self):
+ pymatrixid.seed('default')
+ assert_(np.allclose(pymatrixid.rand(2), [0.8932059, 0.64500803], 1e-4))
+
+ pymatrixid.seed(1234)
+ x1 = pymatrixid.rand(2)
+ assert_(np.allclose(x1, [0.7513823, 0.06861718], 1e-4))
+
+ np.random.seed(1234)
+ pymatrixid.seed()
+ x2 = pymatrixid.rand(2)
+
+ np.random.seed(1234)
+ pymatrixid.seed(np.random.rand(55))
+ x3 = pymatrixid.rand(2)
+
+ assert_allclose(x1, x2)
+ assert_allclose(x1, x3)
+
+ def test_badcall(self):
+ A = hilbert(5).astype(np.float32)
+ assert_raises(ValueError, pymatrixid.interp_decomp, A, 1e-6, rand=False)
+
+ def test_rank_too_large(self):
+ # svd(array, k) should not segfault
+ a = np.ones((4, 3))
+ with assert_raises(ValueError):
+ pymatrixid.svd(a, 4)
+
+ def test_full_rank(self):
+ eps = 1.0e-12
+
+ # fixed precision
+ A = np.random.rand(16, 8)
+ k, idx, proj = pymatrixid.interp_decomp(A, eps)
+ assert_(k == A.shape[1])
+
+ P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+ B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+ assert_allclose(A, B.dot(P))
+
+ # fixed rank
+ idx, proj = pymatrixid.interp_decomp(A, k)
+
+ P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+ B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+ assert_allclose(A, B.dot(P))
+
+ def test_bug_9793(self):
+ dtypes = [np.float_, np.complex_]
+ rands = [True, False]
+ epss = [1, 0.1]
+
+ for dtype, eps, rand in itertools.product(dtypes, epss, rands):
+ A = np.array([[-1, -1, -1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+ [1, 0, 0, 1, 0, 0],
+ [0, 1, 0, 0, 1, 0],
+ [0, 0, 1, 0, 0, 1]],
+ dtype=dtype, order="C")
+ B = A.copy()
+ interp_decomp(A.T, eps, rand=rand)
+ assert_(np.array_equal(A, B))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_lapack.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_lapack.py
new file mode 100644
index 0000000..7ca0d9c
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_lapack.py
@@ -0,0 +1,2981 @@
+#
+# Created by: Pearu Peterson, September 2002
+#
+
+import sys
+import subprocess
+import time
+from functools import reduce
+
+from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
+ assert_allclose, assert_almost_equal,
+ assert_array_equal)
+import pytest
+from pytest import raises as assert_raises
+
+import numpy as np
+from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
+ triu_indices)
+
+from numpy.random import rand, randint, seed
+
+from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
+ solve, ldl, norm, block_diag, qr, eigh)
+
+from scipy.linalg.lapack import _compute_lwork
+from scipy.stats import ortho_group, unitary_group
+
+
+import scipy.sparse as sps
+
+try:
+ from scipy.linalg import _clapack as clapack
+except ImportError:
+ clapack = None
+from scipy.linalg.lapack import get_lapack_funcs
+from scipy.linalg.blas import get_blas_funcs
+
+REAL_DTYPES = [np.float32, np.float64]
+COMPLEX_DTYPES = [np.complex64, np.complex128]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def generate_random_dtype_array(shape, dtype):
+ # generates a random matrix of desired data type of shape
+ if dtype in COMPLEX_DTYPES:
+ return (np.random.rand(*shape)
+ + np.random.rand(*shape)*1.0j).astype(dtype)
+ return np.random.rand(*shape).astype(dtype)
+
+
+def test_lapack_documented():
+ """Test that all entries are in the doc."""
+ if lapack.__doc__ is None: # just in case there is a python -OO
+ pytest.skip('lapack.__doc__ is None')
+ names = set(lapack.__doc__.split())
+ ignore_list = set([
+ 'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
+ 'flapack', 'print_function', 'HAS_ILP64',
+ ])
+ missing = list()
+ for name in dir(lapack):
+ if (not name.startswith('_') and name not in ignore_list and
+ name not in names):
+ missing.append(name)
+ assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
+
+
+class TestFlapackSimple(object):
+
+ def test_gebal(self):
+ a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+ a1 = [[1, 0, 0, 3e-4],
+ [4, 0, 0, 2e-3],
+ [7, 1, 0, 0],
+ [0, 1, 0, 0]]
+ for p in 'sdzc':
+ f = getattr(flapack, p+'gebal', None)
+ if f is None:
+ continue
+ ba, lo, hi, pivscale, info = f(a)
+ assert_(not info, repr(info))
+ assert_array_almost_equal(ba, a)
+ assert_equal((lo, hi), (0, len(a[0])-1))
+ assert_array_almost_equal(pivscale, np.ones(len(a)))
+
+ ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
+ assert_(not info, repr(info))
+ # print(a1)
+ # print(ba, lo, hi, pivscale)
+
+ def test_gehrd(self):
+ a = [[-149, -50, -154],
+ [537, 180, 546],
+ [-27, -9, -25]]
+ for p in 'd':
+ f = getattr(flapack, p+'gehrd', None)
+ if f is None:
+ continue
+ ht, tau, info = f(a)
+ assert_(not info, repr(info))
+
+ def test_trsyl(self):
+ a = np.array([[1, 2], [0, 4]])
+ b = np.array([[5, 6], [0, 8]])
+ c = np.array([[9, 10], [11, 12]])
+ trans = 'T'
+
+ # Test single and double implementations, including most
+ # of the options
+ for dtype in 'fdFD':
+ a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
+ trsyl, = get_lapack_funcs(('trsyl',), (a1,))
+ if dtype.isupper(): # is complex dtype
+ a1[0] += 1j
+ trans = 'C'
+
+ x, scale, info = trsyl(a1, b1, c1)
+ assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
+ scale * c1)
+
+ x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
+ assert_array_almost_equal(
+ np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
+ scale * c1, decimal=4)
+
+ x, scale, info = trsyl(a1, b1, c1, isgn=-1)
+ assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
+ scale * c1, decimal=4)
+
+ def test_lange(self):
+ a = np.array([
+ [-149, -50, -154],
+ [537, 180, 546],
+ [-27, -9, -25]])
+
+ for dtype in 'fdFD':
+ for norm_str in 'Mm1OoIiFfEe':
+ a1 = a.astype(dtype)
+ if dtype.isupper():
+ # is complex dtype
+ a1[0, 0] += 1j
+
+ lange, = get_lapack_funcs(('lange',), (a1,))
+ value = lange(norm_str, a1)
+
+ if norm_str in 'FfEe':
+ if dtype in 'Ff':
+ decimal = 3
+ else:
+ decimal = 7
+ ref = np.sqrt(np.sum(np.square(np.abs(a1))))
+ assert_almost_equal(value, ref, decimal)
+ else:
+ if norm_str in 'Mm':
+ ref = np.max(np.abs(a1))
+ elif norm_str in '1Oo':
+ ref = np.max(np.sum(np.abs(a1), axis=0))
+ elif norm_str in 'Ii':
+ ref = np.max(np.sum(np.abs(a1), axis=1))
+
+ assert_equal(value, ref)
+
+
+class TestLapack(object):
+
+ def test_flapack(self):
+ if hasattr(flapack, 'empty_module'):
+ # flapack module is empty
+ pass
+
+ def test_clapack(self):
+ if hasattr(clapack, 'empty_module'):
+ # clapack module is empty
+ pass
+
+
class TestLeastSquaresSolvers(object):
    """Tests for the ?GELS / ?GELSD / ?GELSS / ?GELSY least-squares drivers.

    Each solver is exercised on the same small real and complex systems,
    and the solutions are compared against precomputed reference values.
    """

    def test_gels(self):
        seed(1234)
        # Test fat/tall matrix argument handling - gh-issue #8329
        for ind, dtype in enumerate(DTYPES):
            m = 10
            n = 20
            nrhs = 1
            a1 = rand(m, n).astype(dtype)
            b1 = rand(n).astype(dtype)
            gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)

            # Request of sizes
            lwork = _compute_lwork(glslw, m, n, nrhs)
            _, _, info = gls(a1, b1, lwork=lwork)
            assert_(info >= 0)
            # 'T' for real dtypes, 'C' for complex ones
            _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
            assert_(info >= 0)

        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gels, gels_lwork, geqrf = get_lapack_funcs(
                    ('gels', 'gels_lwork', 'geqrf'), (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            lwork = _compute_lwork(gels_lwork, m, n, nrhs)

            lqr, x, info = gels(a1, b1, lwork=lwork)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            # The returned factor must agree with a direct QR factorization.
            lqr_truth, _, _, _ = geqrf(a1)
            assert_array_equal(lqr, lqr_truth)

        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gels, gels_lwork, geqrf = get_lapack_funcs(
                    ('gels', 'gels_lwork', 'geqrf'), (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            lwork = _compute_lwork(gels_lwork, m, n, nrhs)

            lqr, x, info = gels(a1, b1, lwork=lwork)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
            lqr_truth, _, _, _ = geqrf(a1)
            assert_array_equal(lqr, lqr_truth)

    def test_gelsd(self):
        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
                                                  (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            iwork_size = iwork

            x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
                                     -1, False, False)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            # Singular values of a1 must also come out right.
            assert_allclose(s, np.array([12.596017180511966,
                                         0.583396253199685], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)

        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
                                                  (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes; the complex variant also returns rwork.
            work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            rwork_size = int(rwork)
            iwork_size = iwork

            x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
                                     -1, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
            assert_allclose(s,
                            np.array([13.035514762572043, 4.337666985231382],
                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)

    def test_gelss(self):

        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                                  (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            work, info = gelss_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))

            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            assert_allclose(s, np.array([12.596017180511966,
                                         0.583396253199685], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)

        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                                  (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            work, info = gelss_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))

            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            assert_allclose(s, np.array([13.035514762572043,
                                         4.337666985231382], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)

    def test_gelsy(self):

        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            # BUG FIX: the workspace query must use 'gelsy_lwork', not
            # 'gelss_lwork' — the two routines have different workspace
            # requirements.
            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelsy_lwork'),
                                                  (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
            lwork = int(np.real(work))

            jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
            v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
                                        lwork, False, False)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)

        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            # BUG FIX: same as above — query 'gelsy_lwork', not 'gelss_lwork'.
            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelsy_lwork'),
                                                  (a1, b1))

            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1

            # Request of sizes
            work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
            lwork = int(np.real(work))

            jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
            v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
                                        lwork, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
+
+
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
    # Only the workspace query is performed, no factorization; the huge
    # (2**18, 2**18) case checks the query succeeds for very large sizes.
    geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
    m, n = shape
    lwork, info = geqrf_lwork(m=m, n=n)
    assert_equal(info, 0)
+
+
class TestRegression(object):
    """Regression tests for previously-reported LAPACK wrapper bugs."""

    def test_ticket_1645(self):
        # Check that RQ routines have correct lwork
        for dtype in DTYPES:
            a = np.zeros((300, 2), dtype=dtype)

            gerqf, = get_lapack_funcs(['gerqf'], [a])
            # Too-small lwork must be rejected rather than silently accepted.
            assert_raises(Exception, gerqf, a, lwork=2)
            rq, tau, work, info = gerqf(a)

            if dtype in REAL_DTYPES:
                orgrq, = get_lapack_funcs(['orgrq'], [a])
                assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
                orgrq(rq[-2:], tau, lwork=2)
            elif dtype in COMPLEX_DTYPES:
                ungrq, = get_lapack_funcs(['ungrq'], [a])
                assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
                ungrq(rq[-2:], tau, lwork=2)
+
+
class TestDpotr(object):
    def test_gh_2691(self):
        # 'lower' argument of dpotrf/dpotri (gh-2691): the inverse computed
        # from a Cholesky factor must match inv() on the relevant triangle.
        for lower in [True, False]:
            for clean in [True, False]:
                np.random.seed(42)
                x = np.random.normal(size=(3, 3))
                # x @ x.T is symmetric positive definite (a.s. for random x)
                a = x.dot(x.T)

                dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))

                c, info = dpotrf(a, lower, clean=clean)
                dpt = dpotri(c, lower)[0]

                # potri only fills the requested triangle; compare just that.
                if lower:
                    assert_allclose(np.tril(dpt), np.tril(inv(a)))
                else:
                    assert_allclose(np.triu(dpt), np.triu(inv(a)))
+
+
class TestDlasd4(object):
    """Test ?lasd4, the rank-one singular-value-update root finder."""

    def test_sing_val_update(self):
        # Build M = [diag(sigmas[:-1]) | m_vec] and compare its singular
        # values (via svd) with the roots found by lasd4.
        sigmas = np.array([4., 3., 2., 0])
        m_vec = np.array([3.12, 5.7, -4.8, -2.2])

        M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
                                  np.zeros((1, len(m_vec) - 1)))),
                       m_vec[:, np.newaxis]))
        SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
                 check_finite=False)

        it_len = len(sigmas)
        # lasd4 expects the d values in increasing order plus an upper bound.
        sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
        mvc = np.concatenate((m_vec[::-1], (0,)))

        lasd4 = get_lapack_funcs('lasd4', (sigmas,))

        roots = []
        for i in range(0, it_len):
            res = lasd4(i, sgm, mvc)
            roots.append(res[1])

            assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
        roots = np.array(roots)[::-1]

        # BUG FIX: previously this passed a (bool, msg) tuple to assert_,
        # which is always truthy, so the NaN check could never fail.
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
        assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
                        rtol=100*np.finfo(np.float64).eps)
+
+
class TestTbtrs(object):
    """Tests for ?tbtrs, the triangular banded solver."""

    @pytest.mark.parametrize('dtype', DTYPES)
    def test_nag_example_f07vef_f07vsf(self, dtype):
        """Test real (f07vef) and complex (f07vsf) examples from NAG

        Examples available from:
        * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
        * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html

        """
        if dtype in REAL_DTYPES:
            ab = np.array([[-4.16, 4.78, 6.32, 0.16],
                           [-2.25, 5.86, -4.82, 0]],
                          dtype=dtype)
            b = np.array([[-16.64, -4.16],
                          [-13.78, -16.59],
                          [13.10, -4.94],
                          [-14.14, -9.96]],
                         dtype=dtype)
            x_out = np.array([[4, 1],
                              [-1, -3],
                              [3, 2],
                              [2, -2]],
                             dtype=dtype)
        elif dtype in COMPLEX_DTYPES:
            ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
                           [-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
                           [1.62+3.68j, -2.77-1.93j, 0, 0]],
                          dtype=dtype)
            b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
                          [-15.57 - 23.41j, -57.97 + 8.14j],
                          [-7.63 + 22.78j, 19.09 - 29.51j],
                          [-14.74 - 2.40j, 19.17 + 21.33j]],
                         dtype=dtype)
            x_out = np.array([[2j, 1 + 5j],
                              [1 - 3j, -7 - 2j],
                              [-4.001887 - 4.988417j, 3.026830 + 4.003182j],
                              [1.996158 - 1.045105j, -6.103357 - 8.986653j]],
                             dtype=dtype)
        else:
            raise ValueError(f"Datatype {dtype} not understood.")

        tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
        x, info = tbtrs(ab=ab, b=b, uplo='L')
        assert_equal(info, 0)
        assert_allclose(x, x_out, rtol=0, atol=1e-5)

    @pytest.mark.parametrize('dtype,trans',
                             [(dtype, trans)
                              for dtype in DTYPES for trans in ['N', 'T', 'C']
                              if not (trans == 'C' and dtype in REAL_DTYPES)])
    @pytest.mark.parametrize('uplo', ['U', 'L'])
    @pytest.mark.parametrize('diag', ['N', 'U'])
    def test_random_matrices(self, dtype, trans, uplo, diag):
        # Solve A x = b (or A^T / A^H variants) for a random triangular
        # banded A, then verify the residual against the dense matrix.
        seed(1724)
        # n, nrhs, kd are used to specify A and b.
        # A is of shape n x n with kd super/sub-diagonals
        # b is of shape n x nrhs matrix
        n, nrhs, kd = 4, 3, 2
        tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)

        is_upper = (uplo == 'U')
        # All kd off-diagonals go above (ku) or below (kl) the diagonal.
        ku = kd * is_upper
        kl = kd - ku

        # Construct the diagonal and kd super/sub diagonals of A with
        # the corresponding offsets.
        band_offsets = range(ku, -kl - 1, -1)
        band_widths = [n - abs(x) for x in band_offsets]
        bands = [generate_random_dtype_array((width,), dtype)
                 for width in band_widths]

        if diag == 'U':  # A must be unit triangular
            bands[ku] = np.ones(n, dtype=dtype)

        # Construct the diagonal banded matrix A from the bands and offsets.
        a = sps.diags(bands, band_offsets, format='dia')

        # Convert A into banded storage form
        ab = np.zeros((kd + 1, n), dtype)
        for row, k in enumerate(band_offsets):
            ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)

        # The RHS values.
        b = generate_random_dtype_array((n, nrhs), dtype)

        x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
        assert_equal(info, 0)

        if trans == 'N':
            assert_allclose(a @ x, b, rtol=5e-5)
        elif trans == 'T':
            assert_allclose(a.T @ x, b, rtol=5e-5)
        elif trans == 'C':
            assert_allclose(a.H @ x, b, rtol=5e-5)
        else:
            raise ValueError('Invalid trans argument')

    @pytest.mark.parametrize('uplo,trans,diag',
                             [['U', 'N', 'Invalid'],
                              ['U', 'Invalid', 'N'],
                              ['Invalid', 'N', 'N']])
    def test_invalid_argument_raises_exception(self, uplo, trans, diag):
        """Test if invalid values of uplo, trans and diag raise exceptions"""
        # Argument checks occur independently of used datatype.
        # This mean we must not parameterize all available datatypes.
        tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
        ab = rand(4, 2)
        b = rand(2, 4)
        assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)

    def test_zero_element_in_diagonal(self):
        """Test if a matrix with a zero diagonal element is singular

        If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
        indicating the provided matrix is singular.

        Note that ?tbtrs requires the matrix A to be stored in banded form.
        In this form the diagonal corresponds to the last row."""
        ab = np.ones((3, 4), dtype=float)
        b = np.ones(4, dtype=float)
        tbtrs = get_lapack_funcs('tbtrs', dtype=float)

        # Zero out the 4th diagonal entry; info is 1-based, so expect 4.
        ab[-1, 3] = 0
        _, info = tbtrs(ab=ab, b=b, uplo='U')
        assert_equal(info, 4)

    @pytest.mark.parametrize('ldab,n,ldb,nrhs', [
                             (5, 5, 0, 5),
                             (5, 5, 3, 5)
    ])
    def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
        """Test ?tbtrs fails correctly if shapes are invalid."""
        ab = np.ones((ldab, n), dtype=float)
        b = np.ones((ldb, nrhs), dtype=float)
        tbtrs = get_lapack_funcs('tbtrs', dtype=float)
        assert_raises(Exception, tbtrs, ab, b)
+
+
def test_lartg():
    # ?lartg generates a plane (Givens) rotation; check the classic 3-4-5
    # triangle in all four precisions, with g rotated onto the imaginary
    # axis in the complex case.
    for dtype in 'fdFD':
        lartg = get_lapack_funcs('lartg', dtype=dtype)

        f = np.array(3, dtype)
        g = np.array(4, dtype)

        if np.iscomplexobj(g):
            g *= 1j

        cs, sn, r = lartg(f, g)

        assert_allclose(cs, 3.0/5.0)
        assert_allclose(r, 5.0)

        if np.iscomplexobj(g):
            assert_allclose(sn, -4.0j/5.0)
            # c stays real even in the complex routines; r does not.
            assert_(type(r) == complex)
            assert_(type(cs) == float)
        else:
            assert_allclose(sn, 4.0/5.0)
+
+
def test_rot():
    # srot, drot from blas and crot and zrot from lapack.
    # Apply a Givens rotation (c, s) to vector pairs, exercising the n,
    # offx/offy, incx/incy arguments and the overwrite flags.

    for dtype in 'fdFD':
        c = 0.6
        s = 0.8

        u = np.full(4, 3, dtype)
        v = np.full(4, 4, dtype)
        atol = 10**-(np.finfo(dtype).precision-1)

        if dtype in 'fd':
            rot = get_blas_funcs('rot', dtype=dtype)
            f = 4
        else:
            rot = get_lapack_funcs('rot', dtype=dtype)
            # Complex rotation: make s imaginary so results stay on the
            # same (5, 0) pattern, with f marking untouched imaginary slots.
            s *= -1j
            v *= 1j
            f = 4j

        assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
                                          [0, 0, 0, 0]], atol=atol)
        assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
                                               [0, 0, f, f]], atol=atol)
        assert_allclose(rot(u, v, c, s, offx=2, offy=2),
                        [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
        assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
                        [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
        assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
                        [[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
        assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
                        [[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
        assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
                        [[5, 3, 5, 3], [0, f, 0, f]], atol=atol)

        # With overwrite flags the inputs themselves must be modified
        # in place and returned.
        a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
        assert_(a is u)
        assert_(b is v)
        assert_allclose(a, [5, 5, 5, 5], atol=atol)
        assert_allclose(b, [0, 0, 0, 0], atol=atol)
+
+
def test_larfg_larf():
    np.random.seed(1234)
    a0 = np.random.random((4, 4))
    a0 = a0.T.dot(a0)

    a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
    a0j = a0j.T.conj().dot(a0j)

    # our test here will be to do one step of reducing a hermitian matrix to
    # tridiagonal form using householder transforms.

    for dtype in 'fdFD':
        larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)

        if dtype in 'FD':
            a = a0j.copy()
        else:
            a = a0.copy()

        # generate a householder transform to clear a[2:,0]
        alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])

        # create expected output
        expected = np.zeros_like(a[:, 0])
        expected[0] = a[0, 0]
        expected[1] = alpha

        # assemble householder vector
        v = np.zeros_like(a[1:, 0])
        v[0] = 1.0
        v[1:] = x

        # apply transform from the left
        a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))

        # apply transform from the right
        a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')

        # After both applications the first row and column must be
        # [a00, alpha, 0, 0] — the subdiagonal entries were annihilated.
        assert_allclose(a[:, 0], expected, atol=1e-5)
        assert_allclose(a[0, :], expected, atol=1e-5)
+
+
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
    # Test that SGESDD lwork is sufficiently large for LAPACK.
    #
    # This checks that workaround around an apparent LAPACK bug
    # actually works. cf. gh-5401
    #
    # xslow: requires 1GB+ of memory

    # Run in a subprocess: the failure mode is a hard crash/abort inside
    # LAPACK, which would take down the test process itself.
    p = subprocess.Popen([sys.executable, '-c',
                          'import numpy as np; '
                          'from scipy.linalg import svd; '
                          'a = np.zeros([9537, 9537], dtype=np.float32); '
                          'svd(a)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)

    # Check if it an error occurred within 5 sec; the computation can
    # take substantially longer, and we will not wait for it to finish
    for j in range(50):
        time.sleep(0.1)
        if p.poll() is not None:
            returncode = p.returncode
            break
    else:
        # Didn't exit in time -- probably entered computation.  The
        # error is raised before entering computation, so things are
        # probably OK.
        returncode = 0
        p.terminate()

    assert_equal(returncode, 0,
                 "Code apparently failed: " + p.stdout.read().decode())
+
+
class TestSytrd(object):
    """Tests for ?sytrd: reduction of a symmetric matrix to tridiagonal form."""

    @pytest.mark.parametrize('dtype', REAL_DTYPES)
    def test_sytrd_with_zero_dim_array(self, dtype):
        # Assert that a 0x0 matrix raises an error
        A = np.zeros((0, 0), dtype=dtype)
        sytrd = get_lapack_funcs('sytrd', (A,))
        assert_raises(ValueError, sytrd, A)

    @pytest.mark.parametrize('dtype', REAL_DTYPES)
    @pytest.mark.parametrize('n', (1, 3))
    def test_sytrd(self, dtype, n):
        A = np.zeros((n, n), dtype=dtype)

        sytrd, sytrd_lwork = \
            get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))

        # some upper triangular array
        A[np.triu_indices_from(A)] = \
            np.arange(1, n*(n+1)//2+1, dtype=dtype)

        # query lwork
        lwork, info = sytrd_lwork(n)
        assert_equal(info, 0)

        # check lower=1 behavior (shouldn't do much since the matrix is
        # upper triangular)
        data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
        assert_equal(info, 0)

        assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
        assert_allclose(d, np.diag(A))
        assert_allclose(e, 0.0)
        assert_allclose(tau, 0.0)

        # and now for the proper test (lower=0 is the default)
        data, d, e, tau, info = sytrd(A, lwork=lwork)
        assert_equal(info, 0)

        # assert Q^T*A*Q = tridiag(e, d, e)

        # build tridiagonal matrix
        T = np.zeros_like(A, dtype=dtype)
        k = np.arange(A.shape[0])
        T[k, k] = d
        k2 = np.arange(A.shape[0]-1)
        T[k2+1, k2] = e
        T[k2, k2+1] = e

        # build Q from the Householder reflectors stored in the strict
        # upper triangle of `data` and the scalar factors in `tau`
        Q = np.eye(n, n, dtype=dtype)
        for i in range(n-1):
            v = np.zeros(n, dtype=dtype)
            v[:i] = data[:i, i+1]
            v[i] = 1.0
            H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
            Q = np.dot(H, Q)

        # Make matrix fully symmetric
        i_lower = np.tril_indices(n, -1)
        A[i_lower] = A.T[i_lower]

        QTAQ = np.dot(Q.T, np.dot(A, Q))

        # disable rtol here since some values in QTAQ and T are very close
        # to 0.
        assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
+
+
class TestHetrd(object):
    """Tests for ?hetrd: reduction of a Hermitian matrix to tridiagonal form."""

    @pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
    def test_hetrd_with_zero_dim_array(self, complex_dtype):
        # Assert that a 0x0 matrix raises an error
        A = np.zeros((0, 0), dtype=complex_dtype)
        hetrd = get_lapack_funcs('hetrd', (A,))
        assert_raises(ValueError, hetrd, A)

    @pytest.mark.parametrize('real_dtype,complex_dtype',
                             zip(REAL_DTYPES, COMPLEX_DTYPES))
    @pytest.mark.parametrize('n', (1, 3))
    def test_hetrd(self, n, real_dtype, complex_dtype):
        A = np.zeros((n, n), dtype=complex_dtype)
        hetrd, hetrd_lwork = \
            get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))

        # some upper triangular array
        A[np.triu_indices_from(A)] = (
            np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
            + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
            )
        # a Hermitian matrix must have a real diagonal
        np.fill_diagonal(A, np.real(np.diag(A)))

        # test query lwork
        for x in [0, 1]:
            _, info = hetrd_lwork(n, lower=x)
            assert_equal(info, 0)
        # lwork returns complex which segfaults hetrd call (gh-10388)
        # use the safe and recommended option
        lwork = _compute_lwork(hetrd_lwork, n)

        # check lower=1 behavior (shouldn't do much since the matrix is
        # upper triangular)
        data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
        assert_equal(info, 0)

        assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)

        assert_allclose(d, np.real(np.diag(A)))
        assert_allclose(e, 0.0)
        assert_allclose(tau, 0.0)

        # and now for the proper test (lower=0 is the default)
        data, d, e, tau, info = hetrd(A, lwork=lwork)
        assert_equal(info, 0)

        # assert Q^T*A*Q = tridiag(e, d, e)

        # build tridiagonal matrix
        T = np.zeros_like(A, dtype=real_dtype)
        k = np.arange(A.shape[0], dtype=int)
        T[k, k] = d
        k2 = np.arange(A.shape[0]-1, dtype=int)
        T[k2+1, k2] = e
        T[k2, k2+1] = e

        # build Q from the Householder reflectors stored in the strict
        # upper triangle of `data` and the scalar factors in `tau`
        Q = np.eye(n, n, dtype=complex_dtype)
        for i in range(n-1):
            v = np.zeros(n, dtype=complex_dtype)
            v[:i] = data[:i, i+1]
            v[i] = 1.0
            H = np.eye(n, n, dtype=complex_dtype) \
                - tau[i] * np.outer(v, np.conj(v))
            Q = np.dot(H, Q)

        # Make matrix fully Hermitian
        i_lower = np.tril_indices(n, -1)
        A[i_lower] = np.conj(A.T[i_lower])

        QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))

        # disable rtol here since some values in QTAQ and T are very close
        # to 0.
        assert_allclose(
            QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
            )
+
+
def test_gglse():
    # ?gglse solves the equality-constrained least-squares problem
    # min ||c - A x|| subject to B x = d.
    # Example data taken from NAG manual
    for ind, dtype in enumerate(DTYPES):
        # First two dtypes are real, last two complex.
        func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
                                            dtype=dtype)
        lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
        # Real-valued problem (s/d gglse)
        if ind < 2:
            a = np.array([[-0.57, -1.28, -0.39, 0.25],
                          [-1.93, 1.08, -0.31, -2.14],
                          [2.30, 0.24, 0.40, -0.35],
                          [-1.93, 0.64, -0.66, 0.08],
                          [0.15, 0.30, 0.15, -2.13],
                          [-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
            c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
            d = np.array([0., 0.], dtype=dtype)
        # Complex-valued problem (c/z gglse)
        else:
            # NOTE(review): a and c are built without dtype=dtype here, so
            # they default to complex128 and rely on the wrapper to cast --
            # presumably intentional; verify if single-precision coverage
            # matters.
            a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
                          [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
                          [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
                          [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
                          [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
                          [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
            c = np.array([[-2.54+0.09j],
                          [1.65-2.26j],
                          [-2.11-3.96j],
                          [1.82+3.30j],
                          [-6.41+3.77j],
                          [2.07+0.66j]])
            d = np.zeros(2, dtype=dtype)

        b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)

        _, _, _, result, _ = func(a, b, c, d, lwork=lwork)
        if ind < 2:
            expected = np.array([0.48904455,
                                 0.99754786,
                                 0.48904455,
                                 0.99754786])
        else:
            expected = np.array([1.08742917-1.96205783j,
                                 -0.74093902+3.72973919j,
                                 1.08742917-1.96205759j,
                                 -0.74093896+3.72973895j])
        assert_array_almost_equal(result, expected, decimal=4)
+
+
def test_sycon_hecon():
    # Check the condition-number estimators ?sycon (symmetric) and
    # ?hecon (Hermitian) against numpy's 1-norm condition number.
    seed(1234)
    for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
        # First four dtypes exercise sycon, the last two hecon.
        n = 10
        # For sycon
        if ind < 4:
            func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
            funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
            A = (rand(n, n)).astype(dtype)
        # For hecon
        else:
            func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
            funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)

        # Since sycon only refers to upper/lower part, conj() is safe here.
        # Adding 2*I keeps A well-conditioned.
        A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)

        anorm = norm(A, 1)
        lwork = _compute_lwork(func_lwork, n)
        ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
        rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
        # The error is at most 1-fold
        assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
+
+
def test_sygst():
    # ?sygst reduces the generalized eigenproblem A x = lambda B x to a
    # standard one; eigenvalues via sygst+syevd must match sygvd.
    seed(1234)
    for ind, dtype in enumerate(REAL_DTYPES):
        n = 10

        potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
                                                       'syevd', 'sygvd'),
                                                      dtype=dtype)

        A = rand(n, n).astype(dtype)
        A = (A + A.T)/2
        # B must be positive definite
        B = rand(n, n).astype(dtype)
        B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)

        # Perform eig (sygvd)
        eig_gvd, _, info = sygvd(A, B)
        assert_(info == 0)

        # Convert to std problem potrf
        b, info = potrf(B)
        assert_(info == 0)
        a, info = sygst(A, b)
        assert_(info == 0)

        eig, _, info = syevd(a)
        assert_(info == 0)
        assert_allclose(eig, eig_gvd, rtol=1e-4)
+
+
def test_hegst():
    # Complex analogue of test_sygst: ?hegst reduces the Hermitian
    # generalized eigenproblem; eigenvalues via hegst+heevd must match hegvd.
    seed(1234)
    for ind, dtype in enumerate(COMPLEX_DTYPES):
        n = 10

        potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
                                                       'heevd', 'hegvd'),
                                                      dtype=dtype)

        A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
        A = (A + A.conj().T)/2
        # B must be positive definite
        B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
        B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)

        # Perform eig (hegvd)
        eig_gvd, _, info = hegvd(A, B)
        assert_(info == 0)

        # Convert to std problem potrf
        b, info = potrf(B)
        assert_(info == 0)
        a, info = hegst(A, b)
        assert_(info == 0)

        eig, _, info = heevd(a)
        assert_(info == 0)
        assert_allclose(eig, eig_gvd, rtol=1e-4)
+
+
def test_tzrzf():
    """
    This test performs an RZ decomposition in which an m x n upper trapezoidal
    array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
    and Z is unitary.
    """
    seed(1234)
    m, n = 10, 15
    for ind, dtype in enumerate(DTYPES):
        tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
                                           dtype=dtype)
        lwork = _compute_lwork(tzrzf_lw, m, n)

        if ind < 2:
            A = triu(rand(m, n).astype(dtype))
        else:
            A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))

        # assert wrong shape arg, f2py returns generic error
        assert_raises(Exception, tzrzf, A.T)
        rz, tau, info = tzrzf(A, lwork=lwork)
        # Check success
        assert_(info == 0)

        # Get Z manually for comparison: accumulate the elementary
        # reflectors stored in rz[:, m:] and tau.
        R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
        V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
        Id = np.eye(n, dtype=dtype)
        ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
        Z = reduce(np.dot, ref)
        assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
                        atol=10*np.spacing(dtype(1.0).real), rtol=0.)
+
+
def test_tfsm():
    """
    Test for solving a linear system with the coefficient matrix is a
    triangular array stored in Rectangular Full Packed (RFP) format.
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
            trans = 'C'
        else:
            A = triu(rand(n, n) + eye(n)).astype(dtype)
            trans = 'T'

        trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
                                              dtype=dtype)

        Afp, _ = trttf(A)
        B = rand(n, 2).astype(dtype)
        # alpha = -1, i.e. solve (-A) x = B; compare with a dense solve.
        soln = tfsm(-1, Afp, B)
        assert_array_almost_equal(soln, solve(-A, B),
                                  decimal=4 if ind % 2 == 0 else 6)

        soln = tfsm(-1, Afp, B, trans=trans)
        assert_array_almost_equal(soln, solve(-A.conj().T, B),
                                  decimal=4 if ind % 2 == 0 else 6)

        # Make A, unit diagonal
        A[np.arange(n), np.arange(n)] = dtype(1.)
        # diag='U' makes tfsm ignore the stored diagonal, so the old Afp
        # can still be used.
        soln = tfsm(-1, Afp, B, trans=trans, diag='U')
        assert_array_almost_equal(soln, solve(-A.conj().T, B),
                                  decimal=4 if ind % 2 == 0 else 6)

        # Change side
        B2 = rand(3, n).astype(dtype)
        soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
        assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
                                  decimal=4 if ind % 2 == 0 else 6)
+
+
def test_ormrz_unmrz():
    """
    This test performs a matrix multiplication with an arbitrary m x n matrix C
    and a unitary matrix Q without explicitly forming the array. The array data
    is encoded in the rectangular part of A which is obtained from ?TZRZF. Q
    size is inferred by m, n, side keywords.
    """
    seed(1234)
    qm, qn, cn = 10, 15, 15
    for ind, dtype in enumerate(DTYPES):
        tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
                                           dtype=dtype)
        lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)

        if ind < 2:
            A = triu(rand(qm, qn).astype(dtype))
            C = rand(cn, cn).astype(dtype)
            orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
                                                     dtype=dtype)
        else:
            A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
            C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
            orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
                                                     dtype=dtype)

        lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
        rz, tau, info = tzrzf(A, lwork=lwork_rz)

        # Get Q manually for comparison by accumulating the elementary
        # reflectors returned by tzrzf.
        V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
        Id = np.eye(qn, dtype=dtype)
        ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
        Q = reduce(np.dot, ref)

        # Now that we have Q, we can test whether lapack results agree with
        # each case of CQ, CQ^H, QC, and QC^H
        trans = 'T' if ind < 2 else 'C'
        tol = 10*np.spacing(dtype(1.0).real)

        cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)

        cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
                        rtol=0.)

        cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)

        cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
                        rtol=0.)
+
+
def test_tfttr_trttf():
    """
    Test conversion routines between the Rectangular Full Packed (RFP) format
    and Standard Triangular Array (TR)
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            transr = 'C'
        else:
            A_full = (rand(n, n)).astype(dtype)
            transr = 'T'

        trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
        A_tf_U, info = trttf(A_full)
        assert_(info == 0)
        A_tf_L, info = trttf(A_full, uplo='L')
        assert_(info == 0)
        A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
        assert_(info == 0)
        A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
        assert_(info == 0)

        # Create the RFP array manually (n is even!)
        A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
        A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
        A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T

        A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
        A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
        A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T

        # RFP data is stored column-major, hence order='F'.
        assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_U_T,
                                  A_tf_U_m.conj().T.reshape(-1, order='F'))

        assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_L_T,
                                  A_tf_L_m.conj().T.reshape(-1, order='F'))

        # Get the original array from RFP
        A_tr_U, info = tfttr(n, A_tf_U)
        assert_(info == 0)
        A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
        assert_(info == 0)
        A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
        assert_(info == 0)
        A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
        assert_(info == 0)

        # Round-trip must reproduce the relevant triangle exactly.
        assert_array_almost_equal(A_tr_U, triu(A_full))
        assert_array_almost_equal(A_tr_U_T, triu(A_full))
        assert_array_almost_equal(A_tr_L, tril(A_full))
        assert_array_almost_equal(A_tr_L_T, tril(A_full))
+
+
def test_tpttr_trttp():
    """
    Test conversion routines between the Standard Packed (TP) format
    and Standard Triangular Array (TR)
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
        else:
            A_full = (rand(n, n)).astype(dtype)

        trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
        A_tp_U, info = trttp(A_full)
        assert_(info == 0)
        A_tp_L, info = trttp(A_full, uplo='L')
        assert_(info == 0)

        # Create the TP array manually: packed storage is column-major,
        # which indexing the transpose with the opposite triangle's
        # indices reproduces.
        inds = tril_indices(n)
        A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
        A_tp_U_m[:] = (triu(A_full).T)[inds]

        inds = triu_indices(n)
        A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
        A_tp_L_m[:] = (tril(A_full).T)[inds]

        assert_array_almost_equal(A_tp_U, A_tp_U_m)
        assert_array_almost_equal(A_tp_L, A_tp_L_m)

        # Get the original array from TP
        A_tr_U, info = tpttr(n, A_tp_U)
        assert_(info == 0)
        A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
        assert_(info == 0)

        # Round-trip must reproduce the relevant triangle exactly.
        assert_array_almost_equal(A_tr_U, triu(A_full))
        assert_array_almost_equal(A_tr_L, tril(A_full))
+
+
def test_pftrf():
    """
    Test Cholesky factorization of a positive definite Rectangular Full
    Packed (RFP) format array
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            # Hermitian + n*I is positive definite
            A = A + A.conj().T + n*eye(n)
        else:
            A = (rand(n, n)).astype(dtype)
            A = A + A.T + n*eye(n)

        pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
                                               dtype=dtype)

        # Factorize in RFP form, convert back and compare with cholesky()
        Afp, info = trttf(A)
        Achol_rfp, info = pftrf(n, Afp)
        assert_(info == 0)
        A_chol_r, _ = tfttr(n, Achol_rfp)
        Achol = cholesky(A)
        assert_array_almost_equal(A_chol_r, Achol)
+
+
+def test_pftri():
+    """
+    Test Cholesky factorization of a positive definite Rectangular Full
+    Packed (RFP) format array to find its inverse
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        # Make A Hermitian/symmetric and diagonally dominant (hence SPD)
+        if ind > 1:
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            A = A + A.conj().T + n*eye(n)
+        else:
+            A = (rand(n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
+                                                       'pftrf',
+                                                       'trttf',
+                                                       'tfttr'),
+                                                      dtype=dtype)
+
+        # Convert to RFP, factorize, invert in RFP, then unpack the result
+        Afp, info = trttf(A)
+        A_chol_rfp, info = pftrf(n, Afp)
+        A_inv_rfp, info = pftri(n, A_chol_rfp)
+        assert_(info == 0)
+        A_inv_r, _ = tfttr(n, A_inv_rfp)
+        Ainv = inv(A)
+        # Only the upper triangle is referenced; single-precision dtypes
+        # (even ind) get a looser tolerance
+        assert_array_almost_equal(A_inv_r, triu(Ainv),
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_pftrs():
+    """
+    Test Cholesky factorization of a positive definite Rectangular Full
+    Packed (RFP) format array and solve a linear system
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        # Make A Hermitian/symmetric and diagonally dominant (hence SPD)
+        if ind > 1:
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            A = A + A.conj().T + n*eye(n)
+        else:
+            A = (rand(n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        # Right-hand sides: correct size, too many rows, too few rows
+        B = ones((n, 3), dtype=dtype)
+        Bf1 = ones((n+2, 3), dtype=dtype)
+        Bf2 = ones((n-2, 3), dtype=dtype)
+        pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
+                                                       'pftrf',
+                                                       'trttf',
+                                                       'tfttr'),
+                                                      dtype=dtype)
+
+        # Convert to RFP and factorize
+        Afp, info = trttf(A)
+        A_chol_rfp, info = pftrf(n, Afp)
+        # larger B arrays shouldn't segfault
+        soln, info = pftrs(n, A_chol_rfp, Bf1)
+        assert_(info == 0)
+        # smaller B arrays must be rejected
+        assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
+        soln, info = pftrs(n, A_chol_rfp, B)
+        assert_(info == 0)
+        assert_array_almost_equal(solve(A, B), soln,
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_sfrk_hfrk():
+ """
+ Test for performing a symmetric rank-k operation for matrix in RFP format.
+ """
+ seed(1234)
+ for ind, dtype in enumerate(DTYPES):
+ n = 20
+ if ind > 1:
+ A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+ A = A + A.conj().T + n*eye(n)
+ else:
+ A = (rand(n, n)).astype(dtype)
+ A = A + A.T + n*eye(n)
+
+ prefix = 's'if ind < 2 else 'h'
+ trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk'
+ ''.format(prefix)),
+ dtype=dtype)
+
+ Afp, _ = trttf(A)
+ C = np.random.rand(n, 2).astype(dtype)
+ Afp_out = shfrk(n, 2, -1, C, 2, Afp)
+ A_out, _ = tfttr(n, Afp_out)
+ assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
+ decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_syconv():
+ """
+ Test for going back and forth between the returned format of he/sytrf to
+ L and D factors/permutations.
+ """
+ seed(1234)
+ for ind, dtype in enumerate(DTYPES):
+ n = 10
+
+ if ind > 1:
+ A = (randint(-30, 30, (n, n)) +
+ randint(-30, 30, (n, n))*1j).astype(dtype)
+
+ A = A + A.conj().T
+ else:
+ A = randint(-30, 30, (n, n)).astype(dtype)
+ A = A + A.T + n*eye(n)
+
+ tol = 100*np.spacing(dtype(1.0).real)
+ syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
+ 'sytrf_lwork'), dtype=dtype)
+ lw = _compute_lwork(trf_lwork, n, lower=1)
+ L, D, perm = ldl(A, lower=1, hermitian=False)
+ lw = _compute_lwork(trf_lwork, n, lower=1)
+ ldu, ipiv, info = trf(A, lower=1, lwork=lw)
+ a, e, info = syconv(ldu, ipiv, lower=1)
+ assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.)
+
+ # Test also upper
+ U, D, perm = ldl(A, lower=0, hermitian=False)
+ ldu, ipiv, info = trf(A, lower=0)
+ a, e, info = syconv(ldu, ipiv, lower=0)
+ assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
+
+
+class TestBlockedQR(object):
+    """
+    Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt
+    and tpmqrt.
+    """
+
+    def test_geqrt_gemqrt(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 20
+
+            if ind > 1:
+                A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            else:
+                A = (rand(n, n)).astype(dtype)
+
+            tol = 100*np.spacing(dtype(1.0).real)
+            geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
+
+            a, t, info = geqrt(n, A)
+            assert(info == 0)
+
+            # Extract elementary reflectors from lower triangle, adding the
+            # main diagonal of ones.
+            v = np.tril(a, -1) + np.eye(n, dtype=dtype)
+            # Generate the block Householder transform I - VTV^H
+            Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
+            R = np.triu(a)
+
+            # Test columns of Q are orthogonal
+            assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
+                            rtol=0.)
+            assert_allclose(Q @ R, A, atol=tol, rtol=0.)
+
+            # Conjugate transpose only applies for complex dtypes
+            if ind > 1:
+                C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                transpose = 'C'
+            else:
+                C = (rand(n, n)).astype(dtype)
+                transpose = 'T'
+
+            # Apply Q (or its (conjugate) transpose) from both sides of C
+            for side in ('L', 'R'):
+                for trans in ('N', transpose):
+                    c, info = gemqrt(a, t, C, side=side, trans=trans)
+                    assert(info == 0)
+
+                    if trans == transpose:
+                        q = Q.T.conj()
+                    else:
+                        q = Q
+
+                    if side == 'L':
+                        qC = q @ C
+                    else:
+                        qC = C @ q
+
+                    assert_allclose(c, qC, atol=tol, rtol=0.)
+
+                    # Test default arguments
+                    if (side, trans) == ('L', 'N'):
+                        c_default, info = gemqrt(a, t, C)
+                        assert(info == 0)
+                        assert_equal(c_default, c)
+
+            # Test invalid side/trans
+            assert_raises(Exception, gemqrt, a, t, C, side='A')
+            assert_raises(Exception, gemqrt, a, t, C, trans='A')
+
+    def test_tpqrt_tpmqrt(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 20
+
+            if ind > 1:
+                A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            else:
+                A = (rand(n, n)).astype(dtype)
+                B = (rand(n, n)).astype(dtype)
+
+            tol = 100*np.spacing(dtype(1.0).real)
+            tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
+
+            # Test for the range of pentagonal B, from square to upper
+            # triangular
+            for l in (0, n // 2, n):
+                a, b, t, info = tpqrt(l, n, A, B)
+                assert(info == 0)
+
+                # Check that lower triangular part of A has not been modified
+                assert_equal(np.tril(a, -1), np.tril(A, -1))
+                # Check that elements not part of the pentagonal portion of B
+                # have not been modified.
+                assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
+
+                # Extract pentagonal portion of B
+                B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
+
+                # Generate elementary reflectors
+                v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
+                # Generate the block Householder transform I - VTV^H
+                Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
+                R = np.concatenate((np.triu(a), np.zeros_like(a)))
+
+                # Test columns of Q are orthogonal
+                assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
+                                atol=tol, rtol=0.)
+                assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
+                                atol=tol, rtol=0.)
+
+                # Conjugate transpose only applies for complex dtypes
+                if ind > 1:
+                    C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                    D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                    transpose = 'C'
+                else:
+                    C = (rand(n, n)).astype(dtype)
+                    D = (rand(n, n)).astype(dtype)
+                    transpose = 'T'
+
+                # Apply Q (or its (conjugate) transpose) to the stacked [C; D]
+                for side in ('L', 'R'):
+                    for trans in ('N', transpose):
+                        c, d, info = tpmqrt(l, b, t, C, D, side=side,
+                                            trans=trans)
+                        assert(info == 0)
+
+                        if trans == transpose:
+                            q = Q.T.conj()
+                        else:
+                            q = Q
+
+                        if side == 'L':
+                            cd = np.concatenate((c, d), axis=0)
+                            CD = np.concatenate((C, D), axis=0)
+                            qCD = q @ CD
+                        else:
+                            cd = np.concatenate((c, d), axis=1)
+                            CD = np.concatenate((C, D), axis=1)
+                            qCD = CD @ q
+
+                        assert_allclose(cd, qCD, atol=tol, rtol=0.)
+
+                        # Test default arguments
+                        if (side, trans) == ('L', 'N'):
+                            c_default, d_default, info = tpmqrt(l, b, t, C, D)
+                            assert(info == 0)
+                            assert_equal(c_default, c)
+                            assert_equal(d_default, d)
+
+                # Test invalid side/trans
+                assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
+                assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
+
+
+def test_pstrf():
+    """
+    Test pivoted Cholesky factorization of a rank-deficient positive
+    semidefinite matrix.
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        # pstrf is available for every entry of DTYPES
+        n = 10
+        r = 2  # rank deficiency of A
+        pstrf = get_lapack_funcs('pstrf', dtype=dtype)
+
+        # Create positive semidefinite A of rank n - r
+        if ind > 1:
+            A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
+            A = A @ A.conj().T
+        else:
+            A = rand(n, n-r).astype(dtype)
+            A = A @ A.T
+
+        c, piv, r_c, info = pstrf(A)
+        U = triu(c)
+        # Zero out the trailing block beyond the computed rank r_c
+        U[r_c - n:, r_c - n:] = 0.
+
+        # info == 1 signals a semidefinite (rank-deficient) input
+        assert_equal(info, 1)
+        # python-dbg 3.5.2 runs cause trouble with the following assertion.
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
+
+        c, piv, r_c, info = pstrf(A, lower=1)
+        L = tril(c)
+        L[r_c - n:, r_c - n:] = 0.
+
+        assert_equal(info, 1)
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
+
+
+def test_pstf2():
+    """
+    Test the unblocked pivoted Cholesky routine (pstf2) on a rank-deficient
+    positive semidefinite matrix; mirrors test_pstrf.
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        # pstf2 is available for every entry of DTYPES
+        n = 10
+        r = 2  # rank deficiency of A
+        pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
+
+        # Create positive semidefinite A of rank n - r
+        if ind > 1:
+            A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
+            A = A @ A.conj().T
+        else:
+            A = rand(n, n-r).astype(dtype)
+            A = A @ A.T
+
+        c, piv, r_c, info = pstf2(A)
+        U = triu(c)
+        # Zero out the trailing block beyond the computed rank r_c
+        U[r_c - n:, r_c - n:] = 0.
+
+        # info == 1 signals a semidefinite (rank-deficient) input
+        assert_equal(info, 1)
+        # python-dbg 3.5.2 runs cause trouble with the commented assertions.
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
+
+        c, piv, r_c, info = pstf2(A, lower=1)
+        L = tril(c)
+        L[r_c - n:, r_c - n:] = 0.
+
+        assert_equal(info, 1)
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
+
+
+def test_geequ():
+    """
+    Test ?geequ row/column equilibration against reference results from the
+    NAG documentation examples.
+    """
+    desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
+                             [1.0000, -0.5619, -1.0000, -1.0000],
+                             [0.5874, -1.0000, -0.0596, -0.5341],
+                             [-1.0000, -0.5946, -0.0294, 0.9957]])
+
+    desired_cplx = np.array([[-0.2816+0.5359*1j,
+                              0.0812+0.9188*1j,
+                              -0.7439-0.2561*1j],
+                             [-0.3562-0.2954*1j,
+                              0.9566-0.0434*1j,
+                              -0.0174+0.1555*1j],
+                             [0.8607+0.1393*1j,
+                              -0.2759+0.7241*1j,
+                              -0.1642-0.1365*1j]])
+
+    for ind, dtype in enumerate(DTYPES):
+        if ind < 2:
+            # Use examples from the NAG documentation
+            A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
+                          [5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
+                          [1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
+                          [-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
+            A = A.astype(dtype)
+        else:
+            A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
+                          [-1.70e+00, 3.31e+10, -0.15e+00],
+                          [2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
+            A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
+                           [-1.41e+00, -0.15e+10, 1.34e+00],
+                           [0.39e-10, 1.47e+00, -0.69e-10]])*1j
+
+            A = A.astype(dtype)
+
+        geequ = get_lapack_funcs('geequ', dtype=dtype)
+        r, c, rowcnd, colcnd, amax, info = geequ(A)
+
+        # The scaled matrix diag(r) @ A @ diag(c) should match the
+        # equilibrated matrices from the NAG examples
+        if ind < 2:
+            assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
+                            rtol=0, atol=1e-4)
+        else:
+            assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
+                            rtol=0, atol=1e-4)
+
+
+def test_syequb():
+    """
+    Test ?syequb scaling factors on a matrix with entries of widely varying
+    magnitude; the expected scalings are exact powers of two.
+    """
+    desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
+
+    for ind, dtype in enumerate(DTYPES):
+        A = np.eye(10, dtype=dtype)
+        alpha = dtype(1. if ind < 2 else 1.j)
+        # Anti-diagonal with entries 2**-5 ... 2**4
+        d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
+        A += np.rot90(np.diag(d))
+
+        syequb = get_lapack_funcs('syequb', dtype=dtype)
+        s, scond, amax, info = syequb(A)
+
+        assert_equal(np.log2(s).astype(int), desired_log2s)
+
+
+@pytest.mark.skipif(True,
+                    reason="Failing on some OpenBLAS version, see gh-12276")
+def test_heequb():
+    # zheequb has a bug for versions =< LAPACK 3.9.0
+    # See Reference-LAPACK gh-61 and gh-408
+    # Hence the zheequb test is customized accordingly to avoid
+    # work scaling.
+    A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
+    s, scond, amax, info = lapack.zheequb(A)
+    assert_equal(info, 0)
+    assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
+
+    # Single precision complex flavor, lower triangle
+    A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
+    A[5, 5] = 1024
+    A[5, 0] = 16j
+    s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
+    assert_equal(info, 0)
+    assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
+
+
+def test_getc2_gesc2():
+    """
+    Test complete-pivoting LU (?getc2) together with its solver (?gesc2)
+    by recovering a known solution vector.
+    """
+    np.random.seed(42)
+    n = 10
+    desired_real = np.random.rand(n)
+    desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
+
+    for ind, dtype in enumerate(DTYPES):
+        # Build b = A @ x for a known x so the solve can be checked
+        if ind < 2:
+            A = np.random.rand(n, n)
+            A = A.astype(dtype)
+            b = A @ desired_real
+            b = b.astype(dtype)
+        else:
+            A = np.random.rand(n, n) + np.random.rand(n, n)*1j
+            A = A.astype(dtype)
+            b = A @ desired_cplx
+            b = b.astype(dtype)
+
+        getc2 = get_lapack_funcs('getc2', dtype=dtype)
+        gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
+        lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
+        x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
+
+        # gesc2 returns a scaled solution; undo the scaling before comparing
+        if ind < 2:
+            assert_array_almost_equal(desired_real.astype(dtype),
+                                      x/scale, decimal=4)
+        else:
+            assert_array_almost_equal(desired_cplx.astype(dtype),
+                                      x/scale, decimal=4)
+
+
+@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+@pytest.mark.parametrize('joba', range(6))  # 'C', 'E', 'F', 'G', 'A', 'R'
+@pytest.mark.parametrize('jobu', range(4))  # 'U', 'F', 'W', 'N'
+@pytest.mark.parametrize('jobv', range(4))  # 'V', 'J', 'W', 'N'
+@pytest.mark.parametrize('jobr', [0, 1])
+@pytest.mark.parametrize('jobp', [0, 1])
+def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
+    """Test the lapack routine ?gejsv.
+
+    This function tests that a singular value decomposition can be performed
+    on the random M-by-N matrix A. The test performs the SVD using ?gejsv
+    then performs the following checks:
+
+    * ?gejsv exits successfully (info == 0)
+    * The returned singular values are correct
+    * `A` can be reconstructed from `u`, `SIGMA`, `v`
+    * Ensure that u.T @ u is the identity matrix
+    * Ensure that v.T @ v is the identity matrix
+    * The reported matrix rank
+    * The reported number of singular values
+    * If denormalized floats are required
+
+    Notes
+    -----
+    joba specifies several choices affecting the calculation's accuracy
+    Although all arguments are tested, the tests only check that the correct
+    solution is returned - NOT that the prescribed actions are performed
+    internally.
+
+    jobt is, as of v3.9.0, still experimental and removed to cut down number of
+    test cases. However keyword itself is tested externally.
+    """
+    seed(42)
+
+    # Define some constants for later use:
+    m, n = size
+    atol = 100 * np.finfo(dtype).eps
+    A = generate_random_dtype_array(size, dtype)
+    gejsv = get_lapack_funcs('gejsv', dtype=dtype)
+
+    # Set up checks for invalid job? combinations
+    # if an invalid combination occurs we set the appropriate
+    # exit status.
+    lsvec = jobu < 2  # Calculate left singular vectors
+    rsvec = jobv < 2  # Calculate right singular vectors
+    l2tran = (jobt == 1) and (m == n)
+    is_complex = np.iscomplexobj(A)
+
+    invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
+    invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
+    invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
+
+    # Set the exit status to the expected value.
+    # Here we only check for invalid combinations, not individual
+    # parameters.
+    if invalid_cplx_jobu:
+        exit_status = -2
+    elif invalid_real_jobv or invalid_cplx_jobv:
+        exit_status = -3
+    else:
+        exit_status = 0
+
+    if (jobu > 1) and (jobv == 1):
+        assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
+    else:
+        sva, u, v, work, iwork, info = gejsv(A,
+                                             joba=joba,
+                                             jobu=jobu,
+                                             jobv=jobv,
+                                             jobr=jobr,
+                                             jobt=jobt,
+                                             jobp=jobp)
+
+        # Check that ?gejsv exited successfully/as expected
+        assert_equal(info, exit_status)
+
+        # If exit_status is non-zero the combination of jobs is invalid.
+        # We test this above but no calculations are performed.
+        if not exit_status:
+
+            # Check the returned singular values
+            sigma = (work[0] / work[1]) * sva[:n]
+            assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
+
+            if jobu == 1:
+                # If JOBU = 'F', then u contains the M-by-M matrix of
+                # the left singular vectors, including an ONB of the orthogonal
+                # complement of the Range(A)
+                # However, to recalculate A we are concerned about the
+                # first n singular values and so can ignore the latter.
+                # TODO: Add a test for ONB?
+                u = u[:, :n]
+
+            if lsvec and rsvec:
+                assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
+            if lsvec:
+                assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
+            if rsvec:
+                assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
+
+            assert_equal(iwork[0], np.linalg.matrix_rank(A))
+            assert_equal(iwork[1], np.count_nonzero(sigma))
+            # iwork[2] is non-zero if requested accuracy is not warranted for
+            # the data. This should never occur for these tests.
+            assert_equal(iwork[2], 0)
+
+
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+def test_gejsv_edge_arguments(dtype):
+    """Test edge arguments return expected status"""
+    gejsv = get_lapack_funcs('gejsv', dtype=dtype)
+
+    # scalar A
+    sva, u, v, work, iwork, info = gejsv(1.)
+    assert_equal(info, 0)
+    assert_equal(u.shape, (1, 1))
+    assert_equal(v.shape, (1, 1))
+    assert_equal(sva, np.array([1.], dtype=dtype))
+
+    # 1d A
+    A = np.ones((1,), dtype=dtype)
+    sva, u, v, work, iwork, info = gejsv(A)
+    assert_equal(info, 0)
+    assert_equal(u.shape, (1, 1))
+    assert_equal(v.shape, (1, 1))
+    assert_equal(sva, np.array([1.], dtype=dtype))
+
+    # 2d empty A
+    A = np.ones((1, 0), dtype=dtype)
+    sva, u, v, work, iwork, info = gejsv(A)
+    assert_equal(info, 0)
+    assert_equal(u.shape, (1, 0))
+    assert_equal(v.shape, (1, 0))
+    assert_equal(sva, np.array([], dtype=dtype))
+
+    # make sure "overwrite_a" is respected - user reported in gh-13191
+    A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
+    A = np.asfortranarray(A + A.T)  # make it symmetric and column major
+    Ac = A.copy('A')
+    _ = gejsv(A)
+    assert_allclose(A, Ac)
+
+
+@pytest.mark.parametrize(('kwargs'),
+                         ({'joba': 9},
+                          {'jobu': 9},
+                          {'jobv': 9},
+                          {'jobr': 9},
+                          {'jobt': 9},
+                          {'jobp': 9})
+                         )
+def test_gejsv_invalid_job_arguments(kwargs):
+    """Test invalid job arguments raise an Exception"""
+    # Each job? keyword accepts only a small range of integers; 9 is out of
+    # range for all of them.
+    A = np.ones((2, 2), dtype=float)
+    gejsv = get_lapack_funcs('gejsv', dtype=float)
+    assert_raises(Exception, gejsv, A, **kwargs)
+
+
+@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
+                         [(np.array([[2.27, -1.54, 1.15, -1.94],
+                                     [0.28, -1.67, 0.94, -0.78],
+                                     [-0.48, -3.09, 0.99, -0.21],
+                                     [1.07, 1.22, 0.79, 0.63],
+                                     [-2.35, 2.93, -1.45, 2.30],
+                                     [0.62, -7.39, 1.03, -2.57]]),
+                           np.array([9.9966, 3.6831, 1.3569, 0.5000]),
+                           np.array([[0.2774, -0.6003, -0.1277, 0.1323],
+                                     [0.2020, -0.0301, 0.2805, 0.7034],
+                                     [0.2918, 0.3348, 0.6453, 0.1906],
+                                     [-0.0938, -0.3699, 0.6781, -0.5399],
+                                     [-0.4213, 0.5266, 0.0413, -0.0575],
+                                     [0.7816, 0.3353, -0.1645, -0.3957]]),
+                           np.array([[0.1921, -0.8030, 0.0041, -0.5642],
+                                     [-0.8794, -0.3926, -0.0752, 0.2587],
+                                     [0.2140, -0.2980, 0.7827, 0.5027],
+                                     [-0.3795, 0.3351, 0.6178, -0.6017]]))])
+def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
+    """
+    This test implements the example found in the NAG manual, f08khf.
+    An example was not found for the complex case.
+    """
+    # NAG manual provides accuracy up to 4 decimals
+    atol = 1e-4
+    gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
+
+    sva, u, v, work, iwork, info = gejsv(A)
+
+    assert_allclose(sva_expect, sva, atol=atol)
+    assert_allclose(u_expect, u, atol=atol)
+    assert_allclose(v_expect, v, atol=atol)
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+def test_gttrf_gttrs(dtype):
+    # The test uses ?gttrf and ?gttrs to solve a random system for each dtype,
+    # tests that the output of ?gttrf define LU matrices, that input
+    # parameters are unmodified, transposition options function correctly,
+    # that incompatible matrix shapes raise an error, and singular matrices
+    # return non zero info.
+
+    seed(42)
+    n = 10
+    atol = 100 * np.finfo(dtype).eps
+
+    # create the matrix in accordance with the data type
+    du = generate_random_dtype_array((n-1,), dtype=dtype)
+    d = generate_random_dtype_array((n,), dtype=dtype)
+    dl = generate_random_dtype_array((n-1,), dtype=dtype)
+
+    diag_cpy = [dl.copy(), d.copy(), du.copy()]
+
+    A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
+    x = np.random.rand(n)
+    b = A @ x
+
+    gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
+
+    _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+    # test to assure that the inputs of ?gttrf are unmodified
+    assert_array_equal(dl, diag_cpy[0])
+    assert_array_equal(d, diag_cpy[1])
+    assert_array_equal(du, diag_cpy[2])
+
+    # generate L and U factors from ?gttrf return values
+    # L/U are lower/upper triangular by construction (initially and at end)
+    U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
+    L = np.eye(n, dtype=dtype)
+
+    for i, m in enumerate(_dl):
+        # L is given in a factored form.
+        # See
+        # www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
+        piv = ipiv[i] - 1
+        # right multiply by permutation matrix
+        L[:, [i, piv]] = L[:, [piv, i]]
+        # right multiply by Li, rank-one modification of identity
+        L[:, i] += L[:, i+1]*m
+
+    # one last permutation
+    i, piv = -1, ipiv[-1] - 1
+    # right multiply by final permutation matrix
+    L[:, [i, piv]] = L[:, [piv, i]]
+
+    # check that the outputs of ?gttrf define an LU decomposition of A
+    assert_allclose(A, L @ U, atol=atol)
+
+    b_cpy = b.copy()
+    x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+    # test that the inputs of ?gttrs are unmodified
+    assert_array_equal(b, b_cpy)
+    # test that the result of ?gttrs matches the expected input
+    assert_allclose(x, x_gttrs, atol=atol)
+
+    # test that ?gttrf and ?gttrs work with transposition options
+    if dtype in REAL_DTYPES:
+        trans = "T"
+        b_trans = A.T @ x
+    else:
+        trans = "C"
+        b_trans = A.conj().T @ x
+
+    x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
+    assert_allclose(x, x_gttrs, atol=atol)
+
+    # test that ValueError is raised with incompatible matrix shapes
+    with assert_raises(ValueError):
+        gttrf(dl[:-1], d, du)
+    with assert_raises(ValueError):
+        gttrf(dl, d[:-1], du)
+    with assert_raises(ValueError):
+        gttrf(dl, d, du[:-1])
+
+    # test that matrix of size n=2 raises exception
+    with assert_raises(Exception):
+        gttrf(dl[0], d[:1], du[0])
+
+    # test that singular (row of all zeroes) matrix fails via info
+    du[0] = 0
+    d[0] = 0
+    __dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
+    np.testing.assert_(__d[info - 1] == 0,
+                       "?gttrf: _d[info-1] is {}, not the illegal value :0."
+                       .format(__d[info - 1]))
+
+
+@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
+                         [(np.array([2.1, -1.0, 1.9, 8.0]),
+                           np.array([3.0, 2.3, -5.0, -.9, 7.1]),
+                           np.array([3.4, 3.6, 7.0, -6.0]),
+                           np.array([2.3, -5, -.9, 7.1]),
+                           np.array([3.4, 3.6, 7, -6, -1.015373]),
+                           np.array([-1, 1.9, 8]),
+                           np.array([2, 3, 4, 5, 5]),
+                           np.array([[2.7, 6.6],
+                                     [-0.5, 10.8],
+                                     [2.6, -3.2],
+                                     [0.6, -11.2],
+                                     [2.7, 19.1]
+                                     ]),
+                           np.array([[-4, 5],
+                                     [7, -4],
+                                     [3, -3],
+                                     [-4, -2],
+                                     [-3, 1]])),
+                          (
+                           np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
+                           np.array([-1.3 + 1.3j, -1.3 + 1.3j,
+                                     -1.3 + 3.3j, - .3 + 4.3j,
+                                     -3.3 + 1.3j]),
+                           np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
+                           # du exp
+                           np.array([-1.3 + 1.3j, -1.3 + 3.3j,
+                                     -0.3 + 4.3j, -3.3 + 1.3j]),
+                           np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
+                                     -1.3399 + 0.2875j]),
+                           np.array([2 + 1j, -1 + 1j, 1 - 1j]),
+                           np.array([2, 3, 4, 5, 5]),
+                           np.array([[2.4 - 5j, 2.7 + 6.9j],
+                                     [3.4 + 18.2j, - 6.9 - 5.3j],
+                                     [-14.7 + 9.7j, - 6 - .6j],
+                                     [31.9 - 7.7j, -3.9 + 9.3j],
+                                     [-1 + 1.6j, -3 + 12.2j]]),
+                           np.array([[1 + 1j, 2 - 1j],
+                                     [3 - 1j, 1 + 2j],
+                                     [4 + 5j, -1 + 1j],
+                                     [-1 - 2j, 2 + 1j],
+                                     [1 - 1j, 2 - 2j]])
+                          )])
+def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
+                                                     du2_exp, ipiv_exp, b, x):
+    """Check ?gttrf/?gttrs against NAG reference results."""
+    # test to assure that wrapper is consistent with NAG Library Manual Mark 26
+    # example problems: f07cdf and f07cef (real)
+    # examples: f07crf and f07csf (complex)
+    # (Links may expire, so search for "NAG Library Manual Mark 26" online)
+
+    gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
+
+    _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+    assert_allclose(du2, du2_exp)
+    assert_allclose(_du, du_exp)
+    assert_allclose(_d, d_exp, atol=1e-4)  # NAG examples provide 4 decimals.
+    assert_allclose(ipiv, ipiv_exp)
+
+    x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+
+    assert_allclose(x_gttrs, x)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
+def test_geqrfp_lwork(dtype, shape):
+ geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
+ m, n = shape
+ lwork, info = geqrfp_lwork(m=m, n=n)
+ assert_equal(info, 0)
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+                         zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs(ddtype, dtype):
+    """
+    Test LDL^H factorization (?pttrf) and solve (?pttrs) for a random
+    Hermitian positive definite tridiagonal system.
+    """
+    seed(42)
+    # set test tolerance appropriate for dtype
+    atol = 100*np.finfo(dtype).eps
+    # n is the length diagonal of A
+    n = 10
+    # create diagonals according to size and dtype
+
+    # diagonal d should always be real.
+    # add 4 to d so it will be dominant for all dtypes
+    d = generate_random_dtype_array((n,), ddtype) + 4
+    # diagonal e may be real or complex.
+    e = generate_random_dtype_array((n-1,), dtype)
+
+    # assemble diagonals together into matrix
+    A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+    # store a copy of diagonals to later verify
+    diag_cpy = [d.copy(), e.copy()]
+
+    pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+
+    _d, _e, info = pttrf(d, e)
+    # test to assure that the inputs of ?pttrf are unmodified
+    assert_array_equal(d, diag_cpy[0])
+    assert_array_equal(e, diag_cpy[1])
+    assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info))
+
+    # test that the factors from pttrf can be recombined to make A
+    L = np.diag(_e, -1) + np.diag(np.ones(n))
+    D = np.diag(_d)
+
+    assert_allclose(A, L@D@L.conjugate().T, atol=atol)
+
+    # generate random solution x
+    x = generate_random_dtype_array((n,), dtype)
+    # determine accompanying b to get soln x
+    b = A@x
+
+    # determine _x from pttrs
+    pttrs = get_lapack_funcs('pttrs', dtype=dtype)
+    _x, info = pttrs(_d, _e.conj(), b)
+    assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info))
+
+    # test that _x from pttrs matches the expected x
+    assert_allclose(x, _x, atol=atol)
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+ zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
+ n = 10
+ pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+ d = generate_random_dtype_array((n,), ddtype) + 2
+ e = generate_random_dtype_array((n-1,), dtype)
+ # test that ValueError is raised with incompatible matrix shapes
+ assert_raises(ValueError, pttrf, d[:-1], e)
+ assert_raises(ValueError, pttrf, d, e[:-1])
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+                         zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
+    """?pttrf must flag singular and non-SPD inputs through ``info``."""
+    n = 10
+    pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+    d = generate_random_dtype_array((n,), ddtype) + 2
+    e = generate_random_dtype_array((n-1,), dtype)
+    # test that singular (row of all zeroes) matrix fails via info
+    d[0] = 0
+    e[0] = 0
+    _d, _e, info = pttrf(d, e)
+    assert_equal(_d[info - 1], 0,
+                 "?pttrf: _d[info-1] is {}, not the illegal value :0."
+                 .format(_d[info - 1]))
+
+    # test with non-spd matrix (d no longer diagonally dominant)
+    d = generate_random_dtype_array((n,), ddtype)
+    _d, _e, info = pttrf(d, e)
+    assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
+
+
+@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
+    (np.array([4, 10, 29, 25, 5]),
+     np.array([-2, -6, 15, 8]),
+     np.array([4, 9, 25, 16, 1]),
+     np.array([-.5, -.6667, .6, .5]),
+     np.array([[6, 10], [9, 4], [2, 9], [14, 65],
+               [7, 23]]),
+     np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
+               [3, -5]])
+     ), (
+     np.array([16, 41, 46, 21]),
+     np.array([16 + 16j, 18 - 9j, 1 - 4j]),
+     np.array([16, 9, 1, 4]),
+     np.array([1+1j, 2-1j, 1-4j]),
+     np.array([[64+16j, -16-32j], [93+62j, 61-66j],
+               [78-80j, 71-74j], [14-27j, 35+15j]]),
+     np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
+               [1-1j, 2+1j]])
+     )])
+def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
+    # test to assure that wrapper is consistent with NAG Manual Mark 26
+    # example problems: f07jdf and f07jef (real)
+    # examples: f07jrf and f07jsf (complex)
+    # NAG examples provide 4 decimals.
+    # (Links expire, so please search for "NAG Library Manual Mark 26" online)
+
+    atol = 1e-4
+    pttrf = get_lapack_funcs('pttrf', dtype=e[0])
+    _d, _e, info = pttrf(d, e)
+    assert_allclose(_d, d_expect, atol=atol)
+    assert_allclose(_e, e_expect, atol=atol)
+
+    pttrs = get_lapack_funcs('pttrs', dtype=e[0])
+    _x, info = pttrs(_d, _e.conj(), b)
+    assert_allclose(_x, x_expect, atol=atol)
+
+    # also test option `lower`
+    if e.dtype in COMPLEX_DTYPES:
+        _x, info = pttrs(_d, _e, b, lower=1)
+        assert_allclose(_x, x_expect, atol=atol)
+
+
+def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
+    # used by ?pteqr tests to build parameters
+    # returns tuple of (d, e, A, z) where d/e are the SPD tridiagonal
+    # diagonals, A is the matrix whose eigenvalues are checked, and z is
+    # the matrix passed for the `z` argument of pteqr
+    if compute_z == 1:
+        # build Hermitian A from Q**T * tri * Q = A by creating Q and tri
+        A_eig = generate_random_dtype_array((n, n), dtype)
+        A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
+        A_eig = (A_eig + A_eig.conj().T) / 2
+        # obtain right eigenvectors (orthogonal)
+        vr = eigh(A_eig)[1]
+        # create tridiagonal matrix
+        d = generate_random_dtype_array((n,), realtype) + 4
+        e = generate_random_dtype_array((n-1,), realtype)
+        tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+        # Build A using these factors that sytrd would: (Q**T * tri * Q = A)
+        A = vr @ tri @ vr.conj().T
+        # vr is orthogonal
+        z = vr
+
+    else:
+        # d and e are always real per lapack docs.
+        d = generate_random_dtype_array((n,), realtype)
+        e = generate_random_dtype_array((n-1,), realtype)
+
+        # make SPD by shifting the diagonal
+        d = d + 4
+        A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+        z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
+    return (d, e, A, z)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+                         zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr(dtype, realtype, compute_z):
+    '''
+    Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
+    It generates random SPD matrix diagonals d and e, and then confirms
+    correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it
+    tests that z can reform A.
+    '''
+    seed(42)
+    atol = 1000*np.finfo(dtype).eps
+    pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+
+    n = 10
+
+    d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+
+    d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
+    assert_equal(info, 0, "info = {}, should be 0.".format(info))
+
+    # compare the routine's eigenvalues with scipy.linalg.eig's.
+    assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
+
+    if compute_z:
+        # verify z_pteqr as orthogonal
+        assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
+                        atol=atol)
+        # verify that z_pteqr recombines to A
+        assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
+                        A, atol=atol)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+                         zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_error_non_spd(dtype, realtype, compute_z):
+    """?pteqr must report failure (info > 0) for a non-SPD matrix."""
+    seed(42)
+    pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+
+    n = 10
+    d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+
+    # test with non-spd matrix (undo the +4 diagonal shift)
+    d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
+    assert info > 0
+
+
+@pytest.mark.parametrize("dtype,realtype",
+ zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
+ seed(42)
+ pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+ n = 10
+ d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+ # test with incorrect/incompatible array sizes
+ assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
+ assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
+ if compute_z:
+ assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+ zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_error_singular(dtype, realtype, compute_z):
+ seed(42)
+ pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+ n = 10
+ d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+ # test with singular matrix
+ d[0] = 0
+ e[0] = 0
+ d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
+ assert info > 0
+
+
+@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
+ [(2, # "I"
+ np.array([4.16, 5.25, 1.09, .62]),
+ np.array([3.17, -.97, .55]),
+ np.array([8.0023, 1.9926, 1.0014, 0.1237]),
+ np.array([[0.6326, 0.6245, -0.4191, 0.1847],
+ [0.7668, -0.4270, 0.4176, -0.2352],
+ [-0.1082, 0.6071, 0.4594, -0.6393],
+ [-0.0081, 0.2432, 0.6625, 0.7084]])),
+ ])
+def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
+ '''
+ Implements real (f08jgf) example from NAG Manual Mark 26.
+ Tests for correct outputs.
+ '''
+ # the NAG manual has 4 decimals accuracy
+ atol = 1e-4
+ pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
+
+ z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+ _d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
+ assert_allclose(_d, d_expect, atol=atol)
+ assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
+def test_geqrfp(dtype, matrix_size):
+ # Tests for all dytpes, tall, wide, and square matrices.
+ # Using the routine with random matrix A, Q and R are obtained and then
+ # tested such that R is upper triangular and non-negative on the diagonal,
+ # and Q is an orthagonal matrix. Verifies that A=Q@R. It also
+ # tests against a matrix that for which the linalg.qr method returns
+ # negative diagonals, and for error messaging.
+
+ # set test tolerance appropriate for dtype
+ np.random.seed(42)
+ rtol = 250*np.finfo(dtype).eps
+ atol = 100*np.finfo(dtype).eps
+ # get appropriate ?geqrfp for dtype
+ geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
+ gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
+
+ m, n = matrix_size
+
+ # create random matrix of dimentions m x n
+ A = generate_random_dtype_array((m, n), dtype=dtype)
+ # create qr matrix using geqrfp
+ qr_A, tau, info = geqrfp(A)
+
+ # obtain r from the upper triangular area
+ r = np.triu(qr_A)
+
+ # obtain q from the orgqr lapack routine
+ # based on linalg.qr's extraction strategy of q with orgqr
+
+ if m > n:
+ # this adds an extra column to the end of qr_A
+ # let qqr be an empty m x m matrix
+ qqr = np.zeros((m, m), dtype=dtype)
+ # set first n columns of qqr to qr_A
+ qqr[:, :n] = qr_A
+ # determine q from this qqr
+ # note that m is a sufficient for lwork based on LAPACK documentation
+ q = gqr(qqr, tau=tau, lwork=m)[0]
+ else:
+ q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
+
+ # test that q and r still make A
+ assert_allclose(q@r, A, rtol=rtol)
+ # ensure that q is orthogonal (that q @ transposed q is the identity)
+ assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
+ atol=atol)
+ # ensure r is upper tri by comparing original r to r as upper triangular
+ assert_allclose(r, np.triu(r), rtol=rtol)
+ # make sure diagonals of r are positive for this random solution
+ assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
+ # ensure that info is zero for this success
+ assert_(info == 0)
+
+ # test that this routine gives r diagonals that are positive for a
+ # matrix that returns negatives in the diagonal with scipy.linalg.rq
+ A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
+ r_rq_neg, q_rq_neg = qr(A_negative)
+ rq_A_neg, tau_neg, info_neg = geqrfp(A_negative)
+ # assert that any of the entries on the diagonal from linalg.qr
+ # are negative and that all of geqrfp are positive.
+ assert_(np.any(np.diag(r_rq_neg) < 0) and
+ np.all(np.diag(r) > 0))
+
+
+def test_geqrfp_errors_with_empty_array():
+ # check that empty array raises good error message
+ A_empty = np.array([])
+ geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
+ assert_raises(Exception, geqrfp, A_empty)
+
+
+@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
+@pytest.mark.parametrize("pfx", ['sy', 'he'])
+def test_standard_eigh_lworks(pfx, driver):
+ n = 1200 # Some sufficiently big arbitrary number
+ dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
+ sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
+ dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
+ try:
+ _compute_lwork(sc_dlw, n, lower=1)
+ _compute_lwork(dz_dlw, n, lower=1)
+ except Exception as e:
+ pytest.fail("{}_lwork raised unexpected exception: {}"
+ "".format(pfx+driver, e))
+
+
+@pytest.mark.parametrize("driver", ['gv', 'gvx'])
+@pytest.mark.parametrize("pfx", ['sy', 'he'])
+def test_generalized_eigh_lworks(pfx, driver):
+ n = 1200 # Some sufficiently big arbitrary number
+ dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
+ sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
+ dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
+ # Shouldn't raise any exceptions
+ try:
+ _compute_lwork(sc_dlw, n, uplo="L")
+ _compute_lwork(dz_dlw, n, uplo="L")
+ except Exception as e:
+ pytest.fail("{}_lwork raised unexpected exception: {}"
+ "".format(pfx+driver, e))
+
+
+@pytest.mark.parametrize("dtype_", DTYPES)
+@pytest.mark.parametrize("m", [1, 10, 100, 1000])
+def test_orcsd_uncsd_lwork(dtype_, m):
+ seed(1234)
+ p = randint(0, m)
+ q = m - p
+ pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
+ dlw = pfx + 'csd_lwork'
+ lw = get_lapack_funcs(dlw, dtype=dtype_)
+ lwval = _compute_lwork(lw, m, p, q)
+ lwval = lwval if pfx == 'un' else (lwval,)
+ assert all([x > 0 for x in lwval])
+
+
+@pytest.mark.parametrize("dtype_", DTYPES)
+def test_orcsd_uncsd(dtype_):
+ m, p, q = 250, 80, 170
+
+ pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
+ X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
+
+ drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
+ lwval = _compute_lwork(dlw, m, p, q)
+ lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
+ 'lrwork'], lwval))
+
+ cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
+ drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
+
+ assert info == 0
+
+ U = block_diag(u1, u2)
+ VH = block_diag(v1t, v2t)
+ r = min(min(p, q), min(m-p, m-q))
+ n11 = min(p, q) - r
+ n12 = min(p, m-q) - r
+ n21 = min(m-p, q) - r
+ n22 = min(m-p, m-q) - r
+
+ S = np.zeros((m, m), dtype=dtype_)
+ one = dtype_(1.)
+ for i in range(n11):
+ S[i, i] = one
+ for i in range(n22):
+ S[p+i, q+i] = one
+ for i in range(n12):
+ S[i+n11+r, i+n11+r+n21+n22+r] = -one
+ for i in range(n21):
+ S[p+n22+r+i, n11+r+i] = one
+
+ for i in range(r):
+ S[i+n11, i+n11] = np.cos(theta[i])
+ S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
+
+ S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
+ S[p+n22+i, i+n11] = np.sin(theta[i])
+
+ Xc = U @ S @ VH
+ assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("trans_bool", [False, True])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx(dtype, trans_bool, fact):
+ """
+ These tests uses ?gtsvx to solve a random Ax=b system for each dtype.
+ It tests that the outputs define an LU matrix, that inputs are unmodified,
+ transposal options, incompatible shapes, singular matrices, and
+ singular factorizations. It parametrizes DTYPES and the 'fact' value along
+ with the fact related inputs.
+ """
+ seed(42)
+ # set test tolerance appropriate for dtype
+ atol = 100 * np.finfo(dtype).eps
+ # obtain routine
+ gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+ # Generate random tridiagonal matrix A
+ n = 10
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+ # generate random solution x
+ x = generate_random_dtype_array((n, 2), dtype=dtype)
+ # create b from x for equation Ax=b
+ trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
+ b = (A.conj().T if trans_bool else A) @ x
+
+ # store a copy of the inputs to check they haven't been modified later
+ inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
+
+ # set these to None if fact = 'N', or the output of gttrf is fact = 'F'
+ dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+ gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+ gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ assert_(info == 0, "?gtsvx info = {}, should be zero".format(info))
+
+ # assure that inputs are unmodified
+ assert_array_equal(dl, inputs_cpy[0])
+ assert_array_equal(d, inputs_cpy[1])
+ assert_array_equal(du, inputs_cpy[2])
+ assert_array_equal(b, inputs_cpy[3])
+
+ # test that x_soln matches the expected x
+ assert_allclose(x, x_soln, atol=atol)
+
+ # assert that the outputs are of correct type or shape
+ # rcond should be a scalar
+ assert_(hasattr(rcond, "__len__") is not True,
+ "rcond should be scalar but is {}".format(rcond))
+ # ferr should be length of # of cols in x
+ assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but shoud be {},"
+ .format(ferr.shape[0], b.shape[1]))
+ # berr should be length of # of cols in x
+ assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but shoud be {},"
+ .format(berr.shape[0], b.shape[1]))
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("trans_bool", [0, 1])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx_error_singular(dtype, trans_bool, fact):
+ seed(42)
+ # obtain routine
+ gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+ # Generate random tridiagonal matrix A
+ n = 10
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+ # generate random solution x
+ x = generate_random_dtype_array((n, 2), dtype=dtype)
+ # create b from x for equation Ax=b
+ trans = "T" if dtype in REAL_DTYPES else "C"
+ b = (A.conj().T if trans_bool else A) @ x
+
+ # set these to None if fact = 'N', or the output of gttrf is fact = 'F'
+ dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+ gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+ gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ # test with singular matrix
+ # no need to test inputs with fact "F" since ?gttrf already does.
+ if fact == "N":
+ # Construct a singular example manually
+ d[-1] = 0
+ dl[-1] = 0
+ # solve using routine
+ gtsvx_out = gtsvx(dl, d, du, b)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ # test for the singular matrix.
+ assert info > 0, "info should be > 0 for singular matrix"
+
+ elif fact == 'F':
+ # assuming that a singular factorization is input
+ df_[-1] = 0
+ duf_[-1] = 0
+ du2f_[-1] = 0
+
+ gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
+ du2=du2f_, ipiv=ipiv_)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ # info should not be zero and should provide index of illegal value
+ assert info > 0, "info should be > 0 for singular matrix"
+
+
+@pytest.mark.parametrize("dtype", DTYPES*2)
+@pytest.mark.parametrize("trans_bool", [False, True])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
+ seed(42)
+ # obtain routine
+ gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+ # Generate random tridiagonal matrix A
+ n = 10
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+ # generate random solution x
+ x = generate_random_dtype_array((n, 2), dtype=dtype)
+ # create b from x for equation Ax=b
+ trans = "T" if dtype in REAL_DTYPES else "C"
+ b = (A.conj().T if trans_bool else A) @ x
+
+ # set these to None if fact = 'N', or the output of gttrf is fact = 'F'
+ dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+ gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+ if fact == "N":
+ assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ else:
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
+
+
+@pytest.mark.parametrize("du,d,dl,b,x",
+ [(np.array([2.1, -1.0, 1.9, 8.0]),
+ np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
+ np.array([3.4, 3.6, 7.0, -6.0]),
+ np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
+ [.6, -11.2], [2.7, 19.1]]),
+ np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
+ [-3, 1]])),
+ (np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
+ np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
+ -.3 + 4.3j, -3.3 + 1.3j]),
+ np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
+ np.array([[2.4 - 5j, 2.7 + 6.9j],
+ [3.4 + 18.2j, -6.9 - 5.3j],
+ [-14.7 + 9.7j, -6 - .6j],
+ [31.9 - 7.7j, -3.9 + 9.3j],
+ [-1 + 1.6j, -3 + 12.2j]]),
+ np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
+ [4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
+ [1 - 1j, 2 - 2j]]))])
+def test_gtsvx_NAG(du, d, dl, b, x):
+ # Test to ensure wrapper is consistent with NAG Manual Mark 26
+ # example problems: real (f07cbf) and complex (f07cpf)
+ gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
+
+ gtsvx_out = gtsvx(dl, d, du, b)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+
+ assert_array_almost_equal(x, x_soln)
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+ [("F",
+ lambda d, e:get_lapack_funcs('pttrf',
+ dtype=e.dtype)(d, e)),
+ ("N", lambda d, e: (None, None, None))])
+def test_ptsvx(dtype, realtype, fact, df_de_lambda):
+ '''
+ This tests the ?ptsvx lapack routine wrapper to solve a random system
+ Ax = b for all dtypes and input variations. Tests for: unmodified
+ input parameters, fact options, incompatible matrix shapes raise an error,
+ and singular matrices return info of illegal value.
+ '''
+ seed(42)
+ # set test tolerance appropriate for dtype
+ atol = 100 * np.finfo(dtype).eps
+ ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+ n = 5
+ # create diagonals according to size and dtype
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), dtype)
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+ b = A @ x_soln
+
+ # use lambda to determine what df, ef are
+ df, ef, info = df_de_lambda(d, e)
+
+ # create copy to later test that they are unmodified
+ diag_cpy = [d.copy(), e.copy(), b.copy()]
+
+ # solve using routine
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
+ df=df, ef=ef)
+ # d, e, and b should be unmodified
+ assert_array_equal(d, diag_cpy[0])
+ assert_array_equal(e, diag_cpy[1])
+ assert_array_equal(b, diag_cpy[2])
+ assert_(info == 0, "info should be 0 but is {}.".format(info))
+ assert_array_almost_equal(x_soln, x)
+
+ # test that the factors from ptsvx can be recombined to make A
+ L = np.diag(ef, -1) + np.diag(np.ones(n))
+ D = np.diag(df)
+ assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
+
+ # assert that the outputs are of correct type or shape
+ # rcond should be a scalar
+ assert not hasattr(rcond, "__len__"), \
+ "rcond should be scalar but is {}".format(rcond)
+ # ferr should be length of # of cols in x
+ assert_(ferr.shape == (2,), "ferr.shape is {} but shoud be ({},)"
+ .format(ferr.shape, x_soln.shape[1]))
+ # berr should be length of # of cols in x
+ assert_(berr.shape == (2,), "berr.shape is {} but shoud be ({},)"
+ .format(berr.shape, x_soln.shape[1]))
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+ [("F",
+ lambda d, e:get_lapack_funcs('pttrf',
+ dtype=e.dtype)(d, e)),
+ ("N", lambda d, e: (None, None, None))])
+def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
+ seed(42)
+ ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+ n = 5
+ # create diagonals according to size and dtype
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), dtype)
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+ b = A @ x_soln
+
+ # use lambda to determine what df, ef are
+ df, ef, info = df_de_lambda(d, e)
+
+ # test with malformatted array sizes
+ assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
+ assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
+ assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+ [("F",
+ lambda d, e:get_lapack_funcs('pttrf',
+ dtype=e.dtype)(d, e)),
+ ("N", lambda d, e: (None, None, None))])
+def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
+ seed(42)
+ ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+ n = 5
+ # create diagonals according to size and dtype
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), dtype)
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+ b = A @ x_soln
+
+ # use lambda to determine what df, ef are
+ df, ef, info = df_de_lambda(d, e)
+
+ if fact == "N":
+ d[3] = 0
+ # obtain new df, ef
+ df, ef, info = df_de_lambda(d, e)
+ # solve using routine
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
+ # test for the singular matrix.
+ assert info > 0 and info <= n
+
+ # non SPD matrix
+ d = generate_random_dtype_array((n,), realtype)
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
+ assert info > 0 and info <= n
+ else:
+ # assuming that someone is using a singular factorization
+ df, ef, info = df_de_lambda(d, e)
+ df[0] = 0
+ ef[0] = 0
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
+ df=df, ef=ef)
+ assert info > 0
+
+
+@pytest.mark.parametrize('d,e,b,x',
+ [(np.array([4, 10, 29, 25, 5]),
+ np.array([-2, -6, 15, 8]),
+ np.array([[6, 10], [9, 4], [2, 9], [14, 65],
+ [7, 23]]),
+ np.array([[2.5, 2], [2, -1], [1, -3],
+ [-1, 6], [3, -5]])),
+ (np.array([16, 41, 46, 21]),
+ np.array([16 + 16j, 18 - 9j, 1 - 4j]),
+ np.array([[64 + 16j, -16 - 32j],
+ [93 + 62j, 61 - 66j],
+ [78 - 80j, 71 - 74j],
+ [14 - 27j, 35 + 15j]]),
+ np.array([[2 + 1j, -3 - 2j],
+ [1 + 1j, 1 + 1j],
+ [1 - 2j, 1 - 2j],
+ [1 - 1j, 2 + 1j]]))])
+def test_ptsvx_NAG(d, e, b, x):
+ # test to assure that wrapper is consistent with NAG Manual Mark 26
+ # example problemss: f07jbf, f07jpf
+ # (Links expire, so please search for "NAG Library Manual Mark 26" online)
+
+ # obtain routine with correct type based on e.dtype
+ ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
+ # solve using routine
+ df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
+ # determine ptsvx's solution and x are the same.
+ assert_array_almost_equal(x, x_ptsvx)
+
+
+@pytest.mark.parametrize('lower', [False, True])
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower):
+ seed(1234)
+ atol = np.finfo(dtype).eps*100
+ # Manual conversion to/from packed format is feasible here.
+ n, nrhs = 10, 4
+ a = generate_random_dtype_array([n, n], dtype=dtype)
+ b = generate_random_dtype_array([n, nrhs], dtype=dtype)
+
+ a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
+ if lower:
+ inds = ([x for y in range(n) for x in range(y, n)],
+ [y for y in range(n) for x in range(y, n)])
+ else:
+ inds = ([x for y in range(1, n+1) for x in range(y)],
+ [y-1 for y in range(1, n+1) for x in range(y)])
+ ap = a[inds]
+ ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs(
+ ('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'),
+ dtype=dtype,
+ ilp64="preferred")
+
+ ul, info = pptrf(n, ap, lower=lower)
+ assert_equal(info, 0)
+ aul = cholesky(a, lower=lower)[inds]
+ assert_allclose(ul, aul, rtol=0, atol=atol)
+
+ uli, info = pptri(n, ul, lower=lower)
+ assert_equal(info, 0)
+ auli = inv(a)[inds]
+ assert_allclose(uli, auli, rtol=0, atol=atol)
+
+ x, info = pptrs(n, ul, b, lower=lower)
+ assert_equal(info, 0)
+ bx = solve(a, b)
+ assert_allclose(x, bx, rtol=0, atol=atol)
+
+ xv, info = ppsv(n, ap, b, lower=lower)
+ assert_equal(info, 0)
+ assert_allclose(xv, bx, rtol=0, atol=atol)
+
+ anorm = np.linalg.norm(a, 1)
+ rcond, info = ppcon(n, ap, anorm=anorm, lower=lower)
+ assert_equal(info, 0)
+ assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_matfuncs.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_matfuncs.py
new file mode 100644
index 0000000..6b8f78f
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_matfuncs.py
@@ -0,0 +1,895 @@
+#
+# Created by: Pearu Peterson, March 2002
+#
+""" Test functions for linalg.matfuncs module
+
+"""
+import random
+import functools
+
+import numpy as np
+from numpy import array, identity, dot, sqrt
+from numpy.testing import (
+ assert_array_equal, assert_array_less, assert_equal,
+ assert_array_almost_equal,
+ assert_allclose, assert_, assert_warns)
+import pytest
+
+import scipy.linalg
+from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
+ expm, expm_frechet, expm_cond, norm, khatri_rao)
+from scipy.linalg import _matfuncs_inv_ssq
+import scipy.linalg._expm_frechet
+
+from scipy.optimize import minimize
+
+
+def _get_al_mohy_higham_2012_experiment_1():
+ """
+ Return the test matrix from Experiment (1) of [1]_.
+
+ References
+ ----------
+ .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+ "Improved Inverse Scaling and Squaring Algorithms
+ for the Matrix Logarithm."
+ SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+ ISSN 1095-7197
+
+ """
+ A = np.array([
+ [3.2346e-1, 3e4, 3e4, 3e4],
+ [0, 3.0089e-1, 3e4, 3e4],
+ [0, 0, 3.2210e-1, 3e4],
+ [0, 0, 0, 3.0744e-1]], dtype=float)
+ return A
+
+
+class TestSignM(object):
+
+ def test_nils(self):
+ a = array([[29.2, -24.2, 69.5, 49.8, 7.],
+ [-9.2, 5.2, -18., -16.8, -2.],
+ [-10., 6., -20., -18., -2.],
+ [-9.6, 9.6, -25.5, -15.4, -2.],
+ [9.8, -4.8, 18., 18.2, 2.]])
+ cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333],
+ [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667],
+ [-4.08,0.56,-4.92,-7.6,0.56],
+ [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667],
+ [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]])
+ r = signm(a)
+ assert_array_almost_equal(r,cr)
+
+ def test_defective1(self):
+ a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])
+ signm(a, disp=False)
+ #XXX: what would be the correct result?
+
+ def test_defective2(self):
+ a = array((
+ [29.2,-24.2,69.5,49.8,7.0],
+ [-9.2,5.2,-18.0,-16.8,-2.0],
+ [-10.0,6.0,-20.0,-18.0,-2.0],
+ [-9.6,9.6,-25.5,-15.4,-2.0],
+ [9.8,-4.8,18.0,18.2,2.0]))
+ signm(a, disp=False)
+ #XXX: what would be the correct result?
+
+ def test_defective3(self):
+ a = array([[-2., 25., 0., 0., 0., 0., 0.],
+ [0., -3., 10., 3., 3., 3., 0.],
+ [0., 0., 2., 15., 3., 3., 0.],
+ [0., 0., 0., 0., 15., 3., 0.],
+ [0., 0., 0., 0., 3., 10., 0.],
+ [0., 0., 0., 0., 0., -2., 25.],
+ [0., 0., 0., 0., 0., 0., -3.]])
+ signm(a, disp=False)
+ #XXX: what would be the correct result?
+
+
+class TestLogM(object):
+
+ def test_nils(self):
+ a = array([[-2., 25., 0., 0., 0., 0., 0.],
+ [0., -3., 10., 3., 3., 3., 0.],
+ [0., 0., 2., 15., 3., 3., 0.],
+ [0., 0., 0., 0., 15., 3., 0.],
+ [0., 0., 0., 0., 3., 10., 0.],
+ [0., 0., 0., 0., 0., -2., 25.],
+ [0., 0., 0., 0., 0., 0., -3.]])
+ m = (identity(7)*3.1+0j)-a
+ logm(m, disp=False)
+ #XXX: what would be the correct result?
+
+ def test_al_mohy_higham_2012_experiment_1_logm(self):
+ # The logm completes the round trip successfully.
+ # Note that the expm leg of the round trip is badly conditioned.
+ A = _get_al_mohy_higham_2012_experiment_1()
+ A_logm, info = logm(A, disp=False)
+ A_round_trip = expm(A_logm)
+ assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)
+
+ def test_al_mohy_higham_2012_experiment_1_funm_log(self):
+ # The raw funm with np.log does not complete the round trip.
+ # Note that the expm leg of the round trip is badly conditioned.
+ A = _get_al_mohy_higham_2012_experiment_1()
+ A_funm_log, info = funm(A, np.log, disp=False)
+ A_round_trip = expm(A_funm_log)
+ assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
+
+ def test_round_trip_random_float(self):
+ np.random.seed(1234)
+ for n in range(1, 6):
+ M_unscaled = np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+
+ # Eigenvalues are related to the branch cut.
+ W = np.linalg.eigvals(M)
+ err_msg = 'M:{0} eivals:{1}'.format(M, W)
+
+ # Check sqrtm round trip because it is used within logm.
+ M_sqrtm, info = sqrtm(M, disp=False)
+ M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+ assert_allclose(M_sqrtm_round_trip, M)
+
+ # Check logm round trip.
+ M_logm, info = logm(M, disp=False)
+ M_logm_round_trip = expm(M_logm)
+ assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
+
+ def test_round_trip_random_complex(self):
+ np.random.seed(1234)
+ for n in range(1, 6):
+ M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+ M_logm, info = logm(M, disp=False)
+ M_round_trip = expm(M_logm)
+ assert_allclose(M_round_trip, M)
+
+ def test_logm_type_preservation_and_conversion(self):
+ # The logm matrix function should preserve the type of a matrix
+ # whose eigenvalues are positive with zero imaginary part.
+ # Test this preservation for variously structured matrices.
+ complex_dtype_chars = ('F', 'D', 'G')
+ for matrix_as_list in (
+ [[1, 0], [0, 1]],
+ [[1, 0], [1, 1]],
+ [[2, 1], [1, 1]],
+ [[2, 3], [1, 2]]):
+
+ # check that the spectrum has the expected properties
+ W = scipy.linalg.eigvals(matrix_as_list)
+ assert_(not any(w.imag or w.real < 0 for w in W))
+
+ # check float type preservation
+ A = np.array(matrix_as_list, dtype=float)
+ A_logm, info = logm(A, disp=False)
+ assert_(A_logm.dtype.char not in complex_dtype_chars)
+
+ # check complex type preservation
+ A = np.array(matrix_as_list, dtype=complex)
+ A_logm, info = logm(A, disp=False)
+ assert_(A_logm.dtype.char in complex_dtype_chars)
+
+ # check float->complex type conversion for the matrix negation
+ A = -np.array(matrix_as_list, dtype=float)
+ A_logm, info = logm(A, disp=False)
+ assert_(A_logm.dtype.char in complex_dtype_chars)
+
+ def test_complex_spectrum_real_logm(self):
+ # This matrix has complex eigenvalues and real logm.
+ # Its output dtype depends on its input dtype.
+ M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
+ for dt in float, complex:
+ X = np.array(M, dtype=dt)
+ w = scipy.linalg.eigvals(X)
+ assert_(1e-2 < np.absolute(w.imag).sum())
+ Y, info = logm(X, disp=False)
+ assert_(np.issubdtype(Y.dtype, np.inexact))
+ assert_allclose(expm(Y), X)
+
+ def test_real_mixed_sign_spectrum(self):
+ # These matrices have real eigenvalues with mixed signs.
+ # The output logm dtype is complex, regardless of input dtype.
+ for M in (
+ [[1, 0], [0, -1]],
+ [[0, 1], [1, 0]]):
+ for dt in float, complex:
+ A = np.array(M, dtype=dt)
+ A_logm, info = logm(A, disp=False)
+ assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
+
+ def test_exactly_singular(self):
+ A = np.array([[0, 0], [1j, 1j]])
+ B = np.asarray([[1, 1], [0, 0]])
+ for M in A, A.T, B, B.T:
+ expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
+ L, info = assert_warns(expected_warning, logm, M, disp=False)
+ E = expm(L)
+ assert_allclose(E, M, atol=1e-14)
+
+ def test_nearly_singular(self):
+ M = np.array([[1e-100]])
+ expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
+ L, info = assert_warns(expected_warning, logm, M, disp=False)
+ E = expm(L)
+ assert_allclose(E, M, atol=1e-14)
+
+ def test_opposite_sign_complex_eigenvalues(self):
+ # See gh-6113
+ E = [[0, 1], [-1, 0]]
+ L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
+ assert_allclose(expm(L), E, atol=1e-14)
+ assert_allclose(logm(E), L, atol=1e-14)
+ E = [[1j, 4], [0, -1j]]
+ L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
+ assert_allclose(expm(L), E, atol=1e-14)
+ assert_allclose(logm(E), L, atol=1e-14)
+ E = [[1j, 0], [0, -1j]]
+ L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
+ assert_allclose(expm(L), E, atol=1e-14)
+ assert_allclose(logm(E), L, atol=1e-14)
+
+
class TestSqrtM(object):
    """Tests for the matrix square root ``scipy.linalg.sqrtm``."""

    def test_round_trip_random_float(self):
        # sqrtm(M) @ sqrtm(M) should recover M for random real matrices
        # across a wide range of scales.
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)

    def test_round_trip_random_complex(self):
        # Same round-trip check for random complex matrices.
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)

    def test_bad(self):
        # A known ill-conditioned example from the literature.
        # See https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
        e = 2**-5
        se = sqrt(e)
        a = array([[1.0,0,0,1],
                   [0,e,0,0],
                   [0,0,e,0],
                   [0,0,0,1]])
        sa = array([[1,0,0,0.5],
                    [0,se,0,0],
                    [0,0,se,0],
                    [0,0,0,1]])
        n = a.shape[0]
        assert_array_almost_equal(dot(sa,sa),a)
        # Check default sqrtm.
        esa = sqrtm(a, disp=False, blocksize=n)[0]
        assert_array_almost_equal(dot(esa,esa),a)
        # Check sqrtm with 2x2 blocks.
        esa = sqrtm(a, disp=False, blocksize=2)[0]
        assert_array_almost_equal(dot(esa,esa),a)

    def test_sqrtm_type_preservation_and_conversion(self):
        # The sqrtm matrix function should preserve the type of a matrix
        # whose eigenvalues are nonnegative with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]],
                [[1, 1], [1, 1]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))

            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char not in complex_dtype_chars)

            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)

            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)

    def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
        # Matrices with negative or complex eigenvalues must yield a
        # complex square root regardless of the input dtype.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))

            # check complex->complex
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)

            # check float->complex
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)

    def test_blocksizes(self):
        # Make sure I do not goof up the blocksizes when they do not divide n.
        np.random.seed(1234)
        for n in range(1, 8):
            A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
            A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
            assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
            for blocksize in range(1, 10):
                A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
                assert_allclose(A_sqrtm_default, A_sqrtm_new)

    def test_al_mohy_higham_2012_experiment_1(self):
        # Matrix square root of a tricky upper triangular matrix.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_sqrtm, info = sqrtm(A, disp=False)
        A_round_trip = A_sqrtm.dot(A_sqrtm)
        assert_allclose(A_round_trip, A, rtol=1e-5)
        assert_allclose(np.tril(A_round_trip), np.tril(A))

    def test_strict_upper_triangular(self):
        # This matrix has no square root.
        for dt in int, float:
            A = np.array([
                [0, 3, 0, 0],
                [0, 0, 3, 0],
                [0, 0, 0, 3],
                [0, 0, 0, 0]], dtype=dt)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(np.isnan(A_sqrtm).all())

    def test_weird_matrix(self):
        # The square root of matrix B exists.
        for dt in int, float:
            A = np.array([
                [0, 0, 1],
                [0, 0, 0],
                [0, 1, 0]], dtype=dt)
            B = np.array([
                [0, 1, 0],
                [0, 0, 0],
                [0, 0, 0]], dtype=dt)
            assert_array_equal(B, A.dot(A))

            # But scipy sqrtm is not clever enough to find it.
            B_sqrtm, info = sqrtm(B, disp=False)
            assert_(np.isnan(B_sqrtm).all())

    def test_disp(self):
        # With disp=True only the root is returned (no info tuple).
        np.random.seed(1234)

        A = np.random.rand(3, 3)
        B = sqrtm(A, disp=True)
        assert_allclose(B.dot(B), A)

    def test_opposite_sign_complex_eigenvalues(self):
        M = [[2j, 4], [0, -2j]]
        R = [[1+1j, 2], [0, 1-1j]]
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)

    def test_gh4866(self):
        # Rank-deficient symmetric matrix with an exact root.
        M = np.array([[1, 0, 0, 1],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0],
                      [1, 0, 0, 1]])
        R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0],
                      [sqrt(0.5), 0, 0, sqrt(0.5)]])
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)

    def test_gh5336(self):
        # Singular diagonal matrix.
        M = np.diag([2, 1, 0])
        R = np.diag([sqrt(2), 1, 0])
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)

    def test_gh7839(self):
        # The zero matrix is its own square root.
        M = np.zeros((2, 2))
        R = np.zeros((2, 2))
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)
+
+
class TestFractionalMatrixPower(object):
    """Tests for ``scipy.linalg.fractional_matrix_power``."""

    def test_round_trip_random_complex(self):
        # (M ** (1/p)) ** p should recover M for random complex matrices.
        np.random.seed(1234)
        for p in range(1, 5):
            for n in range(1, 5):
                M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
                for scale in np.logspace(-4, 4, 9):
                    M = M_unscaled * scale
                    M_root = fractional_matrix_power(M, 1/p)
                    M_round_trip = np.linalg.matrix_power(M_root, p)
                    assert_allclose(M_round_trip, M)

    def test_round_trip_random_float(self):
        # This test is more annoying because it can hit the branch cut;
        # this happens when the matrix has an eigenvalue
        # with no imaginary component and with a real negative component,
        # and it means that the principal branch does not exist.
        np.random.seed(1234)
        for p in range(1, 5):
            for n in range(1, 5):
                M_unscaled = np.random.randn(n, n)
                for scale in np.logspace(-4, 4, 9):
                    M = M_unscaled * scale
                    M_root = fractional_matrix_power(M, 1/p)
                    M_round_trip = np.linalg.matrix_power(M_root, p)
                    assert_allclose(M_round_trip, M)

    def test_larger_abs_fractional_matrix_powers(self):
        # Consistency between a fractional power and an integer power
        # of the corresponding root, for |p| > 1.
        np.random.seed(1234)
        for n in (2, 3, 5):
            for i in range(10):
                M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
                M_one_fifth = fractional_matrix_power(M, 0.2)
                # Test the round trip.
                M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
                assert_allclose(M, M_round_trip)
                # Test a large abs fractional power.
                X = fractional_matrix_power(M, -5.4)
                Y = np.linalg.matrix_power(M_one_fifth, -27)
                assert_allclose(X, Y)
                # Test another large abs fractional power.
                X = fractional_matrix_power(M, 3.8)
                Y = np.linalg.matrix_power(M_one_fifth, 19)
                assert_allclose(X, Y)

    def test_random_matrices_and_powers(self):
        # Each independent iteration of this fuzz test picks random parameters.
        # It tries to hit some edge cases.
        np.random.seed(1234)
        nsamples = 20
        for i in range(nsamples):
            # Sample a matrix size and a random real power.
            n = random.randrange(1, 5)
            p = np.random.randn()

            # Sample a random real or complex matrix.
            matrix_scale = np.exp(random.randrange(-4, 5))
            A = np.random.randn(n, n)
            if random.choice((True, False)):
                A = A + 1j * np.random.randn(n, n)
            A = A * matrix_scale

            # Check a couple of analytically equivalent ways
            # to compute the fractional matrix power.
            # These can be compared because they both use the principal branch.
            A_power = fractional_matrix_power(A, p)
            A_logm, info = logm(A, disp=False)
            A_power_expm_logm = expm(A_logm * p)
            assert_allclose(A_power, A_power_expm_logm)

    def test_al_mohy_higham_2012_experiment_1(self):
        # Fractional powers of a tricky upper triangular matrix.
        A = _get_al_mohy_higham_2012_experiment_1()

        # Test remainder matrix power.
        A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
        A_sqrtm, info = sqrtm(A, disp=False)
        A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
        A_power = fractional_matrix_power(A, 0.5)
        assert_array_equal(A_rem_power, A_power)
        assert_allclose(A_sqrtm, A_power)
        assert_allclose(A_sqrtm, A_funm_sqrt)

        # Test more fractional powers.
        for p in (1/2, 5/3):
            A_power = fractional_matrix_power(A, p)
            A_round_trip = fractional_matrix_power(A_power, 1/p)
            assert_allclose(A_round_trip, A, rtol=1e-2)
            assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))

    def test_briggs_helper_function(self):
        # The Briggs helper computes a**(2**-k) - 1 accurately.
        np.random.seed(1234)
        for a in np.random.randn(10) + 1j * np.random.randn(10):
            for k in range(5):
                x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
                x_expected = a ** np.exp2(-k) - 1
                assert_allclose(x_observed, x_expected)

    def test_type_preservation_and_conversion(self):
        # The fractional_matrix_power matrix function should preserve
        # the type of a matrix whose eigenvalues
        # are positive with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))

            # Check various positive and negative powers
            # with absolute values bigger and smaller than 1.
            for p in (-2.4, -0.9, 0.2, 3.3):

                # check float type preservation
                A = np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char not in complex_dtype_chars)

                # check complex type preservation
                A = np.array(matrix_as_list, dtype=complex)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)

                # check float->complex for the matrix negation
                A = -np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)

    def test_type_conversion_mixed_sign_or_complex_spectrum(self):
        # Inputs with negative or complex eigenvalues must yield
        # complex-typed powers regardless of input dtype.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))

            # Check various positive and negative powers
            # with absolute values bigger and smaller than 1.
            for p in (-2.4, -0.9, 0.2, 3.3):

                # check complex->complex
                A = np.array(matrix_as_list, dtype=complex)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)

                # check float->complex
                A = np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)

    @pytest.mark.xfail(reason='Too unstable across LAPACKs.')
    def test_singular(self):
        # Negative fractional powers do not work with singular matrices.
        for matrix_as_list in (
                [[0, 0], [0, 0]],
                [[1, 1], [1, 1]],
                [[1, 2], [3, 6]],
                [[0, 0, 0], [0, 1, 1], [0, -1, 1]]):

            # Check fractional powers both for float and for complex types.
            for newtype in (float, complex):
                A = np.array(matrix_as_list, dtype=newtype)
                for p in (-0.7, -0.9, -2.4, -1.3):
                    A_power = fractional_matrix_power(A, p)
                    assert_(np.isnan(A_power).all())
                for p in (0.2, 1.43):
                    A_power = fractional_matrix_power(A, p)
                    A_round_trip = fractional_matrix_power(A_power, 1/p)
                    assert_allclose(A_round_trip, A)

    def test_opposite_sign_complex_eigenvalues(self):
        M = [[2j, 4], [0, -2j]]
        R = [[1+1j, 2], [0, 1-1j]]
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
+
+
+class TestExpM(object):
+ def test_zero(self):
+ a = array([[0.,0],[0,0]])
+ assert_array_almost_equal(expm(a),[[1,0],[0,1]])
+
+ def test_single_elt(self):
+ # See gh-5853
+ from scipy.sparse import csc_matrix
+
+ vOne = -2.02683397006j
+ vTwo = -2.12817566856j
+
+ mOne = csc_matrix([[vOne]], dtype='complex')
+ mTwo = csc_matrix([[vTwo]], dtype='complex')
+
+ outOne = expm(mOne)
+ outTwo = expm(mTwo)
+
+ assert_equal(type(outOne), type(mOne))
+ assert_equal(type(outTwo), type(mTwo))
+
+ assert_allclose(outOne[0, 0], complex(-0.44039415155949196,
+ -0.8978045395698304))
+ assert_allclose(outTwo[0, 0], complex(-0.52896401032626006,
+ -0.84864425749518878))
+
+ def test_empty_matrix_input(self):
+ # handle gh-11082
+ A = np.zeros((0, 0))
+ result = expm(A)
+ assert result.size == 0
+
+
class TestExpmFrechet(object):
    """Tests for ``expm_frechet``, the Frechet derivative of ``expm``.

    The reference identity used throughout: for the block matrix
    M = [[A, E], [0, A]], the top-right block of expm(M) equals the
    Frechet derivative of expm at A in direction E.
    """

    def test_expm_frechet(self):
        # a test of the basic functionality
        M = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
            ], dtype=float)
        A = np.array([
            [1, 2],
            [5, 6],
            ], dtype=float)
        E = np.array([
            [3, 4],
            [7, 8],
            ], dtype=float)
        expected_expm = scipy.linalg.expm(A)
        expected_frechet = scipy.linalg.expm(M)[:2, 2:]
        for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}):
            observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)

    def test_small_norm_expm_frechet(self):
        # methodically test matrices with a range of norms, for better coverage
        M_original = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
            ], dtype=float)
        A_original = np.array([
            [1, 2],
            [5, 6],
            ], dtype=float)
        E_original = np.array([
            [3, 4],
            [7, 8],
            ], dtype=float)
        A_original_norm_1 = scipy.linalg.norm(A_original, 1)
        selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
        m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
        for ma, mb in m_neighbor_pairs:
            # Scale the input so its 1-norm falls between the thresholds
            # that select Pade approximant orders ma and mb.
            ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
            ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
            target_norm_1 = 0.5 * (ell_a + ell_b)
            scale = target_norm_1 / A_original_norm_1
            M = scale * M_original
            A = scale * A_original
            E = scale * E_original
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:2, 2:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)

    def test_fuzz(self):
        # try a bunch of crazy inputs
        rfuncs = (
                np.random.uniform,
                np.random.normal,
                np.random.standard_cauchy,
                np.random.exponential)
        ntests = 100
        for i in range(ntests):
            rfunc = random.choice(rfuncs)
            target_norm_1 = random.expovariate(1.0)
            n = random.randrange(2, 16)
            A_original = rfunc(size=(n,n))
            E_original = rfunc(size=(n,n))
            A_original_norm_1 = scipy.linalg.norm(A_original, 1)
            scale = target_norm_1 / A_original_norm_1
            A = scale * A_original
            E = scale * E_original
            # Compare against the block-matrix identity directly.
            M = np.vstack([
                np.hstack([A, E]),
                np.hstack([np.zeros_like(A), A])])
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:n, n:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)

    def test_problematic_matrix(self):
        # this test case uncovered a bug which has since been fixed
        A = np.array([
                [1.50591997, 1.93537998],
                [0.41203263, 0.23443516],
                ], dtype=float)
        E = np.array([
                [1.87864034, 2.07055038],
                [1.34102727, 0.67341123],
                ], dtype=float)
        scipy.linalg.norm(A, 1)
        sps_expm, sps_frechet = expm_frechet(
                A, E, method='SPS')
        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
                A, E, method='blockEnlarge')
        assert_allclose(sps_expm, blockEnlarge_expm)
        assert_allclose(sps_frechet, blockEnlarge_frechet)

    @pytest.mark.slow
    @pytest.mark.skip(reason='this test is deliberately slow')
    def test_medium_matrix(self):
        # profile this to see the speed difference
        n = 1000
        A = np.random.exponential(size=(n, n))
        E = np.random.exponential(size=(n, n))
        sps_expm, sps_frechet = expm_frechet(
                A, E, method='SPS')
        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
                A, E, method='blockEnlarge')
        assert_allclose(sps_expm, blockEnlarge_expm)
        assert_allclose(sps_frechet, blockEnlarge_frechet)
+
+
+def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
+ p = np.reshape(p, A.shape)
+ p_norm = norm(p)
+ perturbation = eps * p * (A_norm / p_norm)
+ X_prime = expm(A + perturbation)
+ scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
+ return -scaled_relative_error
+
+
+def _normalized_like(A, B):
+ return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
+
+
+def _relative_error(f, A, perturbation):
+ X = f(A)
+ X_prime = f(A + perturbation)
+ return norm(X_prime - X) / norm(X)
+
+
class TestExpmConditionNumber(object):
    """Tests for ``expm_cond``, the relative condition number of expm."""

    def test_expm_cond_smoke(self):
        # The condition number must be strictly positive.
        np.random.seed(1234)
        for n in range(1, 4):
            A = np.random.randn(n, n)
            kappa = expm_cond(A)
            assert_array_less(0, kappa)

    def test_expm_bad_condition_number(self):
        # A known pathologically ill-conditioned upper triangular matrix.
        A = np.array([
            [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
            [0, -1.201010529, 9.634696872e4, -4.681048289e9],
            [0, 0, -1.132893222, 9.532491830e4],
            [0, 0, 0, -1.179475332],
            ])
        kappa = expm_cond(A)
        assert_array_less(1e36, kappa)

    def test_univariate(self):
        # For a 1x1 matrix [[x]] the condition number is exactly |x|.
        np.random.seed(12345)
        for x in np.linspace(-5, 5, num=11):
            A = np.array([[x]])
            assert_allclose(expm_cond(A), abs(x))
        for x in np.logspace(-2, 2, num=11):
            A = np.array([[x]])
            assert_allclose(expm_cond(A), abs(x))
        for i in range(10):
            A = np.random.randn(1, 1)
            assert_allclose(expm_cond(A), np.absolute(A)[0, 0])

    @pytest.mark.slow
    def test_expm_cond_fuzz(self):
        # Search numerically for a worst-case perturbation and verify it
        # is consistent with the reported condition number.
        np.random.seed(12345)
        eps = 1e-5
        nsamples = 10
        for i in range(nsamples):
            n = np.random.randint(2, 5)
            A = np.random.randn(n, n)
            A_norm = scipy.linalg.norm(A)
            X = expm(A)
            X_norm = scipy.linalg.norm(X)
            kappa = expm_cond(A)

            # Look for the small perturbation that gives the greatest
            # relative error.
            f = functools.partial(_help_expm_cond_search,
                    A, A_norm, X, X_norm, eps)
            guess = np.ones(n*n)
            out = minimize(f, guess, method='L-BFGS-B')
            xopt = out.x
            yopt = f(xopt)
            p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
            p_best_relerr = _relative_error(expm, A, p_best)
            assert_allclose(p_best_relerr, -yopt * eps)

            # Check that the identified perturbation indeed gives greater
            # relative error than random perturbations with similar norms.
            for j in range(5):
                p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
                assert_allclose(norm(p_best), norm(p_rand))
                p_rand_relerr = _relative_error(expm, A, p_rand)
                assert_array_less(p_rand_relerr, p_best_relerr)

            # The greatest relative error should not be much greater than
            # eps times the condition number kappa.
            # In the limit as eps approaches zero it should never be greater.
            assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
+
+
+class TestKhatriRao(object):
+
+ def test_basic(self):
+ a = khatri_rao(array([[1, 2], [3, 4]]),
+ array([[5, 6], [7, 8]]))
+
+ assert_array_equal(a, array([[5, 12],
+ [7, 16],
+ [15, 24],
+ [21, 32]]))
+
+ b = khatri_rao(np.empty([2, 2]), np.empty([2, 2]))
+ assert_array_equal(b.shape, (4, 2))
+
+ def test_number_of_columns_equality(self):
+ with pytest.raises(ValueError):
+ a = array([[1, 2, 3],
+ [4, 5, 6]])
+ b = array([[1, 2],
+ [3, 4]])
+ khatri_rao(a, b)
+
+ def test_to_assure_2d_array(self):
+ with pytest.raises(ValueError):
+ # both arrays are 1-D
+ a = array([1, 2, 3])
+ b = array([4, 5, 6])
+ khatri_rao(a, b)
+
+ with pytest.raises(ValueError):
+ # first array is 1-D
+ a = array([1, 2, 3])
+ b = array([
+ [1, 2, 3],
+ [4, 5, 6]
+ ])
+ khatri_rao(a, b)
+
+ with pytest.raises(ValueError):
+ # second array is 1-D
+ a = array([
+ [1, 2, 3],
+ [7, 8, 9]
+ ])
+ b = array([4, 5, 6])
+ khatri_rao(a, b)
+
+ def test_equality_of_two_equations(self):
+ a = array([[1, 2], [3, 4]])
+ b = array([[5, 6], [7, 8]])
+
+ res1 = khatri_rao(a, b)
+ res2 = np.vstack([np.kron(a[:, k], b[:, k])
+ for k in range(b.shape[1])]).T
+
+ assert_array_equal(res1, res2)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_matmul_toeplitz.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_matmul_toeplitz.py
new file mode 100644
index 0000000..b480e9d
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_matmul_toeplitz.py
@@ -0,0 +1,125 @@
+"""Test functions for linalg.matmul_toeplitz function
+"""
+
+import numpy as np
+from scipy.linalg import toeplitz, matmul_toeplitz
+
+from pytest import raises as assert_raises
+from numpy.testing import assert_allclose
+
+
class TestMatmulToeplitz:
    """Tests for matmul_toeplitz, the FFT-based Toeplitz matrix product.

    Every case is checked by ``do``, which compares the fast product
    against the dense reference ``toeplitz(c, r) @ x``.
    """

    def setup_method(self):
        self.rng = np.random.RandomState(42)
        self.tolerance = 1.5e-13

    def test_real(self):
        # Collect (x, c, r, check_finite[, workers]) tuples covering scalar,
        # small, large, multi-column, and mismatched-length cases.
        cases = []

        n = 1
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, False))

        n = 2
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, False))

        n = 101
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, True))

        n = 1000
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, 1))
        cases.append((x, c, r, False))

        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
        cases.append((x, c, r, False))

        n = 100
        c = self.rng.normal(size=(n, 1))
        r = self.rng.normal(size=(n, 1))
        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
        cases.append((x, c, r, True))

        # Column only (r inferred), with an explicit workers count.
        n = 100
        c = self.rng.normal(size=(n, 1))
        r = None
        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
        cases.append((x, c, r, True, -1))

        # 1-D right-hand side.
        n = 100
        c = self.rng.normal(size=(n, 1))
        r = None
        x = self.rng.normal(size=n)
        cases.append((x, c, r, False))

        # Non-square Toeplitz matrices (len(c) != len(r)).
        n = 101
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n-27)
        x = self.rng.normal(size=(n-27, 1))
        cases.append((x, c, r, True))

        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n//4)
        x = self.rng.normal(size=(n//4, self.rng.randint(1, 10)))
        cases.append((x, c, r, True))

        # Run every collected case; a plain loop, since the calls are
        # executed for their assertions, not for a result.
        for case in cases:
            self.do(*case)

    def test_complex(self):
        # Square complex Toeplitz matrix, multiple right-hand columns.
        n = 127
        c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
        r = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
        x = self.rng.normal(size=(n, 3)) + self.rng.normal(size=(n, 3))*1j
        self.do(x, c, r, False)

        # Non-square complex Toeplitz matrix.
        n = 100
        c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
        r = self.rng.normal(size=(n//2, 1)) +\
            self.rng.normal(size=(n//2, 1))*1j
        x = self.rng.normal(size=(n//2, 3)) +\
            self.rng.normal(size=(n//2, 3))*1j
        self.do(x, c, r, False)

    def test_exceptions(self):
        # Shape mismatches between (c, r) and x must raise ValueError.
        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=2*n)
        x = self.rng.normal(size=n)
        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)

        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n)
        x = self.rng.normal(size=n-1)
        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)

        n = 100
        c = self.rng.normal(size=n)
        r = self.rng.normal(size=n//2)
        x = self.rng.normal(size=n//2-1)
        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)

    # For toeplitz matrices, matmul_toeplitz() should be equivalent to @.
    def do(self, x, c, r=None, check_finite=False, workers=None):
        if r is None:
            actual = matmul_toeplitz(c, x, check_finite, workers)
        else:
            # Forward workers here too, for consistency with the branch
            # above (it was previously dropped, silently using the default).
            actual = matmul_toeplitz((c, r), x, check_finite, workers)
        desired = toeplitz(c, r) @ x
        assert_allclose(actual, desired,
                        rtol=self.tolerance, atol=self.tolerance)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_procrustes.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_procrustes.py
new file mode 100644
index 0000000..ef2d768
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_procrustes.py
@@ -0,0 +1,191 @@
+from itertools import product, permutations
+
+import numpy as np
+from numpy.testing import assert_array_less, assert_allclose
+from pytest import raises as assert_raises
+
+from scipy.linalg import inv, eigh, norm
+from scipy.linalg import orthogonal_procrustes
+from scipy.sparse.sputils import matrix
+
+
def test_orthogonal_procrustes_ndim_too_large():
    # 3-D inputs must be rejected.
    np.random.seed(1234)
    A = np.random.randn(3, 4, 5)
    B = np.random.randn(3, 4, 5)
    assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
def test_orthogonal_procrustes_ndim_too_small():
    # 1-D inputs must be rejected.
    np.random.seed(1234)
    A = np.random.randn(3)
    B = np.random.randn(3)
    assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
def test_orthogonal_procrustes_shape_mismatch():
    # Every ordered pair of differing shapes must be rejected.
    np.random.seed(1234)
    shapes = ((3, 3), (3, 4), (4, 3), (4, 4))
    for shape_a, shape_b in permutations(shapes, 2):
        A = np.random.randn(*shape_a)
        B = np.random.randn(*shape_b)
        assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
def test_orthogonal_procrustes_checkfinite_exception():
    # Non-finite entries in either operand must be rejected.
    np.random.seed(1234)
    m, n = 2, 3
    clean_a = np.random.randn(m, n)
    clean_b = np.random.randn(m, n)

    def _poisoned(M, value):
        out = M.copy()
        out[1, 2] = value
        return out

    for bad_value in (np.inf, -np.inf, np.nan):
        dirty_a = _poisoned(clean_a, bad_value)
        dirty_b = _poisoned(clean_b, bad_value)
        for A, B in ((clean_a, dirty_b), (dirty_a, clean_b), (dirty_a, dirty_b)):
            assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
def test_orthogonal_procrustes_scale_invariance():
    # The recovered rotation R must not depend on the overall scaling
    # of either input matrix (only the scale output s changes).
    np.random.seed(1234)
    m, n = 4, 3
    for i in range(3):
        A_orig = np.random.randn(m, n)
        B_orig = np.random.randn(m, n)
        R_orig, s = orthogonal_procrustes(A_orig, B_orig)
        # Squaring guarantees strictly positive scale factors.
        for A_scale in np.square(np.random.randn(3)):
            for B_scale in np.square(np.random.randn(3)):
                R, s = orthogonal_procrustes(A_orig * A_scale, B_orig * B_scale)
                assert_allclose(R, R_orig)
+
+
def test_orthogonal_procrustes_array_conversion():
    # ndarray, nested list, and matrix inputs must all give the same
    # transformed result A @ R.
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        A_arr = np.random.randn(m, n)
        B_arr = np.random.randn(m, n)
        As = (A_arr, A_arr.tolist(), matrix(A_arr))
        Bs = (B_arr, B_arr.tolist(), matrix(B_arr))
        R_arr, s = orthogonal_procrustes(A_arr, B_arr)
        AR_arr = A_arr.dot(R_arr)
        for A, B in product(As, Bs):
            R, s = orthogonal_procrustes(A, B)
            AR = A_arr.dot(R)
            assert_allclose(AR, AR_arr)
+
+
def test_orthogonal_procrustes():
    # End-to-end check: recover a known orthogonal map, then confirm the
    # fitted map beats it on a perturbed input.
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        # Sample a random target matrix.
        B = np.random.randn(m, n)
        # Sample a random orthogonal matrix
        # by computing eigh of a sampled symmetric matrix.
        X = np.random.randn(n, n)
        w, V = eigh(X.T + X)
        assert_allclose(inv(V), V.T)
        # Compute a matrix with a known orthogonal transformation that gives B.
        A = np.dot(B, V.T)
        # Check that an orthogonal transformation from A to B can be recovered.
        R, s = orthogonal_procrustes(A, B)
        assert_allclose(inv(R), R.T)
        assert_allclose(A.dot(R), B)
        # Create a perturbed input matrix.
        A_perturbed = A + 1e-2 * np.random.randn(m, n)
        # Check that the orthogonal procrustes function can find an orthogonal
        # transformation that is better than the orthogonal transformation
        # computed from the original input matrix.
        R_prime, s = orthogonal_procrustes(A_perturbed, B)
        assert_allclose(inv(R_prime), R_prime.T)
        # Compute the naive and optimal transformations of the perturbed input.
        naive_approx = A_perturbed.dot(R)
        optim_approx = A_perturbed.dot(R_prime)
        # Compute the Frobenius norm errors of the matrix approximations.
        naive_approx_error = norm(naive_approx - B, ord='fro')
        optim_approx_error = norm(optim_approx - B, ord='fro')
        # Check that the orthogonal Procrustes approximation is better.
        assert_array_less(optim_approx_error, naive_approx_error)
+
+
+def _centered(A):
+ mu = A.mean(axis=0)
+ return A - mu, mu
+
+
def test_orthogonal_procrustes_exact_example():
    # A small exact application: the square a-b-c-d in the second quadrant
    # maps onto the diamond w-x-y-z via translation, scaling, reflection,
    # and rotation, so the fitted transform reproduces the target exactly.
    source_points = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
    target_points = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float)
    A, A_mu = _centered(source_points)
    B, B_mu = _centered(target_points)
    R, s = orthogonal_procrustes(A, B)
    scale = s / np.square(norm(A))
    reconstructed = scale * np.dot(A, R) + B_mu
    assert_allclose(reconstructed, target_points, atol=1e-8)
+
+
def test_orthogonal_procrustes_stretched_example():
    # Try again with a target with a stretched y axis.
    # The fit is no longer exact; check the known best approximation
    # and that the disparity measure is symmetric in A and B.
    A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
    B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float)
    A, A_mu = _centered(A_orig)
    B, B_mu = _centered(B_orig)
    R, s = orthogonal_procrustes(A, B)
    scale = s / np.square(norm(A))
    B_approx = scale * np.dot(A, R) + B_mu
    expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float)
    assert_allclose(B_approx, expected, atol=1e-8)
    # Check disparity symmetry.
    expected_disparity = 0.4501246882793018
    AB_disparity = np.square(norm(B_approx - B_orig) / norm(B))
    assert_allclose(AB_disparity, expected_disparity)
    R, s = orthogonal_procrustes(B, A)
    scale = s / np.square(norm(B))
    A_approx = scale * np.dot(B, R) + A_mu
    BA_disparity = np.square(norm(A_approx - A_orig) / norm(A))
    assert_allclose(BA_disparity, expected_disparity)
+
+
def test_orthogonal_procrustes_skbio_example():
    # This transformation is also exact.
    # It uses translation, scaling, and reflection.
    #
    #   |
    #   | a
    #   | b
    #   | c d
    # --+---------
    #   |
    #   |        w
    #   |
    #   |        x
    #   |
    #   |   z   y
    #   |
    #
    A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float)
    B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
    # Reference values taken from the scikit-bio procrustes example.
    B_standardized = np.array([
        [-0.13363062, 0.6681531],
        [-0.13363062, 0.13363062],
        [-0.13363062, -0.40089186],
        [0.40089186, -0.40089186]])
    A, A_mu = _centered(A_orig)
    B, B_mu = _centered(B_orig)
    R, s = orthogonal_procrustes(A, B)
    scale = s / np.square(norm(A))
    B_approx = scale * np.dot(A, R) + B_mu
    assert_allclose(B_approx, B_orig)
    assert_allclose(B / norm(B), B_standardized)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_sketches.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_sketches.py
new file mode 100644
index 0000000..d460aca
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_sketches.py
@@ -0,0 +1,118 @@
+"""Tests for _sketches.py."""
+
+import numpy as np
+from numpy.testing import assert_, assert_equal
+from scipy.linalg import clarkson_woodruff_transform
+from scipy.linalg._sketches import cwt_matrix
+from scipy.sparse import issparse, rand
+from scipy.sparse.linalg import norm
+
+
class TestClarksonWoodruffTransform(object):
    """
    Testing the Clarkson Woodruff Transform
    """
    # set seed for generating test matrices
    rng = np.random.RandomState(seed=1179103485)

    # Test matrix parameters
    n_rows = 2000
    n_cols = 100
    density = 0.1

    # Sketch matrix dimensions
    n_sketch_rows = 200

    # Seeds to test with
    seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
             1302443994, 1521083269, 1501189312, 1126232505, 1533465685]

    # One dense and three sparse-format test matrices, all built from the
    # class-level rng (order of construction matters for reproducibility).
    A_dense = rng.randn(n_rows, n_cols)
    A_csc = rand(
        n_rows, n_cols, density=density, format='csc', random_state=rng,
    )
    A_csr = rand(
        n_rows, n_cols, density=density, format='csr', random_state=rng,
    )
    A_coo = rand(
        n_rows, n_cols, density=density, format='coo', random_state=rng,
    )

    # Collect the test matrices
    test_matrices = [
        A_dense, A_csc, A_csr, A_coo,
    ]

    # Test vector with norm ~1
    x = rng.randn(n_rows, 1) / np.sqrt(n_rows)

    def test_sketch_dimensions(self):
        # The sketch keeps the column count and shrinks the row count.
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))

    def test_seed_returns_identical_transform_matrix(self):
        # The same seed must produce the same sketching matrix S.
        for A in self.test_matrices:
            for seed in self.seeds:
                S1 = cwt_matrix(
                    self.n_sketch_rows, self.n_rows, seed=seed
                ).todense()
                S2 = cwt_matrix(
                    self.n_sketch_rows, self.n_rows, seed=seed
                ).todense()
                assert_equal(S1, S2)

    def test_seed_returns_identically(self):
        # The same seed must produce the same sketched output.
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch1 = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                sketch2 = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                if issparse(sketch1):
                    sketch1 = sketch1.todense()
                if issparse(sketch2):
                    sketch2 = sketch2.todense()
                assert_equal(sketch1, sketch2)

    def test_sketch_preserves_frobenius_norm(self):
        # Given the probabilistic nature of the sketches
        # we run the test multiple times and check that
        # we pass all/almost all the tries.
        n_errors = 0
        for A in self.test_matrices:
            if issparse(A):
                true_norm = norm(A)
            else:
                true_norm = np.linalg.norm(A)
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed,
                )
                if issparse(sketch):
                    sketch_norm = norm(sketch)
                else:
                    sketch_norm = np.linalg.norm(sketch)

                if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
                    n_errors += 1
        assert_(n_errors == 0)

    def test_sketch_preserves_vector_norm(self):
        # Sketch size chosen from the theoretical bound for
        # epsilon = 0.5 distortion with failure probability 0.01.
        n_errors = 0
        n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
        true_norm = np.linalg.norm(self.x)
        for seed in self.seeds:
            sketch = clarkson_woodruff_transform(
                self.x, n_sketch_rows, seed=seed,
            )
            sketch_norm = np.linalg.norm(sketch)

            if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
                n_errors += 1
        assert_(n_errors == 0)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_solve_toeplitz.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_solve_toeplitz.py
new file mode 100644
index 0000000..dbfa4d3
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_solve_toeplitz.py
@@ -0,0 +1,121 @@
+"""Test functions for linalg._solve_toeplitz module
+"""
+import numpy as np
+from scipy.linalg._solve_toeplitz import levinson
+from scipy.linalg import solve, toeplitz, solve_toeplitz
+from numpy.testing import assert_equal, assert_allclose
+
+import pytest
+from pytest import raises as assert_raises
+
+
+def test_solve_equivalence():
+ # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
+ random = np.random.RandomState(1234)
+ for n in (1, 2, 3, 10):
+ c = random.randn(n)
+ if random.rand() < 0.5:
+ c = c + 1j * random.randn(n)
+ r = random.randn(n)
+ if random.rand() < 0.5:
+ r = r + 1j * random.randn(n)
+ y = random.randn(n)
+ if random.rand() < 0.5:
+ y = y + 1j * random.randn(n)
+
+ # Check equivalence when both the column and row are provided.
+ actual = solve_toeplitz((c,r), y)
+ desired = solve(toeplitz(c, r=r), y)
+ assert_allclose(actual, desired)
+
+ # Check equivalence when the column is provided but not the row.
+ actual = solve_toeplitz(c, b=y)
+ desired = solve(toeplitz(c), y)
+ assert_allclose(actual, desired)
+
+
+def test_multiple_rhs():
+ random = np.random.RandomState(1234)
+ c = random.randn(4)
+ r = random.randn(4)
+ for offset in [0, 1j]:
+ for yshape in ((4,), (4, 3), (4, 3, 2)):
+ y = random.randn(*yshape) + offset
+ actual = solve_toeplitz((c,r), b=y)
+ desired = solve(toeplitz(c, r=r), y)
+ assert_equal(actual.shape, yshape)
+ assert_equal(desired.shape, yshape)
+ assert_allclose(actual, desired)
+
+
+def test_native_list_arguments():
+ c = [1,2,4,7]
+ r = [1,3,9,12]
+ y = [5,1,4,2]
+ actual = solve_toeplitz((c,r), y)
+ desired = solve(toeplitz(c, r=r), y)
+ assert_allclose(actual, desired)
+
+
+def test_zero_diag_error():
+ # The Levinson-Durbin implementation fails when the diagonal is zero.
+ random = np.random.RandomState(1234)
+ n = 4
+ c = random.randn(n)
+ r = random.randn(n)
+ y = random.randn(n)
+ c[0] = 0
+ assert_raises(np.linalg.LinAlgError,
+ solve_toeplitz, (c, r), b=y)
+
+
+def test_wikipedia_counterexample():
+ # The Levinson-Durbin implementation also fails in other cases.
+ # This example is from the talk page of the wikipedia article.
+ random = np.random.RandomState(1234)
+ c = [2, 2, 1]
+ y = random.randn(3)
+ assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y)
+
+
+def test_reflection_coeffs():
+ # check that that the partial solutions are given by the reflection
+ # coefficients
+
+ random = np.random.RandomState(1234)
+ y_d = random.randn(10)
+ y_z = random.randn(10) + 1j
+ reflection_coeffs_d = [1]
+ reflection_coeffs_z = [1]
+ for i in range(2, 10):
+ reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])
+ reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])
+
+ y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
+ y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
+ _, ref_d = levinson(y_d_concat, b=y_d[1:])
+ _, ref_z = levinson(y_z_concat, b=y_z[1:])
+
+ assert_allclose(reflection_coeffs_d, ref_d[:-1])
+ assert_allclose(reflection_coeffs_z, ref_z[:-1])
+
+
+@pytest.mark.xfail(reason='Instability of Levinson iteration')
+def test_unstable():
+ # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
+ # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
+ # Partial Pivoting for Matrices with Displacement Structure"
+ # Mathematics of Computation, 64, 212 (1995), pp 1557-1576
+ # which can be unstable for levinson recursion.
+
+ # other fast toeplitz solvers such as GKO or Burg should be better.
+ random = np.random.RandomState(1234)
+ n = 100
+ c = 0.9 ** (np.arange(n)**2)
+ y = random.randn(n)
+
+ solution1 = solve_toeplitz(c, b=y)
+ solution2 = solve(toeplitz(c), y)
+
+ assert_allclose(solution1, solution2)
+
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_solvers.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_solvers.py
new file mode 100644
index 0000000..ee5dd03
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_solvers.py
@@ -0,0 +1,766 @@
+import os
+import numpy as np
+
+from numpy.testing import assert_array_almost_equal
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.linalg import solve_sylvester
+from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
+from scipy.linalg import solve_continuous_are, solve_discrete_are
+from scipy.linalg import block_diag, solve, LinAlgError
+from scipy.sparse.sputils import matrix
+
+
+def _load_data(name):
+ """
+ Load npz data file under data/
+ Returns a copy of the data, rather than keeping the npz file open.
+ """
+ filename = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', name)
+ with np.load(filename) as f:
+ return dict(f.items())
+
+
+class TestSolveLyapunov(object):
+
+ cases = [
+ (np.array([[1, 2], [3, 4]]),
+ np.array([[9, 10], [11, 12]])),
+ # a, q all complex.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a real; q complex.
+ (np.array([[1.0, 2.0], [3.0, 5.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a complex; q real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[2.0, 2.0], [-1.0, 2.0]])),
+ # An example from Kitagawa, 1977
+ (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
+ [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
+ np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
+ [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
+ # Companion matrix example. a complex; q real; a.shape[0] = 11
+ (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
+ 0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
+ 0.010+0.j],
+ [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
+ 0.000+0.j]]),
+ np.eye(11)),
+ # https://github.com/scipy/scipy/issues/4176
+ (matrix([[0, 1], [-1/2, -1]]),
+ (matrix([0, 3]).T @ matrix([0, 3]).T.T)),
+ # https://github.com/scipy/scipy/issues/4176
+ (matrix([[0, 1], [-1/2, -1]]),
+ (np.array(matrix([0, 3]).T @ matrix([0, 3]).T.T))),
+ ]
+
+ def test_continuous_squareness_and_shape(self):
+ nsq = np.ones((3, 2))
+ sq = np.eye(3)
+ assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
+ assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
+ assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))
+
+ def check_continuous_case(self, a, q):
+ x = solve_continuous_lyapunov(a, q)
+ assert_array_almost_equal(
+ np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
+
+ def check_discrete_case(self, a, q, method=None):
+ x = solve_discrete_lyapunov(a, q, method=method)
+ assert_array_almost_equal(
+ np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)
+
+ def test_cases(self):
+ for case in self.cases:
+ self.check_continuous_case(case[0], case[1])
+ self.check_discrete_case(case[0], case[1])
+ self.check_discrete_case(case[0], case[1], method='direct')
+ self.check_discrete_case(case[0], case[1], method='bilinear')
+
+
+def test_solve_continuous_are():
+ mat6 = _load_data('carex_6_data.npz')
+ mat15 = _load_data('carex_15_data.npz')
+ mat18 = _load_data('carex_18_data.npz')
+ mat19 = _load_data('carex_19_data.npz')
+ mat20 = _load_data('carex_20_data.npz')
+ cases = [
+ # Carex examples taken from (with default parameters):
+ # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
+ # Examples for the Numerical Solution of Algebraic Riccati
+ # Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
+ # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
+ #
+ # The format of the data is (a, b, q, r, knownfailure), where
+ # knownfailure is None if the test passes or a string
+ # indicating the reason for failure.
+ #
+ # Test Case 0: carex #1
+ (np.diag([1.], 1),
+ np.array([[0], [1]]),
+ block_diag(1., 2.),
+ 1,
+ None),
+ # Test Case 1: carex #2
+ (np.array([[4, 3], [-4.5, -3.5]]),
+ np.array([[1], [-1]]),
+ np.array([[9, 6], [6, 4.]]),
+ 1,
+ None),
+ # Test Case 2: carex #3
+ (np.array([[0, 1, 0, 0],
+ [0, -1.89, 0.39, -5.53],
+ [0, -0.034, -2.98, 2.43],
+ [0.034, -0.0011, -0.99, -0.21]]),
+ np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
+ np.array([[2.313, 2.727, 0.688, 0.023],
+ [2.727, 4.271, 1.148, 0.323],
+ [0.688, 1.148, 0.313, 0.102],
+ [0.023, 0.323, 0.102, 0.083]]),
+ np.eye(2),
+ None),
+ # Test Case 3: carex #4
+ (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
+ [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
+ [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
+ [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
+ [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
+ [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
+ [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
+ [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
+ np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
+ [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
+ ).T * 0.001,
+ np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
+ [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
+ [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
+ np.eye(2),
+ None),
+ # Test Case 4: carex #5
+ (np.array(
+ [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
+ [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
+ [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
+ [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
+ [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
+ [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
+ [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
+ [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
+ [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
+ np.array([[0.010, -0.011, -0.151],
+ [0.003, -0.021, 0.000],
+ [0.009, -0.059, 0.000],
+ [0.024, -0.162, 0.000],
+ [0.068, -0.445, 0.000],
+ [0.000, 0.000, 0.000],
+ [0.000, 0.000, 0.000],
+ [0.000, 0.000, 0.000],
+ [0.000, 0.000, 0.000]]),
+ np.eye(9),
+ np.eye(3),
+ None),
+ # Test Case 5: carex #6
+ (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
+ # Test Case 6: carex #7
+ (np.array([[1, 0], [0, -2.]]),
+ np.array([[1e-6], [0]]),
+ np.ones((2, 2)),
+ 1.,
+ 'Bad residual accuracy'),
+ # Test Case 7: carex #8
+ (block_diag(-0.1, -0.02),
+ np.array([[0.100, 0.000], [0.001, 0.010]]),
+ np.array([[100, 1000], [1000, 10000]]),
+ np.ones((2, 2)) + block_diag(1e-6, 0),
+ None),
+ # Test Case 8: carex #9
+ (np.array([[0, 1e6], [0, 0]]),
+ np.array([[0], [1.]]),
+ np.eye(2),
+ 1.,
+ None),
+ # Test Case 9: carex #10
+ (np.array([[1.0000001, 1], [1., 1.0000001]]),
+ np.eye(2),
+ np.eye(2),
+ np.eye(2),
+ None),
+ # Test Case 10: carex #11
+ (np.array([[3, 1.], [4, 2]]),
+ np.array([[1], [1]]),
+ np.array([[-11, -5], [-5, -2.]]),
+ 1.,
+ None),
+ # Test Case 11: carex #12
+ (np.array([[7000000., 2000000., -0.],
+ [2000000., 6000000., -2000000.],
+ [0., -2000000., 5000000.]]) / 3,
+ np.eye(3),
+ np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
+ np.diag([1e-6, 1, 1e6])).dot(
+ np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
+ np.eye(3) * 1e6,
+ 'Bad Residual Accuracy'),
+ # Test Case 12: carex #13
+ (np.array([[0, 0.4, 0, 0],
+ [0, 0, 0.345, 0],
+ [0, -0.524e6, -0.465e6, 0.262e6],
+ [0, 0, 0, -1e6]]),
+ np.array([[0, 0, 0, 1e6]]).T,
+ np.diag([1, 0, 1, 0]),
+ 1.,
+ None),
+ # Test Case 13: carex #14
+ (np.array([[-1e-6, 1, 0, 0],
+ [-1, -1e-6, 0, 0],
+ [0, 0, 1e-6, 1],
+ [0, 0, -1, 1e-6]]),
+ np.ones((4, 1)),
+ np.ones((4, 4)),
+ 1.,
+ None),
+ # Test Case 14: carex #15
+ (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
+ # Test Case 15: carex #16
+ (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
+ block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
+ np.eye(64),
+ np.eye(64),
+ np.eye(64),
+ None),
+ # Test Case 16: carex #17
+ (np.diag(np.ones((20, )), 1),
+ np.flipud(np.eye(21, 1)),
+ np.eye(21, 1) * np.eye(21, 1).T,
+ 1,
+ 'Bad Residual Accuracy'),
+ # Test Case 17: carex #18
+ (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
+ # Test Case 18: carex #19
+ (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
+ 'Bad Residual Accuracy'),
+ # Test Case 19: carex #20
+ (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
+ 'Bad Residual Accuracy')
+ ]
+ # Makes the minimum precision requirements customized to the test.
+ # Here numbers represent the number of decimals that agrees with zero
+ # matrix when the solution x is plugged in to the equation.
+ #
+ # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+ #
+ # If the test is failing use "None" for that entry.
+ #
+ min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
+ None, 9, 14, 13, 14, None, 12, None, None)
+
+ def _test_factory(case, dec):
+ """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
+ a, b, q, r, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ x = solve_continuous_are(a, b, q, r)
+ res = x.dot(a) + a.conj().T.dot(x) + q
+ out_fact = x.dot(b)
+ res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
+ assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+ for ind, case in enumerate(cases):
+ _test_factory(case, min_decimal[ind])
+
+
+def test_solve_discrete_are():
+
+ cases = [
+ # Darex examples taken from (with default parameters):
+ # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
+ # Examples for the Numerical Solution of Algebraic Riccati
+ # Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
+ # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
+ # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
+ # Discrete-Time Algebraic Riccati Equation to Enhance Stability
+ # of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
+ #
+ # The format of the data is (a, b, q, r, knownfailure), where
+ # knownfailure is None if the test passes or a string
+ # indicating the reason for failure.
+ #
+ # TEST CASE 0 : Complex a; real b, q, r
+ (np.array([[2, 1-2j], [0, -3j]]),
+ np.array([[0], [1]]),
+ np.array([[1, 0], [0, 2]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 1 :Real a, q, r; complex b
+ (np.array([[2, 1], [0, -1]]),
+ np.array([[-2j], [1j]]),
+ np.array([[1, 0], [0, 2]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 2 : Real a, b; complex q, r
+ (np.array([[3, 1], [0, -1]]),
+ np.array([[1, 2], [1, 3]]),
+ np.array([[1, 1+1j], [1-1j, 2]]),
+ np.array([[2, -2j], [2j, 3]]),
+ None),
+ # TEST CASE 3 : User-reported gh-2251 (Trac #1732)
+ (np.array([[0.63399379, 0.54906824, 0.76253406],
+ [0.5404729, 0.53745766, 0.08731853],
+ [0.27524045, 0.84922129, 0.4681622]]),
+ np.array([[0.96861695], [0.05532739], [0.78934047]]),
+ np.eye(3),
+ np.eye(1),
+ None),
+ # TEST CASE 4 : darex #1
+ (np.array([[4, 3], [-4.5, -3.5]]),
+ np.array([[1], [-1]]),
+ np.array([[9, 6], [6, 4]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 5 : darex #2
+ (np.array([[0.9512, 0], [0, 0.9048]]),
+ np.array([[4.877, 4.877], [-1.1895, 3.569]]),
+ np.array([[0.005, 0], [0, 0.02]]),
+ np.array([[1/3, 0], [0, 3]]),
+ None),
+ # TEST CASE 6 : darex #3
+ (np.array([[2, -1], [1, 0]]),
+ np.array([[1], [0]]),
+ np.array([[0, 0], [0, 1]]),
+ np.array([[0]]),
+ None),
+ # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
+ (np.array([[0, 1], [0, -1]]),
+ np.array([[1, 0], [2, 1]]),
+ np.array([[-4, -4], [-4, 7]]) * (1/11),
+ np.array([[9, 3], [3, 1]]),
+ None),
+ # TEST CASE 8 : darex #5
+ (np.array([[0, 1], [0, 0]]),
+ np.array([[0], [1]]),
+ np.array([[1, 2], [2, 4]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 9 : darex #6
+ (np.array([[0.998, 0.067, 0, 0],
+ [-.067, 0.998, 0, 0],
+ [0, 0, 0.998, 0.153],
+ [0, 0, -.153, 0.998]]),
+ np.array([[0.0033, 0.0200],
+ [0.1000, -.0007],
+ [0.0400, 0.0073],
+ [-.0028, 0.1000]]),
+ np.array([[1.87, 0, 0, -0.244],
+ [0, 0.744, 0.205, 0],
+ [0, 0.205, 0.589, 0],
+ [-0.244, 0, 0, 1.048]]),
+ np.eye(2),
+ None),
+ # TEST CASE 10 : darex #7
+ (np.array([[0.984750, -.079903, 0.0009054, -.0010765],
+ [0.041588, 0.998990, -.0358550, 0.0126840],
+ [-.546620, 0.044916, -.3299100, 0.1931800],
+ [2.662400, -.100450, -.9245500, -.2632500]]),
+ np.array([[0.0037112, 0.0007361],
+ [-.0870510, 9.3411e-6],
+ [-1.198440, -4.1378e-4],
+ [-3.192700, 9.2535e-4]]),
+ np.eye(4)*1e-2,
+ np.eye(2),
+ None),
+ # TEST CASE 11 : darex #8
+ (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
+ [1.0000000, 0.6000000, 0.8000000, 3.3999820],
+ [0.0000000, 1.0000000, 1.8000000, 3.7999820],
+ [0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
+ np.array([[1.0, -1.0, -1.0, -1.0],
+ [0.0, 1.0, -1.0, -1.0],
+ [0.0, 0.0, 1.0, -1.0],
+ [0.0, 0.0, 0.0, 1.0]]),
+ np.array([[2, 1, 3, 6],
+ [1, 2, 2, 5],
+ [3, 2, 6, 11],
+ [6, 5, 11, 22]]),
+ np.eye(4),
+ None),
+ # TEST CASE 12 : darex #9
+ (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
+ [40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
+ [12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
+ [4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
+ [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
+ np.array([[0.0434, -0.0122],
+ [2.6606, -1.0453],
+ [3.7530, -5.5100],
+ [3.6076, -6.6000],
+ [0.4617, -0.9148]]) * 0.01,
+ np.eye(5),
+ np.eye(2),
+ None),
+ # TEST CASE 13 : darex #10
+ (np.kron(np.eye(2), np.diag([1, 1], k=1)),
+ np.kron(np.eye(2), np.array([[0], [0], [1]])),
+ np.array([[1, 1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, -1, 0],
+ [0, 0, 0, -1, 1, 0],
+ [0, 0, 0, 0, 0, 0]]),
+ np.array([[3, 0], [0, 1]]),
+ None),
+ # TEST CASE 14 : darex #11
+ (0.001 * np.array(
+ [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
+ [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
+ [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
+ [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
+ [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
+ [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
+ [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
+ [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
+ [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
+ np.array([[4.7600, -0.5701, -83.6800],
+ [0.8790, -4.7730, -2.7300],
+ [1.4820, -13.1200, 8.8760],
+ [3.8920, -35.1300, 24.8000],
+ [10.3400, -92.7500, 66.8000],
+ [7.2030, -61.5900, 38.3400],
+ [4.4540, -36.8300, 20.2900],
+ [1.9710, -15.5400, 6.9370],
+ [3.7730, -30.2800, 14.6900]]) * 0.001,
+ np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
+ np.eye(3),
+ None),
+ # TEST CASE 15 : darex #12 - numerically least accurate example
+ (np.array([[0, 1e6], [0, 0]]),
+ np.array([[0], [1]]),
+ np.eye(2),
+ np.array([[1]]),
+ None),
+ # TEST CASE 16 : darex #13
+ (np.array([[16, 10, -2],
+ [10, 13, -8],
+ [-2, -8, 7]]) * (1/9),
+ np.eye(3),
+ 1e6 * np.eye(3),
+ 1e6 * np.eye(3),
+ None),
+ # TEST CASE 17 : darex #14
+ (np.array([[1 - 1/1e8, 0, 0, 0],
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0]]),
+ np.array([[1e-08], [0], [0], [0]]),
+ np.diag([0, 0, 0, 1]),
+ np.array([[0.25]]),
+ None),
+ # TEST CASE 18 : darex #15
+ (np.eye(100, k=1),
+ np.flipud(np.eye(100, 1)),
+ np.eye(100),
+ np.array([[1]]),
+ None)
+ ]
+
+ # Makes the minimum precision requirements customized to the test.
+ # Here numbers represent the number of decimals that agrees with zero
+ # matrix when the solution x is plugged in to the equation.
+ #
+ # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+ #
+ # If the test is failing use "None" for that entry.
+ #
+ min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 14, 13,
+ 14, 13, 13, 14, 12, 2, 5, 6, 10)
+
+ def _test_factory(case, dec):
+ """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+ a, b, q, r, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ x = solve_discrete_are(a, b, q, r)
+ res = a.conj().T.dot(x.dot(a)) - x + q
+ res -= a.conj().T.dot(x.dot(b)).dot(
+ solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
+ )
+ assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+ for ind, case in enumerate(cases):
+ _test_factory(case, min_decimal[ind])
+
+ # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
+ A = np.triu(np.ones((3, 3)))
+ A[0, 1] = -1
+ B = np.array([[1, 1, 0], [0, 0, 1]]).T
+ Q = np.full_like(A, -2) + np.diag([8, -1, -1.9])
+ R = np.diag([-10, 0.1])
+ assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
+
+
+def test_solve_generalized_continuous_are():
+ cases = [
+ # Two random examples differ by s term
+ # in the absence of any literature for demanding examples.
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.zeros((3, 2)),
+ None),
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.ones((3, 2)),
+ None)
+ ]
+
+ min_decimal = (10, 10)
+
+ def _test_factory(case, dec):
+ """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+ a, b, q, r, e, s, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ x = solve_continuous_are(a, b, q, r, e, s)
+ res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
+ out_fact = e.conj().T.dot(x).dot(b) + s
+ res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
+ assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+ for ind, case in enumerate(cases):
+ _test_factory(case, min_decimal[ind])
+
+
+def test_solve_generalized_discrete_are():
+ mat20170120 = _load_data('gendare_20170120_data.npz')
+
+ cases = [
+ # Two random examples differ by s term
+ # in the absence of any literature for demanding examples.
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.zeros((3, 2)),
+ None),
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.ones((3, 2)),
+ None),
+ # user-reported (under PR-6616) 20-Jan-2017
+ # tests against the case where E is None but S is provided
+ (mat20170120['A'],
+ mat20170120['B'],
+ mat20170120['Q'],
+ mat20170120['R'],
+ None,
+ mat20170120['S'],
+ None),
+ ]
+
+ min_decimal = (11, 11, 16)
+
+ def _test_factory(case, dec):
+ """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+ a, b, q, r, e, s, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ x = solve_discrete_are(a, b, q, r, e, s)
+ if e is None:
+ e = np.eye(a.shape[0])
+ if s is None:
+ s = np.zeros_like(b)
+ res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
+ res -= (a.conj().T.dot(x.dot(b)) + s).dot(
+ solve(r+b.conj().T.dot(x.dot(b)),
+ (b.conj().T.dot(x.dot(a)) + s.conj().T)
+ )
+ )
+ assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+ for ind, case in enumerate(cases):
+ _test_factory(case, min_decimal[ind])
+
+
+def test_are_validate_args():
+
+ def test_square_shape():
+ nsq = np.ones((3, 2))
+ sq = np.eye(3)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, nsq, 1, 1, 1)
+ assert_raises(ValueError, x, sq, sq, nsq, 1)
+ assert_raises(ValueError, x, sq, sq, sq, nsq)
+ assert_raises(ValueError, x, sq, sq, sq, sq, nsq)
+
+ def test_compatible_sizes():
+ nsq = np.ones((3, 2))
+ sq = np.eye(4)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, sq, nsq, 1, 1)
+ assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
+ assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
+ assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
+ assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))
+
+ def test_symmetry():
+ nsym = np.arange(9).reshape(3, 3)
+ sym = np.eye(3)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, sym, sym, nsym, sym)
+ assert_raises(ValueError, x, sym, sym, sym, nsym)
+
+ def test_singularity():
+ sing = np.full((3, 3), 1e12)
+ sing[2, 2] -= 1
+ sq = np.eye(3)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, sq, sq, sq, sq, sing)
+
+ assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)
+
+ def test_finiteness():
+ nm = np.full((2, 2), np.nan)
+ sq = np.eye(2)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, nm, sq, sq, sq)
+ assert_raises(ValueError, x, sq, nm, sq, sq)
+ assert_raises(ValueError, x, sq, sq, nm, sq)
+ assert_raises(ValueError, x, sq, sq, sq, nm)
+ assert_raises(ValueError, x, sq, sq, sq, sq, nm)
+ assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)
+
+
+class TestSolveSylvester(object):
+
+ cases = [
+ # a, b, c all real.
+ (np.array([[1, 2], [0, 4]]),
+ np.array([[5, 6], [0, 8]]),
+ np.array([[9, 10], [11, 12]])),
+ # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
+ # quasi-triangular form.
+ (np.array([[1.0, 0, 0, 0],
+ [0, 1.0, 2.0, 0.0],
+ [0, 0, 3.0, -4],
+ [0, 0, 2, 5]]),
+ np.array([[2.0, 0, 0, 1.0],
+ [0, 1.0, 0.0, 0.0],
+ [0, 0, 1.0, -1],
+ [0, 0, 1, 1]]),
+ np.array([[1.0, 0, 0, 0],
+ [0, 1.0, 0, 0],
+ [0, 0, 1.0, 0],
+ [0, 0, 0, 1.0]])),
+ # a, b, c all complex.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[-1.0, 2j], [3.0, 4.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a and b real; c complex.
+ (np.array([[1.0, 2.0], [3.0, 5.0]]),
+ np.array([[-1.0, 0], [3.0, 4.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a and c complex; b real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[-1.0, 0], [3.0, 4.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a complex; b and c real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[-1.0, 0], [3.0, 4.0]]),
+ np.array([[2.0, 2.0], [-1.0, 2.0]])),
+ # not square matrices, real
+ (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
+ np.array([[2, 3], [4, 5]]),
+ np.array([[1, 2], [3, 4], [5, 6]])),
+ # not square matrices, complex
+ (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
+ np.array([[2, 3], [4, 5-1j]]),
+ np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
+ ]
+
+ def check_case(self, a, b, c):
+ x = solve_sylvester(a, b, c)
+ assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
+
+ def test_cases(self):
+ for case in self.cases:
+ self.check_case(case[0], case[1], case[2])
+
+ def test_trivial(self):
+ a = np.array([[1.0, 0.0], [0.0, 1.0]])
+ b = np.array([[1.0]])
+ c = np.array([2.0, 2.0]).reshape(-1, 1)
+ x = solve_sylvester(a, b, c)
+ assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_special_matrices.py b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_special_matrices.py
new file mode 100644
index 0000000..66b1067
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/linalg/tests/test_special_matrices.py
@@ -0,0 +1,690 @@
+
+import pytest
+import numpy as np
+from numpy import arange, add, array, eye, copy, sqrt
+from numpy.testing import (assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose)
+from pytest import raises as assert_raises
+
+from scipy.fft import fft
+from scipy.special import comb
+from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, dft,
+ companion, tri, triu, tril, kron, block_diag,
+ helmert, hilbert, invhilbert, pascal, invpascal,
+ fiedler, fiedler_companion, eigvals,
+ convolution_matrix)
+from numpy.linalg import cond
+
+
+def get_mat(n):
+ data = arange(n)
+ data = add.outer(data, data)
+ return data
+
+
+class TestTri(object):
+ def test_basic(self):
+ assert_equal(tri(4), array([[1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 1, 1, 0],
+ [1, 1, 1, 1]]))
+ assert_equal(tri(4, dtype='f'), array([[1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 1, 1, 0],
+ [1, 1, 1, 1]], 'f'))
+
+ def test_diag(self):
+ assert_equal(tri(4, k=1), array([[1, 1, 0, 0],
+ [1, 1, 1, 0],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]]))
+ assert_equal(tri(4, k=-1), array([[0, 0, 0, 0],
+ [1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 1, 1, 0]]))
+
+ def test_2d(self):
+ assert_equal(tri(4, 3), array([[1, 0, 0],
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 1]]))
+ assert_equal(tri(3, 4), array([[1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 1, 1, 0]]))
+
+ def test_diag2d(self):
+ assert_equal(tri(3, 4, k=2), array([[1, 1, 1, 0],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]]))
+ assert_equal(tri(4, 3, k=-2), array([[0, 0, 0],
+ [0, 0, 0],
+ [1, 0, 0],
+ [1, 1, 0]]))
+
+
+class TestTril(object):
+ def test_basic(self):
+ a = (100*get_mat(5)).astype('l')
+ b = a.copy()
+ for k in range(5):
+ for l in range(k+1, 5):
+ b[k, l] = 0
+ assert_equal(tril(a), b)
+
+ def test_diag(self):
+ a = (100*get_mat(5)).astype('f')
+ b = a.copy()
+ for k in range(5):
+ for l in range(k+3, 5):
+ b[k, l] = 0
+ assert_equal(tril(a, k=2), b)
+ b = a.copy()
+ for k in range(5):
+ for l in range(max((k-1, 0)), 5):
+ b[k, l] = 0
+ assert_equal(tril(a, k=-2), b)
+
+
+class TestTriu(object):
+ def test_basic(self):
+ a = (100*get_mat(5)).astype('l')
+ b = a.copy()
+ for k in range(5):
+ for l in range(k+1, 5):
+ b[l, k] = 0
+ assert_equal(triu(a), b)
+
+ def test_diag(self):
+ a = (100*get_mat(5)).astype('f')
+ b = a.copy()
+ for k in range(5):
+ for l in range(max((k-1, 0)), 5):
+ b[l, k] = 0
+ assert_equal(triu(a, k=2), b)
+ b = a.copy()
+ for k in range(5):
+ for l in range(k+3, 5):
+ b[l, k] = 0
+ assert_equal(triu(a, k=-2), b)
+
+
+class TestToeplitz(object):
+
+ def test_basic(self):
+ y = toeplitz([1, 2, 3])
+ assert_array_equal(y, [[1, 2, 3], [2, 1, 2], [3, 2, 1]])
+ y = toeplitz([1, 2, 3], [1, 4, 5])
+ assert_array_equal(y, [[1, 4, 5], [2, 1, 4], [3, 2, 1]])
+
+ def test_complex_01(self):
+ data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
+ x = copy(data)
+ t = toeplitz(x)
+ # Calling toeplitz should not change x.
+ assert_array_equal(x, data)
+ # According to the docstring, x should be the first column of t.
+ col0 = t[:, 0]
+ assert_array_equal(col0, data)
+ assert_array_equal(t[0, 1:], data[1:].conj())
+
+ def test_scalar_00(self):
+ """Scalar arguments still produce a 2D array."""
+ t = toeplitz(10)
+ assert_array_equal(t, [[10]])
+ t = toeplitz(10, 20)
+ assert_array_equal(t, [[10]])
+
+ def test_scalar_01(self):
+ c = array([1, 2, 3])
+ t = toeplitz(c, 1)
+ assert_array_equal(t, [[1], [2], [3]])
+
+ def test_scalar_02(self):
+ c = array([1, 2, 3])
+ t = toeplitz(c, array(1))
+ assert_array_equal(t, [[1], [2], [3]])
+
+ def test_scalar_03(self):
+ c = array([1, 2, 3])
+ t = toeplitz(c, array([1]))
+ assert_array_equal(t, [[1], [2], [3]])
+
+ def test_scalar_04(self):
+ r = array([10, 2, 3])
+ t = toeplitz(1, r)
+ assert_array_equal(t, [[1, 2, 3]])
+
+
+class TestHankel(object):
+ def test_basic(self):
+ y = hankel([1, 2, 3])
+ assert_array_equal(y, [[1, 2, 3], [2, 3, 0], [3, 0, 0]])
+ y = hankel([1, 2, 3], [3, 4, 5])
+ assert_array_equal(y, [[1, 2, 3], [2, 3, 4], [3, 4, 5]])
+
+
+class TestCirculant(object):
+ def test_basic(self):
+ y = circulant([1, 2, 3])
+ assert_array_equal(y, [[1, 3, 2], [2, 1, 3], [3, 2, 1]])
+
+
+class TestHadamard(object):
+
+ def test_basic(self):
+
+ y = hadamard(1)
+ assert_array_equal(y, [[1]])
+
+ y = hadamard(2, dtype=float)
+ assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]])
+
+ y = hadamard(4)
+ assert_array_equal(y, [[1, 1, 1, 1],
+ [1, -1, 1, -1],
+ [1, 1, -1, -1],
+ [1, -1, -1, 1]])
+
+ assert_raises(ValueError, hadamard, 0)
+ assert_raises(ValueError, hadamard, 5)
+
+
+class TestLeslie(object):
+
+ def test_bad_shapes(self):
+ assert_raises(ValueError, leslie, [[1, 1], [2, 2]], [3, 4, 5])
+ assert_raises(ValueError, leslie, [3, 4, 5], [[1, 1], [2, 2]])
+ assert_raises(ValueError, leslie, [1, 2], [1, 2])
+ assert_raises(ValueError, leslie, [1], [])
+
+ def test_basic(self):
+ a = leslie([1, 2, 3], [0.25, 0.5])
+ expected = array([[1.0, 2.0, 3.0],
+ [0.25, 0.0, 0.0],
+ [0.0, 0.5, 0.0]])
+ assert_array_equal(a, expected)
+
+
+class TestCompanion(object):
+
+ def test_bad_shapes(self):
+ assert_raises(ValueError, companion, [[1, 1], [2, 2]])
+ assert_raises(ValueError, companion, [0, 4, 5])
+ assert_raises(ValueError, companion, [1])
+ assert_raises(ValueError, companion, [])
+
+ def test_basic(self):
+ c = companion([1, 2, 3])
+ expected = array([
+ [-2.0, -3.0],
+ [1.0, 0.0]])
+ assert_array_equal(c, expected)
+
+ c = companion([2.0, 5.0, -10.0])
+ expected = array([
+ [-2.5, 5.0],
+ [1.0, 0.0]])
+ assert_array_equal(c, expected)
+
+
+class TestBlockDiag:
+ def test_basic(self):
+ x = block_diag(eye(2), [[1, 2], [3, 4], [5, 6]], [[1, 2, 3]])
+ assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 2, 0, 0, 0],
+ [0, 0, 3, 4, 0, 0, 0],
+ [0, 0, 5, 6, 0, 0, 0],
+ [0, 0, 0, 0, 1, 2, 3]])
+
+ def test_dtype(self):
+ x = block_diag([[1.5]])
+ assert_equal(x.dtype, float)
+
+ x = block_diag([[True]])
+ assert_equal(x.dtype, bool)
+
+ def test_mixed_dtypes(self):
+ actual = block_diag([[1]], [[1j]])
+ desired = np.array([[1, 0], [0, 1j]])
+ assert_array_equal(actual, desired)
+
+ def test_scalar_and_1d_args(self):
+ a = block_diag(1)
+ assert_equal(a.shape, (1, 1))
+ assert_array_equal(a, [[1]])
+
+ a = block_diag([2, 3], 4)
+ assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
+
+ def test_bad_arg(self):
+ assert_raises(ValueError, block_diag, [[[1]]])
+
+ def test_no_args(self):
+ a = block_diag()
+ assert_equal(a.ndim, 2)
+ assert_equal(a.nbytes, 0)
+
+ def test_empty_matrix_arg(self):
+ # regression test for gh-4596: check the shape of the result
+ # for empty matrix inputs. Empty matrices are no longer ignored
+        # (gh-4908); each is viewed as a shape (1, 0) matrix.
+ a = block_diag([[1, 0], [0, 1]],
+ [],
+ [[2, 3], [4, 5], [6, 7]])
+ assert_array_equal(a, [[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 2, 3],
+ [0, 0, 4, 5],
+ [0, 0, 6, 7]])
+
+ def test_zerosized_matrix_arg(self):
+ # test for gh-4908: check the shape of the result for
+ # zero-sized matrix inputs, i.e. matrices with shape (0,n) or (n,0).
+ # note that [[]] takes shape (1,0)
+ a = block_diag([[1, 0], [0, 1]],
+ [[]],
+ [[2, 3], [4, 5], [6, 7]],
+ np.zeros([0, 2], dtype='int32'))
+ assert_array_equal(a, [[1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 2, 3, 0, 0],
+ [0, 0, 4, 5, 0, 0],
+ [0, 0, 6, 7, 0, 0]])
+
+
+class TestKron:
+
+ def test_basic(self):
+
+ a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]]))
+ assert_array_equal(a, array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]]))
+
+ m1 = array([[1, 2], [3, 4]])
+ m2 = array([[10], [11]])
+ a = kron(m1, m2)
+ expected = array([[10, 20],
+ [11, 22],
+ [30, 40],
+ [33, 44]])
+ assert_array_equal(a, expected)
+
+
+class TestHelmert(object):
+
+ def test_orthogonality(self):
+ for n in range(1, 7):
+ H = helmert(n, full=True)
+ Id = np.eye(n)
+ assert_allclose(H.dot(H.T), Id, atol=1e-12)
+ assert_allclose(H.T.dot(H), Id, atol=1e-12)
+
+ def test_subspace(self):
+ for n in range(2, 7):
+ H_full = helmert(n, full=True)
+ H_partial = helmert(n)
+ for U in H_full[1:, :].T, H_partial.T:
+ C = np.eye(n) - np.full((n, n), 1 / n)
+ assert_allclose(U.dot(U.T), C)
+ assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12)
+
+
+class TestHilbert(object):
+
+ def test_basic(self):
+ h3 = array([[1.0, 1/2., 1/3.],
+ [1/2., 1/3., 1/4.],
+ [1/3., 1/4., 1/5.]])
+ assert_array_almost_equal(hilbert(3), h3)
+
+ assert_array_equal(hilbert(1), [[1.0]])
+
+ h0 = hilbert(0)
+ assert_equal(h0.shape, (0, 0))
+
+
+class TestInvHilbert(object):
+
+ def test_basic(self):
+ invh1 = array([[1]])
+ assert_array_equal(invhilbert(1, exact=True), invh1)
+ assert_array_equal(invhilbert(1), invh1)
+
+ invh2 = array([[4, -6],
+ [-6, 12]])
+ assert_array_equal(invhilbert(2, exact=True), invh2)
+ assert_array_almost_equal(invhilbert(2), invh2)
+
+ invh3 = array([[9, -36, 30],
+ [-36, 192, -180],
+ [30, -180, 180]])
+ assert_array_equal(invhilbert(3, exact=True), invh3)
+ assert_array_almost_equal(invhilbert(3), invh3)
+
+ invh4 = array([[16, -120, 240, -140],
+ [-120, 1200, -2700, 1680],
+ [240, -2700, 6480, -4200],
+ [-140, 1680, -4200, 2800]])
+ assert_array_equal(invhilbert(4, exact=True), invh4)
+ assert_array_almost_equal(invhilbert(4), invh4)
+
+ invh5 = array([[25, -300, 1050, -1400, 630],
+ [-300, 4800, -18900, 26880, -12600],
+ [1050, -18900, 79380, -117600, 56700],
+ [-1400, 26880, -117600, 179200, -88200],
+ [630, -12600, 56700, -88200, 44100]])
+ assert_array_equal(invhilbert(5, exact=True), invh5)
+ assert_array_almost_equal(invhilbert(5), invh5)
+
+ invh17 = array([
+ [289, -41616, 1976760, -46124400, 629598060, -5540462928,
+ 33374693352, -143034400080, 446982500250, -1033026222800,
+ 1774926873720, -2258997839280, 2099709530100, -1384423866000,
+ 613101997800, -163493866080, 19835652870],
+ [-41616, 7990272, -426980160, 10627061760, -151103534400,
+ 1367702848512, -8410422724704, 36616806420480, -115857864064800,
+ 270465047424000, -468580694662080, 600545887119360,
+ -561522320049600, 372133135180800, -165537539406000,
+ 44316454993920, -5395297580640],
+ [1976760, -426980160, 24337869120, -630981792000, 9228108708000,
+ -85267724461920, 532660105897920, -2348052711713280,
+ 7504429831470000, -17664748409880000, 30818191841236800,
+ -39732544853164800, 37341234283298400, -24857330514030000,
+ 11100752642520000, -2982128117299200, 364182586693200],
+ [-46124400, 10627061760, -630981792000, 16826181120000,
+ -251209625940000, 2358021022156800, -14914482965141760,
+ 66409571644416000, -214015221119700000, 507295338950400000,
+ -890303319857952000, 1153715376477081600, -1089119333262870000,
+ 727848632044800000, -326170262829600000, 87894302404608000,
+ -10763618673376800],
+ [629598060, -151103534400, 9228108708000,
+ -251209625940000, 3810012660090000, -36210360321495360,
+ 231343968720664800, -1038687206500944000, 3370739732635275000,
+ -8037460526495400000, 14178080368737885600, -18454939322943942000,
+ 17489975175339030000, -11728977435138600000, 5272370630081100000,
+ -1424711708039692800, 174908803442373000],
+ [-5540462928, 1367702848512, -85267724461920, 2358021022156800,
+ -36210360321495360, 347619459086355456, -2239409617216035264,
+ 10124803292907663360, -33052510749726468000,
+ 79217210949138662400, -140362995650505067440,
+ 183420385176741672960, -174433352415381259200,
+ 117339159519533952000, -52892422160973595200,
+ 14328529177999196160, -1763080738699119840],
+ [33374693352, -8410422724704, 532660105897920,
+ -14914482965141760, 231343968720664800, -2239409617216035264,
+ 14527452132196331328, -66072377044391477760,
+ 216799987176909536400, -521925895055522958000,
+ 928414062734059661760, -1217424500995626443520,
+ 1161358898976091015200, -783401860847777371200,
+ 354015418167362952000, -96120549902411274240,
+ 11851820521255194480],
+ [-143034400080, 36616806420480, -2348052711713280,
+ 66409571644416000, -1038687206500944000, 10124803292907663360,
+ -66072377044391477760, 302045152202932469760,
+ -995510145200094810000, 2405996923185123840000,
+ -4294704507885446054400, 5649058909023744614400,
+ -5403874060541811254400, 3654352703663101440000,
+ -1655137020003255360000, 450325202737117593600,
+ -55630994283442749600],
+ [446982500250, -115857864064800, 7504429831470000,
+ -214015221119700000, 3370739732635275000, -33052510749726468000,
+ 216799987176909536400, -995510145200094810000,
+ 3293967392206196062500, -7988661659013106500000,
+ 14303908928401362270000, -18866974090684772052000,
+ 18093328327706957325000, -12263364009096700500000,
+ 5565847995255512250000, -1517208935002984080000,
+ 187754605706619279900],
+ [-1033026222800, 270465047424000, -17664748409880000,
+ 507295338950400000, -8037460526495400000, 79217210949138662400,
+ -521925895055522958000, 2405996923185123840000,
+ -7988661659013106500000, 19434404971634224000000,
+ -34894474126569249192000, 46141453390504792320000,
+ -44349976506971935800000, 30121928988527376000000,
+ -13697025107665828500000, 3740200989399948902400,
+ -463591619028689580000],
+ [1774926873720, -468580694662080,
+ 30818191841236800, -890303319857952000, 14178080368737885600,
+ -140362995650505067440, 928414062734059661760,
+ -4294704507885446054400, 14303908928401362270000,
+ -34894474126569249192000, 62810053427824648545600,
+ -83243376594051600326400, 80177044485212743068000,
+ -54558343880470209780000, 24851882355348879230400,
+ -6797096028813368678400, 843736746632215035600],
+ [-2258997839280, 600545887119360, -39732544853164800,
+ 1153715376477081600, -18454939322943942000, 183420385176741672960,
+ -1217424500995626443520, 5649058909023744614400,
+ -18866974090684772052000, 46141453390504792320000,
+ -83243376594051600326400, 110552468520163390156800,
+ -106681852579497947388000, 72720410752415168870400,
+ -33177973900974346080000, 9087761081682520473600,
+ -1129631016152221783200],
+ [2099709530100, -561522320049600, 37341234283298400,
+ -1089119333262870000, 17489975175339030000,
+ -174433352415381259200, 1161358898976091015200,
+ -5403874060541811254400, 18093328327706957325000,
+ -44349976506971935800000, 80177044485212743068000,
+ -106681852579497947388000, 103125790826848015808400,
+ -70409051543137015800000, 32171029219823375700000,
+ -8824053728865840192000, 1098252376814660067000],
+ [-1384423866000, 372133135180800,
+ -24857330514030000, 727848632044800000, -11728977435138600000,
+ 117339159519533952000, -783401860847777371200,
+ 3654352703663101440000, -12263364009096700500000,
+ 30121928988527376000000, -54558343880470209780000,
+ 72720410752415168870400, -70409051543137015800000,
+ 48142941226076592000000, -22027500987368499000000,
+ 6049545098753157120000, -753830033789944188000],
+ [613101997800, -165537539406000,
+ 11100752642520000, -326170262829600000, 5272370630081100000,
+ -52892422160973595200, 354015418167362952000,
+ -1655137020003255360000, 5565847995255512250000,
+ -13697025107665828500000, 24851882355348879230400,
+ -33177973900974346080000, 32171029219823375700000,
+ -22027500987368499000000, 10091416708498869000000,
+ -2774765838662800128000, 346146444087219270000],
+ [-163493866080, 44316454993920, -2982128117299200,
+ 87894302404608000, -1424711708039692800,
+ 14328529177999196160, -96120549902411274240,
+ 450325202737117593600, -1517208935002984080000,
+ 3740200989399948902400, -6797096028813368678400,
+ 9087761081682520473600, -8824053728865840192000,
+ 6049545098753157120000, -2774765838662800128000,
+ 763806510427609497600, -95382575704033754400],
+ [19835652870, -5395297580640, 364182586693200, -10763618673376800,
+ 174908803442373000, -1763080738699119840, 11851820521255194480,
+ -55630994283442749600, 187754605706619279900,
+ -463591619028689580000, 843736746632215035600,
+ -1129631016152221783200, 1098252376814660067000,
+ -753830033789944188000, 346146444087219270000,
+ -95382575704033754400, 11922821963004219300]
+ ])
+ assert_array_equal(invhilbert(17, exact=True), invh17)
+ assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)
+
+ def test_inverse(self):
+ for n in range(1, 10):
+ a = hilbert(n)
+ b = invhilbert(n)
+ # The Hilbert matrix is increasingly badly conditioned,
+ # so take that into account in the test
+ c = cond(a)
+ assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
+
+
+class TestPascal(object):
+
+ cases = [
+ (1, array([[1]]), array([[1]])),
+ (2, array([[1, 1],
+ [1, 2]]),
+ array([[1, 0],
+ [1, 1]])),
+ (3, array([[1, 1, 1],
+ [1, 2, 3],
+ [1, 3, 6]]),
+ array([[1, 0, 0],
+ [1, 1, 0],
+ [1, 2, 1]])),
+ (4, array([[1, 1, 1, 1],
+ [1, 2, 3, 4],
+ [1, 3, 6, 10],
+ [1, 4, 10, 20]]),
+ array([[1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 2, 1, 0],
+ [1, 3, 3, 1]])),
+ ]
+
+ def check_case(self, n, sym, low):
+ assert_array_equal(pascal(n), sym)
+ assert_array_equal(pascal(n, kind='lower'), low)
+ assert_array_equal(pascal(n, kind='upper'), low.T)
+ assert_array_almost_equal(pascal(n, exact=False), sym)
+ assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
+ assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
+
+ def test_cases(self):
+ for n, sym, low in self.cases:
+ self.check_case(n, sym, low)
+
+ def test_big(self):
+ p = pascal(50)
+ assert_equal(p[-1, -1], comb(98, 49, exact=True))
+
+ def test_threshold(self):
+ # Regression test. An early version of `pascal` returned an
+ # array of type np.uint64 for n=35, but that data type is too small
+ # to hold p[-1, -1]. The second assert_equal below would fail
+ # because p[-1, -1] overflowed.
+ p = pascal(34)
+ assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
+ p = pascal(35)
+ assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 35")
+
+
+def test_invpascal():
+
+ def check_invpascal(n, kind, exact):
+ ip = invpascal(n, kind=kind, exact=exact)
+ p = pascal(n, kind=kind, exact=exact)
+ # Matrix-multiply ip and p, and check that we get the identity matrix.
+ # We can't use the simple expression e = ip.dot(p), because when
+ # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
+ # np.int64. The product of those dtypes is np.float64, which loses
+ # precision when n is greater than 18. Instead we'll cast both to
+ # object arrays, and then multiply.
+ e = ip.astype(object).dot(p.astype(object))
+ assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" %
+ (n, kind, exact))
+
+ kinds = ['symmetric', 'lower', 'upper']
+
+ ns = [1, 2, 5, 18]
+ for n in ns:
+ for kind in kinds:
+ for exact in [True, False]:
+ check_invpascal(n, kind, exact)
+
+ ns = [19, 34, 35, 50]
+ for n in ns:
+ for kind in kinds:
+ check_invpascal(n, kind, True)
+
+
+def test_dft():
+ m = dft(2)
+ expected = array([[1.0, 1.0], [1.0, -1.0]])
+ assert_array_almost_equal(m, expected)
+ m = dft(2, scale='n')
+ assert_array_almost_equal(m, expected/2.0)
+ m = dft(2, scale='sqrtn')
+ assert_array_almost_equal(m, expected/sqrt(2.0))
+
+ x = array([0, 1, 2, 3, 4, 5, 0, 1])
+ m = dft(8)
+ mx = m.dot(x)
+ fx = fft(x)
+ assert_array_almost_equal(mx, fx)
+
+
+def test_fiedler():
+ f = fiedler([])
+ assert_equal(f.size, 0)
+ f = fiedler([123.])
+ assert_array_equal(f, np.array([[0.]]))
+ f = fiedler(np.arange(1, 7))
+ des = np.array([[0, 1, 2, 3, 4, 5],
+ [1, 0, 1, 2, 3, 4],
+ [2, 1, 0, 1, 2, 3],
+ [3, 2, 1, 0, 1, 2],
+ [4, 3, 2, 1, 0, 1],
+ [5, 4, 3, 2, 1, 0]])
+ assert_array_equal(f, des)
+
+
+def test_fiedler_companion():
+ fc = fiedler_companion([])
+ assert_equal(fc.size, 0)
+ fc = fiedler_companion([1.])
+ assert_equal(fc.size, 0)
+ fc = fiedler_companion([1., 2.])
+ assert_array_equal(fc, np.array([[-2.]]))
+ fc = fiedler_companion([1e-12, 2., 3.])
+ assert_array_almost_equal(fc, companion([1e-12, 2., 3.]))
+ with assert_raises(ValueError):
+ fiedler_companion([0, 1, 2])
+ fc = fiedler_companion([1., -16., 86., -176., 105.])
+ assert_array_almost_equal(eigvals(fc),
+ np.array([7., 5., 3., 1.]))
+
+
+class TestConvolutionMatrix:
+ """
+ Test convolution_matrix vs. numpy.convolve for various parameters.
+ """
+
+ def create_vector(self, n, cpx):
+ """Make a complex or real test vector of length n."""
+ x = np.linspace(-2.5, 2.2, n)
+ if cpx:
+ x = x + 1j*np.linspace(-1.5, 3.1, n)
+ return x
+
+ def test_bad_n(self):
+ # n must be a positive integer
+ with pytest.raises(ValueError, match='n must be a positive integer'):
+ convolution_matrix([1, 2, 3], 0)
+
+ def test_bad_first_arg(self):
+ # first arg must be a 1d array, otherwise ValueError
+ with pytest.raises(ValueError, match='one-dimensional'):
+ convolution_matrix(1, 4)
+
+ def test_empty_first_arg(self):
+ # first arg must have at least one value
+ with pytest.raises(ValueError, match=r'len\(a\)'):
+ convolution_matrix([], 4)
+
+ def test_bad_mode(self):
+ # mode must be in ('full', 'valid', 'same')
+ with pytest.raises(ValueError, match='mode.*must be one of'):
+ convolution_matrix((1, 1), 4, mode='invalid argument')
+
+ @pytest.mark.parametrize('cpx', [False, True])
+ @pytest.mark.parametrize('na', [1, 2, 9])
+ @pytest.mark.parametrize('nv', [1, 2, 9])
+ @pytest.mark.parametrize('mode', [None, 'full', 'valid', 'same'])
+ def test_against_numpy_convolve(self, cpx, na, nv, mode):
+ a = self.create_vector(na, cpx)
+ v = self.create_vector(nv, cpx)
+ if mode is None:
+ y1 = np.convolve(v, a)
+ A = convolution_matrix(a, nv)
+ else:
+ y1 = np.convolve(v, a, mode)
+ A = convolution_matrix(a, nv, mode)
+ y2 = A @ v
+ assert_array_almost_equal(y1, y2)
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/misc/__init__.py b/dem-S-SAR/ISCEApp/_internal/scipy/misc/__init__.py
new file mode 100644
index 0000000..86c59c1
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/misc/__init__.py
@@ -0,0 +1,32 @@
+"""
+==========================================
+Miscellaneous routines (:mod:`scipy.misc`)
+==========================================
+
+.. currentmodule:: scipy.misc
+
+Various utilities that don't have another home.
+
+.. autosummary::
+ :toctree: generated/
+
+ ascent - Get example image for processing
+ central_diff_weights - Weights for an n-point central mth derivative
+ derivative - Find the nth derivative of a function at a point
+ face - Get example image for processing
+ electrocardiogram - Load an example of a 1-D signal.
+
+"""
+
+from . import doccer
+from .common import *
+
+__all__ = ['doccer']
+
+from . import common
+__all__ += common.__all__
+del common
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dem-S-SAR/ISCEApp/_internal/scipy/misc/ascent.dat b/dem-S-SAR/ISCEApp/_internal/scipy/misc/ascent.dat
new file mode 100644
index 0000000..f360246
--- /dev/null
+++ b/dem-S-SAR/ISCEApp/_internal/scipy/misc/ascent.dat
@@ -0,0 +1,749 @@
+]q(]q(KSKSKSKSKSKSKSKRKRKRKRKRKRKRKRKRKRKSKSKSKSKSKSKSKRKRKRKRKRKRKRKRKRKUKVKUKUKUKVKVKVKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKTKUKVKUKUKUKUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYK[KZK[KZKZKZKZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[KZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKaKaKaKcKeKdKdKdKdKeKbK^KOKQKRKTKRKVKTKVKNKRKMKOKIKPKYKXKRKPKUK`KjK[KSKRKUK9K!K$K%K&K&K'K*K0K K
+K
KKKKKCKBKAKEK*KKKK!K)K-K(K)K-K+K"KKKK8KBKK9K2K/K/K+K"KKK!K/K0K$K+K3K5K4K?KGKAK;K9K-K+K+K+K$K8KGKFKFKFKFKFKFKFKFKFKFKGK6KK$KBKIKJKJKHKHKAK9K=K=K=KKKHKFKFKFKFKFKFKFKGKFKGKHK2KK*KEKFKHKIKHKGK?KKdKsKrKtKsKsKsKsKsKsKsKsKsKsKuKuKsKtKuKtKsKtKtKtKtKvKtKsKsKsKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKue]q(KPKQKSKSKSKSKSKRKRKRKRKRKRKRKRKRKRKSKSKSKSKSKSKSKRKRKRKRKRKRKRKRKRKUKVKUKUKUKUKUKUKUKUKUKVKTKUKVKUKUKUKUKUKUKWKXKUKUKUKUKUKUKUKWKWKUKVKXKWKWKUKVKWKWKWKWKWKXKXKWKWKWKWKWKWKWKWKWKWKWKWKZK[K[KYKWKWKWKZKZKZKZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKYKZKZKZKZKZKZKZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKeKdKdKdKdKeKcKaKcK^KNKTKTKVKQKVKTKSKQKUKOKTKIKCKVKZKYKMKCKJKNKVKUKSKPK*K$K&K%K!KKKKKK
+K
+KKKK?KAK@KK=K;K;K?K?K=KK.K-K+K)K KKKK'K'K&K%K)K$K K"K%K%K1K>K(K)K)K+K"KKKK0KDKDKFKGKFKFKFKFKFKGKFKFKFK)KK4KFKGKIKHKFKEK@K;KK=K=K=KK=KK:K:K9KK?K=KK?K=K=K=K;K4K*K,K0K4K8K7K5K4K3K1K0K/K0K4K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K1K6KYKrKtKsKsKsKsKsKsKsKsKsKsKsKtKvKvKvKuKuKsKtKvKtKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKue]q(KSKRKSKSKSKSKSKSKSKSKSKSKSKSKRKRKRKRKRKRKSKUKTKRKSKRKSKSKUKUKTKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKVKUKVKTKVKUKUKUKUKUKUKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKUKUKXKWKWKXKWKXKYK[KYKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K_K]K\K\K\K\K]K]K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKbKaKdKdKeKcKbKdKeKdKdKeKcKbKbKXKOKQKWKWKTKVKUKWKSKWKRKVKLKMKLKPKDKNKSK]KhKPKVKVKBK!K&K%K&KKKK
+K KKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKK#K)K'K)K&KKKKKKKKKKKKKKK1K/K(K+K(K%KKKKKKKKKKKKKK#K/K)K'K)K)K&KKKKKKK1KGKGKGKFKFKFKFKFKFKGKFKHKBK!KK:KHKHKIKIKGKCK?K;K=K=K=K=K>K>K=KK;KK?K=K