update
This commit is contained in:
161
.CondaPkg/env/Lib/site-packages/scipy/__config__.py
vendored
Normal file
161
.CondaPkg/env/Lib/site-packages/scipy/__config__.py
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
# This file is generated by SciPy's build process
|
||||
# It contains system_info results at the time of building this package.
|
||||
from enum import Enum
|
||||
|
||||
__all__ = ["show"]
|
||||
_built_with_meson = True
|
||||
|
||||
|
||||
class DisplayModes(Enum):
|
||||
stdout = "stdout"
|
||||
dicts = "dicts"
|
||||
|
||||
|
||||
def _cleanup(d):
|
||||
"""
|
||||
Removes empty values in a `dict` recursively
|
||||
This ensures we remove values that Meson could not provide to CONFIG
|
||||
"""
|
||||
if isinstance(d, dict):
|
||||
return { k: _cleanup(v) for k, v in d.items() if v != '' and _cleanup(v) != '' }
|
||||
else:
|
||||
return d
|
||||
|
||||
|
||||
CONFIG = _cleanup(
|
||||
{
|
||||
"Compilers": {
|
||||
"c": {
|
||||
"name": "gcc",
|
||||
"linker": r"ld.bfd",
|
||||
"version": "10.3.0",
|
||||
"commands": r"cc",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"cython": {
|
||||
"name": r"cython",
|
||||
"linker": r"cython",
|
||||
"version": r"3.0.11",
|
||||
"commands": r"cython",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"c++": {
|
||||
"name": "gcc",
|
||||
"linker": r"ld.bfd",
|
||||
"version": "10.3.0",
|
||||
"commands": r"c++",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"fortran": {
|
||||
"name": "gcc",
|
||||
"linker": r"ld.bfd",
|
||||
"version": "10.3.0",
|
||||
"commands": r"gfortran",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"pythran": {
|
||||
"version": r"0.16.1",
|
||||
"include directory": r"C:\Users\runneradmin\AppData\Local\Temp\pip-build-env-sf8gymnb\overlay\Lib\site-packages/pythran"
|
||||
},
|
||||
},
|
||||
"Machine Information": {
|
||||
"host": {
|
||||
"cpu": r"x86_64",
|
||||
"family": r"x86_64",
|
||||
"endian": r"little",
|
||||
"system": r"windows",
|
||||
},
|
||||
"build": {
|
||||
"cpu": r"x86_64",
|
||||
"family": r"x86_64",
|
||||
"endian": r"little",
|
||||
"system": r"windows",
|
||||
},
|
||||
"cross-compiled": bool("False".lower().replace('false', '')),
|
||||
},
|
||||
"Build Dependencies": {
|
||||
"blas": {
|
||||
"name": "scipy-openblas",
|
||||
"found": bool("True".lower().replace('false', '')),
|
||||
"version": "0.3.27.dev",
|
||||
"detection method": "pkgconfig",
|
||||
"include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-do_lqnow/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas32/include",
|
||||
"lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-do_lqnow/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas32/lib",
|
||||
"openblas configuration": r"OpenBLAS 0.3.27.dev DYNAMIC_ARCH NO_AFFINITY Zen MAX_THREADS=24",
|
||||
"pc file directory": r"D:/a/scipy/scipy",
|
||||
},
|
||||
"lapack": {
|
||||
"name": "scipy-openblas",
|
||||
"found": bool("True".lower().replace('false', '')),
|
||||
"version": "0.3.27.dev",
|
||||
"detection method": "pkgconfig",
|
||||
"include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-do_lqnow/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas32/include",
|
||||
"lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-do_lqnow/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas32/lib",
|
||||
"openblas configuration": r"OpenBLAS 0.3.27.dev DYNAMIC_ARCH NO_AFFINITY Zen MAX_THREADS=24",
|
||||
"pc file directory": r"D:/a/scipy/scipy",
|
||||
},
|
||||
"pybind11": {
|
||||
"name": "pybind11",
|
||||
"version": "2.12.0",
|
||||
"detection method": "config-tool",
|
||||
"include directory": r"unknown",
|
||||
},
|
||||
},
|
||||
"Python Information": {
|
||||
"path": r"C:\Users\runneradmin\AppData\Local\Temp\cibw-run-do_lqnow\cp312-win_amd64\build\venv\Scripts\python.exe",
|
||||
"version": "3.12",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _check_pyyaml():
|
||||
import yaml
|
||||
|
||||
return yaml
|
||||
|
||||
|
||||
def show(mode=DisplayModes.stdout.value):
|
||||
"""
|
||||
Show libraries and system information on which SciPy was built
|
||||
and is being used
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : {`'stdout'`, `'dicts'`}, optional.
|
||||
Indicates how to display the config information.
|
||||
`'stdout'` prints to console, `'dicts'` returns a dictionary
|
||||
of the configuration.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : {`dict`, `None`}
|
||||
If mode is `'dicts'`, a dict is returned, else None
|
||||
|
||||
Notes
|
||||
-----
|
||||
1. The `'stdout'` mode will give more readable
|
||||
output if ``pyyaml`` is installed
|
||||
|
||||
"""
|
||||
if mode == DisplayModes.stdout.value:
|
||||
try: # Non-standard library, check import
|
||||
yaml = _check_pyyaml()
|
||||
|
||||
print(yaml.dump(CONFIG))
|
||||
except ModuleNotFoundError:
|
||||
import warnings
|
||||
import json
|
||||
|
||||
warnings.warn("Install `pyyaml` for better output", stacklevel=1)
|
||||
print(json.dumps(CONFIG, indent=2))
|
||||
elif mode == DisplayModes.dicts.value:
|
||||
return CONFIG
|
||||
else:
|
||||
raise AttributeError(
|
||||
f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}"
|
||||
)
|
||||
154
.CondaPkg/env/Lib/site-packages/scipy/__init__.py
vendored
Normal file
154
.CondaPkg/env/Lib/site-packages/scipy/__init__.py
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
"""
|
||||
SciPy: A scientific computing package for Python
|
||||
================================================
|
||||
|
||||
Documentation is available in the docstrings and
|
||||
online at https://docs.scipy.org.
|
||||
|
||||
Subpackages
|
||||
-----------
|
||||
Using any of these subpackages requires an explicit import. For example,
|
||||
``import scipy.cluster``.
|
||||
|
||||
::
|
||||
|
||||
cluster --- Vector Quantization / Kmeans
|
||||
constants --- Physical and mathematical constants and units
|
||||
datasets --- Dataset methods
|
||||
fft --- Discrete Fourier transforms
|
||||
fftpack --- Legacy discrete Fourier transforms
|
||||
integrate --- Integration routines
|
||||
interpolate --- Interpolation Tools
|
||||
io --- Data input and output
|
||||
linalg --- Linear algebra routines
|
||||
misc --- Utilities that don't have another home.
|
||||
ndimage --- N-D image package
|
||||
odr --- Orthogonal Distance Regression
|
||||
optimize --- Optimization Tools
|
||||
signal --- Signal Processing Tools
|
||||
sparse --- Sparse Matrices
|
||||
spatial --- Spatial data structures and algorithms
|
||||
special --- Special functions
|
||||
stats --- Statistical Functions
|
||||
|
||||
Public API in the main SciPy namespace
|
||||
--------------------------------------
|
||||
::
|
||||
|
||||
__version__ --- SciPy version string
|
||||
LowLevelCallable --- Low-level callback function
|
||||
show_config --- Show scipy build configuration
|
||||
test --- Run scipy unittests
|
||||
|
||||
"""
|
||||
|
||||
|
||||
# start delvewheel patch
|
||||
def _delvewheel_patch_1_8_0():
|
||||
import os
|
||||
libs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'scipy.libs'))
|
||||
if os.path.isdir(libs_dir):
|
||||
os.add_dll_directory(libs_dir)
|
||||
|
||||
|
||||
_delvewheel_patch_1_8_0()
|
||||
del _delvewheel_patch_1_8_0
|
||||
# end delvewheel patch
|
||||
|
||||
import importlib as _importlib
|
||||
|
||||
from numpy import __version__ as __numpy_version__
|
||||
|
||||
|
||||
try:
|
||||
from scipy.__config__ import show as show_config
|
||||
except ImportError as e:
|
||||
msg = """Error importing SciPy: you cannot import SciPy while
|
||||
being in scipy source directory; please exit the SciPy source
|
||||
tree first and relaunch your Python interpreter."""
|
||||
raise ImportError(msg) from e
|
||||
|
||||
|
||||
from scipy.version import version as __version__
|
||||
|
||||
|
||||
# Allow distributors to run custom init code
|
||||
from . import _distributor_init
|
||||
del _distributor_init
|
||||
|
||||
|
||||
from scipy._lib import _pep440
|
||||
# In maintenance branch, change to np_maxversion N+3 if numpy is at N
|
||||
np_minversion = '1.23.5'
|
||||
np_maxversion = '2.3.0'
|
||||
if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or
|
||||
_pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)):
|
||||
import warnings
|
||||
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
|
||||
f" is required for this version of SciPy (detected "
|
||||
f"version {__numpy_version__})",
|
||||
UserWarning, stacklevel=2)
|
||||
del _pep440
|
||||
|
||||
|
||||
# This is the first import of an extension module within SciPy. If there's
|
||||
# a general issue with the install, such that extension modules are missing
|
||||
# or cannot be imported, this is where we'll get a failure - so give an
|
||||
# informative error message.
|
||||
try:
|
||||
from scipy._lib._ccallback import LowLevelCallable
|
||||
except ImportError as e:
|
||||
msg = "The `scipy` install you are using seems to be broken, " + \
|
||||
"(extension modules cannot be imported), " + \
|
||||
"please try reinstalling."
|
||||
raise ImportError(msg) from e
|
||||
|
||||
|
||||
from scipy._lib._testutils import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
|
||||
|
||||
submodules = [
|
||||
'cluster',
|
||||
'constants',
|
||||
'datasets',
|
||||
'fft',
|
||||
'fftpack',
|
||||
'integrate',
|
||||
'interpolate',
|
||||
'io',
|
||||
'linalg',
|
||||
'misc',
|
||||
'ndimage',
|
||||
'odr',
|
||||
'optimize',
|
||||
'signal',
|
||||
'sparse',
|
||||
'spatial',
|
||||
'special',
|
||||
'stats'
|
||||
]
|
||||
|
||||
__all__ = submodules + [
|
||||
'LowLevelCallable',
|
||||
'test',
|
||||
'show_config',
|
||||
'__version__',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
|
||||
return __all__
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
if name in submodules:
|
||||
return _importlib.import_module(f'scipy.{name}')
|
||||
else:
|
||||
try:
|
||||
return globals()[name]
|
||||
except KeyError:
|
||||
raise AttributeError(
|
||||
f"Module 'scipy' has no attribute '{name}'"
|
||||
)
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/__config__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/__config__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/_distributor_init.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/_distributor_init.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/conftest.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/conftest.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/version.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/__pycache__/version.cpython-312.pyc
vendored
Normal file
Binary file not shown.
18
.CondaPkg/env/Lib/site-packages/scipy/_distributor_init.py
vendored
Normal file
18
.CondaPkg/env/Lib/site-packages/scipy/_distributor_init.py
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
""" Distributor init file
|
||||
|
||||
Distributors: you can replace the contents of this file with your own custom
|
||||
code to support particular distributions of SciPy.
|
||||
|
||||
For example, this is a good place to put any checks for hardware requirements
|
||||
or BLAS/LAPACK library initialization.
|
||||
|
||||
The SciPy standard source distribution will not put code in this file beyond
|
||||
the try-except import of `_distributor_init_local` (which is not part of a
|
||||
standard source distribution), so you can safely replace this file with your
|
||||
own version.
|
||||
"""
|
||||
|
||||
try:
|
||||
from . import _distributor_init_local # noqa: F401
|
||||
except ImportError:
|
||||
pass
|
||||
14
.CondaPkg/env/Lib/site-packages/scipy/_lib/__init__.py
vendored
Normal file
14
.CondaPkg/env/Lib/site-packages/scipy/_lib/__init__.py
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
"""
|
||||
Module containing private utility functions
|
||||
===========================================
|
||||
|
||||
The ``scipy._lib`` namespace is empty (for now). Tests for all
|
||||
utilities in submodules of ``_lib`` can be run with::
|
||||
|
||||
from scipy import _lib
|
||||
_lib.test()
|
||||
|
||||
"""
|
||||
from scipy._lib._testutils import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_array_api.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_array_api.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_bunch.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_bunch.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_pep440.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_pep440.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_testutils.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_testutils.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_util.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/_util.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/decorator.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/decorator.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/deprecation.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/deprecation.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/doccer.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/doccer.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/uarray.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/__pycache__/uarray.cpython-312.pyc
vendored
Normal file
Binary file not shown.
524
.CondaPkg/env/Lib/site-packages/scipy/_lib/_array_api.py
vendored
Normal file
524
.CondaPkg/env/Lib/site-packages/scipy/_lib/_array_api.py
vendored
Normal file
@@ -0,0 +1,524 @@
|
||||
"""Utility functions to use Python Array API compatible libraries.
|
||||
|
||||
For the context about the Array API see:
|
||||
https://data-apis.org/array-api/latest/purpose_and_scope.html
|
||||
|
||||
The SciPy use case of the Array API is described on the following page:
|
||||
https://data-apis.org/array-api/latest/use_cases.html#use-case-scipy
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import warnings
|
||||
|
||||
from types import ModuleType
|
||||
from typing import Any, Literal, TYPE_CHECKING
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
|
||||
from scipy._lib import array_api_compat
|
||||
from scipy._lib.array_api_compat import (
|
||||
is_array_api_obj,
|
||||
size,
|
||||
numpy as np_compat,
|
||||
device
|
||||
)
|
||||
|
||||
__all__ = ['array_namespace', '_asarray', 'size', 'device']
|
||||
|
||||
|
||||
# To enable array API and strict array-like input validation
|
||||
SCIPY_ARRAY_API: str | bool = os.environ.get("SCIPY_ARRAY_API", False)
|
||||
# To control the default device - for use in the test suite only
|
||||
SCIPY_DEVICE = os.environ.get("SCIPY_DEVICE", "cpu")
|
||||
|
||||
_GLOBAL_CONFIG = {
|
||||
"SCIPY_ARRAY_API": SCIPY_ARRAY_API,
|
||||
"SCIPY_DEVICE": SCIPY_DEVICE,
|
||||
}
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
Array = Any # To be changed to a Protocol later (see array-api#589)
|
||||
ArrayLike = Array | npt.ArrayLike
|
||||
|
||||
|
||||
def compliance_scipy(arrays: list[ArrayLike]) -> list[Array]:
|
||||
"""Raise exceptions on known-bad subclasses.
|
||||
|
||||
The following subclasses are not supported and raise and error:
|
||||
- `numpy.ma.MaskedArray`
|
||||
- `numpy.matrix`
|
||||
- NumPy arrays which do not have a boolean or numerical dtype
|
||||
- Any array-like which is neither array API compatible nor coercible by NumPy
|
||||
- Any array-like which is coerced by NumPy to an unsupported dtype
|
||||
"""
|
||||
for i in range(len(arrays)):
|
||||
array = arrays[i]
|
||||
if isinstance(array, np.ma.MaskedArray):
|
||||
raise TypeError("Inputs of type `numpy.ma.MaskedArray` are not supported.")
|
||||
elif isinstance(array, np.matrix):
|
||||
raise TypeError("Inputs of type `numpy.matrix` are not supported.")
|
||||
if isinstance(array, (np.ndarray, np.generic)):
|
||||
dtype = array.dtype
|
||||
if not (np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.bool_)):
|
||||
raise TypeError(f"An argument has dtype `{dtype!r}`; "
|
||||
f"only boolean and numerical dtypes are supported.")
|
||||
elif not is_array_api_obj(array):
|
||||
try:
|
||||
array = np.asanyarray(array)
|
||||
except TypeError:
|
||||
raise TypeError("An argument is neither array API compatible nor "
|
||||
"coercible by NumPy.")
|
||||
dtype = array.dtype
|
||||
if not (np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.bool_)):
|
||||
message = (
|
||||
f"An argument was coerced to an unsupported dtype `{dtype!r}`; "
|
||||
f"only boolean and numerical dtypes are supported."
|
||||
)
|
||||
raise TypeError(message)
|
||||
arrays[i] = array
|
||||
return arrays
|
||||
|
||||
|
||||
def _check_finite(array: Array, xp: ModuleType) -> None:
|
||||
"""Check for NaNs or Infs."""
|
||||
msg = "array must not contain infs or NaNs"
|
||||
try:
|
||||
if not xp.all(xp.isfinite(array)):
|
||||
raise ValueError(msg)
|
||||
except TypeError:
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def array_namespace(*arrays: Array) -> ModuleType:
|
||||
"""Get the array API compatible namespace for the arrays xs.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
*arrays : sequence of array_like
|
||||
Arrays used to infer the common namespace.
|
||||
|
||||
Returns
|
||||
-------
|
||||
namespace : module
|
||||
Common namespace.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Thin wrapper around `array_api_compat.array_namespace`.
|
||||
|
||||
1. Check for the global switch: SCIPY_ARRAY_API. This can also be accessed
|
||||
dynamically through ``_GLOBAL_CONFIG['SCIPY_ARRAY_API']``.
|
||||
2. `compliance_scipy` raise exceptions on known-bad subclasses. See
|
||||
its definition for more details.
|
||||
|
||||
When the global switch is False, it defaults to the `numpy` namespace.
|
||||
In that case, there is no compliance check. This is a convenience to
|
||||
ease the adoption. Otherwise, arrays must comply with the new rules.
|
||||
"""
|
||||
if not _GLOBAL_CONFIG["SCIPY_ARRAY_API"]:
|
||||
# here we could wrap the namespace if needed
|
||||
return np_compat
|
||||
|
||||
_arrays = [array for array in arrays if array is not None]
|
||||
|
||||
_arrays = compliance_scipy(_arrays)
|
||||
|
||||
return array_api_compat.array_namespace(*_arrays)
|
||||
|
||||
|
||||
def _asarray(
|
||||
array: ArrayLike,
|
||||
dtype: Any = None,
|
||||
order: Literal['K', 'A', 'C', 'F'] | None = None,
|
||||
copy: bool | None = None,
|
||||
*,
|
||||
xp: ModuleType | None = None,
|
||||
check_finite: bool = False,
|
||||
subok: bool = False,
|
||||
) -> Array:
|
||||
"""SciPy-specific replacement for `np.asarray` with `order`, `check_finite`, and
|
||||
`subok`.
|
||||
|
||||
Memory layout parameter `order` is not exposed in the Array API standard.
|
||||
`order` is only enforced if the input array implementation
|
||||
is NumPy based, otherwise `order` is just silently ignored.
|
||||
|
||||
`check_finite` is also not a keyword in the array API standard; included
|
||||
here for convenience rather than that having to be a separate function
|
||||
call inside SciPy functions.
|
||||
|
||||
`subok` is included to allow this function to preserve the behaviour of
|
||||
`np.asanyarray` for NumPy based inputs.
|
||||
"""
|
||||
if xp is None:
|
||||
xp = array_namespace(array)
|
||||
if xp.__name__ in {"numpy", "scipy._lib.array_api_compat.numpy"}:
|
||||
# Use NumPy API to support order
|
||||
if copy is True:
|
||||
array = np.array(array, order=order, dtype=dtype, subok=subok)
|
||||
elif subok:
|
||||
array = np.asanyarray(array, order=order, dtype=dtype)
|
||||
else:
|
||||
array = np.asarray(array, order=order, dtype=dtype)
|
||||
|
||||
# At this point array is a NumPy ndarray. We convert it to an array
|
||||
# container that is consistent with the input's namespace.
|
||||
array = xp.asarray(array)
|
||||
else:
|
||||
try:
|
||||
array = xp.asarray(array, dtype=dtype, copy=copy)
|
||||
except TypeError:
|
||||
coerced_xp = array_namespace(xp.asarray(3))
|
||||
array = coerced_xp.asarray(array, dtype=dtype, copy=copy)
|
||||
|
||||
if check_finite:
|
||||
_check_finite(array, xp)
|
||||
|
||||
return array
|
||||
|
||||
|
||||
def atleast_nd(x: Array, *, ndim: int, xp: ModuleType | None = None) -> Array:
|
||||
"""Recursively expand the dimension to have at least `ndim`."""
|
||||
if xp is None:
|
||||
xp = array_namespace(x)
|
||||
x = xp.asarray(x)
|
||||
if x.ndim < ndim:
|
||||
x = xp.expand_dims(x, axis=0)
|
||||
x = atleast_nd(x, ndim=ndim, xp=xp)
|
||||
return x
|
||||
|
||||
|
||||
def copy(x: Array, *, xp: ModuleType | None = None) -> Array:
|
||||
"""
|
||||
Copies an array.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array
|
||||
|
||||
xp : array_namespace
|
||||
|
||||
Returns
|
||||
-------
|
||||
copy : array
|
||||
Copied array
|
||||
|
||||
Notes
|
||||
-----
|
||||
This copy function does not offer all the semantics of `np.copy`, i.e. the
|
||||
`subok` and `order` keywords are not used.
|
||||
"""
|
||||
# Note: xp.asarray fails if xp is numpy.
|
||||
if xp is None:
|
||||
xp = array_namespace(x)
|
||||
|
||||
return _asarray(x, copy=True, xp=xp)
|
||||
|
||||
|
||||
def is_numpy(xp: ModuleType) -> bool:
|
||||
return xp.__name__ in ('numpy', 'scipy._lib.array_api_compat.numpy')
|
||||
|
||||
|
||||
def is_cupy(xp: ModuleType) -> bool:
|
||||
return xp.__name__ in ('cupy', 'scipy._lib.array_api_compat.cupy')
|
||||
|
||||
|
||||
def is_torch(xp: ModuleType) -> bool:
|
||||
return xp.__name__ in ('torch', 'scipy._lib.array_api_compat.torch')
|
||||
|
||||
def is_jax(xp):
|
||||
return xp.__name__ in ('jax.numpy', 'jax.experimental.array_api')
|
||||
|
||||
|
||||
def _strict_check(actual, desired, xp,
|
||||
check_namespace=True, check_dtype=True, check_shape=True):
|
||||
__tracebackhide__ = True # Hide traceback for py.test
|
||||
if check_namespace:
|
||||
_assert_matching_namespace(actual, desired)
|
||||
|
||||
desired = xp.asarray(desired)
|
||||
|
||||
if check_dtype:
|
||||
_msg = f"dtypes do not match.\nActual: {actual.dtype}\nDesired: {desired.dtype}"
|
||||
assert actual.dtype == desired.dtype, _msg
|
||||
|
||||
if check_shape:
|
||||
_msg = f"Shapes do not match.\nActual: {actual.shape}\nDesired: {desired.shape}"
|
||||
assert actual.shape == desired.shape, _msg
|
||||
_check_scalar(actual, desired, xp)
|
||||
|
||||
desired = xp.broadcast_to(desired, actual.shape)
|
||||
return desired
|
||||
|
||||
|
||||
def _assert_matching_namespace(actual, desired):
|
||||
__tracebackhide__ = True # Hide traceback for py.test
|
||||
actual = actual if isinstance(actual, tuple) else (actual,)
|
||||
desired_space = array_namespace(desired)
|
||||
for arr in actual:
|
||||
arr_space = array_namespace(arr)
|
||||
_msg = (f"Namespaces do not match.\n"
|
||||
f"Actual: {arr_space.__name__}\n"
|
||||
f"Desired: {desired_space.__name__}")
|
||||
assert arr_space == desired_space, _msg
|
||||
|
||||
|
||||
def _check_scalar(actual, desired, xp):
|
||||
__tracebackhide__ = True # Hide traceback for py.test
|
||||
# Shape check alone is sufficient unless desired.shape == (). Also,
|
||||
# only NumPy distinguishes between scalars and arrays.
|
||||
if desired.shape != () or not is_numpy(xp):
|
||||
return
|
||||
# We want to follow the conventions of the `xp` library. Libraries like
|
||||
# NumPy, for which `np.asarray(0)[()]` returns a scalar, tend to return
|
||||
# a scalar even when a 0D array might be more appropriate:
|
||||
# import numpy as np
|
||||
# np.mean([1, 2, 3]) # scalar, not 0d array
|
||||
# np.asarray(0)*2 # scalar, not 0d array
|
||||
# np.sin(np.asarray(0)) # scalar, not 0d array
|
||||
# Libraries like CuPy, for which `cp.asarray(0)[()]` returns a 0D array,
|
||||
# tend to return a 0D array in scenarios like those above.
|
||||
# Therefore, regardless of whether the developer provides a scalar or 0D
|
||||
# array for `desired`, we would typically want the type of `actual` to be
|
||||
# the type of `desired[()]`. If the developer wants to override this
|
||||
# behavior, they can set `check_shape=False`.
|
||||
desired = desired[()]
|
||||
_msg = f"Types do not match:\n Actual: {type(actual)}\n Desired: {type(desired)}"
|
||||
assert (xp.isscalar(actual) and xp.isscalar(desired)
|
||||
or (not xp.isscalar(actual) and not xp.isscalar(desired))), _msg
|
||||
|
||||
|
||||
def xp_assert_equal(actual, desired, check_namespace=True, check_dtype=True,
|
||||
check_shape=True, err_msg='', xp=None):
|
||||
__tracebackhide__ = True # Hide traceback for py.test
|
||||
if xp is None:
|
||||
xp = array_namespace(actual)
|
||||
desired = _strict_check(actual, desired, xp, check_namespace=check_namespace,
|
||||
check_dtype=check_dtype, check_shape=check_shape)
|
||||
if is_cupy(xp):
|
||||
return xp.testing.assert_array_equal(actual, desired, err_msg=err_msg)
|
||||
elif is_torch(xp):
|
||||
# PyTorch recommends using `rtol=0, atol=0` like this
|
||||
# to test for exact equality
|
||||
err_msg = None if err_msg == '' else err_msg
|
||||
return xp.testing.assert_close(actual, desired, rtol=0, atol=0, equal_nan=True,
|
||||
check_dtype=False, msg=err_msg)
|
||||
# JAX uses `np.testing`
|
||||
return np.testing.assert_array_equal(actual, desired, err_msg=err_msg)
|
||||
|
||||
|
||||
def xp_assert_close(actual, desired, rtol=None, atol=0, check_namespace=True,
|
||||
check_dtype=True, check_shape=True, err_msg='', xp=None):
|
||||
__tracebackhide__ = True # Hide traceback for py.test
|
||||
if xp is None:
|
||||
xp = array_namespace(actual)
|
||||
desired = _strict_check(actual, desired, xp, check_namespace=check_namespace,
|
||||
check_dtype=check_dtype, check_shape=check_shape)
|
||||
|
||||
floating = xp.isdtype(actual.dtype, ('real floating', 'complex floating'))
|
||||
if rtol is None and floating:
|
||||
# multiplier of 4 is used as for `np.float64` this puts the default `rtol`
|
||||
# roughly half way between sqrt(eps) and the default for
|
||||
# `numpy.testing.assert_allclose`, 1e-7
|
||||
rtol = xp.finfo(actual.dtype).eps**0.5 * 4
|
||||
elif rtol is None:
|
||||
rtol = 1e-7
|
||||
|
||||
if is_cupy(xp):
|
||||
return xp.testing.assert_allclose(actual, desired, rtol=rtol,
|
||||
atol=atol, err_msg=err_msg)
|
||||
elif is_torch(xp):
|
||||
err_msg = None if err_msg == '' else err_msg
|
||||
return xp.testing.assert_close(actual, desired, rtol=rtol, atol=atol,
|
||||
equal_nan=True, check_dtype=False, msg=err_msg)
|
||||
# JAX uses `np.testing`
|
||||
return np.testing.assert_allclose(actual, desired, rtol=rtol,
|
||||
atol=atol, err_msg=err_msg)
|
||||
|
||||
|
||||
def xp_assert_less(actual, desired, check_namespace=True, check_dtype=True,
|
||||
check_shape=True, err_msg='', verbose=True, xp=None):
|
||||
__tracebackhide__ = True # Hide traceback for py.test
|
||||
if xp is None:
|
||||
xp = array_namespace(actual)
|
||||
desired = _strict_check(actual, desired, xp, check_namespace=check_namespace,
|
||||
check_dtype=check_dtype, check_shape=check_shape)
|
||||
if is_cupy(xp):
|
||||
return xp.testing.assert_array_less(actual, desired,
|
||||
err_msg=err_msg, verbose=verbose)
|
||||
elif is_torch(xp):
|
||||
if actual.device.type != 'cpu':
|
||||
actual = actual.cpu()
|
||||
if desired.device.type != 'cpu':
|
||||
desired = desired.cpu()
|
||||
# JAX uses `np.testing`
|
||||
return np.testing.assert_array_less(actual, desired,
|
||||
err_msg=err_msg, verbose=verbose)
|
||||
|
||||
|
||||
def cov(x: Array, *, xp: ModuleType | None = None) -> Array:
|
||||
if xp is None:
|
||||
xp = array_namespace(x)
|
||||
|
||||
X = copy(x, xp=xp)
|
||||
dtype = xp.result_type(X, xp.float64)
|
||||
|
||||
X = atleast_nd(X, ndim=2, xp=xp)
|
||||
X = xp.asarray(X, dtype=dtype)
|
||||
|
||||
avg = xp.mean(X, axis=1)
|
||||
fact = X.shape[1] - 1
|
||||
|
||||
if fact <= 0:
|
||||
warnings.warn("Degrees of freedom <= 0 for slice",
|
||||
RuntimeWarning, stacklevel=2)
|
||||
fact = 0.0
|
||||
|
||||
X -= avg[:, None]
|
||||
X_T = X.T
|
||||
if xp.isdtype(X_T.dtype, 'complex floating'):
|
||||
X_T = xp.conj(X_T)
|
||||
c = X @ X_T
|
||||
c /= fact
|
||||
axes = tuple(axis for axis, length in enumerate(c.shape) if length == 1)
|
||||
return xp.squeeze(c, axis=axes)
|
||||
|
||||
|
||||
def xp_unsupported_param_msg(param: Any) -> str:
    """Build the error message for a parameter supported only with NumPy input."""
    return 'Providing {!r} is only supported for numpy arrays.'.format(param)
|
||||
|
||||
|
||||
def is_complex(x: Array, xp: ModuleType) -> bool:
    """Check whether `x` stores complex floating-point data."""
    kind = 'complex floating'
    return xp.isdtype(x.dtype, kind)
|
||||
|
||||
|
||||
def get_xp_devices(xp: ModuleType) -> list[str] | list[None]:
    """Returns a list of available devices for the given namespace."""
    if is_torch(xp):
        import torch  # type: ignore[import]
        found = ['cpu']
        found += [f'cuda:{i}' for i in range(torch.cuda.device_count())]
        if torch.backends.mps.is_available():
            found += ['mps']
        return found

    if is_cupy(xp):
        import cupy  # type: ignore[import]
        return [f'cuda:{i}'
                for i in range(cupy.cuda.runtime.getDeviceCount())]

    if is_jax(xp):
        import jax  # type: ignore[import]
        found = []
        # Query each backend jax knows about, in a fixed order.
        for backend in ('cpu', 'gpu', 'tpu'):
            found += [f'{backend}:{i}'
                      for i in range(jax.device_count(backend=backend))]
        return found

    # given namespace is not known to have a list of available devices;
    # return `[None]` so that one can use this in tests for `device=None`.
    return [None]
|
||||
|
||||
|
||||
def scipy_namespace_for(xp: ModuleType) -> ModuleType:
    """
    Return the `scipy` namespace for alternative backends, where it exists,
    such as `cupyx.scipy` and `jax.scipy`. Useful for ad hoc dispatching.

    Default: return `scipy` (this package).
    """
    if is_cupy(xp):
        import cupyx  # type: ignore[import-not-found,import-untyped]
        return cupyx.scipy
    if is_jax(xp):
        import jax  # type: ignore[import-not-found]
        return jax.scipy
    # No backend-specific scipy namespace known: fall back to this package.
    import scipy
    return scipy
|
||||
|
||||
|
||||
# temporary substitute for xp.minimum, which is not yet in all backends
# or covered by array_api_compat.
def xp_minimum(x1: Array, x2: Array, /) -> Array:
    """Elementwise minimum; NaNs in `x2` propagate, matching ``np.minimum``."""
    # xp won't be passed in because it doesn't need to be passed in to xp.minimum
    xp = array_namespace(x1, x2)
    native = getattr(xp, 'minimum', None)
    if native is not None:
        return native(x1, x2)
    x1, x2 = xp.broadcast_arrays(x1, x2)
    take_second = (x2 < x1) | xp.isnan(x2)
    out = xp.where(take_second, x2, x1)
    # Unwrap a 0-d result to a scalar-like, as the native path would.
    return out[()] if out.ndim == 0 else out
|
||||
|
||||
|
||||
# temporary substitute for xp.clip, which is not yet in all backends
|
||||
# or covered by array_api_compat.
|
||||
def xp_clip(
|
||||
x: Array,
|
||||
/,
|
||||
min: int | float | Array | None = None,
|
||||
max: int | float | Array | None = None,
|
||||
*,
|
||||
xp: ModuleType | None = None) -> Array:
|
||||
xp = array_namespace(x) if xp is None else xp
|
||||
a, b = xp.asarray(min, dtype=x.dtype), xp.asarray(max, dtype=x.dtype)
|
||||
if hasattr(xp, 'clip'):
|
||||
return xp.clip(x, a, b)
|
||||
x, a, b = xp.broadcast_arrays(x, a, b)
|
||||
y = xp.asarray(x, copy=True)
|
||||
ia = y < a
|
||||
y[ia] = a[ia]
|
||||
ib = y > b
|
||||
y[ib] = b[ib]
|
||||
return y[()] if y.ndim == 0 else y
|
||||
|
||||
|
||||
# temporary substitute for xp.moveaxis, which is not yet in all backends
|
||||
# or covered by array_api_compat.
|
||||
def xp_moveaxis_to_end(
|
||||
x: Array,
|
||||
source: int,
|
||||
/, *,
|
||||
xp: ModuleType | None = None) -> Array:
|
||||
xp = array_namespace(xp) if xp is None else xp
|
||||
axes = list(range(x.ndim))
|
||||
temp = axes.pop(source)
|
||||
axes = axes + [temp]
|
||||
return xp.permute_dims(x, axes)
|
||||
|
||||
|
||||
# temporary substitute for xp.copysign, which is not yet in all backends
|
||||
# or covered by array_api_compat.
|
||||
def xp_copysign(x1: Array, x2: Array, /, *, xp: ModuleType | None = None) -> Array:
|
||||
# no attempt to account for special cases
|
||||
xp = array_namespace(x1, x2) if xp is None else xp
|
||||
abs_x1 = xp.abs(x1)
|
||||
return xp.where(x2 >= 0, abs_x1, -abs_x1)
|
||||
|
||||
|
||||
# partial substitute for xp.sign, which does not cover the NaN special case
# that I need. (https://github.com/data-apis/array-api-compat/issues/136)
def xp_sign(x: Array, /, *, xp: ModuleType | None = None) -> Array:
    """Sign function that propagates NaN (NaN in -> NaN out)."""
    if xp is None:
        xp = array_namespace(x)
    if is_numpy(xp):  # only NumPy implements the special cases correctly
        return xp.sign(x)
    unit = xp.asarray(1, dtype=x.dtype)
    # Start from all-NaN so any element matched by none of the masks
    # below (i.e. NaN input) stays NaN in the output.
    out = xp.full_like(x, xp.nan)
    out = xp.where(x > 0, unit, out)
    out = xp.where(x < 0, -unit, out)
    out = xp.where(x == 0, 0*unit, out)
    return out
|
||||
225
.CondaPkg/env/Lib/site-packages/scipy/_lib/_bunch.py
vendored
Normal file
225
.CondaPkg/env/Lib/site-packages/scipy/_lib/_bunch.py
vendored
Normal file
@@ -0,0 +1,225 @@
|
||||
import sys as _sys
|
||||
from keyword import iskeyword as _iskeyword
|
||||
|
||||
|
||||
def _validate_names(typename, field_names, extra_field_names):
|
||||
"""
|
||||
Ensure that all the given names are valid Python identifiers that
|
||||
do not start with '_'. Also check that there are no duplicates
|
||||
among field_names + extra_field_names.
|
||||
"""
|
||||
for name in [typename] + field_names + extra_field_names:
|
||||
if not isinstance(name, str):
|
||||
raise TypeError('typename and all field names must be strings')
|
||||
if not name.isidentifier():
|
||||
raise ValueError('typename and all field names must be valid '
|
||||
f'identifiers: {name!r}')
|
||||
if _iskeyword(name):
|
||||
raise ValueError('typename and all field names cannot be a '
|
||||
f'keyword: {name!r}')
|
||||
|
||||
seen = set()
|
||||
for name in field_names + extra_field_names:
|
||||
if name.startswith('_'):
|
||||
raise ValueError('Field names cannot start with an underscore: '
|
||||
f'{name!r}')
|
||||
if name in seen:
|
||||
raise ValueError(f'Duplicate field name: {name!r}')
|
||||
seen.add(name)
|
||||
|
||||
|
||||
# Note: This code is adapted from CPython:Lib/collections/__init__.py
def _make_tuple_bunch(typename, field_names, extra_field_names=None,
                      module=None):
    """
    Create a namedtuple-like class with additional attributes.

    This function creates a subclass of tuple that acts like a namedtuple
    and that has additional attributes.

    The additional attributes are listed in `extra_field_names`. The
    values assigned to these attributes are not part of the tuple.

    The reason this function exists is to allow functions in SciPy
    that currently return a tuple or a namedtuple to returned objects
    that have additional attributes, while maintaining backwards
    compatibility.

    This should only be used to enhance *existing* functions in SciPy.
    New functions are free to create objects as return values without
    having to maintain backwards compatibility with an old tuple or
    namedtuple return value.

    Parameters
    ----------
    typename : str
        The name of the type.
    field_names : list of str
        List of names of the values to be stored in the tuple. These names
        will also be attributes of instances, so the values in the tuple
        can be accessed by indexing or as attributes. At least one name
        is required. See the Notes for additional restrictions.
    extra_field_names : list of str, optional
        List of names of values that will be stored as attributes of the
        object. See the notes for additional restrictions.

    Returns
    -------
    cls : type
        The new class.

    Notes
    -----
    There are restrictions on the names that may be used in `field_names`
    and `extra_field_names`:

    * The names must be unique--no duplicates allowed.
    * The names must be valid Python identifiers, and must not begin with
      an underscore.
    * The names must not be Python keywords (e.g. 'def', 'and', etc., are
      not allowed).

    Examples
    --------
    >>> from scipy._lib._bunch import _make_tuple_bunch

    Create a class that acts like a namedtuple with length 2 (with field
    names `x` and `y`) that will also have the attributes `w` and `beta`:

    >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta'])

    `Result` is the new class. We call it with keyword arguments to create
    a new instance with given values.

    >>> result1 = Result(x=1, y=2, w=99, beta=0.5)
    >>> result1
    Result(x=1, y=2, w=99, beta=0.5)

    `result1` acts like a tuple of length 2:

    >>> len(result1)
    2
    >>> result1[:]
    (1, 2)

    The values assigned when the instance was created are available as
    attributes:

    >>> result1.y
    2
    >>> result1.beta
    0.5
    """
    if len(field_names) == 0:
        raise ValueError('field_names must contain at least one name')

    if extra_field_names is None:
        extra_field_names = []
    _validate_names(typename, field_names, extra_field_names)

    # Interning mirrors collections.namedtuple: speeds up comparisons and
    # dict lookups on the (frequently reused) name strings.
    typename = _sys.intern(str(typename))
    field_names = tuple(map(_sys.intern, field_names))
    extra_field_names = tuple(map(_sys.intern, extra_field_names))

    all_names = field_names + extra_field_names
    arg_list = ', '.join(field_names)
    full_list = ', '.join(all_names)
    # Template for __repr__: "(x=%(x)r, y=%(y)r, ...)", filled from _asdict().
    repr_fmt = ''.join(('(',
                        ', '.join(f'{name}=%({name})r' for name in all_names),
                        ')'))
    tuple_new = tuple.__new__
    _dict, _tuple, _zip = dict, tuple, zip

    # Create all the named tuple methods to be added to the class namespace

    # The methods whose signatures depend on the field names are generated
    # as source text and compiled with exec(), exactly as namedtuple does.
    s = f"""\
def __new__(_cls, {arg_list}, **extra_fields):
    return _tuple_new(_cls, ({arg_list},))

def __init__(self, {arg_list}, **extra_fields):
    for key in self._extra_fields:
        if key not in extra_fields:
            raise TypeError("missing keyword argument '%s'" % (key,))
    for key, val in extra_fields.items():
        if key not in self._extra_fields:
            raise TypeError("unexpected keyword argument '%s'" % (key,))
        self.__dict__[key] = val

def __setattr__(self, key, val):
    if key in {repr(field_names)}:
        raise AttributeError("can't set attribute %r of class %r"
                             % (key, self.__class__.__name__))
    else:
        self.__dict__[key] = val
"""
    del arg_list

    # The exec namespace exposes only what the generated code needs; the
    # restricted __builtins__ keeps the generated functions self-contained.
    namespace = {'_tuple_new': tuple_new,
                 '__builtins__': dict(TypeError=TypeError,
                                      AttributeError=AttributeError),
                 '__name__': f'namedtuple_{typename}'}
    exec(s, namespace)
    __new__ = namespace['__new__']
    __new__.__doc__ = f'Create new instance of {typename}({full_list})'
    __init__ = namespace['__init__']
    __init__.__doc__ = f'Instantiate instance of {typename}({full_list})'
    __setattr__ = namespace['__setattr__']

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self._asdict()

    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        out = _dict(_zip(self._fields, self))
        out.update(self.__dict__)
        return out

    def __getnewargs_ex__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return _tuple(self), self.__dict__

    # Modify function metadata to help with introspection and debugging
    for method in (__new__, __repr__, _asdict, __getnewargs_ex__):
        method.__qualname__ = f'{typename}.{method.__name__}'

    # Build-up the class namespace dictionary
    # and use type() to build the result class
    class_namespace = {
        '__doc__': f'{typename}({full_list})',
        '_fields': field_names,
        '__new__': __new__,
        '__init__': __init__,
        '__repr__': __repr__,
        '__setattr__': __setattr__,
        '_asdict': _asdict,
        '_extra_fields': extra_field_names,
        '__getnewargs_ex__': __getnewargs_ex__,
    }
    # Tuple fields read positionally; the `index=index` default binds the
    # current loop value (avoiding the late-binding closure pitfall).
    for index, name in enumerate(field_names):

        def _get(self, index=index):
            return self[index]
        class_namespace[name] = property(_get)
    # Extra fields read from the instance __dict__; same early-binding trick.
    for name in extra_field_names:

        def _get(self, name=name):
            return self.__dict__[name]
        class_namespace[name] = property(_get)

    result = type(typename, (tuple,), class_namespace)

    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the named tuple is created. Bypass this step in environments
    # where sys._getframe is not defined (Jython for example) or sys._getframe
    # is not defined for arguments greater than 0 (IronPython), or where the
    # user has specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module
        __new__.__module__ = module

    return result
|
||||
251
.CondaPkg/env/Lib/site-packages/scipy/_lib/_ccallback.py
vendored
Normal file
251
.CondaPkg/env/Lib/site-packages/scipy/_lib/_ccallback.py
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
from . import _ccallback_c
|
||||
|
||||
import ctypes
|
||||
|
||||
PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]
|
||||
|
||||
ffi = None
|
||||
|
||||
class CData:
    # Placeholder for cffi's CData type; rebound to the real ``ffi.CData``
    # by `_import_cffi` when cffi is importable. Until then, isinstance
    # checks against it simply never match.
    pass
|
||||
|
||||
def _import_cffi():
    """Lazily import cffi, caching the outcome in module globals.

    On success, bind ``ffi`` to an FFI instance and ``CData`` to the real
    cffi CData type; if cffi is unavailable, record that with ``ffi = False``
    so the import is attempted only once.
    """
    global ffi, CData

    if ffi is not None:
        # Already attempted: ffi is either an FFI instance or False.
        return

    try:
        import cffi
        ffi = cffi.FFI()
        CData = ffi.CData
    except ImportError:
        ffi = False
|
||||
|
||||
|
||||
class LowLevelCallable(tuple):
    """
    Low-level callback function.

    Some functions in SciPy take as arguments callback functions, which
    can either be python callables or low-level compiled functions. Using
    compiled callback functions can improve performance somewhat by
    avoiding wrapping data in Python objects.

    Such low-level functions in SciPy are wrapped in `LowLevelCallable`
    objects, which can be constructed from function pointers obtained from
    ctypes, cffi, Cython, or contained in Python `PyCapsule` objects.

    .. seealso::

       Functions accepting low-level callables:

       `scipy.integrate.quad`, `scipy.ndimage.generic_filter`,
       `scipy.ndimage.generic_filter1d`, `scipy.ndimage.geometric_transform`

       Usage examples:

       :ref:`ndimage-ccallbacks`, :ref:`quad-callbacks`

    Parameters
    ----------
    function : {PyCapsule, ctypes function pointer, cffi function pointer}
        Low-level callback function.
    user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
        User data to pass on to the callback function.
    signature : str, optional
        Signature of the function. If omitted, determined from *function*,
        if possible.

    Attributes
    ----------
    function
        Callback function given.
    user_data
        User data given.
    signature
        Signature of the function.

    Methods
    -------
    from_cython
        Class method for constructing callables from Cython C-exported
        functions.

    Notes
    -----
    The argument ``function`` can be one of:

    - PyCapsule, whose name contains the C function signature
    - ctypes function pointer
    - cffi function pointer

    The signature of the low-level callback must match one of those expected
    by the routine it is passed to.

    If constructing low-level functions from a PyCapsule, the name of the
    capsule must be the corresponding signature, in the format::

        return_type (arg1_type, arg2_type, ...)

    For example::

        "void (double)"
        "double (double, int *, void *)"

    The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
    if an explicit value for ``user_data`` was not given.

    """

    # Make the class immutable
    __slots__ = ()

    def __new__(cls, function, user_data=None, signature=None):
        # We need to hold a reference to the function & user data,
        # to prevent them going out of scope
        # Tuple layout: (raw capsule for the C side, original function,
        # original user data).
        item = cls._parse_callback(function, user_data, signature)
        return tuple.__new__(cls, (item, function, user_data))

    def __repr__(self):
        return f"LowLevelCallable({self.function!r}, {self.user_data!r})"

    @property
    def function(self):
        # Slot 1: the function object exactly as the caller supplied it.
        return tuple.__getitem__(self, 1)

    @property
    def user_data(self):
        # Slot 2: the user data object exactly as the caller supplied it.
        return tuple.__getitem__(self, 2)

    @property
    def signature(self):
        # Slot 0 is the raw PyCapsule; its capsule name encodes the C
        # signature string.
        return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))

    def __getitem__(self, idx):
        # Indexing is deliberately disabled: the tuple layout is an
        # implementation detail consumed by the C helpers.
        raise ValueError()

    @classmethod
    def from_cython(cls, module, name, user_data=None, signature=None):
        """
        Create a low-level callback function from an exported Cython function.

        Parameters
        ----------
        module : module
            Cython module where the exported function resides
        name : str
            Name of the exported function
        user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
            User data to pass on to the callback function.
        signature : str, optional
            Signature of the function. If omitted, determined from *function*.

        """
        try:
            function = module.__pyx_capi__[name]
        except AttributeError as e:
            message = "Given module is not a Cython module with __pyx_capi__ attribute"
            raise ValueError(message) from e
        except KeyError as e:
            message = f"No function {name!r} found in __pyx_capi__ of the module"
            raise ValueError(message) from e
        return cls(function, user_data, signature)

    @classmethod
    def _parse_callback(cls, obj, user_data=None, signature=None):
        # Normalize (obj, user_data, signature) into one raw PyCapsule that
        # the C helpers in `_ccallback_c` know how to unpack.
        _import_cffi()

        if isinstance(obj, LowLevelCallable):
            # Reuse the already-parsed capsule from the wrapped callable.
            func = tuple.__getitem__(obj, 0)
        elif isinstance(obj, PyCFuncPtr):
            func, signature = _get_ctypes_func(obj, signature)
        elif isinstance(obj, CData):
            func, signature = _get_cffi_func(obj, signature)
        elif _ccallback_c.check_capsule(obj):
            func = obj
        else:
            raise ValueError("Given input is not a callable or a "
                             "low-level callable (pycapsule/ctypes/cffi)")

        if isinstance(user_data, ctypes.c_void_p):
            context = _get_ctypes_data(user_data)
        elif isinstance(user_data, CData):
            context = _get_cffi_data(user_data)
        elif user_data is None:
            # 0 means "no explicit context"; the C side may then fall back
            # to the capsule's own context.
            context = 0
        elif _ccallback_c.check_capsule(user_data):
            context = user_data
        else:
            raise ValueError("Given user data is not a valid "
                             "low-level void* pointer (pycapsule/ctypes/cffi)")

        return _ccallback_c.get_raw_capsule(func, signature, context)
|
||||
|
||||
|
||||
#
|
||||
# ctypes helpers
|
||||
#
|
||||
|
||||
def _get_ctypes_func(func, signature=None):
|
||||
# Get function pointer
|
||||
func_ptr = ctypes.cast(func, ctypes.c_void_p).value
|
||||
|
||||
# Construct function signature
|
||||
if signature is None:
|
||||
signature = _typename_from_ctypes(func.restype) + " ("
|
||||
for j, arg in enumerate(func.argtypes):
|
||||
if j == 0:
|
||||
signature += _typename_from_ctypes(arg)
|
||||
else:
|
||||
signature += ", " + _typename_from_ctypes(arg)
|
||||
signature += ")"
|
||||
|
||||
return func_ptr, signature
|
||||
|
||||
|
||||
def _typename_from_ctypes(item):
|
||||
if item is None:
|
||||
return "void"
|
||||
elif item is ctypes.c_void_p:
|
||||
return "void *"
|
||||
|
||||
name = item.__name__
|
||||
|
||||
pointer_level = 0
|
||||
while name.startswith("LP_"):
|
||||
pointer_level += 1
|
||||
name = name[3:]
|
||||
|
||||
if name.startswith('c_'):
|
||||
name = name[2:]
|
||||
|
||||
if pointer_level > 0:
|
||||
name += " " + "*"*pointer_level
|
||||
|
||||
return name
|
||||
|
||||
|
||||
def _get_ctypes_data(data):
    """Return the raw integer address held by a ctypes void pointer."""
    # Get voidp pointer
    return ctypes.cast(data, ctypes.c_void_p).value
|
||||
|
||||
|
||||
#
|
||||
# CFFI helpers
|
||||
#
|
||||
|
||||
def _get_cffi_func(func, signature=None):
    """Return ``(address, signature)`` for a cffi function pointer.

    Requires `_import_cffi` to have succeeded (module-global ``ffi`` set).
    """
    # Get function pointer as an integer address.
    func_ptr = ffi.cast('uintptr_t', func)

    # Get signature: cffi renders it as "ret(*)(args)"; replacing "(*)"
    # with a space yields the "ret (args)" capsule-name format.
    if signature is None:
        signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')

    return func_ptr, signature
|
||||
|
||||
|
||||
def _get_cffi_data(data):
    """Return the raw integer address of a cffi pointer."""
    # Get pointer
    return ffi.cast('uintptr_t', data)
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_ccallback_c.cp312-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_ccallback_c.cp312-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_ccallback_c.cp312-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_ccallback_c.cp312-win_amd64.pyd
vendored
Normal file
Binary file not shown.
254
.CondaPkg/env/Lib/site-packages/scipy/_lib/_disjoint_set.py
vendored
Normal file
254
.CondaPkg/env/Lib/site-packages/scipy/_lib/_disjoint_set.py
vendored
Normal file
@@ -0,0 +1,254 @@
|
||||
"""
|
||||
Disjoint set data structure
|
||||
"""
|
||||
|
||||
|
||||
class DisjointSet:
    """ Disjoint set data structure for incremental connectivity queries.

    .. versionadded:: 1.6.0

    Attributes
    ----------
    n_subsets : int
        The number of subsets.

    Methods
    -------
    add
    merge
    connected
    subset
    subset_size
    subsets
    __getitem__

    Notes
    -----
    This class implements the disjoint set [1]_, also known as the *union-find*
    or *merge-find* data structure. The *find* operation (implemented in
    `__getitem__`) implements the *path halving* variant. The *merge* method
    implements the *merge by size* variant.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure

    Examples
    --------
    >>> from scipy.cluster.hierarchy import DisjointSet

    Initialize a disjoint set:

    >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])

    Merge some subsets:

    >>> disjoint_set.merge(1, 2)
    True
    >>> disjoint_set.merge(3, 'a')
    True
    >>> disjoint_set.merge('a', 'b')
    True
    >>> disjoint_set.merge('b', 'b')
    False

    Find root elements:

    >>> disjoint_set[2]
    1
    >>> disjoint_set['b']
    3

    Test connectivity:

    >>> disjoint_set.connected(1, 2)
    True
    >>> disjoint_set.connected(1, 'b')
    False

    List elements in disjoint set:

    >>> list(disjoint_set)
    [1, 2, 3, 'a', 'b']

    Get the subset containing 'a':

    >>> disjoint_set.subset('a')
    {'a', 3, 'b'}

    Get the size of the subset containing 'a' (without actually instantiating
    the subset):

    >>> disjoint_set.subset_size('a')
    3

    Get all subsets in the disjoint set:

    >>> disjoint_set.subsets()
    [{1, 2}, {'a', 3, 'b'}]
    """
    def __init__(self, elements=None):
        self.n_subsets = 0
        self._sizes = {}
        self._parents = {}
        # _nbrs is a circular linked list which links connected elements.
        self._nbrs = {}
        # _indices tracks the element insertion order in `__iter__`.
        self._indices = {}
        if elements is not None:
            for element in elements:
                self.add(element)

    def __iter__(self):
        """Returns an iterator of the elements in the disjoint set.

        Elements are ordered by insertion order.
        """
        return iter(self._indices)

    def __len__(self):
        return len(self._indices)

    def __contains__(self, x):
        return x in self._indices

    def __getitem__(self, x):
        """Find the root element of `x`.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        root : hashable object
            Root element of `x`.
        """
        if x not in self._indices:
            raise KeyError(x)

        # Find by "path halving": each visited node is re-pointed at its
        # grandparent, flattening the tree as a side effect of the lookup.
        parent_of = self._parents
        while self._indices[x] != self._indices[parent_of[x]]:
            parent_of[x] = parent_of[parent_of[x]]
            x = parent_of[x]
        return x

    def add(self, x):
        """Add element `x` to disjoint set
        """
        if x in self._indices:
            return  # adding an existing element is a no-op

        self._sizes[x] = 1
        self._parents[x] = x
        self._nbrs[x] = x
        self._indices[x] = len(self._indices)
        self.n_subsets += 1

    def merge(self, x, y):
        """Merge the subsets of `x` and `y`.

        The smaller subset (the child) is merged into the larger subset (the
        parent). If the subsets are of equal size, the root element which was
        first inserted into the disjoint set is selected as the parent.

        Parameters
        ----------
        x, y : hashable object
            Elements to merge.

        Returns
        -------
        merged : bool
            True if `x` and `y` were in disjoint sets, False otherwise.
        """
        root_x = self[x]
        root_y = self[y]
        if self._indices[root_x] == self._indices[root_y]:
            return False

        # Merge by size, breaking ties by earliest insertion order.
        sizes = self._sizes
        indices = self._indices
        if (sizes[root_x], indices[root_y]) < (sizes[root_y], indices[root_x]):
            root_x, root_y = root_y, root_x
        self._parents[root_y] = root_x
        sizes[root_x] += sizes[root_y]
        # Splice the two circular neighbour lists into a single cycle.
        self._nbrs[root_x], self._nbrs[root_y] = (self._nbrs[root_y],
                                                  self._nbrs[root_x])
        self.n_subsets -= 1
        return True

    def connected(self, x, y):
        """Test whether `x` and `y` are in the same subset.

        Parameters
        ----------
        x, y : hashable object
            Elements to test.

        Returns
        -------
        result : bool
            True if `x` and `y` are in the same set, False otherwise.
        """
        return self._indices[self[x]] == self._indices[self[y]]

    def subset(self, x):
        """Get the subset containing `x`.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        result : set
            Subset containing `x`.
        """
        if x not in self._indices:
            raise KeyError(x)

        # Walk the circular neighbour list until we come back around to `x`.
        members = [x]
        cursor = self._nbrs[x]
        while self._indices[cursor] != self._indices[x]:
            members.append(cursor)
            cursor = self._nbrs[cursor]
        return set(members)

    def subset_size(self, x):
        """Get the size of the subset containing `x`.

        Note that this method is faster than ``len(self.subset(x))`` because
        the size is directly read off an internal field, without the need to
        instantiate the full subset.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        result : int
            Size of the subset containing `x`.
        """
        return self._sizes[self[x]]

    def subsets(self):
        """Get all the subsets in the disjoint set.

        Returns
        -------
        result : list
            Subsets in the disjoint set.
        """
        result = []
        seen = set()
        for element in self:
            if element not in seen:
                current = self.subset(element)
                seen.update(current)
                result.append(current)
        return result
|
||||
679
.CondaPkg/env/Lib/site-packages/scipy/_lib/_docscrape.py
vendored
Normal file
679
.CondaPkg/env/Lib/site-packages/scipy/_lib/_docscrape.py
vendored
Normal file
@@ -0,0 +1,679 @@
|
||||
"""Extract reference documentation from the NumPy source tree.
|
||||
|
||||
"""
|
||||
# copied from numpydoc/docscrape.py
|
||||
import inspect
|
||||
import textwrap
|
||||
import re
|
||||
import pydoc
|
||||
from warnings import warn
|
||||
from collections import namedtuple
|
||||
from collections.abc import Callable, Mapping
|
||||
import copy
|
||||
import sys
|
||||
|
||||
|
||||
def strip_blank_lines(l):
    "Remove leading and trailing blank lines from a list of lines"
    # Mutates `l` in place and also returns it for convenience.
    while l and not l[0].strip():
        l.pop(0)
    while l and not l[-1].strip():
        l.pop()
    return l
|
||||
|
||||
|
||||
class Reader:
    """A line-based string reader.

    """
    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
            String with lines separated by '\\n'.

        """
        if isinstance(data, list):
            self._str = data
        else:
            self._str = data.split('\n')  # store string as list of lines

        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        """Rewind the cursor to the first line."""
        self._l = 0  # current line nr

    def read(self):
        """Return the current line and advance; '' once past the end."""
        if self.eof():
            return ''
        line = self[self._l]
        self._l += 1
        return line

    def seek_next_non_empty_line(self):
        """Advance the cursor past blank lines (stops at first non-blank)."""
        for line in self[self._l:]:
            if line.strip():
                break
            self._l += 1

    def eof(self):
        """True when the cursor has moved past the last line."""
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Consume and return lines until `condition_func(line)` is true.

        The matching line itself is not consumed. Returns [] when the
        condition is never satisfied before the end of input.
        """
        start = self._l
        for line in self[start:]:
            if condition_func(line):
                return self[start:self._l]
            self._l += 1
            if self.eof():
                return self[start:self._l+1]
        return []

    def read_to_next_empty_line(self):
        """Skip blanks, then consume and return lines up to the next blank."""
        self.seek_next_non_empty_line()
        return self.read_to_condition(lambda line: not line.strip())

    def read_to_next_unindented_line(self):
        """Consume and return lines up to the next non-blank unindented line."""
        return self.read_to_condition(
            lambda line: line.strip() and (len(line.lstrip()) == len(line)))

    def peek(self, n=0):
        """Return the line `n` positions ahead without consuming anything."""
        idx = self._l + n
        return self[idx] if idx < len(self._str) else ''

    def is_empty(self):
        """True when the reader holds no non-whitespace content at all."""
        return not ''.join(self._str).strip()
|
||||
|
||||
|
||||
class ParseError(Exception):
    """Raised when a docstring cannot be parsed as numpydoc."""
    def __str__(self):
        msg = self.args[0]
        # When the failing docstring was attached (see NumpyDocString),
        # include it so the error is traceable to its source.
        if hasattr(self, 'docstring'):
            msg = f"{msg} in {self.docstring!r}"
        return msg
|
||||
|
||||
|
||||
# Lightweight record for one documented parameter: (name, type, desc).
Parameter = namedtuple('Parameter', ['name', 'type', 'desc'])
|
||||
|
||||
|
||||
class NumpyDocString(Mapping):
    """Parses a numpydoc string to an abstract representation

    Instances define a mapping from section title to structured data.

    """

    # Template of recognized sections and the "empty" value each takes before
    # parsing; deep-copied per instance in `__init__`.
    sections = {
        'Signature': '',
        'Summary': [''],
        'Extended Summary': [],
        'Parameters': [],
        'Returns': [],
        'Yields': [],
        'Receives': [],
        'Raises': [],
        'Warns': [],
        'Other Parameters': [],
        'Attributes': [],
        'Methods': [],
        'See Also': [],
        'Notes': [],
        'Warnings': [],
        'References': '',
        'Examples': '',
        'index': {}
    }

    def __init__(self, docstring, config={}):
        # NOTE: `config` is accepted for interface compatibility with
        # subclasses (FunctionDoc passes one through) but is unused here.
        orig_docstring = docstring
        docstring = textwrap.dedent(docstring).split('\n')

        self._doc = Reader(docstring)
        self._parsed_data = copy.deepcopy(self.sections)

        try:
            self._parse()
        except ParseError as e:
            # Attach the original docstring so ParseError.__str__ can show it.
            e.docstring = orig_docstring
            raise

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        if key not in self._parsed_data:
            # Unknown sections only warn; known sections are overwritten.
            self._error_location("Unknown section %s" % key, error=False)
        else:
            self._parsed_data[key] = val

    def __iter__(self):
        return iter(self._parsed_data)

    def __len__(self):
        return len(self._parsed_data)

    def _is_at_section(self):
        """Return True if the reader is positioned at a section header.

        A section header is either an ``.. index::`` directive or a title
        line underlined by a run of ``-`` or ``=`` at least as long as the
        title.
        """
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()   # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))

    def _strip(self, doc):
        """Remove leading and trailing blank lines from a list of lines."""
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc)-j]

    def _read_to_next_section(self):
        """Collect lines from the cursor up to the next section header (or EOF)."""
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        """Yield ``(section name, content lines)`` pairs from the docstring."""
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # NOTE(review): yielding the StopIteration *class* looks
                # suspect -- consumers that unpack each item (see `_parse`)
                # would raise on it.  Preserved as-is; confirm against
                # upstream numpydoc before changing.
                yield StopIteration
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content, single_element_is_type=False):
        """Parse a parameter-style section into a list of `Parameter` tuples.

        Each entry is a ``name : type`` header followed by an indented
        description.  A header without `` : `` is interpreted as a bare name,
        or as a bare type when `single_element_is_type` is True (used for
        Returns/Raises-style sections).
        """
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                if single_element_is_type:
                    arg_name, arg_type = '', header
                else:
                    arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)
            desc = strip_blank_lines(desc)

            params.append(Parameter(arg_name, arg_type, desc))

        return params

    # See also supports the following formats.
    #
    # <FUNCNAME>
    # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*
    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*

    # <FUNCNAME> is one of
    #   <PLAIN_FUNCNAME>
    #   COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
    # where
    #   <PLAIN_FUNCNAME> is a legal function name, and
    #   <ROLE> is any nonempty sequence of word characters.
    # Examples: func_f1  :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
    # <DESC> is a string describing the function.

    _role = r":(?P<role>\w+):"
    _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
    _funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
    _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
    # Second occurrence of the same pattern needs distinct group names.
    _funcnamenext = _funcname.replace('role', 'rolenext')
    _funcnamenext = _funcnamenext.replace('name', 'namenext')
    _description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
    _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
    _line_rgx = re.compile(
        r"^\s*" +
        r"(?P<allfuncs>" +        # group for all function names
        _funcname +
        r"(?P<morefuncs>([,]\s+" + _funcnamenext + r")*)" +
        r")" +                     # end of "allfuncs"
        # Some function lists have a trailing comma (or period)  '\s*'
        r"(?P<trailing>[,\.])?" +
        _description)

    # Empty <DESC> elements are replaced with '..'
    empty_description = '..'

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """

        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'."""
            m = self._func_rgx.match(text)
            if not m:
                raise ParseError("%s is not a item name" % text)
            role = m.group('role')
            name = m.group('name') if role else m.group('name2')
            return name, role, m.end()

        rest = []
        for line in content:
            if not line.strip():
                continue

            line_match = self._line_rgx.match(line)
            description = None
            if line_match:
                description = line_match.group('desc')
                if line_match.group('trailing') and description:
                    self._error_location(
                        'Unexpected comma or period after function list at '
                        'index %d of line "%s"' % (line_match.end('trailing'),
                                                   line),
                        error=False)
            if not description and line.startswith(' '):
                # Indented continuation of the previous item's description.
                rest.append(line.strip())
            elif line_match:
                funcs = []
                text = line_match.group('allfuncs')
                while True:
                    if not text.strip():
                        break
                    name, role, match_end = parse_item_name(text)
                    funcs.append((name, role))
                    text = text[match_end:].strip()
                    if text and text[0] == ',':
                        text = text[1:].strip()
                rest = list(filter(None, [description]))
                items.append((funcs, rest))
            else:
                raise ParseError("%s is not a item name" % line)
        return items

    def _parse_index(self, section, content):
        """
        .. index:: default
           :refguide: something, else, and more

        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        # If several signatures present, take the last one.
        # The regex is loop-invariant; compile it once outside the loop.
        compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
        while True:
            summary = self._doc.read_to_next_empty_line()
            summary_str = " ".join([s.strip() for s in summary]).strip()
            if compiled.match(summary_str):
                self['Signature'] = summary_str
                if not self._is_at_section():
                    continue
            break

        if summary is not None:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        """Parse the whole docstring into `self._parsed_data`.

        Raises
        ------
        ValueError
            If the docstring has both Returns and Yields sections, or a
            Receives section without Yields.
        """
        self._doc.reset()
        self._parse_summary()

        sections = list(self._read_sections())
        section_names = {section for section, content in sections}

        has_returns = 'Returns' in section_names
        has_yields = 'Yields' in section_names
        # We could do more tests, but we are not. Arbitrarily.
        if has_returns and has_yields:
            msg = 'Docstring contains both a Returns and Yields section.'
            raise ValueError(msg)
        if not has_yields and 'Receives' in section_names:
            msg = 'Docstring contains a Receives section but not Yields.'
            raise ValueError(msg)

        for (section, content) in sections:
            if not section.startswith('..'):
                # Normalize e.g. "parameters" -> "Parameters".
                section = (s.capitalize() for s in section.split(' '))
                section = ' '.join(section)
                if self.get(section):
                    self._error_location("The section %s appears twice"
                                         % section)

            if section in ('Parameters', 'Other Parameters', 'Attributes',
                           'Methods'):
                self[section] = self._parse_param_list(content)
            elif section in ('Returns', 'Yields', 'Raises', 'Warns',
                             'Receives'):
                self[section] = self._parse_param_list(
                    content, single_element_is_type=True)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    def _error_location(self, msg, error=True):
        """Raise `msg` as a ValueError, or emit it as a UserWarning.

        When the documented object is known (``self._obj``), its identity and
        source file are appended to the message.
        """
        if hasattr(self, '_obj'):
            # we know where the docs came from:
            try:
                filename = inspect.getsourcefile(self._obj)
            except TypeError:
                filename = None
            # BUG FIX: `filename` was computed but never interpolated into
            # the message (a literal placeholder was emitted instead).
            # Include it so the warning/error points at the source file.
            msg = msg + (f" in the docstring of {self._obj} in {filename}.")
        if error:
            raise ValueError(msg)
        else:
            warn(msg, stacklevel=3)

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        # Section title underlined with `symbol` to its full length.
        return [name, len(name)*symbol]

    def _str_indent(self, doc, indent=4):
        # Prefix every line with `indent` spaces.
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            # Escape '*' so reST does not treat it as emphasis.
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        """Render a parameter-style section back to lines."""
        out = []
        if self[name]:
            out += self._str_header(name)
            for param in self[name]:
                parts = []
                if param.name:
                    parts.append(param.name)
                if param.type:
                    parts.append(param.type)
                out += [' : '.join(parts)]
                if param.desc and ''.join(param.desc).strip():
                    out += self._str_indent(param.desc)
            out += ['']
        return out

    def _str_section(self, name):
        """Render a free-form section (header + raw lines)."""
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        """Render the See Also section, cross-referencing with `func_role`."""
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        out += ['']
        last_had_desc = True
        for funcs, desc in self['See Also']:
            assert isinstance(funcs, list)
            links = []
            for func, role in funcs:
                if role:
                    link = f':{role}:`{func}`'
                elif func_role:
                    link = f':{func_role}:`{func}`'
                else:
                    link = "`%s`_" % func
                links.append(link)
            link = ', '.join(links)
            out += [link]
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
                out += self._str_indent([self.empty_description])

        if last_had_desc:
            out += ['']
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        output_index = False
        default_index = idx.get('default', '')
        if default_index:
            output_index = True
        out += ['.. index:: %s' % default_index]
        for section, references in idx.items():
            if section == 'default':
                continue
            output_index = True
            out += ['   :{}: {}'.format(section, ', '.join(references))]
        if output_index:
            return out
        else:
            return ''

    def __str__(self, func_role=''):
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',
                           'Other Parameters', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        for param_list in ('Attributes', 'Methods'):
            out += self._str_param_list(param_list)
        out += self._str_index()
        return '\n'.join(out)
|
||||
|
||||
def indent(str, indent=4):
    """Indent every line of *str* by *indent* spaces.

    ``None`` yields a single line of bare padding.
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
||||
|
||||
|
||||
def dedent_lines(lines):
    """Deindent a list of lines maximally (common leading whitespace removed)."""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
||||
|
||||
|
||||
def header(text, style='-'):
    """Return *text* underlined with *style* characters, newline-terminated."""
    underline = style * len(text)
    return f"{text}\n{underline}\n"
||||
|
||||
|
||||
class FunctionDoc(NumpyDocString):
    """Parse the docstring of a callable.

    The docstring is taken from *doc* when given, otherwise from
    ``inspect.getdoc(func)``.
    """

    def __init__(self, func, role='func', doc=None, config={}):
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc, config)

    def get_func(self):
        """Return ``(callable, name)`` for the documented object.

        For classes, the most relevant callable is used: ``__call__`` if
        present, otherwise ``__init__``.
        """
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        """Render the parsed docstring, prefixed with a reST directive."""
        out = ''

        func, func_name = self.get_func()

        # Maps the short role names to the reST directive names.
        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                # Unknown roles are reported but still rendered (with an
                # empty directive name).
                print("Warning: invalid role %s" % self._role)
            out += '.. {}:: {}\n \n\n'.format(roles.get(self._role, ''),
                                              func_name)

        out += super().__str__(func_role=self._role)
        return out
||||
|
||||
|
||||
class ClassDoc(NumpyDocString):
    """Parse the docstring of a class and collect member documentation.

    When the parsed docstring has no explicit Methods/Attributes sections,
    they are populated from the class's public members (configurable via
    `config`).
    """

    # Dunder methods that are still treated as public for documentation.
    extra_public_methods = ['__call__']

    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        # Sphinx's ``:members:`` option uses a sentinel `ALL`; emulate it
        # with a fresh object when sphinx is not loaded so identity checks
        # below behave consistently.
        if 'sphinx' in sys.modules:
            from sphinx.ext.autodoc import ALL
        else:
            ALL = object()

        self.show_inherited_members = config.get(
                    'show_inherited_class_members', True)

        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        _members = config.get('members', [])
        if _members is ALL:
            # ALL means "no filtering"; represent that as None below.
            _members = None
        _exclude = config.get('exclude-members', [])

        if config.get('show_class_members', True) and _exclude is not ALL:
            def splitlines_x(s):
                # splitlines that maps falsy input to an empty list.
                if not s:
                    return []
                else:
                    return s.splitlines()
            for field, items in [('Methods', self.methods),
                                 ('Attributes', self.properties)]:
                # Only auto-populate sections the docstring left empty.
                if not self[field]:
                    doc_list = []
                    for name in sorted(items):
                        if (name in _exclude or
                                (_members and name not in _members)):
                            continue
                        try:
                            doc_item = pydoc.getdoc(getattr(self._cls, name))
                            doc_list.append(
                                Parameter(name, '', splitlines_x(doc_item)))
                        except AttributeError:
                            pass  # method doesn't exist
                    self[field] = doc_list

    @property
    def methods(self):
        """Names of public callable members (respecting member visibility)."""
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if ((not name.startswith('_')
                     or name in self.extra_public_methods)
                    and isinstance(func, Callable)
                    and self._is_show_member(name))]

    @property
    def properties(self):
        """Names of public data members: properties and data descriptors."""
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if (not name.startswith('_') and
                    (func is None or isinstance(func, property) or
                     inspect.isdatadescriptor(func))
                    and self._is_show_member(name))]

    def _is_show_member(self, name):
        # Decide whether a member should be documented, based on the
        # `show_inherited_class_members` configuration.
        if self.show_inherited_members:
            return True  # show all class members
        if name not in self._cls.__dict__:
            return False  # class member is inherited, we do not show it
        return True
||||
348
.CondaPkg/env/Lib/site-packages/scipy/_lib/_elementwise_iterative_method.py
vendored
Normal file
348
.CondaPkg/env/Lib/site-packages/scipy/_lib/_elementwise_iterative_method.py
vendored
Normal file
@@ -0,0 +1,348 @@
|
||||
# `_elementwise_iterative_method.py` includes tools for writing functions that
|
||||
# - are vectorized to work elementwise on arrays,
|
||||
# - implement non-trivial, iterative algorithms with a callback interface, and
|
||||
# - return rich objects with iteration count, termination status, etc.
|
||||
#
|
||||
# Examples include:
|
||||
# `scipy.optimize._chandrupatla._chandrupatla for scalar rootfinding,
|
||||
# `scipy.optimize._chandrupatla._chandrupatla_minimize for scalar minimization,
|
||||
# `scipy.optimize._differentiate._differentiate for numerical differentiation,
|
||||
# `scipy.optimize._bracket._bracket_root for finding rootfinding brackets,
|
||||
# `scipy.optimize._bracket._bracket_minimize for finding minimization brackets,
|
||||
# `scipy.integrate._tanhsinh._tanhsinh` for numerical quadrature.
|
||||
|
||||
import math
|
||||
import numpy as np
|
||||
from ._util import _RichResult, _call_callback_maybe_halt
|
||||
from ._array_api import array_namespace, size as xp_size
|
||||
|
||||
# Termination-status codes for the elementwise iterative-method framework.
# Negative values indicate abnormal termination; non-negative values are
# normal states.
_ESIGNERR = -1    # presumably a sign-condition error -- TODO confirm in callers
_ECONVERR = -2    # iteration limit reached without convergence (set in `_loop`)
_EVALUEERR = -3   # presumably an invalid function value -- TODO confirm in callers
_ECALLBACK = -4   # user callback requested early termination (set in `_loop`)
_EINPUTERR = -5   # presumably invalid input -- TODO confirm in callers
_ECONVERGED = 0   # converged; `work.status == 0` marks success in `_update_active`
_EINPROGRESS = 1  # initial status of every element while iterating
||||
|
||||
def _initialize(func, xs, args, complex_ok=False, preserve_shape=None):
    """Initialize abscissa, function, and args arrays for elementwise function

    Parameters
    ----------
    func : callable
        An elementwise function with signature

            func(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with ``x``.
    xs : tuple of arrays
        Finite real abscissa arrays. Must be broadcastable.
    args : tuple, optional
        Additional positional arguments to be passed to `func`.
    complex_ok : bool, default: False
        Whether a complex result dtype is permitted; when False, a non-real
        result dtype raises ``ValueError``.
    preserve_shape : bool, default: False
        When ``preserve_shape=False`` (default), `func` may be passed
        arguments of any shape; `_scalar_optimization_loop` is permitted
        to reshape and compress arguments at will. When
        ``preserve_shape=True``, arguments passed to `func` must have shape
        `shape` or ``shape + (n,)``, where ``n`` is any integer.

    Returns
    -------
    xs, fs, args : tuple of arrays
        Broadcasted, writeable, 1D abscissa and function value arrays (or
        NumPy floats, if appropriate). The dtypes of the `xs` and `fs` are
        `xfat`; the dtype of the `args` are unchanged.
    shape : tuple of ints
        Original shape of broadcasted arrays.
    xfat : NumPy dtype
        Result dtype of abscissae, function values, and args determined using
        `np.result_type`, except integer types are promoted to `np.float64`.

    Raises
    ------
    ValueError
        If the result dtype is not that of a real scalar

    Notes
    -----
    Useful for initializing the input of SciPy functions that accept
    an elementwise callable, abscissae, and arguments; e.g.
    `scipy.optimize._chandrupatla`.
    """
    nx = len(xs)
    xp = array_namespace(*xs)

    # Try to preserve `dtype`, but we need to ensure that the arguments are at
    # least floats before passing them into the function; integers can overflow
    # and cause failure.
    # There might be benefit to combining the `xs` into a single array and
    # calling `func` once on the combined array. For now, keep them separate.
    xas = xp.broadcast_arrays(*xs, *args)  # broadcast and rename
    xat = xp.result_type(*[xa.dtype for xa in xas])
    # Promote integer result types to the namespace's default float dtype.
    xat = xp.asarray(1.).dtype if xp.isdtype(xat, "integral") else xat
    xs, args = xas[:nx], xas[nx:]
    xs = [xp.asarray(x, dtype=xat) for x in xs]  # use copy=False when implemented
    fs = [xp.asarray(func(x, *args)) for x in xs]
    shape = xs[0].shape
    fshape = fs[0].shape

    if preserve_shape:
        # bind original shape/func now to avoid late-binding gotcha
        def func(x, *args, shape=shape, func=func, **kwargs):
            i = (0,)*(len(fshape) - len(shape))
            return func(x[i], *args, **kwargs)
        shape = np.broadcast_shapes(fshape, shape)  # just shapes; use of NumPy OK
        xs = [xp.broadcast_to(x, shape) for x in xs]
        args = [xp.broadcast_to(arg, shape) for arg in args]

    message = ("The shape of the array returned by `func` must be the same as "
               "the broadcasted shape of `x` and all other `args`.")
    if preserve_shape is not None:  # only in tanhsinh for now
        message = f"When `preserve_shape=False`, {message.lower()}"
    shapes_equal = [f.shape == shape for f in fs]
    if not all(shapes_equal):  # use Python all to reduce overhead
        raise ValueError(message)

    # These algorithms tend to mix the dtypes of the abscissae and function
    # values, so figure out what the result will be and convert them all to
    # that type from the outset.
    xfat = xp.result_type(*([f.dtype for f in fs] + [xat]))
    if not complex_ok and not xp.isdtype(xfat, "real floating"):
        raise ValueError("Abscissae and function output must be real numbers.")
    xs = [xp.asarray(x, dtype=xfat, copy=True) for x in xs]
    fs = [xp.asarray(f, dtype=xfat, copy=True) for f in fs]

    # To ensure that we can do indexing, we'll work with at least 1d arrays,
    # but remember the appropriate shape of the output.
    xs = [xp.reshape(x, (-1,)) for x in xs]
    fs = [xp.reshape(f, (-1,)) for f in fs]
    args = [xp.reshape(xp.asarray(arg, copy=True), (-1,)) for arg in args]
    return func, xs, fs, args, shape, xfat, xp
|
||||
|
||||
|
||||
def _loop(work, callback, shape, maxiter, func, args, dtype, pre_func_eval,
          post_func_eval, check_termination, post_termination_check,
          customize_result, res_work_pairs, xp, preserve_shape=False):
    """Main loop of a vectorized scalar optimization algorithm

    Parameters
    ----------
    work : _RichResult
        All variables that need to be retained between iterations. Must
        contain attributes `nit`, `nfev`, and `success`
    callback : callable
        User-specified callback function
    shape : tuple of ints
        The shape of all output arrays
    maxiter : int
        Maximum number of iterations of the algorithm
    func : callable
        The user-specified callable that is being optimized or solved
    args : tuple
        Additional positional arguments to be passed to `func`.
    dtype : NumPy dtype
        The common dtype of all abscissae and function values
    pre_func_eval : callable
        A function that accepts `work` and returns `x`, the active elements
        of `x` at which `func` will be evaluated. May modify attributes
        of `work` with any algorithmic steps that need to happen
        at the beginning of an iteration, before `func` is evaluated,
    post_func_eval : callable
        A function that accepts `x`, `func(x)`, and `work`. May modify
        attributes of `work` with any algorithmic steps that need to happen
        in the middle of an iteration, after `func` is evaluated but before
        the termination check.
    check_termination : callable
        A function that accepts `work` and returns `stop`, a boolean array
        indicating which of the active elements have met a termination
        condition.
    post_termination_check : callable
        A function that accepts `work`. May modify `work` with any algorithmic
        steps that need to happen after the termination check and before the
        end of the iteration.
    customize_result : callable
        A function that accepts `res` and `shape` and returns `shape`. May
        modify `res` (in-place) according to preferences (e.g. rearrange
        elements between attributes) and modify `shape` if needed.
    res_work_pairs : list of (str, str)
        Identifies correspondence between attributes of `res` and attributes
        of `work`; i.e., attributes of active elements of `work` will be
        copied to the appropriate indices of `res` when appropriate. The order
        determines the order in which _RichResult attributes will be
        pretty-printed.
    xp : module
        The array-API-compatible namespace used for all array operations.
    preserve_shape : bool, default: False
        When True, `work` arrays are not compressed as elements converge;
        element shapes are preserved throughout (see `_check_termination`).

    Returns
    -------
    res : _RichResult
        The final result object

    Notes
    -----
    Besides providing structure, this framework provides several important
    services for a vectorized optimization algorithm.

    - It handles common tasks involving iteration count, function evaluation
      count, a user-specified callback, and associated termination conditions.
    - It compresses the attributes of `work` to eliminate unnecessary
      computation on elements that have already converged.

    """
    if xp is None:
        raise NotImplementedError("Must provide xp.")

    cb_terminate = False

    # Initialize the result object and active element index array
    n_elements = math.prod(shape)
    active = xp.arange(n_elements)  # in-progress element indices
    res_dict = {i: xp.zeros(n_elements, dtype=dtype) for i, j in res_work_pairs}
    res_dict['success'] = xp.zeros(n_elements, dtype=xp.bool)
    res_dict['status'] = xp.full(n_elements, _EINPROGRESS, dtype=xp.int32)
    res_dict['nit'] = xp.zeros(n_elements, dtype=xp.int32)
    res_dict['nfev'] = xp.zeros(n_elements, dtype=xp.int32)
    res = _RichResult(res_dict)
    work.args = args

    # Elements may already satisfy a termination condition before iterating.
    active = _check_termination(work, res, res_work_pairs, active,
                                check_termination, preserve_shape, xp)

    if callback is not None:
        temp = _prepare_result(work, res, res_work_pairs, active, shape,
                               customize_result, preserve_shape, xp)
        if _call_callback_maybe_halt(callback, temp):
            cb_terminate = True

    while work.nit < maxiter and xp_size(active) and not cb_terminate and n_elements:
        x = pre_func_eval(work)

        if work.args and work.args[0].ndim != x.ndim:
            # `x` always starts as 1D. If the SciPy function that uses
            # _loop added dimensions to `x`, we need to
            # add them to the elements of `args`.
            args = []
            for arg in work.args:
                n_new_dims = x.ndim - arg.ndim
                new_shape = arg.shape + (1,)*n_new_dims
                args.append(xp.reshape(arg, new_shape))
            work.args = args

        x_shape = x.shape
        if preserve_shape:
            # Present `x` to `func` in its original shape (plus a trailing
            # axis), then restore the working shape afterwards.
            x = xp.reshape(x, (shape + (-1,)))
        f = func(x, *work.args)
        f = xp.asarray(f, dtype=dtype)
        if preserve_shape:
            x = xp.reshape(x, x_shape)
            f = xp.reshape(f, x_shape)
        # One evaluation per element, or per trailing-axis column when `x`
        # has been given extra dimensions.
        work.nfev += 1 if x.ndim == 1 else x.shape[-1]

        post_func_eval(x, f, work)

        work.nit += 1
        active = _check_termination(work, res, res_work_pairs, active,
                                    check_termination, preserve_shape, xp)

        if callback is not None:
            temp = _prepare_result(work, res, res_work_pairs, active, shape,
                                   customize_result, preserve_shape, xp)
            if _call_callback_maybe_halt(callback, temp):
                cb_terminate = True
                break
        if xp_size(active) == 0:
            break

        post_termination_check(work)

    # Any elements still in `work` did not converge: mark them with the
    # appropriate non-convergence status.
    work.status[:] = _ECALLBACK if cb_terminate else _ECONVERR
    return _prepare_result(work, res, res_work_pairs, active, shape,
                           customize_result, preserve_shape, xp)
|
||||
|
||||
|
||||
def _check_termination(work, res, res_work_pairs, active, check_termination,
                       preserve_shape, xp):
    # Checks termination conditions, updates elements of `res` with
    # corresponding elements of `work`, and compresses `work`.
    # Returns the (possibly reduced) array of still-active element indices.

    stop = check_termination(work)

    if xp.any(stop):
        # update the active elements of the result object with the active
        # elements for which a termination condition has been met
        _update_active(work, res, res_work_pairs, active, stop, preserve_shape, xp)

        if preserve_shape:
            # `stop` spans all elements; restrict it to the active ones so
            # it lines up with `active` below.
            stop = stop[active]

        proceed = ~stop
        active = active[proceed]

        if not preserve_shape:
            # compress the arrays to avoid unnecessary computation
            for key, val in work.items():
                # Need to find a better way than these try/excepts
                # Somehow need to keep compressible numerical args separate
                if key == 'args':
                    continue
                try:
                    work[key] = val[proceed]
                except (IndexError, TypeError, KeyError):  # not a compressible array
                    work[key] = val
            work.args = [arg[proceed] for arg in work.args]

    return active
|
||||
|
||||
|
||||
def _update_active(work, res, res_work_pairs, active, mask, preserve_shape, xp):
    # Update `active` indices of the arrays in result object `res` with the
    # contents of the scalars and arrays in `update_dict`. When provided,
    # `mask` is a boolean array applied both to the arrays in `update_dict`
    # that are to be used and to the arrays in `res` that are to be updated.
    update_dict = {key1: work[key2] for key1, key2 in res_work_pairs}
    # Status 0 (_ECONVERGED) is the definition of success.
    update_dict['success'] = work.status == 0

    if mask is not None:
        if preserve_shape:
            # Build a full-length boolean mask that is True only for active
            # elements that also satisfy `mask`.
            active_mask = xp.zeros_like(mask)
            active_mask[active] = 1
            active_mask = active_mask & mask
            for key, val in update_dict.items():
                try:
                    res[key][active_mask] = val[active_mask]
                except (IndexError, TypeError, KeyError):
                    # `val` is a scalar or otherwise not indexable by mask.
                    res[key][active_mask] = val
        else:
            # `work` arrays are compressed to the active set, so `mask`
            # indexes them directly; `active[mask]` maps back to `res`.
            active_mask = active[mask]
            for key, val in update_dict.items():
                try:
                    res[key][active_mask] = val[mask]
                except (IndexError, TypeError, KeyError):
                    res[key][active_mask] = val
    else:
        # No mask: copy every active element.
        for key, val in update_dict.items():
            if preserve_shape:
                try:
                    val = val[active]
                except (IndexError, TypeError, KeyError):
                    pass
            res[key][active] = val
|
||||
|
||||
|
||||
def _prepare_result(work, res, res_work_pairs, active, shape, customize_result,
                    preserve_shape, xp):
    # Prepare the result object `res` by creating a copy, copying the latest
    # data from work, running the provided result customization function,
    # and reshaping the data to the original shapes.
    res = res.copy()
    _update_active(work, res, res_work_pairs, active, None, preserve_shape, xp)

    # `customize_result` may rearrange `res` in place and change the shape.
    shape = customize_result(res, shape)

    for key, val in res.items():
        # this looks like it won't work for xp != np if val is not numeric
        temp = xp.reshape(val, shape)
        # Convert 0-d arrays to scalars for a friendlier result object.
        res[key] = temp[()] if temp.ndim == 0 else temp

    # Controls the pretty-print ordering of _RichResult attributes.
    res['_order_keys'] = ['success'] + [i for i, j in res_work_pairs]
    return _RichResult(**res)
|
||||
145
.CondaPkg/env/Lib/site-packages/scipy/_lib/_finite_differences.py
vendored
Normal file
145
.CondaPkg/env/Lib/site-packages/scipy/_lib/_finite_differences.py
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
from numpy import arange, newaxis, hstack, prod, array
|
||||
|
||||
|
||||
def _central_diff_weights(Np, ndiv=1):
|
||||
"""
|
||||
Return weights for an Np-point central derivative.
|
||||
|
||||
Assumes equally-spaced function points.
|
||||
|
||||
If weights are in the vector w, then
|
||||
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
Np : int
|
||||
Number of points for the central derivative.
|
||||
ndiv : int, optional
|
||||
Number of divisions. Default is 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
w : ndarray
|
||||
Weights for an Np-point central derivative. Its size is `Np`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Can be inaccurate for a large number of points.
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can calculate a derivative value of a function.
|
||||
|
||||
>>> def f(x):
|
||||
... return 2 * x**2 + 3
|
||||
>>> x = 3.0 # derivative point
|
||||
>>> h = 0.1 # differential step
|
||||
>>> Np = 3 # point number for central derivative
|
||||
>>> weights = _central_diff_weights(Np) # weights for first derivative
|
||||
>>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
|
||||
>>> sum(w * v for (w, v) in zip(weights, vals))/h
|
||||
11.79999999999998
|
||||
|
||||
This value is close to the analytical solution:
|
||||
f'(x) = 4x, so f'(3) = 12
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Finite_difference
|
||||
|
||||
"""
|
||||
if Np < ndiv + 1:
|
||||
raise ValueError(
|
||||
"Number of points must be at least the derivative order + 1."
|
||||
)
|
||||
if Np % 2 == 0:
|
||||
raise ValueError("The number of points must be odd.")
|
||||
from scipy import linalg
|
||||
|
||||
ho = Np >> 1
|
||||
x = arange(-ho, ho + 1.0)
|
||||
x = x[:, newaxis]
|
||||
X = x**0.0
|
||||
for k in range(1, Np):
|
||||
X = hstack([X, x**k])
|
||||
w = prod(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv]
|
||||
return w
|
||||
|
||||
|
||||
def _derivative(func, x0, dx=1.0, n=1, args=(), order=3):
|
||||
"""
|
||||
Find the nth derivative of a function at a point.
|
||||
|
||||
Given a function, use a central difference formula with spacing `dx` to
|
||||
compute the nth derivative at `x0`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
func : function
|
||||
Input function.
|
||||
x0 : float
|
||||
The point at which the nth derivative is found.
|
||||
dx : float, optional
|
||||
Spacing.
|
||||
n : int, optional
|
||||
Order of the derivative. Default is 1.
|
||||
args : tuple, optional
|
||||
Arguments
|
||||
order : int, optional
|
||||
Number of points to use, must be odd.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Decreasing the step size too small can result in round-off error.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> def f(x):
|
||||
... return x**3 + x**2
|
||||
>>> _derivative(f, 1.0, dx=1e-6)
|
||||
4.9999999999217337
|
||||
|
||||
"""
|
||||
if order < n + 1:
|
||||
raise ValueError(
|
||||
"'order' (the number of points used to compute the derivative), "
|
||||
"must be at least the derivative order 'n' + 1."
|
||||
)
|
||||
if order % 2 == 0:
|
||||
raise ValueError(
|
||||
"'order' (the number of points used to compute the derivative) "
|
||||
"must be odd."
|
||||
)
|
||||
# pre-computed for n=1 and 2 and low-order for speed.
|
||||
if n == 1:
|
||||
if order == 3:
|
||||
weights = array([-1, 0, 1]) / 2.0
|
||||
elif order == 5:
|
||||
weights = array([1, -8, 0, 8, -1]) / 12.0
|
||||
elif order == 7:
|
||||
weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
|
||||
elif order == 9:
|
||||
weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
|
||||
else:
|
||||
weights = _central_diff_weights(order, 1)
|
||||
elif n == 2:
|
||||
if order == 3:
|
||||
weights = array([1, -2.0, 1])
|
||||
elif order == 5:
|
||||
weights = array([-1, 16, -30, 16, -1]) / 12.0
|
||||
elif order == 7:
|
||||
weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
|
||||
elif order == 9:
|
||||
weights = (
|
||||
array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9])
|
||||
/ 5040.0
|
||||
)
|
||||
else:
|
||||
weights = _central_diff_weights(order, 2)
|
||||
else:
|
||||
weights = _central_diff_weights(order, n)
|
||||
val = 0.0
|
||||
ho = order >> 1
|
||||
for k in range(order):
|
||||
val += weights[k] * func(x0 + (k - ho) * dx, *args)
|
||||
return val / prod((dx,) * n, axis=0)
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_fpumode.cp312-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_fpumode.cp312-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_fpumode.cp312-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_fpumode.cp312-win_amd64.pyd
vendored
Normal file
Binary file not shown.
105
.CondaPkg/env/Lib/site-packages/scipy/_lib/_gcutils.py
vendored
Normal file
105
.CondaPkg/env/Lib/site-packages/scipy/_lib/_gcutils.py
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
"""
|
||||
Module for testing automatic garbage collection of objects
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
set_gc_state - enable or disable garbage collection
|
||||
gc_state - context manager for given state of garbage collector
|
||||
assert_deallocated - context manager to check for circular references on object
|
||||
|
||||
"""
|
||||
import weakref
|
||||
import gc
|
||||
|
||||
from contextlib import contextmanager
|
||||
from platform import python_implementation
|
||||
|
||||
__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
|
||||
|
||||
|
||||
IS_PYPY = python_implementation() == 'PyPy'
|
||||
|
||||
|
||||
class ReferenceError(AssertionError):
    """Raised when an object unexpectedly survives deallocation.

    NOTE: deliberately shadows the builtin ``ReferenceError`` within this
    module.  It derives from ``AssertionError`` so that test frameworks
    treat it as a failed assertion.
    """
    pass
|
||||
|
||||
|
||||
def set_gc_state(state):
    """Enable (``state=True``) or disable (``state=False``) the collector."""
    # No-op when the collector is already in the requested state.
    if gc.isenabled() == state:
        return
    (gc.enable if state else gc.disable)()
|
||||
|
||||
|
||||
@contextmanager
def gc_state(state):
    """ Context manager to set state of garbage collector to `state`

    Parameters
    ----------
    state : bool
        True for gc enabled, False for disabled

    Examples
    --------
    >>> with gc_state(False):
    ...     assert not gc.isenabled()
    >>> with gc_state(True):
    ...     assert gc.isenabled()
    """
    orig_state = gc.isenabled()
    set_gc_state(state)
    try:
        yield
    finally:
        # BUGFIX: restore the original collector state even if the managed
        # body raised; previously an exception inside the block permanently
        # left gc in the modified state.
        set_gc_state(orig_state)
|
||||
|
||||
|
||||
@contextmanager
def assert_deallocated(func, *args, **kwargs):
    """Context manager to check that object is deallocated

    This is useful for checking that an object can be freed directly by
    reference counting, without requiring gc to break reference cycles.
    GC is disabled inside the context manager.

    This check is not available on PyPy.

    Parameters
    ----------
    func : callable
        Callable to create object to check
    \\*args : sequence
        positional arguments to `func` in order to create object to check
    \\*\\*kwargs : dict
        keyword arguments to `func` in order to create object to check

    Examples
    --------
    >>> class C: pass
    >>> with assert_deallocated(C) as c:
    ...     # do something
    ...     del c

    >>> class C:
    ...     def __init__(self):
    ...         self._circular = self # Make circular reference
    >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
    ...     # do something
    ...     del c
    Traceback (most recent call last):
        ...
    ReferenceError: Remaining reference(s) to object
    """
    # Refcount-based deallocation is CPython-specific; PyPy frees lazily.
    if IS_PYPY:
        raise RuntimeError("assert_deallocated is unavailable on PyPy")

    # Disable gc so that only pure reference counting can free the object.
    with gc_state(False):
        obj = func(*args, **kwargs)
        # Weak reference used to observe the object without keeping it alive.
        ref = weakref.ref(obj)
        yield obj
        # After the caller has (presumably) dropped its references, delete
        # ours; if the weakref still resolves, a (circular) reference remains.
        del obj
        if ref() is not None:
            raise ReferenceError("Remaining reference(s) to object")
|
||||
487
.CondaPkg/env/Lib/site-packages/scipy/_lib/_pep440.py
vendored
Normal file
487
.CondaPkg/env/Lib/site-packages/scipy/_lib/_pep440.py
vendored
Normal file
@@ -0,0 +1,487 @@
|
||||
"""Utility to compare pep440 compatible version strings.
|
||||
|
||||
The LooseVersion and StrictVersion classes that distutils provides don't
|
||||
work; they don't recognize anything like alpha/beta/rc/dev versions.
|
||||
"""
|
||||
|
||||
# Copyright (c) Donald Stufft and individual contributors.
|
||||
# All rights reserved.
|
||||
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
|
||||
# 1. Redistributions of source code must retain the above copyright notice,
|
||||
# this list of conditions and the following disclaimer.
|
||||
|
||||
# 2. Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import collections
|
||||
import itertools
|
||||
import re
|
||||
|
||||
|
||||
__all__ = [
|
||||
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
|
||||
]
|
||||
|
||||
|
||||
# BEGIN packaging/_structures.py
|
||||
|
||||
|
||||
class Infinity:
    """Sentinel that compares strictly greater than every other value."""

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        # Only equal to the (sole) other instance of this class.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return False

    __le__ = __lt__

    def __gt__(self, other):
        return True

    __ge__ = __gt__

    def __neg__(self):
        return NegativeInfinity


# Replace the class by its unique instance; only the singleton is ever used.
Infinity = Infinity()
|
||||
|
||||
|
||||
class NegativeInfinity:
    """Sentinel that compares strictly less than every other value."""

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        # Only equal to the (sole) other instance of this class.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return True

    __le__ = __lt__

    def __gt__(self, other):
        return False

    __ge__ = __gt__

    def __neg__(self):
        return Infinity


# BEGIN packaging/version.py


# Replace the class by its unique instance; only the singleton is ever used.
NegativeInfinity = NegativeInfinity()
|
||||
|
||||
# Lightweight container for the parsed components of a PEP 440 version.
# `epoch` and `release` are always populated; `dev`, `pre`, `post`, and
# `local` are None when the corresponding segment is absent.
_Version = collections.namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)
|
||||
|
||||
|
||||
def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        parsed = Version(version)
    except InvalidVersion:
        # Not PEP 440 compliant; fall back to the permissive legacy scheme.
        parsed = LegacyVersion(version)
    return parsed
|
||||
|
||||
|
||||
class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.

    Raised by :class:`Version` when the string does not match the PEP 440
    grammar; :func:`parse` catches it to fall back to LegacyVersion.
    """
|
||||
|
||||
|
||||
class _BaseVersion:
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self._key)
|
||||
|
||||
def __lt__(self, other):
|
||||
return self._compare(other, lambda s, o: s < o)
|
||||
|
||||
def __le__(self, other):
|
||||
return self._compare(other, lambda s, o: s <= o)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self._compare(other, lambda s, o: s == o)
|
||||
|
||||
def __ge__(self, other):
|
||||
return self._compare(other, lambda s, o: s >= o)
|
||||
|
||||
def __gt__(self, other):
|
||||
return self._compare(other, lambda s, o: s > o)
|
||||
|
||||
def __ne__(self, other):
|
||||
return self._compare(other, lambda s, o: s != o)
|
||||
|
||||
def _compare(self, other, method):
|
||||
if not isinstance(other, _BaseVersion):
|
||||
return NotImplemented
|
||||
|
||||
return method(self._key, other._key)
|
||||
|
||||
|
||||
class LegacyVersion(_BaseVersion):
    """A non-PEP-440 version string; sorts before all PEP 440 versions."""

    def __init__(self, version):
        self._version = str(version)
        # Sort key from the setuptools-era legacy scheme.
        self._key = _legacy_cmpkey(self._version)

    def __repr__(self):
        return f"<LegacyVersion({repr(str(self))})>"

    def __str__(self):
        return self._version

    @property
    def public(self):
        # Legacy versions have no local segment, so the whole string is public.
        return self._version

    @property
    def base_version(self):
        return self._version

    @property
    def local(self):
        # Legacy versions never carry a '+local' suffix.
        return None

    @property
    def is_prerelease(self):
        return False

    @property
    def is_postrelease(self):
        return False
|
||||
|
||||
|
||||
# Tokenizer for legacy version strings: splits into digit runs, letter runs,
# and the separators '.' / '-' (re.VERBOSE makes the spaces insignificant).
_legacy_version_component_re = re.compile(
    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)

# Spelling normalization: 'pre'/'preview'/'rc' collapse to 'c'; '@' sorts
# before any alphanumeric tag so 'dev' releases order first; '-' marks a
# final release boundary.
_legacy_version_replacement_map = {
    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
|
||||
|
||||
|
||||
def _parse_version_parts(s):
    """Yield normalized, string-sortable chunks of a legacy version string."""
    for piece in _legacy_version_component_re.split(s):
        piece = _legacy_version_replacement_map.get(piece, piece)

        # Skip empty split artifacts and the '.' separators.
        if not piece or piece == ".":
            continue

        if piece[:1] in "0123456789":
            # Zero-pad so numeric chunks compare correctly as strings.
            yield piece.zfill(8)
        else:
            # '*' sorts before digits, putting tags ahead of numbers.
            yield "*" + piece

    # Sentinel ensuring alpha/beta/candidate tags sort before final releases.
    yield "*final"
|
||||
|
||||
|
||||
def _legacy_cmpkey(version):
    """Build the sort key for a legacy (non-PEP-440) version string."""
    # A hardcoded epoch of -1 orders every legacy version before any PEP 440
    # version, whose epoch is always >= 0.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior
    # to its adoption of the packaging library.
    key_parts = []
    for chunk in _parse_version_parts(version.lower()):
        if chunk.startswith("*"):
            if chunk < "*final":
                # Drop the "-" marker preceding a prerelease tag.
                while key_parts and key_parts[-1] == "*final-":
                    key_parts.pop()

            # Trailing zeros in a run of numeric parts are insignificant.
            while key_parts and key_parts[-1] == "00000000":
                key_parts.pop()

        key_parts.append(chunk)

    return epoch, tuple(key_parts)
|
||||
|
||||
|
||||
# Deliberately not anchored to the start and end of the string, to make it
|
||||
# easier for 3rd party code to reuse
|
||||
VERSION_PATTERN = r"""
|
||||
v?
|
||||
(?:
|
||||
(?:(?P<epoch>[0-9]+)!)? # epoch
|
||||
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
|
||||
(?P<pre> # pre-release
|
||||
[-_\.]?
|
||||
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
|
||||
[-_\.]?
|
||||
(?P<pre_n>[0-9]+)?
|
||||
)?
|
||||
(?P<post> # post release
|
||||
(?:-(?P<post_n1>[0-9]+))
|
||||
|
|
||||
(?:
|
||||
[-_\.]?
|
||||
(?P<post_l>post|rev|r)
|
||||
[-_\.]?
|
||||
(?P<post_n2>[0-9]+)?
|
||||
)
|
||||
)?
|
||||
(?P<dev> # dev release
|
||||
[-_\.]?
|
||||
(?P<dev_l>dev)
|
||||
[-_\.]?
|
||||
(?P<dev_n>[0-9]+)?
|
||||
)?
|
||||
)
|
||||
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
|
||||
"""
|
||||
|
||||
|
||||
class Version(_BaseVersion):
    """A parsed, comparable PEP 440 compliant version."""

    # Anchored form of VERSION_PATTERN; whitespace-tolerant, case-insensitive.
    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # Two syntaxes: '1.0-1' (post_n1) or '1.0.post1' (post_n2).
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return f"<Version({repr(str(self))})>"

    def __str__(self):
        """Render the canonical (normalized) string form of the version."""
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append(f"{self._version.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(f".post{self._version.post[1]}")

        # Development release
        if self._version.dev is not None:
            parts.append(f".dev{self._version.dev[1]}")

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything before the '+local' suffix.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch + release digits only: no pre/post/dev/local segments.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append(f"{self._version.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        # The '+local' suffix as a string, or None when absent.
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # Both dev- and pre-releases count as prereleases.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
|
||||
|
||||
|
||||
def _parse_letter_version(letter, number):
|
||||
if letter:
|
||||
# We assume there is an implicit 0 in a pre-release if there is
|
||||
# no numeral associated with it.
|
||||
if number is None:
|
||||
number = 0
|
||||
|
||||
# We normalize any letters to their lower-case form
|
||||
letter = letter.lower()
|
||||
|
||||
# We consider some words to be alternate spellings of other words and
|
||||
# in those cases we want to normalize the spellings to our preferred
|
||||
# spelling.
|
||||
if letter == "alpha":
|
||||
letter = "a"
|
||||
elif letter == "beta":
|
||||
letter = "b"
|
||||
elif letter in ["c", "pre", "preview"]:
|
||||
letter = "rc"
|
||||
elif letter in ["rev", "r"]:
|
||||
letter = "post"
|
||||
|
||||
return letter, int(number)
|
||||
if not letter and number:
|
||||
# We assume that if we are given a number but not given a letter,
|
||||
# then this is using the implicit post release syntax (e.g., 1.0-1)
|
||||
letter = "post"
|
||||
|
||||
return letter, int(number)
|
||||
|
||||
|
||||
_local_version_seperators = re.compile(r"[\._-]")
|
||||
|
||||
|
||||
def _parse_local_version(local):
|
||||
"""
|
||||
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
|
||||
"""
|
||||
if local is not None:
|
||||
return tuple(
|
||||
part.lower() if not part.isdigit() else int(part)
|
||||
for part in _local_version_seperators.split(local)
|
||||
)
|
||||
|
||||
|
||||
def _cmpkey(epoch, release, pre, post, dev, local):
    """Build the total-ordering sort key for a parsed PEP 440 version."""
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non-zero, then take the rest,
    # re-reverse it back into the correct order, and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre-segment, but we _only_ want to do this
    # if there is no pre- or a post-segment. If we have one of those, then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post-segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alphanumeric segments sort before numeric segments
        # - Alphanumeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_ccallback.cp312-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_ccallback.cp312-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_ccallback.cp312-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_ccallback.cp312-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_call.cp312-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_call.cp312-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_call.cp312-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_call.cp312-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_def.cp312-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_def.cp312-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_def.cp312-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_test_deprecation_def.cp312-win_amd64.pyd
vendored
Normal file
Binary file not shown.
337
.CondaPkg/env/Lib/site-packages/scipy/_lib/_testutils.py
vendored
Normal file
337
.CondaPkg/env/Lib/site-packages/scipy/_lib/_testutils.py
vendored
Normal file
@@ -0,0 +1,337 @@
|
||||
"""
|
||||
Generic test utilities.
|
||||
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import sysconfig
|
||||
from importlib.util import module_from_spec, spec_from_file_location
|
||||
|
||||
import numpy as np
|
||||
import scipy
|
||||
|
||||
# Optional Cython: the Cython API tests are skipped (cython = None) when the
# package is missing or older than the minimum supported version.
try:
    # Need type: ignore[import-untyped] for mypy >= 1.6
    import cython  # type: ignore[import-untyped]
    from Cython.Compiler.Version import (  # type: ignore[import-untyped]
        version as cython_version,
    )
except ImportError:
    cython = None
else:
    from scipy._lib import _pep440
    # Minimum Cython version required for the Cython API tests.
    required_version = '3.0.8'
    if _pep440.parse(cython_version) < _pep440.Version(required_version):
        # too old or wrong cython, skip Cython API tests
        cython = None
|
||||
|
||||
|
||||
__all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc', 'IS_MUSL']


# True when running on a musl-libc (e.g. Alpine Linux) build of CPython,
# detected from the compile-time host triple.
IS_MUSL = False
# alternate way is
# from packaging.tags import sys_tags
# _tags = list(sys_tags())
# if 'musllinux' in _tags[0].platform:
_v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
if 'musl' in _v:
    IS_MUSL = True


# True when scipy was installed in editable (development) mode.
IS_EDITABLE = 'editable' in scipy.__path__[0]
|
||||
|
||||
|
||||
class FPUModeChangeWarning(RuntimeWarning):
    """Warning about FPU mode change"""
    # Emitted when a test unexpectedly alters the floating-point control mode.
    pass
|
||||
|
||||
|
||||
class PytestTester:
    """
    Run tests for this namespace

    ``scipy.test()`` runs tests for all of SciPy, with the default settings.
    When used from a submodule (e.g., ``scipy.cluster.test()``, only the tests
    for that namespace are run.

    Parameters
    ----------
    label : {'fast', 'full'}, optional
        Whether to run only the fast tests, or also those marked as slow.
        Default is 'fast'.
    verbose : int, optional
        Test output verbosity. Default is 1.
    extra_argv : list, optional
        Arguments to pass through to Pytest.
    doctests : bool, optional
        Whether to run doctests or not. Default is False.
    coverage : bool, optional
        Whether to run tests with code coverage measurements enabled.
        Default is False.
    tests : list of str, optional
        List of module names to run tests for. By default, uses the module
        from which the ``test`` function is called.
    parallel : int, optional
        Run tests in parallel with pytest-xdist, if number given is larger than
        1. Default is 1.

    """
    def __init__(self, module_name):
        # Dotted module path whose tests this instance will run.
        self.module_name = module_name

    def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
                 coverage=False, tests=None, parallel=None):
        # Build a pytest command line from the keyword options and run it.
        # Returns True when the pytest run succeeded (exit code 0).
        import pytest

        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        pytest_args = ['--showlocals', '--tb=short']

        if doctests:
            # Modules known to break under --doctest-modules are excluded.
            pytest_args += [
                "--doctest-modules",
                "--ignore=scipy/interpolate/_interpnd_info.py",
                "--ignore=scipy/_lib/array_api_compat",
                "--ignore=scipy/_lib/highs",
                "--ignore=scipy/_lib/unuran",
                "--ignore=scipy/_lib/_gcutils.py",
                "--ignore=scipy/_lib/doccer.py",
                "--ignore=scipy/_lib/_uarray",
            ]

        if extra_argv:
            pytest_args += list(extra_argv)

        if verbose and int(verbose) > 1:
            # verbose=2 -> -v, verbose=3 -> -vv, ...
            pytest_args += ["-" + "v"*(int(verbose)-1)]

        if coverage:
            pytest_args += ["--cov=" + module_path]

        if label == "fast":
            pytest_args += ["-m", "not slow"]
        elif label != "full":
            # Any other label is treated as a pytest marker expression.
            pytest_args += ["-m", label]

        if tests is None:
            tests = [self.module_name]

        if parallel is not None and parallel > 1:
            if _pytest_has_xdist():
                pytest_args += ['-n', str(parallel)]
            else:
                import warnings
                warnings.warn('Could not run tests in parallel because '
                              'pytest-xdist plugin is not available.',
                              stacklevel=2)

        pytest_args += ['--pyargs'] + list(tests)

        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            # pytest.main may exit via SystemExit; use its code as the result.
            code = exc.code

        return (code == 0)
|
||||
|
||||
|
||||
class _TestPythranFunc:
|
||||
'''
|
||||
These are situations that can be tested in our pythran tests:
|
||||
- A function with multiple array arguments and then
|
||||
other positional and keyword arguments.
|
||||
- A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`.
|
||||
Note: list/tuple input is not yet tested!
|
||||
|
||||
`self.arguments`: A dictionary which key is the index of the argument,
|
||||
value is tuple(array value, all supported dtypes)
|
||||
`self.partialfunc`: A function used to freeze some non-array argument
|
||||
that of no interests in the original function
|
||||
'''
|
||||
ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
|
||||
ALL_FLOAT = [np.float32, np.float64]
|
||||
ALL_COMPLEX = [np.complex64, np.complex128]
|
||||
|
||||
def setup_method(self):
|
||||
self.arguments = {}
|
||||
self.partialfunc = None
|
||||
self.expected = None
|
||||
|
||||
def get_optional_args(self, func):
|
||||
# get optional arguments with its default value,
|
||||
# used for testing keywords
|
||||
signature = inspect.signature(func)
|
||||
optional_args = {}
|
||||
for k, v in signature.parameters.items():
|
||||
if v.default is not inspect.Parameter.empty:
|
||||
optional_args[k] = v.default
|
||||
return optional_args
|
||||
|
||||
def get_max_dtype_list_length(self):
|
||||
# get the max supported dtypes list length in all arguments
|
||||
max_len = 0
|
||||
for arg_idx in self.arguments:
|
||||
cur_len = len(self.arguments[arg_idx][1])
|
||||
if cur_len > max_len:
|
||||
max_len = cur_len
|
||||
return max_len
|
||||
|
||||
def get_dtype(self, dtype_list, dtype_idx):
|
||||
# get the dtype from dtype_list via index
|
||||
# if the index is out of range, then return the last dtype
|
||||
if dtype_idx > len(dtype_list)-1:
|
||||
return dtype_list[-1]
|
||||
else:
|
||||
return dtype_list[dtype_idx]
|
||||
|
||||
def test_all_dtypes(self):
|
||||
for type_idx in range(self.get_max_dtype_list_length()):
|
||||
args_array = []
|
||||
for arg_idx in self.arguments:
|
||||
new_dtype = self.get_dtype(self.arguments[arg_idx][1],
|
||||
type_idx)
|
||||
args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
|
||||
self.pythranfunc(*args_array)
|
||||
|
||||
def test_views(self):
|
||||
args_array = []
|
||||
for arg_idx in self.arguments:
|
||||
args_array.append(self.arguments[arg_idx][0][::-1][::-1])
|
||||
self.pythranfunc(*args_array)
|
||||
|
||||
def test_strided(self):
|
||||
args_array = []
|
||||
for arg_idx in self.arguments:
|
||||
args_array.append(np.repeat(self.arguments[arg_idx][0],
|
||||
2, axis=0)[::2])
|
||||
self.pythranfunc(*args_array)
|
||||
|
||||
|
||||
def _pytest_has_xdist():
|
||||
"""
|
||||
Check if the pytest-xdist plugin is installed, providing parallel tests
|
||||
"""
|
||||
# Check xdist exists without importing, otherwise pytests emits warnings
|
||||
from importlib.util import find_spec
|
||||
return find_spec('xdist') is not None
|
||||
|
||||
|
||||
def check_free_memory(free_mb):
    """
    Check *free_mb* of memory is available, otherwise do pytest.skip
    """
    import pytest

    env_val = os.environ.get('SCIPY_AVAILABLE_MEM')
    if env_val is not None:
        # Explicit override wins over auto-detection.
        mem_free = _parse_size(env_val)
        msg = (f'{free_mb} MB memory required, but environment '
               f'SCIPY_AVAILABLE_MEM={env_val}')
    else:
        mem_free = _get_mem_available()
        if mem_free is None:
            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
                        "variable to free memory in MB to run the test.")
        msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available'

    if mem_free < free_mb * 1e6:
        pytest.skip(msg)
|
||||
|
||||
|
||||
def _parse_size(size_str):
|
||||
suffixes = {'': 1e6,
|
||||
'b': 1.0,
|
||||
'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
|
||||
'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
|
||||
'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
|
||||
m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())),
|
||||
size_str,
|
||||
re.I)
|
||||
if not m or m.group(2) not in suffixes:
|
||||
raise ValueError("Invalid size string")
|
||||
|
||||
return float(m.group(1)) * suffixes[m.group(2)]
|
||||
|
||||
|
||||
def _get_mem_available():
|
||||
"""
|
||||
Get information about memory available, not counting swap.
|
||||
"""
|
||||
try:
|
||||
import psutil
|
||||
return psutil.virtual_memory().available
|
||||
except (ImportError, AttributeError):
|
||||
pass
|
||||
|
||||
if sys.platform.startswith('linux'):
|
||||
info = {}
|
||||
with open('/proc/meminfo') as f:
|
||||
for line in f:
|
||||
p = line.split()
|
||||
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
|
||||
|
||||
if 'memavailable' in info:
|
||||
# Linux >= 3.14
|
||||
return info['memavailable']
|
||||
else:
|
||||
return info['memfree'] + info['cached']
|
||||
|
||||
return None
|
||||
|
||||
def _test_cython_extension(tmp_path, srcdir):
    """
    Helper function to test building and importing Cython modules that
    make use of the Cython APIs for BLAS, LAPACK, optimize, and special.

    Parameters
    ----------
    tmp_path
        Writable temporary directory; used with the ``/`` operator, so it is
        presumably a ``pathlib.Path`` (pytest's ``tmp_path`` fixture) — TODO confirm.
    srcdir
        Source package directory expected to contain a
        ``tests/_cython_examples`` meson project.

    Returns
    -------
    tuple
        The imported ``extending`` and ``extending_cpp`` modules built from
        the examples.
    """
    import pytest
    try:
        # Probe for a usable meson on PATH; skip the test when absent.
        subprocess.check_call(["meson", "--version"])
    except FileNotFoundError:
        pytest.skip("No usable 'meson' found")

    # build the examples in a temporary directory
    mod_name = os.path.split(srcdir)[1]
    shutil.copytree(srcdir, tmp_path / mod_name)
    build_dir = tmp_path / mod_name / 'tests' / '_cython_examples'
    target_dir = build_dir / 'build'
    os.makedirs(target_dir, exist_ok=True)

    # Ensure we use the correct Python interpreter even when `meson` is
    # installed in a different Python environment (see numpy#24956)
    native_file = str(build_dir / 'interpreter-native-file.ini')
    with open(native_file, 'w') as f:
        f.write("[binaries]\n")
        f.write(f"python = '{sys.executable}'")

    if sys.platform == "win32":
        # --vsenv asks meson to activate the MSVC environment on Windows.
        subprocess.check_call(["meson", "setup",
                               "--buildtype=release",
                               "--native-file", native_file,
                               "--vsenv", str(build_dir)],
                              cwd=target_dir,
                              )
    else:
        subprocess.check_call(["meson", "setup",
                               "--native-file", native_file, str(build_dir)],
                              cwd=target_dir
                              )
    subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir)

    # import without adding the directory to sys.path
    suffix = sysconfig.get_config_var('EXT_SUFFIX')

    def load(modname):
        # Load the freshly built extension module directly from its file path.
        so = (target_dir / modname).with_suffix(suffix)
        spec = spec_from_file_location(modname, so)
        mod = module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    # test that the module can be imported
    return load("extending"), load("extending_cpp")
|
||||
58
.CondaPkg/env/Lib/site-packages/scipy/_lib/_threadsafety.py
vendored
Normal file
58
.CondaPkg/env/Lib/site-packages/scipy/_lib/_threadsafety.py
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
import threading
|
||||
|
||||
import scipy._lib.decorator
|
||||
|
||||
|
||||
__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
|
||||
|
||||
|
||||
class ReentrancyError(RuntimeError):
    """Raised by :class:`ReentrancyLock` on a nested (re-entrant) acquisition."""
    pass
|
||||
|
||||
|
||||
class ReentrancyLock:
    """
    Threading lock that raises an exception for reentrant calls.

    Calls from different threads are serialized, and nested calls from the
    same thread result to an error.

    The object can be used as a context manager or to decorate functions
    via the decorate() method.

    """

    def __init__(self, err_msg):
        self._err_msg = err_msg
        self._entered = False
        self._rlock = threading.RLock()

    def __enter__(self):
        self._rlock.acquire()
        if not self._entered:
            self._entered = True
            return
        # Same thread came through again: release before raising so other
        # threads are not blocked forever.
        self._rlock.release()
        raise ReentrancyError(self._err_msg)

    def __exit__(self, type, value, traceback):
        self._entered = False
        self._rlock.release()

    def decorate(self, func):
        # Wrap func so every call happens inside this lock; the decorator
        # module preserves the original signature.
        def caller(func, *a, **kw):
            with self:
                return func(*a, **kw)
        return scipy._lib.decorator.decorate(func, caller)
|
||||
|
||||
|
||||
def non_reentrant(err_msg=None):
    """
    Decorate a function with a threading lock and prevent reentrant calls.
    """
    def decorator(func):
        # Default message mirrors the decorated function's name.
        if err_msg is None:
            message = "%s is not re-entrant" % func.__name__
        else:
            message = err_msg
        return ReentrancyLock(message).decorate(func)
    return decorator
|
||||
86
.CondaPkg/env/Lib/site-packages/scipy/_lib/_tmpdirs.py
vendored
Normal file
86
.CondaPkg/env/Lib/site-packages/scipy/_lib/_tmpdirs.py
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
''' Contexts for *with* statement providing temporary directories
|
||||
'''
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
from shutil import rmtree
|
||||
from tempfile import mkdtemp
|
||||
|
||||
|
||||
@contextmanager
def tempdir():
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager.

    Upon exiting the context, the directory and everything contained
    in it are removed — even when the body raises an exception.

    Examples
    --------
    >>> import os
    >>> with tempdir() as tmpdir:
    ...     fname = os.path.join(tmpdir, 'example_file.txt')
    ...     with open(fname, 'wt') as fobj:
    ...         _ = fobj.write('a string\\n')
    >>> os.path.exists(tmpdir)
    False
    """
    d = mkdtemp()
    try:
        yield d
    finally:
        # Bug fix: the original skipped cleanup when the with-body raised,
        # leaking the temporary directory.
        rmtree(d)
|
||||
|
||||
|
||||
@contextmanager
def in_tempdir():
    ''' Create, return, and change directory to a temporary directory

    The previous working directory is restored and the temporary directory
    removed on exit, even when the body raises an exception.

    Examples
    --------
    >>> import os
    >>> my_cwd = os.getcwd()
    >>> with in_tempdir() as tmpdir:
    ...     _ = open('test.txt', 'wt').write('some text')
    ...     assert os.path.isfile('test.txt')
    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
    >>> os.path.exists(tmpdir)
    False
    >>> os.getcwd() == my_cwd
    True
    '''
    pwd = os.getcwd()
    d = mkdtemp()
    os.chdir(d)
    try:
        yield d
    finally:
        # Bug fix: the original skipped both the chdir-back and the rmtree
        # when the with-body raised, leaving the process in a deleted-later
        # cwd and leaking the directory.
        os.chdir(pwd)
        rmtree(d)
|
||||
|
||||
|
||||
@contextmanager
def in_dir(dir=None):
    """ Change directory to given directory for duration of ``with`` block

    Useful when you want to use `in_tempdir` for the final test, but
    you are still debugging. For example, you may want to do this in the end:

    >>> with in_tempdir() as tmpdir:
    ...     # do something complicated which might break
    ...     pass

    But, indeed, the complicated thing does break, and meanwhile, the
    ``in_tempdir`` context manager wiped out the directory with the
    temporary files that you wanted for debugging. So, while debugging, you
    replace with something like:

    >>> with in_dir() as tmpdir: # Use working directory by default
    ...     # do something complicated which might break
    ...     pass

    You can then look at the temporary file outputs to debug what is happening,
    fix, and finally replace ``in_dir`` with ``in_tempdir`` again.
    """
    cwd = os.getcwd()
    if dir is None:
        # No directory given: stay where we are; nothing to restore.
        yield cwd
        return
    os.chdir(dir)
    try:
        yield dir
    finally:
        # Bug fix: the original did not restore the working directory when
        # the with-body raised.
        os.chdir(cwd)
|
||||
29
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/LICENSE
vendored
Normal file
29
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/LICENSE
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2018, Quansight-Labs
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
116
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/__init__.py
vendored
Normal file
116
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/__init__.py
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
"""
|
||||
.. note:
|
||||
If you are looking for overrides for NumPy-specific methods, see the
|
||||
documentation for :obj:`unumpy`. This page explains how to write
|
||||
back-ends and multimethods.
|
||||
|
||||
``uarray`` is built around a back-end protocol, and overridable multimethods.
|
||||
It is necessary to define multimethods for back-ends to be able to override them.
|
||||
See the documentation of :obj:`generate_multimethod` on how to write multimethods.
|
||||
|
||||
|
||||
|
||||
Let's start with the simplest:
|
||||
|
||||
``__ua_domain__`` defines the back-end *domain*. The domain consists of period-
|
||||
separated string consisting of the modules you extend plus the submodule. For
|
||||
example, if a submodule ``module2.submodule`` extends ``module1``
|
||||
(i.e., it exposes dispatchables marked as types available in ``module1``),
|
||||
then the domain string should be ``"module1.module2.submodule"``.
|
||||
|
||||
|
||||
For the purpose of this demonstration, we'll be creating an object and setting
|
||||
its attributes directly. However, note that you can use a module or your own type
|
||||
as a backend as well.
|
||||
|
||||
>>> class Backend: pass
|
||||
>>> be = Backend()
|
||||
>>> be.__ua_domain__ = "ua_examples"
|
||||
|
||||
It might be useful at this point to sidetrack to the documentation of
|
||||
:obj:`generate_multimethod` to find out how to generate a multimethod
|
||||
overridable by :obj:`uarray`. Needless to say, writing a backend and
|
||||
creating multimethods are mostly orthogonal activities, and knowing
|
||||
one doesn't necessarily require knowledge of the other, although it
|
||||
is certainly helpful. We expect core API designers/specifiers to write the
|
||||
multimethods, and implementors to override them. But, as is often the case,
|
||||
similar people write both.
|
||||
|
||||
Without further ado, here's an example multimethod:
|
||||
|
||||
>>> import uarray as ua
|
||||
>>> from uarray import Dispatchable
|
||||
>>> def override_me(a, b):
|
||||
... return Dispatchable(a, int),
|
||||
>>> def override_replacer(args, kwargs, dispatchables):
|
||||
... return (dispatchables[0], args[1]), {}
|
||||
>>> overridden_me = ua.generate_multimethod(
|
||||
... override_me, override_replacer, "ua_examples"
|
||||
... )
|
||||
|
||||
Next comes the part about overriding the multimethod. This requires
|
||||
the ``__ua_function__`` protocol, and the ``__ua_convert__``
|
||||
protocol. The ``__ua_function__`` protocol has the signature
|
||||
``(method, args, kwargs)`` where ``method`` is the passed
|
||||
multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables``
|
||||
is the list of converted dispatchables passed in.
|
||||
|
||||
>>> def __ua_function__(method, args, kwargs):
|
||||
... return method.__name__, args, kwargs
|
||||
>>> be.__ua_function__ = __ua_function__
|
||||
|
||||
The other protocol of interest is the ``__ua_convert__`` protocol. It has the
|
||||
signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
|
||||
between the formats should ideally be an ``O(1)`` operation, but it means that
|
||||
no memory copying should be involved, only views of the existing data.
|
||||
|
||||
>>> def __ua_convert__(dispatchables, coerce):
|
||||
... for d in dispatchables:
|
||||
... if d.type is int:
|
||||
... if coerce and d.coercible:
|
||||
... yield str(d.value)
|
||||
... else:
|
||||
... yield d.value
|
||||
>>> be.__ua_convert__ = __ua_convert__
|
||||
|
||||
Now that we have defined the backend, the next thing to do is to call the multimethod.
|
||||
|
||||
>>> with ua.set_backend(be):
|
||||
... overridden_me(1, "2")
|
||||
('override_me', (1, '2'), {})
|
||||
|
||||
Note that the marked type has no effect on the actual type of the passed object.
|
||||
We can also coerce the type of the input.
|
||||
|
||||
>>> with ua.set_backend(be, coerce=True):
|
||||
... overridden_me(1, "2")
|
||||
... overridden_me(1.0, "2")
|
||||
('override_me', ('1', '2'), {})
|
||||
('override_me', ('1.0', '2'), {})
|
||||
|
||||
Another feature is that if you remove ``__ua_convert__``, the arguments are not
|
||||
converted at all and it's up to the backend to handle that.
|
||||
|
||||
>>> del be.__ua_convert__
|
||||
>>> with ua.set_backend(be):
|
||||
... overridden_me(1, "2")
|
||||
('override_me', (1, '2'), {})
|
||||
|
||||
You also have the option to return ``NotImplemented``, in which case processing moves on
|
||||
to the next back-end, which in this case, doesn't exist. The same applies to
|
||||
``__ua_convert__``.
|
||||
|
||||
>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
|
||||
>>> with ua.set_backend(be):
|
||||
... overridden_me(1, "2")
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
uarray.BackendNotImplementedError: ...
|
||||
|
||||
The last possibility is if we don't have ``__ua_convert__``, in which case the job is
|
||||
left up to ``__ua_function__``, but putting things back into arrays after conversion
|
||||
will not be possible.
|
||||
"""
|
||||
|
||||
from ._backend import *
|
||||
__version__ = '0.8.8.dev0+aa94c5a4.scipy'
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-312.pyc
vendored
Normal file
Binary file not shown.
704
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/_backend.py
vendored
Normal file
704
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/_backend.py
vendored
Normal file
@@ -0,0 +1,704 @@
|
||||
import typing
|
||||
import types
|
||||
import inspect
|
||||
import functools
|
||||
from . import _uarray
|
||||
import copyreg
|
||||
import pickle
|
||||
import contextlib
|
||||
|
||||
from ._uarray import ( # type: ignore
|
||||
BackendNotImplementedError,
|
||||
_Function,
|
||||
_SkipBackendContext,
|
||||
_SetBackendContext,
|
||||
_BackendState,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"set_backend",
|
||||
"set_global_backend",
|
||||
"skip_backend",
|
||||
"register_backend",
|
||||
"determine_backend",
|
||||
"determine_backend_multi",
|
||||
"clear_backends",
|
||||
"create_multimethod",
|
||||
"generate_multimethod",
|
||||
"_Function",
|
||||
"BackendNotImplementedError",
|
||||
"Dispatchable",
|
||||
"wrap_single_convertor",
|
||||
"wrap_single_convertor_instance",
|
||||
"all_of_type",
|
||||
"mark_as",
|
||||
"set_state",
|
||||
"get_state",
|
||||
"reset_state",
|
||||
"_BackendState",
|
||||
"_SkipBackendContext",
|
||||
"_SetBackendContext",
|
||||
]
|
||||
|
||||
ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]]
|
||||
ArgumentReplacerType = typing.Callable[
|
||||
[tuple, dict, tuple], tuple[tuple, dict]
|
||||
]
|
||||
|
||||
def unpickle_function(mod_name, qname, self_):
    """Reconstruct a function (or bound method) from module name + qualname.

    Raises ``pickle.UnpicklingError`` when the module or attribute path
    cannot be resolved.
    """
    import importlib

    try:
        target = importlib.import_module(mod_name)
        # Walk the dotted qualname down from the module object.
        for attr in qname.split("."):
            target = getattr(target, attr)

        if self_ is not None:
            # Rebind as a method of the original instance.
            target = types.MethodType(target, self_)

        return target
    except (ImportError, AttributeError) as exc:
        from pickle import UnpicklingError

        raise UnpicklingError from exc
|
||||
|
||||
|
||||
def pickle_function(func):
    """copyreg reducer for functions: reduce to (module, qualname, __self__).

    Refuses to pickle objects that do not round-trip back to the same
    object via :func:`unpickle_function`.
    """
    mod_name = getattr(func, "__module__", None)
    qname = getattr(func, "__qualname__", None)
    self_ = getattr(func, "__self__", None)

    try:
        roundtrip = unpickle_function(mod_name, qname, self_)
    except pickle.UnpicklingError:
        roundtrip = None

    if roundtrip is not func:
        raise pickle.PicklingError(
            f"Can't pickle {func}: it's not the same object as {roundtrip}"
        )

    return unpickle_function, (mod_name, qname, self_)
|
||||
|
||||
|
||||
def pickle_state(state):
    # copyreg reducer: a _BackendState round-trips through its C-level
    # _pickle()/_unpickle pair.
    return _uarray._BackendState._unpickle, state._pickle()


def pickle_set_backend_context(ctx):
    # copyreg reducer: _SetBackendContext is rebuilt from its constructor args.
    return _SetBackendContext, ctx._pickle()


def pickle_skip_backend_context(ctx):
    # copyreg reducer: _SkipBackendContext is rebuilt from its constructor args.
    return _SkipBackendContext, ctx._pickle()


# Register the reducers so the C-extension types (and _Function) are picklable.
copyreg.pickle(_Function, pickle_function)
copyreg.pickle(_uarray._BackendState, pickle_state)
copyreg.pickle(_SetBackendContext, pickle_set_backend_context)
copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context)
|
||||
|
||||
|
||||
def get_state():
    """
    Return an opaque object containing the current state of all the backends.

    Can be used for synchronization between threads/processes.

    See Also
    --------
    set_state
        Sets the state returned by this function.
    """
    # The snapshot itself lives in the C extension.
    return _uarray.get_state()
|
||||
|
||||
|
||||
@contextlib.contextmanager
def reset_state():
    """
    Context manager that restores all backend state once exited.

    See Also
    --------
    set_state
        Context manager that sets the backend state.
    get_state
        Gets a state to be set by this context manager.
    """
    # Snapshot now; set_state restores the snapshot on exit.
    snapshot = get_state()
    with set_state(snapshot):
        yield
|
||||
|
||||
|
||||
@contextlib.contextmanager
def set_state(state):
    """
    Context manager setting the backends' state to one from :obj:`get_state`.

    See Also
    --------
    get_state
        Gets a state to be set by this context manager.
    """
    previous = get_state()
    _uarray.set_state(state)
    try:
        yield
    finally:
        # Restore unconditionally, even if the body raised.
        _uarray.set_state(previous, True)
|
||||
|
||||
|
||||
def create_multimethod(*args, **kwargs):
    """
    Creates a decorator for generating multimethods.

    The returned decorator takes an argument extractor; all other arguments
    are forwarded to :obj:`generate_multimethod`.

    See Also
    --------
    generate_multimethod
        Generates a multimethod.
    """
    def decorator(extractor):
        return generate_multimethod(extractor, *args, **kwargs)

    return decorator
|
||||
|
||||
|
||||
def generate_multimethod(
    argument_extractor: ArgumentExtractorType,
    argument_replacer: ArgumentReplacerType,
    domain: str,
    default: typing.Optional[typing.Callable] = None,
):
    """
    Generate an overridable multimethod.

    Parameters
    ----------
    argument_extractor : ArgumentExtractorType
        Callable with the desired multimethod's signature that returns the
        dispatchable arguments as an iterable of :obj:`Dispatchable` marks.
    argument_replacer : ArgumentReplacerType
        Callable ``(args, kwargs, dispatchables) -> (args, kwargs)`` that
        substitutes the converted dispatchables back into args/kwargs.
    domain : str
        A string value indicating the domain of this multimethod.
    default : Optional[Callable], optional
        Default implementation; ``None`` (the default) means the multimethod
        raises ``BackendNotImplementedError`` when no backend handles it.

    Examples
    --------
    >>> def override_me(a, b):
    ...   return Dispatchable(a, int),
    >>> def override_replacer(args, kwargs, dispatchables):
    ...     return (dispatchables[0], args[1]), {}
    >>> overridden_me = generate_multimethod(
    ...     override_me, override_replacer, "ua_examples",
    ...     default=lambda x, y: (x, y)
    ... )
    >>> overridden_me(1, "a")
    (1, 'a')

    See Also
    --------
    uarray
        See the module documentation for how to override the method by
        creating backends.
    """
    kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
    dispatcher = _Function(
        argument_extractor,
        argument_replacer,
        domain,
        arg_defaults,
        kw_defaults,
        default,
    )

    # Copy name/docstring/etc. from the extractor onto the multimethod.
    return functools.update_wrapper(dispatcher, argument_extractor)
|
||||
|
||||
|
||||
def set_backend(backend, coerce=False, only=False):
    """
    A context manager that sets the preferred backend.

    Parameters
    ----------
    backend
        The backend to set.
    coerce
        Whether or not to coerce to a specific backend's types. Implies ``only``.
    only
        Whether or not this should be the last backend to try.

    See Also
    --------
    skip_backend: A context manager that allows skipping of backends.
    set_global_backend: Set a single, global backend for a domain.
    """
    # Context objects are cached per (backend, coerce, only) on the backend
    # itself, so repeated calls reuse the same context manager.
    try:
        cache = backend.__ua_cache__
    except AttributeError:
        cache = backend.__ua_cache__ = {}

    key = ("set", coerce, only)
    try:
        return cache[key]
    except KeyError:
        ctx = _SetBackendContext(backend, coerce, only)
        cache[key] = ctx
        return ctx
|
||||
|
||||
|
||||
def skip_backend(backend):
    """
    A context manager that allows one to skip a given backend from processing
    entirely. This allows one to use another backend's code in a library that
    is also a consumer of the same backend.

    Parameters
    ----------
    backend
        The backend to skip.

    See Also
    --------
    set_backend: A context manager that allows setting of backends.
    set_global_backend: Set a single, global backend for a domain.
    """
    # The skip context is cached under "skip" on the backend itself.
    try:
        cache = backend.__ua_cache__
    except AttributeError:
        cache = backend.__ua_cache__ = {}

    try:
        return cache["skip"]
    except KeyError:
        ctx = _SkipBackendContext(backend)
        cache["skip"] = ctx
        return ctx
|
||||
|
||||
|
||||
def get_defaults(f):
    """Extract default-value metadata from a callable's signature.

    Returns a triple ``(kw_defaults, arg_defaults, opts)`` where
    ``kw_defaults`` maps every defaulted parameter name to its default,
    ``arg_defaults`` is the tuple of defaults for positional parameters
    only, and ``opts`` is the set of all parameter names.
    """
    kw_defaults = {}
    arg_defaults = []
    opts = set()
    positional_kinds = (
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
    )
    for name, param in inspect.signature(f).parameters.items():
        if param.default is not inspect.Parameter.empty:
            kw_defaults[name] = param.default
            if param.kind in positional_kinds:
                arg_defaults.append(param.default)
        opts.add(name)

    return kw_defaults, tuple(arg_defaults), opts
|
||||
|
||||
|
||||
def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
    """
    This utility method replaces the default backend for permanent use. It
    will be tried in the list of backends automatically, unless the
    ``only`` flag is set on a backend. This will be the first tried
    backend outside the :obj:`set_backend` context manager.

    Note that this method is not thread-safe.

    .. warning::
        We caution library authors against using this function in
        their code. We do *not* support this use-case. This function
        is meant to be used only by users themselves, or by a reference
        implementation, if one exists.

    Parameters
    ----------
    backend
        The backend to register.
    coerce : bool
        Whether to coerce input types when trying this backend.
    only : bool
        If ``True``, no more backends will be tried if this fails.
        Implied by ``coerce=True``.
    try_last : bool
        If ``True``, the global backend is tried after registered backends.

    See Also
    --------
    set_backend: A context manager that allows setting of backends.
    skip_backend: A context manager that allows skipping of backends.
    """
    # All bookkeeping happens inside the C extension.
    _uarray.set_global_backend(backend, coerce, only, try_last)
|
||||
|
||||
|
||||
def register_backend(backend):
    """
    This utility method registers a backend for permanent use. It
    will be tried in the list of backends automatically, unless the
    ``only`` flag is set on a backend.

    Note that this method is not thread-safe.

    Parameters
    ----------
    backend
        The backend to register.
    """
    _uarray.register_backend(backend)
|
||||
|
||||
|
||||
def clear_backends(domain, registered=True, globals=False):
    """
    This utility method clears registered backends.

    .. warning::
        We caution library authors against using this function in
        their code. We do *not* support this use-case. This function
        is meant to be used only by users themselves.

    .. warning::
        Do NOT use this method inside a multimethod call, or the
        program is likely to crash.

    Parameters
    ----------
    domain : Optional[str]
        The domain for which to de-register backends. ``None`` means
        de-register for all domains.
    registered : bool
        Whether or not to clear registered backends. See :obj:`register_backend`.
    globals : bool
        Whether or not to clear global backends. See :obj:`set_global_backend`.

    See Also
    --------
    register_backend : Register a backend globally.
    set_global_backend : Set a global backend.
    """
    _uarray.clear_backends(domain, registered, globals)
|
||||
|
||||
|
||||
class Dispatchable:
    """
    A utility class which marks an argument with a specific dispatch type.

    Attributes
    ----------
    value
        The value of the Dispatchable.

    type
        The type of the Dispatchable.

    Examples
    --------
    >>> x = Dispatchable(1, str)
    >>> x
    <Dispatchable: type=<class 'str'>, value=1>

    See Also
    --------
    all_of_type
        Marks all unmarked parameters of a function.

    mark_as
        Allows one to create a utility function to mark as a given type.
    """

    def __init__(self, value, dispatch_type, coercible=True):
        self.type = dispatch_type
        self.value = value
        self.coercible = coercible

    def __getitem__(self, index):
        # Tuple-like access: [0] -> type, [1] -> value.
        pair = (self.type, self.value)
        return pair[index]

    def __str__(self):
        return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>"

    __repr__ = __str__
|
||||
|
||||
|
||||
def mark_as(dispatch_type):
    """
    Creates a utility function to mark something as a specific type.

    Examples
    --------
    >>> mark_int = mark_as(int)
    >>> mark_int(1)
    <Dispatchable: type=<class 'int'>, value=1>
    """
    # Pre-bind the dispatch type; the returned callable only takes the value.
    return functools.partial(Dispatchable, dispatch_type=dispatch_type)
|
||||
|
||||
|
||||
def all_of_type(arg_type):
    """
    Marks all unmarked arguments as a given type.

    Examples
    --------
    >>> @all_of_type(str)
    ... def f(a, b):
    ...     return a, Dispatchable(b, int)
    >>> f('a', 1)
    (<Dispatchable: type=<class 'str'>, value='a'>,
     <Dispatchable: type=<class 'int'>, value=1>)
    """

    def outer(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # Wrap any value the extractor returned unmarked; leave explicit
            # Dispatchable marks untouched.
            marked = []
            for item in func(*args, **kwargs):
                if isinstance(item, Dispatchable):
                    marked.append(item)
                else:
                    marked.append(Dispatchable(item, arg_type))
            return tuple(marked)

        return inner

    return outer
|
||||
|
||||
|
||||
def wrap_single_convertor(convert_single):
    """
    Wraps a ``__ua_convert__`` defined for a single element to all elements.
    If any of them return ``NotImplemented``, the operation is assumed to be
    undefined.

    Accepts a signature of (value, type, coerce).
    """

    @functools.wraps(convert_single)
    def __ua_convert__(dispatchables, coerce):
        results = []
        for disp in dispatchables:
            out = convert_single(disp.value, disp.type, coerce and disp.coercible)
            # A single failure makes the whole conversion undefined.
            if out is NotImplemented:
                return NotImplemented
            results.append(out)
        return results

    return __ua_convert__
|
||||
|
||||
|
||||
def wrap_single_convertor_instance(convert_single):
    """
    Wraps a ``__ua_convert__`` defined for a single element to all elements.
    If any of them return ``NotImplemented``, the operation is assumed to be
    undefined.

    Accepts a signature of (value, type, coerce).
    """

    @functools.wraps(convert_single)
    def __ua_convert__(self, dispatchables, coerce):
        results = []
        for dispatchable in dispatchables:
            # Same contract as wrap_single_convertor, but forwards `self`
            # for method-style convertors.
            result = convert_single(
                self,
                dispatchable.value,
                dispatchable.type,
                coerce and dispatchable.coercible,
            )
            if result is NotImplemented:
                return NotImplemented
            results.append(result)
        return results

    return __ua_convert__
|
||||
|
||||
|
||||
def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):
    """Set the backend to the first active backend that supports ``value``

    This is useful for functions that call multimethods without any dispatchable
    arguments. You can use :func:`determine_backend` to ensure the same backend
    is used everywhere in a block of multimethod calls.

    Parameters
    ----------
    value
        The value being tested
    dispatch_type
        The dispatch type associated with ``value``, aka
        ":ref:`marking <MarkingGlossary>`".
    domain: string
        The domain to query for backends and set.
    coerce: bool
        Whether or not to allow coercion to the backend's types. Implies ``only``.
    only: bool
        Whether or not this should be the last backend to try.

    See Also
    --------
    set_backend: For when you know which backend to set

    Notes
    -----

    Support is determined by the ``__ua_convert__`` protocol. Backends not
    supporting the type must return ``NotImplemented`` from their
    ``__ua_convert__`` if they don't support input of that type.

    Examples
    --------

    Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting
    different types, ``TypeA`` and ``TypeB``. Neither supporting the other type:

    >>> with ua.set_backend(ex.BackendA):
    ...     ex.call_multimethod(ex.TypeB(), ex.TypeB())
    Traceback (most recent call last):
        ...
    uarray.BackendNotImplementedError: ...

    Now consider a multimethod that creates a new object of ``TypeA``, or
    ``TypeB`` depending on the active backend.

    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
    ...     res = ex.creation_multimethod()
    ...     ex.call_multimethod(res, ex.TypeA())
    Traceback (most recent call last):
        ...
    uarray.BackendNotImplementedError: ...

    ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the
    innermost with statement. So, ``call_multimethod`` fails since the types
    don't match.

    Instead, we need to first find a backend suitable for all of our objects.

    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
    ...     x = ex.TypeA()
    ...     with ua.determine_backend(x, "mark", domain="ua_examples"):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, x)
    TypeA
    """
    # Wrap the single value so the backend query sees the same
    # (value, dispatch type, coercible) triple that multimethod dispatch uses.
    dispatchables = (Dispatchable(value, dispatch_type, coerce),)
    backend = _uarray.determine_backend(domain, dispatchables, coerce)

    # Returns a context manager that activates the found backend.
    return set_backend(backend, coerce=coerce, only=only)
|
||||
|
||||
|
||||
def determine_backend_multi(
    dispatchables, *, domain, only=True, coerce=False, **kwargs
):
    """Set a backend supporting all ``dispatchables``

    This is useful for functions that call multimethods without any dispatchable
    arguments. You can use :func:`determine_backend_multi` to ensure the same
    backend is used everywhere in a block of multimethod calls involving
    multiple arrays.

    Parameters
    ----------
    dispatchables: Sequence[Union[uarray.Dispatchable, Any]]
        The dispatchables that must be supported
    domain: string
        The domain to query for backends and set.
    coerce: bool
        Whether or not to allow coercion to the backend's types. Implies ``only``.
    only: bool
        Whether or not this should be the last backend to try.
    dispatch_type: Optional[Any]
        The default dispatch type associated with ``dispatchables``, aka
        ":ref:`marking <MarkingGlossary>`".

    See Also
    --------
    determine_backend: For a single dispatch value
    set_backend: For when you know which backend to set

    Notes
    -----

    Support is determined by the ``__ua_convert__`` protocol. Backends not
    supporting the type must return ``NotImplemented`` from their
    ``__ua_convert__`` if they don't support input of that type.

    Examples
    --------

    :func:`determine_backend` allows the backend to be set from a single
    object. :func:`determine_backend_multi` allows multiple objects to be
    checked simultaneously for support in the backend. Suppose we have a
    ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call,
    and a ``BackendBC`` that doesn't support ``TypeA``.

    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
    ...     a, b = ex.TypeA(), ex.TypeB()
    ...     with ua.determine_backend_multi(
    ...         [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")],
    ...         domain="ua_examples"
    ...     ):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, a, b)
    TypeA

    This won't call ``BackendBC`` because it doesn't support ``TypeA``.

    We can also leave out the ``ua.Dispatchable`` if we specify the
    default ``dispatch_type`` for the ``dispatchables`` argument.

    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
    ...     a, b = ex.TypeA(), ex.TypeB()
    ...     with ua.determine_backend_multi(
    ...         [a, b], dispatch_type="mark", domain="ua_examples"
    ...     ):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, a, b)
    TypeA

    """
    if "dispatch_type" in kwargs:
        # A default marking was given: wrap raw values, leaving any existing
        # Dispatchable instances untouched.
        disp_type = kwargs.pop("dispatch_type")
        dispatchables = tuple(
            d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type)
            for d in dispatchables
        )
    else:
        # Without a default marking, every element must already be marked.
        dispatchables = tuple(dispatchables)
        if not all(isinstance(d, Dispatchable) for d in dispatchables):
            raise TypeError("dispatchables must be instances of uarray.Dispatchable")

    # Reject any other (unknown) keyword arguments explicitly.
    if len(kwargs) != 0:
        raise TypeError(f"Received unexpected keyword arguments: {kwargs}")

    backend = _uarray.determine_backend(domain, dispatchables, coerce)

    # Returns a context manager that activates the found backend.
    return set_backend(backend, coerce=coerce, only=only)
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/_uarray.cp312-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/_uarray.cp312-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/_uarray.cp312-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/_uarray/_uarray.cp312-win_amd64.pyd
vendored
Normal file
Binary file not shown.
954
.CondaPkg/env/Lib/site-packages/scipy/_lib/_util.py
vendored
Normal file
954
.CondaPkg/env/Lib/site-packages/scipy/_lib/_util.py
vendored
Normal file
@@ -0,0 +1,954 @@
|
||||
import re
|
||||
from contextlib import contextmanager
|
||||
import functools
|
||||
import operator
|
||||
import warnings
|
||||
import numbers
|
||||
from collections import namedtuple
|
||||
import inspect
|
||||
import math
|
||||
from typing import (
|
||||
Optional,
|
||||
Union,
|
||||
TYPE_CHECKING,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
from scipy._lib._array_api import array_namespace, is_numpy, size as xp_size
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# NumPy version compatibility shims.
# ---------------------------------------------------------------------------

AxisError: type[Exception]
ComplexWarning: type[Warning]
VisibleDeprecationWarning: type[Warning]

# NumPy 1.25 moved these exception/warning classes to numpy.exceptions and
# added DTypePromotionError; older versions expose them at the top level.
if np.lib.NumpyVersion(np.__version__) >= '1.25.0':
    from numpy.exceptions import (
        AxisError, ComplexWarning, VisibleDeprecationWarning,
        DTypePromotionError
    )
else:
    from numpy import (  # type: ignore[attr-defined, no-redef]
        AxisError, ComplexWarning, VisibleDeprecationWarning  # noqa: F401
    )
    # DTypePromotionError did not exist before 1.25; it subclasses TypeError,
    # so TypeError is a usable stand-in for except clauses.
    DTypePromotionError = TypeError  # type: ignore

np_long: type
np_ulong: type

# NumPy 2.0 reintroduces np.long/np.ulong (as C long); accessing them on the
# transitional dev versions emits a FutureWarning, which we silence.
if np.lib.NumpyVersion(np.__version__) >= "2.0.0.dev0":
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                r".*In the future `np\.long` will be defined as.*",
                FutureWarning,
            )
            np_long = np.long  # type: ignore[attr-defined]
            np_ulong = np.ulong  # type: ignore[attr-defined]
    except AttributeError:
        np_long = np.int_
        np_ulong = np.uint
else:
    np_long = np.int_
    np_ulong = np.uint

IntNumber = Union[int, np.integer]
DecimalNumber = Union[float, np.floating, np.integer]

copy_if_needed: Optional[bool]

# Value to pass as np.array(..., copy=copy_if_needed) meaning "copy only if
# required": None under the NumPy 2 semantics, False before.
if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
    copy_if_needed = None
elif np.lib.NumpyVersion(np.__version__) < "1.28.0":
    copy_if_needed = False
else:
    # 2.0.0 dev versions, handle cases where copy may or may not exist
    try:
        np.array([1]).__array__(copy=None)  # type: ignore[call-overload]
        copy_if_needed = None
    except TypeError:
        copy_if_needed = False

# Since Generator was introduced in numpy 1.17, the following condition is needed for
# backward compatibility
if TYPE_CHECKING:
    SeedType = Optional[Union[IntNumber, np.random.Generator,
                              np.random.RandomState]]
    GeneratorType = TypeVar("GeneratorType", bound=Union[np.random.Generator,
                                                         np.random.RandomState])

try:
    from numpy.random import Generator as Generator
except ImportError:
    # Placeholder so isinstance(x, Generator) checks below simply fail on
    # NumPy versions without numpy.random.Generator.
    class Generator:  # type: ignore[no-redef]
        pass
|
||||
|
||||
|
||||
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """Return elements chosen from two possibilities depending on a condition

    Equivalent to ``f(*arrays) if cond else fillvalue`` performed elementwise.

    Parameters
    ----------
    cond : array
        The condition (expressed as a boolean array).
    arrays : tuple of array
        Arguments to `f` (and `f2`). Must be broadcastable with `cond`.
    f : callable
        Where `cond` is True, output will be ``f(arr1[cond], arr2[cond], ...)``
    fillvalue : object
        If provided, value with which to fill output array where `cond` is
        not True.
    f2 : callable
        If provided, output will be ``f2(arr1[cond], arr2[cond], ...)`` where
        `cond` is not True.

    Returns
    -------
    out : array
        An array with elements from the output of `f` where `cond` is True
        and `fillvalue` (or elements from the output of `f2`) elsewhere. The
        returned array has data type determined by Type Promotion Rules
        with the output of `f` and `fillvalue` (or the output of `f2`).

    Notes
    -----
    ``xp.where(cond, x, fillvalue)`` requires explicitly forming `x` even where
    `cond` is False. This function evaluates ``f(arr1[cond], arr2[cond], ...)``
    only where `cond` is True.

    Examples
    --------
    >>> import numpy as np
    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan, nan, 21., 32.])

    """
    xp = array_namespace(cond, *arrays)

    # Exactly one of the two "else" specifications may be supplied.
    if (f2 is fillvalue is None) or (f2 is not None and fillvalue is not None):
        raise ValueError("Exactly one of `fillvalue` or `f2` must be given.")

    args = xp.broadcast_arrays(cond, *arrays)
    bool_dtype = xp.asarray([True]).dtype  # numpy 1.xx doesn't have `bool`
    cond, arrays = xp.astype(args[0], bool_dtype, copy=False), args[1:]

    # Evaluate `f` only on the elements where `cond` is True.
    temp1 = xp.asarray(f(*(arr[cond] for arr in arrays)))

    if f2 is None:
        # Start from an array filled with `fillvalue`, promoted with f's output.
        fillvalue = xp.asarray(fillvalue)
        dtype = xp.result_type(temp1.dtype, fillvalue.dtype)
        out = xp.full(cond.shape, fill_value=fillvalue, dtype=dtype)
    else:
        # Evaluate `f2` only where `cond` is False.
        ncond = ~cond
        temp2 = xp.asarray(f2(*(arr[ncond] for arr in arrays)))
        dtype = xp.result_type(temp1, temp2)
        out = xp.empty(cond.shape, dtype=dtype)
        out[ncond] = temp2

    out[cond] = temp1

    return out
|
||||
|
||||
|
||||
def _lazyselect(condlist, choicelist, arrays, default=0):
|
||||
"""
|
||||
Mimic `np.select(condlist, choicelist)`.
|
||||
|
||||
Notice, it assumes that all `arrays` are of the same shape or can be
|
||||
broadcasted together.
|
||||
|
||||
All functions in `choicelist` must accept array arguments in the order
|
||||
given in `arrays` and must return an array of the same shape as broadcasted
|
||||
`arrays`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> x = np.arange(6)
|
||||
>>> np.select([x <3, x > 3], [x**2, x**3], default=0)
|
||||
array([ 0, 1, 4, 0, 64, 125])
|
||||
|
||||
>>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
|
||||
array([ 0., 1., 4., 0., 64., 125.])
|
||||
|
||||
>>> a = -np.ones_like(x)
|
||||
>>> _lazyselect([x < 3, x > 3],
|
||||
... [lambda x, a: x**2, lambda x, a: a * x**3],
|
||||
... (x, a), default=np.nan)
|
||||
array([ 0., 1., 4., nan, -64., -125.])
|
||||
|
||||
"""
|
||||
arrays = np.broadcast_arrays(*arrays)
|
||||
tcode = np.mintypecode([a.dtype.char for a in arrays])
|
||||
out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
|
||||
for func, cond in zip(choicelist, condlist):
|
||||
if np.all(cond is False):
|
||||
continue
|
||||
cond, _ = np.broadcast_arrays(cond, arrays[0])
|
||||
temp = tuple(np.extract(cond, arr) for arr in arrays)
|
||||
np.place(out, cond, func(*temp))
|
||||
return out
|
||||
|
||||
|
||||
def _aligned_zeros(shape, dtype=float, order="C", align=None):
    """Allocate a new ndarray with aligned memory.

    Primary use case for this currently is working around a f2py issue
    in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
    not necessarily create arrays aligned up to it.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the returned zero-filled array.
    dtype : data-type, optional
        Element type of the returned array.
    order : {"C", "F"}, optional
        Memory layout of the returned array.
    align : int, optional
        Byte alignment for the data pointer; defaults to ``dtype.alignment``.
    """
    dtype = np.dtype(dtype)
    if align is None:
        align = dtype.alignment
    if not hasattr(shape, '__len__'):
        shape = (shape,)
    size = functools.reduce(operator.mul, shape) * dtype.itemsize
    # Over-allocate so that a pointer with the requested alignment is
    # guaranteed to exist somewhere inside the buffer.
    buf = np.empty(size + align + 1, np.uint8)
    offset = buf.__array_interface__['data'][0] % align
    if offset != 0:
        offset = align - offset
    # Note: slices producing 0-size arrays do not necessarily change
    # data pointer --- so we use and allocate size+1
    buf = buf[offset:offset+size+1][:-1]
    # Reinterpret the aligned byte buffer as an ndarray of the target dtype.
    data = np.ndarray(shape, dtype, buf, order=order)
    data.fill(0)
    return data
|
||||
|
||||
|
||||
def _prune_array(array):
|
||||
"""Return an array equivalent to the input array. If the input
|
||||
array is a view of a much larger array, copy its contents to a
|
||||
newly allocated array. Otherwise, return the input unchanged.
|
||||
"""
|
||||
if array.base is not None and array.size < array.base.size // 2:
|
||||
return array.copy()
|
||||
return array
|
||||
|
||||
|
||||
def float_factorial(n: int) -> float:
    """Compute the factorial and return as a float

    Returns infinity when result is too large for a double
    """
    # 171! overflows an IEEE-754 double (170! is the largest representable).
    if n >= 171:
        return np.inf
    return float(math.factorial(n))
|
||||
|
||||
|
||||
# copy-pasted from scikit-learn utils/validation.py
|
||||
# change this to scipy.stats._qmc.check_random_state once numpy 1.16 is dropped
|
||||
def check_random_state(seed):
    """Turn `seed` into a `np.random.RandomState` instance.

    Parameters
    ----------
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
        Random number generator.

    """
    # Global singleton when no seed is supplied.
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    # Existing generators are passed through unchanged.
    if isinstance(seed, (np.random.RandomState, np.random.Generator)):
        return seed
    # Integers (Python or NumPy) seed a fresh RandomState.
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)

    raise ValueError(f"'{seed}' cannot be used to seed a numpy.random.RandomState"
                     " instance")
|
||||
|
||||
|
||||
def _asarray_validated(a, check_finite=True,
                       sparse_ok=False, objects_ok=False, mask_ok=False,
                       as_inexact=False):
    """
    Helper function for SciPy argument validation.

    Many SciPy linear algebra functions do support arbitrary array-like
    input arguments. Examples of commonly unsupported inputs include
    matrices containing inf/nan, sparse matrix representations, and
    matrices with complicated elements.

    Parameters
    ----------
    a : array_like
        The array-like input.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    sparse_ok : bool, optional
        True if scipy sparse matrices are allowed.
    objects_ok : bool, optional
        True if arrays with dtype('O') are allowed.
    mask_ok : bool, optional
        True if masked arrays are allowed.
    as_inexact : bool, optional
        True to convert the input array to a np.inexact dtype.

    Returns
    -------
    ret : ndarray
        The converted validated array.

    """
    if not sparse_ok:
        # Imported lazily to avoid a circular import at module load time.
        import scipy.sparse
        if scipy.sparse.issparse(a):
            msg = ('Sparse matrices are not supported by this function. '
                   'Perhaps one of the scipy.sparse.linalg functions '
                   'would work instead.')
            raise ValueError(msg)
    if not mask_ok:
        if np.ma.isMaskedArray(a):
            raise ValueError('masked arrays are not supported')
    # asarray_chkfinite raises for inf/nan; plain asarray skips that check.
    toarray = np.asarray_chkfinite if check_finite else np.asarray
    a = toarray(a)
    if not objects_ok:
        # dtype objects are interned, so identity comparison with
        # np.dtype('O') behaves like equality here.
        if a.dtype is np.dtype('O'):
            raise ValueError('object arrays are not supported')
    if as_inexact:
        if not np.issubdtype(a.dtype, np.inexact):
            # Re-run the (possibly finite-checking) conversion as float64.
            a = toarray(a, dtype=np.float64)
    return a
|
||||
|
||||
|
||||
def _validate_int(k, name, minimum=None):
|
||||
"""
|
||||
Validate a scalar integer.
|
||||
|
||||
This function can be used to validate an argument to a function
|
||||
that expects the value to be an integer. It uses `operator.index`
|
||||
to validate the value (so, for example, k=2.0 results in a
|
||||
TypeError).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
k : int
|
||||
The value to be validated.
|
||||
name : str
|
||||
The name of the parameter.
|
||||
minimum : int, optional
|
||||
An optional lower bound.
|
||||
"""
|
||||
try:
|
||||
k = operator.index(k)
|
||||
except TypeError:
|
||||
raise TypeError(f'{name} must be an integer.') from None
|
||||
if minimum is not None and k < minimum:
|
||||
raise ValueError(f'{name} must be an integer not less '
|
||||
f'than {minimum}') from None
|
||||
return k
|
||||
|
||||
|
||||
# Add a replacement for inspect.getfullargspec()/
|
||||
# The version below is borrowed from Django,
|
||||
# https://github.com/django/django/pull/4846.
|
||||
|
||||
# Note an inconsistency between inspect.getfullargspec(func) and
|
||||
# inspect.signature(func). If `func` is a bound method, the latter does *not*
|
||||
# list `self` as a first argument, while the former *does*.
|
||||
# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
|
||||
# mimics `inspect.getfullargspec` but does not list `self`.
|
||||
#
|
||||
# This way, the caller code does not need to know whether it uses a legacy
|
||||
# .getfullargspec or a bright and shiny .signature.
|
||||
|
||||
# Result type mirroring the namedtuple returned by inspect.getfullargspec;
# produced by getfullargspec_no_self below.
FullArgSpec = namedtuple('FullArgSpec',
                         ['args', 'varargs', 'varkw', 'defaults',
                          'kwonlyargs', 'kwonlydefaults', 'annotations'])
|
||||
|
||||
|
||||
def getfullargspec_no_self(func):
    """inspect.getfullargspec replacement using inspect.signature.

    If func is a bound method, do not list the 'self' parameter.

    Parameters
    ----------
    func : callable
        A callable to inspect

    Returns
    -------
    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                              kwonlydefaults, annotations)

        NOTE: if the first argument of `func` is self, it is *not*, I repeat
        *not*, included in fullargspec.args.
        This is done for consistency between inspect.getargspec() under
        Python 2.x, and inspect.signature() under Python 3.x.

    """
    # inspect.signature already omits `self` for bound methods.
    params = list(inspect.signature(func).parameters.values())

    positional_kinds = (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                       inspect.Parameter.POSITIONAL_ONLY)
    args = [p.name for p in params if p.kind in positional_kinds]

    # There can be at most one *args and one **kwargs parameter.
    var_positional = [p.name for p in params
                      if p.kind == inspect.Parameter.VAR_POSITIONAL]
    varargs = var_positional[0] if var_positional else None

    var_keyword = [p.name for p in params
                   if p.kind == inspect.Parameter.VAR_KEYWORD]
    varkw = var_keyword[0] if var_keyword else None

    # getfullargspec reports no-defaults as None rather than an empty tuple.
    defaults = tuple(
        p.default for p in params
        if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
            and p.default is not p.empty)
    ) or None

    kwonlyargs = [p.name for p in params
                  if p.kind == inspect.Parameter.KEYWORD_ONLY]
    kwdefaults = {p.name: p.default for p in params
                  if (p.kind == inspect.Parameter.KEYWORD_ONLY
                      and p.default is not p.empty)}
    annotations = {p.name: p.annotation for p in params
                   if p.annotation is not p.empty}

    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                       kwdefaults or None, annotations)
|
||||
|
||||
|
||||
class _FunctionWrapper:
|
||||
"""
|
||||
Object to wrap user's function, allowing picklability
|
||||
"""
|
||||
def __init__(self, f, args):
|
||||
self.f = f
|
||||
self.args = [] if args is None else args
|
||||
|
||||
def __call__(self, x):
|
||||
return self.f(x, *self.args)
|
||||
|
||||
|
||||
class MapWrapper:
    """
    Parallelisation wrapper for working with map-like callables, such as
    `multiprocessing.Pool.map`.

    Parameters
    ----------
    pool : int or map-like callable
        If `pool` is an integer, then it specifies the number of threads to
        use for parallelization. If ``int(pool) == 1``, then no parallel
        processing is used and the map builtin is used.
        If ``pool == -1``, then the pool will utilize all available CPUs.
        If `pool` is a map-like callable that follows the same
        calling sequence as the built-in map function, then this callable is
        used for parallelization.
    """
    def __init__(self, pool=1):
        # `pool` holds the Pool (or caller-supplied callable), `_mapfunc` is
        # what __call__ invokes, and `_own_pool` records whether this wrapper
        # created the Pool and is therefore responsible for cleaning it up.
        self.pool = None
        self._mapfunc = map
        self._own_pool = False

        if callable(pool):
            self.pool = pool
            self._mapfunc = self.pool
        else:
            from multiprocessing import Pool
            # user supplies a number
            if int(pool) == -1:
                # use as many processors as possible
                self.pool = Pool()
                self._mapfunc = self.pool.map
                self._own_pool = True
            elif int(pool) == 1:
                pass
            elif int(pool) > 1:
                # use the number of processors requested
                self.pool = Pool(processes=int(pool))
                self._mapfunc = self.pool.map
                self._own_pool = True
            else:
                raise RuntimeError("Number of workers specified must be -1,"
                                   " an int >= 1, or an object with a 'map' "
                                   "method")

    def __enter__(self):
        return self

    def terminate(self):
        # Only manage the lifetime of pools this wrapper created itself.
        if self._own_pool:
            self.pool.terminate()

    def join(self):
        if self._own_pool:
            self.pool.join()

    def close(self):
        if self._own_pool:
            self.pool.close()

    def __exit__(self, exc_type, exc_value, traceback):
        if self._own_pool:
            self.pool.close()
            self.pool.terminate()

    def __call__(self, func, iterable):
        # only accept one iterable because that's all Pool.map accepts
        try:
            return self._mapfunc(func, iterable)
        except TypeError as e:
            # wrong number of arguments
            raise TypeError("The map-like callable must be of the"
                            " form f(func, iterable)") from e
|
||||
|
||||
|
||||
def rng_integers(gen, low, high=None, size=None, dtype='int64',
                 endpoint=False):
    """
    Return random integers from low (inclusive) to high (exclusive), or if
    endpoint=True, low (inclusive) to high (inclusive). Replaces
    `RandomState.randint` (with endpoint=False) and
    `RandomState.random_integers` (with endpoint=True).

    Return random integers from the "discrete uniform" distribution of the
    specified dtype. If high is None (the default), then results are from
    0 to low.

    Parameters
    ----------
    gen : {None, np.random.RandomState, np.random.Generator}
        Random number generator. If None, then the np.random.RandomState
        singleton is used.
    low : int or array-like of ints
        Lowest (signed) integers to be drawn from the distribution (unless
        high=None, in which case this parameter is 0 and this value is used
        for high).
    high : int or array-like of ints
        If provided, one above the largest (signed) integer to be drawn from
        the distribution (see above for behavior if high=None). If array-like,
        must contain integer values.
    size : array-like of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. Default is None, in which case a single value is
        returned.
    dtype : {str, dtype}, optional
        Desired dtype of the result. All dtypes are determined by their name,
        i.e., 'int64', 'int', etc, so byteorder is not available and a specific
        precision may have different C types depending on the platform.
        The default value is 'int64'.
    endpoint : bool, optional
        If True, sample from the interval [low, high] instead of the default
        [low, high) Defaults to False.

    Returns
    -------
    out: int or ndarray of ints
        size-shaped array of random integers from the appropriate distribution,
        or a single such random int if size not provided.
    """
    # New-style Generator supports `endpoint` natively.
    if isinstance(gen, Generator):
        return gen.integers(low, high=high, size=size, dtype=dtype,
                            endpoint=endpoint)

    if gen is None:
        # default is RandomState singleton used by np.random.
        gen = np.random.mtrand._rand

    if not endpoint:
        # exclusive upper bound: randint's native behavior
        return gen.randint(low, high=high, size=size, dtype=dtype)

    # Inclusive upper bound: shift the bound by one.  `low` and `high` may be
    # arrays, so build new values instead of modifying in place.
    if high is None:
        return gen.randint(low + 1, size=size, dtype=dtype)
    return gen.randint(low, high=high + 1, size=size, dtype=dtype)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _fixed_default_rng(seed=1638083107694713882823079058616272161):
|
||||
"""Context with a fixed np.random.default_rng seed."""
|
||||
orig_fun = np.random.default_rng
|
||||
np.random.default_rng = lambda seed=seed: orig_fun(seed)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
np.random.default_rng = orig_fun
|
||||
|
||||
|
||||
def _rng_html_rewrite(func):
|
||||
"""Rewrite the HTML rendering of ``np.random.default_rng``.
|
||||
|
||||
This is intended to decorate
|
||||
``numpydoc.docscrape_sphinx.SphinxDocString._str_examples``.
|
||||
|
||||
Examples are only run by Sphinx when there are plot involved. Even so,
|
||||
it does not change the result values getting printed.
|
||||
"""
|
||||
# hexadecimal or number seed, case-insensitive
|
||||
pattern = re.compile(r'np.random.default_rng\((0x[0-9A-F]+|\d+)\)', re.I)
|
||||
|
||||
def _wrapped(*args, **kwargs):
|
||||
res = func(*args, **kwargs)
|
||||
lines = [
|
||||
re.sub(pattern, 'np.random.default_rng()', line)
|
||||
for line in res
|
||||
]
|
||||
return lines
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
def _argmin(a, keepdims=False, axis=None):
|
||||
"""
|
||||
argmin with a `keepdims` parameter.
|
||||
|
||||
See https://github.com/numpy/numpy/issues/8710
|
||||
|
||||
If axis is not None, a.shape[axis] must be greater than 0.
|
||||
"""
|
||||
res = np.argmin(a, axis=axis)
|
||||
if keepdims and axis is not None:
|
||||
res = np.expand_dims(res, axis=axis)
|
||||
return res
|
||||
|
||||
|
||||
def _first_nonnan(a, axis):
    """
    Return the first non-nan value along the given axis.

    If a slice is all nan, nan is returned for that slice.

    The shape of the return value corresponds to ``keepdims=True``.

    Examples
    --------
    >>> import numpy as np
    >>> nan = np.nan
    >>> a = np.array([[ 3.,  3., nan,  3.],
                      [ 1., nan,  2.,  4.],
                      [nan, nan,  9., -1.],
                      [nan,  5.,  4.,  3.],
                      [ 2.,  2.,  2.,  2.],
                      [nan, nan, nan, nan]])
    >>> _first_nonnan(a, axis=0)
    array([[3., 3., 2., 3.]])
    >>> _first_nonnan(a, axis=1)
    array([[ 3.],
           [ 1.],
           [ 9.],
           [ 5.],
           [ 2.],
           [nan]])
    """
    # np.isnan(a) is False (0) at non-nan entries, so argmin returns the index
    # of the first non-nan along `axis`; for an all-nan slice it returns 0,
    # which take_along_axis maps back to a nan value.
    k = _argmin(np.isnan(a), axis=axis, keepdims=True)
    return np.take_along_axis(a, k, axis=axis)
|
||||
|
||||
|
||||
def _nan_allsame(a, axis, keepdims=False):
    """
    Determine if the values along an axis are all the same.

    nan values are ignored.

    `a` must be a numpy array.

    `axis` is assumed to be normalized; that is, 0 <= axis < a.ndim.

    For an axis of length 0, the result is True. That is, we adopt the
    convention that ``allsame([])`` is True. (There are no values in the
    input that are different.)

    `True` is returned for slices that are all nan--not because all the
    values are the same, but because this is equivalent to ``allsame([])``.

    Examples
    --------
    >>> from numpy import nan, array
    >>> a = array([[ 3.,  3., nan,  3.],
    ...            [ 1., nan,  2.,  4.],
    ...            [nan, nan,  9., -1.],
    ...            [nan,  5.,  4.,  3.],
    ...            [ 2.,  2.,  2.,  2.],
    ...            [nan, nan, nan, nan]])
    >>> _nan_allsame(a, axis=1, keepdims=True)
    array([[ True],
           [False],
           [False],
           [False],
           [ True],
           [ True]])
    """
    if axis is None:
        if a.size == 0:
            return True
        # Flatten so the comparison below works on a single axis.
        a = a.ravel()
        axis = 0
    else:
        shp = a.shape
        if shp[axis] == 0:
            # Zero-length axis: all-same by convention.  ``(1,)*keepdims``
            # inserts the collapsed axis only when keepdims is True.
            shp = shp[:axis] + (1,)*keepdims + shp[axis + 1:]
            return np.full(shp, fill_value=True, dtype=bool)
    # Compare every element with the first non-nan value along the axis;
    # nan entries are treated as matching.
    a0 = _first_nonnan(a, axis=axis)
    return ((a0 == a) | np.isnan(a)).all(axis=axis, keepdims=keepdims)
|
||||
|
||||
|
||||
def _contains_nan(a, nan_policy='propagate', policies=None, *, xp=None):
    """Check array `a` for nans and validate `nan_policy`.

    Returns ``(contains_nan, nan_policy)``.  Raises ``ValueError`` when the
    policy is not recognized, when the policy is ``'raise'`` and nans are
    present, or when ``'omit'`` is requested for a non-NumPy array.
    """
    if xp is None:
        xp = array_namespace(a)
    numpy_backed = is_numpy(xp)

    if policies is None:
        policies = {'propagate', 'raise', 'omit'}
    if nan_policy not in policies:
        raise ValueError(f"nan_policy must be one of {set(policies)}.")

    is_inexact = (xp.isdtype(a.dtype, "real floating")
                  or xp.isdtype(a.dtype, "complex floating"))
    if xp_size(a) == 0:
        contains_nan = False
    elif is_inexact:
        # Faster and less memory-intensive than xp.any(xp.isnan(a))
        contains_nan = xp.isnan(xp.max(a))
    elif numpy_backed and np.issubdtype(a.dtype, object):
        # isnan doesn't work on non-numeric elements, so probe each element
        contains_nan = any(
            np.issubdtype(type(el), np.number) and np.isnan(el)
            for el in a.ravel())
    else:
        # Only `object` and `inexact` arrays can have NaNs
        contains_nan = False

    if contains_nan and nan_policy == 'raise':
        raise ValueError("The input contains nan values")

    if contains_nan and nan_policy == 'omit' and not numpy_backed:
        message = "`nan_policy='omit' is incompatible with non-NumPy arrays."
        raise ValueError(message)

    return contains_nan, nan_policy
|
||||
|
||||
|
||||
def _rename_parameter(old_name, new_name, dep_version=None):
|
||||
"""
|
||||
Generate decorator for backward-compatible keyword renaming.
|
||||
|
||||
Apply the decorator generated by `_rename_parameter` to functions with a
|
||||
recently renamed parameter to maintain backward-compatibility.
|
||||
|
||||
After decoration, the function behaves as follows:
|
||||
If only the new parameter is passed into the function, behave as usual.
|
||||
If only the old parameter is passed into the function (as a keyword), raise
|
||||
a DeprecationWarning if `dep_version` is provided, and behave as usual
|
||||
otherwise.
|
||||
If both old and new parameters are passed into the function, raise a
|
||||
DeprecationWarning if `dep_version` is provided, and raise the appropriate
|
||||
TypeError (function got multiple values for argument).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
old_name : str
|
||||
Old name of parameter
|
||||
new_name : str
|
||||
New name of parameter
|
||||
dep_version : str, optional
|
||||
Version of SciPy in which old parameter was deprecated in the format
|
||||
'X.Y.Z'. If supplied, the deprecation message will indicate that
|
||||
support for the old parameter will be removed in version 'X.Y+2.Z'
|
||||
|
||||
Notes
|
||||
-----
|
||||
Untested with functions that accept *args. Probably won't work as written.
|
||||
|
||||
"""
|
||||
def decorator(fun):
|
||||
@functools.wraps(fun)
|
||||
def wrapper(*args, **kwargs):
|
||||
if old_name in kwargs:
|
||||
if dep_version:
|
||||
end_version = dep_version.split('.')
|
||||
end_version[1] = str(int(end_version[1]) + 2)
|
||||
end_version = '.'.join(end_version)
|
||||
message = (f"Use of keyword argument `{old_name}` is "
|
||||
f"deprecated and replaced by `{new_name}`. "
|
||||
f"Support for `{old_name}` will be removed "
|
||||
f"in SciPy {end_version}.")
|
||||
warnings.warn(message, DeprecationWarning, stacklevel=2)
|
||||
if new_name in kwargs:
|
||||
message = (f"{fun.__name__}() got multiple values for "
|
||||
f"argument now known as `{new_name}`")
|
||||
raise TypeError(message)
|
||||
kwargs[new_name] = kwargs.pop(old_name)
|
||||
return fun(*args, **kwargs)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def _rng_spawn(rng, n_children):
|
||||
# spawns independent RNGs from a parent RNG
|
||||
bg = rng._bit_generator
|
||||
ss = bg._seed_seq
|
||||
child_rngs = [np.random.Generator(type(bg)(child_ss))
|
||||
for child_ss in ss.spawn(n_children)]
|
||||
return child_rngs
|
||||
|
||||
|
||||
def _get_nan(*data, xp=None):
|
||||
xp = array_namespace(*data) if xp is None else xp
|
||||
# Get NaN of appropriate dtype for data
|
||||
data = [xp.asarray(item) for item in data]
|
||||
try:
|
||||
min_float = getattr(xp, 'float16', xp.float32)
|
||||
dtype = xp.result_type(*data, min_float) # must be at least a float
|
||||
except DTypePromotionError:
|
||||
# fallback to float64
|
||||
dtype = xp.float64
|
||||
return xp.asarray(xp.nan, dtype=dtype)[()]
|
||||
|
||||
|
||||
def normalize_axis_index(axis, ndim):
|
||||
# Check if `axis` is in the correct range and normalize it
|
||||
if axis < -ndim or axis >= ndim:
|
||||
msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
|
||||
raise AxisError(msg)
|
||||
|
||||
if axis < 0:
|
||||
axis = axis + ndim
|
||||
return axis
|
||||
|
||||
|
||||
def _call_callback_maybe_halt(callback, res):
|
||||
"""Call wrapped callback; return True if algorithm should stop.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
callback : callable or None
|
||||
A user-provided callback wrapped with `_wrap_callback`
|
||||
res : OptimizeResult
|
||||
Information about the current iterate
|
||||
|
||||
Returns
|
||||
-------
|
||||
halt : bool
|
||||
True if minimization should stop
|
||||
|
||||
"""
|
||||
if callback is None:
|
||||
return False
|
||||
try:
|
||||
callback(res)
|
||||
return False
|
||||
except StopIteration:
|
||||
callback.stop_iteration = True
|
||||
return True
|
||||
|
||||
|
||||
class _RichResult(dict):
|
||||
""" Container for multiple outputs with pretty-printing """
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return self[name]
|
||||
except KeyError as e:
|
||||
raise AttributeError(name) from e
|
||||
|
||||
__setattr__ = dict.__setitem__ # type: ignore[assignment]
|
||||
__delattr__ = dict.__delitem__ # type: ignore[assignment]
|
||||
|
||||
def __repr__(self):
|
||||
order_keys = ['message', 'success', 'status', 'fun', 'funl', 'x', 'xl',
|
||||
'col_ind', 'nit', 'lower', 'upper', 'eqlin', 'ineqlin',
|
||||
'converged', 'flag', 'function_calls', 'iterations',
|
||||
'root']
|
||||
order_keys = getattr(self, '_order_keys', order_keys)
|
||||
# 'slack', 'con' are redundant with residuals
|
||||
# 'crossover_nit' is probably not interesting to most users
|
||||
omit_keys = {'slack', 'con', 'crossover_nit', '_order_keys'}
|
||||
|
||||
def key(item):
|
||||
try:
|
||||
return order_keys.index(item[0].lower())
|
||||
except ValueError: # item not in list
|
||||
return np.inf
|
||||
|
||||
def omit_redundant(items):
|
||||
for item in items:
|
||||
if item[0] in omit_keys:
|
||||
continue
|
||||
yield item
|
||||
|
||||
def item_sorter(d):
|
||||
return sorted(omit_redundant(d.items()), key=key)
|
||||
|
||||
if self.keys():
|
||||
return _dict_formatter(self, sorter=item_sorter)
|
||||
else:
|
||||
return self.__class__.__name__ + "()"
|
||||
|
||||
def __dir__(self):
|
||||
return list(self.keys())
|
||||
|
||||
|
||||
def _indenter(s, n=0):
|
||||
"""
|
||||
Ensures that lines after the first are indented by the specified amount
|
||||
"""
|
||||
split = s.split("\n")
|
||||
indent = " "*n
|
||||
return ("\n" + indent).join(split)
|
||||
|
||||
|
||||
def _float_formatter_10(x):
|
||||
"""
|
||||
Returns a string representation of a float with exactly ten characters
|
||||
"""
|
||||
if np.isposinf(x):
|
||||
return " inf"
|
||||
elif np.isneginf(x):
|
||||
return " -inf"
|
||||
elif np.isnan(x):
|
||||
return " nan"
|
||||
return np.format_float_scientific(x, precision=3, pad_left=2, unique=False)
|
||||
|
||||
|
||||
def _dict_formatter(d, n=0, mplus=1, sorter=None):
    """
    Pretty printer for dictionaries

    `n` keeps track of the starting indentation;
    lines are indented by this much after a line break.
    `mplus` is additional left padding applied to keys
    """
    if not isinstance(d, dict):
        # By default, NumPy arrays print with linewidth=76. `n` is
        # the indent at which a line begins printing, so it is subtracted
        # from the default to avoid exceeding 76 characters total.
        # `edgeitems` is the number of elements to include before and after
        # ellipses when arrays are not shown in full.
        # `threshold` is the maximum number of elements for which an
        # array is shown in full.
        # These values tend to work well for use with OptimizeResult.
        with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
                             formatter={'float_kind': _float_formatter_10}):
            return str(d)
    width = max(map(len, d.keys())) + mplus  # width to print keys
    lines = []
    for key, val in sorter(d):
        body = _dict_formatter(val, width + n + 2, 0, sorter)  # +2 for ': '
        # Keys right-justified to a common width; continuation lines of the
        # value indented past the ': ' separator.
        lines.append(key.rjust(width) + ': ' + _indenter(body, width + 2))
    return '\n'.join(lines)
|
||||
22
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/__init__.py
vendored
Normal file
22
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/__init__.py
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
NumPy Array API compatibility library
|
||||
|
||||
This is a small wrapper around NumPy and CuPy that is compatible with the
|
||||
Array API standard https://data-apis.org/array-api/latest/. See also NEP 47
|
||||
https://numpy.org/neps/nep-0047-array-api-standard.html.
|
||||
|
||||
Unlike array_api_strict, this is not a strict minimal implementation of the
|
||||
Array API, but rather just an extension of the main NumPy namespace with
|
||||
changes needed to be compliant with the Array API. See
|
||||
https://numpy.org/doc/stable/reference/array_api.html for a full list of
|
||||
changes. In particular, unlike array_api_strict, this package does not use a
|
||||
separate Array object, but rather just uses numpy.ndarray directly.
|
||||
|
||||
Library authors using the Array API may wish to test against array_api_strict
|
||||
to ensure they are not using functionality outside of the standard, but prefer
|
||||
this implementation for the default when working with NumPy arrays.
|
||||
|
||||
"""
|
||||
__version__ = '1.5.1'
|
||||
|
||||
from .common import * # noqa: F401, F403
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-312.pyc
vendored
Normal file
Binary file not shown.
46
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/_internal.py
vendored
Normal file
46
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/_internal.py
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
"""
|
||||
Internal helpers
|
||||
"""
|
||||
|
||||
from functools import wraps
|
||||
from inspect import signature
|
||||
|
||||
def get_xp(xp):
    """
    Decorator to automatically replace xp with the corresponding array module.

    Use like

        import numpy as np

        @get_xp(np)
        def func(x, /, xp, kwarg=None):
            return xp.func(x, kwarg=kwarg)

    Note that xp must be a keyword argument and come after all non-keyword
    arguments.
    """
    def inner(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # Inject the bound array module so callers never pass it.
            return f(*args, xp=xp, **kwargs)

        # Advertise a signature without the internal `xp` parameter.
        sig = signature(f)
        visible = [p for name, p in sig.parameters.items() if name != "xp"]
        new_sig = sig.replace(parameters=visible)

        if wrapped_f.__doc__ is None:
            wrapped_f.__doc__ = f"""\
Array API compatibility wrapper for {f.__name__}.

See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.

"""
        wrapped_f.__signature__ = new_sig
        return wrapped_f

    return inner
|
||||
1
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/__init__.py
vendored
Normal file
1
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/__init__.py
vendored
Normal file
@@ -0,0 +1 @@
|
||||
from ._helpers import * # noqa: F403
|
||||
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-312.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
554
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_aliases.py
vendored
Normal file
554
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_aliases.py
vendored
Normal file
@@ -0,0 +1,554 @@
|
||||
"""
|
||||
These are functions that are just aliases of existing functions in NumPy.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
if TYPE_CHECKING:
|
||||
import numpy as np
|
||||
from typing import Optional, Sequence, Tuple, Union
|
||||
from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol
|
||||
|
||||
from typing import NamedTuple
|
||||
from types import ModuleType
|
||||
import inspect
|
||||
|
||||
from ._helpers import _check_device, is_numpy_array, array_namespace
|
||||
|
||||
# These functions are modified from the NumPy versions.
|
||||
|
||||
def arange(
|
||||
start: Union[int, float],
|
||||
/,
|
||||
stop: Optional[Union[int, float]] = None,
|
||||
step: Union[int, float] = 1,
|
||||
*,
|
||||
xp,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs)
|
||||
|
||||
def empty(
|
||||
shape: Union[int, Tuple[int, ...]],
|
||||
xp,
|
||||
*,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.empty(shape, dtype=dtype, **kwargs)
|
||||
|
||||
def empty_like(
|
||||
x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
|
||||
**kwargs
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.empty_like(x, dtype=dtype, **kwargs)
|
||||
|
||||
def eye(
|
||||
n_rows: int,
|
||||
n_cols: Optional[int] = None,
|
||||
/,
|
||||
*,
|
||||
xp,
|
||||
k: int = 0,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs)
|
||||
|
||||
def full(
|
||||
shape: Union[int, Tuple[int, ...]],
|
||||
fill_value: Union[int, float],
|
||||
xp,
|
||||
*,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.full(shape, fill_value, dtype=dtype, **kwargs)
|
||||
|
||||
def full_like(
|
||||
x: ndarray,
|
||||
/,
|
||||
fill_value: Union[int, float],
|
||||
*,
|
||||
xp,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.full_like(x, fill_value, dtype=dtype, **kwargs)
|
||||
|
||||
def linspace(
|
||||
start: Union[int, float],
|
||||
stop: Union[int, float],
|
||||
/,
|
||||
num: int,
|
||||
*,
|
||||
xp,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
endpoint: bool = True,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs)
|
||||
|
||||
def ones(
|
||||
shape: Union[int, Tuple[int, ...]],
|
||||
xp,
|
||||
*,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.ones(shape, dtype=dtype, **kwargs)
|
||||
|
||||
def ones_like(
|
||||
x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.ones_like(x, dtype=dtype, **kwargs)
|
||||
|
||||
def zeros(
|
||||
shape: Union[int, Tuple[int, ...]],
|
||||
xp,
|
||||
*,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.zeros(shape, dtype=dtype, **kwargs)
|
||||
|
||||
def zeros_like(
|
||||
x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
_check_device(xp, device)
|
||||
return xp.zeros_like(x, dtype=dtype, **kwargs)
|
||||
|
||||
# np.unique() is split into four functions in the array API:
|
||||
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
|
||||
# to remove polymorphic return types).
|
||||
|
||||
# The functions here return namedtuples (np.unique() returns a normal
|
||||
# tuple).
|
||||
|
||||
# Note that these named tuples aren't actually part of the standard namespace,
|
||||
# but I don't see any issue with exporting the names here regardless.
|
||||
class UniqueAllResult(NamedTuple):
|
||||
values: ndarray
|
||||
indices: ndarray
|
||||
inverse_indices: ndarray
|
||||
counts: ndarray
|
||||
|
||||
|
||||
class UniqueCountsResult(NamedTuple):
|
||||
values: ndarray
|
||||
counts: ndarray
|
||||
|
||||
|
||||
class UniqueInverseResult(NamedTuple):
|
||||
values: ndarray
|
||||
inverse_indices: ndarray
|
||||
|
||||
|
||||
def _unique_kwargs(xp):
|
||||
# Older versions of NumPy and CuPy do not have equal_nan. Rather than
|
||||
# trying to parse version numbers, just check if equal_nan is in the
|
||||
# signature.
|
||||
s = inspect.signature(xp.unique)
|
||||
if 'equal_nan' in s.parameters:
|
||||
return {'equal_nan': False}
|
||||
return {}
|
||||
|
||||
def unique_all(x: ndarray, /, xp) -> UniqueAllResult:
|
||||
kwargs = _unique_kwargs(xp)
|
||||
values, indices, inverse_indices, counts = xp.unique(
|
||||
x,
|
||||
return_counts=True,
|
||||
return_index=True,
|
||||
return_inverse=True,
|
||||
**kwargs,
|
||||
)
|
||||
# np.unique() flattens inverse indices, but they need to share x's shape
|
||||
# See https://github.com/numpy/numpy/issues/20638
|
||||
inverse_indices = inverse_indices.reshape(x.shape)
|
||||
return UniqueAllResult(
|
||||
values,
|
||||
indices,
|
||||
inverse_indices,
|
||||
counts,
|
||||
)
|
||||
|
||||
|
||||
def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult:
|
||||
kwargs = _unique_kwargs(xp)
|
||||
res = xp.unique(
|
||||
x,
|
||||
return_counts=True,
|
||||
return_index=False,
|
||||
return_inverse=False,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
return UniqueCountsResult(*res)
|
||||
|
||||
|
||||
def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult:
|
||||
kwargs = _unique_kwargs(xp)
|
||||
values, inverse_indices = xp.unique(
|
||||
x,
|
||||
return_counts=False,
|
||||
return_index=False,
|
||||
return_inverse=True,
|
||||
**kwargs,
|
||||
)
|
||||
# xp.unique() flattens inverse indices, but they need to share x's shape
|
||||
# See https://github.com/numpy/numpy/issues/20638
|
||||
inverse_indices = inverse_indices.reshape(x.shape)
|
||||
return UniqueInverseResult(values, inverse_indices)
|
||||
|
||||
|
||||
def unique_values(x: ndarray, /, xp) -> ndarray:
|
||||
kwargs = _unique_kwargs(xp)
|
||||
return xp.unique(
|
||||
x,
|
||||
return_counts=False,
|
||||
return_index=False,
|
||||
return_inverse=False,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray:
    """Cast `x` to `dtype`; with ``copy=False`` a matching dtype returns `x` itself."""
    if copy or dtype != x.dtype:
        return x.astype(dtype=dtype, copy=copy)
    return x
|
||||
|
||||
# These functions have different keyword argument names
|
||||
|
||||
def std(
|
||||
x: ndarray,
|
||||
/,
|
||||
xp,
|
||||
*,
|
||||
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
||||
correction: Union[int, float] = 0.0, # correction instead of ddof
|
||||
keepdims: bool = False,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
|
||||
|
||||
def var(
|
||||
x: ndarray,
|
||||
/,
|
||||
xp,
|
||||
*,
|
||||
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
||||
correction: Union[int, float] = 0.0, # correction instead of ddof
|
||||
keepdims: bool = False,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
|
||||
|
||||
# Unlike transpose(), the axes argument to permute_dims() is required.
|
||||
def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray:
|
||||
return xp.transpose(x, axes)
|
||||
|
||||
# Creation functions add the device keyword (which does nothing for NumPy)
|
||||
|
||||
# asarray also adds the copy keyword
|
||||
def _asarray(
|
||||
obj: Union[
|
||||
ndarray,
|
||||
bool,
|
||||
int,
|
||||
float,
|
||||
NestedSequence[bool | int | float],
|
||||
SupportsBufferProtocol,
|
||||
],
|
||||
/,
|
||||
*,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
copy: "Optional[Union[bool, np._CopyMode]]" = None,
|
||||
namespace = None,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
"""
|
||||
Array API compatibility wrapper for asarray().
|
||||
|
||||
See the corresponding documentation in NumPy/CuPy and/or the array API
|
||||
specification for more details.
|
||||
|
||||
"""
|
||||
if namespace is None:
|
||||
try:
|
||||
xp = array_namespace(obj, _use_compat=False)
|
||||
except ValueError:
|
||||
# TODO: What about lists of arrays?
|
||||
raise ValueError("A namespace must be specified for asarray() with non-array input")
|
||||
elif isinstance(namespace, ModuleType):
|
||||
xp = namespace
|
||||
elif namespace == 'numpy':
|
||||
import numpy as xp
|
||||
elif namespace == 'cupy':
|
||||
import cupy as xp
|
||||
elif namespace == 'dask.array':
|
||||
import dask.array as xp
|
||||
else:
|
||||
raise ValueError("Unrecognized namespace argument to asarray()")
|
||||
|
||||
_check_device(xp, device)
|
||||
if is_numpy_array(obj):
|
||||
import numpy as np
|
||||
if hasattr(np, '_CopyMode'):
|
||||
# Not present in older NumPys
|
||||
COPY_FALSE = (False, np._CopyMode.IF_NEEDED)
|
||||
COPY_TRUE = (True, np._CopyMode.ALWAYS)
|
||||
else:
|
||||
COPY_FALSE = (False,)
|
||||
COPY_TRUE = (True,)
|
||||
else:
|
||||
COPY_FALSE = (False,)
|
||||
COPY_TRUE = (True,)
|
||||
if copy in COPY_FALSE and namespace != "dask.array":
|
||||
# copy=False is not yet implemented in xp.asarray
|
||||
raise NotImplementedError("copy=False is not yet implemented")
|
||||
if (hasattr(xp, "ndarray") and isinstance(obj, xp.ndarray)):
|
||||
if dtype is not None and obj.dtype != dtype:
|
||||
copy = True
|
||||
if copy in COPY_TRUE:
|
||||
return xp.array(obj, copy=True, dtype=dtype)
|
||||
return obj
|
||||
elif namespace == "dask.array":
|
||||
if copy in COPY_TRUE:
|
||||
if dtype is None:
|
||||
return obj.copy()
|
||||
# Go through numpy, since dask copy is no-op by default
|
||||
import numpy as np
|
||||
obj = np.array(obj, dtype=dtype, copy=True)
|
||||
return xp.array(obj, dtype=dtype)
|
||||
else:
|
||||
import dask.array as da
|
||||
import numpy as np
|
||||
if not isinstance(obj, da.Array):
|
||||
obj = np.asarray(obj, dtype=dtype)
|
||||
return da.from_array(obj)
|
||||
return obj
|
||||
|
||||
return xp.asarray(obj, dtype=dtype, **kwargs)
|
||||
|
||||
# np.reshape calls the keyword argument 'newshape' instead of 'shape'
|
||||
def reshape(x: ndarray,
            /,
            shape: Tuple[int, ...],
            xp, copy: Optional[bool] = None,
            **kwargs) -> ndarray:
    """Array-API reshape with a `copy` keyword (NumPy's own kwarg is 'newshape')."""
    if copy is False:
        # In-place shape assignment on a view raises when a copy would be
        # required, which is exactly the copy=False contract.
        view = x.view()
        view.shape = shape
        return view
    if copy is True:
        x = x.copy()
    return xp.reshape(x, shape, **kwargs)
|
||||
|
||||
# The descending keyword is new in sort and argsort, and 'kind' replaced with
|
||||
# 'stable'
|
||||
def argsort(
    x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True,
    **kwargs,
) -> ndarray:
    """Array-API argsort with `descending` and `stable` keywords."""
    # Note: this keyword argument is different, and the default is different.
    # We set it in kwargs like this because numpy.sort uses kind='quicksort'
    # as the default whereas cupy.sort uses kind=None.
    if stable:
        kwargs['kind'] = "stable"
    if not descending:
        return xp.argsort(x, axis=axis, **kwargs)
    # As NumPy has no native descending sort, we imitate it here. Note that
    # simply flipping the results of xp.argsort(x, ...) would not
    # respect the relative order like it would in native descending sorts.
    order = xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs)
    order = xp.flip(order, axis=axis)
    # Rely on flip()/argsort() to validate axis
    pos_axis = axis if axis >= 0 else x.ndim + axis
    last = x.shape[pos_axis] - 1
    # Convert flipped-array indices back into original-array indices.
    return last - order
|
||||
|
||||
def sort(
    x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True,
    **kwargs,
) -> ndarray:
    """Array-API sort with `descending` and `stable` keywords."""
    # Note: this keyword argument is different, and the default is different.
    # We set it in kwargs like this because numpy.sort uses kind='quicksort'
    # as the default whereas cupy.sort uses kind=None.
    if stable:
        kwargs['kind'] = "stable"
    out = xp.sort(x, axis=axis, **kwargs)
    # No native descending sort: sort ascending, then reverse along `axis`.
    return xp.flip(out, axis=axis) if descending else out
|
||||
|
||||
# nonzero should error for zero-dimensional arrays
|
||||
def nonzero(x: ndarray, /, xp, **kwargs) -> Tuple[ndarray, ...]:
    """Indices of nonzero elements; the Array API forbids 0-d input."""
    if x.ndim != 0:
        return xp.nonzero(x, **kwargs)
    raise ValueError("nonzero() does not support zero-dimensional arrays")
|
||||
|
||||
# sum() and prod() should always upcast when dtype=None
|
||||
def sum(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    """Array-API sum: single-precision input is accumulated in double precision."""
    # `xp.sum` already upcasts integers, but not floats or complexes
    if dtype is None and x.dtype == xp.float32:
        dtype = xp.float64
    elif dtype is None and x.dtype == xp.complex64:
        dtype = xp.complex128
    return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs)
|
||||
|
||||
def prod(
|
||||
x: ndarray,
|
||||
/,
|
||||
xp,
|
||||
*,
|
||||
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
||||
dtype: Optional[Dtype] = None,
|
||||
keepdims: bool = False,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
if dtype is None:
|
||||
if x.dtype == xp.float32:
|
||||
dtype = xp.float64
|
||||
elif x.dtype == xp.complex64:
|
||||
dtype = xp.complex128
|
||||
return xp.prod(x, dtype=dtype, axis=axis, keepdims=keepdims, **kwargs)
|
||||
|
||||
# ceil, floor, and trunc return integers for integer inputs
|
||||
|
||||
def ceil(x: ndarray, /, xp, **kwargs) -> ndarray:
|
||||
if xp.issubdtype(x.dtype, xp.integer):
|
||||
return x
|
||||
return xp.ceil(x, **kwargs)
|
||||
|
||||
def floor(x: ndarray, /, xp, **kwargs) -> ndarray:
|
||||
if xp.issubdtype(x.dtype, xp.integer):
|
||||
return x
|
||||
return xp.floor(x, **kwargs)
|
||||
|
||||
def trunc(x: ndarray, /, xp, **kwargs) -> ndarray:
|
||||
if xp.issubdtype(x.dtype, xp.integer):
|
||||
return x
|
||||
return xp.trunc(x, **kwargs)
|
||||
|
||||
# linear algebra functions
|
||||
|
||||
def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
|
||||
return xp.matmul(x1, x2, **kwargs)
|
||||
|
||||
# Unlike transpose, matrix_transpose only transposes the last two axes.
|
||||
def matrix_transpose(x: ndarray, /, xp) -> ndarray:
    """Transpose only the last two axes (unlike transpose(), which reverses all)."""
    if x.ndim >= 2:
        return xp.swapaxes(x, -1, -2)
    raise ValueError("x must be at least 2-dimensional for matrix_transpose")
|
||||
|
||||
def tensordot(x1: ndarray,
|
||||
x2: ndarray,
|
||||
/,
|
||||
xp,
|
||||
*,
|
||||
axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
|
||||
**kwargs,
|
||||
) -> ndarray:
|
||||
return xp.tensordot(x1, x2, axes=axes, **kwargs)
|
||||
|
||||
def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray:
    """Dot product along `axis` with broadcasting, per the Array API `vecdot`."""
    if x1.shape[axis] != x2.shape[axis]:
        raise ValueError("x1 and x2 must have the same size along the given axis")

    # torch names it broadcast_tensors; NumPy/CuPy name it broadcast_arrays.
    broadcast = getattr(xp, 'broadcast_tensors', None) or xp.broadcast_arrays

    a = xp.moveaxis(x1, axis, -1)
    b = xp.moveaxis(x2, axis, -1)
    a, b = broadcast(a, b)

    # Batched (1 x n) @ (n x 1) matmul computes the dot product per slice.
    out = a[..., None, :] @ b[..., None]
    return out[..., 0, 0]
|
||||
|
||||
# isdtype is a new function in the 2022.12 array API specification.
|
||||
|
||||
def isdtype(
    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp,
    *, _tuple=True, # Disallow nested tuples
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    Note that outside of this function, this compat library does not yet fully
    support complex numbers.

    See
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    for more details
    """
    if isinstance(kind, tuple) and _tuple:
        # Top-level tuples are unions of kinds; nesting is disallowed.
        return any(isdtype(dtype, k, xp, _tuple=False) for k in kind)
    if isinstance(kind, str):
        if kind == 'bool':
            return dtype == xp.bool_
        # Map each spec category name onto the xp abstract supertype.
        categories = {
            'signed integer': 'signedinteger',
            'unsigned integer': 'unsignedinteger',
            'integral': 'integer',
            'real floating': 'floating',
            'complex floating': 'complexfloating',
            'numeric': 'number',
        }
        try:
            super_name = categories[kind]
        except KeyError:
            raise ValueError(f"Unrecognized data type kind: {kind!r}") from None
        return xp.issubdtype(dtype, getattr(xp, super_name))
    # This will allow things that aren't required by the spec, like
    # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
    # more strict here to match the type annotation? Note that the
    # array_api_strict implementation will be very strict.
    return dtype == kind
|
||||
|
||||
__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like',
|
||||
'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like',
|
||||
'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult',
|
||||
'unique_all', 'unique_counts', 'unique_inverse', 'unique_values',
|
||||
'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort',
|
||||
'sort', 'nonzero', 'sum', 'prod', 'ceil', 'floor', 'trunc',
|
||||
'matmul', 'matrix_transpose', 'tensordot', 'vecdot', 'isdtype']
|
||||
183
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_fft.py
vendored
Normal file
183
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_fft.py
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Union, Optional, Literal
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._typing import Device, ndarray
|
||||
from collections.abc import Sequence
|
||||
|
||||
# Note: NumPy fft functions improperly upcast float32 and complex64 to
|
||||
# complex128, which is why we require wrapping them all here.
|
||||
|
||||
def fft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """1-D discrete Fourier transform via ``xp.fft.fft``.

    NumPy improperly upcasts float32/complex64 input to complex128, so for
    those input dtypes the result is cast back to complex64.
    """
    out = xp.fft.fft(x, n=n, axis=axis, norm=norm)
    if x.dtype in (xp.float32, xp.complex64):
        out = out.astype(xp.complex64)
    return out


def ifft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """1-D inverse discrete Fourier transform via ``xp.fft.ifft``.

    As with ``fft``, float32/complex64 input yields a complex64 result.
    """
    out = xp.fft.ifft(x, n=n, axis=axis, norm=norm)
    if x.dtype in (xp.float32, xp.complex64):
        out = out.astype(xp.complex64)
    return out
|
||||
|
||||
def fftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Optional[Sequence[int]] = None,
    axes: Optional[Sequence[int]] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """N-D discrete Fourier transform via ``xp.fft.fftn``.

    NumPy improperly upcasts float32/complex64 input to complex128, so for
    those input dtypes the result is cast back to complex64.

    Parameters
    ----------
    x : ndarray
        Input array.
    xp : module
        Array namespace providing an ``fft`` submodule.
    s, axes : sequence of int, optional
        Output shape and axes to transform (``None`` means all axes).
        Annotated ``Optional[...]``: the previous bare ``Sequence[int]``
        annotation did not admit the ``None`` default.
    norm : {"backward", "ortho", "forward"}
        Normalization mode.
    """
    res = xp.fft.fftn(x, s=s, axes=axes, norm=norm)
    if x.dtype in [xp.float32, xp.complex64]:
        return res.astype(xp.complex64)
    return res


def ifftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Optional[Sequence[int]] = None,
    axes: Optional[Sequence[int]] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """N-D inverse discrete Fourier transform via ``xp.fft.ifftn``.

    See ``fftn`` for parameter semantics; float32/complex64 input yields a
    complex64 result.
    """
    res = xp.fft.ifftn(x, s=s, axes=axes, norm=norm)
    if x.dtype in [xp.float32, xp.complex64]:
        return res.astype(xp.complex64)
    return res
|
||||
|
||||
def rfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """Real-input 1-D FFT via ``xp.fft.rfft``.

    float32 input is kept in single precision (complex64 result) instead of
    NumPy's default upcast to complex128.
    """
    out = xp.fft.rfft(x, n=n, axis=axis, norm=norm)
    if x.dtype == xp.float32:
        out = out.astype(xp.complex64)
    return out


def irfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """Inverse of ``rfft`` via ``xp.fft.irfft``.

    complex64 input is kept in single precision (float32 result).
    """
    out = xp.fft.irfft(x, n=n, axis=axis, norm=norm)
    if x.dtype == xp.complex64:
        out = out.astype(xp.float32)
    return out
|
||||
|
||||
def rfftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Optional[Sequence[int]] = None,
    axes: Optional[Sequence[int]] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """Real-input N-D FFT via ``xp.fft.rfftn``.

    float32 input is kept in single precision (complex64 result). The
    ``s``/``axes`` annotations are made explicitly ``Optional`` to match
    their ``None`` defaults.
    """
    res = xp.fft.rfftn(x, s=s, axes=axes, norm=norm)
    if x.dtype == xp.float32:
        return res.astype(xp.complex64)
    return res


def irfftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Optional[Sequence[int]] = None,
    axes: Optional[Sequence[int]] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """Inverse of ``rfftn`` via ``xp.fft.irfftn``.

    complex64 input is kept in single precision (float32 result).
    """
    res = xp.fft.irfftn(x, s=s, axes=axes, norm=norm)
    if x.dtype == xp.complex64:
        return res.astype(xp.float32)
    return res
|
||||
|
||||
def hfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """FFT of a Hermitian-symmetric signal via ``xp.fft.hfft``.

    float32/complex64 input is kept in single precision (float32 result,
    since ``hfft`` produces a real output).
    """
    out = xp.fft.hfft(x, n=n, axis=axis, norm=norm)
    if x.dtype in (xp.float32, xp.complex64):
        out = out.astype(xp.float32)
    return out


def ihfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """Inverse of ``hfft`` via ``xp.fft.ihfft``.

    float32/complex64 input is kept in single precision (complex64 result).
    """
    out = xp.fft.ihfft(x, n=n, axis=axis, norm=norm)
    if x.dtype in (xp.float32, xp.complex64):
        out = out.astype(xp.complex64)
    return out
|
||||
|
||||
def fftfreq(n: int, /, xp, *, d: float = 1.0, device: Optional[Device] = None) -> ndarray:
    """Return the DFT sample frequencies for a length-``n`` signal.

    Only the CPU device (or ``None``) is supported, since the result is
    always produced by the host namespace.
    """
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    return xp.fft.fftfreq(n, d=d)


def rfftfreq(n: int, /, xp, *, d: float = 1.0, device: Optional[Device] = None) -> ndarray:
    """Return the real-input DFT sample frequencies for a length-``n`` signal.

    Only the CPU device (or ``None``) is supported.
    """
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    return xp.fft.rfftfreq(n, d=d)
|
||||
|
||||
def fftshift(x: ndarray, /, xp, *, axes: Optional[Union[int, Sequence[int]]] = None) -> ndarray:
    """Shift the zero-frequency component to the center of the spectrum.

    ``axes`` may be an int, a sequence of ints, or ``None`` (all axes); the
    annotation is made explicitly ``Optional`` to match the ``None`` default.
    """
    return xp.fft.fftshift(x, axes=axes)


def ifftshift(x: ndarray, /, xp, *, axes: Optional[Union[int, Sequence[int]]] = None) -> ndarray:
    """Inverse of ``fftshift``; see ``fftshift`` for the ``axes`` semantics."""
    return xp.fft.ifftshift(x, axes=axes)
|
||||
|
||||
# Names re-exported by the fft compat namespace.
__all__ = [
    "fft", "ifft", "fftn", "ifftn",
    "rfft", "irfft", "rfftn", "irfftn",
    "hfft", "ihfft",
    "fftfreq", "rfftfreq",
    "fftshift", "ifftshift",
]
|
||||
515
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_helpers.py
vendored
Normal file
515
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_helpers.py
vendored
Normal file
@@ -0,0 +1,515 @@
|
||||
"""
|
||||
Various helper functions which are not part of the spec.
|
||||
|
||||
Functions which start with an underscore are for internal use only but helpers
|
||||
that are in __all__ are intended as additional helper functions for use by end
|
||||
users of the compat library.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional, Union, Any
|
||||
from ._typing import Array, Device
|
||||
|
||||
import sys
|
||||
import math
|
||||
import inspect
|
||||
import warnings
|
||||
|
||||
def is_numpy_array(x):
    """Return True if ``x`` is a NumPy ndarray, ndarray subclass, or scalar.

    Cheap to call: NumPy is only inspected if it has already been imported
    (an array of a library can only exist if the library was imported).

    See Also
    --------
    array_namespace
    is_array_api_obj
    is_cupy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    if 'numpy' not in sys.modules:
        # NumPy was never imported, so x cannot be a NumPy array.
        return False

    import numpy as np

    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, (np.ndarray, np.generic))


def is_cupy_array(x):
    """Return True if ``x`` is a CuPy ndarray, subclass, or scalar.

    Cheap to call: CuPy is only inspected if it has already been imported.

    See Also
    --------
    array_namespace
    is_array_api_obj
    is_numpy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    if 'cupy' not in sys.modules:
        return False

    import cupy as cp

    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, (cp.ndarray, cp.generic))


def is_torch_array(x):
    """Return True if ``x`` is a PyTorch tensor.

    Cheap to call: PyTorch is only inspected if it has already been imported.

    See Also
    --------
    array_namespace
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_dask_array
    is_jax_array
    """
    if 'torch' not in sys.modules:
        return False

    import torch

    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, torch.Tensor)


def is_dask_array(x):
    """Return True if ``x`` is a ``dask.array`` Array.

    Cheap to call: dask is only inspected if it has already been imported.

    See Also
    --------
    array_namespace
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_jax_array
    """
    if 'dask.array' not in sys.modules:
        return False

    import dask.array

    return isinstance(x, dask.array.Array)


def is_jax_array(x):
    """Return True if ``x`` is a JAX array.

    Cheap to call: JAX is only inspected if it has already been imported.

    See Also
    --------
    array_namespace
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_dask_array
    """
    if 'jax' not in sys.modules:
        return False

    import jax

    return isinstance(x, jax.Array)


def is_array_api_obj(x):
    """Return True if ``x`` is an array API compatible array object.

    Any object recognized by the library-specific predicates above, or any
    object exposing ``__array_namespace__``, qualifies.

    See Also
    --------
    array_namespace
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    if is_numpy_array(x) or is_cupy_array(x) or is_torch_array(x):
        return True
    if is_dask_array(x) or is_jax_array(x):
        return True
    return hasattr(x, '__array_namespace__')
|
||||
|
||||
def _check_api_version(api_version):
    # 2021.12 requests are honored with the 2022.12 namespace plus a warning;
    # anything other than None / '2021.12' / '2022.12' is rejected.
    if api_version == '2021.12':
        warnings.warn("The 2021.12 version of the array API specification was requested but the returned namespace is actually version 2022.12")
    elif api_version is not None and api_version != '2022.12':
        raise ValueError("Only the 2022.12 version of the array API specification is currently supported")


def array_namespace(*xs, api_version=None, _use_compat=True):
    """Get the array API compatible namespace for the arrays `xs`.

    Parameters
    ----------
    xs: arrays
        one or more arrays.
    api_version: str
        The newest version of the spec that you need support for (currently
        the compat library wrapped APIs support v2022.12).

    Returns
    -------
    out: namespace
        The array API compatible namespace corresponding to the arrays in `xs`.

    Raises
    ------
    TypeError
        If `xs` contains arrays from different array libraries or contains a
        non-array.

    Typical usage is to call this at the top of a function on its array
    arguments and then use the returned namespace for all array operations:

    .. code:: python

        def your_function(x, y):
            xp = array_api_compat.array_namespace(x, y)
            # Now use xp as the array library namespace
            return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)

    Wrapped array namespaces can also be imported directly; e.g.
    `array_namespace(np.array(...))` returns `array_api_compat.numpy`. Any
    array library that explicitly defines `__array_namespace__
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html>`__
    also works, even if not wrapped here (the wrapped namespace is preferred
    if it exists).

    See Also
    --------
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    found = set()
    for x in xs:
        if is_numpy_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import numpy as numpy_namespace
                found.add(numpy_namespace)
            else:
                import numpy as np
                found.add(np)
        elif is_cupy_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import cupy as cupy_namespace
                found.add(cupy_namespace)
            else:
                import cupy as cp
                found.add(cp)
        elif is_torch_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import torch as torch_namespace
                found.add(torch_namespace)
            else:
                import torch
                found.add(torch)
        elif is_dask_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from ..dask import array as dask_namespace
                found.add(dask_namespace)
            else:
                raise TypeError("_use_compat cannot be False if input array is a dask array!")
        elif is_jax_array(x):
            _check_api_version(api_version)
            # jax.experimental.array_api is already an array namespace; there
            # is no wrapper submodule for it.
            import jax.experimental.array_api as jnp
            found.add(jnp)
        elif hasattr(x, '__array_namespace__'):
            found.add(x.__array_namespace__(api_version=api_version))
        else:
            # TODO: Support Python scalars?
            raise TypeError(f"{type(x).__name__} is not a supported array type")

    if not found:
        raise TypeError("Unrecognized array input")

    if len(found) != 1:
        raise TypeError(f"Multiple namespaces for array inputs: {found}")

    namespace, = found
    return namespace


# backwards compatibility alias
get_namespace = array_namespace
|
||||
|
||||
def _check_device(xp, device):
    # NumPy only supports the CPU "device"; reject anything else early.
    # Other namespaces are not validated here.
    if xp == sys.modules.get('numpy') and device not in ["cpu", None]:
        raise ValueError(f"Unsupported device for NumPy: {device!r}")
|
||||
|
||||
# Placeholder object to represent the dask device
|
||||
# when the array backend is not the CPU.
|
||||
# (since it is not easy to tell which device a dask array is on)
|
||||
# Placeholder object to represent the dask device when the array backend is
# not the CPU (it is not easy to tell which device a dask array is on).
class _dask_device:
    """Sentinel device type returned by ``device()`` for non-CPU dask arrays."""

    def __repr__(self):
        return "DASK_DEVICE"


# Module-level singleton used by device().
_DASK_DEVICE = _dask_device()
|
||||
|
||||
# device() is not on numpy.ndarray or dask.array and to_device() is not on numpy.ndarray
|
||||
# or cupy.ndarray. They are not included in array objects of this library
|
||||
# because this library just reuses the respective ndarray classes without
|
||||
# wrapping or subclassing them. These helper functions can be used instead of
|
||||
# the wrapper functions for libraries that need to support both NumPy/CuPy and
|
||||
# other libraries that use devices.
|
||||
def device(x: Array, /) -> Device:
    """
    Hardware device the array data resides on.

    This is equivalent to `x.device` according to the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
    This helper is included because some array libraries either do not have
    the `device` attribute or include it with an incompatible API.

    Parameters
    ----------
    x: array
        array instance from an array API compatible library.

    Returns
    -------
    out: device
        a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
        section of the array API specification).

    Notes
    -----

    For NumPy the device is always `"cpu"`. For Dask, the device is always a
    special `DASK_DEVICE` object.

    See Also
    --------

    to_device : Move array data to a different device.

    """
    if is_numpy_array(x):
        return "cpu"
    elif is_dask_array(x):
        # Peek at the metadata of the dask array to determine the backing
        # type. (Fixed: the comment previously said "jax array" here.)
        try:
            import numpy as np
            if isinstance(x._meta, np.ndarray):
                # Must be on CPU since backed by numpy
                return "cpu"
        except ImportError:
            pass
        return _DASK_DEVICE
    elif is_jax_array(x):
        # JAX has .device() as a method, but it is being deprecated so that it
        # can become a property, in accordance with the standard. In order for
        # this function to not break when JAX makes the flip, we check for
        # both here.
        if inspect.ismethod(x.device):
            return x.device()
        else:
            return x.device
    # Any other array API compatible object exposes .device directly.
    return x.device
|
||||
|
||||
# Based on cupy.array_api.Array.to_device
|
||||
def _cupy_to_device(x, device, /, stream=None):
    """Copy a cupy array to ``device``, optionally on a given CUDA stream.

    Based on ``cupy.array_api.Array.to_device``. ``device`` may be the
    string ``"cpu"`` (returns a host copy) or a ``cupy.cuda.Device``;
    ``stream`` may be an int (as in ``__dlpack__``) or a CuPy stream.
    """
    import cupy as cp
    from cupy.cuda import Device as _Device
    from cupy.cuda import stream as stream_module
    from cupy_backends.cuda.api import runtime

    if device == x.device:
        return x
    if device == "cpu":
        # allowing us to use `to_device(x, "cpu")`
        # is useful for portable test swapping between
        # host and device backends
        return x.get()
    if not isinstance(device, _Device):
        raise ValueError(f"Unsupported device {device!r}")

    # see cupy/cupy#5985 for the reason how we handle device/stream here
    saved_device = runtime.getDevice()
    saved_stream: stream_module.Stream = None
    if stream is not None:
        saved_stream = stream_module.get_current_stream()
        # stream can be an int as specified in __dlpack__, or a CuPy stream
        if isinstance(stream, int):
            stream = cp.cuda.ExternalStream(stream)
        elif isinstance(stream, cp.cuda.Stream):
            pass
        else:
            raise ValueError('the input stream is not recognized')
        stream.use()
    try:
        runtime.setDevice(device.id)
        arr = x.copy()
    finally:
        # Always restore the previous device (and stream, if we switched).
        runtime.setDevice(saved_device)
        if stream is not None:
            saved_stream.use()
    return arr
|
||||
|
||||
def _torch_to_device(x, device, /, stream=None):
    """Move a torch tensor to ``device``; the ``stream`` argument is unsupported."""
    if stream is None:
        return x.to(device)
    raise NotImplementedError
|
||||
|
||||
def to_device(x: Array, device: Device, /, *, stream: Optional[Union[int, Any]] = None) -> Array:
    """
    Copy the array from the device on which it currently resides to the specified ``device``.

    This is equivalent to `x.to_device(device, stream=stream)` according to
    the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
    This helper is included because some array libraries do not have the
    `to_device` method.

    Parameters
    ----------
    x: array
        array instance from an array API compatible library.
    device: device
        a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
        section of the array API specification).
    stream: Optional[Union[int, Any]]
        stream object to use during copy. In addition to the types supported
        in ``array.__dlpack__``, implementations may choose to support any
        library-specific stream object with the caveat that any code using
        such an object would not be portable.

    Returns
    -------
    out: array
        an array with the same data and data type as ``x`` and located on the
        specified ``device``.

    Notes
    -----
    For NumPy this is effectively a no-op (CPU is the only device). For CuPy
    it supports CUDA ``Device`` and ``Stream`` objects. For PyTorch it is the
    same as ``x.to(device)`` (``stream`` is not supported).

    See Also
    --------
    device : Hardware device the array data resides on.
    """
    if is_numpy_array(x):
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        if device == 'cpu':
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif is_cupy_array(x):
        # cupy does not yet have to_device
        return _cupy_to_device(x, device, stream=stream)
    elif is_torch_array(x):
        return _torch_to_device(x, device, stream=stream)
    elif is_dask_array(x):
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        # TODO: What if our array is on the GPU already?
        if device == 'cpu':
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif is_jax_array(x):
        # This import adds to_device to x
        import jax.experimental.array_api  # noqa: F401
        return x.to_device(device, stream=stream)
    # Any other array API compatible object has to_device per the standard.
    return x.to_device(device, stream=stream)
|
||||
|
||||
def size(x):
    """
    Return the total number of elements of x.

    This is equivalent to `x.size` according to the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html>`__.
    This helper is included because PyTorch defines `size` in an
    :external+torch:meth:`incompatible way <torch.Tensor.size>`.
    """
    shape = x.shape
    # Lazy libraries (e.g. dask) report unknown dimensions as None, in which
    # case the total size is unknown too.
    if None in shape:
        return None
    return math.prod(shape)
|
||||
|
||||
# Public helper API of this module.
__all__ = [
    "array_namespace", "device", "get_namespace", "is_array_api_obj",
    "is_cupy_array", "is_dask_array", "is_jax_array", "is_numpy_array",
    "is_torch_array", "size", "to_device",
]

# Module-level names that wrapper namespaces should not re-export.
_all_ignore = ['sys', 'math', 'inspect', 'warnings']
|
||||
161
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_linalg.py
vendored
Normal file
161
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_linalg.py
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, NamedTuple
|
||||
if TYPE_CHECKING:
|
||||
from typing import Literal, Optional, Tuple, Union
|
||||
from ._typing import ndarray
|
||||
|
||||
import math
|
||||
|
||||
import numpy as np
# NumPy 2.0 moved normalize_axis_tuple to numpy.lib.array_utils. Compare the
# major version numerically: the previous check (np.__version__[0] == "2")
# would silently take the legacy branch for any future major version.
if int(np.__version__.split(".")[0]) >= 2:
    from numpy.lib.array_utils import normalize_axis_tuple
else:
    from numpy.core.numeric import normalize_axis_tuple
|
||||
|
||||
from ._aliases import matmul, matrix_transpose, tensordot, vecdot, isdtype
|
||||
from .._internal import get_xp
|
||||
|
||||
# These are in the main NumPy namespace but not in numpy.linalg
|
||||
# These are in the main NumPy namespace but not in numpy.linalg
def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray:
    """Cross product of 3-vectors along ``axis`` (main namespace, not linalg)."""
    return xp.cross(x1, x2, axis=axis, **kwargs)


def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
    """Outer product of two vectors (main namespace, not linalg)."""
    return xp.outer(x1, x2, **kwargs)
|
||||
|
||||
class EighResult(NamedTuple):
    """Eigenvalues and eigenvectors of a Hermitian matrix."""
    eigenvalues: ndarray
    eigenvectors: ndarray


class QRResult(NamedTuple):
    """Q and R factors of a QR decomposition."""
    Q: ndarray
    R: ndarray


class SlogdetResult(NamedTuple):
    """Sign and log-absolute-value of a determinant."""
    sign: ndarray
    logabsdet: ndarray


class SVDResult(NamedTuple):
    """U, singular values S, and Vh of a singular value decomposition."""
    U: ndarray
    S: ndarray
    Vh: ndarray


# These functions are the same as their NumPy counterparts except they return
# a namedtuple, as required by the array API specification.
def eigh(x: ndarray, /, xp, **kwargs) -> EighResult:
    """``xp.linalg.eigh`` wrapped to return an ``EighResult`` namedtuple."""
    return EighResult(*xp.linalg.eigh(x, **kwargs))


def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced',
       **kwargs) -> QRResult:
    """``xp.linalg.qr`` wrapped to return a ``QRResult`` namedtuple."""
    return QRResult(*xp.linalg.qr(x, mode=mode, **kwargs))


def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult:
    """``xp.linalg.slogdet`` wrapped to return a ``SlogdetResult`` namedtuple."""
    return SlogdetResult(*xp.linalg.slogdet(x, **kwargs))


def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult:
    """``xp.linalg.svd`` wrapped to return an ``SVDResult`` namedtuple."""
    return SVDResult(*xp.linalg.svd(x, full_matrices=full_matrices, **kwargs))
|
||||
|
||||
# These functions have additional keyword arguments
|
||||
|
||||
# The upper keyword argument is new from NumPy
|
||||
# The upper keyword argument is new from NumPy
def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray:
    """Cholesky decomposition.

    ``upper=True`` returns the upper-triangular factor (the conjugate
    transpose of the lower factor) — an extension over ``xp.linalg.cholesky``,
    which only produces the lower factor.
    """
    lower = xp.linalg.cholesky(x, **kwargs)
    if not upper:
        return lower
    upper_factor = get_xp(xp)(matrix_transpose)(lower)
    if get_xp(xp)(isdtype)(upper_factor.dtype, 'complex floating'):
        # For complex input the upper factor is the conjugate transpose.
        upper_factor = xp.conj(upper_factor)
    return upper_factor
|
||||
|
||||
# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
|
||||
# Note that it has a different semantic meaning from tol and rcond.
|
||||
# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
# Note that it has a different semantic meaning from tol and rcond.
def matrix_rank(x: ndarray,
                /,
                xp,
                *,
                rtol: Optional[Union[float, ndarray]] = None,
                **kwargs) -> ndarray:
    """Rank of a (stack of) matrices computed from singular values.

    Unlike ``xp.linalg.matrix_rank``, 1-D input is rejected, and an explicit
    ``rtol`` is scaled by the largest singular value (array API semantics).
    """
    if x.ndim < 2:
        raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
    singular_values = get_xp(xp)(svdvals)(x, **kwargs)
    largest = singular_values.max(axis=-1, keepdims=True)
    if rtol is None:
        tol = largest * max(x.shape[-2:]) * xp.finfo(singular_values.dtype).eps
    else:
        # Unlike xp.linalg.matrix_rank, the tolerance is multiplied by the
        # largest singular value, per the array API specification.
        tol = largest * xp.asarray(rtol)[..., xp.newaxis]
    return xp.count_nonzero(singular_values > tol, axis=-1)
|
||||
|
||||
def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray:
    """Moore-Penrose pseudo-inverse.

    Unlike ``xp.linalg.pinv``, the default tolerance is multiplied by
    ``max(M, N)``, per the array API specification.
    """
    if rtol is None:
        rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps
    return xp.linalg.pinv(x, rcond=rtol, **kwargs)
|
||||
|
||||
# These functions are new in the array API spec
|
||||
|
||||
# matrix_norm is new in the array API spec.
def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray:
    """Matrix norm over the last two axes (Frobenius norm by default)."""
    return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord)
|
||||
|
||||
# svdvals is not in NumPy (but it is in SciPy). It is equivalent to
|
||||
# xp.linalg.svd(compute_uv=False).
|
||||
# svdvals is not in NumPy (but it is in SciPy); equivalent to
# xp.linalg.svd(compute_uv=False).
def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]:
    """Singular values of ``x`` (no U/Vh factors computed)."""
    return xp.linalg.svd(x, compute_uv=False)
|
||||
|
||||
def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray:
    """Vector norm of ``x`` over the given axis/axes.

    ``xp.linalg.norm`` computes a matrix norm when ``axis`` is a 2-tuple or
    when ``axis=None`` on a 2-D input, so the input is flattened (for
    ``axis=None``) or reshaped so the reduction is over a single dimension.

    Fix: the tuple-``axis`` branch now uses the normalized (non-negative)
    axes when building the transpose order and the collapsed shape; the
    previous code used the raw ``axis`` values, which relied on the backend
    tolerating negative axes in ``transpose`` and ``shape`` indexing.
    """
    if axis is None:
        # Note: xp.linalg.norm() doesn't handle 0-D arrays, and on 2-D input
        # axis=None would mean a matrix norm; flatten to force a vector norm.
        _x = x.ravel()
        _axis = 0
    elif isinstance(axis, tuple):
        # The axis argument supports any number of axes, whereas
        # xp.linalg.norm() only supports a single axis for vector norm, so
        # move the requested axes to the front and collapse them into one.
        normalized_axis = normalize_axis_tuple(axis, x.ndim)
        rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
        newshape = normalized_axis + rest
        _x = xp.transpose(x, newshape).reshape(
            (math.prod([x.shape[i] for i in normalized_axis]),
             *[x.shape[i] for i in rest]))
        _axis = 0
    else:
        _x = x
        _axis = axis

    res = xp.linalg.norm(_x, axis=_axis, ord=ord)

    if keepdims:
        # Can't reuse xp.linalg.norm(keepdims=True) because of the reshape
        # hacks above; reinsert the reduced axes as size-1 dimensions.
        shape = list(x.shape)
        _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim)
        for i in _axis:
            shape[i] = 1
        res = xp.reshape(res, tuple(shape))

    return res
|
||||
|
||||
# xp.diagonal and xp.trace operate on the first two axes whereas these
|
||||
# operates on the last two
|
||||
|
||||
# xp.diagonal and xp.trace operate on the first two axes; per the array API
# specification these operate on the last two.
def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray:
    """Diagonal of a (stack of) matrices, taken over the last two axes."""
    return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs)


def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray:
    """Trace over the last two axes.

    When ``dtype`` is not given, float32/complex64 inputs accumulate in
    float64/complex128, matching the spec's default-precision rule.
    """
    if dtype is None:
        if x.dtype == xp.float32:
            dtype = xp.float64
        elif x.dtype == xp.complex64:
            dtype = xp.complex128
    return xp.asarray(xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs))
|
||||
|
||||
# Names exported by the linalg compat namespace (some are re-exported from
# the shared _aliases module).
__all__ = [
    'cross', 'matmul', 'outer', 'tensordot', 'EighResult',
    'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet',
    'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm',
    'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal',
    'trace',
]
|
||||
23
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_typing.py
vendored
Normal file
23
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/common/_typing.py
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# Typing aliases shared by the array-api-compat wrappers.
from __future__ import annotations

__all__ = [
    "NestedSequence",
    "SupportsBufferProtocol",
]

from typing import (
    Any,
    TypeVar,
    Protocol,
)

_T_co = TypeVar("_T_co", covariant=True)


class NestedSequence(Protocol[_T_co]):
    """Structural type for arbitrarily nested sequences of ``_T_co``."""

    def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
    def __len__(self, /) -> int: ...


# Buffer-protocol support is not currently expressible in the type system.
SupportsBufferProtocol = Any

# Arrays and devices are duck-typed across backends, so both are Any.
Array = Any
Device = Any
|
||||
16
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py
vendored
Normal file
16
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
from cupy import * # noqa: F403
|
||||
|
||||
# from cupy import * doesn't overwrite these builtin names
|
||||
from cupy import abs, max, min, round # noqa: F401
|
||||
|
||||
# These imports may overwrite names from the import * above.
|
||||
from ._aliases import * # noqa: F403
|
||||
|
||||
# See the comment in the numpy __init__.py
|
||||
__import__(__package__ + '.linalg')
|
||||
|
||||
__import__(__package__ + '.fft')
|
||||
|
||||
from ..common._helpers import * # noqa: F401,F403
|
||||
|
||||
__array_api_version__ = '2022.12'
|
||||
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-312.pyc
vendored
Normal file
Binary file not shown.
81
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py
vendored
Normal file
81
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cupy as cp
|
||||
|
||||
from ..common import _aliases
|
||||
from .._internal import get_xp
|
||||
|
||||
asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy')
|
||||
asarray.__doc__ = _aliases._asarray.__doc__
|
||||
del partial
|
||||
|
||||
bool = cp.bool_
|
||||
|
||||
# Basic renames
|
||||
acos = cp.arccos
|
||||
acosh = cp.arccosh
|
||||
asin = cp.arcsin
|
||||
asinh = cp.arcsinh
|
||||
atan = cp.arctan
|
||||
atan2 = cp.arctan2
|
||||
atanh = cp.arctanh
|
||||
bitwise_left_shift = cp.left_shift
|
||||
bitwise_invert = cp.invert
|
||||
bitwise_right_shift = cp.right_shift
|
||||
concat = cp.concatenate
|
||||
pow = cp.power
|
||||
|
||||
arange = get_xp(cp)(_aliases.arange)
|
||||
empty = get_xp(cp)(_aliases.empty)
|
||||
empty_like = get_xp(cp)(_aliases.empty_like)
|
||||
eye = get_xp(cp)(_aliases.eye)
|
||||
full = get_xp(cp)(_aliases.full)
|
||||
full_like = get_xp(cp)(_aliases.full_like)
|
||||
linspace = get_xp(cp)(_aliases.linspace)
|
||||
ones = get_xp(cp)(_aliases.ones)
|
||||
ones_like = get_xp(cp)(_aliases.ones_like)
|
||||
zeros = get_xp(cp)(_aliases.zeros)
|
||||
zeros_like = get_xp(cp)(_aliases.zeros_like)
|
||||
UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
|
||||
UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
|
||||
UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
|
||||
unique_all = get_xp(cp)(_aliases.unique_all)
|
||||
unique_counts = get_xp(cp)(_aliases.unique_counts)
|
||||
unique_inverse = get_xp(cp)(_aliases.unique_inverse)
|
||||
unique_values = get_xp(cp)(_aliases.unique_values)
|
||||
astype = _aliases.astype
|
||||
std = get_xp(cp)(_aliases.std)
|
||||
var = get_xp(cp)(_aliases.var)
|
||||
permute_dims = get_xp(cp)(_aliases.permute_dims)
|
||||
reshape = get_xp(cp)(_aliases.reshape)
|
||||
argsort = get_xp(cp)(_aliases.argsort)
|
||||
sort = get_xp(cp)(_aliases.sort)
|
||||
nonzero = get_xp(cp)(_aliases.nonzero)
|
||||
sum = get_xp(cp)(_aliases.sum)
|
||||
prod = get_xp(cp)(_aliases.prod)
|
||||
ceil = get_xp(cp)(_aliases.ceil)
|
||||
floor = get_xp(cp)(_aliases.floor)
|
||||
trunc = get_xp(cp)(_aliases.trunc)
|
||||
matmul = get_xp(cp)(_aliases.matmul)
|
||||
matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
|
||||
tensordot = get_xp(cp)(_aliases.tensordot)
|
||||
|
||||
# These functions are completely new here. If the library already has them
|
||||
# (i.e., numpy 2.0), use the library version instead of our wrapper.
|
||||
if hasattr(cp, 'vecdot'):
|
||||
vecdot = cp.vecdot
|
||||
else:
|
||||
vecdot = get_xp(cp)(_aliases.vecdot)
|
||||
if hasattr(cp, 'isdtype'):
|
||||
isdtype = cp.isdtype
|
||||
else:
|
||||
isdtype = get_xp(cp)(_aliases.isdtype)
|
||||
|
||||
__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos',
|
||||
'acosh', 'asin', 'asinh', 'atan', 'atan2',
|
||||
'atanh', 'bitwise_left_shift', 'bitwise_invert',
|
||||
'bitwise_right_shift', 'concat', 'pow']
|
||||
|
||||
_all_ignore = ['cp', 'get_xp']
|
||||
46
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py
vendored
Normal file
46
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
from __future__ import annotations
|
||||
|
||||
__all__ = [
|
||||
"ndarray",
|
||||
"Device",
|
||||
"Dtype",
|
||||
]
|
||||
|
||||
import sys
|
||||
from typing import (
|
||||
Union,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from cupy import (
|
||||
ndarray,
|
||||
dtype,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
float32,
|
||||
float64,
|
||||
)
|
||||
|
||||
from cupy.cuda.device import Device
|
||||
|
||||
if TYPE_CHECKING or sys.version_info >= (3, 9):
|
||||
Dtype = dtype[Union[
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
float32,
|
||||
float64,
|
||||
]]
|
||||
else:
|
||||
Dtype = dtype
|
||||
36
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/fft.py
vendored
Normal file
36
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/fft.py
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
from cupy.fft import * # noqa: F403
|
||||
# cupy.fft doesn't have __all__. If it is added, replace this with
|
||||
#
|
||||
# from cupy.fft import __all__ as linalg_all
|
||||
_n = {}
|
||||
exec('from cupy.fft import *', _n)
|
||||
del _n['__builtins__']
|
||||
fft_all = list(_n)
|
||||
del _n
|
||||
|
||||
from ..common import _fft
|
||||
from .._internal import get_xp
|
||||
|
||||
import cupy as cp
|
||||
|
||||
fft = get_xp(cp)(_fft.fft)
|
||||
ifft = get_xp(cp)(_fft.ifft)
|
||||
fftn = get_xp(cp)(_fft.fftn)
|
||||
ifftn = get_xp(cp)(_fft.ifftn)
|
||||
rfft = get_xp(cp)(_fft.rfft)
|
||||
irfft = get_xp(cp)(_fft.irfft)
|
||||
rfftn = get_xp(cp)(_fft.rfftn)
|
||||
irfftn = get_xp(cp)(_fft.irfftn)
|
||||
hfft = get_xp(cp)(_fft.hfft)
|
||||
ihfft = get_xp(cp)(_fft.ihfft)
|
||||
fftfreq = get_xp(cp)(_fft.fftfreq)
|
||||
rfftfreq = get_xp(cp)(_fft.rfftfreq)
|
||||
fftshift = get_xp(cp)(_fft.fftshift)
|
||||
ifftshift = get_xp(cp)(_fft.ifftshift)
|
||||
|
||||
__all__ = fft_all + _fft.__all__
|
||||
|
||||
del get_xp
|
||||
del cp
|
||||
del fft_all
|
||||
del _fft
|
||||
49
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py
vendored
Normal file
49
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
from cupy.linalg import * # noqa: F403
|
||||
# cupy.linalg doesn't have __all__. If it is added, replace this with
|
||||
#
|
||||
# from cupy.linalg import __all__ as linalg_all
|
||||
_n = {}
|
||||
exec('from cupy.linalg import *', _n)
|
||||
del _n['__builtins__']
|
||||
linalg_all = list(_n)
|
||||
del _n
|
||||
|
||||
from ..common import _linalg
|
||||
from .._internal import get_xp
|
||||
|
||||
import cupy as cp
|
||||
|
||||
# These functions are in both the main and linalg namespaces
|
||||
from ._aliases import matmul, matrix_transpose, tensordot, vecdot # noqa: F401
|
||||
|
||||
cross = get_xp(cp)(_linalg.cross)
|
||||
outer = get_xp(cp)(_linalg.outer)
|
||||
EighResult = _linalg.EighResult
|
||||
QRResult = _linalg.QRResult
|
||||
SlogdetResult = _linalg.SlogdetResult
|
||||
SVDResult = _linalg.SVDResult
|
||||
eigh = get_xp(cp)(_linalg.eigh)
|
||||
qr = get_xp(cp)(_linalg.qr)
|
||||
slogdet = get_xp(cp)(_linalg.slogdet)
|
||||
svd = get_xp(cp)(_linalg.svd)
|
||||
cholesky = get_xp(cp)(_linalg.cholesky)
|
||||
matrix_rank = get_xp(cp)(_linalg.matrix_rank)
|
||||
pinv = get_xp(cp)(_linalg.pinv)
|
||||
matrix_norm = get_xp(cp)(_linalg.matrix_norm)
|
||||
svdvals = get_xp(cp)(_linalg.svdvals)
|
||||
diagonal = get_xp(cp)(_linalg.diagonal)
|
||||
trace = get_xp(cp)(_linalg.trace)
|
||||
|
||||
# These functions are completely new here. If the library already has them
|
||||
# (i.e., numpy 2.0), use the library version instead of our wrapper.
|
||||
if hasattr(cp.linalg, 'vector_norm'):
|
||||
vector_norm = cp.linalg.vector_norm
|
||||
else:
|
||||
vector_norm = get_xp(cp)(_linalg.vector_norm)
|
||||
|
||||
__all__ = linalg_all + _linalg.__all__
|
||||
|
||||
del get_xp
|
||||
del cp
|
||||
del linalg_all
|
||||
del _linalg
|
||||
0
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/__init__.py
vendored
Normal file
0
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/__init__.py
vendored
Normal file
Binary file not shown.
8
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py
vendored
Normal file
8
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
from dask.array import * # noqa: F403
|
||||
|
||||
# These imports may overwrite names from the import * above.
|
||||
from ._aliases import * # noqa: F403
|
||||
|
||||
__array_api_version__ = '2022.12'
|
||||
|
||||
__import__(__package__ + '.linalg')
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
146
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py
vendored
Normal file
146
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ...common import _aliases
|
||||
from ...common._helpers import _check_device
|
||||
|
||||
from ..._internal import get_xp
|
||||
|
||||
import numpy as np
|
||||
from numpy import (
|
||||
# Constants
|
||||
e,
|
||||
inf,
|
||||
nan,
|
||||
pi,
|
||||
newaxis,
|
||||
# Dtypes
|
||||
bool_ as bool,
|
||||
float32,
|
||||
float64,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
complex64,
|
||||
complex128,
|
||||
iinfo,
|
||||
finfo,
|
||||
can_cast,
|
||||
result_type,
|
||||
)
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional, Union
|
||||
|
||||
from ...common._typing import Device, Dtype, Array
|
||||
|
||||
import dask.array as da
|
||||
|
||||
isdtype = get_xp(np)(_aliases.isdtype)
|
||||
astype = _aliases.astype
|
||||
|
||||
# Common aliases
|
||||
|
||||
# This arange func is modified from the common one to
|
||||
# not pass stop/step as keyword arguments, which will cause
|
||||
# an error with dask
|
||||
|
||||
# TODO: delete the xp stuff, it shouldn't be necessary
|
||||
def _dask_arange(
|
||||
start: Union[int, float],
|
||||
/,
|
||||
stop: Optional[Union[int, float]] = None,
|
||||
step: Union[int, float] = 1,
|
||||
*,
|
||||
xp,
|
||||
dtype: Optional[Dtype] = None,
|
||||
device: Optional[Device] = None,
|
||||
**kwargs,
|
||||
) -> Array:
|
||||
_check_device(xp, device)
|
||||
args = [start]
|
||||
if stop is not None:
|
||||
args.append(stop)
|
||||
else:
|
||||
# stop is None, so start is actually stop
|
||||
# prepend the default value for start which is 0
|
||||
args.insert(0, 0)
|
||||
args.append(step)
|
||||
return xp.arange(*args, dtype=dtype, **kwargs)
|
||||
|
||||
arange = get_xp(da)(_dask_arange)
|
||||
eye = get_xp(da)(_aliases.eye)
|
||||
|
||||
from functools import partial
|
||||
asarray = partial(_aliases._asarray, namespace='dask.array')
|
||||
asarray.__doc__ = _aliases._asarray.__doc__
|
||||
|
||||
linspace = get_xp(da)(_aliases.linspace)
|
||||
eye = get_xp(da)(_aliases.eye)
|
||||
UniqueAllResult = get_xp(da)(_aliases.UniqueAllResult)
|
||||
UniqueCountsResult = get_xp(da)(_aliases.UniqueCountsResult)
|
||||
UniqueInverseResult = get_xp(da)(_aliases.UniqueInverseResult)
|
||||
unique_all = get_xp(da)(_aliases.unique_all)
|
||||
unique_counts = get_xp(da)(_aliases.unique_counts)
|
||||
unique_inverse = get_xp(da)(_aliases.unique_inverse)
|
||||
unique_values = get_xp(da)(_aliases.unique_values)
|
||||
permute_dims = get_xp(da)(_aliases.permute_dims)
|
||||
std = get_xp(da)(_aliases.std)
|
||||
var = get_xp(da)(_aliases.var)
|
||||
empty = get_xp(da)(_aliases.empty)
|
||||
empty_like = get_xp(da)(_aliases.empty_like)
|
||||
full = get_xp(da)(_aliases.full)
|
||||
full_like = get_xp(da)(_aliases.full_like)
|
||||
ones = get_xp(da)(_aliases.ones)
|
||||
ones_like = get_xp(da)(_aliases.ones_like)
|
||||
zeros = get_xp(da)(_aliases.zeros)
|
||||
zeros_like = get_xp(da)(_aliases.zeros_like)
|
||||
reshape = get_xp(da)(_aliases.reshape)
|
||||
matrix_transpose = get_xp(da)(_aliases.matrix_transpose)
|
||||
vecdot = get_xp(da)(_aliases.vecdot)
|
||||
|
||||
nonzero = get_xp(da)(_aliases.nonzero)
|
||||
sum = get_xp(np)(_aliases.sum)
|
||||
prod = get_xp(np)(_aliases.prod)
|
||||
ceil = get_xp(np)(_aliases.ceil)
|
||||
floor = get_xp(np)(_aliases.floor)
|
||||
trunc = get_xp(np)(_aliases.trunc)
|
||||
matmul = get_xp(np)(_aliases.matmul)
|
||||
tensordot = get_xp(np)(_aliases.tensordot)
|
||||
|
||||
from dask.array import (
|
||||
# Element wise aliases
|
||||
arccos as acos,
|
||||
arccosh as acosh,
|
||||
arcsin as asin,
|
||||
arcsinh as asinh,
|
||||
arctan as atan,
|
||||
arctan2 as atan2,
|
||||
arctanh as atanh,
|
||||
left_shift as bitwise_left_shift,
|
||||
right_shift as bitwise_right_shift,
|
||||
invert as bitwise_invert,
|
||||
power as pow,
|
||||
# Other
|
||||
concatenate as concat,
|
||||
)
|
||||
|
||||
# exclude these from all since
|
||||
_da_unsupported = ['sort', 'argsort']
|
||||
|
||||
common_aliases = [alias for alias in _aliases.__all__ if alias not in _da_unsupported]
|
||||
|
||||
__all__ = common_aliases + ['asarray', 'bool', 'acos',
|
||||
'acosh', 'asin', 'asinh', 'atan', 'atan2',
|
||||
'atanh', 'bitwise_left_shift', 'bitwise_invert',
|
||||
'bitwise_right_shift', 'concat', 'pow',
|
||||
'e', 'inf', 'nan', 'pi', 'newaxis', 'float32', 'float64', 'int8',
|
||||
'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64',
|
||||
'complex64', 'complex128', 'iinfo', 'finfo', 'can_cast', 'result_type']
|
||||
|
||||
_all_ignore = ['get_xp', 'da', 'partial', 'common_aliases', 'np']
|
||||
72
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py
vendored
Normal file
72
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ...common import _linalg
|
||||
from ..._internal import get_xp
|
||||
|
||||
# Exports
|
||||
from dask.array.linalg import * # noqa: F403
|
||||
from dask.array import trace, outer
|
||||
|
||||
# These functions are in both the main and linalg namespaces
|
||||
from dask.array import matmul, tensordot
|
||||
from ._aliases import matrix_transpose, vecdot
|
||||
|
||||
import dask.array as da
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
if TYPE_CHECKING:
|
||||
from ...common._typing import Array
|
||||
from typing import Literal
|
||||
|
||||
# dask.array.linalg doesn't have __all__. If it is added, replace this with
|
||||
#
|
||||
# from dask.array.linalg import __all__ as linalg_all
|
||||
_n = {}
|
||||
exec('from dask.array.linalg import *', _n)
|
||||
del _n['__builtins__']
|
||||
if 'annotations' in _n:
|
||||
del _n['annotations']
|
||||
linalg_all = list(_n)
|
||||
del _n
|
||||
|
||||
EighResult = _linalg.EighResult
|
||||
QRResult = _linalg.QRResult
|
||||
SlogdetResult = _linalg.SlogdetResult
|
||||
SVDResult = _linalg.SVDResult
|
||||
# TODO: use the QR wrapper once dask
|
||||
# supports the mode keyword on QR
|
||||
# https://github.com/dask/dask/issues/10388
|
||||
#qr = get_xp(da)(_linalg.qr)
|
||||
def qr(x: Array, mode: Literal['reduced', 'complete'] = 'reduced',
|
||||
**kwargs) -> QRResult:
|
||||
if mode != "reduced":
|
||||
raise ValueError("dask arrays only support using mode='reduced'")
|
||||
return QRResult(*da.linalg.qr(x, **kwargs))
|
||||
cholesky = get_xp(da)(_linalg.cholesky)
|
||||
matrix_rank = get_xp(da)(_linalg.matrix_rank)
|
||||
matrix_norm = get_xp(da)(_linalg.matrix_norm)
|
||||
|
||||
|
||||
# Wrap the svd functions to not pass full_matrices to dask
|
||||
# when full_matrices=False (as that is the default behavior for dask),
|
||||
# and dask doesn't have the full_matrices keyword
|
||||
def svd(x: Array, full_matrices: bool = True, **kwargs) -> SVDResult:
|
||||
if full_matrices:
|
||||
raise ValueError("full_matrics=True is not supported by dask.")
|
||||
return da.linalg.svd(x, coerce_signs=False, **kwargs)
|
||||
|
||||
def svdvals(x: Array) -> Array:
|
||||
# TODO: can't avoid computing U or V for dask
|
||||
_, s, _ = svd(x)
|
||||
return s
|
||||
|
||||
vector_norm = get_xp(da)(_linalg.vector_norm)
|
||||
diagonal = get_xp(da)(_linalg.diagonal)
|
||||
|
||||
__all__ = linalg_all + ["trace", "outer", "matmul", "tensordot",
|
||||
"matrix_transpose", "vecdot", "EighResult",
|
||||
"QRResult", "SlogdetResult", "SVDResult", "qr",
|
||||
"cholesky", "matrix_rank", "matrix_norm", "svdvals",
|
||||
"vector_norm", "diagonal"]
|
||||
|
||||
_all_ignore = ['get_xp', 'da', 'linalg_all']
|
||||
24
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py
vendored
Normal file
24
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
from numpy import * # noqa: F403
|
||||
|
||||
# from numpy import * doesn't overwrite these builtin names
|
||||
from numpy import abs, max, min, round # noqa: F401
|
||||
|
||||
# These imports may overwrite names from the import * above.
|
||||
from ._aliases import * # noqa: F403
|
||||
|
||||
# Don't know why, but we have to do an absolute import to import linalg. If we
|
||||
# instead do
|
||||
#
|
||||
# from . import linalg
|
||||
#
|
||||
# It doesn't overwrite np.linalg from above. The import is generated
|
||||
# dynamically so that the library can be vendored.
|
||||
__import__(__package__ + '.linalg')
|
||||
|
||||
__import__(__package__ + '.fft')
|
||||
|
||||
from .linalg import matrix_transpose, vecdot # noqa: F401
|
||||
|
||||
from ..common._helpers import * # noqa: F403
|
||||
|
||||
__array_api_version__ = '2022.12'
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-312.pyc
vendored
Normal file
Binary file not shown.
81
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py
vendored
Normal file
81
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from functools import partial
|
||||
|
||||
from ..common import _aliases
|
||||
|
||||
from .._internal import get_xp
|
||||
|
||||
asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy')
|
||||
asarray.__doc__ = _aliases._asarray.__doc__
|
||||
del partial
|
||||
|
||||
import numpy as np
|
||||
bool = np.bool_
|
||||
|
||||
# Basic renames
|
||||
acos = np.arccos
|
||||
acosh = np.arccosh
|
||||
asin = np.arcsin
|
||||
asinh = np.arcsinh
|
||||
atan = np.arctan
|
||||
atan2 = np.arctan2
|
||||
atanh = np.arctanh
|
||||
bitwise_left_shift = np.left_shift
|
||||
bitwise_invert = np.invert
|
||||
bitwise_right_shift = np.right_shift
|
||||
concat = np.concatenate
|
||||
pow = np.power
|
||||
|
||||
arange = get_xp(np)(_aliases.arange)
|
||||
empty = get_xp(np)(_aliases.empty)
|
||||
empty_like = get_xp(np)(_aliases.empty_like)
|
||||
eye = get_xp(np)(_aliases.eye)
|
||||
full = get_xp(np)(_aliases.full)
|
||||
full_like = get_xp(np)(_aliases.full_like)
|
||||
linspace = get_xp(np)(_aliases.linspace)
|
||||
ones = get_xp(np)(_aliases.ones)
|
||||
ones_like = get_xp(np)(_aliases.ones_like)
|
||||
zeros = get_xp(np)(_aliases.zeros)
|
||||
zeros_like = get_xp(np)(_aliases.zeros_like)
|
||||
UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult)
|
||||
UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult)
|
||||
UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult)
|
||||
unique_all = get_xp(np)(_aliases.unique_all)
|
||||
unique_counts = get_xp(np)(_aliases.unique_counts)
|
||||
unique_inverse = get_xp(np)(_aliases.unique_inverse)
|
||||
unique_values = get_xp(np)(_aliases.unique_values)
|
||||
astype = _aliases.astype
|
||||
std = get_xp(np)(_aliases.std)
|
||||
var = get_xp(np)(_aliases.var)
|
||||
permute_dims = get_xp(np)(_aliases.permute_dims)
|
||||
reshape = get_xp(np)(_aliases.reshape)
|
||||
argsort = get_xp(np)(_aliases.argsort)
|
||||
sort = get_xp(np)(_aliases.sort)
|
||||
nonzero = get_xp(np)(_aliases.nonzero)
|
||||
sum = get_xp(np)(_aliases.sum)
|
||||
prod = get_xp(np)(_aliases.prod)
|
||||
ceil = get_xp(np)(_aliases.ceil)
|
||||
floor = get_xp(np)(_aliases.floor)
|
||||
trunc = get_xp(np)(_aliases.trunc)
|
||||
matmul = get_xp(np)(_aliases.matmul)
|
||||
matrix_transpose = get_xp(np)(_aliases.matrix_transpose)
|
||||
tensordot = get_xp(np)(_aliases.tensordot)
|
||||
|
||||
# These functions are completely new here. If the library already has them
|
||||
# (i.e., numpy 2.0), use the library version instead of our wrapper.
|
||||
if hasattr(np, 'vecdot'):
|
||||
vecdot = np.vecdot
|
||||
else:
|
||||
vecdot = get_xp(np)(_aliases.vecdot)
|
||||
if hasattr(np, 'isdtype'):
|
||||
isdtype = np.isdtype
|
||||
else:
|
||||
isdtype = get_xp(np)(_aliases.isdtype)
|
||||
|
||||
__all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos',
|
||||
'acosh', 'asin', 'asinh', 'atan', 'atan2',
|
||||
'atanh', 'bitwise_left_shift', 'bitwise_invert',
|
||||
'bitwise_right_shift', 'concat', 'pow']
|
||||
|
||||
_all_ignore = ['np', 'get_xp']
|
||||
46
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py
vendored
Normal file
46
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
from __future__ import annotations
|
||||
|
||||
__all__ = [
|
||||
"ndarray",
|
||||
"Device",
|
||||
"Dtype",
|
||||
]
|
||||
|
||||
import sys
|
||||
from typing import (
|
||||
Literal,
|
||||
Union,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from numpy import (
|
||||
ndarray,
|
||||
dtype,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
float32,
|
||||
float64,
|
||||
)
|
||||
|
||||
Device = Literal["cpu"]
|
||||
if TYPE_CHECKING or sys.version_info >= (3, 9):
|
||||
Dtype = dtype[Union[
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
float32,
|
||||
float64,
|
||||
]]
|
||||
else:
|
||||
Dtype = dtype
|
||||
29
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/fft.py
vendored
Normal file
29
.CondaPkg/env/Lib/site-packages/scipy/_lib/array_api_compat/numpy/fft.py
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
from numpy.fft import * # noqa: F403
|
||||
from numpy.fft import __all__ as fft_all
|
||||
|
||||
from ..common import _fft
|
||||
from .._internal import get_xp
|
||||
|
||||
import numpy as np
|
||||
|
||||
fft = get_xp(np)(_fft.fft)
|
||||
ifft = get_xp(np)(_fft.ifft)
|
||||
fftn = get_xp(np)(_fft.fftn)
|
||||
ifftn = get_xp(np)(_fft.ifftn)
|
||||
rfft = get_xp(np)(_fft.rfft)
|
||||
irfft = get_xp(np)(_fft.irfft)
|
||||
rfftn = get_xp(np)(_fft.rfftn)
|
||||
irfftn = get_xp(np)(_fft.irfftn)
|
||||
hfft = get_xp(np)(_fft.hfft)
|
||||
ihfft = get_xp(np)(_fft.ihfft)
|
||||
fftfreq = get_xp(np)(_fft.fftfreq)
|
||||
rfftfreq = get_xp(np)(_fft.rfftfreq)
|
||||
fftshift = get_xp(np)(_fft.fftshift)
|
||||
ifftshift = get_xp(np)(_fft.ifftshift)
|
||||
|
||||
__all__ = fft_all + _fft.__all__
|
||||
|
||||
del get_xp
|
||||
del np
|
||||
del fft_all
|
||||
del _fft
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user