Use a for loop to install conda packages

This commit is contained in:
ton
2023-04-16 11:03:27 +07:00
parent 49da9f29c1
commit 0c2b34d6f8
12168 changed files with 2656238 additions and 1 deletion

View File

@@ -0,0 +1,3 @@
from .version_requirements import is_installed

# Flag indicating whether a sufficiently recent Matplotlib (>= 3.3) is
# available; used elsewhere to gate Matplotlib-dependent functionality.
has_mpl = is_installed("matplotlib", ">=3.3")

View File

@@ -0,0 +1,54 @@
__all__ = ['polygon_clip', 'polygon_area']
import numpy as np
from .version_requirements import require
@require("matplotlib", ">=3.3")
def polygon_clip(rp, cp, r0, c0, r1, c1):
    """Clip a polygon to the given bounding box.

    Parameters
    ----------
    rp, cp : (N,) ndarray of double
        Row and column coordinates of the polygon.
    (r0, c0), (r1, c1) : double
        Top-left and bottom-right coordinates of the bounding box.

    Returns
    -------
    r_clipped, c_clipped : (M,) ndarray of double
        Coordinates of clipped polygon.

    Notes
    -----
    This makes use of Sutherland-Hodgman clipping as implemented in
    AGG 2.4 and exposed in Matplotlib.
    """
    # Imported lazily so that matplotlib stays an optional dependency;
    # the @require decorator enforces its presence at call time.
    from matplotlib import path, transforms

    # Stack the row/column coordinates into an (N, 2) vertex array.
    poly = path.Path(np.vstack((rp, cp)).T, closed=True)
    clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])
    # ``to_polygons`` returns a list of vertex arrays; the clipped result
    # is the first entry.
    poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]
    return poly_clipped[:, 0], poly_clipped[:, 1]
def polygon_area(pr, pc):
    """Compute the area of a polygon.

    Parameters
    ----------
    pr, pc : (N,) array of float
        Polygon row and column coordinates.

    Returns
    -------
    a : float
        Area of the polygon.
    """
    pr = np.asarray(pr)
    pc = np.asarray(pc)
    # Shoelace formula over consecutive vertex pairs: half the absolute
    # value of the summed signed cross-products.
    signed_terms = pc[:-1] * pr[1:] - pc[1:] * pr[:-1]
    return 0.5 * np.abs(signed_terms.sum())

View File

@@ -0,0 +1,27 @@
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
import os
@contextmanager
def temporary_file(suffix=''):
    """Yield a writeable temporary filename that is deleted on context exit.

    Parameters
    ----------
    suffix : string, optional
        The suffix for the file.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage import io
    >>> with temporary_file('.tif') as tempfile:
    ...     im = np.arange(25, dtype=np.uint8).reshape((5, 5))
    ...     io.imsave(tempfile, im)
    ...     assert np.all(io.imread(tempfile) == im)
    """
    # delete=False so the file can be re-opened by name after this handle
    # is closed (required on Windows).
    with NamedTemporaryFile(suffix=suffix, delete=False) as tempfile_stream:
        tempfile = tempfile_stream.name
    try:
        yield tempfile
    finally:
        # Remove the file even when the body of the ``with`` block raises;
        # previously an exception in the caller leaked the temporary file.
        os.remove(tempfile)

View File

@@ -0,0 +1,147 @@
from contextlib import contextmanager
import sys
import warnings
import re
import functools
import os
__all__ = ['all_warnings', 'expected_warnings', 'warn']

# A version of `warnings.warn` with a default stacklevel of 2, so the
# warning points at the caller of the skimage function, not at skimage.
# functools.partial is used so as not to increase the call stack accidentally
warn = functools.partial(warnings.warn, stacklevel=2)
@contextmanager
def all_warnings():
    """
    Context for use in testing to ensure that all warnings are raised.

    Examples
    --------
    >>> import warnings
    >>> def foo():
    ...     warnings.warn(RuntimeWarning("bar"), stacklevel=2)

    We raise the warning once, while the warning filter is set to "once".
    Hereafter, the warning is invisible, even with custom filters:

    >>> with warnings.catch_warnings():
    ...     warnings.simplefilter('once')
    ...     foo()                         # doctest: +SKIP

    We can now run ``foo()`` without a warning being raised:

    >>> from numpy.testing import assert_warns
    >>> foo()                             # doctest: +SKIP

    To catch the warning, we call in the help of ``all_warnings``:

    >>> with all_warnings():
    ...     assert_warns(RuntimeWarning, foo)
    """
    # _warnings.py is on the critical import path.
    # Since this is a testing only function, we lazy import inspect.
    import inspect

    # Whenever a warning is triggered, Python adds a __warningregistry__
    # member to the *calling* module.  The exercise here is to find
    # and eradicate all those breadcrumbs that were left lying around.
    #
    # We proceed by first searching all parent calling frames and explicitly
    # clearing their warning registries (necessary for the doctests above to
    # pass).  Then, we search for all submodules of skimage and clear theirs
    # as well (necessary for the skimage test suite to pass).
    frame = inspect.currentframe()
    if frame:
        for f in inspect.getouterframes(frame):
            f[0].f_locals['__warningregistry__'] = {}
    # Drop the frame reference promptly to avoid a reference cycle.
    del frame

    for mod_name, mod in list(sys.modules.items()):
        try:
            mod.__warningregistry__.clear()
        except AttributeError:
            # Module never triggered a warning; nothing to clear.
            pass

    # Record every warning ("always") for the duration of the context and
    # hand the recorded list to the caller.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        yield w
@contextmanager
def expected_warnings(matching):
    r"""Context for use in testing to catch known warnings matching regexes

    Parameters
    ----------
    matching : None or a list of strings or compiled regexes
        Regexes for the desired warning to catch
        If matching is None, this behaves as a no-op.

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)
    >>> # rank filters are slow when bit-depth exceeds 10 bits
    >>> from skimage import filters
    >>> with expected_warnings(['Bad rank filter performance']):
    ...     median_filtered = filters.rank.median(image)

    Notes
    -----
    Uses `all_warnings` to ensure all warnings are raised.
    Upon exiting, it checks the recorded warnings for the desired matching
    pattern(s).
    Raises a ValueError if any match was not found or an unexpected
    warning was raised.

    Allows for three types of behaviors: `and`, `or`, and `optional` matches.
    This is done to accommodate different build environments or loop conditions
    that may produce different warnings.  The behaviors can be combined.
    If you pass multiple patterns, you get an orderless `and`, where all of the
    warnings must be raised.
    If you use the `|` operator in a pattern, you can catch one of several
    warnings.
    Finally, you can use `|\A\Z` in a pattern to signify it as optional.
    """
    if isinstance(matching, str):
        raise ValueError('``matching`` should be a list of strings and not '
                         'a string itself.')

    # Special case for disabling the context manager
    if matching is None:
        yield None
        return

    # Strictness is controlled by an environment variable; accepts
    # 'true'/'false' or anything int()-parseable ('0'/'1').
    strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
    if strict_warnings.lower() == 'true':
        strict_warnings = True
    elif strict_warnings.lower() == 'false':
        strict_warnings = False
    else:
        strict_warnings = bool(int(strict_warnings))

    with all_warnings() as w:
        # enter context
        yield w
    # exited user context, check the recorded warnings

    # Ignore ``None`` entries without mutating the caller's list (the
    # previous implementation removed them in place as a side effect).
    matching = [m for m in matching if m is not None]
    # Patterns carrying the ``|\A\Z`` marker are optional and need not
    # have been raised.
    remaining = [m for m in matching if r'\A\Z' not in m.split('|')]
    # NOTE: the loop variable is deliberately not named ``warn``, which the
    # previous implementation used and thereby shadowed this module's
    # ``warn()`` helper.
    for recorded in w:
        found = False
        for match in matching:
            if re.search(match, str(recorded.message)) is not None:
                found = True
                if match in remaining:
                    remaining.remove(match)
        if strict_warnings and not found:
            raise ValueError(f'Unexpected warning: {str(recorded.message)}')
    if strict_warnings and (len(remaining) > 0):
        newline = "\n"
        msg = f"No warning raised matching:{newline}{newline.join(remaining)}"
        raise ValueError(msg)

View File

@@ -0,0 +1,121 @@
import numpy as np
from scipy.spatial import cKDTree, distance
def _ensure_spacing(coord, spacing, p_norm, max_out):
    """Returns a subset of coord where a minimum spacing is guaranteed.

    Parameters
    ----------
    coord : ndarray
        The coordinates of the considered points.
    spacing : float
        the maximum allowed spacing between the points.
    p_norm : float
        Which Minkowski p-norm to use. Should be in the range [1, inf].
        A finite large p may cause a ValueError if overflow can occur.
        ``inf`` corresponds to the Chebyshev distance and 2 to the
        Euclidean distance.
    max_out: int
        If not None, at most the first ``max_out`` candidates are
        returned.

    Returns
    -------
    output : ndarray
        A subset of coord where a minimum spacing is guaranteed.
    """
    # Use KDtree to find the peaks that are too close to each other
    tree = cKDTree(coord)
    indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
    rejected_peaks_indices = set()
    naccepted = 0
    # Greedy pass in input order: the first surviving point of a conflicting
    # group is kept and all of its strictly-too-close neighbors rejected.
    for idx, candidates in enumerate(indices):
        if idx not in rejected_peaks_indices:
            # keep current point and the points at exactly spacing from it
            candidates.remove(idx)
            dist = distance.cdist([coord[idx]],
                                  coord[candidates],
                                  distance.minkowski,
                                  p=p_norm).reshape(-1)
            # Strict inequality: points at exactly ``spacing`` survive.
            candidates = [c for c, d in zip(candidates, dist)
                          if d < spacing]

            # candidates.remove(keep)
            rejected_peaks_indices.update(candidates)
            naccepted += 1
            # Stop early once enough points have been accepted.
            if max_out is not None and naccepted >= max_out:
                break

    # Remove the peaks that are too close to each other
    output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
    if max_out is not None:
        output = output[:max_out]

    return output
def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,
                   max_out=None, *, max_split_size=2000):
    """Returns a subset of coord where a minimum spacing is guaranteed.

    Parameters
    ----------
    coords : array_like
        The coordinates of the considered points.
    spacing : float
        the maximum allowed spacing between the points.
    p_norm : float
        Which Minkowski p-norm to use. Should be in the range [1, inf].
        A finite large p may cause a ValueError if overflow can occur.
        ``inf`` corresponds to the Chebyshev distance and 2 to the
        Euclidean distance.
    min_split_size : int
        Minimum split size used to process ``coords`` by batch to save
        memory. If None, the memory saving strategy is not applied.
    max_out : int
        If not None, only the first ``max_out`` candidates are returned.
    max_split_size : int
        Maximum split size used to process ``coords`` by batch to save
        memory. This number was decided by profiling with a large number
        of points. Too small a number results in too much looping in
        Python instead of C, slowing down the process, while too large
        a number results in large memory allocations, slowdowns, and,
        potentially, in the process being killed -- see gh-6010. See
        benchmark results `here
        <https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.

    Returns
    -------
    output : array_like
        A subset of coord where a minimum spacing is guaranteed.
    """
    output = coords
    if len(coords):
        coords = np.atleast_2d(coords)
        if min_split_size is None:
            # No batching: process everything in a single pass.
            batch_list = [coords]
        else:
            # Build split indices that double the batch size each step,
            # capped at max_split_size, to amortize KD-tree rebuild cost.
            coord_count = len(coords)
            split_idx = [min_split_size]
            split_size = min_split_size
            while coord_count - split_idx[-1] > max_split_size:
                split_size *= 2
                split_idx.append(split_idx[-1] + min(split_size,
                                                     max_split_size))
            batch_list = np.array_split(coords, split_idx)

        output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
        for batch in batch_list:
            # Re-run the spacing check on accepted points plus the new
            # batch so cross-batch conflicts are caught.
            output = _ensure_spacing(np.vstack([output, batch]),
                                     spacing, p_norm, max_out)
            if max_out is not None and len(output) >= max_out:
                break

    return output

View File

@@ -0,0 +1,47 @@
/* A fast approximation of the exponential function.
* Reference [1]: https://schraudolph.org/pubs/Schraudolph99.pdf
* Reference [2]: https://doi.org/10.1162/089976600300015033
* Additional improvements by Leonid Bloch. */
#include <stdint.h>
/* use just EXP_A = 1512775 for integer version, to avoid FP calculations */
#define EXP_A (1512775.3951951856938) /* 2^20/ln2 */
/* For min. RMS error */
#define EXP_BC 1072632447 /* 1023*2^20 - 60801 */
/* For min. max. relative error */
/* #define EXP_BC 1072647449 */ /* 1023*2^20 - 45799 */
/* For min. mean relative error */
/* #define EXP_BC 1072625005 */ /* 1023*2^20 - 68243 */
/* Approximate exp(y) by writing EXP_A*y + EXP_BC into the high-order
 * 32 bits of an IEEE-754 double (Schraudolph's method, see refs above).
 * Byte order is detected at run time via the union's char view. */
__inline double _fast_exp (double y)
{
  union
  {
    double d;
    struct { int32_t i, j; } n;  /* the two 32-bit halves of the double */
    char t[8];                   /* byte view, used to probe endianness */
  } _eco;

  /* Write 1 into the first 32-bit field; the first byte is 1 on a
   * little-endian machine and 0 on a big-endian one. */
  _eco.n.i = 1;
  switch(_eco.t[0]) {
    case 1:
      /* Little endian: high-order half of the double is field j. */
      _eco.n.j = (int32_t)(EXP_A*(y)) + EXP_BC;
      _eco.n.i = 0;
      break;
    case 0:
      /* Big endian: high-order half of the double is field i. */
      _eco.n.i = (int32_t)(EXP_A*(y)) + EXP_BC;
      _eco.n.j = 0;
      break;
  }

  return _eco.d;
}
/* Single-precision wrapper: evaluate at double precision, then narrow. */
__inline float _fast_expf (float y)
{
  const double approx = _fast_exp((double)y);
  return (float)approx;
}

View File

@@ -0,0 +1,166 @@
"""Filters used across multiple skimage submodules.
These are defined here to avoid circular imports.
The unit tests remain under skimage/filters/tests/
"""
from collections.abc import Iterable
import numpy as np
from scipy import ndimage as ndi
from .._shared.utils import _supported_float_type, convert_to_float, warn
class _PatchClassRepr(type):
"""Control class representations in rendered signatures."""
def __repr__(cls):
return f"<{cls.__name__}>"
class ChannelAxisNotSet(metaclass=_PatchClassRepr):
    """Signal that the `channel_axis` parameter is not set.

    This is a proxy object, used to signal to `skimage.filters.gaussian` that
    the `channel_axis` parameter has not been set, in which case the function
    will determine whether a color channel is present. We cannot use ``None``
    for this purpose as it has its own meaning which indicates that the given
    image is grayscale.

    This automatic behavior was broken in v0.19, recovered but deprecated in
    v0.20 and will be removed in v0.21.
    """
def gaussian(image, sigma=1, output=None, mode='nearest', cval=0,
             preserve_range=False, truncate=4.0, *,
             channel_axis=ChannelAxisNotSet):
    """Multi-dimensional Gaussian filter.

    Parameters
    ----------
    image : array-like
        Input image (grayscale or color) to filter.
    sigma : scalar or sequence of scalars, optional
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'nearest'.
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    preserve_range : bool, optional
        If True, keep the original range of values. Otherwise, the input
        ``image`` is converted according to the conventions of ``img_as_float``
        (Normalized first to values [-1.0 ; 1.0] or [0 ; 1.0] depending on
        dtype of input)
        For more information, see:
        https://scikit-image.org/docs/dev/user_guide/data_types.html
    truncate : float, optional
        Truncate the filter at this many standard deviations.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

        .. warning::
            Automatic detection of the color channel based on the old deprecated
            `multichannel=None` was broken in version 0.19. In 0.20 this
            behavior is fixed. The last axis of an `image` with dimensions
            (M, N, 3) is interpreted as a color channel if `channel_axis` is not
            set by the user (signaled by the default proxy value
            `ChannelAxisNotSet`). Starting with 0.21, `channel_axis=None` will
            be used as the new default value.

    Returns
    -------
    filtered_image : ndarray
        the filtered array

    Notes
    -----
    This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.

    Integer arrays are converted to float.

    The ``output`` should be floating point data type since gaussian converts
    to float provided ``image``. If ``output`` is not provided, another array
    will be allocated and returned as the result.

    The multi-dimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.

    Examples
    --------
    >>> a = np.zeros((3, 3))
    >>> a[1, 1] = 1
    >>> a
    array([[0., 0., 0.],
           [0., 1., 0.],
           [0., 0., 0.]])
    >>> gaussian(a, sigma=0.4)  # mild smoothing
    array([[0.00163116, 0.03712502, 0.00163116],
           [0.03712502, 0.84496158, 0.03712502],
           [0.00163116, 0.03712502, 0.00163116]])
    >>> gaussian(a, sigma=1)  # more smoothing
    array([[0.05855018, 0.09653293, 0.05855018],
           [0.09653293, 0.15915589, 0.09653293],
           [0.05855018, 0.09653293, 0.05855018]])
    >>> # Several modes are possible for handling boundaries
    >>> gaussian(a, sigma=1, mode='reflect')
    array([[0.08767308, 0.12075024, 0.08767308],
           [0.12075024, 0.16630671, 0.12075024],
           [0.08767308, 0.12075024, 0.08767308]])
    >>> # For RGB images, each is filtered separately
    >>> from skimage.data import astronaut
    >>> image = astronaut()
    >>> filtered_img = gaussian(image, sigma=1, channel_axis=-1)
    """
    # Deprecated automatic color-channel detection: only an (M, N, 3)
    # image is treated as RGB, with a FutureWarning; everything else is
    # treated as grayscale.
    if channel_axis is ChannelAxisNotSet:
        if image.ndim == 3 and image.shape[-1] == 3:
            warn(
                "Automatic detection of the color channel was deprecated in "
                "v0.19, and `channel_axis=None` will be the new default in "
                "v0.21. Set `channel_axis=-1` explicitly to silence this "
                "warning.",
                FutureWarning,
                stacklevel=2,
            )
            channel_axis = -1
        else:
            channel_axis = None

    if np.any(np.asarray(sigma) < 0.0):
        raise ValueError("Sigma values less than zero are not valid")
    if channel_axis is not None:
        # do not filter across channels
        if not isinstance(sigma, Iterable):
            sigma = [sigma] * (image.ndim - 1)
        if len(sigma) == image.ndim - 1:
            # Insert a zero sigma at the channel axis so that axis is
            # left unfiltered.
            sigma = list(sigma)
            sigma.insert(channel_axis % image.ndim, 0)
    image = convert_to_float(image, preserve_range)
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    if (output is not None) and (not np.issubdtype(output.dtype, np.floating)):
        raise ValueError("Provided output data type is not float")
    return ndi.gaussian_filter(image, sigma, output=output,
                               mode=mode, cval=cval, truncate=truncate)

View File

@@ -0,0 +1,120 @@
import os
import sys
def _show_skimage_info():
    """Print the installed scikit-image version before a test run."""
    # Imported lazily so this module loads without pulling in skimage.
    import skimage

    version = skimage.__version__
    print(f"skimage version {version}")
class PytestTester:
    """
    Pytest test runner.

    This class is made available in ``skimage._shared.testing``, and a test
    function is typically added to a package's __init__.py like so::

      from skimage._shared.testing import PytestTester
      test = PytestTester(__name__)
      del PytestTester

    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.
    """

    def __init__(self, module_name):
        # Stored for later lookup in sys.modules when the tester is called.
        self.module_name = module_name

    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of scikit-image code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'

        Returns
        -------
        result : bool
            Return True on success, false otherwise.
        """
        import pytest

        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        # setup the pytest arguments
        pytest_args = ["-l"]

        # offset verbosity. The "-q" cancels a "-v".
        pytest_args += ["-q"]

        # Filter out annoying import messages. Want these in both develop and
        # release mode.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed", ]

        if doctests:
            raise ValueError("Doctests not supported")

        if extra_argv:
            pytest_args += list(extra_argv)

        if verbose > 1:
            # verbose=2 -> "-v", verbose=3 -> "-vv", etc.
            pytest_args += ["-" + "v" * (verbose - 1)]

        if coverage:
            pytest_args += ["--cov=" + module_path]

        if label == "fast":
            # Skip tests decorated with pytest.mark.slow.
            pytest_args += ["-m", "not slow"]
        elif label != "full":
            # Any other label is passed through as a pytest marker expression.
            pytest_args += ["-m", label]

        if durations >= 0:
            pytest_args += [f"--durations={durations}"]

        if tests is None:
            tests = [self.module_name]

        pytest_args += ["--pyargs"] + list(tests)

        # run tests.
        _show_skimage_info()

        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            # pytest.main can exit via SystemExit; recover its exit code.
            code = exc.code

        return code == 0

View File

@@ -0,0 +1,331 @@
"""
Testing utilities.
"""
import os
import re
import struct
import threading
import functools
from tempfile import NamedTemporaryFile
import numpy as np
from numpy import testing
from numpy.testing import (
TestCase, assert_, assert_warns, assert_no_warnings,
assert_equal, assert_almost_equal,
assert_array_equal, assert_allclose,
assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_less
)
import warnings
from .. import data, io
from ..data._fetchers import _fetch
from ..util import img_as_uint, img_as_float, img_as_int, img_as_ubyte
from ._warnings import expected_warnings
import pytest
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
raises = pytest.raises
fixture = pytest.fixture
SKIP_RE = re.compile(r"(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$")
# true if python is running in 32bit mode
# Calculate the size of a void * pointer in bits
# https://docs.python.org/3/library/struct.html
arch32 = struct.calcsize("P") * 8 == 32
_error_on_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS_GLOBAL', '0')
if _error_on_warnings.lower() == 'true':
_error_on_warnings = True
elif _error_on_warnings.lower() == 'false':
_error_on_warnings = False
else:
try:
_error_on_warnings = bool(int(_error_on_warnings))
except ValueError:
_error_on_warnings = False
def assert_less(a, b, msg=None):
    """Assert that ``a < b``, optionally appending ``msg`` on failure."""
    details = f"{a!r} is not lower than {b!r}"
    if msg is not None:
        details = details + ": " + msg
    assert a < b, details
def assert_greater(a, b, msg=None):
    """Assert that ``a > b``, optionally appending ``msg`` on failure."""
    details = f"{a!r} is not greater than {b!r}"
    if msg is not None:
        details = details + ": " + msg
    assert a > b, details
def doctest_skip_parser(func):
    """ Decorator replaces custom skip test markup in doctests

    Say a function has a docstring::

        >>> something, HAVE_AMODULE, HAVE_BMODULE = 0, False, False
        >>> something # skip if not HAVE_AMODULE
        0
        >>> something # skip if HAVE_BMODULE
        0

    This decorator will evaluate the expression after ``skip if``.  If this
    evaluates to True, then the comment is replaced by ``# doctest: +SKIP``.
    If False, then the comment is just removed. The expression is evaluated in
    the ``globals`` scope of `func`.

    For example, if the module global ``HAVE_AMODULE`` is False, and module
    global ``HAVE_BMODULE`` is False, the returned function will have
    docstring::

        >>> something # doctest: +SKIP
        >>> something + else # doctest: +SKIP
        >>> something # doctest: +SKIP
    """
    lines = func.__doc__.split('\n')
    new_lines = []
    for line in lines:
        # SKIP_RE captures (doctest code, whitespace, skip-if expression).
        match = SKIP_RE.match(line)
        if match is None:
            new_lines.append(line)
            continue
        code, space, expr = match.groups()

        try:
            # Works as a function decorator
            if eval(expr, func.__globals__):
                code = code + space + "# doctest: +SKIP"
        except AttributeError:
            # Works as a class decorator
            if eval(expr, func.__init__.__globals__):
                code = code + space + "# doctest: +SKIP"

        new_lines.append(code)
    func.__doc__ = "\n".join(new_lines)
    return func
def roundtrip(image, plugin, suffix):
    """Save and read an image using a specified plugin"""
    if '.' not in suffix:
        suffix = '.' + suffix

    # delete=False so the file can be re-opened by name for the
    # save/load below (required on Windows).
    with NamedTemporaryFile(suffix=suffix, delete=False) as temp_file:
        fname = temp_file.name

    io.imsave(fname, image, plugin=plugin)
    new = io.imread(fname, plugin=plugin)
    try:
        os.remove(fname)
    except Exception:
        # Best-effort cleanup; a leftover temp file does not invalidate
        # the round-trip result.
        pass
    return new
def color_check(plugin, fmt='png'):
    """Check roundtrip behavior for color images.

    All major input types should be handled as ubytes and read
    back correctly.
    """
    img = img_as_ubyte(data.chelsea())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    # Boolean image round-trips through the plugin's native binary form.
    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2, r2.astype(bool))

    # Float image: expected to be read back as the original ubyte data.
    img3 = img_as_float(img)
    r3 = roundtrip(img3, plugin, fmt)
    testing.assert_allclose(r3, img)

    img4 = img_as_int(img)
    if fmt.lower() in (('tif', 'tiff')):
        # TIFF can store int16 natively; perturb the data to make sure the
        # values survive unchanged.
        img4 -= 100
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img_as_ubyte(img4))

    img5 = img_as_uint(img)
    r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img)


def mono_check(plugin, fmt='png'):
    """Check the roundtrip behavior for images that support most types.

    All major input types should be handled.
    """
    img = img_as_ubyte(data.moon())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2, r2.astype(bool))

    img3 = img_as_float(img)
    r3 = roundtrip(img3, plugin, fmt)
    if r3.dtype.kind == 'f':
        # Plugin preserved float data directly.
        testing.assert_allclose(img3, r3)
    else:
        # Otherwise the data comes back as 16-bit unsigned integers.
        testing.assert_allclose(r3, img_as_uint(img))

    img4 = img_as_int(img)
    if fmt.lower() in (('tif', 'tiff')):
        # TIFF stores int16 natively; perturb to verify exact preservation.
        img4 -= 100
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img_as_uint(img4))

    img5 = img_as_uint(img)
    r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img5)
def setup_test():
    """Default package level setup routine for skimage tests.

    Import packages known to raise warnings, and then
    force warnings to raise errors.

    Also set the random seed to zero.
    """
    warnings.simplefilter('default')

    if _error_on_warnings:
        # NOTE(review): the random seed is only set in strict-warnings mode,
        # although the docstring implies it is always set -- confirm intent.
        np.random.seed(0)

        warnings.simplefilter('error')

        # The filters below downgrade known, benign warnings back to
        # 'default' so they do not abort the run once warnings are errors.
        warnings.filterwarnings(
            'default', message='unclosed file', category=ResourceWarning
        )

        # Ignore other warnings only seen when using older versions of
        # dependencies.
        warnings.filterwarnings(
            'default',
            message='Conversion of the second argument of issubdtype',
            category=FutureWarning
        )

        warnings.filterwarnings(
            'default',
            message='the matrix subclass is not the recommended way',
            category=PendingDeprecationWarning, module='numpy'
        )

        warnings.filterwarnings(
            'default',
            message='Your installed pillow version',
            category=UserWarning,
            module='skimage.io'
        )

        # ignore warning from cycle_spin about Dask not being installed
        warnings.filterwarnings(
            'default',
            message='The optional dask dependency is not installed.',
            category=UserWarning
        )

        warnings.filterwarnings(
            'default',
            message='numpy.ufunc size changed',
            category=RuntimeWarning
        )

        warnings.filterwarnings(
            'default',
            message='\n\nThe scipy.sparse array containers',
            category=DeprecationWarning
        )

        # ignore dtype deprecation warning from NumPy arising from use of
        # SciPy as a reference in test_watershed09. Should be fixed in
        # scipy>=1.9.4
        # https://github.com/scipy/scipy/commit/da3ff893b9ac161938e11f9bcd5380e09cf03150
        warnings.filterwarnings(
            'default',
            message=('`np.int0` is a deprecated alias for `np.intp`'),
            category=DeprecationWarning
        )
def teardown_test():
    """Default package level teardown routine for skimage tests.

    Restore warnings to default behavior
    """
    # Only needed when setup_test escalated warnings to errors.
    if _error_on_warnings:
        warnings.resetwarnings()
        warnings.simplefilter('default')
def fetch(data_filename):
    """Attempt to fetch data, but if unavailable, skip the tests."""
    try:
        return _fetch(data_filename)
    except (ConnectionError, ModuleNotFoundError):
        # No network access or the data-fetching dependency is missing:
        # skip the whole test module rather than fail.
        pytest.skip(f'Unable to download {data_filename}',
                    allow_module_level=True)
# NOTE(review): the skip mark presumably stops pytest from collecting this
# decorator factory as a test because of its ``test_`` name -- confirm; the
# function itself remains callable as a decorator.
@pytest.mark.skip()
def test_parallel(num_threads=2, warnings_matching=None):
    """Decorator to run the same function multiple times in parallel.

    This decorator is useful to ensure that separate threads execute
    concurrently and correctly while releasing the GIL.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.

    warnings_matching: list or None
        This parameter is passed on to `expected_warnings` so as not to have
        race conditions with the warnings filters. A single
        `expected_warnings` context manager is used for all threads.
        If None, then no warnings are checked.
    """
    assert num_threads > 0

    def wrapper(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # One shared warnings context for every thread, to avoid races
            # on the global warnings filters.
            with expected_warnings(warnings_matching):
                # Spawn num_threads - 1 workers; the calling thread runs
                # the final invocation itself.
                threads = []
                for i in range(num_threads - 1):
                    thread = threading.Thread(target=func, args=args,
                                              kwargs=kwargs)
                    threads.append(thread)
                for thread in threads:
                    thread.start()

                func(*args, **kwargs)

                for thread in threads:
                    thread.join()
        return inner

    return wrapper

View File

@@ -0,0 +1,94 @@
import time
import numpy as np
import pytest
from scipy.spatial.distance import pdist, minkowski
from skimage._shared.coord import ensure_spacing
@pytest.mark.parametrize("p", [1, 2, np.inf])
@pytest.mark.parametrize("size", [30, 50, None])
def test_ensure_spacing_trivial(p, size):
    # --- Empty input
    assert ensure_spacing([], p_norm=p) == []

    # --- A unique point
    coord = np.random.randn(1, 2)
    assert np.array_equal(coord, ensure_spacing(coord, p_norm=p,
                                                min_split_size=size))

    # --- Verified spacing
    coord = np.random.randn(100, 2)

    # --- 0 spacing
    assert np.array_equal(coord, ensure_spacing(coord, spacing=0, p_norm=p,
                                                min_split_size=size))

    # Spacing is chosen to be half the minimum distance, so every point
    # is kept.
    spacing = pdist(coord, metric=minkowski, p=p).min() * 0.5

    out = ensure_spacing(coord, spacing=spacing, p_norm=p,
                         min_split_size=size)

    assert np.array_equal(coord, out)


@pytest.mark.parametrize("ndim", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("size", [2, 10, None])
def test_ensure_spacing_nD(ndim, size):
    # All points coincide, so exactly one should survive.
    coord = np.ones((5, ndim))

    expected = np.ones((1, ndim))

    assert np.array_equal(ensure_spacing(coord, min_split_size=size), expected)


@pytest.mark.parametrize("p", [1, 2, np.inf])
@pytest.mark.parametrize("size", [50, 100, None])
def test_ensure_spacing_batch_processing(p, size):
    coord = np.random.randn(100, 2)

    # --- Consider the average distance between the points as spacing
    spacing = np.median(pdist(coord, metric=minkowski, p=p))

    # Batched processing must give the same result as the unbatched run.
    expected = ensure_spacing(coord, spacing=spacing, p_norm=p)

    assert np.array_equal(ensure_spacing(coord, spacing=spacing, p_norm=p,
                                         min_split_size=size),
                          expected)


def test_max_batch_size():
    """Small batches are slow, large batches -> large allocations -> also slow.

    https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691
    """
    coords = np.random.randint(low=0, high=1848, size=(40000, 2))
    tstart = time.time()
    ensure_spacing(coords, spacing=100, min_split_size=50,
                   max_split_size=2000)
    dur1 = time.time() - tstart

    tstart = time.time()
    ensure_spacing(coords, spacing=100, min_split_size=50,
                   max_split_size=20000)
    dur2 = time.time() - tstart

    # Originally checked dur1 < dur2 to assert that the default batch size was
    # faster than a much larger batch size. However, on rare occasion a CI test
    # case would fail with dur1 ~5% larger than dur2. To be more robust to
    # variable load or differences across architectures, we relax this here.
    assert dur1 < 1.33 * dur2


@pytest.mark.parametrize("p", [1, 2, np.inf])
@pytest.mark.parametrize("size", [30, 50, None])
def test_ensure_spacing_p_norm(p, size):
    coord = np.random.randn(100, 2)

    # --- Consider the average distance between the points as spacing
    spacing = np.median(pdist(coord, metric=minkowski, p=p))
    out = ensure_spacing(coord, spacing=spacing, p_norm=p, min_split_size=size)

    # All surviving pairs must be strictly farther apart than ``spacing``.
    assert pdist(out, metric=minkowski, p=p).min() > spacing

View File

@@ -0,0 +1,21 @@
from ..fast_exp import fast_exp
import numpy as np
def test_fast_exp():
    # Sample the approximation over [-5, 0], where it is intended for use.
    X = np.linspace(-5, 0, 5000, endpoint=True)

    # Ground truth
    Y = np.exp(X)

    # Approximation at double precision
    _y_f64 = np.array([fast_exp['float64_t'](x) for x in X])

    # Approximation at single precision
    _y_f32 = np.array([fast_exp['float32_t'](x) for x in X.astype('float32')],
                      dtype='float32')

    for _y in [_y_f64, _y_f32]:
        # Mean absolute error of the approximation must stay below 3e-3.
        assert np.abs(Y - _y).mean() < 3e-3

View File

@@ -0,0 +1,78 @@
import pytest
from skimage._shared._geometry import polygon_clip, polygon_area
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
pytest.importorskip("matplotlib")
hand = np.array(
[[ 1.64516129, 1.16145833 ],
[ 1.64516129, 1.59375 ],
[ 1.35080645, 1.921875 ],
[ 1.375 , 2.18229167 ],
[ 1.68548387, 1.9375 ],
[ 1.60887097, 2.55208333 ],
[ 1.68548387, 2.69791667 ],
[ 1.76209677, 2.56770833 ],
[ 1.83064516, 1.97395833 ],
[ 1.89516129, 2.75 ],
[ 1.9516129 , 2.84895833 ],
[ 2.01209677, 2.76041667 ],
[ 1.99193548, 1.99479167 ],
[ 2.11290323, 2.63020833 ],
[ 2.2016129 , 2.734375 ],
[ 2.25403226, 2.60416667 ],
[ 2.14919355, 1.953125 ],
[ 2.30645161, 2.36979167 ],
[ 2.39112903, 2.36979167 ],
[ 2.41532258, 2.1875 ],
[ 2.1733871 , 1.703125 ],
[ 2.07782258, 1.16666667 ]])
def test_polygon_area():
x = [0, 0, 1, 1]
y = [0, 1, 1, 0]
assert_almost_equal(polygon_area(y, x), 1)
x = [0, 0, 1]
y = [0, 1, 1]
assert_almost_equal(polygon_area(y, x), 0.5)
x = [0, 0, 0.5, 1, 1, 0.5]
y = [0, 1, 0.5, 1, 0, 0.5]
assert_almost_equal(polygon_area(y, x), 0.5)
def test_poly_clip():
    """Clip simple polygons against the unit square and check the areas."""
    # Diamond dipping below the box: half of it survives clipping.
    cols = [0, 1, 2, 1]
    rows = [0, -1, 0, 1]
    rows_c, cols_c = polygon_clip(rows, cols, 0, 0, 1, 1)
    assert_equal(polygon_area(rows_c, cols_c), 0.5)

    # Rectangle overhanging two edges of the box.
    cols = [-1, 1.5, 1.5, -1]
    rows = [.5, 0.5, 1.5, 1.5]
    rows_c, cols_c = polygon_clip(rows, cols, 0, 0, 1, 1)
    assert_equal(polygon_area(rows_c, cols_c), 0.5)
def test_hand_clip():
    """Clip the hand polygon with successively tighter bounding boxes."""
    clip_r, clip_c = polygon_clip(hand[:, 1], hand[:, 0], 1.0, 1.5, 2.1, 2.5)
    assert_equal(clip_r.size, 19)
    # The clipped polygon must come back closed.
    assert_equal(clip_r[0], clip_r[-1])
    assert_equal(clip_c[0], clip_c[-1])

    clip_r, clip_c = polygon_clip(hand[:, 1], hand[:, 0], 1.0, 1.5, 1.7, 2.5)
    assert_equal(clip_r.size, 6)

    clip_r, clip_c = polygon_clip(hand[:, 1], hand[:, 0], 1.0, 1.5, 1.5, 2.5)
    assert_equal(clip_r.size, 5)

View File

@@ -0,0 +1,27 @@
from skimage._shared.interpolation import coord_map_py
from skimage._shared.testing import assert_array_equal
def test_coord_map():
    """coord_map_py should implement each boundary mode over a small range."""
    coords = range(-6, 6)
    cases = [
        ('S', [2, 3, 3, 2, 1, 0, 0, 1, 2, 3, 3, 2]),   # symmetric
        ('W', [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1]),   # wrap
        ('E', [0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 3]),   # edge
        ('R', [0, 1, 2, 3, 2, 1, 0, 1, 2, 3, 2, 1]),   # reflect
    ]
    for mode, expected in cases:
        assert_array_equal([coord_map_py(4, n, mode) for n in coords],
                           expected)
    # Reflect on a length-1 axis always maps to index 0.
    assert_array_equal([coord_map_py(1, n, 'R') for n in coords], [0] * 12)
    # Unknown modes leave coordinates untouched.
    assert_array_equal([coord_map_py(4, n, 'undefined') for n in coords],
                       list(coords))

View File

@@ -0,0 +1,42 @@
import numpy as np
from skimage._shared.utils import safe_as_int
from skimage._shared import testing
def test_int_cast_not_possible():
    """safe_as_int must reject values farther than atol from any integer."""
    bad_inputs = [7.1,
                  [7.1, 0.9],
                  np.r_[7.1, 0.9],
                  (7.1, 0.9),
                  ((3, 4, 1), (2, 7.6, 289))]
    # Default tolerance.
    for value in bad_inputs:
        with testing.raises(ValueError):
            safe_as_int(value)
    # Explicit (still too small) tolerance, passed positionally as before.
    for value, atol in zip(bad_inputs, [0.09, 0.09, 0.09, 0.09, 0.25]):
        with testing.raises(ValueError):
            safe_as_int(value, atol)
def test_int_cast_possible():
    """Values within atol of an integer are rounded to that integer."""
    testing.assert_equal(safe_as_int(7.1, atol=0.11), 7)
    testing.assert_equal(safe_as_int(-7.1, atol=0.11), -7)
    testing.assert_equal(safe_as_int(41.9, atol=0.11), 42)
    testing.assert_array_equal(safe_as_int([2, 42, 5789234.0, 87, 4]),
                               np.r_[2, 42, 5789234, 87, 4])
    nearly_int = np.r_[[[3, 4, 1.000000001],
                        [7, 2, -8.999999999],
                        [6, 9, -4234918347.]]]
    expected = np.r_[[[3, 4, 1],
                      [7, 2, -9],
                      [6, 9, -4234918347]]]
    testing.assert_array_equal(safe_as_int(nearly_int), expected)

View File

@@ -0,0 +1,123 @@
""" Testing decorators module
"""
from numpy.testing import assert_equal
from skimage._shared.testing import doctest_skip_parser, test_parallel
from skimage._shared import testing
from skimage._shared._warnings import expected_warnings
from warnings import warn
def test_skipper():
    """Check that doctest_skip_parser rewrites ``# skip if`` markers.

    The parser reads module-level HAVE_* flags: a true condition turns the
    marker into ``# doctest: +SKIP``; a false condition strips the marker.
    """
    def f():
        pass

    class c():
        def __init__(self):
            self.me = "I think, therefore..."

    # NOTE: the exact whitespace of this docstring must mirror the
    # `expected` strings below -- the parser rewrites it line by line.
    docstring = \
        """ Header
        >>> something # skip if not HAVE_AMODULE
        >>> something + else
        >>> a = 1 # skip if not HAVE_BMODULE
        >>> something2 # skip if HAVE_AMODULE
        """
    f.__doc__ = docstring
    c.__doc__ = docstring

    # Flags are looked up as globals of this module by the parser.
    global HAVE_AMODULE, HAVE_BMODULE
    HAVE_AMODULE = False
    HAVE_BMODULE = True

    f2 = doctest_skip_parser(f)
    c2 = doctest_skip_parser(c)
    # The parser mutates the docstring in place and returns its argument.
    assert f is f2
    assert c is c2

    expected = \
        """ Header
        >>> something # doctest: +SKIP
        >>> something + else
        >>> a = 1
        >>> something2
        """
    assert_equal(f2.__doc__, expected)
    assert_equal(c2.__doc__, expected)

    # Flip the flags: the skipped/kept lines swap roles.
    HAVE_AMODULE = True
    HAVE_BMODULE = False
    f.__doc__ = docstring
    c.__doc__ = docstring
    f2 = doctest_skip_parser(f)
    c2 = doctest_skip_parser(c)
    assert f is f2
    expected = \
        """ Header
        >>> something
        >>> something + else
        >>> a = 1 # doctest: +SKIP
        >>> something2 # doctest: +SKIP
        """
    assert_equal(f2.__doc__, expected)
    assert_equal(c2.__doc__, expected)

    # An undefined flag must surface as a NameError.
    del HAVE_AMODULE
    f.__doc__ = docstring
    c.__doc__ = docstring
    with testing.raises(NameError):
        doctest_skip_parser(f)
    with testing.raises(NameError):
        doctest_skip_parser(c)
def test_test_parallel():
    """Each decorated call runs once per thread, growing the shared list."""
    calls = []

    @test_parallel()
    def bump_default():
        calls.append(None)

    bump_default()
    # The default thread count is two.
    assert len(calls) == 2

    @test_parallel(num_threads=1)
    def bump_single():
        calls.append(None)

    bump_single()
    assert len(calls) == 3

    @test_parallel(num_threads=3)
    def bump_triple():
        calls.append(None)

    bump_triple()
    assert len(calls) == 6
def test_parallel_warning():
    """Warnings raised inside worker threads are surfaced and matchable."""
    @test_parallel()
    def emits_warning():
        warn("Test warning for test parallel", stacklevel=2)

    with expected_warnings(['Test warning for test parallel']):
        emits_warning()

    # The decorator itself can also assert on the emitted warnings.
    @test_parallel(warnings_matching=['Test warning for test parallel'])
    def self_checking():
        warn("Test warning for test parallel", stacklevel=2)

    self_checking()
def test_expected_warnings_noop():
    """``expected_warnings(None)`` must behave as a transparent no-op."""
    # This will ensure the line below it behaves like a no-op
    with expected_warnings(['Expected warnings test']):
        # This should behave as a no-op
        with expected_warnings(None):
            warn('Expected warnings test')

View File

@@ -0,0 +1,294 @@
import sys
import warnings
import inspect
import numpy as np
import pytest
from skimage._shared import testing
from skimage._shared.utils import (check_nD, deprecate_kwarg,
_validate_interpolation_order,
change_default_value, remove_arg,
_supported_float_type,
channel_as_last_axis)
# Extended-precision complex support is platform dependent; only include
# np.complex256 where this NumPy build provides it.
complex_dtypes = [np.complex64, np.complex128]
if hasattr(np, 'complex256'):
    complex_dtypes += [np.complex256]

# numpydoc is optional; the docstring-rewriting assertions below branch on
# whether it is importable.
have_numpydoc = False
try:
    import numpydoc  # noqa: F401
    have_numpydoc = True
except ImportError:
    pass
def test_remove_argument():
    """remove_arg must warn whenever the removed argument is supplied."""
    @remove_arg('arg1', changed_version='0.12')
    def foo(arg0, arg1=0, arg2=1):
        """Expected docstring"""
        return arg0, arg1, arg2

    @remove_arg('arg1', changed_version='0.12',
                help_msg="Some indication on future behavior")
    def bar(arg0, arg1=0, arg2=1):
        """Expected docstring"""
        return arg0, arg1, arg2

    base_msg = ("arg1 argument is deprecated and will be removed "
                "in version 0.12. To avoid this warning, "
                "please do not use the arg1 argument. Please see "
                "{} documentation for more details.")

    # Positional or keyword use of arg1 both trigger the warning.
    for call in (lambda: foo(0, 1), lambda: foo(0, arg1=1)):
        with pytest.warns(FutureWarning) as record:
            assert call() == (0, 1, 1)
        assert str(record[0].message) == base_msg.format('foo')

    bar_msg = base_msg.format('bar') + " Some indication on future behavior"
    for call in (lambda: bar(0, 1), lambda: bar(0, arg1=1)):
        with pytest.warns(FutureWarning) as record:
            assert call() == (0, 1, 1)
        assert str(record[0].message) == bar_msg

    with warnings.catch_warnings(record=True) as recorded:
        # Calls that omit arg1 stay silent.
        assert foo(0) == (0, 0, 1)
        assert foo(0, arg2=0) == (0, 0, 0)
        # The decorator preserves function metadata.
        assert foo.__name__ == 'foo'
        if sys.flags.optimize < 2:
            # if PYTHONOPTIMIZE is set to 2, docstrings are stripped
            assert foo.__doc__ == 'Expected docstring'
    # Assert no warnings were raised
    assert len(recorded) == 0
def test_change_default_value():
    """change_default_value warns only when the argument is left at default."""
    @change_default_value('arg1', new_value=-1, changed_version='0.12')
    def foo(arg0, arg1=0, arg2=1):
        """Expected docstring"""
        return arg0, arg1, arg2

    @change_default_value('arg1', new_value=-1, changed_version='0.12',
                          warning_msg="Custom warning message")
    def bar(arg0, arg1=0, arg2=1):
        """Expected docstring"""
        return arg0, arg1, arg2

    # Relying on the default raises a FutureWarning for both functions.
    with pytest.warns(FutureWarning) as record:
        assert foo(0) == (0, 0, 1)
        assert bar(0) == (0, 0, 1)

    generic_msg = ("The new recommended value for arg1 is -1. Until "
                   "version 0.12, the default arg1 value is 0. From "
                   "version 0.12, the arg1 default value will be -1. "
                   "To avoid this warning, please explicitly set arg1 value.")
    assert str(record[0].message) == generic_msg
    assert str(record[1].message) == "Custom warning message"

    with warnings.catch_warnings(record=True) as recorded:
        # Supplying arg1 explicitly (positionally or by keyword) is silent.
        assert foo(0, 2) == (0, 2, 1)
        assert foo(0, arg1=0) == (0, 0, 1)
        # The decorator preserves function metadata.
        assert foo.__name__ == 'foo'
        if sys.flags.optimize < 2:
            # if PYTHONOPTIMIZE is set to 2, docstrings are stripped
            assert foo.__doc__ == 'Expected docstring'
    # Assert no warnings were raised
    assert len(recorded) == 0
def test_deprecate_kwarg():
    """deprecate_kwarg must forward old kwarg names to new ones and warn."""
    @deprecate_kwarg({'old_arg1': 'new_arg1'}, '0.19')
    def foo(arg0, new_arg1=1, arg2=None):
        """Expected docstring"""
        return arg0, new_arg1, arg2

    @deprecate_kwarg({'old_arg1': 'new_arg1'},
                     deprecated_version='0.19',
                     warning_msg="Custom warning message")
    def bar(arg0, new_arg1=1, arg2=None):
        """Expected docstring"""
        return arg0, new_arg1, arg2

    # Assert that the DeprecationWarning is raised when the deprecated
    # argument name is used and that the result is valid
    with pytest.warns(FutureWarning) as record:
        assert foo(0, old_arg1=1) == (0, 1, None)
        assert bar(0, old_arg1=1) == (0, 1, None)

    msg = ("`old_arg1` is a deprecated argument name "
           "for `foo`. Please use `new_arg1` instead.")
    assert str(record[0].message) == msg
    assert str(record[1].message) == "Custom warning message"

    # Assert that nothing happens when the function is called with the
    # new API
    with warnings.catch_warnings(record=True) as recorded:
        # No kwargs
        assert foo(0) == (0, 1, None)
        assert foo(0, 2) == (0, 2, None)
        assert foo(0, 1, 2) == (0, 1, 2)
        # Kwargs without deprecated argument
        assert foo(0, new_arg1=1, arg2=2) == (0, 1, 2)
        assert foo(0, new_arg1=2) == (0, 2, None)
        assert foo(0, arg2=2) == (0, 1, 2)
        assert foo(0, 1, arg2=2) == (0, 1, 2)
        # Function name and doc is preserved
        assert foo.__name__ == 'foo'
        if sys.flags.optimize < 2:
            # if PYTHONOPTIMIZE is set to 2, docstrings are stripped.
            # With numpydoc installed the decorator appends an
            # "Other Parameters" section documenting the old name.
            if not have_numpydoc:
                assert foo.__doc__ == """Expected docstring"""
            else:
                assert foo.__doc__ == """Expected docstring

    Other Parameters
    ----------------
    old_arg1 : DEPRECATED
        Deprecated in favor of `new_arg1`.

        .. deprecated:: 0.19
    """
    # Assert no warnings were raised
    assert len(recorded) == 0
def test_check_nD():
    """check_nD must reject an empty array."""
    image = np.random.random(200**2).reshape((200, 200))
    # An inverted slice (30:10) produces an empty view.
    empty_view = image[10:30, 30:10]
    with testing.raises(ValueError):
        check_nD(empty_view, 2)
@pytest.mark.parametrize('dtype', [bool, int, np.uint8, np.uint16,
                                   float, np.float32, np.float64])
@pytest.mark.parametrize('order', [None, -1, 0, 1, 2, 3, 4, 5, 6])
def test_validate_interpolation_order(dtype, order):
    """Check default, invalid, and pass-through interpolation orders.

    Fixes a precedence bug in the default-order branch: the previous
    ``assert (f(...) == 0 if dtype == bool else 1)`` parsed as a conditional
    expression whose non-bool arm was the constant ``1``, so the non-bool
    default was never actually tested.
    """
    if order is None:
        # Default order: 0 for bool images, 1 otherwise.
        expected_default = 0 if dtype == bool else 1
        assert _validate_interpolation_order(dtype, None) == expected_default
    elif order < 0 or order > 5:
        # Order outside the valid 0-5 range must raise.
        with testing.raises(ValueError):
            _validate_interpolation_order(dtype, order)
    elif dtype == bool and order != 0:
        # Non-zero interpolation orders are invalid for bool images.
        with pytest.raises(ValueError):
            _validate_interpolation_order(bool, order)
    else:
        # Valid dtype/order combination is returned unchanged.
        assert _validate_interpolation_order(dtype, order) == order
@pytest.mark.parametrize(
    'dtype',
    [bool, np.float16, np.float32, np.float64, np.uint8, np.uint16, np.uint32,
     np.uint64, np.int8, np.int16, np.int32, np.int64]
)
def test_supported_float_dtype_real(dtype):
    """Half/single floats map to float32; everything else to float64."""
    expected = np.float32 if dtype in (np.float16, np.float32) else np.float64
    assert _supported_float_type(dtype) == expected
@pytest.mark.parametrize('dtype', complex_dtypes)
@pytest.mark.parametrize('allow_complex', [False, True])
def test_supported_float_dtype_complex(dtype, allow_complex):
    """Complex dtypes are accepted only when allow_complex is set."""
    if not allow_complex:
        with testing.raises(ValueError):
            _supported_float_type(dtype, allow_complex=allow_complex)
    else:
        expected = np.complex64 if dtype == np.complex64 else np.complex128
        result = _supported_float_type(dtype, allow_complex=allow_complex)
        assert result == expected
@pytest.mark.parametrize(
    'dtype', ['f', 'float32', np.float32, np.dtype(np.float32)]
)
def test_supported_float_dtype_input_kinds(dtype):
    """Strings, NumPy scalar types, and dtype objects are all accepted."""
    assert _supported_float_type(dtype) == np.float32
@pytest.mark.parametrize(
    'dtypes, expected',
    [
        ((np.float16, np.float64), np.float64),
        ([np.float32, np.uint16, np.int8], np.float64),
        ({np.float32, np.float16}, np.float32),
    ]
)
def test_supported_float_dtype_sequence(dtypes, expected):
    """A collection of dtypes promotes to the widest required float type."""
    promoted = _supported_float_type(dtypes)
    assert promoted == expected
@channel_as_last_axis(multichannel_output=False)
def _decorated_channel_axis_size(x, *, channel_axis=None):
    """Return the size of the channel axis of ``x`` (None if no channels).

    The decorator is expected to move the channel axis to the last
    position, so inside the body ``channel_axis`` must always be -1.
    """
    if channel_axis is None:
        return None
    assert channel_axis == -1
    return x.shape[-1]
@testing.parametrize('channel_axis', [None, 0, 1, 2, -1, -2, -3])
def test_decorated_channel_axis_shape(channel_axis):
    """channel_as_last_axis must report the size of the requested axis."""
    # Unique sizes per axis so that picking the wrong axis is detected.
    data = np.zeros((2, 3, 4))
    reported = _decorated_channel_axis_size(data, channel_axis=channel_axis)
    if channel_axis is None:
        assert reported is None
    else:
        assert reported == data.shape[channel_axis]
@deprecate_kwarg({"old_kwarg": "new_kwarg"}, deprecated_version="x.y.z")
def _function_with_deprecated_kwarg(*, new_kwarg):
    """Do nothing; exists only to exercise deprecate_kwarg's warning."""
    pass
def test_deprecate_kwarg_location():
    """Assert that warning message issued by deprecate_kwarg points to
    file and line number where decorated function is called.
    """
    # NOTE: ``expected_lineno`` is derived from this frame's current line,
    # so it must stay on the line directly after the decorated call --
    # do not insert lines between them.
    with pytest.warns(FutureWarning) as record:
        _function_with_deprecated_kwarg(old_kwarg=True)
        expected_lineno = inspect.currentframe().f_lineno - 1
    assert record[0].lineno == expected_lineno
    assert record[0].filename == __file__

View File

@@ -0,0 +1,41 @@
"""Tests for the version requirement functions.
"""
import numpy as np
from numpy.testing import assert_equal
from skimage._shared import version_requirements as version_req
from skimage._shared import testing
def test_get_module_version():
    """Installed modules report a version; unknown ones raise ImportError."""
    for module_name in ('numpy', 'scipy'):
        assert version_req.get_module_version(module_name)
    with testing.raises(ImportError):
        version_req.get_module_version('fakenumpy')
def test_is_installed():
    """is_installed checks both module presence and the version constraint."""
    assert version_req.is_installed('python', '>=2.7')
    # An impossible version constraint must report not-installed.
    assert not version_req.is_installed('numpy', '<1.0')
def test_require():
    """require() runs a function when satisfied, raises when it is not."""
    # Satisfied requirements: the function executes normally.
    @version_req.require('python', '>2.7')
    @version_req.require('numpy', '>1.5')
    def satisfied():
        return 1

    assert_equal(satisfied(), 1)

    # An impossible requirement turns the call into an ImportError.
    @version_req.require('scipy', '<0.1')
    def unsatisfied():
        return 0

    with testing.raises(ImportError):
        unsatisfied()
def test_get_module():
    """get_module must return the imported module object itself."""
    assert version_req.get_module("numpy") is np

View File

@@ -0,0 +1,37 @@
import os
from skimage._shared._warnings import expected_warnings
import pytest
@pytest.fixture(scope='function')
def setup():
    """Run a test without SKIMAGE_TEST_STRICT_WARNINGS in the environment."""
    saved = os.environ.pop('SKIMAGE_TEST_STRICT_WARNINGS', None)
    yield
    # Restore the caller's setting, if any.
    if saved is not None:
        os.environ['SKIMAGE_TEST_STRICT_WARNINGS'] = saved
def test_strict_warnigns_default(setup):
    """A missing expected warning must raise when no env var is set."""
    with pytest.raises(ValueError):
        with expected_warnings(['some warnings']):
            pass
@pytest.mark.parametrize('strictness', ['1', 'true', 'True', 'TRUE'])
def test_strict_warning_true(setup, strictness):
    """Any truthy spelling of the env var keeps strict checking enabled."""
    os.environ['SKIMAGE_TEST_STRICT_WARNINGS'] = strictness
    with pytest.raises(ValueError):
        with expected_warnings(['some warnings']):
            pass
@pytest.mark.parametrize('strictness', ['0', 'false', 'False', 'FALSE'])
def test_strict_warning_false(setup, strictness):
    """A falsy env var disables strictness: missing warnings do not raise."""
    os.environ['SKIMAGE_TEST_STRICT_WARNINGS'] = strictness
    with expected_warnings(['some warnings']):
        pass

View File

@@ -0,0 +1,759 @@
import inspect
import functools
import sys
import warnings
from collections.abc import Iterable
import numpy as np
from ._warnings import all_warnings, warn
# Names this module exposes via ``from ... import *``.
__all__ = ['deprecated', 'get_bound_method_class', 'all_warnings',
           'safe_as_int', 'check_shape_equality', 'check_nD', 'warn',
           'reshape_nd', 'identity', 'slice_at_axis']
class skimage_deprecation(Warning):
    """Warning category used for skimage deprecations.

    A dedicated subclass is used because, since Python >= 2.7,
    deprecation warnings are silenced by default.
    """
def _get_stack_rank(func):
    """Return function rank in the call stack."""
    # Walk the __wrapped__ chain iteratively instead of recursing.
    rank = 0
    while _is_wrapped(func):
        func = func.__wrapped__
        rank += 1
    return rank
def _is_wrapped(func):
return "__wrapped__" in dir(func)
def _get_stack_length(func):
    """Return function call stack length.

    The outermost (module-level) binding of ``func`` is looked up by name
    in its globals so the full decorator stack is counted even when this is
    called on an inner wrapper; falls back to ``func`` itself if absent.
    """
    return _get_stack_rank(func.__globals__.get(func.__name__, func))
class _DecoratorBaseClass:
    """Used to manage decorators' warnings stacklevel.

    The `_stack_length` class variable is used to store the number of
    times a function is wrapped by a decorator.

    Let `stack_length` be the total number of times a decorated
    function is wrapped, and `stack_rank` be the rank of the decorator
    in the decorators stack. The stacklevel of a warning is then
    `stacklevel = 1 + stack_length - stack_rank`.
    """

    # Maps function name -> cached total wrap count, shared by subclasses.
    _stack_length = {}

    def get_stack_length(self, func):
        # Fall back to computing the length on demand when not cached.
        return self._stack_length.get(func.__name__,
                                      _get_stack_length(func))
class change_default_value(_DecoratorBaseClass):
    """Decorator for changing the default value of an argument.

    Parameters
    ----------
    arg_name: str
        The name of the argument to be updated.
    new_value: any
        The argument new value.
    changed_version : str
        The package version in which the change will be introduced.
    warning_msg: str
        Optional warning message. If None, a generic warning message
        is used.
    """

    def __init__(self, arg_name, *, new_value, changed_version,
                 warning_msg=None):
        self.arg_name = arg_name
        self.new_value = new_value
        self.warning_msg = warning_msg
        self.changed_version = changed_version

    def __call__(self, func):
        parameters = inspect.signature(func).parameters
        # Position of the tracked argument, to detect positional use below.
        arg_idx = list(parameters.keys()).index(self.arg_name)
        old_value = parameters[self.arg_name].default

        stack_rank = _get_stack_rank(func)

        if self.warning_msg is None:
            # NOTE: the generated message is cached on the decorator
            # instance the first time it is applied to a function.
            self.warning_msg = (
                f'The new recommended value for {self.arg_name} is '
                f'{self.new_value}. Until version {self.changed_version}, '
                f'the default {self.arg_name} value is {old_value}. '
                f'From version {self.changed_version}, the {self.arg_name} '
                f'default value will be {self.new_value}. To avoid '
                f'this warning, please explicitly set {self.arg_name} value.')

        @functools.wraps(func)
        def fixed_func(*args, **kwargs):
            # Aim the warning at the caller of the outermost wrapper.
            stacklevel = 1 + self.get_stack_length(func) - stack_rank
            # Warn only when the caller did not supply the argument at all.
            if len(args) < arg_idx + 1 and self.arg_name not in kwargs.keys():
                # warn that arg_name default value changed:
                warnings.warn(self.warning_msg, FutureWarning,
                              stacklevel=stacklevel)
            return func(*args, **kwargs)

        return fixed_func
class remove_arg(_DecoratorBaseClass):
    """Decorator to remove an argument from function's signature.

    Parameters
    ----------
    arg_name: str
        The name of the argument to be removed.
    changed_version : str
        The package version in which the warning will be replaced by
        an error.
    help_msg: str
        Optional message appended to the generic warning message.
    """

    def __init__(self, arg_name, *, changed_version, help_msg=None):
        self.arg_name = arg_name
        self.help_msg = help_msg
        self.changed_version = changed_version

    def __call__(self, func):
        parameters = inspect.signature(func).parameters
        # Position of the argument, to detect positional use in the wrapper.
        arg_idx = list(parameters.keys()).index(self.arg_name)
        warning_msg = (
            f'{self.arg_name} argument is deprecated and will be removed '
            f'in version {self.changed_version}. To avoid this warning, '
            f'please do not use the {self.arg_name} argument. Please '
            f'see {func.__name__} documentation for more details.')

        if self.help_msg is not None:
            warning_msg += f' {self.help_msg}'

        stack_rank = _get_stack_rank(func)

        @functools.wraps(func)
        def fixed_func(*args, **kwargs):
            # Aim the warning at the caller of the outermost wrapper.
            stacklevel = 1 + self.get_stack_length(func) - stack_rank
            if len(args) > arg_idx or self.arg_name in kwargs.keys():
                # warn that arg_name is deprecated
                warnings.warn(warning_msg, FutureWarning,
                              stacklevel=stacklevel)
            return func(*args, **kwargs)

        return fixed_func
def docstring_add_deprecated(func, kwarg_mapping, deprecated_version):
    """Add deprecated kwarg(s) to the "Other Params" section of a docstring.

    Parameters
    ----------
    func : function
        The function whose docstring we wish to update.
    kwarg_mapping : dict
        A dict containing {old_arg: new_arg} key/value pairs as used by
        `deprecate_kwarg`.
    deprecated_version : str
        A major.minor version string specifying when old_arg was
        deprecated.

    Returns
    -------
    new_doc : str
        The updated docstring. Returns the original docstring if numpydoc is
        not available.
    """
    if func.__doc__ is None:
        return None
    try:
        from numpydoc.docscrape import FunctionDoc, Parameter
    except ImportError:
        # Return an unmodified docstring if numpydoc is not available.
        return func.__doc__

    Doc = FunctionDoc(func)
    for old_arg, new_arg in kwarg_mapping.items():
        desc = [f'Deprecated in favor of `{new_arg}`.',
                '',
                f'.. deprecated:: {deprecated_version}']
        # Record the old name as a DEPRECATED entry under "Other Parameters".
        Doc['Other Parameters'].append(
            Parameter(name=old_arg,
                      type='DEPRECATED',
                      desc=desc)
        )
    new_docstring = str(Doc)

    # new_docstring will have a header starting with:
    #
    # .. function:: func.__name__
    #
    # and some additional blank lines. We strip these off below.
    split = new_docstring.split('\n')
    no_header = split[1:]
    while not no_header[0].strip():
        no_header.pop(0)

    # Store the initial description before any of the Parameters fields.
    # Usually this is a single line, but the while loop covers any case
    # where it is not.
    descr = no_header.pop(0)
    while no_header[0].strip():
        descr += '\n    ' + no_header.pop(0)
    descr += '\n\n'
    # '\n    ' rather than '\n' here to restore the original indentation.
    final_docstring = descr + '\n    '.join(no_header)
    # strip any extra spaces from ends of lines
    final_docstring = '\n'.join(
        [line.rstrip() for line in final_docstring.split('\n')]
    )
    return final_docstring
class deprecate_kwarg(_DecoratorBaseClass):
    """Decorator ensuring backward compatibility when argument names are
    modified in a function definition.

    Parameters
    ----------
    kwarg_mapping: dict
        Mapping between the function's old argument names and the new
        ones.
    deprecated_version : str
        The package version in which the argument was first deprecated.
    warning_msg: str
        Optional warning message. If None, a generic warning message
        is used.
    removed_version : str
        The package version in which the deprecated argument will be
        removed.
    """

    def __init__(self, kwarg_mapping, deprecated_version, warning_msg=None,
                 removed_version=None):
        self.kwarg_mapping = kwarg_mapping
        if warning_msg is None:
            # Build a template; {old_arg}/{func_name}/{new_arg} are filled
            # in per call inside the wrapper.
            self.warning_msg = ("`{old_arg}` is a deprecated argument name "
                                "for `{func_name}`. ")
            if removed_version is not None:
                self.warning_msg += (f'It will be removed in '
                                     f'version {removed_version}. ')
            self.warning_msg += "Please use `{new_arg}` instead."
        else:
            self.warning_msg = warning_msg

        self.deprecated_version = deprecated_version

    def __call__(self, func):
        stack_rank = _get_stack_rank(func)

        @functools.wraps(func)
        def fixed_func(*args, **kwargs):
            # Aim the warning at the caller of the outermost wrapper.
            stacklevel = 1 + self.get_stack_length(func) - stack_rank
            for old_arg, new_arg in self.kwarg_mapping.items():
                if old_arg in kwargs:
                    # warn that the function interface has changed:
                    warnings.warn(self.warning_msg.format(
                        old_arg=old_arg, func_name=func.__name__,
                        new_arg=new_arg), FutureWarning,
                        stacklevel=stacklevel)
                    # Substitute new_arg to old_arg
                    kwargs[new_arg] = kwargs.pop(old_arg)

            # Call the function with the fixed arguments
            return func(*args, **kwargs)

        if func.__doc__ is not None:
            # Document the old names under "Other Parameters" (numpydoc only).
            newdoc = docstring_add_deprecated(func, self.kwarg_mapping,
                                              self.deprecated_version)
            fixed_func.__doc__ = newdoc

        return fixed_func
class channel_as_last_axis:
    """Decorator for automatically making channels axis last for all arrays.

    This decorator reorders axes for compatibility with functions that only
    support channels along the last axis. After the function call is complete
    the channels axis is restored back to its original position.

    Parameters
    ----------
    channel_arg_positions : tuple of int, optional
        Positional arguments at the positions specified in this tuple are
        assumed to be multichannel arrays. The default is to assume only the
        first argument to the function is a multichannel array.
    channel_kwarg_names : tuple of str, optional
        A tuple containing the names of any keyword arguments corresponding to
        multichannel arrays.
    multichannel_output : bool, optional
        A boolean that should be True if the output of the function is a
        multichannel array and False otherwise. This decorator does not
        currently support the general case of functions with multiple outputs
        where some or all are multichannel.
    """

    def __init__(self, channel_arg_positions=(0,), channel_kwarg_names=(),
                 multichannel_output=True):
        self.arg_positions = set(channel_arg_positions)
        self.kwarg_names = set(channel_kwarg_names)
        self.multichannel_output = multichannel_output

    def __call__(self, func):
        @functools.wraps(func)
        def fixed_func(*args, **kwargs):
            channel_axis = kwargs.get('channel_axis', None)

            # No channel axis declared: nothing to reorder.
            if channel_axis is None:
                return func(*args, **kwargs)

            # TODO: convert scalars to a tuple in anticipation of eventually
            #       supporting a tuple of channel axes. Right now, only an
            #       integer or a single-element tuple is supported, though.
            if np.isscalar(channel_axis):
                channel_axis = (channel_axis,)
            if len(channel_axis) > 1:
                raise ValueError(
                    "only a single channel axis is currently supported")

            # Channels already last: call through unchanged.
            if channel_axis == (-1,) or channel_axis == -1:
                return func(*args, **kwargs)

            if self.arg_positions:
                new_args = []
                for pos, arg in enumerate(args):
                    if pos in self.arg_positions:
                        new_args.append(np.moveaxis(arg, channel_axis[0], -1))
                    else:
                        new_args.append(arg)
                new_args = tuple(new_args)
            else:
                new_args = args

            for name in self.kwarg_names:
                kwargs[name] = np.moveaxis(kwargs[name], channel_axis[0], -1)

            # now that we have moved the channels axis to the last position,
            # change the channel_axis argument to -1
            kwargs["channel_axis"] = -1

            # Call the function with the fixed arguments
            out = func(*new_args, **kwargs)
            if self.multichannel_output:
                # Restore the caller's original channel axis position.
                out = np.moveaxis(out, -1, channel_axis[0])
            return out

        return fixed_func
class deprecated:
    """Decorator to mark deprecated functions with warning.

    Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>.

    Parameters
    ----------
    alt_func : str
        If given, tell user what function to use instead.
    behavior : {'warn', 'raise'}
        Behavior during call to deprecated function: 'warn' = warn user that
        function is deprecated; 'raise' = raise error.
    removed_version : str
        The package version in which the deprecated function will be removed.
    """

    def __init__(self, alt_func=None, behavior='warn', removed_version=None):
        self.alt_func = alt_func
        self.behavior = behavior
        self.removed_version = removed_version

    def __call__(self, func):
        alt_msg = ''
        if self.alt_func is not None:
            alt_msg = f' Use ``{self.alt_func}`` instead.'
        rmv_msg = ''
        if self.removed_version is not None:
            rmv_msg = f' and will be removed in version {self.removed_version}'

        msg = f'Function ``{func.__name__}`` is deprecated{rmv_msg}.{alt_msg}'

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if self.behavior == 'warn':
                func_code = func.__code__
                # skimage_deprecation is silenced by default; force display.
                warnings.simplefilter('always', skimage_deprecation)
                # warn_explicit lets the warning point at the function's
                # own definition site rather than this wrapper.
                warnings.warn_explicit(msg,
                                       category=skimage_deprecation,
                                       filename=func_code.co_filename,
                                       lineno=func_code.co_firstlineno + 1)
            elif self.behavior == 'raise':
                raise skimage_deprecation(msg)
            return func(*args, **kwargs)

        # modify doc string to display deprecation warning
        doc = '**Deprecated function**.' + alt_msg
        if wrapped.__doc__ is None:
            wrapped.__doc__ = doc
        else:
            wrapped.__doc__ = doc + '\n\n    ' + wrapped.__doc__

        return wrapped
def get_bound_method_class(m):
    """Return the class for a bound method.

    Parameters
    ----------
    m : bound method
        A method bound to an instance.

    Returns
    -------
    type
        The class of the instance that ``m`` is bound to.
    """
    # The Python 2 ``im_class`` branch (guarded by ``sys.version < '3'``)
    # was unreachable on all supported interpreters and has been removed.
    return m.__self__.__class__
def safe_as_int(val, atol=1e-3):
"""
Attempt to safely cast values to integer format.
Parameters
----------
val : scalar or iterable of scalars
Number or container of numbers which are intended to be interpreted as
integers, e.g., for indexing purposes, but which may not carry integer
type.
atol : float
Absolute tolerance away from nearest integer to consider values in
``val`` functionally integers.
Returns
-------
val_int : NumPy scalar or ndarray of dtype `np.int64`
Returns the input value(s) coerced to dtype `np.int64` assuming all
were within ``atol`` of the nearest integer.
Notes
-----
This operation calculates ``val`` modulo 1, which returns the mantissa of
all values. Then all mantissas greater than 0.5 are subtracted from one.
Finally, the absolute tolerance from zero is calculated. If it is less
than ``atol`` for all value(s) in ``val``, they are rounded and returned
in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is
returned.
If any value(s) are outside the specified tolerance, an informative error
is raised.
Examples
--------
>>> safe_as_int(7.0)
7
>>> safe_as_int([9, 4, 2.9999999999])
array([9, 4, 3])
>>> safe_as_int(53.1)
Traceback (most recent call last):
...
ValueError: Integer argument required but received 53.1, check inputs.
>>> safe_as_int(53.01, atol=0.01)
53
"""
mod = np.asarray(val) % 1 # Extract mantissa
# Check for and subtract any mod values > 0.5 from 1
if mod.ndim == 0: # Scalar input, cannot be indexed
if mod > 0.5:
mod = 1 - mod
else: # Iterable input, now ndarray
mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int
try:
np.testing.assert_allclose(mod, 0, atol=atol)
except AssertionError:
raise ValueError(f'Integer argument required but received '
f'{val}, check inputs.')
return np.round(val).astype(np.int64)
def check_shape_equality(*images):
    """Raise a ValueError unless every image matches the first one's shape."""
    reference_shape = images[0].shape
    for image in images[1:]:
        if image.shape != reference_shape:
            raise ValueError('Input images must have the same dimensions.')
    return
def slice_at_axis(sl, axis):
    """
    Construct tuple of slices to slice an array in the given dimension.

    Parameters
    ----------
    sl : slice
        The slice for the given dimension.
    axis : int
        The axis to which `sl` is applied. All other dimensions are left
        "unsliced".

    Returns
    -------
    sl : tuple of slices
        A tuple with slices matching `shape` in length.

    Examples
    --------
    >>> slice_at_axis(slice(None, 3, -1), 1)
    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
    """
    # Leading axes untouched, `sl` at `axis`, trailing axes via Ellipsis.
    leading = (slice(None),) * axis
    return leading + (sl, Ellipsis)
def reshape_nd(arr, ndim, dim):
    """Reshape a 1D array to have n dimensions, all singletons but one.

    Parameters
    ----------
    arr : array, shape (N,)
        Input array
    ndim : int
        Number of desired dimensions of reshaped array.
    dim : int
        Which dimension/axis will not be singleton-sized.

    Returns
    -------
    arr_reshaped : array, shape ([1, ...], N, [1,...])
        View of `arr` reshaped to the desired shape.

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> arr = rng.random(7)
    >>> reshape_nd(arr, 2, 0).shape
    (7, 1)
    >>> reshape_nd(arr, 3, 1).shape
    (1, 7, 1)
    >>> reshape_nd(arr, 4, -1).shape
    (1, 1, 1, 7)
    """
    if arr.ndim != 1:
        raise ValueError("arr must be a 1D array")
    # All-singleton target shape; -1 lets NumPy infer the length at `dim`.
    target_shape = [1] * ndim
    target_shape[dim] = -1
    return np.reshape(arr, target_shape)
def check_nD(array, ndim, arg_name='image'):
    """
    Verify an array meets the desired ndims and array isn't empty.

    Parameters
    ----------
    array : array-like
        Input array to be validated
    ndim : int or iterable of ints
        Allowable ndim or ndims for the array.
    arg_name : str, optional
        The name of the array in the original function.

    Raises
    ------
    ValueError
        If the array is empty or its dimensionality is not in `ndim`.
    """
    array = np.asanyarray(array)
    msg_incorrect_dim = "The parameter `%s` must be a %s-dimensional array"
    msg_empty_array = "The parameter `%s` cannot be an empty array"
    ndims = [ndim] if isinstance(ndim, int) else ndim
    if array.size == 0:
        raise ValueError(msg_empty_array % (arg_name))
    if array.ndim not in ndims:
        allowed = '-or-'.join(str(n) for n in ndims)
        raise ValueError(msg_incorrect_dim % (arg_name, allowed))
def convert_to_float(image, preserve_range):
    """Convert input image to float image with the appropriate range.
    Parameters
    ----------
    image : ndarray
        Input image.
    preserve_range : bool
        Determines if the range of the image should be kept or transformed
        using img_as_float. Also see
        https://scikit-image.org/docs/dev/user_guide/data_types.html
    Notes
    -----
    * Input images with `float32` data type are not upcast.
    Returns
    -------
    image : ndarray
        Transformed version of the input.
    """
    # Half precision is always promoted to single precision.
    if image.dtype == np.float16:
        return image.astype(np.float32)
    if preserve_range:
        # Promote to double only when not already single/double precision.
        if image.dtype.char in 'df':
            return image
        return image.astype(float)
    from ..util.dtype import img_as_float
    return img_as_float(image)
def _validate_interpolation_order(image_dtype, order):
"""Validate and return spline interpolation's order.
Parameters
----------
image_dtype : dtype
Image dtype.
order : int, optional
The order of the spline interpolation. The order has to be in
the range 0-5. See `skimage.transform.warp` for detail.
Returns
-------
order : int
if input order is None, returns 0 if image_dtype is bool and 1
otherwise. Otherwise, image_dtype is checked and input order
is validated accordingly (order > 0 is not supported for bool
image dtype)
"""
if order is None:
return 0 if image_dtype == bool else 1
if order < 0 or order > 5:
raise ValueError("Spline interpolation order has to be in the "
"range 0-5.")
if image_dtype == bool and order != 0:
raise ValueError(
"Input image dtype is bool. Interpolation is not defined "
"with bool data type. Please set order to 0 or explicitly "
"cast input image to another data type.")
return order
def _to_np_mode(mode):
"""Convert padding modes from `ndi.correlate` to `np.pad`."""
mode_translation_dict = dict(nearest='edge', reflect='symmetric',
mirror='reflect')
if mode in mode_translation_dict:
mode = mode_translation_dict[mode]
return mode
def _to_ndimage_mode(mode):
    """Convert from `numpy.pad` mode name to the corresponding ndimage mode."""
    translations = dict(constant='constant', edge='nearest',
                        symmetric='reflect', reflect='mirror',
                        wrap='wrap')
    ndi_mode = translations.get(mode)
    if ndi_mode is None:
        raise ValueError(
            f"Unknown mode: '{mode}', or cannot translate mode. The "
            f"mode should be one of 'constant', 'edge', 'symmetric', "
            f"'reflect', or 'wrap'. See the documentation of numpy.pad for "
            f"more info.")
    # Prefer the grid-* variants where SciPy provides them.
    return _fix_ndimage_mode(ndi_mode)
def _fix_ndimage_mode(mode):
# SciPy 1.6.0 introduced grid variants of constant and wrap which
# have less surprising behavior for images. Use these when available
grid_modes = {'constant': 'grid-constant', 'wrap': 'grid-wrap'}
return grid_modes.get(mode, mode)
# Map from a dtype's character code to the floating-point type used for
# computation: single/double precision (real and complex) are kept as-is,
# half precision is promoted, and extended precision is demoted since
# those types are not available on all platforms.
new_float_type = {
    # preserved types
    np.float32().dtype.char: np.float32,
    np.float64().dtype.char: np.float64,
    np.complex64().dtype.char: np.complex64,
    np.complex128().dtype.char: np.complex128,
    # altered types
    np.float16().dtype.char: np.float32,
    'g': np.float64, # np.float128 ; doesn't exist on windows
    'G': np.complex128, # np.complex256 ; doesn't exist on windows
}
def _supported_float_type(input_dtype, allow_complex=False):
    """Return an appropriate floating-point dtype for a given dtype.
    float32, float64, complex64, complex128 are preserved.
    float16 is promoted to float32.
    complex256 is demoted to complex128.
    Other types are cast to float64.
    Parameters
    ----------
    input_dtype : np.dtype or Iterable of np.dtype
        The input dtype. If a sequence of multiple dtypes is provided, each
        dtype is first converted to a supported floating point type and the
        final dtype is then determined by applying `np.result_type` on the
        sequence of supported floating point types.
    allow_complex : bool, optional
        If False, raise a ValueError on complex-valued inputs.
    Returns
    -------
    float_type : dtype
        Floating-point dtype for the image.
    """
    if isinstance(input_dtype, Iterable) and not isinstance(input_dtype, str):
        # Convert each dtype first, then combine via NumPy promotion rules.
        promoted = [_supported_float_type(d) for d in input_dtype]
        return np.result_type(*promoted)
    input_dtype = np.dtype(input_dtype)
    if input_dtype.kind == 'c' and not allow_complex:
        raise ValueError("complex valued input is not supported")
    return new_float_type.get(input_dtype.char, np.float64)
def identity(image, *args, **kwargs):
    """Returns the first argument unmodified."""
    # Any extra positional or keyword arguments are accepted and ignored,
    # so this can stand in for an arbitrary filter/transform callable.
    return image
def as_binary_ndarray(array, *, variable_name):
    """Return `array` as a numpy.ndarray of dtype bool.
    Raises
    ------
    ValueError:
        An error including the given `variable_name` if `array` can not be
        safely cast to a boolean array.
    """
    array = np.asarray(array)
    # Non-boolean arrays are only castable when every value is 0 or 1.
    if array.dtype != bool and np.any((array != 0) & (array != 1)):
        raise ValueError(
            f"{variable_name} array is not of dtype boolean or "
            f"contains values other than 0 and 1 so cannot be "
            f"safely cast to boolean array."
        )
    return np.asarray(array, dtype=bool)

View File

@@ -0,0 +1,168 @@
import sys
from packaging import version as _version
def ensure_python_version(min_version):
    """Raise ImportError when the running Python is older than `min_version`.

    `min_version` may be a single int (major version) or a version tuple;
    it is compared against `sys.version_info`.
    """
    if not isinstance(min_version, tuple):
        min_version = (min_version,)
    if sys.version_info >= min_version:
        return
    # since ensure_python_version is in the critical import path,
    # we lazy import it.
    from platform import python_version
    required = '.'.join(str(part) for part in min_version)
    raise ImportError("""
You are running scikit-image on an unsupported version of Python.
Unfortunately, scikit-image 0.15 and above no longer work with your installed
version of Python ({}). You therefore have two options: either upgrade to
Python {}, or install an older version of scikit-image.
For Python 2.7 or Python 3.4, use
$ pip install 'scikit-image<0.15'
Please also consider updating `pip` and `setuptools`:
$ pip install pip setuptools --upgrade
Newer versions of these tools avoid installing packages incompatible
with your version of Python.
""".format(python_version(), required))
def _check_version(actver, version, cmp_op):
    """
    Check version string of an active module against a required version.
    If dev/prerelease tags result in TypeError for string-number comparison,
    it is assumed that the dependency is satisfied.
    Users on dev branches are responsible for keeping their own packages up to
    date.
    """
    try:
        if cmp_op == '>':
            return _version.parse(actver) > _version.parse(version)
        if cmp_op == '>=':
            return _version.parse(actver) >= _version.parse(version)
        if cmp_op == '=':
            return _version.parse(actver) == _version.parse(version)
        if cmp_op == '<':
            return _version.parse(actver) < _version.parse(version)
        # Unrecognized comparison operator: treat as unsatisfied.
        return False
    except TypeError:
        # Comparison broke on dev/prerelease tags; assume satisfied.
        return True
def get_module_version(module_name):
    """Return module version or None if version can't be retrieved."""
    # `fromlist` makes __import__ return the leaf module of a dotted path
    # rather than the top-level package.
    leaf = module_name.rpartition('.')[-1]
    mod = __import__(module_name, fromlist=[leaf])
    # Most packages expose __version__; a few legacy ones use VERSION.
    fallback = getattr(mod, 'VERSION', None)
    return getattr(mod, '__version__', fallback)
def is_installed(name, version=None):
    """Test if *name* is installed.
    Parameters
    ----------
    name : str
        Name of module or "python"
    version : str, optional
        Version string to test against.
        If version is not None, checking version
        (must have an attribute named '__version__' or 'VERSION')
        Version may start with =, >=, > or < to specify the exact requirement
    Returns
    -------
    out : bool
        True if `name` is installed matching the optional version.
    """
    if name.lower() == 'python':
        actver = sys.version[:6]
    else:
        try:
            actver = get_module_version(name)
        except ImportError:
            return False
    if version is None:
        return True
    # since version_requirements is in the critical import path,
    # we lazy import re
    import re
    # Split the requirement into comparison symbol and version number.
    match = re.search('[0-9]', version)
    assert match is not None, "Invalid version number"
    symb = version[:match.start()] or '='
    assert symb in ('>=', '>', '=', '<'), \
        f"Invalid version condition '{symb}'"
    return _check_version(actver, version[match.start():], symb)
def require(name, version=None):
    """Return decorator that forces a requirement for a function or class.
    Parameters
    ----------
    name : str
        Name of module or "python".
    version : str, optional
        Version string to test against.
        If version is not None, checking version
        (must have an attribute named '__version__' or 'VERSION')
        Version may start with =, >=, > or < to specify the exact requirement
    Returns
    -------
    func : function
        A decorator that raises an ImportError if a function is run
        in the absence of the input dependency.
    """
    # since version_requirements is in the critical import path, we lazy import
    # functools
    import functools

    def decorator(obj):
        @functools.wraps(obj)
        def func_wrapped(*args, **kwargs):
            # The requirement is checked at call time, not decoration time.
            if not is_installed(name, version):
                msg = f'"{obj}" in "{obj.__module__}" requires "{name}'
                if version is not None:
                    msg += f" {version}"
                raise ImportError(msg + '"')
            return obj(*args, **kwargs)
        return func_wrapped
    return decorator
def get_module(module_name, version=None):
    """Return a module object of name *module_name* if installed.
    Parameters
    ----------
    module_name : str
        Name of module.
    version : str, optional
        Version string to test against.
        If version is not None, checking version
        (must have an attribute named '__version__' or 'VERSION')
        Version may start with =, >=, > or < to specify the exact requirement
    Returns
    -------
    mod : module or None
        Module if *module_name* is installed matching the optional version
        or None otherwise.
    """
    if not is_installed(module_name, version):
        return None
    # `fromlist` makes __import__ return the leaf module of a dotted path.
    leaf = module_name.rpartition('.')[-1]
    return __import__(module_name, fromlist=[leaf])