This commit is contained in:
ton
2024-10-07 10:13:40 +07:00
parent aa1631742f
commit 3a7d696db6
9729 changed files with 1832837 additions and 161742 deletions

View File

@@ -0,0 +1,61 @@
"""General utility functions.
This module contains a number of utility functions to work with images in general.
"""
import functools
import warnings
import numpy as np
# keep .dtype imports first to avoid circular imports
from .dtype import (
dtype_limits,
img_as_float,
img_as_float32,
img_as_float64,
img_as_bool,
img_as_int,
img_as_ubyte,
img_as_uint,
)
from ._slice_along_axes import slice_along_axes
from ._invert import invert
from ._label import label_points
from ._montage import montage
from ._map_array import map_array
from ._regular_grid import regular_grid, regular_seeds
from .apply_parallel import apply_parallel
from .arraycrop import crop
from .compare import compare_images
from .noise import random_noise
from .shape import view_as_blocks, view_as_windows
from .unique import unique_rows
from .lookfor import lookfor
# Explicit public API of ``skimage.util``; names not listed here are private.
__all__ = [
    'img_as_float32',
    'img_as_float64',
    'img_as_float',
    'img_as_int',
    'img_as_uint',
    'img_as_ubyte',
    'img_as_bool',
    'dtype_limits',
    'view_as_blocks',
    'view_as_windows',
    'slice_along_axes',
    'crop',
    'compare_images',
    'map_array',
    'montage',
    'random_noise',
    'regular_grid',
    'regular_seeds',
    'apply_parallel',
    'invert',
    'unique_rows',
    'label_points',
    'lookfor',
]

View File

@@ -0,0 +1,74 @@
import numpy as np
from .dtype import dtype_limits
def invert(image, signed_float=False):
    """Invert the intensity range of an image.

    The dtype maximum of the input is mapped to the dtype minimum and
    vice-versa. The exact rule depends on the input dtype:

    - boolean: logical negation
    - unsigned integers: subtract the image from the dtype maximum
    - signed integers: subtract the image from -1 (see Notes)
    - floats: subtract the image from 1 when `signed_float` is False
      (range assumed to be [0, 1]), or negate it when `signed_float` is
      True (range assumed to be [-1, 1]).

    Parameters
    ----------
    image : ndarray
        Input image.
    signed_float : bool, optional
        If True and the image is of type float, the range is assumed to
        be [-1, 1]. If False and the image is of type float, the range is
        assumed to be [0, 1].

    Returns
    -------
    inverted : ndarray
        Inverted image, with the same shape and dtype as the input.

    Notes
    -----
    For signed integers, simply multiplying by -1 would be wrong because
    integer ranges are asymmetric: for np.int8 the range is [-128, 127],
    so -128 * -1 overflows back to -128. Subtracting from -1 maps the
    dtype maximum to the dtype minimum correctly.

    Examples
    --------
    >>> img = np.array([[100, 0, 200],
    ...                 [  0, 50,   0],
    ...                 [ 30,  0, 255]], np.uint8)
    >>> invert(img)
    array([[155, 255,  55],
           [255, 205, 255],
           [225, 255,   0]], dtype=uint8)

    >>> img2 = np.array([[ -2, 0, -128],
    ...                  [127, 0,    5]], np.int8)
    >>> invert(img2)
    array([[   1,   -1,  127],
           [-128,   -1,   -6]], dtype=int8)

    >>> img3 = np.array([[0., 1., 0.5, 0.75]])
    >>> invert(img3)
    array([[1.  , 0.  , 0.5 , 0.25]])

    >>> img4 = np.array([[0., 1., -1., -0.25]])
    >>> invert(img4, signed_float=True)
    array([[-0.  , -1.  ,  1.  ,  0.25]])
    """
    if image.dtype == 'bool':
        return ~image
    if np.issubdtype(image.dtype, np.unsignedinteger):
        upper = dtype_limits(image, clip_negative=False)[1]
        return np.subtract(upper, image, dtype=image.dtype)
    if np.issubdtype(image.dtype, np.signedinteger):
        # Subtract from -1 rather than negate; see Notes.
        return np.subtract(-1, image, dtype=image.dtype)
    # Float dtype: reflect around 0 for signed ranges, around 0.5 otherwise.
    if signed_float:
        return -image
    return np.subtract(1, image, dtype=image.dtype)

View File

@@ -0,0 +1,51 @@
import numpy as np
__all__ = ["label_points"]
def label_points(coords, output_shape):
    """Assign unique integer labels to coordinates on an image mask.

    Parameters
    ----------
    coords : ndarray
        An array of N coordinates with dimension D.
    output_shape : tuple
        The shape of the mask on which `coords` are labelled.

    Returns
    -------
    labels : ndarray
        A mask of zeroes containing unique integer labels at the `coords`.

    Raises
    ------
    ValueError
        If the coordinate dimensionality does not match `output_shape`, or
        if any coordinate is negative.
    IndexError
        If a coordinate lies outside the mask.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util._label import label_points
    >>> coords = np.array([[0, 1], [2, 2]])
    >>> output_shape = (5, 5)
    >>> mask = label_points(coords, output_shape)
    >>> mask
    array([[0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 2, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]], dtype=uint64)

    Notes
    -----
    - Labels are assigned in row order of `coords`, starting from 1;
      coordinates are rounded to the nearest integer.
    """
    if coords.shape[1] != len(output_shape):
        raise ValueError("Dimensionality of points should match the output shape")
    if (coords < 0).any():
        raise ValueError("Coordinates should be positive and start from 0")
    # Round to integer pixel positions and convert rows to an index tuple.
    indices = tuple(np.round(coords).astype(int, copy=False).T)
    labels = np.zeros(output_shape, dtype=np.uint64)
    labels[indices] = np.arange(1, coords.shape[0] + 1)
    return labels

View File

@@ -0,0 +1,199 @@
import numpy as np
def map_array(input_arr, input_vals, output_vals, out=None):
    """Map values in `input_arr` from `input_vals` to `output_vals`.

    Parameters
    ----------
    input_arr : array of int, shape (M[, ...])
        The input label image.
    input_vals : array of int, shape (K,)
        The values to map from.
    output_vals : array, shape (K,)
        The values to map to.
    out : array, same shape as `input_arr`
        The output array. Will be created if not provided. It should
        have the same dtype as `output_vals`.

    Returns
    -------
    out : array, same shape as `input_arr`
        The array of mapped values.

    Notes
    -----
    Values in `input_arr` that do not appear in `input_vals` map to 0.

    Examples
    --------
    >>> import numpy as np
    >>> import skimage as ski
    >>> ski.util.map_array(
    ...     input_arr=np.array([[0, 2, 2, 0], [3, 4, 5, 0]]),
    ...     input_vals=np.array([1, 2, 3, 4, 6]),
    ...     output_vals=np.array([6, 7, 8, 9, 10]),
    ... )
    array([[0, 7, 7, 0],
           [8, 9, 0, 0]])
    """
    from ._remap import _map_array

    if not np.issubdtype(input_arr.dtype, np.integer):
        raise TypeError('The dtype of an array to be remapped should be integer.')
    in_shape = input_arr.shape
    # Flatten for the Cython kernel; `reshape(-1)` returns a view whenever
    # possible (per the NumPy `ravel` docs).
    flat_in = input_arr.reshape(-1)
    if out is None:
        out = np.empty(in_shape, dtype=output_vals.dtype)
    elif out.shape != in_shape:
        raise ValueError(
            'If out array is provided, it should have the same shape as '
            f'the input array. Input array has shape {in_shape}, provided '
            f'output array has shape {out.shape}.'
        )
    try:
        flat_out = out.view()
        # Assigning to `.shape` raises AttributeError if a copy would be
        # needed, which is exactly the zero-copy guarantee we want.
        flat_out.shape = (-1,)
    except AttributeError:
        raise ValueError(
            'If out array is provided, it should be either contiguous '
            f'or 1-dimensional. Got array with shape {out.shape} and '
            f'strides {out.strides}.'
        )
    # Match dtypes before handing off to the compiled routine.
    _map_array(
        flat_in,
        flat_out,
        input_vals.astype(flat_in.dtype, copy=False),
        output_vals.astype(out.dtype, copy=False),
    )
    return out
class ArrayMap:
    """Class designed to mimic mapping by NumPy array indexing.

    This class replicates the use of NumPy arrays as lookup tables:

    >>> values = np.array([0.25, 0.5, 1.0])
    >>> indices = np.array([[0, 0, 1], [2, 2, 1]])
    >>> values[indices]
    array([[0.25, 0.25, 0.5 ],
           [1.  , 1.  , 0.5 ]])

    That idiom requires a very large ``values`` array whenever the values
    in ``indices`` are large:

    >>> values = np.array([0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0])
    >>> indices = np.array([[0, 0, 10], [0, 10, 10]])
    >>> values[indices]
    array([[0.25, 0.25, 1.  ],
           [0.25, 1.  , 1.  ]])

    With this class the approach is similar, but the dense values array is
    never materialized:

    >>> in_indices = np.array([0, 10])
    >>> out_values = np.array([0.25, 1.0])
    >>> values = ArrayMap(in_indices, out_values)
    >>> values
    ArrayMap(array([ 0, 10]), array([0.25, 1.  ]))
    >>> print(values)
    ArrayMap:
      0 → 0.25
      10 → 1.0
    >>> indices = np.array([[0, 0, 10], [0, 10, 10]])
    >>> values[indices]
    array([[0.25, 0.25, 1.  ],
           [0.25, 1.  , 1.  ]])

    Parameters
    ----------
    in_values : array of int, shape (K,)
        The source values from which to map.
    out_values : array, shape (K,)
        The destination values from which to map.
    """

    def __init__(self, in_values, out_values):
        self.in_values = in_values
        self.out_values = out_values
        # Maximum number of mapping rows shown by __str__ before eliding.
        self._max_str_lines = 4
        # Dense array, built lazily on first __setitem__.
        self._array = None

    def __len__(self):
        """Return one more than the maximum label value being remapped."""
        return np.max(self.in_values) + 1

    def __array__(self, dtype=None):
        """Return a dense array that behaves like this map when indexed.

        The result can be very large: its length is the largest value in
        ``in_values`` plus one; unmapped positions are 0.
        """
        if dtype is None:
            dtype = self.out_values.dtype
        dense = np.zeros(np.max(self.in_values) + 1, dtype=dtype)
        dense[self.in_values] = self.out_values
        return dense

    @property
    def dtype(self):
        return self.out_values.dtype

    def __repr__(self):
        return f'ArrayMap({self.in_values!r}, {self.out_values!r})'

    def __str__(self):
        def fmt(i):
            return f'  {self.in_values[i]}{self.out_values[i]}'

        if len(self.in_values) <= self._max_str_lines + 1:
            body = [fmt(i) for i in range(len(self.in_values))]
        else:
            # Show the first and last few rows with an ellipsis between.
            head = [fmt(i) for i in range(0, self._max_str_lines // 2)]
            tail = [fmt(i) for i in range(-self._max_str_lines // 2, 0)]
            body = head + ['  ...'] + tail
        return '\n'.join(['ArrayMap:'] + body)

    def __call__(self, arr):
        return self.__getitem__(arr)

    def __getitem__(self, index):
        scalar = np.isscalar(index)
        if scalar:
            index = np.array([index])
        elif isinstance(index, slice):
            start = index.start or 0  # treat None or 0 the same way
            stop = index.stop if index.stop is not None else len(self)
            index = np.arange(start, stop, index.step)
        if index.dtype == bool:
            index = np.flatnonzero(index)
        mapped = map_array(
            index,
            self.in_values.astype(index.dtype, copy=False),
            self.out_values,
        )
        return mapped[0] if scalar else mapped

    def __setitem__(self, indices, values):
        if self._array is None:
            self._array = self.__array__()
        self._array[indices] = values
        # Rebuild the sparse representation from the dense array.
        self.in_values = np.flatnonzero(self._array)
        self.out_values = self._array[self.in_values]

View File

@@ -0,0 +1,158 @@
import numpy as np
from .._shared import utils
from .. import exposure
__all__ = ['montage']
@utils.channel_as_last_axis(multichannel_output=False)
def montage(
    arr_in,
    fill='mean',
    rescale_intensity=False,
    grid_shape=None,
    padding_width=0,
    *,
    channel_axis=None,
):
    """Create a montage of several single- or multichannel images.

    Build a rectangular montage from an ensemble of equally shaped
    single- (gray) or multichannel (color) images. For example,
    ``montage(arr_in)`` called with the following `arr_in`

    +---+---+---+
    | 1 | 2 | 3 |
    +---+---+---+

    will return

    +---+---+
    | 1 | 2 |
    +---+---+
    | 3 | * |
    +---+---+

    where the '*' patch is determined by the `fill` parameter.

    Parameters
    ----------
    arr_in : ndarray, shape (K, M, N[, C])
        An array representing an ensemble of `K` images of equal shape.
    fill : float or array-like of floats or 'mean', optional
        Value used for the padding areas and any extra tiles. Must be a
        `float` for single-channel collections and an array-like with one
        entry per channel for multichannel ones. If `mean`, the mean value
        over all images is used.
    rescale_intensity : bool, optional
        Whether to rescale the intensity of each image to [0, 1].
    grid_shape : tuple, optional
        The desired grid shape `(ntiles_row, ntiles_column)`. Defaults to
        an approximately square grid.
    padding_width : int, optional
        Spacing between the tiles and around the borders; non-zero values
        make individual image boundaries easier to perceive.
    channel_axis : int or None, optional
        If None, the image is assumed to be grayscale (single channel).
        Otherwise, the axis of the array that corresponds to channels.

    Returns
    -------
    arr_out : (K*(M+p)+p, K*(N+p)+p[, C]) ndarray
        Output array with input images glued together (including padding `p`).

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util import montage
    >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2)
    >>> arr_in  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0,  1],
            [ 2,  3]],
           [[ 4,  5],
            [ 6,  7]],
           [[ 8,  9],
            [10, 11]]])
    >>> arr_out = montage(arr_in)
    >>> arr_out.shape
    (4, 4)
    >>> arr_out
    array([[ 0,  1,  4,  5],
           [ 2,  3,  6,  7],
           [ 8,  9,  5,  5],
           [10, 11,  5,  5]])
    >>> arr_in.mean()
    5.5
    >>> arr_out_nonsquare = montage(arr_in, grid_shape=(1, 3))
    >>> arr_out_nonsquare
    array([[ 0,  1,  4,  5,  8,  9],
           [ 2,  3,  6,  7, 10, 11]])
    >>> arr_out_nonsquare.shape
    (2, 6)
    """
    arr_in = np.asarray(arr_in)
    if channel_axis is None:
        # Append a singleton channel axis so the code below is uniform.
        arr_in = arr_in[..., np.newaxis]
    if arr_in.ndim != 4:
        raise ValueError(
            'Input array has to be 3-dimensional for grayscale '
            'images, or 4-dimensional with a `channel_axis` '
            'specified.'
        )

    n_images, n_rows, n_cols, n_chan = arr_in.shape

    if grid_shape:
        ntiles_row, ntiles_col = (int(s) for s in grid_shape)
    else:
        # Approximately square grid by default.
        ntiles_row = ntiles_col = int(np.ceil(np.sqrt(n_images)))

    # Rescale intensity if requested (in place, image by image).
    if rescale_intensity:
        for idx in range(n_images):
            arr_in[idx] = exposure.rescale_intensity(arr_in[idx])

    # Resolve the fill value (per channel).
    if fill == 'mean':
        fill = arr_in.mean(axis=(0, 1, 2))
    fill = np.atleast_1d(fill).astype(arr_in.dtype)

    # Pre-allocate the output, including padding, and flood it with `fill`.
    pad = padding_width
    arr_out = np.empty(
        (
            (n_rows + pad) * ntiles_row + pad,
            (n_cols + pad) * ntiles_col + pad,
            n_chan,
        ),
        dtype=arr_in.dtype,
    )
    for c in range(n_chan):
        arr_out[..., c] = fill[c]

    row_slices = [
        slice(pad + (n_rows + pad) * t, pad + (n_rows + pad) * t + n_rows)
        for t in range(ntiles_row)
    ]
    col_slices = [
        slice(pad + (n_cols + pad) * t, pad + (n_cols + pad) * t + n_cols)
        for t in range(ntiles_col)
    ]

    # Paste each image into its tile (row-major order).
    for idx, image in enumerate(arr_in):
        arr_out[row_slices[idx // ntiles_col], col_slices[idx % ntiles_col], :] = image

    return arr_out if channel_axis is not None else arr_out[..., 0]

View File

@@ -0,0 +1,114 @@
import numpy as np
def regular_grid(ar_shape, n_points):
    """Find `n_points` regularly spaced along `ar_shape`.

    The returned points (as slices) are as close to cubically-spaced as
    possible: the spacing along each dimension is roughly the Nth root of
    the array size, N being the number of dimensions. A dimension too small
    to fit a full step is "discarded" (given a single step spanning it), and
    the spacing is recomputed for the remaining dimensions.

    Parameters
    ----------
    ar_shape : array-like of ints
        The shape of the space embedding the grid. ``len(ar_shape)`` is the
        number of dimensions.
    n_points : int
        The (approximate) number of points to embed in the space.

    Returns
    -------
    slices : tuple of slice objects
        A slice along each dimension of `ar_shape`, such that the
        intersection of all the slices gives the coordinates of regularly
        spaced points.

        .. versionchanged:: 0.14.1
            In scikit-image 0.14.1 and 0.15, the return type was changed from
            a list to a tuple to ensure `compatibility with NumPy 1.15`_ and
            higher. If your code requires the returned result to be a list,
            you may convert the output of this function to a list with:

            >>> result = list(regular_grid(ar_shape=(3, 20, 40), n_points=8))

    .. _compatibility with NumPy 1.15: https://github.com/numpy/numpy/blob/master/doc/release/1.15.0-notes.rst#deprecations

    Examples
    --------
    >>> ar = np.zeros((20, 40))
    >>> g = regular_grid(ar.shape, 8)
    >>> g
    (slice(5, None, 10), slice(5, None, 10))
    >>> ar[g] = 1
    >>> ar.sum()
    8.0
    >>> ar = np.zeros((20, 40))
    >>> g = regular_grid(ar.shape, 32)
    >>> g
    (slice(2, None, 5), slice(2, None, 5))
    >>> ar[g] = 1
    >>> ar.sum()
    32.0
    >>> ar = np.zeros((3, 20, 40))
    >>> g = regular_grid(ar.shape, 8)
    >>> g
    (slice(1, None, 3), slice(5, None, 10), slice(5, None, 10))
    >>> ar[g] = 1
    >>> ar.sum()
    8.0
    """
    shape = np.asanyarray(ar_shape)
    ndim = len(shape)
    # Permutation that maps sorted-dimension order back to caller order.
    unsort = np.argsort(np.argsort(shape))
    dims = np.sort(shape)
    size = float(np.prod(shape))
    if size <= n_points:
        # More points requested than cells: take everything.
        return (slice(None),) * ndim
    steps = np.full(ndim, (size / n_points) ** (1.0 / ndim), dtype='float64')
    if (dims < steps).any():
        # Dimensions too small for the uniform step get a single step
        # spanning them; redistribute the step size over the rest.
        for d in range(ndim):
            steps[d] = dims[d]
            remaining = float(np.prod(dims[d + 1 :]))
            steps[d + 1 :] = (remaining / n_points) ** (1.0 / (ndim - d - 1))
            if (dims >= steps).all():
                break
    starts = (steps // 2).astype(int)
    steps = np.round(steps).astype(int)
    grid = [slice(a, None, s) for a, s in zip(starts, steps)]
    return tuple(grid[i] for i in unsort)
def regular_seeds(ar_shape, n_points, dtype=int):
    """Return an image with ~`n_points` regularly-spaced nonzero pixels.

    Parameters
    ----------
    ar_shape : tuple of int
        The shape of the desired output image.
    n_points : int
        The desired number of nonzero points.
    dtype : numpy data type, optional
        The desired data type of the output.

    Returns
    -------
    seed_img : array of int or bool
        The desired image: zeros everywhere except at the grid points,
        which carry the labels 1..n in grid order.

    Examples
    --------
    >>> regular_seeds((5, 5), 4)
    array([[0, 0, 0, 0, 0],
           [0, 1, 0, 2, 0],
           [0, 0, 0, 0, 0],
           [0, 3, 0, 4, 0],
           [0, 0, 0, 0, 0]])
    """
    grid = regular_grid(ar_shape, n_points)
    seed_img = np.zeros(ar_shape, dtype=dtype)
    # Label the grid points 1..n, preserving the grid's own shape.
    n_seeds = seed_img[grid].size
    seed_img[grid] = np.arange(1, n_seeds + 1).reshape(seed_img[grid].shape)
    return seed_img

View File

@@ -0,0 +1,86 @@
__all__ = ['slice_along_axes']
def slice_along_axes(image, slices, axes=None, copy=False):
    """Slice an image along given axes.

    Parameters
    ----------
    image : ndarray
        Input image.
    slices : list of 2-tuple (a, b) where a < b.
        For each axis in `axes`, a corresponding 2-tuple
        ``(min_val, max_val)`` to slice with (as with Python slices,
        ``max_val`` is non-inclusive). Negative values are interpreted
        modulo the axis length.
    axes : int or tuple, optional
        Axes corresponding to the limits given in `slices`. If None,
        axes are in ascending order, up to the length of `slices`.
    copy : bool, optional
        If True, ensure that the output is not a view of `image`.

    Returns
    -------
    out : ndarray
        The region of `image` corresponding to the given slices and axes.

    Examples
    --------
    >>> from skimage import data
    >>> img = data.camera()
    >>> img.shape
    (512, 512)
    >>> cropped_img = slice_along_axes(img, [(0, 100)])
    >>> cropped_img.shape
    (100, 512)
    >>> cropped_img = slice_along_axes(img, [(0, 100), (0, 100)])
    >>> cropped_img.shape
    (100, 100)
    >>> cropped_img = slice_along_axes(img, [(0, 100), (0, 75)], axes=[1, 0])
    >>> cropped_img.shape
    (75, 100)
    """
    # An empty (or None) list of slices means "no cropping at all".
    if not slices:
        return image

    if axes is None:
        axes = list(range(image.ndim))
        if len(axes) < len(slices):
            raise ValueError("More `slices` than available axes")
    elif len(axes) != len(slices):
        raise ValueError("`axes` and `slices` must have equal length")
    if len(axes) != len(set(axes)):
        raise ValueError("`axes` must be unique")
    if not all(0 <= ax < image.ndim for ax in axes):
        raise ValueError(
            f"axes {axes} out of range; image has only " f"{image.ndim} dimensions"
        )

    bounds = [slice(None)] * image.ndim
    for (a, b), ax in zip(slices, axes):
        # Wrap negative limits around the axis length, like Python indexing.
        if a < 0:
            a %= image.shape[ax]
        if b < 0:
            b %= image.shape[ax]
        if a > b:
            raise ValueError(
                f"Invalid slice ({a}, {b}): must be ordered `(min_val, max_val)`"
            )
        if a < 0 or b > image.shape[ax]:
            raise ValueError(
                f"Invalid slice ({a}, {b}) for image with dimensions {image.shape}"
            )
        bounds[ax] = slice(a, b)

    sliced = image[tuple(bounds)]
    if copy and sliced.base is not None:
        sliced = sliced.copy()
    return sliced

View File

@@ -0,0 +1,213 @@
import numpy
__all__ = ['apply_parallel']
def _get_chunks(shape, ncpu):
"""Split the array into equal sized chunks based on the number of
available processors. The last chunk in each dimension absorbs the
remainder array elements if the number of CPUs does not divide evenly into
the number of array elements.
Examples
--------
>>> _get_chunks((4, 4), 4)
((2, 2), (2, 2))
>>> _get_chunks((4, 4), 2)
((2, 2), (4,))
>>> _get_chunks((5, 5), 2)
((2, 3), (5,))
>>> _get_chunks((2, 4), 2)
((1, 1), (4,))
"""
# since apply_parallel is in the critical import path, we lazy import
# math just when we need it.
from math import ceil
chunks = []
nchunks_per_dim = int(ceil(ncpu ** (1.0 / len(shape))))
used_chunks = 1
for i in shape:
if used_chunks < ncpu:
regular_chunk = i // nchunks_per_dim
remainder_chunk = regular_chunk + (i % nchunks_per_dim)
if regular_chunk == 0:
chunk_lens = (remainder_chunk,)
else:
chunk_lens = (regular_chunk,) * (nchunks_per_dim - 1) + (
remainder_chunk,
)
else:
chunk_lens = (i,)
chunks.append(chunk_lens)
used_chunks *= nchunks_per_dim
return tuple(chunks)
def _ensure_dask_array(array, chunks=None):
    """Wrap `array` in a dask array with `chunks`, unless it already is one."""
    import dask.array as da

    if isinstance(array, da.Array):
        return array
    return da.from_array(array, chunks=chunks)
def apply_parallel(
    function,
    array,
    chunks=None,
    depth=0,
    mode=None,
    extra_arguments=(),
    extra_keywords=None,
    *,
    dtype=None,
    compute=None,
    channel_axis=None,
):
    """Map a function in parallel across an array.

    Split an array into possibly overlapping chunks of a given depth and
    boundary type, call the given function in parallel on the chunks, combine
    the chunks and return the resulting array.

    Parameters
    ----------
    function : function
        Function to be mapped which takes an array as an argument.
    array : numpy array or dask array
        Array which the function will be applied to.
    chunks : int, tuple, or tuple of tuples, optional
        A single integer is interpreted as the length of one side of a square
        chunk that should be tiled across the array. One tuple of length
        ``array.ndim`` represents the shape of a chunk, and it is tiled across
        the array. A list of tuples of length ``ndim``, where each sub-tuple
        is a sequence of chunk sizes along the corresponding dimension. If
        None, the array is broken up into chunks based on the number of
        available cpus. More information about chunks is in the documentation
        `here <https://dask.pydata.org/en/latest/array-design.html>`_. When
        `channel_axis` is not None, the tuples can be length ``ndim - 1`` and
        a single chunk will be used along the channel axis.
    depth : int or sequence of int, optional
        The depth of the added boundary cells. A tuple can be used to specify a
        different depth per array axis. Defaults to zero. When `channel_axis`
        is not None, and a tuple of length ``ndim - 1`` is provided, a depth of
        0 will be used along the channel axis.
    mode : {'reflect', 'symmetric', 'periodic', 'wrap', 'nearest', 'edge'}, optional
        Type of external boundary padding.
    extra_arguments : tuple, optional
        Tuple of arguments to be passed to the function.
    extra_keywords : dictionary, optional
        Dictionary of keyword arguments to be passed to the function.
    dtype : data-type or None, optional
        The data-type of the `function` output. If None, Dask will attempt to
        infer this by calling the function on data of shape ``(1,) * ndim``.
        For functions expecting RGB or multichannel data this may be
        problematic. In such cases, the user should manually specify this dtype
        argument instead.

        .. versionadded:: 0.18
            ``dtype`` was added in 0.18.
    compute : bool, optional
        If ``True``, compute eagerly returning a NumPy Array.
        If ``False``, compute lazily returning a Dask Array.
        If ``None`` (default), compute based on array type provided
        (eagerly for NumPy Arrays and lazily for Dask Arrays).
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

    Returns
    -------
    out : ndarray or dask Array
        Returns the result of the applying the operation.
        Type is dependent on the ``compute`` argument.

    Notes
    -----
    Numpy edge modes 'symmetric', 'wrap', and 'edge' are converted to the
    equivalent ``dask`` boundary modes 'reflect', 'periodic' and 'nearest',
    respectively.
    Setting ``compute=False`` can be useful for chaining later operations.
    For example region selection to preview a result or storing large data
    to disk instead of loading in memory.
    """
    try:
        # Importing dask takes time. since apply_parallel is on the
        # minimum import path of skimage, we lazy attempt to import dask
        import dask.array as da
    except ImportError:
        raise RuntimeError(
            "Could not import 'dask'. Please install " "using 'pip install dask'"
        )

    if extra_keywords is None:
        extra_keywords = {}

    if compute is None:
        # Eager for plain NumPy input, lazy when the caller passed a dask array.
        compute = not isinstance(array, da.Array)

    if channel_axis is not None:
        # Normalize a possibly-negative channel axis to a non-negative index.
        channel_axis = channel_axis % array.ndim

    if chunks is None:
        shape = array.shape
        try:
            # since apply_parallel is in the critical import path, we lazy
            # import multiprocessing just when we need it.
            from multiprocessing import cpu_count

            ncpu = cpu_count()
        except NotImplementedError:
            # cpu_count can raise NotImplementedError on exotic platforms.
            ncpu = 4
        if channel_axis is not None:
            # use a single chunk along the channel axis
            spatial_shape = shape[:channel_axis] + shape[channel_axis + 1 :]
            chunks = list(_get_chunks(spatial_shape, ncpu))
            chunks.insert(channel_axis, shape[channel_axis])
            chunks = tuple(chunks)
        else:
            chunks = _get_chunks(shape, ncpu)
    elif channel_axis is not None and len(chunks) == array.ndim - 1:
        # insert a single chunk along the channel axis
        chunks = list(chunks)
        chunks.insert(channel_axis, array.shape[channel_axis])
        chunks = tuple(chunks)

    # Translate NumPy pad-mode names to their dask ``boundary`` equivalents.
    if mode == 'wrap':
        mode = 'periodic'
    elif mode == 'symmetric':
        mode = 'reflect'
    elif mode == 'edge':
        mode = 'nearest'
    elif mode is None:
        # default value for Dask.
        # Note: that for dask >= 2022.03 it will change to 'none' so we set it
        # here for consistent behavior across Dask versions.
        mode = 'reflect'

    if channel_axis is not None:
        if numpy.isscalar(depth):
            # depth is zero along channel_axis
            depth = [depth] * (array.ndim - 1)
        depth = list(depth)
        if len(depth) == array.ndim - 1:
            depth.insert(channel_axis, 0)
        depth = tuple(depth)

    def wrapped_func(arr):
        # Bind the extra positional/keyword arguments for map_overlap.
        return function(arr, *extra_arguments, **extra_keywords)

    darr = _ensure_dask_array(array, chunks=chunks)

    res = darr.map_overlap(wrapped_func, depth, boundary=mode, dtype=dtype)
    if compute:
        res = res.compute()

    return res

View File

@@ -0,0 +1,72 @@
"""
The arraycrop module contains functions to crop values from the edges of an
n-dimensional array.
"""
import numpy as np
from numbers import Integral
__all__ = ['crop']
def crop(ar, crop_width, copy=False, order='K'):
    """Crop array `ar` by `crop_width` along each dimension.

    Parameters
    ----------
    ar : array-like of rank N
        Input array.
    crop_width : {sequence, int}
        Number of values to remove from the edges of each axis.
        ``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies
        unique crop widths at the start and end of each axis.
        ``((before, after),) or (before, after)`` specifies
        a fixed start and end crop for every axis.
        ``(n,)`` or ``n`` for integer ``n`` is a shortcut for
        before = after = ``n`` for all axes.
    copy : bool, optional
        If `True`, ensure the returned array is a contiguous copy. Normally,
        a crop operation will return a discontiguous view of the underlying
        input array.
    order : {'C', 'F', 'A', 'K'}, optional
        If ``copy==True``, control the memory layout of the copy. See
        ``np.copy``.

    Returns
    -------
    cropped : array
        The cropped array. If ``copy=False`` (default), this is a sliced
        view of the input array.

    Raises
    ------
    ValueError
        If `crop_width` has an invalid length.
    """
    # Use `np.asarray` rather than `np.array(ar, copy=False)`: under
    # NumPy >= 2.0, `copy=False` raises ValueError whenever a copy would
    # be required instead of silently copying.
    ar = np.asarray(ar)
    if isinstance(crop_width, Integral):
        # Scalar shortcut: same crop before and after, on every axis.
        crops = [[crop_width, crop_width]] * ar.ndim
    elif isinstance(crop_width[0], Integral):
        if len(crop_width) == 1:
            # (n,) shortcut.
            crops = [[crop_width[0], crop_width[0]]] * ar.ndim
        elif len(crop_width) == 2:
            # (before, after) applied to every axis.
            crops = [crop_width] * ar.ndim
        else:
            raise ValueError(
                f'crop_width has an invalid length: {len(crop_width)}\n'
                f'crop_width should be a sequence of N pairs, '
                f'a single pair, or a single integer'
            )
    elif len(crop_width) == 1:
        # ((before, after),) applied to every axis.
        crops = [crop_width[0]] * ar.ndim
    elif len(crop_width) == ar.ndim:
        # One (before, after) pair per axis.
        crops = crop_width
    else:
        raise ValueError(
            f'crop_width has an invalid length: {len(crop_width)}\n'
            f'crop_width should be a sequence of N pairs, '
            f'a single pair, or a single integer'
        )

    slices = tuple(slice(a, ar.shape[i] - b) for i, (a, b) in enumerate(crops))
    if copy:
        cropped = np.array(ar[slices], order=order, copy=True)
    else:
        cropped = ar[slices]
    return cropped

View File

@@ -0,0 +1,132 @@
import functools
import warnings
from itertools import product
import numpy as np
from .dtype import img_as_float
def _rename_image_params(func):
wm_images = (
"Since version 0.24, the two input images are named `image0` and "
"`image1` (instead of `image1` and `image2`, respectively). Please use "
"`image0, image1` to avoid this warning for now, and avoid an error "
"from version 0.26 onwards."
)
wm_method = (
"Starting in version 0.24, all arguments following `image0, image1` "
"(including `method`) will be keyword-only. Please pass `method=` "
"in the function call to avoid this warning for now, and avoid an error "
"from version 0.26 onwards."
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Turn all args into kwargs
for i, (value, param) in enumerate(
zip(args, ["image0", "image1", "method", "n_tiles"])
):
if i >= 2:
warnings.warn(wm_method, category=FutureWarning, stacklevel=2)
if param in kwargs:
raise ValueError(
f"{param} passed both as positional and keyword argument."
)
else:
kwargs[param] = value
args = tuple()
# Account for `image2` if given
if "image2" in kwargs.keys():
warnings.warn(wm_images, category=FutureWarning, stacklevel=2)
# Safely move `image2` to `image1` if that's empty
if "image1" in kwargs.keys():
# Safely move `image1` to `image0`
if "image0" in kwargs.keys():
raise ValueError(
"Three input images given; please use only `image0` "
"and `image1`."
)
kwargs["image0"] = kwargs.pop("image1")
kwargs["image1"] = kwargs.pop("image2")
return func(*args, **kwargs)
return wrapper
@_rename_image_params
def compare_images(image0, image1, *, method='diff', n_tiles=(8, 8)):
    """Return an image showing the differences between two images.

    .. versionadded:: 0.16

    Parameters
    ----------
    image0, image1 : ndarray, shape (M, N)
        Images to process, must be of the same shape.

        .. versionchanged:: 0.24
            `image1` and `image2` were renamed into `image0` and `image1`
            respectively.
    method : string, optional
        Method used for the comparison.
        Valid values are {'diff', 'blend', 'checkerboard'}.
        Details are provided in the note section.

        .. versionchanged:: 0.24
            This parameter and following ones are keyword-only.
    n_tiles : tuple, optional
        Used only for the `checkerboard` method. Specifies the number
        of tiles (row, column) to divide the image.

    Returns
    -------
    comparison : ndarray, shape (M, N)
        Image showing the differences.

    Notes
    -----
    ``'diff'`` computes the absolute difference between the two images.
    ``'blend'`` computes the mean value.
    ``'checkerboard'`` makes tiles of dimension `n_tiles` that display
    alternatively the first and the second image. Note that images must be
    2-dimensional to be compared with the checkerboard method.
    """
    if image1.shape != image0.shape:
        raise ValueError('Images must have the same shape.')

    img1 = img_as_float(image0)
    img2 = img_as_float(image1)

    if method == 'diff':
        return np.abs(img2 - img1)
    if method == 'blend':
        return 0.5 * (img2 + img1)
    if method == 'checkerboard':
        if img1.ndim != 2:
            raise ValueError(
                'Images must be 2-dimensional to be compared with the '
                'checkerboard method.'
            )
        n_rows, n_cols = img1.shape
        # Integer tile size; any remainder stays with the second image.
        step_r = int(n_rows / n_tiles[0])
        step_c = int(n_cols / n_tiles[1])
        mask = np.full((n_rows, n_cols), False)
        for i, j in product(range(n_tiles[0]), range(n_tiles[1])):
            if (i + j) % 2 == 0:
                mask[i * step_r : (i + 1) * step_r, j * step_c : (j + 1) * step_c] = True
        comparison = np.zeros_like(img1)
        comparison[mask] = img1[mask]
        comparison[~mask] = img2[~mask]
        return comparison
    raise ValueError(
        'Wrong value for `method`. '
        'Must be either "diff", "blend" or "checkerboard".'
    )

View File

@@ -0,0 +1,600 @@
import warnings
from warnings import warn

import numpy as np

# Public conversion API re-exported from this module.
__all__ = [
    'img_as_float32',
    'img_as_float64',
    'img_as_float',
    'img_as_int',
    'img_as_uint',
    'img_as_ubyte',
    'img_as_bool',
    'dtype_limits',
]

# Some of these may or may not be aliases depending on architecture & platform
_integer_types = (
    np.int8,
    np.byte,
    np.int16,
    np.short,
    np.int32,
    np.int64,
    np.longlong,
    np.int_,
    np.intp,
    np.intc,
    int,
    np.uint8,
    np.ubyte,
    np.uint16,
    np.ushort,
    np.uint32,
    np.uint64,
    np.ulonglong,
    np.uint,
    np.uintp,
    np.uintc,
)
# (min, max) representable value for every supported integer type.
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}
# Canonical intensity range per dtype; floats are treated as normalized
# to [-1, 1] by convention throughout this module.
dtype_range = {
    bool: (False, True),
    np.bool_: (False, True),
    float: (-1, 1),
    np.float16: (-1, 1),
    np.float32: (-1, 1),
    np.float64: (-1, 1),
}
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    # np.bool8 is a deprecated alias of np.bool_
    if hasattr(np, 'bool8'):
        dtype_range[np.bool8] = (False, True)
dtype_range.update(_integer_ranges)

# Every dtype accepted by the conversion machinery below.
_supported_types = list(dtype_range.keys())
def dtype_limits(image, clip_negative=False):
    """Return intensity limits, i.e. (min, max) tuple, of the image's dtype.

    Parameters
    ----------
    image : ndarray
        Input image.
    clip_negative : bool, optional
        If True, clip the negative range (i.e. return 0 for min intensity)
        even if the image dtype allows negative values.

    Returns
    -------
    imin, imax : tuple
        Lower and upper intensity limits.
    """
    # Look up the canonical range for this dtype, optionally flooring at 0.
    imin, imax = dtype_range[image.dtype.type]
    if clip_negative:
        return 0, imax
    return imin, imax
def _dtype_itemsize(itemsize, *dtypes):
"""Return first of `dtypes` with itemsize greater than `itemsize`
Parameters
----------
itemsize: int
The data type object element size.
Other Parameters
----------------
*dtypes:
Any Object accepted by `np.dtype` to be converted to a data
type object
Returns
-------
dtype: data type object
First of `dtypes` with itemsize greater than `itemsize`.
"""
return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)
def _dtype_bits(kind, bits, itemsize=1):
"""Return dtype of `kind` that can store a `bits` wide unsigned int
Parameters:
kind: str
Data type kind.
bits: int
Desired number of bits.
itemsize: int
The data type object element size.
Returns
-------
dtype: data type object
Data type of `kind` that can store a `bits` wide unsigned int
"""
s = next(
i
for i in (itemsize,) + (2, 4, 8)
if bits < (i * 8) or (bits == (i * 8) and kind == 'u')
)
return np.dtype(kind + str(s))
def _scale(a, n, m, copy=True):
    """Scale an array of unsigned/positive integers from `n` to `m` bits.

    Numbers can be represented exactly only if `m` is a multiple of `n`.

    Parameters
    ----------
    a : ndarray
        Input image array.
    n : int
        Number of bits currently used to encode the values in `a`.
    m : int
        Desired number of bits to encode the values in `out`.
    copy : bool, optional
        If True, allocates and returns new array. Otherwise, modifies
        `a` in place.

    Returns
    -------
    out : array
        Output image array. Has the same kind as `a`.
    """
    kind = a.dtype.kind
    if n > m and a.max() < 2**m:
        # Every value already fits in the narrower width: cast without
        # rescaling, but warn the caller about it.
        # NOTE(review): `dtype` below is only used in the warning text, and
        # the int/uint choice driven by `mnew > m` looks odd — confirm it
        # names the intended target type.
        mnew = int(np.ceil(m / 2) * 2)
        if mnew > m:
            dtype = f'int{mnew}'
        else:
            dtype = f'uint{mnew}'
        n = int(np.ceil(n / 2) * 2)
        warn(
            f'Downcasting {a.dtype} to {dtype} without scaling because max '
            f'value {a.max()} fits in {dtype}',
            stacklevel=3,
        )
        return a.astype(_dtype_bits(kind, m))
    elif n == m:
        # Same bit width: nothing to do beyond the requested copy semantics.
        return a.copy() if copy else a
    elif n > m:
        # downscale with precision loss
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            np.floor_divide(a, 2 ** (n - m), out=b, dtype=a.dtype, casting='unsafe')
            return b
        else:
            a //= 2 ** (n - m)
            return a
    elif m % n == 0:
        # exact upscale to a multiple of `n` bits
        # (multiplying by (2**m - 1) // (2**n - 1) replicates the bit
        # pattern, e.g. 0xAB -> 0xABAB, so full range maps to full range)
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
            return b
        else:
            a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
            a *= (2**m - 1) // (2**n - 1)
            return a
    else:
        # upscale to a multiple of `n` bits,
        # then downscale with precision loss
        o = (m // n + 1) * n
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, o))
            np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
            b //= 2 ** (o - m)
            return b
        else:
            a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
            a *= (2**o - 1) // (2**n - 1)
            a //= 2 ** (o - m)
            return a
def _convert(image, dtype, force_copy=False, uniform=False):
    """
    Convert an image to the requested data-type.

    Warnings are issued in case of precision loss, or when negative values
    are clipped during conversion to unsigned integer types (sign loss).

    Floating point values are expected to be normalized and will be clipped
    to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
    signed integers respectively.

    Numbers are not shifted to the negative side when converting from
    unsigned to signed integer types. Negative values will be clipped when
    converting to unsigned integers.

    Parameters
    ----------
    image : ndarray
        Input image.
    dtype : dtype
        Target data-type.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.
    uniform : bool, optional
        Uniformly quantize the floating point range to the integer range.
        By default (uniform=False) floating point values are scaled and
        rounded to the nearest integers, which minimizes back and forth
        conversion errors.

    .. versionchanged:: 0.15
        ``_convert`` no longer warns about possible precision or sign
        information loss. See discussions on these warnings at:
        https://github.com/scikit-image/scikit-image/issues/2602
        https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228
        https://github.com/scikit-image/scikit-image/pull/3575

    References
    ----------
    .. [1] DirectX data conversion rules.
           https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
    .. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
           pp 7-8. Khronos Group, 2010.
    .. [3] Proper treatment of pixels as integers. A.W. Paeth.
           In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
    .. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels",
           pp 47-57. Morgan Kaufmann, 1998.
    """
    image = np.asarray(image)
    dtypeobj_in = image.dtype
    # `np.floating` is an abstract request for "any float": default to float64.
    if dtype is np.floating:
        dtypeobj_out = np.dtype('float64')
    else:
        dtypeobj_out = np.dtype(dtype)
    dtype_in = dtypeobj_in.type
    dtype_out = dtypeobj_out.type
    kind_in = dtypeobj_in.kind
    kind_out = dtypeobj_out.kind
    itemsize_in = dtypeobj_in.itemsize
    itemsize_out = dtypeobj_out.itemsize

    # Below, we do an `issubdtype` check. Its purpose is to find out
    # whether we can get away without doing any image conversion. This happens
    # when:
    #
    # - the output and input dtypes are the same or
    # - when the output is specified as a type, and the input dtype
    #   is a subclass of that type (e.g. `np.floating` will allow
    #   `float32` and `float64` arrays through)
    if np.issubdtype(dtype_in, dtype):
        if force_copy:
            image = image.copy()
        return image

    if not (dtype_in in _supported_types and dtype_out in _supported_types):
        raise ValueError(f'Cannot convert from {dtypeobj_in} to ' f'{dtypeobj_out}.')

    if kind_in in 'ui':
        imin_in = np.iinfo(dtype_in).min
        imax_in = np.iinfo(dtype_in).max
    if kind_out in 'ui':
        imin_out = np.iinfo(dtype_out).min
        imax_out = np.iinfo(dtype_out).max

    # any -> binary: threshold at the midpoint of the input's positive range
    if kind_out == 'b':
        return image > dtype_in(dtype_range[dtype_in][1] / 2)

    # binary -> any: True maps to the dtype's maximum (or 1.0 for floats)
    if kind_in == 'b':
        result = image.astype(dtype_out)
        if kind_out != 'f':
            result *= dtype_out(dtype_range[dtype_out][1])
        return result

    # float -> any
    if kind_in == 'f':
        if kind_out == 'f':
            # float -> float
            return image.astype(dtype_out)

        if np.min(image) < -1.0 or np.max(image) > 1.0:
            raise ValueError("Images of type float must be between -1 and 1.")
        # floating point -> integer
        # use float type that can represent output integer type
        computation_type = _dtype_itemsize(
            itemsize_out, dtype_in, np.float32, np.float64
        )

        if not uniform:
            # round-to-nearest scaling (minimizes round-trip error)
            if kind_out == 'u':
                image_out = np.multiply(image, imax_out, dtype=computation_type)
            else:
                image_out = np.multiply(
                    image, (imax_out - imin_out) / 2, dtype=computation_type
                )
                image_out -= 1.0 / 2.0
            np.rint(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        elif kind_out == 'u':
            # uniform quantization: equal-width bins over [0, 1]
            image_out = np.multiply(image, imax_out + 1, dtype=computation_type)
            np.clip(image_out, 0, imax_out, out=image_out)
        else:
            image_out = np.multiply(
                image, (imax_out - imin_out + 1.0) / 2.0, dtype=computation_type
            )
            np.floor(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        return image_out.astype(dtype_out)

    # signed/unsigned int -> float
    if kind_out == 'f':
        # use float type that can exactly represent input integers
        computation_type = _dtype_itemsize(
            itemsize_in, dtype_out, np.float32, np.float64
        )

        if kind_in == 'u':
            # using np.divide or np.multiply doesn't copy the data
            # until the computation time
            image = np.multiply(image, 1.0 / imax_in, dtype=computation_type)
            # DirectX uses this conversion also for signed ints
            # if imin_in:
            #     np.maximum(image, -1.0, out=image)
        elif kind_in == 'i':
            # From DirectX conversions:
            # The most negative value maps to -1.0f
            # Every other value is converted to a float (call it c)
            # and then result = c * (1.0f / (2⁽ⁿ⁻¹⁾-1)).
            image = np.multiply(image, 1.0 / imax_in, dtype=computation_type)
            np.maximum(image, -1.0, out=image)
        else:
            image = np.add(image, 0.5, dtype=computation_type)
            image *= 2 / (imax_in - imin_in)

        return np.asarray(image, dtype_out)

    # unsigned int -> signed/unsigned int
    if kind_in == 'u':
        if kind_out == 'i':
            # unsigned int -> signed int
            image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
            return image.view(dtype_out)
        else:
            # unsigned int -> unsigned int
            return _scale(image, 8 * itemsize_in, 8 * itemsize_out)

    # signed int -> unsigned int
    if kind_out == 'u':
        image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
        result = np.empty(image.shape, dtype_out)
        # negative inputs clip to 0 on the way into the unsigned type
        np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
        return result

    # signed int -> signed int
    if itemsize_in > itemsize_out:
        return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)

    # widen first, shift to unsigned range, scale, then shift back
    image = image.astype(_dtype_bits('i', itemsize_out * 8))
    image -= imin_in
    image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
    image += imin_out
    return image.astype(dtype_out)
def convert(image, dtype, force_copy=False, uniform=False):
    # Deprecated public wrapper around `_convert`; kept only for backward
    # compatibility until its scheduled removal.
    warn(
        "The use of this function is discouraged as its behavior may change "
        "dramatically in scikit-image 1.0. This function will be removed "
        "in scikit-image 1.0.",
        FutureWarning,
        stacklevel=2,
    )
    return _convert(image=image, dtype=dtype, force_copy=force_copy, uniform=uniform)


# Reuse `_convert`'s docstring (it is None under `python -OO`) and append the
# deprecation notice so `help(convert)` documents both.
if _convert.__doc__ is not None:
    convert.__doc__ = (
        _convert.__doc__
        + """

    Warns
    -----
    FutureWarning:
        .. versionadded:: 0.17

        The use of this function is discouraged as its behavior may change
        dramatically in scikit-image 1.0. This function will be removed
        in scikit-image 1.0.
    """
    )
def img_as_float32(image, force_copy=False):
    """Convert an image to single-precision (32-bit) floating point format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of float32
        Output image.

    Notes
    -----
    Unsigned integer inputs are scaled to [0.0, 1.0] and signed integer
    inputs to [-1.0, 1.0]. Inputs that are already floating point keep
    their values unchanged, even outside those ranges.
    """
    return _convert(image, np.float32, force_copy=force_copy)
def img_as_float64(image, force_copy=False):
    """Convert an image to double-precision (64-bit) floating point format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of float64
        Output image.

    Notes
    -----
    Unsigned integer inputs are scaled to [0.0, 1.0] and signed integer
    inputs to [-1.0, 1.0]. Inputs that are already floating point keep
    their values unchanged, even outside those ranges.
    """
    return _convert(image, np.float64, force_copy=force_copy)
def img_as_float(image, force_copy=False):
    """Convert an image to floating point format.

    This function is similar to `img_as_float64`, but will not convert
    lower-precision floating point arrays to `float64`.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of float
        Output image.

    Notes
    -----
    Unsigned integer inputs are scaled to [0.0, 1.0] and signed integer
    inputs to [-1.0, 1.0]. Inputs that are already floating point (of any
    precision) pass through unchanged, even outside those ranges.
    """
    # `np.floating` lets any existing float dtype pass through untouched.
    return _convert(image, np.floating, force_copy=force_copy)
def img_as_uint(image, force_copy=False):
    """Convert an image to 16-bit unsigned integer format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of uint16
        Output image.

    Notes
    -----
    Negative input values will be clipped.
    Positive values are scaled between 0 and 65535.
    """
    return _convert(image, np.uint16, force_copy=force_copy)
def img_as_int(image, force_copy=False):
    """Convert an image to 16-bit signed integer format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of int16
        Output image.

    Notes
    -----
    The values are scaled between -32768 and 32767.
    If the input data-type is positive-only (e.g., uint8), then
    the output image will still only have positive values.
    """
    return _convert(image, np.int16, force_copy=force_copy)
def img_as_ubyte(image, force_copy=False):
    """Convert an image to 8-bit unsigned integer format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of ubyte (uint8)
        Output image.

    Notes
    -----
    Negative input values will be clipped.
    Positive values are scaled between 0 and 255.
    """
    return _convert(image, np.uint8, force_copy=force_copy)
def img_as_bool(image, force_copy=False):
    """Convert an image to boolean format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of bool (`bool_`)
        Output image.

    Notes
    -----
    The upper half of the input dtype's positive range is True, and the lower
    half is False. All negative values (if present) are False.
    """
    return _convert(image, bool, force_copy=force_copy)

View File

@@ -0,0 +1,30 @@
import sys
from .._vendored.numpy_lookfor import lookfor as _lookfor
def lookfor(what):
    """Do a keyword search on scikit-image docstrings and print results.

    .. warning::

        This function may also print results that are not part of
        scikit-image's public API.

    Parameters
    ----------
    what : str
        Words to look for.

    Examples
    --------
    >>> import skimage as ski
    >>> ski.util.lookfor('regular_grid')  # doctest: +SKIP
    Search results for 'regular_grid'
    ---------------------------------
    skimage.util.regular_grid
        Find `n_points` regularly spaced along `ar_shape`.
    skimage.util.lookfor
        Do a keyword search on scikit-image docstrings and print results.
    """
    # Search the top-level `skimage` package, whatever this module is
    # nested under.
    top_level_name = __name__.split('.')[0]
    top_level_module = sys.modules[top_level_name]
    return _lookfor(what, top_level_module)

View File

@@ -0,0 +1,233 @@
__all__ = ['random_noise']
import numpy as np
from .dtype import img_as_float
def _bernoulli(p, shape, *, rng):
"""
Bernoulli trials at a given probability of a given size.
This function is meant as a lower-memory alternative to calls such as
`np.random.choice([True, False], size=image.shape, p=[p, 1-p])`.
While `np.random.choice` can handle many classes, for the 2-class case
(Bernoulli trials), this function is much more efficient.
Parameters
----------
p : float
The probability that any given trial returns `True`.
shape : int or tuple of ints
The shape of the ndarray to return.
rng : `numpy.random.Generator`
``Generator`` instance, typically obtained via `np.random.default_rng()`.
Returns
-------
out : ndarray[bool]
The results of Bernoulli trials in the given `size` where success
occurs with probability `p`.
"""
if p == 0:
return np.zeros(shape, dtype=bool)
if p == 1:
return np.ones(shape, dtype=bool)
return rng.random(shape) <= p
def random_noise(image, mode='gaussian', rng=None, clip=True, **kwargs):
    """
    Function to add random noise of various types to a floating-point image.

    Parameters
    ----------
    image : ndarray
        Input image data. Will be converted to float.
    mode : str, optional
        One of the following strings, selecting the type of noise to add:

        'gaussian' (default)
            Gaussian-distributed additive noise.
        'localvar'
            Gaussian-distributed additive noise, with specified local variance
            at each point of `image`.
        'poisson'
            Poisson-distributed noise generated from the data.
        'salt'
            Replaces random pixels with 1.
        'pepper'
            Replaces random pixels with 0 (for unsigned images) or -1 (for
            signed images).
        's&p'
            Replaces random pixels with either 1 or `low_val`, where `low_val`
            is 0 for unsigned images or -1 for signed images.
        'speckle'
            Multiplicative noise using ``out = image + n * image``, where ``n``
            is Gaussian noise with specified mean & variance.
    rng : {`numpy.random.Generator`, int}, optional
        Pseudo-random number generator.
        By default, a PCG64 generator is used (see :func:`numpy.random.default_rng`).
        If `rng` is an int, it is used to seed the generator.
    clip : bool, optional
        If True (default), the output will be clipped after noise applied
        for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
        needed to maintain the proper image data range. If False, clipping
        is not applied, and the output may extend beyond the range [-1, 1].
    mean : float, optional
        Mean of random distribution. Used in 'gaussian' and 'speckle'.
        Default : 0.
    var : float, optional
        Variance of random distribution. Used in 'gaussian' and 'speckle'.
        Note: variance = (standard deviation) ** 2. Default : 0.01
    local_vars : ndarray, optional
        Array of positive floats, same shape as `image`, defining the local
        variance at every image point. Used in 'localvar'.
    amount : float, optional
        Proportion of image pixels to replace with noise on range [0, 1].
        Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
    salt_vs_pepper : float, optional
        Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
        Higher values represent more salt. Default : 0.5 (equal amounts)

    Returns
    -------
    out : ndarray
        Output floating-point image data on range [0, 1] or [-1, 1] if the
        input `image` was unsigned or signed, respectively.

    Notes
    -----
    Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
    the valid image range. The default is to clip (not alias) these values,
    but they may be preserved by setting `clip=False`. Note that in this case
    the output may contain values outside the ranges [0, 1] or [-1, 1].
    Use this option with care.

    Because of the prevalence of exclusively positive floating-point images in
    intermediate calculations, it is not possible to intuit if an input is
    signed based on dtype alone. Instead, negative values are explicitly
    searched for. Only if found does this function assume signed input.
    Unexpected results only occur in rare, poorly exposes cases (e.g. if all
    values are above 50 percent gray in a signed `image`). In this event,
    manually scaling the input to the positive domain will solve the problem.

    The Poisson distribution is only defined for positive integers. To apply
    this noise type, the number of unique values in the image is found and
    the next round power of two is used to scale up the floating-point result,
    after which it is scaled back down to the floating-point image range.

    To generate Poisson noise against a signed image, the signed image is
    temporarily converted to an unsigned image in the floating point domain,
    Poisson noise is generated, then it is returned to the original range.
    """
    mode = mode.lower()

    # Detect if a signed image was input
    if image.min() < 0:
        low_clip = -1.0
    else:
        low_clip = 0.0

    image = img_as_float(image)

    # Accepts an existing Generator, an int seed, or None (fresh generator).
    rng = np.random.default_rng(rng)

    # Map each public mode name to the kwarg group it accepts.
    allowedtypes = {
        'gaussian': 'gaussian_values',
        'localvar': 'localvar_values',
        'poisson': 'poisson_values',
        'salt': 'sp_values',
        'pepper': 'sp_values',
        's&p': 's&p_values',
        'speckle': 'gaussian_values',
    }

    # NOTE: the 'local_vars' default is built eagerly on every call, even
    # for modes that never use it.
    kwdefaults = {
        'mean': 0.0,
        'var': 0.01,
        'amount': 0.05,
        'salt_vs_pepper': 0.5,
        'local_vars': np.zeros_like(image) + 0.01,
    }

    allowedkwargs = {
        'gaussian_values': ['mean', 'var'],
        'localvar_values': ['local_vars'],
        'sp_values': ['amount'],
        's&p_values': ['amount', 'salt_vs_pepper'],
        'poisson_values': [],
    }

    # Reject kwargs that do not belong to the selected mode.
    for key in kwargs:
        if key not in allowedkwargs[allowedtypes[mode]]:
            raise ValueError(
                f"{key} keyword not in allowed keywords "
                f"{allowedkwargs[allowedtypes[mode]]}"
            )

    # Set kwarg defaults
    for kw in allowedkwargs[allowedtypes[mode]]:
        kwargs.setdefault(kw, kwdefaults[kw])

    if mode == 'gaussian':
        noise = rng.normal(kwargs['mean'], kwargs['var'] ** 0.5, image.shape)
        out = image + noise

    elif mode == 'localvar':
        # Ensure local variance input is correct
        if (kwargs['local_vars'] <= 0).any():
            raise ValueError('All values of `local_vars` must be > 0.')

        # Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
        out = image + rng.normal(0, kwargs['local_vars'] ** 0.5)

    elif mode == 'poisson':
        # Determine unique values in image & calculate the next power of two
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))

        # Ensure image is exclusively positive
        if low_clip == -1.0:
            old_max = image.max()
            image = (image + 1.0) / (old_max + 1.0)

        # Generating noise for each unique value in image.
        out = rng.poisson(image * vals) / float(vals)

        # Return image to original range if input was signed
        if low_clip == -1.0:
            out = out * (old_max + 1.0) - 1.0

    elif mode == 'salt':
        # Re-call function with mode='s&p' and p=1 (all salt noise)
        out = random_noise(
            image, mode='s&p', rng=rng, amount=kwargs['amount'], salt_vs_pepper=1.0
        )

    elif mode == 'pepper':
        # Re-call function with mode='s&p' and p=1 (all pepper noise)
        out = random_noise(
            image, mode='s&p', rng=rng, amount=kwargs['amount'], salt_vs_pepper=0.0
        )

    elif mode == 's&p':
        out = image.copy()
        p = kwargs['amount']
        q = kwargs['salt_vs_pepper']
        # `flipped` selects the pixels to corrupt; `salted` decides which of
        # those become salt (1) vs. pepper (low_clip).
        flipped = _bernoulli(p, image.shape, rng=rng)
        salted = _bernoulli(q, image.shape, rng=rng)
        peppered = ~salted
        out[flipped & salted] = 1
        out[flipped & peppered] = low_clip

    elif mode == 'speckle':
        noise = rng.normal(kwargs['mean'], kwargs['var'] ** 0.5, image.shape)
        out = image + image * noise

    # Clip back to original range, if necessary
    if clip:
        out = np.clip(out, low_clip, 1.0)

    return out

View File

@@ -0,0 +1,247 @@
import numbers
import numpy as np
from numpy.lib.stride_tricks import as_strided
__all__ = ['view_as_blocks', 'view_as_windows']
def view_as_blocks(arr_in, block_shape):
    """Block view of the input n-dimensional array (using re-striding).

    Blocks are non-overlapping views of the input array.

    Parameters
    ----------
    arr_in : ndarray, shape (M[, ...])
        Input array.
    block_shape : tuple
        The shape of the block. Each dimension must divide evenly into the
        corresponding dimensions of `arr_in`.

    Returns
    -------
    arr_out : ndarray
        Block view of the input array.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_blocks
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[2, 3],
           [6, 7]])
    >>> B[1, 0, 1, 1]
    13
    >>> A = np.arange(4*4*6).reshape(4,4,6)
    >>> B = view_as_blocks(A, block_shape=(1, 2, 2))
    >>> B.shape
    (4, 2, 3, 1, 2, 2)
    """
    # -- validate the requested block shape
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')

    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")

    if block_shape.size != arr_in.ndim:
        raise ValueError(
            "'block_shape' must have the same length as 'arr_in.shape'"
        )

    arr_shape = np.array(arr_in.shape)
    if (arr_shape % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")

    # -- restride: outer axes index blocks, inner axes index within a block
    n_blocks = arr_shape // block_shape
    outer_strides = arr_in.strides * block_shape
    return as_strided(
        arr_in,
        shape=tuple(n_blocks) + tuple(block_shape),
        strides=tuple(outer_strides) + arr_in.strides,
    )
def view_as_windows(arr_in, window_shape, step=1):
    """Rolling window view of the input n-dimensional array.

    Windows are overlapping views of the input array, with adjacent windows
    shifted by a single row or column (or an index of a higher dimension).

    Parameters
    ----------
    arr_in : ndarray, shape (M[, ...])
        Input array.
    window_shape : integer or tuple of length arr_in.ndim
        Defines the shape of the elementary n-dimensional orthotope
        (better know as hyperrectangle [1]_) of the rolling window view.
        If an integer is given, the shape will be a hypercube of
        sidelength given by its value.
    step : integer or tuple of length arr_in.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    arr_out : ndarray
        (rolling) window view of the input array.

    Notes
    -----
    One should be very careful with rolling views when it comes to
    memory usage: although a 'view' has the same memory footprint as
    its base array, the array that emerges when the view is used in a
    computation is generally a (much) larger array, especially for
    2-dimensional arrays and above. For example, a (100, 100, 100)
    ``float64`` array takes 8 MB, but a reshaped rolling view with a
    (3, 3, 3) window would occupy about 203 MB.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Hyperrectangle

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_windows
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_windows(A, (2, 2))
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[1, 2],
           [5, 6]])
    >>> B = view_as_windows(np.arange(10), (3,))
    >>> B.shape
    (8, 3)
    """
    # -- basic checks on arguments
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")

    ndim = arr_in.ndim

    # Scalars are expanded to a hypercube / uniform step.
    if isinstance(window_shape, numbers.Number):
        window_shape = (window_shape,) * ndim
    if len(window_shape) != ndim:
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")

    if isinstance(step, numbers.Number):
        if step < 1:
            raise ValueError("`step` must be >= 1")
        step = (step,) * ndim
    if len(step) != ndim:
        raise ValueError("`step` is incompatible with `arr_in.shape`")

    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)

    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("`window_shape` is too large")
    if ((window_shape - 1) < 0).any():
        raise ValueError("`window_shape` is too small")

    # -- build rolling window view
    # Strides of the subsampled view give the stride between window origins.
    step_slices = tuple(slice(None, None, s) for s in step)
    origin_strides = arr_in[step_slices].strides

    n_windows = (arr_shape - window_shape) // np.array(step) + 1
    out_shape = tuple(n_windows) + tuple(window_shape)
    out_strides = origin_strides + arr_in.strides

    return as_strided(arr_in, shape=out_shape, strides=out_strides)

View File

@@ -0,0 +1,173 @@
import numpy as np
from skimage._shared.testing import assert_array_almost_equal, assert_equal
from skimage import color, data, img_as_float
from skimage.filters import threshold_local, gaussian
from skimage.util.apply_parallel import apply_parallel
import pytest
da = pytest.importorskip('dask.array')
def test_apply_parallel():
    """apply_parallel matches the serial result for chunked ndarray input."""
    image = np.arange(144).reshape(12, 12).astype(float)

    # threshold_local, forwarding extra positional and keyword arguments
    expected_thresh = threshold_local(image, 3)
    got_thresh = apply_parallel(
        threshold_local,
        image,
        chunks=(6, 6),
        depth=5,
        extra_arguments=(3,),
        extra_keywords={'mode': 'reflect'},
    )
    assert_array_almost_equal(got_thresh, expected_thresh)

    def wrapped_gauss(arr):
        return gaussian(arr, sigma=1, mode='reflect')

    expected_gauss = gaussian(image, sigma=1, mode='reflect')
    got_gauss = apply_parallel(wrapped_gauss, image, chunks=(6, 6), depth=5)
    assert_array_almost_equal(got_gauss, expected_gauss)

    # a dask-array input with compute=True returns a plain ndarray
    expected_from_dask = gaussian(image, sigma=1, mode='reflect')
    got_from_dask = apply_parallel(
        wrapped_gauss, da.from_array(image, chunks=(6, 6)), depth=5, compute=True
    )
    assert isinstance(got_from_dask, np.ndarray)
    assert_array_almost_equal(got_from_dask, expected_from_dask)
def test_apply_parallel_lazy():
    """compute=False and dask-array input both yield lazy dask results."""
    image = np.arange(144).reshape(12, 12).astype(float)
    lazy_image = da.from_array(image, chunks=(6, 6))

    expected = threshold_local(image, 3)

    # ndarray input stays lazy when compute=False is requested
    lazy_result = apply_parallel(
        threshold_local,
        image,
        chunks=(6, 6),
        depth=5,
        extra_arguments=(3,),
        extra_keywords={'mode': 'reflect'},
        compute=False,
    )

    # a dask-array input is lazy by default
    dask_result = apply_parallel(
        threshold_local,
        lazy_image,
        depth=5,
        extra_arguments=(3,),
        extra_keywords={'mode': 'reflect'},
    )

    assert isinstance(lazy_result, da.Array)
    assert_array_almost_equal(lazy_result.compute(), expected)
    assert isinstance(dask_result, da.Array)
    assert_array_almost_equal(dask_result.compute(), expected)
def test_no_chunks():
    """apply_parallel works without an explicit chunk size."""
    image = np.ones(1 * 4 * 8 * 9).reshape(1, 4, 8, 9)

    def add_42(arr):
        return arr + 42

    assert_array_almost_equal(apply_parallel(add_42, image), add_42(image))
def test_apply_parallel_wrap():
    """The 'wrap' boundary mode is honored at chunk borders."""

    def filt(arr):
        return gaussian(arr, sigma=1, mode='wrap')

    image = np.arange(144).reshape(12, 12).astype(float)
    result = apply_parallel(filt, image, chunks=(6, 6), depth=5, mode='wrap')
    assert_array_almost_equal(result, gaussian(image, sigma=1, mode='wrap'))
def test_apply_parallel_nearest():
    """The 'nearest' boundary mode works with a per-axis depth dict."""

    def filt(arr):
        return gaussian(arr, sigma=1, mode='nearest')

    image = np.arange(144).reshape(12, 12).astype(float)
    result = apply_parallel(
        filt, image, chunks=(6, 6), depth={0: 5, 1: 5}, mode='nearest'
    )
    assert_array_almost_equal(result, gaussian(image, sigma=1, mode='nearest'))
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
@pytest.mark.parametrize('chunks', (None, (128, 128, 3)))
@pytest.mark.parametrize('depth', (0, 8, (8, 8, 0)))
def test_apply_parallel_rgb(depth, chunks, dtype):
    """channel_axis=-1 preserves dtype and values for RGB input."""
    image = data.chelsea().astype(dtype) / 255.0

    expected = color.rgb2ycbcr(image)
    result = apply_parallel(
        color.rgb2ycbcr,
        image,
        chunks=chunks,
        depth=depth,
        dtype=dtype,
        channel_axis=-1,
    )

    assert_equal(result.dtype, image.dtype)
    assert_array_almost_equal(expected, result)
@pytest.mark.parametrize('chunks', (None, (128, 256), 'ndim'))
@pytest.mark.parametrize('depth', (0, 8, (8, 16), 'ndim'))
@pytest.mark.parametrize('channel_axis', (0, 1, 2, -1, -2, -3))
def test_apply_parallel_rgb_channel_axis(depth, chunks, channel_axis):
    """Test channel_axis combinations.
    For depth and chunks, test in three ways:
    1.) scalar (to be applied over all axes)
    2.) tuple of length ``image.ndim - 1`` corresponding to spatial axes
    3.) tuple of length ``image.ndim`` corresponding to all axes
    """
    cat = img_as_float(data.chelsea())
    func = color.rgb2ycbcr
    cat_ycbcr_expected = func(cat, channel_axis=-1)
    # move channel axis to another position
    cat = np.moveaxis(cat, -1, channel_axis)
    if chunks == 'ndim':
        # explicitly specify the chunksize for the channel axis
        chunks = [128, 128]
        chunks.insert(channel_axis % cat.ndim, cat.shape[channel_axis])
    if depth == 'ndim':
        # explicitly specify the depth for the channel axis
        # (0: no overlap is needed across channels)
        depth = [8, 8]
        depth.insert(channel_axis % cat.ndim, 0)
    cat_ycbcr = apply_parallel(
        func,
        cat,
        chunks=chunks,
        depth=depth,
        dtype=cat.dtype,
        channel_axis=channel_axis,
        extra_keywords=dict(channel_axis=channel_axis),
    )
    # move channels of output back to the last dimension
    cat_ycbcr = np.moveaxis(cat_ycbcr, channel_axis, -1)
    assert_array_almost_equal(cat_ycbcr_expected, cat_ycbcr)

View File

@@ -0,0 +1,71 @@
import numpy as np
from skimage.util import crop
from skimage._shared.testing import assert_array_equal, assert_equal
def test_multi_crop():
    """Per-axis (before, after) widths trim each dimension independently."""
    arr = np.arange(45).reshape(9, 5)
    cropped = crop(arr, ((1, 2), (2, 1)))
    assert_equal(cropped.shape, (6, 2))
    assert_array_equal(cropped[0], [7, 8])
    assert_array_equal(cropped[-1], [32, 33])
def test_pair_crop():
    """A single (before, after) pair applies to every axis."""
    arr = np.arange(45).reshape(9, 5)
    cropped = crop(arr, (1, 2))
    assert_equal(cropped.shape, (6, 2))
    assert_array_equal(cropped[0], [6, 7])
    assert_array_equal(cropped[-1], [31, 32])
def test_pair_tuple_crop():
    """A 1-tuple wrapping a (before, after) pair behaves like the bare pair."""
    arr = np.arange(45).reshape(9, 5)
    cropped = crop(arr, ((1, 2),))
    assert_equal(cropped.shape, (6, 2))
    assert_array_equal(cropped[0], [6, 7])
    assert_array_equal(cropped[-1], [31, 32])
def test_int_crop():
    """A scalar width crops that many elements from both ends of every axis."""
    arr = np.arange(45).reshape(9, 5)
    cropped = crop(arr, 1)
    assert_equal(cropped.shape, (7, 3))
    assert_array_equal(cropped[0], [6, 7, 8])
    assert_array_equal(cropped[-1], [36, 37, 38])
def test_int_tuple_crop():
    """A 1-tuple of a scalar width behaves like the bare scalar."""
    arr = np.arange(45).reshape(9, 5)
    cropped = crop(arr, (1,))
    assert_equal(cropped.shape, (7, 3))
    assert_array_equal(cropped[0], [6, 7, 8])
    assert_array_equal(cropped[-1], [36, 37, 38])
def test_copy_crop():
    """copy=True returns an independent contiguous array; the default is a view."""
    arr = np.arange(45).reshape(9, 5)

    copied = crop(arr, 1, copy=True)
    assert copied.flags.c_contiguous
    # Writing into the copy must not touch the source.
    copied[0, 0] = 100
    assert not np.any(arr == 100)
    assert not np.may_share_memory(arr, copied)

    # Without copy=True, the crop is a view and writes go through.
    view = crop(arr, 1)
    view[0, 0] = 100
    assert arr[1, 1] == 100
    assert np.may_share_memory(arr, view)
def test_zero_crop():
    """A width of 0 leaves the shape unchanged."""
    arr = np.arange(45).reshape(9, 5)
    assert crop(arr, 0).shape == (9, 5)
def test_np_int_crop():
    """NumPy integer scalars (int32/int64) are accepted as crop widths."""
    arr = np.arange(45).reshape(9, 5)
    via_int64 = crop(arr, np.int64(1))
    via_int32 = crop(arr, np.int32(1))
    assert via_int64.shape == (7, 3)
    assert_array_equal(via_int64, via_int32)

View File

@@ -0,0 +1,105 @@
import numpy as np
import pytest
from skimage.util.compare import compare_images
from skimage._shared.testing import assert_stacklevel
def test_compare_images_ValueError_shape():
    """Images of different shapes are rejected with ValueError."""
    full = np.zeros((10, 10), dtype=np.uint8)
    narrow = np.zeros((10, 1), dtype=np.uint8)
    with pytest.raises(ValueError):
        compare_images(full, narrow)
def test_compare_images_ValueError_args():
    """An unrecognized ``method`` name raises ValueError."""
    img_a = np.ones((10, 10)) * 3
    img_b = np.zeros((10, 10))
    with pytest.raises(ValueError):
        compare_images(img_a, img_b, method="unknown")
def test_compare_images_diff():
    """'diff' returns 1.0 exactly where the two images disagree."""
    base = np.zeros((10, 10), dtype=np.uint8)
    base[3:8, 3:8] = 255
    shifted = np.zeros_like(base)
    shifted[3:8, 0:8] = 255
    # The two squares differ only in columns 0-2 of rows 3-7.
    expected = np.zeros_like(base, dtype=np.float64)
    expected[3:8, 0:3] = 1
    observed = compare_images(base, shifted, method='diff')
    np.testing.assert_array_equal(observed, expected)
def test_compare_images_replaced_param():
    """Deprecated keywords ``image1``/``image2`` still work but emit FutureWarning."""
    img1 = np.zeros((10, 10), dtype=np.uint8)
    img1[3:8, 3:8] = 255
    img2 = np.zeros_like(img1)
    img2[3:8, 0:8] = 255
    expected_result = np.zeros_like(img1, dtype=np.float64)
    expected_result[3:8, 0:3] = 1
    regex = ".*Please use `image0, image1`.*"
    # Each deprecated spelling must warn with a correct stacklevel and still
    # produce the same result as the new `image0, image1` parameters.
    with pytest.warns(FutureWarning, match=regex) as record:
        result = compare_images(image1=img1, image2=img2)
    assert_stacklevel(record)
    np.testing.assert_array_equal(result, expected_result)
    with pytest.warns(FutureWarning, match=regex) as record:
        result = compare_images(image0=img1, image2=img2)
    assert_stacklevel(record)
    np.testing.assert_array_equal(result, expected_result)
    with pytest.warns(FutureWarning, match=regex) as record:
        result = compare_images(img1, image2=img2)
    assert_stacklevel(record)
    np.testing.assert_array_equal(result, expected_result)
    # Test making "method" keyword-only here as well
    # so whole test can be removed in one go
    regex = ".*Please pass `method=`.*"
    with pytest.warns(FutureWarning, match=regex) as record:
        result = compare_images(img1, img2, "diff")
    assert_stacklevel(record)
    np.testing.assert_array_equal(result, expected_result)
def test_compare_images_blend():
    """'blend' averages the images: 1 where both are set, 0.5 where only one is."""
    base = np.zeros((10, 10), dtype=np.uint8)
    base[3:8, 3:8] = 255
    shifted = np.zeros_like(base)
    shifted[3:8, 0:8] = 255
    expected = np.zeros_like(base, dtype=np.float64)
    expected[3:8, 3:8] = 1
    expected[3:8, 0:3] = 0.5
    observed = compare_images(base, shifted, method='blend')
    np.testing.assert_array_equal(observed, expected)
def test_compare_images_checkerboard_default():
    """Default checkerboard on a 16x16 image alternates in 2-pixel squares."""
    dark = np.zeros((2**4, 2**4), dtype=np.uint8)
    bright = np.full(dark.shape, fill_value=255, dtype=np.uint8)
    board = compare_images(dark, bright, method='checkerboard')
    # fmt: off
    row_a = np.array([0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1.])
    row_b = np.array([1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0.])
    # fmt: on
    for row in (0, 1, 4, 5, 8, 9, 12, 13):
        np.testing.assert_array_equal(board[row, :], row_a)
    for row in (2, 3, 6, 7, 10, 11, 14, 15):
        np.testing.assert_array_equal(board[row, :], row_b)
def test_compare_images_checkerboard_tuple():
    """n_tiles=(4, 8) gives 4-row by 2-column tiles on a 16x16 image."""
    dark = np.zeros((2**4, 2**4), dtype=np.uint8)
    bright = np.full(dark.shape, fill_value=255, dtype=np.uint8)
    board = compare_images(dark, bright, method='checkerboard', n_tiles=(4, 8))
    row_a = np.array(
        [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
    )
    row_b = np.array(
        [1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]
    )
    for row in (0, 1, 2, 3, 8, 9, 10, 11):
        np.testing.assert_array_equal(board[row, :], row_a)
    for row in (4, 5, 6, 7, 12, 13, 14, 15):
        np.testing.assert_array_equal(board[row, :], row_b)

View File

@@ -0,0 +1,241 @@
import numpy as np
import itertools
from skimage import (
img_as_float,
img_as_float32,
img_as_float64,
img_as_int,
img_as_uint,
img_as_ubyte,
)
from skimage.util.dtype import _convert
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
from skimage._shared.testing import assert_equal, parametrize
# Expected value range for each image dtype (floats span [-1.0, 1.0]).
dtype_range = {
    np.uint8: (0, 255),
    np.uint16: (0, 65535),
    np.int8: (-128, 127),
    np.int16: (-32768, 32767),
    np.float32: (-1.0, 1.0),
    np.float64: (-1.0, 1.0),
}
# Conversion functions paired position-by-position with their target dtypes.
img_funcs = (img_as_int, img_as_float64, img_as_float32, img_as_uint, img_as_ubyte)
dtypes_for_img_funcs = (np.int16, np.float64, np.float32, np.uint16, np.ubyte)
# NOTE: zip() is a single-use iterator; it is consumed once, by the
# parametrize decorator on test_range below.
img_funcs_and_types = zip(img_funcs, dtypes_for_img_funcs)
def _verify_range(msg, x, vmin, vmax, dtype):
assert_equal(x[0], vmin)
assert_equal(x[-1], vmax)
assert x.dtype == dtype
@parametrize("dtype, f_and_dt", itertools.product(dtype_range, img_funcs_and_types))
def test_range(dtype, f_and_dt):
    """Converted output spans the target dtype's (possibly clipped) range."""
    convert_func, target_dtype = f_and_dt
    imin, imax = dtype_range[dtype]
    data = np.linspace(imin, imax, 10).astype(dtype)
    converted = convert_func(data)
    omin, omax = dtype_range[target_dtype]
    # A non-negative source or target cannot yield negative output values.
    if imin == 0 or omin == 0:
        omin = 0
        imin = 0
    _verify_range(
        f"From {np.dtype(dtype)} to {np.dtype(target_dtype)}",
        converted,
        omin,
        omax,
        np.dtype(target_dtype),
    )
# Add non-standard data types that are allowed by the `_convert` function.
dtype_range_extra = dtype_range.copy()
dtype_range_extra.update(
    {np.int32: (-2147483648, 2147483647), np.uint32: (0, 4294967295)}
)
# (input dtype, output dtype) pairs exercising conversions that the
# parametrization of test_range does not cover.
dtype_pairs = [
    (np.uint8, np.uint32),
    (np.int8, np.uint32),
    (np.int8, np.int32),
    (np.int32, np.int8),
    (np.float64, np.float32),
    (np.int32, np.float32),
]
@parametrize("dtype_in, dt", dtype_pairs)
def test_range_extra_dtypes(dtype_in, dt):
    """Test code paths that are not skipped by `test_range`"""
    imin, imax = dtype_range_extra[dtype_in]
    data = np.linspace(imin, imax, 10).astype(dtype_in)
    converted = _convert(data, dt)
    omin, omax = dtype_range_extra[dt]
    _verify_range(
        f"From {np.dtype(dtype_in)} to {np.dtype(dt)}",
        converted,
        omin,
        omax,
        np.dtype(dt),
    )
def test_downcast():
    """uint64 -> int16 conversion warns about downcasting but keeps the values."""
    data = np.arange(10).astype(np.uint64)
    with expected_warnings(['Downcasting']):
        converted = img_as_int(data)
    assert converted.dtype == np.int16, converted.dtype
    assert np.allclose(converted, data.astype(np.int16))
def test_float_out_of_range():
    """Float input outside [-1, 1] cannot be converted to an int dtype."""
    for bad in (np.array([2], dtype=np.float32), np.array([-2], dtype=np.float32)):
        with testing.raises(ValueError):
            img_as_int(bad)
def test_float_float_all_ranges():
    """img_as_float passes float input through unchanged, even out of range."""
    data = np.array([[-10.0, 10.0, 1e20]], dtype=np.float32)
    np.testing.assert_array_equal(img_as_float(data), data)
def test_copy():
    """force_copy=True always copies; same-dtype input is otherwise returned as-is."""
    data = np.array([1], dtype=np.float64)
    assert img_as_float(data) is data
    assert img_as_float(data, force_copy=True) is not data
def test_bool():
    """A single True pixel converts to the positive limit of each target dtype."""
    img_builtin = np.zeros((10, 10), bool)
    img_np = np.zeros((10, 10), np.bool_)
    img_builtin[1, 1] = True
    img_np[1, 1] = True
    for convert_func, target in [
        (img_as_int, np.int16),
        (img_as_float, np.float64),
        (img_as_uint, np.uint16),
        (img_as_ubyte, np.ubyte),
    ]:
        # Both the builtin `bool` and `np.bool_` spellings must behave alike.
        assert np.sum(convert_func(img_builtin)) == dtype_range[target][1]
        assert np.sum(convert_func(img_np)) == dtype_range[target][1]
def test_clobber():
    # The `img_as_*` functions should never modify input arrays.
    for in_func, out_func in itertools.product(img_funcs, img_funcs):
        img = np.random.rand(5, 5)
        converted = in_func(img)
        snapshot = converted.copy()
        out_func(converted)
        assert_equal(converted, snapshot)
def test_signed_scaling_float32():
    """int8 extremes scale so that the maximum maps exactly to 1.0."""
    extremes = np.array([-128, 127], dtype=np.int8)
    assert_equal(img_as_float32(extremes).max(), 1)
def test_float32_passthrough():
    """img_as_float keeps float32 input as float32 (no promotion to float64)."""
    data = np.array([-1, 1], dtype=np.float32)
    assert_equal(img_as_float(data).dtype, data.dtype)
# Float dtypes to cross-convert in the tests below.  The duplicates
# (builtin `float` twice, np.float64 alongside 'float64', np.single vs
# np.float32) presumably exercise alias spellings — TODO confirm.
float_dtype_list = [
    float,
    float,
    np.float64,
    np.single,
    np.float32,
    np.float64,
    'float32',
    'float64',
]
def test_float_conversion_dtype():
    """_convert between any pair of float dtypes yields the requested dtype."""
    values = np.array([-1, 1])
    # Exercise every ordered pair of float dtypes.
    for dtype_in, dtype_out in itertools.product(float_dtype_list, float_dtype_list):
        values = values.astype(dtype_in)
        converted = _convert(values, dtype_out)
        assert converted.dtype == np.dtype(dtype_out)
def test_float_conversion_dtype_warns():
    """The public `convert` warns that its use is discouraged, yet still converts."""
    from skimage.util.dtype import convert

    values = np.array([-1, 1])
    # Exercise every ordered pair of float dtypes.
    for dtype_in, dtype_out in itertools.product(float_dtype_list, float_dtype_list):
        values = values.astype(dtype_in)
        with expected_warnings(["The use of this function is discouraged"]):
            converted = convert(values, dtype_out)
        assert converted.dtype == np.dtype(dtype_out)
def test_subclass_conversion():
    """Converting to the abstract np.floating keeps the concrete input dtype."""
    values = np.array([-1, 1])
    for dtype in float_dtype_list:
        values = values.astype(dtype)
        assert _convert(values, np.floating).dtype == values.dtype
def test_int_to_float():
    """Check normalization when casting img_as_float from int types to float."""
    small_ints = np.arange(9, dtype=np.int64)
    # int64 values scale by roughly 2**-63 ~ 1e-19.
    converted = img_as_float(small_ints)
    assert np.allclose(converted, small_ints * 1e-19, atol=0.0, rtol=0.1)
    info = np.iinfo(np.int32)
    extremes = img_as_float(np.array([info.min, info.max], dtype=np.int32))
    assert_equal(extremes.max(), 1)
    assert_equal(extremes.min(), -1)
def test_img_as_ubyte_supports_npulonglong():
    """img_as_ubyte accepts arrays whose dtype resolves to np.ulonglong.

    Before NumPy 2.0.0, ``scaled.dtype.type`` below is ``np.ulonglong``
    rather than the expected ``np.uint64``, which used to break
    ``img_as_ubyte`` because ``np.ulonglong`` was missing from
    ``skimage.util.dtype._integer_types``.  Not an issue on NumPy >= 2.0.0.
    See https://github.com/scikit-image/scikit-image/issues/7385.
    """
    data = np.arange(50, dtype=np.uint64)
    scaled = data * 256 ** (data.dtype.itemsize - 1)
    assert img_as_ubyte(scaled).dtype == np.uint8

View File

@@ -0,0 +1,74 @@
import numpy as np
from skimage import dtype_limits
from skimage.util.dtype import dtype_range
from skimage.util import invert
from skimage._shared.testing import assert_array_equal
def test_invert_bool():
    """Inverting a boolean image swaps True and False."""
    image = np.zeros((3, 3), dtype='bool')
    vmax = dtype_limits(image, clip_negative=False)[1]
    image[1, :] = vmax
    expected = np.full((3, 3), vmax, dtype='bool')
    expected[1, :] = 0
    assert_array_equal(expected, invert(image))
def test_invert_uint8():
    """uint8 inversion maps v to 255 - v."""
    image = np.zeros((3, 3), dtype='uint8')
    vmax = dtype_limits(image, clip_negative=False)[1]
    image[1, :] = vmax
    expected = np.full((3, 3), vmax, dtype='uint8')
    expected[1, :] = 0
    assert_array_equal(expected, invert(image))
def test_invert_int8():
    """Signed-int inversion maps v to -1 - v, so 0 becomes -1 and limits swap."""
    image = np.zeros((3, 3), dtype='int8')
    vmin, vmax = dtype_limits(image, clip_negative=False)
    image[1, :] = vmin
    image[2, :] = vmax
    expected = np.zeros((3, 3), dtype='int8')
    expected[0, :] = -1
    expected[1, :] = vmax
    expected[2, :] = vmin
    assert_array_equal(expected, invert(image))
def test_invert_float64_signed():
    """signed_float=True negates a signed float image (limits swap)."""
    image = np.zeros((3, 3), dtype='float64')
    vmin, vmax = dtype_limits(image, clip_negative=False)
    image[1, :] = vmin
    image[2, :] = vmax
    expected = np.zeros((3, 3), dtype='float64')
    expected[1, :] = vmax
    expected[2, :] = vmin
    assert_array_equal(expected, invert(image, signed_float=True))
def test_invert_float64_unsigned():
    """Unsigned-float inversion maps v to 1 - v."""
    image = np.zeros((3, 3), dtype='float64')
    vmax = dtype_limits(image, clip_negative=True)[1]
    image[2, :] = vmax
    expected = np.zeros((3, 3), dtype='float64')
    expected[0, :] = vmax
    expected[1, :] = vmax
    assert_array_equal(expected, invert(image))
def test_invert_roundtrip():
    """invert(invert(x)) restores the original for every supported dtype."""
    for dtype, limits in dtype_range.items():
        image = np.array(limits, dtype=dtype)
        assert_array_equal(image, invert(invert(image)))

View File

@@ -0,0 +1,58 @@
import numpy as np
from skimage._shared import testing
from skimage._shared.testing import assert_equal
from skimage.util._label import label_points
def test_label_points_coords_dimension():
    """2-D coordinates against a 3-D output shape raise ValueError."""
    points = np.array([[1, 2], [3, 4]])
    with testing.raises(ValueError):
        label_points(points, (5, 5, 2))
def test_label_points_coords_range():
    """Coordinates outside the output shape raise IndexError."""
    points = np.array([[0, 0], [5, 5]])
    with testing.raises(IndexError):
        label_points(points, (5, 5))
def test_label_points_coords_negative():
    """Negative coordinates raise ValueError."""
    points = np.array([[-1, 0], [5, 5]])
    with testing.raises(ValueError):
        label_points(points, (5, 5))
def test_label_points_two_dimensional_output():
    """Points are labeled 1..n in input order on a 2-D mask."""
    points = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
    mask = label_points(points, (5, 5))
    expected = np.array(
        [
            [1, 0, 0, 0, 0],
            [0, 2, 0, 0, 0],
            [0, 0, 3, 0, 0],
            [0, 0, 0, 4, 0],
            [0, 0, 0, 0, 5],
        ]
    )
    assert_equal(mask, expected)
def test_label_points_multi_dimensional_output():
    """label_points supports output volumes with more than two dimensions."""
    points = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 0], [4, 4, 1]])
    mask = label_points(points, (5, 5, 3))
    expected = np.array(
        [
            [[1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 2, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [0, 0, 3], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [0, 0, 0], [4, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 5, 0]],
        ]
    )
    assert_equal(mask, expected)

View File

@@ -0,0 +1,10 @@
import skimage as ski
def test_lookfor_basic(capsys):
    """ski.lookfor aliases util.lookfor and its search output lists regionprops."""
    assert ski.lookfor is ski.util.lookfor
    ski.util.lookfor("regionprops")
    printed = capsys.readouterr().out
    assert "skimage.measure.regionprops" in printed
    assert "skimage.measure.regionprops_table" in printed

View File

@@ -0,0 +1,87 @@
import numpy as np
import pytest
from skimage.util._map_array import map_array, ArrayMap
from skimage._shared import testing
# Integer dtypes accepted for map_array's input label arrays.
_map_array_dtypes_in = [
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
]
# Output values may additionally be floating point.
_map_array_dtypes_out = _map_array_dtypes_in + [np.float32, np.float64]
@pytest.mark.parametrize("dtype_in", _map_array_dtypes_in)
@pytest.mark.parametrize("dtype_out", _map_array_dtypes_out)
@pytest.mark.parametrize("out_array", [True, False])
def test_map_array_simple(dtype_in, dtype_out, out_array):
    """map_array relabels mapped values and zeroes unmapped ones."""
    labels = np.array([0, 2, 0, 3, 4, 5, 0], dtype=dtype_in)
    # Reversed value arrays: the mapping must not rely on sorted input.
    src_vals = np.array([1, 2, 3, 4, 6], dtype=dtype_in)[::-1]
    dst_vals = np.array([6, 7, 8, 9, 10], dtype=dtype_out)[::-1]
    expected = np.array([0, 7, 0, 8, 9, 0, 0], dtype=dtype_out)
    out = np.full(expected.shape, 11, dtype=dtype_out) if out_array else None
    result = map_array(
        input_arr=labels, input_vals=src_vals, output_vals=dst_vals, out=out
    )
    np.testing.assert_array_equal(result, expected)
    assert result.dtype == dtype_out
    if out_array:
        # With a preallocated output, the same array must be returned.
        assert out is result
def test_map_array_incorrect_output_shape():
    """An `out` array with the wrong shape is rejected with ValueError."""
    labels = np.random.randint(0, 5, size=(24, 25))
    wrong_out = np.empty((24, 24))
    in_vals = np.unique(labels)
    out_vals = np.random.random(in_vals.shape).astype(wrong_out.dtype)
    with testing.raises(ValueError):
        map_array(labels, in_vals, out_vals, out=wrong_out)
def test_map_array_non_contiguous_output_array():
    """A non-contiguous `out` array is rejected with ValueError."""
    labels = np.random.randint(0, 5, size=(24, 25))
    # Strided view of a larger buffer: right shape, not contiguous.
    strided_out = np.empty((24 * 3, 25 * 2))[::3, ::2]
    in_vals = np.unique(labels)
    out_vals = np.random.random(in_vals.shape).astype(strided_out.dtype)
    with testing.raises(ValueError):
        map_array(labels, in_vals, out_vals, out=strided_out)
def test_arraymap_long_str():
    """A large ArrayMap's str() is truncated to _max_str_lines plus two."""
    labels = np.random.randint(0, 40, size=(24, 25))
    in_vals = np.unique(labels)
    out_vals = np.random.random(in_vals.shape)
    amap = ArrayMap(in_vals, out_vals)
    assert len(str(amap).split('\n')) == amap._max_str_lines + 2
def test_arraymap_update():
    """In-place arithmetic on a slice of the map is visible to later lookups."""
    in_vals = np.unique(np.random.randint(0, 200, size=5))
    out_vals = np.random.random(len(in_vals))
    amap = ArrayMap(in_vals, out_vals)
    image = np.random.randint(1, len(amap), size=(512, 512))
    assert np.all(amap[image] < 1)  # missing values map to 0.
    amap[1:] += 1
    assert np.all(amap[image] >= 1)
def test_arraymap_bool_index():
    """Boolean-mask indexing selects map entries for in-place update."""
    in_vals = np.unique(np.random.randint(0, 200, size=5))
    out_vals = np.random.random(len(in_vals))
    amap = ArrayMap(in_vals, out_vals)
    image = np.random.randint(1, len(in_vals), size=(512, 512))
    assert np.all(amap[image] < 1)  # missing values map to 0.
    selector = np.ones(len(amap), dtype=bool)
    selector[0] = False
    amap[selector] += 1
    assert np.all(amap[image] >= 1)

View File

@@ -0,0 +1,183 @@
from skimage._shared import testing
from skimage._shared.testing import assert_equal, assert_array_equal
import numpy as np
from skimage.util import montage
# TODO: when the minimum NumPy dependency is 1.25, use
# np.exceptions.AxisError instead of np.AxisError
# and remove this try-except
try:
from numpy import AxisError
except ImportError:
from numpy.exceptions import AxisError
def test_montage_simple_gray():
    """Default montage tiles grayscale images, filling empty cells with the mean."""
    n_images, n_rows, n_cols = 3, 2, 3
    images = np.arange(n_images * n_rows * n_cols, dtype=float).reshape(
        n_images, n_rows, n_cols
    )
    # 8.5 is the mean of the input values 0..17, used for the empty tile.
    expected = np.array(
        [
            [0.0, 1.0, 2.0, 6.0, 7.0, 8.0],
            [3.0, 4.0, 5.0, 9.0, 10.0, 11.0],
            [12.0, 13.0, 14.0, 8.5, 8.5, 8.5],
            [15.0, 16.0, 17.0, 8.5, 8.5, 8.5],
        ]
    )
    assert_array_equal(montage(images), expected)
def test_montage_simple_rgb():
    """Multichannel montage tiles images with channel_axis=-1."""
    n_images, n_rows, n_cols, n_channels = 2, 2, 2, 2
    images = np.arange(
        n_images * n_rows * n_cols * n_channels,
        dtype=float,
    ).reshape(n_images, n_rows, n_cols, n_channels)
    # [7, 8] are the per-channel means of values 0..15, filling empty tiles.
    expected = np.array(
        [
            [[0, 1], [2, 3], [8, 9], [10, 11]],
            [[4, 5], [6, 7], [12, 13], [14, 15]],
            [[7, 8], [7, 8], [7, 8], [7, 8]],
            [[7, 8], [7, 8], [7, 8], [7, 8]],
        ]
    )
    assert_array_equal(montage(images, channel_axis=-1), expected)
@testing.parametrize('channel_axis', (0, 1, 2, 3, -1, -2, -3, -4))
def test_montage_simple_rgb_channel_axes(channel_axis):
    """montage gives the same result for any valid channel_axis position."""
    n_images, n_rows, n_cols, n_channels = 2, 2, 2, 2
    images = np.arange(n_images * n_rows * n_cols * n_channels, dtype=float)
    images = images.reshape(n_images, n_rows, n_cols, n_channels)
    # Move channels from the last axis to the position under test.
    images = np.moveaxis(images, -1, channel_axis)
    expected = np.array(
        [
            [[0, 1], [2, 3], [8, 9], [10, 11]],
            [[4, 5], [6, 7], [12, 13], [14, 15]],
            [[7, 8], [7, 8], [7, 8], [7, 8]],
            [[7, 8], [7, 8], [7, 8], [7, 8]],
        ]
    )
    assert_array_equal(montage(images, channel_axis=channel_axis), expected)
@testing.parametrize('channel_axis', (4, -5))
def test_montage_invalid_channel_axes(channel_axis):
    """An out-of-bounds channel_axis raises AxisError."""
    images = np.arange(16, dtype=float).reshape(2, 2, 2, 2)
    with testing.raises(AxisError):
        montage(images, channel_axis=channel_axis)
def test_montage_fill_gray():
    """An explicit fill value replaces the default mean in empty tiles."""
    n_images, n_rows, n_cols = 3, 2, 3
    images = np.arange(n_images * n_rows * n_cols, dtype=float).reshape(
        n_images, n_rows, n_cols
    )
    expected = np.array(
        [
            [0.0, 1.0, 2.0, 6.0, 7.0, 8.0],
            [3.0, 4.0, 5.0, 9.0, 10.0, 11.0],
            [12.0, 13.0, 14.0, 0.0, 0.0, 0.0],
            [15.0, 16.0, 17.0, 0.0, 0.0, 0.0],
        ]
    )
    assert_array_equal(montage(images, fill=0), expected)
def test_montage_grid_default_gray():
    """Without grid_shape, the grid is ceil(sqrt(n_images)) tiles per side."""
    n_images, n_rows, n_cols = 15, 11, 7
    images = np.arange(n_images * n_rows * n_cols, dtype=float).reshape(
        n_images, n_rows, n_cols
    )
    side = int(np.ceil(np.sqrt(n_images)))
    assert_equal(montage(images).shape, (side * n_rows, side * n_cols))
def test_montage_grid_custom_gray():
    """grid_shape=(3, 2) lays images out in three rows of two."""
    n_images, n_rows, n_cols = 6, 2, 2
    images = np.arange(n_images * n_rows * n_cols, dtype=np.float32).reshape(
        n_images, n_rows, n_cols
    )
    expected = np.array(
        [
            [0.0, 1.0, 4.0, 5.0],
            [2.0, 3.0, 6.0, 7.0],
            [8.0, 9.0, 12.0, 13.0],
            [10.0, 11.0, 14.0, 15.0],
            [16.0, 17.0, 20.0, 21.0],
            [18.0, 19.0, 22.0, 23.0],
        ]
    )
    assert_array_equal(montage(images, grid_shape=(3, 2)), expected)
def test_montage_rescale_intensity_gray():
    """rescale_intensity=True normalizes each tile to the [0, 1] range."""
    n_images, n_rows, n_cols = 4, 3, 3
    images = np.arange(n_images * n_rows * n_cols, dtype=np.float32).reshape(
        n_images, n_rows, n_cols
    )
    result = montage(images, rescale_intensity=True)
    expected = np.array(
        [
            [0.0, 0.125, 0.25, 0.0, 0.125, 0.25],
            [0.375, 0.5, 0.625, 0.375, 0.5, 0.625],
            [0.75, 0.875, 1.0, 0.75, 0.875, 1.0],
            [0.0, 0.125, 0.25, 0.0, 0.125, 0.25],
            [0.375, 0.5, 0.625, 0.375, 0.5, 0.625],
            [0.75, 0.875, 1.0, 0.75, 0.875, 1.0],
        ]
    )
    assert_equal(result.min(), 0.0)
    assert_equal(result.max(), 1.0)
    assert_array_equal(result, expected)
def test_montage_simple_padding_gray():
    """padding_width surrounds each tile with fill-valued pixels."""
    n_images, n_rows, n_cols = 2, 2, 2
    images = np.arange(n_images * n_rows * n_cols).reshape(n_images, n_rows, n_cols)
    # 3 is the (rounded) mean of input values 0..7, used as the fill.
    expected = np.array(
        [
            [3, 3, 3, 3, 3, 3, 3],
            [3, 0, 1, 3, 4, 5, 3],
            [3, 2, 3, 3, 6, 7, 3],
            [3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 3, 3, 3, 3],
        ]
    )
    assert_array_equal(montage(images, padding_width=1), expected)
def test_error_ndim():
    """montage rejects input whose ndim does not match the channel mode."""
    # Grayscale mode expects exactly 3 dimensions.
    with testing.raises(ValueError):
        montage(np.random.randn(1, 2))
    with testing.raises(ValueError):
        montage(np.random.randn(1, 2, 3, 4))
    # Multichannel mode expects exactly 4 dimensions.
    with testing.raises(ValueError):
        montage(np.random.randn(1, 2, 3), channel_axis=-1)
    with testing.raises(ValueError):
        montage(np.random.randn(1, 2, 3, 4, 5), channel_axis=-1)

View File

@@ -0,0 +1,208 @@
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal, assert_allclose
import numpy as np
from skimage.data import camera
from skimage.util import random_noise, img_as_float
def test_set_seed():
    """The same rng seed must reproduce identical noise."""
    image = camera()
    assert_array_equal(random_noise(image, rng=42), random_noise(image, rng=42))
def test_salt():
    """Salt noise sets approximately `amount` of the pixels to 1.0."""
    image = img_as_float(camera())
    amount = 0.15
    noisy = random_noise(image, rng=42, mode='salt', amount=amount)
    flipped = image != noisy
    # Every altered pixel must equal 1.0.
    assert_allclose(noisy[flipped], np.ones(flipped.sum()))
    # The fraction of altered pixels is close to `amount`.
    fraction = float(flipped.sum()) / (image.shape[0] * image.shape[1])
    assert abs(amount - fraction) <= 1e-2
def test_salt_p1():
    """amount=1 turns every pixel into salt (value 1)."""
    noisy = random_noise(np.random.rand(2, 3), mode='salt', amount=1)
    assert_array_equal(noisy, np.ones((2, 3)))
def test_singleton_dim():
    """Ensure images where size of a given dimension is 1 work correctly."""
    image = np.random.rand(1, 1000)
    noisy = random_noise(image, mode='salt', amount=0.1, rng=42)
    # Salted fraction must land within 5% of the requested amount.
    assert abs(np.average(noisy == 1) - 0.1) <= 5e-2
def test_pepper():
    """Pepper noise sets ~`amount` of the pixels to the minimum value."""
    cam = img_as_float(camera())
    data_signed = cam * 2.0 - 1.0  # Same image, on range [-1, 1]
    amount = 0.15
    cam_noisy = random_noise(cam, rng=42, mode='pepper', amount=amount)
    peppermask = cam != cam_noisy
    # Ensure all changes are to 0.0
    assert_allclose(cam_noisy[peppermask], np.zeros(peppermask.sum()))
    # Ensure approximately correct amount of noise was added
    proportion = float(peppermask.sum()) / (cam.shape[0] * cam.shape[1])
    tolerance = 1e-2
    assert abs(amount - proportion) <= tolerance
    # Check to make sure pepper gets added properly to signed images
    orig_zeros = (data_signed == -1).sum()
    cam_noisy_signed = random_noise(data_signed, rng=42, mode='pepper', amount=0.15)
    proportion = float((cam_noisy_signed == -1).sum() - orig_zeros) / (
        cam.shape[0] * cam.shape[1]
    )
    assert abs(amount - proportion) <= tolerance
def test_salt_and_pepper():
    """s&p noise flips pixels to both 0 and 1 in the requested proportions."""
    image = img_as_float(camera())
    amount = 0.15
    noisy = random_noise(image, rng=42, mode='s&p', amount=amount, salt_vs_pepper=0.25)
    salt = np.logical_and(image != noisy, noisy == 1.0)
    pepper = np.logical_and(image != noisy, noisy == 0.0)
    # Altered pixels must be exactly 0 or exactly 1.
    assert_allclose(noisy[salt], np.ones(salt.sum()))
    assert_allclose(noisy[pepper], np.zeros(pepper.sum()))
    # Total flipped fraction is close to `amount`.
    fraction = float(salt.sum() + pepper.sum()) / (image.shape[0] * image.shape[1])
    assert abs(amount - fraction) <= 1e-2
    # Salt:pepper ratio is near the requested 0.25 / 0.75.
    assert 0.18 < salt.sum() / pepper.sum() < 0.35
def test_gaussian():
    """Gaussian noise variance and mean track the requested parameters."""
    flat = np.zeros((128, 128)) + 0.5
    noisy = random_noise(flat, rng=42, var=0.01)
    assert 0.008 < noisy.var() < 0.012
    noisy = random_noise(flat, rng=42, mean=0.3, var=0.015)
    assert 0.28 < noisy.mean() - 0.5 < 0.32
    assert 0.012 < noisy.var() < 0.018
def test_localvar():
    """Per-pixel variance map drives 'localvar' noise; invalid maps are rejected."""
    seed = 23703
    data = np.zeros((128, 128)) + 0.5
    # Four quadrants with distinct target variances.
    local_vars = np.zeros((128, 128)) + 0.001
    local_vars[:64, 64:] = 0.1
    local_vars[64:, :64] = 0.25
    local_vars[64:, 64:] = 0.45
    data_gaussian = random_noise(
        data, mode='localvar', rng=seed, local_vars=local_vars, clip=False
    )
    # Each quadrant's sample variance should be close to its target.
    assert 0.0 < data_gaussian[:64, :64].var() < 0.002
    assert 0.095 < data_gaussian[:64, 64:].var() < 0.105
    assert 0.245 < data_gaussian[64:, :64].var() < 0.255
    assert 0.445 < data_gaussian[64:, 64:].var() < 0.455
    # Ensure local variance bounds checking works properly
    bad_local_vars = np.zeros_like(data)
    with testing.raises(ValueError):
        random_noise(data, mode='localvar', rng=seed, local_vars=bad_local_vars)
    bad_local_vars += 0.1
    bad_local_vars[0, 0] = -1
    with testing.raises(ValueError):
        random_noise(data, mode='localvar', rng=seed, local_vars=bad_local_vars)
def test_speckle():
    """Speckle noise is multiplicative: out = clip(x + x * N(mean, var), 0, 1)."""
    seed = 42
    flat = np.zeros((128, 128)) + 0.1
    # Reproduce the generator stream used by random_noise for the same seed.
    rng = np.random.default_rng(seed)
    gauss = rng.normal(0.1, 0.02**0.5, (128, 128))
    expected = np.clip(flat + flat * gauss, 0, 1)
    observed = random_noise(flat, mode='speckle', rng=42, mean=0.1, var=0.02)
    assert_allclose(expected, observed)
def test_poisson():
    """Poisson noise matches rng.poisson on the image scaled by 256 levels."""
    image = camera()  # 512x512 grayscale uint8
    rng = np.random.default_rng(42)
    clipped = random_noise(image, mode='poisson', rng=42)
    unclipped = random_noise(image, mode='poisson', rng=42, clip=False)
    expected = rng.poisson(img_as_float(image) * 256) / 256.0
    assert_allclose(clipped, np.clip(expected, 0.0, 1.0))
    assert_allclose(unclipped, expected)
def test_clip_poisson():
    """clip=True bounds Poisson output to the input's nominal value range."""
    image = camera()  # 512x512 grayscale uint8
    signed = img_as_float(image) * 2.0 - 1.0  # Same image, on range [-1, 1]
    # Clipped: output confined to [0, 1] (unsigned) / [-1, 1] (signed).
    clipped = random_noise(image, mode='poisson', rng=42, clip=True)
    clipped_signed = random_noise(signed, mode='poisson', rng=42, clip=True)
    assert (clipped.max() == 1.0) and (clipped.min() == 0.0)
    assert (clipped_signed.max() == 1.0) and (clipped_signed.min() == -1.0)
    # Unclipped: values may exceed the nominal upper bound.
    raw = random_noise(image, mode='poisson', rng=42, clip=False)
    raw_signed = random_noise(signed, mode='poisson', rng=42, clip=False)
    assert (raw.max() > 1.15) and (raw.min() == 0.0)
    assert (raw_signed.max() > 1.3) and (raw_signed.min() == -1.0)
def test_clip_gaussian():
    """clip=True bounds Gaussian output to the input's nominal value range."""
    image = camera()  # 512x512 grayscale uint8
    signed = img_as_float(image) * 2.0 - 1.0  # Same image, on range [-1, 1]
    # Clipped: output confined to [0, 1] (unsigned) / [-1, 1] (signed).
    clipped = random_noise(image, mode='gaussian', rng=42, clip=True)
    clipped_signed = random_noise(signed, mode='gaussian', rng=42, clip=True)
    assert (clipped.max() == 1.0) and (clipped.min() == 0.0)
    assert (clipped_signed.max() == 1.0) and (clipped_signed.min() == -1.0)
    # Unclipped: values exceed the nominal range on both sides.
    raw = random_noise(image, mode='gaussian', rng=42, clip=False)
    raw_signed = random_noise(signed, mode='gaussian', rng=42, clip=False)
    assert (raw.max() > 1.22) and (raw.min() < -0.35)
    assert (raw_signed.max() > 1.219) and (raw_signed.min() < -1.219)
def test_clip_speckle():
    """clip=True bounds speckle output to the input's nominal value range."""
    image = camera()  # 512x512 grayscale uint8
    signed = img_as_float(image) * 2.0 - 1.0  # Same image, on range [-1, 1]
    # Clipped: output confined to [0, 1] (unsigned) / [-1, 1] (signed).
    clipped = random_noise(image, mode='speckle', rng=42, clip=True)
    clipped_signed = random_noise(signed, mode='speckle', rng=42, clip=True)
    assert (clipped.max() == 1.0) and (clipped.min() == 0.0)
    assert (clipped_signed.max() == 1.0) and (clipped_signed.min() == -1.0)
    # Unclipped: values may exceed the nominal bounds.
    raw = random_noise(image, mode='speckle', rng=42, clip=False)
    raw_signed = random_noise(signed, mode='speckle', rng=42, clip=False)
    assert (raw.max() > 1.219) and (raw.min() == 0.0)
    assert (raw_signed.max() > 1.219) and (raw_signed.min() < -1.219)
def test_bad_mode():
    """An unknown noise mode raises KeyError."""
    with testing.raises(KeyError):
        random_noise(np.zeros((64, 64)), 'perlin')

View File

@@ -0,0 +1,37 @@
import numpy as np
from skimage.util import regular_grid
from skimage._shared.testing import assert_equal
def test_regular_grid_full():
    """Asking for more points than elements yields full slices over every axis."""
    ar = np.zeros((2, 2))
    # NOTE(review): the sibling tests pass `ar.shape` here, this one passes the
    # array itself — confirm regular_grid accepts both forms.
    g = regular_grid(ar, 25)
    assert_equal(g, [slice(None, None, None), slice(None, None, None)])
    ar[g] = 1
    assert_equal(ar.size, ar.sum())
def test_regular_grid_2d_8():
    """8 points on a 20x40 grid: stride 10, offset 5 in both dimensions."""
    image = np.zeros((20, 40))
    grid = regular_grid(image.shape, 8)
    assert_equal(grid, [slice(5.0, None, 10.0), slice(5.0, None, 10.0)])
    image[grid] = 1
    assert_equal(image.sum(), 8)
def test_regular_grid_2d_32():
    """32 points on a 20x40 grid: stride 5, offset 2 in both dimensions."""
    image = np.zeros((20, 40))
    grid = regular_grid(image.shape, 32)
    assert_equal(grid, [slice(2.0, None, 5.0), slice(2.0, None, 5.0)])
    image[grid] = 1
    assert_equal(image.sum(), 32)
def test_regular_grid_3d_8():
    """8 points on a 3x20x40 volume use per-axis strides (3, 10, 10)."""
    volume = np.zeros((3, 20, 40))
    grid = regular_grid(volume.shape, 8)
    assert_equal(
        grid, [slice(1.0, None, 3.0), slice(5.0, None, 10.0), slice(5.0, None, 10.0)]
    )
    volume[grid] = 1
    assert_equal(volume.sum(), 8)

View File

@@ -0,0 +1,181 @@
import numpy as np
from skimage._shared import testing
from skimage._shared.testing import assert_equal
from skimage.util.shape import view_as_blocks, view_as_windows
def test_view_as_blocks_block_not_a_tuple():
    """A list is not an acceptable block_shape; TypeError expected."""
    arr = np.arange(10)
    with testing.raises(TypeError):
        view_as_blocks(arr, [5])
def test_view_as_blocks_negative_shape():
    """Negative block dimensions are rejected with ValueError."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(arr, (-2,))
def test_view_as_blocks_block_too_large():
    """A block larger than the array itself is rejected with ValueError."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(arr, (11,))
def test_view_as_blocks_wrong_block_dimension():
    """block_shape dimensionality must match the array's; ValueError otherwise."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(arr, (2, 2))
def test_view_as_blocks_1D_array_wrong_block_shape():
    """Block length that does not evenly divide the array raises ValueError."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(arr, (3,))
def test_view_as_blocks_1D_array():
    """A length-10 vector splits evenly into two blocks of five."""
    arr = np.arange(10)
    blocks = view_as_blocks(arr, (5,))
    assert_equal(blocks, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
def test_view_as_blocks_2D_array():
    """2x2 blocks of a 4x4 array: spot-check one block and one element."""
    arr = np.arange(4 * 4).reshape(4, 4)
    blocks = view_as_blocks(arr, (2, 2))
    assert_equal(blocks[0, 1], np.array([[2, 3], [6, 7]]))
    assert_equal(blocks[1, 0, 1, 1], 13)
def test_view_as_blocks_3D_array():
    """(1, 2, 2) blocks of a (4, 4, 6) volume give a (4, 2, 3) grid of blocks."""
    arr = np.arange(4 * 4 * 6).reshape(4, 4, 6)
    blocks = view_as_blocks(arr, (1, 2, 2))
    assert_equal(blocks.shape, (4, 2, 3, 1, 2, 2))
    expected = np.array([[[[52, 53], [58, 59]]], [[[76, 77], [82, 83]]]])
    assert_equal(blocks[2:, 0, 2], expected)
def test_view_as_windows_input_not_array():
    """A plain list is not accepted as input; TypeError expected."""
    data = [1, 2, 3, 4, 5]
    with testing.raises(TypeError):
        view_as_windows(data, (2,))
def test_view_as_windows_wrong_window_dimension():
    """window_shape dimensionality must match the array's; ValueError otherwise."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_windows(arr, (2, 2))
def test_view_as_windows_negative_window_length():
    """Negative window lengths are rejected with ValueError."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_windows(arr, (-1,))
def test_view_as_windows_window_too_large():
    """A window larger than the array itself is rejected with ValueError."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_windows(arr, (11,))
def test_view_as_windows_step_below_one():
    """Steps smaller than 1 are rejected with ValueError."""
    arr = np.arange(10)
    with testing.raises(ValueError):
        view_as_windows(arr, (11,), step=0.9)
def test_view_as_windows_1D():
    """Sliding 3-wide windows over arange(10): 8 windows of consecutive triples."""
    arr = np.arange(10)
    windows = view_as_windows(arr, (3,))
    # Window i is [i, i+1, i+2]; valid start positions are 0..7.
    expected = np.array([np.arange(start, start + 3) for start in range(8)])
    assert_equal(windows, expected)
def test_view_as_windows_2D():
    """4x3 windows over a 5x4 array give a (2, 2) grid of windows."""
    arr = np.arange(5 * 4).reshape(5, 4)
    windows = view_as_windows(arr, (4, 3))
    assert_equal(windows.shape, (2, 2, 4, 3))
    expected = np.array(
        [
            [
                [[0, 1, 2], [4, 5, 6], [8, 9, 10], [12, 13, 14]],
                [[1, 2, 3], [5, 6, 7], [9, 10, 11], [13, 14, 15]],
            ],
            [
                [[4, 5, 6], [8, 9, 10], [12, 13, 14], [16, 17, 18]],
                [[5, 6, 7], [9, 10, 11], [13, 14, 15], [17, 18, 19]],
            ],
        ]
    )
    assert_equal(windows, expected)
def test_view_as_windows_with_skip():
    """A scalar step strides every axis; a large step shrinks the window grid."""
    arr = np.arange(20).reshape((5, 4))
    stepped = view_as_windows(arr, 2, step=2)
    expected = [
        [[[0, 1], [4, 5]], [[2, 3], [6, 7]]],
        [[[8, 9], [12, 13]], [[10, 11], [14, 15]]],
    ]
    assert_equal(stepped, expected)
    # Step larger than the span leaves a single window per axis.
    sparse = view_as_windows(arr, 2, step=4)
    assert_equal(sparse.shape, (1, 1, 2, 2))
def test_views_non_contiguous():
    """Blocks/windows of a non-contiguous (strided) input see the parent's data.

    Slicing every other row makes the array non-contiguous; both
    ``view_as_blocks`` and ``view_as_windows`` must still produce windows over
    the selected rows (0 and 2 of the original 4x4 arange).
    """
    arr = np.arange(16).reshape((4, 4))
    arr = arr[::2, :]  # rows 0 and 2 only -> non-contiguous view
    res_b = view_as_blocks(arr, (2, 2))
    res_w = view_as_windows(arr, (2, 2))
    # Fixed: removed leftover debug print() calls from the test body.
    expected_b = [[[[0, 1], [8, 9]], [[2, 3], [10, 11]]]]
    expected_w = [[[[0, 1], [8, 9]], [[1, 2], [9, 10]], [[2, 3], [10, 11]]]]
    assert_equal(res_b, expected_b)
    assert_equal(res_w, expected_w)
def test_view_as_windows_step_tuple():
    """Step may be a scalar (applied to all axes) or a per-axis tuple."""
    arr = np.arange(24).reshape((6, 4))
    # Scalar step=3 on both axes: only one column of windows fits.
    scalar_step = view_as_windows(arr, (3, 2), step=3)
    assert scalar_step.shape == (2, 1, 3, 2)
    assert scalar_step.size != arr.size
    # Tuple step=(3, 2): windows tile the array exactly.
    tuple_step = view_as_windows(arr, (3, 2), step=(3, 2))
    assert tuple_step.shape == (2, 2, 3, 2)
    assert tuple_step.size == arr.size
    expected = [
        [[[0, 1], [4, 5], [8, 9]], [[2, 3], [6, 7], [10, 11]]],
        [[[12, 13], [16, 17], [20, 21]], [[14, 15], [18, 19], [22, 23]]],
    ]
    assert_equal(tuple_step, expected)

View File

@@ -0,0 +1,63 @@
import numpy as np
import pytest
from skimage.util import slice_along_axes
rng = np.random.default_rng()
def test_2d_crop_0():
    """Cropping only axis 0 leaves axis 1 untouched."""
    img = rng.random((50, 50))
    result = slice_along_axes(img, [(0, 25)])
    np.testing.assert_array_equal(result, img[:25, :])
def test_2d_crop_1():
    """Two limit pairs crop both axes in order."""
    img = rng.random((50, 50))
    result = slice_along_axes(img, [(0, 25), (0, 10)])
    np.testing.assert_array_equal(result, img[:25, :10])
def test_2d_crop_2():
    """Explicit axes=[1, 0] applies the limit pairs in the given axis order."""
    img = rng.random((50, 50))
    result = slice_along_axes(img, [(0, 25), (0, 30)], axes=[1, 0])
    np.testing.assert_array_equal(result, img[:30, :25])
def test_2d_negative():
    """Negative stop values count from the end, like ordinary slicing."""
    img = rng.random((50, 50))
    result = slice_along_axes(img, [(5, -5), (6, -6)])
    np.testing.assert_array_equal(result, img[5:-5, 6:-6])
def test_copy():
    """copy=False returns a view of the input; copy=True returns fresh data."""
    img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    view = slice_along_axes(img, [(0, 3)], axes=[1], copy=False)
    duplicate = slice_along_axes(img, [(0, 3)], axes=[0], copy=True)
    assert view.base is img
    assert duplicate.base is not img
def test_nd_crop():
    """A single limit pair with axes=[2] crops only the last axis of a volume."""
    vol = rng.random((50, 50, 50))
    result = slice_along_axes(vol, [(0, 25)], axes=[2])
    np.testing.assert_array_equal(result, vol[:, :, :25])
def test_axes_invalid():
    """An axis index beyond ndim raises ValueError."""
    img = np.empty((2, 3))
    with pytest.raises(ValueError):
        slice_along_axes(img, [(0, 3)], axes=[2])
def test_axes_limit_invalid():
    """A stop limit past the axis length raises ValueError."""
    img = np.empty((50, 50))
    with pytest.raises(ValueError):
        slice_along_axes(img, [(0, 51)], axes=[0])
def test_too_many_axes():
    """More limit pairs than array dimensions raises ValueError."""
    img = np.empty((10, 10))
    with pytest.raises(ValueError):
        slice_along_axes(img, [(0, 1), (0, 1), (0, 1)])

View File

@@ -0,0 +1,38 @@
import numpy as np
from skimage.util import unique_rows
from skimage._shared import testing
from skimage._shared.testing import assert_equal
def test_discontiguous_array():
    """Non-contiguous input is handled (copied internally before the row view)."""
    arr = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]], np.uint8)
    arr = arr[::2]  # strided view: rows 0 and 2, which are identical
    result = unique_rows(arr)
    assert_equal(result, np.array([[1, 0, 1]], np.uint8))
def test_uint8_array():
    """Duplicate uint8 rows collapse to a single occurrence."""
    arr = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]], np.uint8)
    result = unique_rows(arr)
    assert_equal(result, np.array([[0, 1, 0], [1, 0, 1]], np.uint8))
def test_float_array():
    """Duplicate float rows collapse to a single occurrence."""
    arr = np.array([[1.1, 0.0, 1.1], [0.0, 1.1, 0.0], [1.1, 0.0, 1.1]], float)
    result = unique_rows(arr)
    assert_equal(result, np.array([[0.0, 1.1, 0.0], [1.1, 0.0, 1.1]], float))
def test_1d_array():
    """1D input is rejected with ValueError."""
    arr = np.array([1, 0, 1, 1], np.uint8)
    with testing.raises(ValueError):
        unique_rows(arr)
def test_3d_array():
    """3D input is rejected with ValueError."""
    arr = np.arange(8).reshape((2, 2, 2))
    with testing.raises(ValueError):
        unique_rows(arr)

View File

@@ -0,0 +1,51 @@
import numpy as np
def unique_rows(ar):
    """Remove repeated rows from a 2D array.

    In particular, if given an array of coordinates of shape
    (Npoints, Ndim), it will remove repeated points.

    Parameters
    ----------
    ar : ndarray, shape (M, N)
        The input array.

    Returns
    -------
    ar_out : ndarray, shape (P, N)
        A copy of the input array with repeated rows removed.

    Raises
    ------
    ValueError : if `ar` is not two-dimensional.

    Notes
    -----
    The function will generate a copy of `ar` if it is not
    C-contiguous, which will negatively affect performance for large
    input arrays.

    Examples
    --------
    >>> ar = np.array([[1, 0, 1],
    ...                [0, 1, 0],
    ...                [1, 0, 1]], np.uint8)
    >>> unique_rows(ar)
    array([[0, 1, 0],
           [1, 0, 1]], dtype=uint8)
    """
    if ar.ndim != 2:
        raise ValueError(f"unique_rows() only makes sense for 2D arrays, got {ar.ndim}")
    # The byte-string view below requires C-contiguous memory; copy if needed.
    contiguous = np.ascontiguousarray(ar)
    # np.unique() compares items of a raveled array. Viewing each row as a
    # single fixed-width byte string makes whole rows compare as one item.
    row_bytes = contiguous.view(f"|S{contiguous.itemsize * contiguous.shape[1]}")
    _, first_occurrences = np.unique(row_bytes, return_index=True)
    return contiguous[first_occurrences]