Remove CondaPkg environment

This commit is contained in:
ton
2023-04-06 13:53:47 +07:00
parent 0a57ed7884
commit c43d949309
3329 changed files with 5725 additions and 447022 deletions

View File

@@ -1,52 +0,0 @@
from ._find_contours import find_contours
from ._marching_cubes_lewiner import marching_cubes, mesh_surface_area
from ._regionprops import (regionprops, perimeter,
perimeter_crofton, euler_number, regionprops_table)
from ._polygon import approximate_polygon, subdivide_polygon
from .pnpoly import (points_in_poly, grid_points_in_poly)
from ._moments import (moments, moments_central, moments_coords,
moments_coords_central, moments_normalized, centroid,
moments_hu, inertia_tensor, inertia_tensor_eigvals)
from .profile import profile_line
from .fit import LineModelND, CircleModel, EllipseModel, ransac
from .block import block_reduce
from ._label import label
from .entropy import shannon_entropy
from ._blur_effect import blur_effect
from ._colocalization import (pearson_corr_coeff, manders_coloc_coeff,
manders_overlap_coeff, intersection_coeff)
# Public names re-exported by ``skimage.measure`` (consumed by
# ``from skimage.measure import *`` and the documentation tooling).
__all__ = [
    'find_contours',
    'regionprops',
    'regionprops_table',
    'perimeter',
    'perimeter_crofton',
    'euler_number',
    'approximate_polygon',
    'subdivide_polygon',
    'LineModelND',
    'CircleModel',
    'EllipseModel',
    'ransac',
    'block_reduce',
    'moments',
    'moments_central',
    'moments_coords',
    'moments_coords_central',
    'moments_normalized',
    'moments_hu',
    'inertia_tensor',
    'inertia_tensor_eigvals',
    'marching_cubes',
    'mesh_surface_area',
    'profile_line',
    'label',
    'points_in_poly',
    'grid_points_in_poly',
    'shannon_entropy',
    'blur_effect',
    'pearson_corr_coeff',
    'manders_coloc_coeff',
    'manders_overlap_coeff',
    'intersection_coeff',
]

View File

@@ -1,81 +0,0 @@
import numpy as np
import scipy.ndimage as ndi
from ..color import rgb2gray
from ..util import img_as_float
__all__ = ['blur_effect']
def blur_effect(image, h_size=11, channel_axis=None, reduce_func=np.max):
    """Measure the strength of blur in an image.

    The metric ranges from 0 (no blur) to 1 (maximal blur) and is computed
    per axis, then aggregated with `reduce_func`.

    Parameters
    ----------
    image : ndarray
        RGB or grayscale nD image. Color input is converted to grayscale
        before the blur metric is computed.
    h_size : int, optional
        Size of the re-blurring filter. Keep it fixed when comparing
        results across images; the default (11) is usually sufficient.
    channel_axis : int or None, optional
        If None, the image is assumed to be grayscale (single-channel).
        Otherwise, the axis of the array that holds color channels.
    reduce_func : callable, optional
        Aggregation applied to the per-axis blur metrics. If None, the
        full list is returned (i-th element = metric along the i-th axis).

    Returns
    -------
    blur : float (0 to 1) or list of floats
        Blur metric; by default the maximum over all axes.

    References
    ----------
    .. [1] Frederique Crete, Thierry Dolmiere, Patricia Ladret, and Marina
       Nicolas "The blur effect: perception and estimation with a new
       no-reference perceptual blur metric" Proc. SPIE 6492, Human Vision and
       Electronic Imaging XII, 64920I (2007)
       https://hal.archives-ouvertes.fr/hal-00232709
       :DOI:`10.1117/12.702790`
    """
    if channel_axis is not None:
        try:
            # Move color channels to the last axis, as rgb2gray expects.
            image = np.moveaxis(image, channel_axis, -1)
        except np.AxisError:
            print('channel_axis must be one of the image array dimensions')
            raise
        except TypeError:
            print('channel_axis must be an integer')
            raise
        image = rgb2gray(image)

    # Local import mirrors the original; presumably avoids a circular
    # import between measure and filters.
    from ..filters import sobel

    image = img_as_float(image)
    # Trim a border before summing: the filter and gradient responses are
    # unreliable at the array edges.
    interior = tuple(slice(2, dim - 1) for dim in image.shape)

    metrics = []
    for axis in range(image.ndim):
        reblurred = ndi.uniform_filter1d(image, h_size, axis=axis)
        sharp_grad = np.abs(sobel(image, axis=axis))
        blur_grad = np.abs(sobel(reblurred, axis=axis))
        # Gradient magnitude lost by re-blurring; a sharp image loses a lot,
        # an already-blurry image loses little.
        lost = np.maximum(0, sharp_grad - blur_grad)
        total_sharp = np.sum(sharp_grad[interior])
        total_lost = np.sum(lost[interior])
        metrics.append(np.abs(total_sharp - total_lost) / total_sharp)

    if reduce_func is None:
        return metrics
    return reduce_func(metrics)

View File

@@ -1,303 +0,0 @@
import numpy as np
from scipy.stats import pearsonr
from .._shared.utils import check_shape_equality, as_binary_ndarray
__all__ = ['pearson_corr_coeff',
'manders_coloc_coeff',
'manders_overlap_coeff',
'intersection_coeff',
]
def pearson_corr_coeff(image0, image1, mask=None):
    r"""Calculate Pearson's Correlation Coefficient between pixel intensities
    in channels.

    Parameters
    ----------
    image0 : (M, N) ndarray
        Image of channel A.
    image1 : (M, N) ndarray
        Image of channel B to be correlated with channel A.
        Must have same dimensions as `image0`.
    mask : (M, N) ndarray of dtype bool, optional
        Only `image0` and `image1` pixels within this region of interest mask
        are included in the calculation. Must have same dimensions as
        `image0`.

    Returns
    -------
    pcc : float
        Pearson's correlation coefficient of the pixel intensities between
        the two images, within the mask if provided.
    p-value : float
        Two-tailed p-value.

    Notes
    -----
    Pearson's Correlation Coefficient (PCC) measures the linear correlation
    between the pixel intensities of the two images; it ranges from -1
    (perfect linear anti-correlation) to +1 (perfect linear correlation).
    The p-value computation assumes normally distributed intensities.
    SciPy's ``pearsonr`` implementation is used; see [1]_ for caveats.

    .. math::
        r = \frac{\sum (A_i - m_{A}) (B_i - m_{B})}
                 {\sqrt{\sum (A_i - m_{A})^2 \sum (B_i - m_{B})^2}}

    where :math:`A_i` / :math:`B_i` are the :math:`i^{th}` pixel values of
    `image0` / `image1` and :math:`m_{A}` / :math:`m_{B}` their means.

    A low PCC does not necessarily mean the channels are unrelated — only
    that they are not *linearly* related. Consider Spearman's rank
    correlation for non-linear monotone relationships [2]_, or
    co-occurrence measures (MCC, intersection coefficient) [3]_ [4]_.
    PCC is sensitive to noise and background, so masking to relevant
    regions and denoising first is important [3]_ [4]_.

    References
    ----------
    .. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html # noqa
    .. [2] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html # noqa
    .. [3] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
           guide to evaluating colocalization in biological microscopy.
           American journal of physiology. Cell physiology, 300(4), C723-C742.
           https://doi.org/10.1152/ajpcell.00462.2010
    .. [4] Bolte, S. and Cordelières, F.P. (2006), A guided tour into
           subcellular colocalization analysis in light microscopy. Journal of
           Microscopy, 224: 213-232.
           https://doi.org/10.1111/j.1365-2818.2006.01706.x
    """
    image0 = np.asarray(image0)
    image1 = np.asarray(image1)
    if mask is None:
        check_shape_equality(image0, image1)
    else:
        mask = as_binary_ndarray(mask, variable_name="mask")
        check_shape_equality(image0, image1, mask)
        # Boolean indexing both selects the ROI and flattens the data.
        image0 = image0[mask]
        image1 = image1[mask]
    # pearsonr requires 1D input.
    return pearsonr(image0.reshape(-1), image1.reshape(-1))
def manders_coloc_coeff(image0, image1_mask, mask=None):
    r"""Manders' colocalization coefficient between two channels.

    Parameters
    ----------
    image0 : (M, N) ndarray
        Image of channel A. All pixel values should be non-negative.
    image1_mask : (M, N) ndarray of dtype bool
        Binary mask with segmented regions of interest in channel B.
        Must have same dimensions as `image0`.
    mask : (M, N) ndarray of dtype bool, optional
        Only `image0` pixel values within this region of interest mask are
        included in the calculation.
        Must have same dimensions as `image0`.

    Returns
    -------
    mcc : float
        Manders' colocalization coefficient.

    Notes
    -----
    Manders' Colocalization Coefficient (MCC) is the fraction of total
    intensity of a certain channel (channel A) that is within the segmented
    region of a second channel (channel B) [1]_. It ranges from 0 for no
    colocalisation to 1 for complete colocalization. It is also referred to
    as M1 and M2.

    MCC is commonly used to measure the colocalization of a particular
    protein in a subcellular compartment. Typically a segmentation mask for
    channel B is generated by thresholding; here the channel B mask is
    provided directly as `image1_mask`, leaving the segmentation method to
    the caller.

    The implemented equation is:

    .. math::
        r = \frac{\sum A_{i,coloc}}{\sum A_i}

    where :math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`,
    :math:`A_{i,coloc} = A_i` if :math:`Bmask_i > 0`, and :math:`Bmask_i`
    is the value of the :math:`i^{th}` pixel in `image1_mask`.

    MCC is sensitive to noise, with diffuse signal in the first channel
    inflating its value. Images should be processed to remove out of focus
    and background light before the MCC is calculated [2]_.

    References
    ----------
    .. [1] Manders, E.M.M., Verbeek, F.J. and Aten, J.A. (1993), Measurement of
           co-localization of objects in dual-colour confocal images. Journal
           of Microscopy, 169: 375-382.
           https://doi.org/10.1111/j.1365-2818.1993.tb03313.x
           https://imagej.net/media/manders.pdf
    .. [2] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
           guide to evaluating colocalization in biological microscopy.
           American journal of physiology. Cell physiology, 300(4), C723-C742.
           https://doi.org/10.1152/ajpcell.00462.2010
    """
    image0 = np.asarray(image0)
    image1_mask = as_binary_ndarray(image1_mask, variable_name="image1_mask")
    if mask is not None:
        mask = as_binary_ndarray(mask, variable_name="mask")
        check_shape_equality(image0, image1_mask, mask)
        image0 = image0[mask]
        image1_mask = image1_mask[mask]
    else:
        check_shape_equality(image0, image1_mask)
    # MCC is only defined for non-negative intensities.
    if image0.min() < 0:
        raise ValueError("image contains negative values")
    # Renamed from ``sum`` to avoid shadowing the builtin.
    total_intensity = np.sum(image0)
    if total_intensity == 0:
        # No signal at all in channel A: define MCC as 0 rather than 0/0.
        return 0
    return np.sum(image0 * image1_mask) / total_intensity
def manders_overlap_coeff(image0, image1, mask=None):
    r"""Manders' overlap coefficient

    Parameters
    ----------
    image0 : (M, N) ndarray
        Image of channel A. All pixel values should be non-negative.
    image1 : (M, N) ndarray
        Image of channel B. All pixel values should be non-negative.
        Must have same dimensions as `image0`
    mask : (M, N) ndarray of dtype bool, optional
        Only `image0` and `image1` pixel values within this region of interest
        mask are included in the calculation.
        Must have same dimensions as `image0`.

    Returns
    -------
    moc: float
        Manders' Overlap Coefficient of pixel intensities between the two
        images.

    Notes
    -----
    Manders' Overlap Coefficient (MOC) is given by the equation [1]_:

    .. math::
        r = \frac{\sum A_i B_i}{\sqrt{\sum A_i^2 \sum B_i^2}}

    where :math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`
    and :math:`B_i` is the value of the :math:`i^{th}` pixel in `image1`.

    It ranges between 0 for no colocalization and 1 for complete
    colocalization of all pixels.

    MOC does not take into account pixel intensities, just the fraction of
    pixels that have positive values for both channels [2]_ [3]_. Its
    usefulness has been criticized as it changes in response to differences
    in both co-occurence and correlation and so a particular MOC value could
    indicate a wide range of colocalization patterns [4]_ [5]_.

    References
    ----------
    .. [1] Manders, E.M.M., Verbeek, F.J. and Aten, J.A. (1993), Measurement of
           co-localization of objects in dual-colour confocal images. Journal
           of Microscopy, 169: 375-382.
           https://doi.org/10.1111/j.1365-2818.1993.tb03313.x
           https://imagej.net/media/manders.pdf
    .. [2] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
           guide to evaluating colocalization in biological microscopy.
           American journal of physiology. Cell physiology, 300(4), C723-C742.
           https://doi.org/10.1152/ajpcell.00462.2010
    .. [3] Bolte, S. and Cordelières, F.P. (2006), A guided tour into
           subcellular colocalization analysis in light microscopy. Journal of
           Microscopy, 224: 213-232.
           https://doi.org/10.1111/j.1365-2818.2006.01706.x
    .. [4] Adler J, Parmryd I. (2010), Quantifying colocalization by
           correlation: the Pearson correlation coefficient is
           superior to the Mander's overlap coefficient. Cytometry A.
           Aug;77(8):733-42. https://doi.org/10.1002/cyto.a.20896
    .. [5] Adler, J, Parmryd, I. Quantifying colocalization: The case for
           discarding the Manders overlap coefficient. Cytometry. 2021; 99:
           910-920. https://doi.org/10.1002/cyto.a.24336
    """
    image0 = np.asarray(image0)
    image1 = np.asarray(image1)
    if mask is not None:
        mask = as_binary_ndarray(mask, variable_name="mask")
        check_shape_equality(image0, image1, mask)
        image0 = image0[mask]
        image1 = image1[mask]
    else:
        check_shape_equality(image0, image1)
    # MOC is only defined for non-negative intensities.
    if image0.min() < 0:
        raise ValueError("image0 contains negative values")
    if image1.min() < 0:
        raise ValueError("image1 contains negative values")
    denom = (np.sum(np.square(image0)) * (np.sum(np.square(image1)))) ** 0.5
    if denom == 0:
        # At least one channel is entirely zero: no overlap possible.
        # Mirrors the zero guard in ``manders_coloc_coeff`` and avoids a
        # division-by-zero NaN.
        return 0
    return np.sum(np.multiply(image0, image1)) / denom
def intersection_coeff(image0_mask, image1_mask, mask=None):
    r"""Fraction of a channel's segmented binary mask that overlaps with a
    second channel's segmented binary mask.

    Parameters
    ----------
    image0_mask : (M, N) ndarray of dtype bool
        Image mask of channel A.
    image1_mask : (M, N) ndarray of dtype bool
        Image mask of channel B.
        Must have same dimensions as `image0_mask`.
    mask : (M, N) ndarray of dtype bool, optional
        Only `image0_mask` and `image1_mask` pixels within this region of
        interest mask are included in the calculation.
        Must have same dimensions as `image0_mask`.

    Returns
    -------
    Intersection coefficient, float
        Fraction of `image0_mask` that overlaps with `image1_mask`.
    """
    image0_mask = as_binary_ndarray(image0_mask, variable_name="image0_mask")
    image1_mask = as_binary_ndarray(image1_mask, variable_name="image1_mask")
    if mask is None:
        check_shape_equality(image0_mask, image1_mask)
    else:
        mask = as_binary_ndarray(mask, variable_name="mask")
        check_shape_equality(image0_mask, image1_mask, mask)
        image0_mask = image0_mask[mask]
        image1_mask = image1_mask[mask]
    count_a = np.count_nonzero(image0_mask)
    if count_a == 0:
        # Empty channel-A mask: the fraction is defined as 0.
        return 0
    count_both = np.count_nonzero(np.logical_and(image0_mask, image1_mask))
    return count_both / count_a

View File

@@ -1,218 +0,0 @@
import numpy as np
from ._find_contours_cy import _get_contour_segments
from collections import deque
# Valid values for the connectivity/orientation string parameters below.
_param_options = ('high', 'low')


def find_contours(image, level=None,
                  fully_connected='low', positive_orientation='low',
                  *,
                  mask=None):
    """Find iso-valued contours in a 2D array for a given level value.

    Uses the "marching squares" method to compute the iso-valued contours of
    the input 2D array for a particular level value. Array values are
    linearly interpolated to provide better precision for the output
    contours.

    Parameters
    ----------
    image : 2D ndarray of double
        Input image in which to find contours.
    level : float, optional
        Value along which to find contours in the array. By default, the
        level is set to (max(image) + min(image)) / 2

        .. versionchanged:: 0.18
            This parameter is now optional.
    fully_connected : str, {'low', 'high'}
        Indicates whether array elements below the given level value are to
        be considered fully-connected (and hence elements above the value
        will only be face connected), or vice-versa.
    positive_orientation : str, {'low', 'high'}
        Indicates whether the output contours will produce positively-oriented
        polygons around islands of low- or high-valued elements. If 'low' then
        contours will wind counter-clockwise around elements below the
        iso-value (i.e. low-valued elements are always on the left of the
        contour).
    mask : 2D ndarray of bool, or None
        A boolean mask, True where we want to draw contours. NaN values are
        always excluded from the considered region (``mask`` is set to
        ``False`` wherever ``array`` is ``NaN``).

    Returns
    -------
    contours : list of (n,2)-ndarrays
        Each contour is an ndarray of shape ``(n, 2)``,
        consisting of n ``(row, column)`` coordinates along the contour.

    See Also
    --------
    skimage.measure.marching_cubes

    Notes
    -----
    The marching squares algorithm is a special case of the marching cubes
    algorithm [1]_. Output contours are not guaranteed to be closed: contours
    which intersect the array edge or a masked-off region will be left open;
    all other contours will be closed. (Closed-ness can be tested by checking
    whether the beginning point equals the end point.)

    Contours are oriented: by default, values lower than the contour value
    are to the left of the contour, so contours wind counter-clockwise around
    islands of low-valued pixels. The order of contours in the output list is
    determined by the position of the smallest ``x,y`` (lexicographic)
    coordinate in the contour, and can be relied upon.

    .. warning:: Array coordinates/values refer to the *center* of array
       elements, so for a binarized array choose a level *between* the low
       and high values; contouring exactly at those values often yields
       degenerate contours.

    References
    ----------
    .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
           Resolution 3D Surface Construction Algorithm. Computer Graphics
           (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
           :DOI:`10.1145/37401.37422`

    Examples
    --------
    >>> a = np.zeros((3, 3))
    >>> a[0, 0] = 1
    >>> a
    array([[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]])
    >>> find_contours(a, 0.5)
    [array([[0. , 0.5],
           [0.5, 0. ]])]
    """
    if fully_connected not in _param_options:
        raise ValueError('Parameter "fully_connected" must be either '
                         '"high" or "low".')
    if positive_orientation not in _param_options:
        raise ValueError('Parameter "positive_orientation" must be either '
                         '"high" or "low".')
    # Bug fix: check dimensionality *before* indexing ``shape``; the
    # original read image.shape[1] first, so a 1D input raised IndexError
    # instead of the intended ValueError.
    if image.ndim != 2:
        raise ValueError('Only 2D arrays are supported.')
    if image.shape[0] < 2 or image.shape[1] < 2:
        raise ValueError("Input array must be at least 2x2.")
    if mask is not None:
        if mask.shape != image.shape:
            raise ValueError('Parameters "array" and "mask"'
                             ' must have same shape.')
        if not np.can_cast(mask.dtype, bool, casting='safe'):
            raise TypeError('Parameter "mask" must be a binary array.')
        # The Cython helper expects uint8; avoid copying when possible.
        mask = mask.astype(np.uint8, copy=False)
    if level is None:
        # nanmin/nanmax so NaN pixels do not poison the default level.
        level = (np.nanmin(image) + np.nanmax(image)) / 2.0

    segments = _get_contour_segments(image.astype(np.float64), float(level),
                                     fully_connected == 'high', mask=mask)
    contours = _assemble_contours(segments)
    if positive_orientation == 'high':
        # Reversing each contour flips its winding direction.
        contours = [c[::-1] for c in contours]
    return contours
def _assemble_contours(segments):
    """Stitch (from_point, to_point) segment pairs into ordered contours.

    Each open contour is kept as a deque; ``starts`` and ``ends`` map a
    contour's current first/last point back to (deque, creation index) so
    that a new segment can be attached in O(1).  Returns the contours as
    arrays, sorted by creation index.
    """
    current_index = 0
    contours = {}
    starts = {}
    ends = {}
    for from_point, to_point in segments:
        # Ignore degenerate segments.
        # This happens when (and only when) one vertex of the square is
        # exactly the contour level, and the rest are above or below.
        # This degenerate vertex will be picked up later by neighboring
        # squares.
        if from_point == to_point:
            continue

        # A contour whose first point is to_point can be prepended to;
        # one whose last point is from_point can be appended to.
        tail, tail_num = starts.pop(to_point, (None, None))
        head, head_num = ends.pop(from_point, (None, None))

        if tail is not None and head is not None:
            # We need to connect these two contours.
            if tail is head:
                # We need to close a contour: add the end point
                head.append(to_point)
            else:  # tail is not head
                # We need to join two distinct contours.
                # We want to keep the first contour segment created, so that
                # the final contours are ordered left->right, top->bottom.
                if tail_num > head_num:
                    # tail was created second. Append tail to head.
                    head.extend(tail)
                    # Remove tail from the detected contours
                    contours.pop(tail_num, None)
                    # Update starts and ends
                    starts[head[0]] = (head, head_num)
                    ends[head[-1]] = (head, head_num)
                else:  # tail_num <= head_num
                    # head was created second. Prepend head to tail.
                    tail.extendleft(reversed(head))
                    # Remove head from the detected contours
                    starts.pop(head[0], None)  # head[0] can be == to_point!
                    contours.pop(head_num, None)
                    # Update starts and ends
                    starts[tail[0]] = (tail, tail_num)
                    ends[tail[-1]] = (tail, tail_num)
        elif tail is None and head is None:
            # We need to add a new contour
            new_contour = deque((from_point, to_point))
            contours[current_index] = new_contour
            starts[from_point] = (new_contour, current_index)
            ends[to_point] = (new_contour, current_index)
            current_index += 1
        elif head is None:  # tail is not None
            # tail first element is to_point: the new segment should be
            # prepended.
            tail.appendleft(from_point)
            # Update starts
            starts[from_point] = (tail, tail_num)
        else:  # tail is None and head is not None:
            # head last element is from_point: the new segment should be
            # appended
            head.append(to_point)
            # Update ends
            ends[to_point] = (head, head_num)
    # Sorting by (creation-index) key keeps first-created contours first.
    return [np.array(contour) for _, contour in sorted(contours.items())]

View File

@@ -1,120 +0,0 @@
from scipy import ndimage
from ._ccomp import label_cython as clabel
def _label_bool(image, background=None, return_num=False, connectivity=None):
    """Faster implementation of clabel for boolean input.

    See context: https://github.com/scikit-image/scikit-image/issues/4833
    """
    # Local import; presumably avoids a circular import with morphology.
    from ..morphology._util import _resolve_neighborhood

    if background == 1:
        # With background==1 the True pixels are background, so invert.
        image = ~image

    if connectivity is None:
        connectivity = image.ndim
    if not 1 <= connectivity <= image.ndim:
        raise ValueError(
            f'Connectivity for {image.ndim}D image should '
            f'be in [1, ..., {image.ndim}]. Got {connectivity}.'
        )

    footprint = _resolve_neighborhood(None, connectivity, image.ndim)
    labeled, num_labels = ndimage.label(image, structure=footprint)
    if return_num:
        return labeled, num_labels
    return labeled
def label(label_image, background=None, return_num=False, connectivity=None):
    r"""Label connected regions of an integer array.

    Two pixels are connected when they are neighbors and have the same value.
    In 2D, they can be neighbors either in a 1- or 2-connected sense.
    The value refers to the maximum number of orthogonal hops to consider a
    pixel/voxel a neighbor::

      1-connectivity     2-connectivity     diagonal connection close-up

           [ ]           [ ]  [ ]  [ ]             [ ]
            |               \  |  /                 |  <- hop 2
      [ ]--[x]--[ ]      [ ]--[x]--[ ]        [x]--[ ]
            |               /  |  \             hop 1
           [ ]           [ ]  [ ]  [ ]

    Parameters
    ----------
    label_image : ndarray of dtype int
        Image to label.
    background : int, optional
        Consider all pixels with this value as background pixels, and label
        them as 0. By default, 0-valued pixels are considered as background
        pixels.
    return_num : bool, optional
        Whether to return the number of assigned labels.
    connectivity : int, optional
        Maximum number of orthogonal hops to consider a pixel/voxel as a
        neighbor. Accepted values range from 1 to input.ndim. If ``None``,
        a full connectivity of ``input.ndim`` is used.

    Returns
    -------
    labels : ndarray of dtype int
        Labeled array, where all connected regions are assigned the same
        integer value.
    num : int, optional
        Number of labels, which equals the maximum label index; only
        returned if return_num is `True`.

    See Also
    --------
    regionprops
    regionprops_table

    References
    ----------
    .. [1] Christophe Fiorio and Jens Gustedt, "Two linear time Union-Find
           strategies for image processing", Theoretical Computer Science
           154 (1996), pp. 165-181.
    .. [2] Kensheng Wu, Ekow Otoo and Arie Shoshani, "Optimizing connected
           component labeling algorithms", Paper LBNL-56864, 2005,
           Lawrence Berkeley National Laboratory (University of California),
           http://repositories.cdlib.org/lbnl/LBNL-56864

    Examples
    --------
    >>> import numpy as np
    >>> x = np.eye(3).astype(int)
    >>> print(x)
    [[1 0 0]
     [0 1 0]
     [0 0 1]]
    >>> print(label(x, connectivity=1))
    [[1 0 0]
     [0 2 0]
     [0 0 3]]
    >>> print(label(x, connectivity=2))
    [[1 0 0]
     [0 1 0]
     [0 0 1]]
    >>> print(label(x, background=-1))
    [[1 2 2]
     [2 1 2]
     [2 2 1]]
    >>> x = np.array([[1, 0, 0],
    ...               [1, 1, 5],
    ...               [0, 0, 0]])
    >>> print(label(x))
    [[1 0 0]
     [1 1 2]
     [0 0 0]]
    """
    # Boolean images have a dedicated fast path; everything else goes
    # through the generic Cython labeller.
    if label_image.dtype != bool:
        return clabel(label_image, background, return_num, connectivity)
    return _label_bool(label_image, background=background,
                       return_num=return_num, connectivity=connectivity)

View File

@@ -1,299 +0,0 @@
import base64
import numpy as np
from . import _marching_cubes_lewiner_luts as mcluts
from . import _marching_cubes_lewiner_cy
def marching_cubes(volume, level=None, *, spacing=(1., 1., 1.),
                   gradient_direction='descent', step_size=1,
                   allow_degenerate=True, method='lewiner', mask=None):
    """Marching cubes algorithm to find surfaces in 3d volumetric data.

    In contrast with the Lorensen et al. approach [2]_, the Lewiner et
    al. algorithm is faster, resolves ambiguities, and guarantees
    topologically correct results, so it is generally the better choice.

    Parameters
    ----------
    volume : (M, N, P) array
        Input data volume to find isosurfaces. Will internally be
        converted to float32 if necessary.
    level : float, optional
        Contour value to search for isosurfaces in `volume`. If not
        given or None, the average of the min and max of vol is used.
    spacing : length-3 tuple of floats, optional
        Voxel spacing in spatial dimensions corresponding to numpy array
        indexing dimensions (M, N, P) as in `volume`.
    gradient_direction : string, optional
        Controls if the mesh was generated from an isosurface with gradient
        descent toward objects of interest (the default), or the opposite,
        considering the *left-hand* rule.
        The two options are:
        * descent : Object was greater than exterior
        * ascent : Exterior was greater than object
    step_size : int, optional
        Step size in voxels. Default 1. Larger steps yield faster but
        coarser results; the result is always topologically correct.
    allow_degenerate : bool, optional
        Whether to allow degenerate (i.e. zero-area) triangles in the
        end-result. Default True. If False, degenerate triangles are
        removed, at the cost of making the algorithm slower.
    method: {'lewiner', 'lorensen'}, optional
        Whether the method of Lewiner et al. or Lorensen et al. will be used.
    mask : (M, N, P) array, optional
        Boolean array; the algorithm runs only on True elements. Useful to
        restrict computation to a sub-region and to produce open surfaces
        that do not end at the border of the cube.

    Returns
    -------
    verts : (V, 3) array
        Spatial coordinates for V unique mesh vertices, in the same order
        as input `volume` (M, N, P). With ``allow_degenerate`` True this
        array may contain duplicate vertices.
    faces : (F, 3) array
        Triangular faces, referencing vertex indices from ``verts``.
    normals : (V, 3) array
        The normal direction at each vertex, as calculated from the data.
    values : (V, ) array
        A measure of the maximum data value in the local region near each
        vertex, usable by visualization tools for colormapping.

    See Also
    --------
    skimage.measure.mesh_surface_area
    skimage.measure.find_contours

    Notes
    -----
    The algorithm [1]_ is an improved version of Chernyaev's Marching
    Cubes 33 algorithm, relying on lookup tables to handle the many
    different cases. This implementation is written in Cython, ported from
    Lewiner's C++ implementation. To quantify the area of the resulting
    isosurface, pass verts and faces to `skimage.measure.mesh_surface_area`.

    References
    ----------
    .. [1] Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan
           Tavares. Efficient implementation of Marching Cubes' cases with
           topological guarantees. Journal of Graphics Tools 8(2)
           pp. 1-15 (december 2003).
           :DOI:`10.1080/10867651.2003.10487582`
    .. [2] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
           Resolution 3D Surface Construction Algorithm. Computer Graphics
           (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
           :DOI:`10.1145/37401.37422`
    """
    if method not in ('lewiner', 'lorensen'):
        raise ValueError("method should be either 'lewiner' or 'lorensen'")
    # Both variants share one implementation; 'lorensen' just flips the
    # classic-cases switch.
    return _marching_cubes_lewiner(volume, level, spacing,
                                   gradient_direction, step_size,
                                   allow_degenerate,
                                   use_classic=(method == 'lorensen'),
                                   mask=mask)
def _marching_cubes_lewiner(volume, level, spacing, gradient_direction,
                            step_size, allow_degenerate, use_classic, mask):
    """Run the Lewiner et al. variant of marching cubes.

    Validates and normalizes every input, dispatches to the compiled
    (Cython) core, and post-processes its output into the z-y-x axis
    ordering used throughout skimage.  See ``marching_cubes`` for the
    full parameter documentation.
    """
    # The volume must be 3D and contain at least one complete cube.
    if not isinstance(volume, np.ndarray) or (volume.ndim != 3):
        raise ValueError('Input volume should be a 3D numpy array.')
    if min(volume.shape) < 2:
        raise ValueError("Input array must be at least 2x2x2.")
    # The compiled core expects contiguous float32; this makes no copy
    # when the data already satisfies that.
    volume = np.ascontiguousarray(volume, np.float32)

    # Iso-level: default to the midpoint of the data range, otherwise
    # coerce to float and ensure the surface can actually intersect the data.
    if level is None:
        level = 0.5 * (volume.min() + volume.max())
    else:
        level = float(level)
        if level < volume.min() or level > volume.max():
            raise ValueError("Surface level must be within volume data range.")

    if len(spacing) != 3:
        raise ValueError("`spacing` must consist of three floats.")

    step_size = int(step_size)
    if step_size < 1:
        raise ValueError('step_size must be at least one.')

    use_classic = bool(use_classic)

    # Lookup tables are decoded lazily and cached across calls.
    luts = _get_mc_luts()

    if mask is not None and mask.shape != volume.shape:
        raise ValueError('volume and mask must have the same shape.')

    # Run the compiled core.
    vertices, faces, normals, values = _marching_cubes_lewiner_cy.marching_cubes(
        volume, level, luts, step_size, use_classic, mask)
    if not len(vertices):
        raise RuntimeError('No surface found at the given iso value.')

    # The core works in x-y-z order; skimage convention is z-y-x.
    vertices = np.fliplr(vertices)
    normals = np.fliplr(normals)
    faces = faces.reshape(-1, 3)

    if gradient_direction == 'descent':
        # MC implementation is right-handed, but gradient_direction is
        # left-handed
        faces = np.fliplr(faces)
    elif gradient_direction != 'ascent':
        raise ValueError(
            f"Incorrect input {gradient_direction} in `gradient_direction`, "
            "see docstring."
        )

    # Scale vertices to physical units unless spacing is the identity.
    if not np.array_equal(spacing, (1, 1, 1)):
        vertices = vertices * np.r_[spacing]

    if allow_degenerate:
        return vertices, faces, normals, values
    return _marching_cubes_lewiner_cy.remove_degenerate_faces(
        vertices.astype(np.float32), faces, normals, values)
def _to_array(args):
shape, text = args
byts = base64.decodebytes(text.encode('utf-8'))
ar = np.frombuffer(byts, dtype='int8')
ar.shape = shape
return ar
# Map an edge-index to two relative pixel positions. The edge index
# represents a point that lies somewhere in between these pixels.
# Linear interpolation should be used to determine where it is exactly.
#
# Each table row gives, for one of the 12 cube edges, the coordinate
# (along one axis) of the edge's two endpoint pixels.  Reading the Z
# table: edges 0-3 lie in the bottom face (z=0 for both endpoints),
# edges 4-7 in the top face (z=1), and edges 8-11 are the vertical
# edges joining the faces (z goes 0 -> 1).
#   0
# 3   1   ->  0x
#   2        xx
EDGETORELATIVEPOSX = np.array([ [0,1],[1,1],[1,0],[0,0], [0,1],[1,1],[1,0],[0,0], [0,0],[1,1],[1,1],[0,0] ], 'int8')
EDGETORELATIVEPOSY = np.array([ [0,0],[0,1],[1,1],[1,0], [0,0],[0,1],[1,1],[1,0], [0,0],[0,0],[1,1],[1,1] ], 'int8')
EDGETORELATIVEPOSZ = np.array([ [0,0],[0,0],[0,0],[0,0], [1,1],[1,1],[1,1],[1,1], [0,1],[0,1],[0,1],[0,1] ], 'int8')
def _get_mc_luts():
    """Return the marching-cubes lookup tables, building them on first use.

    The decoded tables are cached on the ``mcluts`` module itself as
    ``mcluts.THE_LUTS``, so the base64 decoding done by ``_to_array``
    happens at most once per process.
    """
    if not hasattr(mcluts, 'THE_LUTS'):
        # First call: decode every table and hand them to the compiled
        # LutProvider in the exact positional order it expects.
        mcluts.THE_LUTS = _marching_cubes_lewiner_cy.LutProvider(
            EDGETORELATIVEPOSX, EDGETORELATIVEPOSY, EDGETORELATIVEPOSZ,
            _to_array(mcluts.CASESCLASSIC), _to_array(mcluts.CASES),
            _to_array(mcluts.TILING1), _to_array(mcluts.TILING2), _to_array(mcluts.TILING3_1), _to_array(mcluts.TILING3_2),
            _to_array(mcluts.TILING4_1), _to_array(mcluts.TILING4_2), _to_array(mcluts.TILING5), _to_array(mcluts.TILING6_1_1),
            _to_array(mcluts.TILING6_1_2), _to_array(mcluts.TILING6_2), _to_array(mcluts.TILING7_1),
            _to_array(mcluts.TILING7_2), _to_array(mcluts.TILING7_3), _to_array(mcluts.TILING7_4_1),
            _to_array(mcluts.TILING7_4_2), _to_array(mcluts.TILING8), _to_array(mcluts.TILING9),
            _to_array(mcluts.TILING10_1_1), _to_array(mcluts.TILING10_1_1_), _to_array(mcluts.TILING10_1_2),
            _to_array(mcluts.TILING10_2), _to_array(mcluts.TILING10_2_), _to_array(mcluts.TILING11),
            _to_array(mcluts.TILING12_1_1), _to_array(mcluts.TILING12_1_1_), _to_array(mcluts.TILING12_1_2),
            _to_array(mcluts.TILING12_2), _to_array(mcluts.TILING12_2_), _to_array(mcluts.TILING13_1),
            _to_array(mcluts.TILING13_1_), _to_array(mcluts.TILING13_2), _to_array(mcluts.TILING13_2_),
            _to_array(mcluts.TILING13_3), _to_array(mcluts.TILING13_3_), _to_array(mcluts.TILING13_4),
            _to_array(mcluts.TILING13_5_1), _to_array(mcluts.TILING13_5_2), _to_array(mcluts.TILING14),
            _to_array(mcluts.TEST3), _to_array(mcluts.TEST4), _to_array(mcluts.TEST6),
            _to_array(mcluts.TEST7), _to_array(mcluts.TEST10), _to_array(mcluts.TEST12),
            _to_array(mcluts.TEST13), _to_array(mcluts.SUBCONFIG13),
            )
    return mcluts.THE_LUTS
def mesh_surface_area(verts, faces):
    """Compute surface area, given vertices and triangular faces.

    Parameters
    ----------
    verts : (V, 3) array of floats
        Array containing (x, y, z) coordinates for V unique mesh vertices.
    faces : (F, 3) array of ints
        List of length-3 lists of integers, referencing vertex coordinates as
        provided in `verts`.

    Returns
    -------
    area : float
        Surface area of mesh. Units now [coordinate units] ** 2.

    Notes
    -----
    The arguments expected by this function are the first two outputs from
    `skimage.measure.marching_cubes`. For unit correct output, ensure correct
    `spacing` was passed to `skimage.measure.marching_cubes`.
    This algorithm works properly only if the ``faces`` provided are all
    triangles.

    See Also
    --------
    skimage.measure.marching_cubes
    """
    # Gather the three corner points of every triangle, then form two
    # edge vectors per triangle.
    triangles = verts[faces]
    edge_a = triangles[:, 1, :] - triangles[:, 0, :]
    edge_b = triangles[:, 2, :] - triangles[:, 0, :]
    # Each triangle's area is half the magnitude of the edge cross product.
    return np.linalg.norm(np.cross(edge_a, edge_b), axis=1).sum() / 2.0

View File

@@ -1,528 +0,0 @@
# This file was auto-generated from `mc_meta/LookUpTable.h` by
# `mc_meta/createluts.py`. The `mc_meta` scripts are not
# distributed with scikit-image, but are available in the
# repository under tools/precompute/mc_meta.
#static const char casesClassic[256][16]
CASESCLASSIC = (256, 16), """
/////////////////////wAIA/////////////////8AAQn/////////////////AQgDCQgB////
/////////wECCv////////////////8ACAMBAgr/////////////CQIKAAIJ/////////////wII
AwIKCAoJCP////////8DCwL/////////////////AAsCCAsA/////////////wEJAAIDC///////
//////8BCwIBCQsJCAv/////////AwoBCwoD/////////////wAKAQAICggLCv////////8DCQAD
CwkLCgn/////////CQgKCggL/////////////wQHCP////////////////8EAwAHAwT/////////
////AAEJCAQH/////////////wQBCQQHAQcDAf////////8BAgoIBAf/////////////AwQHAwAE
AQIK/////////wkCCgkAAggEB/////////8CCgkCCQcCBwMHCQT/////CAQHAwsC////////////
/wsEBwsCBAIABP////////8JAAEIBAcCAwv/////////BAcLCQQLCQsCCQIB/////wMKAQMLCgcI
BP////////8BCwoBBAsBAAQHCwT/////BAcICQALCQsKCwAD/////wQHCwQLCQkLCv////////8J
BQT/////////////////CQUEAAgD/////////////wAFBAEFAP////////////8IBQQIAwUDAQX/
////////AQIKCQUE/////////////wMACAECCgQJBf////////8FAgoFBAIEAAL/////////AgoF
AwIFAwUEAwQI/////wkFBAIDC/////////////8ACwIACAsECQX/////////AAUEAAEFAgML////
/////wIBBQIFCAIICwQIBf////8KAwsKAQMJBQT/////////BAkFAAgBCAoBCAsK/////wUEAAUA
CwULCgsAA/////8FBAgFCAoKCAv/////////CQcIBQcJ/////////////wkDAAkFAwUHA///////
//8ABwgAAQcBBQf/////////AQUDAwUH/////////////wkHCAkFBwoBAv////////8KAQIJBQAF
AwAFBwP/////CAACCAIFCAUHCgUC/////wIKBQIFAwMFB/////////8HCQUHCAkDCwL/////////
CQUHCQcCCQIAAgcL/////wIDCwABCAEHCAEFB/////8LAgELAQcHAQX/////////CQUICAUHCgED
CgML/////wUHAAUACQcLAAEACgsKAP8LCgALAAMKBQAIAAcFBwD/CwoFBwsF/////////////woG
Bf////////////////8ACAMFCgb/////////////CQABBQoG/////////////wEIAwEJCAUKBv//
//////8BBgUCBgH/////////////AQYFAQIGAwAI/////////wkGBQkABgACBv////////8FCQgF
CAIFAgYDAgj/////AgMLCgYF/////////////wsACAsCAAoGBf////////8AAQkCAwsFCgb/////
////BQoGAQkCCQsCCQgL/////wYDCwYFAwUBA/////////8ACAsACwUABQEFCwb/////AwsGAAMG
AAYFAAUJ/////wYFCQYJCwsJCP////////8FCgYEBwj/////////////BAMABAcDBgUK////////
/wEJAAUKBggEB/////////8KBgUBCQcBBwMHCQT/////BgECBgUBBAcI/////////wECBQUCBgMA
BAMEB/////8IBAcJAAUABgUAAgb/////BwMJBwkEAwIJBQkGAgYJ/wMLAgcIBAoGBf////////8F
CgYEBwIEAgACBwv/////AAEJBAcIAgMLBQoG/////wkCAQkLAgkECwcLBAUKBv8IBAcDCwUDBQEF
Cwb/////BQELBQsGAQALBwsEAAQL/wAFCQAGBQADBgsGAwgEB/8GBQkGCQsEBwkHCwn/////CgQJ
BgQK/////////////wQKBgQJCgAIA/////////8KAAEKBgAGBAD/////////CAMBCAEGCAYEBgEK
/////wEECQECBAIGBP////////8DAAgBAgkCBAkCBgT/////AAIEBAIG/////////////wgDAggC
BAQCBv////////8KBAkKBgQLAgP/////////AAgCAggLBAkKBAoG/////wMLAgABBgAGBAYBCv//
//8GBAEGAQoECAECAQsICwH/CQYECQMGCQEDCwYD/////wgLAQgBAAsGAQkBBAYEAf8DCwYDBgAA
BgT/////////BgQICwYI/////////////wcKBgcICggJCv////////8ABwMACgcACQoGBwr/////
CgYHAQoHAQcIAQgA/////woGBwoHAQEHA/////////8BAgYBBggBCAkIBgf/////AgYJAgkBBgcJ
AAkDBwMJ/wcIAAcABgYAAv////////8HAwIGBwL/////////////AgMLCgYICggJCAYH/////wIA
BwIHCwAJBwYHCgkKB/8BCAABBwgBCgcGBwoCAwv/CwIBCwEHCgYBBgcB/////wgJBggGBwkBBgsG
AwEDBv8ACQELBgf/////////////BwgABwAGAwsACwYA/////wcLBv////////////////8HBgv/
////////////////AwAICwcG/////////////wABCQsHBv////////////8IAQkIAwELBwb/////
////CgECBgsH/////////////wECCgMACAYLB/////////8CCQACCgkGCwf/////////BgsHAgoD
CggDCgkI/////wcCAwYCB/////////////8HAAgHBgAGAgD/////////AgcGAgMHAAEJ////////
/wEGAgEIBgEJCAgHBv////8KBwYKAQcBAwf/////////CgcGAQcKAQgHAQAI/////wADBwAHCgAK
CQYKB/////8HBgoHCggICgn/////////BggECwgG/////////////wMGCwMABgAEBv////////8I
BgsIBAYJAAH/////////CQQGCQYDCQMBCwMG/////wYIBAYLCAIKAf////////8BAgoDAAsABgsA
BAb/////BAsIBAYLAAIJAgoJ/////woJAwoDAgkEAwsDBgQGA/8IAgMIBAIEBgL/////////AAQC
BAYC/////////////wEJAAIDBAIEBgQDCP////8BCQQBBAICBAb/////////CAEDCAYBCAQGBgoB
/////woBAAoABgYABP////////8EBgMEAwgGCgMAAwkKCQP/CgkEBgoE/////////////wQJBQcG
C/////////////8ACAMECQULBwb/////////BQABBQQABwYL/////////wsHBggDBAMFBAMBBf//
//8JBQQKAQIHBgv/////////BgsHAQIKAAgDBAkF/////wcGCwUECgQCCgQAAv////8DBAgDBQQD
AgUKBQILBwb/BwIDBwYCBQQJ/////////wkFBAAIBgAGAgYIB/////8DBgIDBwYBBQAFBAD/////
BgIIBggHAgEIBAgFAQUI/wkFBAoBBgEHBgEDB/////8BBgoBBwYBAAcIBwAJBQT/BAAKBAoFAAMK
BgoHAwcK/wcGCgcKCAUECgQICv////8GCQUGCwkLCAn/////////AwYLAAYDAAUGAAkF/////wAL
CAAFCwABBQUGC/////8GCwMGAwUFAwH/////////AQIKCQULCQsICwUG/////wALAwAGCwAJBgUG
CQECCv8LCAULBQYIAAUKBQIAAgX/BgsDBgMFAgoDCgUD/////wUICQUCCAUGAgMIAv////8JBQYJ
BgAABgL/////////AQUIAQgABQYIAwgCBgII/wEFBgIBBv////////////8BAwYBBgoDCAYFBgkI
CQb/CgEACgAGCQUABQYA/////wADCAUGCv////////////8KBQb/////////////////CwUKBwUL
/////////////wsFCgsHBQgDAP////////8FCwcFCgsBCQD/////////CgcFCgsHCQgBCAMB////
/wsBAgsHAQcFAf////////8ACAMBAgcBBwUHAgv/////CQcFCQIHCQACAgsH/////wcFAgcCCwUJ
AgMCCAkIAv8CBQoCAwUDBwX/////////CAIACAUCCAcFCgIF/////wkAAQUKAwUDBwMKAv////8J
CAIJAgEIBwIKAgUHBQL/AQMFAwcF/////////////wAIBwAHAQEHBf////////8JAAMJAwUFAwf/
////////CQgHBQkH/////////////wUIBAUKCAoLCP////////8FAAQFCwAFCgsLAwD/////AAEJ
CAQKCAoLCgQF/////woLBAoEBQsDBAkEAQMBBP8CBQECCAUCCwgEBQj/////AAQLAAsDBAULAgsB
BQEL/wACBQAFCQILBQQFCAsIBf8JBAUCCwP/////////////AgUKAwUCAwQFAwgE/////wUKAgUC
BAQCAP////////8DCgIDBQoDCAUEBQgAAQn/BQoCBQIEAQkCCQQC/////wgEBQgFAwMFAf//////
//8ABAUBAAX/////////////CAQFCAUDCQAFAAMF/////wkEBf////////////////8ECwcECQsJ
Cgv/////////AAgDBAkHCQsHCQoL/////wEKCwELBAEEAAcEC/////8DAQQDBAgBCgQHBAsKCwT/
BAsHCQsECQILCQEC/////wkHBAkLBwkBCwILAQAIA/8LBwQLBAICBAD/////////CwcECwQCCAME
AwIE/////wIJCgIHCQIDBwcECf////8JCgcJBwQKAgcIBwACAAf/AwcKAwoCBwQKAQoABAAK/wEK
AggHBP////////////8ECQEEAQcHAQP/////////BAkBBAEHAAgBCAcB/////wQAAwcEA///////
//////8ECAf/////////////////CQoICgsI/////////////wMACQMJCwsJCv////////8AAQoA
CggICgv/////////AwEKCwMK/////////////wECCwELCQkLCP////////8DAAkDCQsBAgkCCwn/
////AAILCAAL/////////////wMCC/////////////////8CAwgCCAoKCAn/////////CQoCAAkC
/////////////wIDCAIICgABCAEKCP////8BCgL/////////////////AQMICQEI////////////
/wAJAf////////////////8AAwj//////////////////////////////////////w==
"""
#static const char cases[256][2]
CASES = (256, 2), """
AP8BAAEBAgABAgMAAgMFAAEDAgEDAwUBAgUFBAUJCAABBAICAwQFAgQCBgIGCQsAAwgFBQcDCQEG
EA4DDAwFGAEFAwECBAUDAwYHAAUKCQAEAwYEBgsOAQYRDAQLBgUZAggFBwUMCAEGEgwFDgcFHAYV
CwQMDwUeCgUGIAYnAgwBBgQAAwUGAAIGBgMFCw4AAwkGBQcEDAEFDgsDCQQFGgMKBgYHBQwCBhMK
AQwNBhgHBwwJDQEHCQwUBiEHDQMMAgoGBwUNCwIFEAwHCAMFHQYWCgIMEQYbDgkGIgUnAg4FFA4F
CQUFIAsKBiMFKQIQDBcGJQcOAxAGLgQGAxUBCAEHAwIEAQYBAwcHAQYKDAACBwUGBgwLAQUPCQIO
BgUbAgkFCAYNDgIGFAwGCgMGGQUSCAIMEAUfCwkFIgYoAg0DCwcCBg4MAwcGDQAMDgcIBhcMCgoE
BhwMFQcKBikDDQUVCQMLCAUhDBYHCwYqAw4OCwUkBiwCEQYvAxIEBwEJAgsGCAYPCgAFEQwICwcG
GgUTDgQMEgYdCAQFIwUoAg8FFgsFDBMGHg4KBiQGKwQECQcFJQcPAxEFLAITAxYBCgUXDAsOCAYf
CQYHDAUqAw8LCwYmBi0EBQUtAxMCFQELCAUFJgUrAhIFLgMUAhYBDAUvAhQDFwENAhcBDgEPAP8=
"""
#static const char tiling1[16][3]
TILING1 = (16, 3), """
AAgDAAEJAQIKAwsCBAcICQUECgYFBwYLBwsGCgUGCQQFBAgHAwILAQoCAAkBAAMI
"""
#static const char tiling2[24][6]
TILING2 = (24, 6), """
AQgDCQgBAAsCCAsABAMABwMECQIKAAIJAAUEAQUAAwoBCwoDAQYFAgYBBwIDBgIHCQcIBQcJBggE
CwgGCgQJBgQKCwUKBwULCwoFBwsFCgkEBgoEBgQICwYICQgHBQkHBwMCBgcCAQUGAgEGAwEKCwMK
AAQFAQAFCQoCAAkCBAADBwQDAAILCAALAQMICQEI
"""
#static const char tiling3_1[24][6]
TILING3_1 = (24, 6), """
AAgDAQIKCQUEAAgDAwAICwcGAQkAAgMLAAEJCAQHCQABBQoGAQIKCQUECgECBgsHCAQHAwsCAgML
CgYFBQoGBAcIBAkFBwYLBQkECwYHBgoFCAcECwMCBQYKBwQIAgsDAgEKBwsGCgIBBAUJAQAJBgoF
CQEABwQIAAkBCwMCCAADBgcLBAUJAwgAAwgACgIB
"""
#static const char tiling3_2[24][12]
TILING3_2 = (24, 12), """
CgMCCggDCgEACAoAAwQIAwUEAwAJBQMJBggHBgAIBgsDAAYDCwADCwkACwIBCQsBBwkEBwEJBwgA
AQcABgEKBgABCQAGCQYFBAoFBAIKBAkBAgQBBwILBwECBwYKAQcKAgcLAgQHAgMIBAIIBQsGBQML
BQoCAwUCCAYHCAoGCAQFCggFCwUGCwkFCwcECQsEBgULBQkLBAcLBAsJBwYIBgoIBQQIBQgKBgsF
CwMFAgoFAgUDCwcCBwQCCAMCCAIECwIHAgEHCgYHCgcBBQoECgIEAQkEAQQCCgEGAQAGBgAJBQYJ
BAkHCQEHAAgHAAcBAwALAAkLAQILAQsJBwgGCAAGAwsGAwYACAQDBAUDCQADCQMFAgMKAwgKAAEK
AAoI
"""
#static const char tiling4_1[8][6]
TILING4_1 = (8, 6), """
AAgDBQoGAAEJCwcGAQIKCAQHCQUEAgMLBAUJCwMCCgIBBwQICQEABgcLAwgABgoF
"""
#static const char tiling4_2[8][18]
TILING4_2 = (8, 18), """
CAUABQgGAwYIBgMKAAoDCgAFCQYBBgkHAAcJBwALAQsACwEGCgcCBwoEAQQKBAEIAggBCAIHCwQD
BAsFAgULBQIJAwkCCQMEAwQLBQsECwUCCQIFAgkDBAMJAgcKBAoHCgQBCAEEAQgCBwIIAQYJBwkG
CQcACwAHAAsBBgELAAUIBggFCAYDCgMGAwoABQAK
"""
#static const char tiling5[48][9]
TILING5 = (48, 9), """
AggDAgoICgkIAQsCAQkLCQgLBAEJBAcBBwMBCAUECAMFAwEFAAoBAAgKCAsKCwQHCwIEAgAEBwAI
BwYABgIACQMACQUDBQcDAwYLAwAGAAQGAwkAAwsJCwoJBQIKBQQCBAACCQYFCQAGAAIGAAcIAAEH
AQUHCgABCgYABgQABgMLBgUDBQEDCgcGCgEHAQMHAQQJAQIEAgYECwECCwcBBwUBCAIDCAQCBAYC
AgUKAgMFAwcFBwoGBwgKCAkKBgkFBgsJCwgJBQgEBQoICgsIBAsHBAkLCQoLBAcLBAsJCQsKBQQI
BQgKCggLBgUJBgkLCwkIBwYKBwoICAoJAgoFAgUDAwUHCAMCCAIEBAIGCwIBCwEHBwEFAQkEAQQC
AgQGCgYHCgcBAQcDBgsDBgMFBQMBCgEACgAGBgAEAAgHAAcBAQcFCQUGCQYAAAYCBQoCBQIEBAIA
AwAJAwkLCwkKAwsGAwYAAAYECQADCQMFBQMHBwgABwAGBgACCwcECwQCAgQAAAEKAAoICAoLCAQF
CAUDAwUBBAkBBAEHBwEDAQILAQsJCQsIAgMIAggKCggJ
"""
#static const char tiling6_1_1[48][9]
TILING6_1_1 = (48, 9), """
BgUKAwEICQgBCwcGCQMBAwkIAQIKBwAEAAcDAwAIBQIGAgUBBQQJAgALCAsACgYFCAIAAggLCgYF
AAQDBwMEAwAIBgQKCQoECAMACgcFBwoLCAQHCgACAAoJBwYLAAIJCgkCAgMLBAEFAQQAAAEJBgMH
AwYCCQABCwQGBAsICwcGAQUABAAFAAEJBwULCgsFBAcIAQMKCwoDCQUECwEDAQsKCgECCAUHBQgJ
CAQHAgYBBQEGAQIKBAYICwgGAgMLBQcJCAkHCwIDCQYEBgkKCQUEAwcCBgIHBAUJAgcDBwIGAwIL
BAYJCgkGCwMCCQcFBwkICgIBCAYEBggLBwQIAQYCBgEFAgEKBwUICQgFBAUJAwELCgsBCAcECgMB
AwoLCQEACwUHBQsKBgcLAAUBBQAEAQAJBgQLCAsECQEABwMGAgYDCwMCBQEEAAQBCwYHCQIAAgkK
BwQIAgAKCQoAAAMIBQcKCwoHCAADCgQGBAoJBQYKAwQABAMHBQYKAAIICwgCCQQFCwACAAsICAAD
BgIFAQUCCgIBBAAHAwcABgcLAQMJCAkDCgUGCAEDAQgJ
"""
#static const char tiling6_1_2[48][27]
TILING6_1_2 = (48, 27), """
AQwDDAoDBgMKAwYIBQgGCAUMDAkIAQkMDAUKAQwDAQsMCwEGCQYBBgkHDAcJCQgMDAgDCwcMBAwA
BAEMAQQKBwoECgcCDAIHBwMMDAMAAQIMBgwCBgMMAwYIBQgGCAUADAAFBQEMDAECAwAMAAwCDAkC
BQIJAgULBAsFCwQMDAgLAAgMDAQJAAwCAAoMCgAFCAUABQgGDAYICAsMDAsCCgYMBAwADAUACgAF
AAoDBgMKAwYMDAcDBAcMDAYFBAwGDAgGAwYIBgMKAAoDCgAMDAkKBAkMDAAIBQwHBQgMCAUACgAF
AAoDDAMKCgsMDAsHCAMMAgwAAggMCAIHCgcCBwoEDAQKCgkMDAkACAQMAgwADAsABwALAAcJBgkH
CQYMDAoJAgoMDAYLBQwBBQIMAgULBAsFCwQDDAMEBAAMDAABAgMMBwwDBwAMAAcJBgkHCQYBDAEG
BgIMDAIDAAEMBgwEBgkMCQYBCwEGAQsADAALCwgMDAgECQAMBQwBDAYBCwEGAQsABwALAAcMDAQA
BQQMDAcGBQwHDAkHAAcJBwALAQsACwEMDAoLBQoMDAEJAwwBDAgBBAEIAQQKBwoECgcMDAsKAwsM
DAcIAwwBAwkMCQMECwQDBAsFDAULCwoMDAoBCQUMBwwFBwoMCgcCCAIHAggBDAEICAkMDAkFCgEM
BgwCDAcCCAIHAggBBAEIAQQMDAUBBgUMDAQHBgwEDAoEAQQKBAEIAggBCAIMDAsIBgsMDAIKBwwF
DAsFAgULBQIJAwkCCQMMDAgJBwgMDAMLBAwGBAsMCwQDCQMEAwkCDAIJCQoMDAoGCwIMBwwDDAQD
CQMEAwkCBQIJAgUMDAYCBwYMDAUEAwwHAwQMBAMJAgkDCQIFDAUCAgYMDAYHBAUMBgwEDAsEAwQL
BAMJAgkDCQIMDAoJBgoMDAILBQwHBQsMCwUCCQIFAgkDDAMJCQgMDAgHCwMMBAwGBAoMCgQBCAEE
AQgCDAIICAsMDAsGCgIMAgwGAgcMBwIIAQgCCAEEDAQBAQUMDAUGBwQMBQwHDAoHAgcKBwIIAQgC
CAEMDAkIBQkMDAEKAQwDDAkDBAMJAwQLBQsECwUMDAoLAQoMDAUJAQwDAQgMCAEECgQBBAoHDAcK
CgsMDAsDCAcMBwwFBwkMCQcACwAHAAsBDAELCwoMDAoFCQEMAQwFAQYMBgELAAsBCwAHDAcAAAQM
DAQFBgcMBAwGDAkGAQYJBgELAAsBCwAMDAgLBAgMDAAJAwwHDAAHCQcABwkGAQYJBgEMDAIGAwIM
DAEAAQwFDAIFCwUCBQsEAwQLBAMMDAAEAQAMDAMCAAwCAAsMCwAHCQcABwkGDAYJCQoMDAoCCwYM
AAwCDAgCBwIIAgcKBAoHCgQMDAkKAAkMDAQIBwwFDAgFAAUIBQAKAwoACgMMDAsKBwsMDAMIBgwE
BggMCAYDCgMGAwoADAAKCgkMDAkECAAMAAwEAAUMBQAKAwoACgMGDAYDAwcMDAcEBQYMAgwADAoA
BQAKAAUIBggFCAYMDAsIAgsMDAYKAgwAAgkMCQIFCwUCBQsEDAQLCwgMDAgACQQMAgwGDAMGCAYD
BggFAAUIBQAMDAEFAgEMDAADAAwEDAEECgQBBAoHAgcKBwIMDAMHAAMMDAIBAwwBDAsBBgELAQYJ
BwkGCQcMDAgJAwgMDAcLAwwBAwoMCgMGCAYDBggFDAUICAkMDAkBCgUM
"""
#static const char tiling6_2[48][15]
TILING6_2 = (48, 15), """
AQoDBgMKAwYIBQgGCAUJAQsDCwEGCQYBBgkHCAcJBAEAAQQKBwoECgcCAwIHBgMCAwYIBQgGCAUA
AQAFAAkCBQIJAgULBAsFCwQIAAoCCgAFCAUABQgGCwYIBAUACgAFAAoDBgMKAwYHBAgGAwYIBgMK
AAoDCgAJBQgHCAUACgAFAAoDCwMKAggACAIHCgcCBwoECQQKAgsABwALAAcJBgkHCQYKBQIBAgUL
BAsFCwQDAAMEBwADAAcJBgkHCQYBAgEGBgkECQYBCwEGAQsACAALBQYBCwEGAQsABwALAAcEBQkH
AAcJBwALAQsACwEKAwgBBAEIAQQKBwoECgcLAwkBCQMECwQDBAsFCgULBwoFCgcCCAIHAggBCQEI
BgcCCAIHAggBBAEIAQQFBgoEAQQKBAEIAggBCAILBwsFAgULBQIJAwkCCQMIBAsGCwQDCQMEAwkC
CgIJBwQDCQMEAwkCBQIJAgUGAwQHBAMJAgkDCQIFBgUCBgsEAwQLBAMJAgkDCQIKBQsHCwUCCQIF
AgkDCAMJBAoGCgQBCAEEAQgCCwIIAgcGBwIIAQgCCAEEBQQBBQoHAgcKBwIIAQgCCAEJAQkDBAMJ
AwQLBQsECwUKAQgDCAEECgQBBAoHCwcKBwkFCQcACwAHAAsBCgELAQYFBgELAAsBCwAHBAcABAkG
AQYJBgELAAsBCwAIAwAHCQcABwkGAQYJBgECAQIFCwUCBQsEAwQLBAMAAAsCCwAHCQcABwkGCgYJ
AAgCBwIIAgcKBAoHCgQJBwgFAAUIBQAKAwoACgMLBggECAYDCgMGAwoACQAKAAUEBQAKAwoACgMG
BwYDAgoABQAKAAUIBggFCAYLAgkACQIFCwUCBQsECAQLAgMGCAYDBggFAAUIBQABAAEECgQBBAoH
AgcKBwIDAwsBBgELAQYJBwkGCQcIAwoBCgMGCAYDBggFCQUI
"""
#static const char tiling7_1[16][9]
TILING7_1 = (16, 9), """
CQUECgECCAMACwcGCAMACgECAwAIBQQJBwYLCAQHCQABCwIDCgYFCwIDCQABAAEJBgUKBAcIAQIK
BwYLBQQJAgMLBAcIBgUKCwMCCAcECgUGCgIBCwYHCQQFCQEACgUGCAcEBQYKAwILAQAJBwQIAQAJ
AwILCAADCQQFCwYHBgcLAAMIAgEKBAUJAgEKAAMI
"""
#static const char tiling7_2[16][3][15]
TILING7_2 = (16, 3, 15), """
AQIKAwQIBAMFAAUDBQAJAwAICQEEAgQBBAIFCgUCCQUEAAoBCgAICggCAwIIAwAIAQYKBgEHAgcB
BwILAQIKCwMGAAYDBgAHCAcACwcGAggDCAIKCAoAAQAKCQUECwMGAAYDBgAHCAcACwcGAwQIBAMF
AAUDBQAJAwAIBAkHCwcJBQsJCwUGAAEJAgcLBwIEAwQCBAMIAgMLCAAHAQcABwEECQQBCAQHAwkA
CQMLCQsBAgELAgMLAAUJBQAGAQYABgEKAAEJCgIFAwUCBQMGCwYDBgUKAQsCCwEJCwkDAAMJBgUK
CAAHAQcABwEECQQBCAQHAAUJBQAGAQYABgEKAAEJBQoECAQKBggKCAYHCwcGCQEEAgQBBAIFCgUC
CQUEAQYKBgEHAgcBBwILAQIKBgsFCQULBwkLCQcECAQHCgIFAwUCBQMGCwYDBgUKAgcLBwIEAwQC
BAMIAgMLBwgGCgYIBAoICgQFBwQIBQIKAgUDBgMFAwYLCgUGCwcCBAIHAgQDCAMECwMCBggHCAYK
CAoEBQQKBgcLBAEJAQQCBQIEAgUKBAUJCgYBBwEGAQcCCwIHCgIBBQsGCwUJCwkHBAcJCgUGBwAI
AAcBBAEHAQQJBwQICQUABgAFAAYBCgEGCQEABAoFCgQICggGBwYICwMCCQUABgAFAAYBCgEGCQEA
BQIKAgUDBgMFAwYLCgUGAgsBCQELAwkLCQMACQEACwcCBAIHAgQDCAMECwMCBwAIAAcBBAEHAQQJ
BwQIAAkDCwMJAQsJCwECBAUJBgMLAwYABwAGAAcIBgcLCAQDBQMEAwUACQAFCAADBwkECQcLCQsF
BgULCAADCgYBBwEGAQcCCwIHCgIBBgMLAwYABwAGAAcIBgcLAwgCCgIIAAoICgABCgIBCAQDBQME
AwUACQAFCAADBAEJAQQCBQIEAgUKBAUJAQoACAAKAggKCAID
"""
#static const char tiling7_3[16][3][27]
TILING7_3 = (16, 3, 27), """
DAIKDAoFDAUEDAQIDAgDDAMADAAJDAkBDAECDAUEDAQIDAgDDAMCDAIKDAoBDAEADAAJDAkFBQQM
CgUMAgoMAwIMCAMMAAgMAQAMCQEMBAkMDAAIDAgHDAcGDAYKDAoBDAECDAILDAsDDAMADAcGDAYK
DAoBDAEADAAIDAgDDAMCDAILDAsHBwYMCAcMAAgMAQAMCgEMAgoMAwIMCwMMBgsMCQUMAAkMAwAM
CwMMBgsMBwYMCAcMBAgMBQQMAwAMCwMMBgsMBQYMCQUMBAkMBwQMCAcMAAgMDAMADAAJDAkFDAUG
DAYLDAsHDAcEDAQIDAgDDAEJDAkEDAQHDAcLDAsCDAIDDAMIDAgADAABDAQHDAcLDAsCDAIBDAEJ
DAkADAADDAMIDAgEBAcMCQQMAQkMAgEMCwIMAwsMAAMMCAAMBwgMDAMLDAsGDAYFDAUJDAkADAAB
DAEKDAoCDAIDDAYFDAUJDAkADAADDAMLDAsCDAIBDAEKDAoGBgUMCwYMAwsMAAMMCQAMAQkMAgEM
CgIMBQoMCgYMAQoMAAEMCAAMBwgMBAcMCQQMBQkMBgUMAAEMCAAMBwgMBgcMCgYMBQoMBAUMCQQM
AQkMDAABDAEKDAoGDAYHDAcIDAgEDAQFDAUJDAkACwcMAgsMAQIMCQEMBAkMBQQMCgUMBgoMBwYM
AQIMCQEMBAkMBwQMCwcMBgsMBQYMCgUMAgoMDAECDAILDAsHDAcEDAQJDAkFDAUGDAYKDAoBCAQM
AwgMAgMMCgIMBQoMBgUMCwYMBwsMBAcMAgMMCgIMBQoMBAUMCAQMBwgMBgcMCwYMAwsMDAIDDAMI
DAgEDAQFDAUKDAoGDAYHDAcLDAsCDAQIDAgDDAMCDAIKDAoFDAUGDAYLDAsHDAcEDAMCDAIKDAoF
DAUEDAQIDAgHDAcGDAYLDAsDAwIMCAMMBAgMBQQMCgUMBgoMBwYMCwcMAgsMDAcLDAsCDAIBDAEJ
DAkEDAQFDAUKDAoGDAYHDAIBDAEJDAkEDAQHDAcLDAsGDAYFDAUKDAoCAgEMCwIMBwsMBAcMCQQM
BQkMBgUMCgYMAQoMDAYKDAoBDAEADAAIDAgHDAcEDAQJDAkFDAUGDAEADAAIDAgHDAcGDAYKDAoF
DAUEDAQJDAkBAQAMCgEMBgoMBwYMCAcMBAgMBQQMCQUMAAkMCwMMBgsMBQYMCQUMAAkMAQAMCgEM
AgoMAwIMBQYMCQUMAAkMAwAMCwMMAgsMAQIMCgEMBgoMDAUGDAYLDAsDDAMADAAJDAkBDAECDAIK
DAoFCQEMBAkMBwQMCwcMAgsMAwIMCAMMAAgMAQAMBwQMCwcMAgsMAQIMCQEMAAkMAwAMCAMMBAgM
DAcEDAQJDAkBDAECDAILDAsDDAMADAAIDAgHDAUJDAkADAADDAMLDAsGDAYHDAcIDAgEDAQFDAAD
DAMLDAsGDAYFDAUJDAkEDAQHDAcIDAgAAAMMCQAMBQkMBgUMCwYMBwsMBAcMCAQMAwgMCAAMBwgM
BgcMCgYMAQoMAgEMCwIMAwsMAAMMBgcMCgYMAQoMAAEMCAAMAwgMAgMMCwIMBwsMDAYHDAcIDAgA
DAABDAEKDAoCDAIDDAMLDAsGCgIMBQoMBAUMCAQMAwgMAAMMCQAMAQkMAgEMBAUMCAQMAwgMAgMM
CgIMAQoMAAEMCQAMBQkMDAQFDAUKDAoCDAIDDAMIDAgADAABDAEJDAkE
"""
#static const char tiling7_4_1[16][15]
TILING7_4_1 = (16, 15), """
AwQIBAMKAgoDBAoFCQEAAQYKBgEIAAgBBggHCwMCCwMGCQYDBgkFAAkDBwQIAgcLBwIJAQkCBwkE
CAADAAUJBQALAwsABQsGCgIBCAAHCgcABwoGAQoABAUJCQEECwQBBAsHAgsBBQYKCgIFCAUCBQgE
AwgCBgcLBQIKAgUIBAgFAggDCwcGBAEJAQQLBwsEAQsCCgYFBwAIAAcKBgoHAAoBCQUECQUACwAF
AAsDBgsFAQIKCwcCCQIHAgkBBAkHAwAIBgMLAwYJBQkGAwkACAQHCgYBCAEGAQgABwgGAgMLCAQD
CgMEAwoCBQoEAAEJ
"""
#static const char tiling7_4_2[16][27]
TILING7_4_2 = (16, 27), """
CQQIBAkFCgUJAQoJCgECAAIBAgADCAMACQgACwYKBgsHCAcLAwgLCAMAAgADAAIBCgECCwoCCwMI
AAgDCAAJCAkEBQQJBAUHBgcFBwYLBwsICAcLBwgECQQIAAkICQABAwEAAQMCCwIDCAsDCgUJBQoG
CwYKAgsKCwIDAQMCAwEACQABCgkBCAAJAQkACQEKCQoFBgUKBQYEBwQGBAcIBAgJCQEKAgoBCgIL
CgsGBwYLBgcFBAUHBQQJBQkKCgILAwsCCwMICwgHBAcIBwQGBQYEBgUKBgoLCwIKAgsDCAMLBwgL
CAcEBgQHBAYFCgUGCwoGCgEJAQoCCwIKBgsKCwYHBQcGBwUECQQFCgkFCQAIAAkBCgEJBQoJCgUG
BAYFBgQHCAcECQgECQUKBgoFCgYLCgsCAwILAgMBAAEDAQAJAQkKCwcIBAgHCAQJCAkAAQAJAAED
AgMBAwILAwsICAMLAwgACQAIBAkICQQFBwUEBQcGCwYHCAsHCgYLBwsGCwcICwgDAAMIAwACAQIA
AgEKAgoLCAQJBQkECQUKCQoBAgEKAQIAAwACAAMIAAgJ
"""
#static const char tiling8[6][6]
TILING8 = (6, 6), """
CQgKCggLAQUDAwUHAAQCBAYCAAIEBAIGAQMFAwcFCQoICgsI
"""
#static const char tiling9[8][12]
TILING9 = (8, 12), """
AgoFAwIFAwUEAwQIBAcLCQQLCQsCCQIBCgcGAQcKAQgHAQAIAwYLAAYDAAUGAAkFAwsGAAMGAAYF
AAUJCgYHAQoHAQcIAQgABAsHCQsECQILCQECAgUKAwUCAwQFAwgE
"""
#static const char tiling10_1_1[6][12]
TILING10_1_1 = (6, 12), """
BQoHCwcKCAEJAQgDAQIFBgUCBAMAAwQHCwAIAAsCBAkGCgYJCQAKAgoABggECAYLBwIDAgcGAAEE
BQQBBwkFCQcICgELAwsB
"""
#static const char tiling10_1_1_[6][12]
TILING10_1_1_ = (6, 12), """
BQkHCAcJCwEKAQsDAwIHBgcCBAEAAQQFCgAJAAoCBAgGCwYICAALAgsABgkECQYKBQIBAgUGAAME
BwQDBwoFCgcLCQEIAwgB
"""
#static const char tiling10_1_2[6][24]
TILING10_1_2 = (6, 24), """
AwsHAwcICQgHBQkHCQUKCQoBAwEKCwMKBwYFBwUEAAQFAQAFAAECAAIDBwMCBgcCCwIKBgsKCwYE
CwQIAAgECQAEAAkKAAoCCwIKCwoGBAYKCQQKBAkABAAICwgAAgsABwYFBAcFBwQABwADAgMAAQIA
AgEFAgUGBwgDCwcDBwsKBwoFCQUKAQkKCQEDCQMI
"""
#static const char tiling10_2[6][24]
TILING10_2 = (6, 24), """
DAUJDAkIDAgDDAMBDAEKDAoLDAsHDAcFDAEADAAEDAQHDAcDDAMCDAIGDAYFDAUBBAgMBgQMCgYM
CQoMAAkMAgAMCwIMCAsMDAkEDAQGDAYLDAsIDAgADAACDAIKDAoJAAMMBAAMBQQMAQUMAgEMBgIM
BwYMAwcMCgUMCwoMAwsMAQMMCQEMCAkMBwgMBQcM
"""
#static const char tiling10_2_[6][24]
TILING10_2_ = (6, 24), """
CAcMCQgMAQkMAwEMCwMMCgsMBQoMBwUMBAUMAAQMAwAMBwMMBgcMAgYMAQIMBQEMDAsGDAYEDAQJ
DAkKDAoCDAIADAAIDAgLBgoMBAYMCAQMCwgMAgsMAAIMCQAMCgkMDAcEDAQADAABDAEFDAUGDAYC
DAIDDAMHDAcLDAsKDAoBDAEDDAMIDAgJDAkFDAUH
"""
#static const char tiling11[12][12]
TILING11 = (12, 12), """
AgoJAgkHAgcDBwkEAQYCAQgGAQkICAcGCAMBCAEGCAYEBgEKAAgLAAsFAAUBBQsGCQUHCQcCCQIA
AgcLBQAEBQsABQoLCwMABQQABQALBQsKCwADCQcFCQIHCQACAgsHAAsIAAULAAEFBQYLCAEDCAYB
CAQGBgoBAQIGAQYIAQgJCAYHAgkKAgcJAgMHBwQJ
"""
#static const char tiling12_1_1[24][12]
TILING12_1_1 = (24, 12), """
BwYLCgMCAwoICQgKBgUKCQIBAgkLCAsJCgYFBwkECQcBAwEHBwYLBAgFAwUIBQMBBQQJCAEAAQgK
CwoIAQIKAAkDBQMJAwUHCgECAAsDCwAGBAYACAMAAgkBCQIEBgQCAwAIAgsBBwELAQcFBgUKBwsE
AgQLBAIACQUEBggHCAYAAgAGCAMABwQLCQsECwkKBAcICwADAAsJCgkLBAcIBQkGAAYJBgACCwcG
BAoFCgQCAAIECwIDAQgACAEHBQcBAAEJAwgCBAIIAgQGAgMLAQoABgAKAAYECQABAwoCCgMFBwUD
CQABBAUICggFCAoLCAQHBQsGCwUDAQMFBQQJBgoHAQcKBwEDCgECBQYJCwkGCQsICwIDBgcKCAoH
CggJ
"""
#static const char tiling12_1_1_[24][12]
TILING12_1_1_ = (24, 12), """
AwILCgcGBwoICQgKAgEKCQYFBgkLCAsJCQQFBwoGCgcBAwEHBwQIBgsFAwULBQMBAQAJCAUEBQgK
CwoIAQAJAgoDBQMKAwUHCwMCAAoBCgAGBAYACQEAAggDCAIEBgQCAwILAAgBBwEIAQcFBgcLBQoE
AgQKBAIACAcEBgkFCQYAAgAGCAcEAwALCQsACwkKAAMICwQHBAsJCgkLBAUJBwgGAAYIBgACCgUG
BAsHCwQCAAIECAADAQsCCwEHBQcBAAMIAQkCBAIJAgQGAgEKAwsABgALAAYECgIBAwkACQMFBwUD
CQQFAAEICggBCAoLCwYHBQgECAUDAQMFBQYKBAkHAQcJBwEDCgUGAQIJCwkCCQsICwYHAgMKCAoD
CggJ
"""
#static const char tiling12_1_2[24][24]
TILING12_1_2 = (24, 24), """
BwMLAwcICQgHBgkHCQYKAgoGCwIGAgsDBgIKAgYLCAsGBQgGCAUJAQkFCgEFAQoCCgkFCQoBAwEK
BgMKAwYHBAcGBQQGBAUJBwgLAwsICwMBCwEGBQYBBgUEBgQHCAcEBQEJAQUKCwoFBAsFCwQIAAgE
CQAEAAkBAQkKBQoJCgUHCgcCAwIHAgMAAgABCQEACgsCCwoGBAYKAQQKBAEAAwABAgMBAwILCAkA
CQgEBgQIAwYIBgMCAQIDAAEDAQAJAwsIBwgLCAcFCAUAAQAFAAECAAIDCwMCBgsKAgoLCgIACgAF
BAUABQQHBQcGCwYHCQgECAkAAgAJBQIJAgUGBwYFBAcFBwQICAQACQAEAAkKAAoDCwMKAwsHAwcI
BAgHBAAIAAQJCgkEBwoECgcLAwsHCAMHAwgABAkIAAgJCAACCAIHBgcCBwYFBwUECQQFCwoGCgsC
AAILBwALAAcEBQQHBgUHBQYKCwgDCAsHBQcLAgULBQIBAAECAwACAAMIAAgJBAkICQQGCQYBAgEG
AQIDAQMACAADAgoLBgsKCwYECwQDAAMEAwABAwECCgIBCQoBCgkFBwUJAAcJBwADAgMAAQIAAgEK
CQUBCgEFAQoLAQsACAALAAgEAAQJBQkECAsHCwgDAQMIBAEIAQQFBgUEBwYEBgcLBQoJAQkKCQED
CQMEBwQDBAcGBAYFCgUGCgYCCwIGAgsIAggBCQEIAQkFAQUKBgoFCwcDCAMHAwgJAwkCCgIJAgoG
AgYLBwsG
"""
#static const char tiling12_2[24][24]
TILING12_2 = (24, 24), """
CQgMCgkMAgoMAwIMCwMMBgsMBwYMCAcMCAsMCQgMAQkMAgEMCgIMBQoMBgUMCwYMAwEMBwMMBAcM
CQQMBQkMBgUMCgYMAQoMDAMBDAEFDAUGDAYLDAsHDAcEDAQIDAgDCwoMCAsMAAgMAQAMCQEMBAkM
BQQMCgUMDAUHDAcDDAMCDAIKDAoBDAEADAAJDAkFBAYMAAQMAQAMCgEMAgoMAwIMCwMMBgsMBgQM
AgYMAwIMCAMMAAgMAQAMCQEMBAkMDAcFDAUBDAEADAAIDAgDDAMCDAILDAsHDAIADAAEDAQFDAUK
DAoGDAYHDAcLDAsCAgAMBgIMBwYMCAcMBAgMBQQMCQUMAAkMDAkKDAoLDAsHDAcEDAQIDAgDDAMA
DAAJCgkMCwoMBwsMBAcMCAQMAwgMAAMMCQAMDAACDAIGDAYHDAcIDAgEDAQFDAUJDAkAAAIMBAAM
BQQMCgUMBgoMBwYMCwcMAgsMBQcMAQUMAAEMCAAMAwgMAgMMCwIMBwsMDAQGDAYCDAIDDAMIDAgA
DAABDAEJDAkEDAYEDAQADAABDAEKDAoCDAIDDAMLDAsGBwUMAwcMAgMMCgIMAQoMAAEMCQAMBQkM
DAoLDAsIDAgADAABDAEJDAkEDAQFDAUKAQMMBQEMBgUMCwYMBwsMBAcMCAQMAwgMDAEDDAMHDAcE
DAQJDAkFDAUGDAYKDAoBDAsIDAgJDAkBDAECDAIKDAoFDAUGDAYLDAgJDAkKDAoCDAIDDAMLDAsG
DAYHDAcI
"""
#static const char tiling12_2_[24][24]
TILING12_2_ = (24, 24), """
DAILDAsHDAcGDAYKDAoJDAkIDAgDDAMCDAEKDAoGDAYFDAUJDAkIDAgLDAsCDAIBDAQFDAUKDAoG
DAYHDAcDDAMBDAEJDAkEBwYMCAcMBAgMBQQMAQUMAwEMCwMMBgsMDAAJDAkFDAUEDAQIDAgLDAsK
DAoBDAEAAQIMCQEMAAkMAwAMBwMMBQcMCgUMAgoMDAECDAILDAsDDAMADAAEDAQGDAYKDAoBDAMA
DAAJDAkBDAECDAIGDAYEDAQIDAgDAwAMCwMMAgsMAQIMBQEMBwUMCAcMAAgMBgUMCwYMBwsMBAcM
AAQMAgAMCgIMBQoMDAcEDAQJDAkFDAUGDAYCDAIADAAIDAgHCAcMAAgMAwAMCwMMCgsMCQoMBAkM
BwQMDAcIDAgADAADDAMLDAsKDAoJDAkEDAQHBAcMCQQMBQkMBgUMAgYMAAIMCAAMBwgMDAUGDAYL
DAsHDAcEDAQADAACDAIKDAoFDAADDAMLDAsCDAIBDAEFDAUHDAcIDAgAAAMMCQAMAQkMAgEMBgIM
BAYMCAQMAwgMAgEMCwIMAwsMAAMMBAAMBgQMCgYMAQoMDAIBDAEJDAkADAADDAMHDAcFDAUKDAoC
CQAMBQkMBAUMCAQMCwgMCgsMAQoMAAEMDAYHDAcIDAgEDAQFDAUBDAEDDAMLDAsGBQQMCgUMBgoM
BwYMAwcMAQMMCQEMBAkMCgEMBgoMBQYMCQUMCAkMCwgMAgsMAQIMCwIMBwsMBgcMCgYMCQoMCAkM
AwgMAgMM
"""
#static const char tiling13_1[2][12]
TILING13_1 = (2, 12), """
CwcGAQIKCAMACQUECAQHAgMLCQABCgYF
"""
#static const char tiling13_1_[2][12]
TILING13_1_ = (2, 12), """
BwQICwMCAQAJBQYKBgcLCgIBAAMIBAUJ
"""
#static const char tiling13_2[2][6][18]
TILING13_2 = (2, 6, 18), """
AQIKCwcGAwQIBAMFAAUDBQAJCAMACwcGCQEEAgQBBAIFCgUCCQUECAMAAQYKBgEHAgcBBwILCQUE
AQIKCwMGAAYDBgAHCAcACQUECwcGAAoBCgAICggCAwIIAQIKAwAIBAkHCwcJBQsJCwUGAgMLCAQH
AAUJBQAGAQYABgEKCQABCAQHCgIFAwUCBQMGCwYDBgUKCQABAgcLBwIEAwQCBAMIBgUKAgMLCAAH
AQcABwEECQQBBgUKCAQHAQsCCwEJCwkDAAMJAgMLAAEJBQoECAQKBggKCAYH
"""
#static const char tiling13_2_[2][6][18]
TILING13_2_ = (2, 6, 18), """
CgUGCwMCBwAIAAcBBAEHAQQJCwMCBwQICQUABgAFAAYBCgEGAQAJBwQIBQIKAgUDBgMFAwYLCgUG
AQAJCwcCBAIHAgQDCAMECgUGBwQIAgsBCQELAwkLCQMACwMCCQEABAoFCgQICggGBwYIBgcLCAAD
BAEJAQQCBQIEAgUKCAADBAUJCgYBBwEGAQcCCwIHAgEKBAUJBgMLAwYABwAGAAcIBgcLAgEKCAQD
BQMEAwUACQAFBgcLBAUJAwgCCgIIAAoICgABCAADCgIBBQsGCwUJCwkHBAcJ
"""
#static const char tiling13_3[2][12][30]
TILING13_3 = (2, 12, 30), """
CwcGDAIKDAoFDAUEDAQIDAgDDAMADAAJDAkBDAECAQIKCQUMAAkMAwAMCwMMBgsMBwYMCAcMBAgM
BQQMCwcGDAUEDAQIDAgDDAMCDAIKDAoBDAEADAAJDAkFAQIKDAMADAAJDAkFDAUGDAYLDAsHDAcE
DAQIDAgDCAMACwcMAgsMAQIMCQEMBAkMBQQMCgUMBgoMBwYMCwcGBQQMCgUMAgoMAwIMCAMMAAgM
AQAMCQEMBAkMCAMAAQIMCQEMBAkMBwQMCwcMBgsMBQYMCgUMAgoMCQUEDAAIDAgHDAcGDAYKDAoB
DAECDAILDAsDDAMACQUEDAcGDAYKDAoBDAEADAAIDAgDDAMCDAILDAsHCAMADAECDAILDAsHDAcE
DAQJDAkFDAUGDAYKDAoBCQUEBwYMCAcMAAgMAQAMCgEMAgoMAwIMCwMMBgsMAQIKAwAMCwMMBgsM
BQYMCQUMBAkMBwQMCAcMAAgMCAQHDAMLDAsGDAYFDAUJDAkADAABDAEKDAoCDAIDAgMLCgYMAQoM
AAEMCAAMBwgMBAcMCQQMBQkMBgUMCAQHDAYFDAUJDAkADAADDAMLDAsCDAIBDAEKDAoGAgMLDAAB
DAEKDAoGDAYHDAcIDAgEDAQFDAUJDAkAAAEJCAQMAwgMAgMMCgIMBQoMBgUMCwYMBwsMBAcMCAQH
BgUMCwYMAwsMAAMMCQAMAQkMAgEMCgIMBQoMCQABAgMMCgIMBQoMBAUMCAQMBwgMBgcMCwYMAwsM
BgUKDAEJDAkEDAQHDAcLDAsCDAIDDAMIDAgADAABBgUKDAQHDAcLDAsCDAIBDAEJDAkADAADDAMI
DAgECQABDAIDDAMIDAgEDAQFDAUKDAoGDAYHDAcLDAsCBgUKBAcMCQQMAQkMAgEMCwIMAwsMAAMM
CAAMBwgMAgMLAAEMCAAMBwgMBgcMCgYMBQoMBAUMCQQMAQkM
"""
#static const char tiling13_3_[2][12][30]
TILING13_3_ = (2, 12, 30), """
AwILCAcMAAgMAQAMCgEMBgoMBQYMCQUMBAkMBwQMBQYKDAILDAsHDAcEDAQJDAkBDAEADAAIDAgD
DAMCCgUGDAcEDAQJDAkBDAECDAILDAsDDAMADAAIDAgHCwMCDAEADAAIDAgHDAcGDAYKDAoFDAUE
DAQJDAkBBwQICwMMBgsMBQYMCQUMAAkMAQAMCgEMAgoMAwIMBwQIBQYMCQUMAAkMAwAMCwMMAgsM
AQIMCgEMBgoMCwMCAQAMCgEMBgoMBwYMCAcMBAgMBQQMCQUMAAkMAQAJDAQIDAgDDAMCDAIKDAoF
DAUGDAYLDAsHDAcEBwQIDAUGDAYLDAsDDAMADAAJDAkBDAECDAIKDAoFAQAJDAMCDAIKDAoFDAUE
DAQIDAgHDAcGDAYLDAsDCgUGBwQMCwcMAgsMAQIMCQEMAAkMAwAMCAMMBAgMCQEAAwIMCAMMBAgM
BQQMCgUMBgoMBwYMCwcMAgsMAAMICQQMAQkMAgEMCwIMBwsMBgcMCgYMBQoMBAUMCwYHDAMIDAgE
DAQFDAUKDAoCDAIBDAEJDAkADAADBgcLDAQFDAUKDAoCDAIDDAMIDAgADAABDAEJDAkECAADDAIB
DAEJDAkEDAQHDAcLDAsGDAYFDAUKDAoCBAUJCAAMBwgMBgcMCgYMAQoMAgEMCwIMAwsMAAMMBAUJ
BgcMCgYMAQoMAAEMCAAMAwgMAgMMCwIMBwsMCAADAgEMCwIMBwsMBAcMCQQMBQkMBgUMCgYMAQoM
AgEKDAUJDAkADAADDAMLDAsGDAYHDAcIDAgEDAQFBAUJDAYHDAcIDAgADAABDAEKDAoCDAIDDAML
DAsGAgEKDAADDAMLDAsGDAYFDAUJDAkEDAQHDAcIDAgABgcLBAUMCAQMAwgMAgMMCgIMAQoMAAEM
CQAMBQkMCgIBAAMMCQAMBQkMBgUMCwYMBwsMBAcMCAQMAwgM
"""
#static const char tiling13_4[2][4][36]
TILING13_4 = (2, 4, 36), """
DAIKDAoFDAUGDAYLDAsHDAcEDAQIDAgDDAMADAAJDAkBDAECCwMMBgsMBwYMCAcMBAgMBQQMCQUM
AAkMAQAMCgEMAgoMAwIMCQEMBAkMBQQMCgUMBgoMBwYMCwcMAgsMAwIMCAMMAAgMAQAMDAAIDAgH
DAcEDAQJDAkFDAUGDAYKDAoBDAECDAILDAsDDAMADAMLDAsGDAYHDAcIDAgEDAQFDAUJDAkADAAB
DAEKDAoCDAIDCAAMBwgMBAcMCQQMBQkMBgUMCgYMAQoMAgEMCwIMAwsMAAMMCgIMBQoMBgUMCwYM
BwsMBAcMCAQMAwgMAAMMCQAMAQkMAgEMDAEJDAkEDAQFDAUKDAoGDAYHDAcLDAsCDAIDDAMIDAgA
DAAB
"""
#static const char tiling13_5_1[2][4][18]
TILING13_5_1 = (2, 4, 18), """
BwYLAQAJCgMCAwoFAwUIBAgFAQIKBwQIAwALBgsACQYABgkFAwAIBQYKAQIJBAkCCwQCBAsHBQQJ
AwILCAEAAQgHAQcKBgoHBAcIAgEKCwADAAsGAAYJBQkGAgMLBAUJAAEIBwgBCgcBBwoGAAEJBgcL
AgMKBQoDCAUDBQgEBgUKAAMICQIBAgkEAgQLBwsE
"""
#static const char tiling13_5_2[2][4][30]
TILING13_5_2 = (2, 4, 30), """
AQAJBwQIBwgDBwMLAgsDCwIKCwoGBQYKBgUHBAcFBwQICwMCBgsCCgYCBgoFCQUKAQkKCQEAAgAB
AAIDBQYKCQEABAkACAQABAgHCwcIAwsICwMCAAIDAgABAwILBQYKBQoBBQEJAAkBCQAICQgEBAgH
BAcFBgUHAgEKBAUJBAkABAAIAwgACAMLCAsHBgcLBwYEBQQGBAUJCAADBwgDCwcDBwsGCgYLAgoL
CgIBAwECAQMABgcLCgIBBQoBCQUBBQkECAQJAAgJCAADAQMAAwECAAMIBgcLBgsCBgIKAQoCCgEJ
CgkFBQkEBQQGBwYE
"""
#static const char tiling14[12][12]
TILING14 = (12, 12), """
BQkIBQgCBQIGAwIIAgEFAgUIAggLBAgFCQQGCQYDCQMBCwMGAQsKAQQLAQAEBwsECAIACAUCCAcF
CgIFAAcDAAoHAAkKBgcKAAMHAAcKAAoJBgoHCAACCAIFCAUHCgUCAQoLAQsEAQQABwQLCQYECQMG
CQEDCwYDAgUBAggFAgsIBAUIBQgJBQIIBQYCAwgC
"""
#static const char test3[24]
TEST3 = (24,), """
BQEEBQECAgMEAwYG+vr9/P3+/v/7/P/7
"""
#static const char test4[8]
TEST4 = (8,), """
BwcHB/n5+fk=
"""
#static const char test6[48][3]
TEST6 = (48, 3), """
AgcKBAcLBQcBBQcDAQcJAwcKBgcFAQcIBAcIAQcIAwcLBQcCBQcAAQcJBgcGAgcJBAcIAgcJAgcK
BgcHAwcKBAcLAwcLBgcE+vkE/fkL/PkL/fkK+vkH/vkK/vkJ/PkI/vkJ+vkG//kJ+/kA+/kC/fkL
//kI/PkI//kI+vkF/fkK//kJ+/kD+/kB/PkL/vkK
"""
#static const char test7[16][5]
TEST7 = (16, 5), """
AQIFBwEDBAUHAwQBBgcEBAEFBwACAwUHAgECBgcFAgMGBwYDBAYHB/38+vkH/v36+Qb//vr5Bf79
+/kC/P/7+QD8//r5BP38+/kD//77+QE=
"""
#static const char test10[6][3]
TEST10 = (6, 3), """
AgQHBQYHAQMHAQMHBQYHAgQH
"""
#static const char test12[24][4]
TEST12 = (24, 4), """
BAMHCwMCBwoCBgcFBgQHBwIBBwkFAgcBBQMHAgUBBwAFBAcDBgMHBgEGBwQBBAcIBAEHCAYBBwQD
BgcGBAUHAwEFBwADBQcCAgUHAQECBwkEBgcHBgIHBQIDBwoDBAcL
"""
#static const char test13[2][7]
TEST13 = (2, 7), """
AQIDBAUGBwIDBAEFBgc=
"""
#static const char subconfig13[64]
SUBCONFIG13 = (64,), """
AAECBwP/C/8ECP//Dv///wUJDBcP/xUmERT/JBohHiwGCg0TEP8ZJRIY/yMWIB0r////Iv//HCr/
H/8pGygnLQ==
"""

View File

@@ -1,498 +0,0 @@
import itertools
import numpy as np
from .._shared.utils import _supported_float_type, check_nD
from . import _moments_cy
from ._moments_analytical import moments_raw_to_central
def moments_coords(coords, order=3):
    """Calculate all raw image moments of a point set up to a certain order.

    The following properties can be derived from the raw moments:

    * Area as: ``M[0, 0]``.
    * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.

    Raw moments are neither translation, scale nor rotation invariant.

    Parameters
    ----------
    coords : (N, D) double or uint8 array
        Array of N points that describe an image of D dimensionality in
        Cartesian space.
    order : int, optional
        Maximum order of moments. Default is 3.

    Returns
    -------
    M : (``order + 1``, ``order + 1``, ...) array
        Raw image moments. (D dimensions)

    References
    ----------
    .. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham
           University, version 0.2, Durham, 2001.

    Examples
    --------
    >>> coords = np.array([[row, col]
    ...                    for row in range(13, 17)
    ...                    for col in range(14, 18)], dtype=np.float64)
    >>> M = moments_coords(coords)
    >>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
    >>> centroid
    (14.5, 15.5)
    """
    # Raw moments are simply central moments taken about the origin.
    return moments_coords_central(coords, 0, order=order)
def moments_coords_central(coords, center=None, order=3):
    """Calculate all central image moments of a point set up to ``order``.

    The following properties can be derived from the raw moments:

    * Area as: ``M[0, 0]``.
    * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.

    Raw moments are neither translation, scale nor rotation invariant.

    Parameters
    ----------
    coords : (N, D) double or uint8 array
        Array of N points describing an image of D dimensionality in
        Cartesian space. A tuple of coordinate arrays as returned by
        ``np.nonzero`` is also accepted.
    center : tuple of float, optional
        Coordinates of the image centroid. Computed from the points
        themselves if not provided.
    order : int, optional
        Maximum order of moments. Default is 3.

    Returns
    -------
    Mc : (``order + 1``, ``order + 1``, ...) array
        Central image moments. (D dimensions)

    References
    ----------
    .. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham
           University, version 0.2, Durham, 2001.

    Examples
    --------
    >>> coords = np.array([[row, col]
    ...                    for row in range(13, 17)
    ...                    for col in range(14, 18)])
    >>> moments_coords_central(coords)
    array([[16.,  0., 20.,  0.],
           [ 0.,  0.,  0.,  0.],
           [20.,  0., 25.,  0.],
           [ 0.,  0.,  0.,  0.]])

    Image moments and central image moments coincide (by definition) when
    the center is (0, 0):

    >>> np.allclose(moments_coords(coords),
    ...             moments_coords_central(coords, (0, 0)))
    True
    """
    if isinstance(coords, tuple):
        # ``np.nonzero``-style tuple of per-axis coordinate arrays:
        # stack into a single (npoints, ndim) array.
        coords = np.stack(coords, axis=-1)
    check_nD(coords, 2)
    ndim = coords.shape[1]
    float_type = _supported_float_type(coords.dtype)

    if center is None:
        center = np.mean(coords, axis=0, dtype=float)
    centered = coords.astype(float_type, copy=False) - center

    # powers[i, d, p] == centered[i, d] ** p, for p in 0..order
    powers = np.stack([centered ** p for p in range(order + 1)], axis=-1)
    # Trailing singleton axes let each axis' power vector broadcast into its
    # own dimension of the (order + 1,) * ndim output grid below.
    powers = powers.reshape(powers.shape + (1,) * (ndim - 1))

    prod = 1
    for d in range(ndim):
        # Per-point powers along axis d, rotated so that the exponent axis
        # lands in the slot matching output dimension d.
        axis_powers = np.moveaxis(powers[:, d], 1, 1 + d)
        prod = prod * axis_powers
    # Sum over the points axis to obtain the moment tensor.
    return np.sum(prod, axis=0)
def moments(image, order=3, *, spacing=None):
    """Calculate all raw image moments up to a certain order.

    The following properties can be derived from the raw moments:

    * Area as: ``M[0, 0]``.
    * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.

    Raw moments are neither translation, scale nor rotation invariant.

    Parameters
    ----------
    image : nD double or uint8 array
        Rasterized shape as image.
    order : int, optional
        Maximum order of moments. Default is 3.
    spacing : tuple of float, shape (ndim, )
        The pixel spacing along each axis of the image.

    Returns
    -------
    m : (``order + 1``, ``order + 1``) array
        Raw image moments.

    References
    ----------
    .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
           Core Algorithms. Springer-Verlag, London, 2009.
    .. [2] https://en.wikipedia.org/wiki/Image_moment

    Examples
    --------
    >>> image = np.zeros((20, 20), dtype=np.float64)
    >>> image[13:17, 13:17] = 1
    >>> M = moments(image)
    >>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
    >>> centroid
    (14.5, 14.5)
    """
    # Raw moments are central moments taken about the origin.
    origin = (0,) * image.ndim
    return moments_central(image, origin, order=order, spacing=spacing)
def moments_central(image, center=None, order=3, *, spacing=None, **kwargs):
    """Calculate all central image moments up to a certain order.

    The center coordinates (cr, cc) can be calculated from the raw moments
    as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.

    Central moments are translation invariant but not scale or rotation
    invariant.

    Parameters
    ----------
    image : nD double or uint8 array
        Rasterized shape as image.
    center : tuple of float, optional
        Coordinates of the image centroid. Computed from the raw moments
        if not provided.
    order : int, optional
        The maximum order of moments computed.
    spacing : tuple of float, shape (ndim, )
        The pixel spacing along each axis of the image.
    **kwargs
        Ignored. NOTE(review): presumably retained for backward
        compatibility with removed keyword arguments — confirm.

    Returns
    -------
    mu : (``order + 1``, ``order + 1``) array
        Central image moments.

    References
    ----------
    .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
           Core Algorithms. Springer-Verlag, London, 2009.
    .. [2] https://en.wikipedia.org/wiki/Image_moment

    Examples
    --------
    >>> image = np.zeros((20, 20), dtype=np.float64)
    >>> image[13:17, 13:17] = 1
    >>> M = moments(image)
    >>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
    >>> moments_central(image, centroid)
    array([[16.,  0., 20.,  0.],
           [ 0.,  0.,  0.,  0.],
           [20.,  0., 25.,  0.],
           [ 0.,  0.,  0.,  0.]])
    """
    if center is None:
        # No explicit centroid needed: central moments follow analytically
        # from the raw moments.
        raw = moments(image, order=order, spacing=spacing)
        return moments_raw_to_central(raw)
    if spacing is None:
        spacing = np.ones(image.ndim)
    float_dtype = _supported_float_type(image.dtype)
    result = image.astype(float_dtype, copy=False)
    exponents = np.arange(order + 1, dtype=float_dtype)
    for axis, length in enumerate(image.shape):
        # Physical coordinate of each pixel along `axis`, relative to center.
        offsets = (np.arange(length, dtype=float_dtype) * spacing[axis]
                   - center[axis])
        # Vandermonde-style matrix: offsets raised to powers 0..order.
        powers_of_offsets = offsets[:, np.newaxis] ** exponents
        # Contract this axis against the power matrix, then restore the
        # axis ordering so the output index (p, q, ...) matches axis order.
        result = np.rollaxis(result, axis, image.ndim)
        result = np.dot(result, powers_of_offsets)
        result = np.rollaxis(result, -1, axis)
    return result
def moments_normalized(mu, order=3, spacing=None):
    """Calculate all normalized central image moments up to a certain order.

    Normalized central moments are translation and scale invariant but not
    rotation invariant.

    Parameters
    ----------
    mu : (M,[ ...,] M) array
        Central image moments, where M must be strictly greater than
        ``order`` (i.e. at least ``order + 1`` entries per axis).
    order : int, optional
        Maximum order of moments. Default is 3.
    spacing : tuple of float, shape (ndim, ), optional
        The pixel spacing along each axis of the image. Defaults to 1
        along each axis.

    Returns
    -------
    nu : (``order + 1``,[ ...,] ``order + 1``) array
        Normalized central image moments. Entries of total order < 2 are
        set to NaN, as the normalization is not defined for them.

    References
    ----------
    .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
           Core Algorithms. Springer-Verlag, London, 2009.
    .. [2] https://en.wikipedia.org/wiki/Image_moment

    Examples
    --------
    >>> image = np.zeros((20, 20), dtype=np.float64)
    >>> image[13:17, 13:17] = 1
    >>> m = moments(image)
    >>> centroid = (m[0, 1] / m[0, 0], m[1, 0] / m[0, 0])
    >>> mu = moments_central(image, centroid)
    >>> moments_normalized(mu)
    array([[       nan,        nan, 0.078125  , 0.        ],
           [       nan, 0.        , 0.        , 0.        ],
           [0.078125  , 0.        , 0.00610352, 0.        ],
           [0.        , 0.        , 0.        , 0.        ]])
    """
    if np.any(np.array(mu.shape) <= order):
        # BUGFIX: the old message claimed ">= `order`" although the check
        # requires every dimension of `mu` to be strictly greater.
        raise ValueError("Shape of image moments must be > `order`")
    if spacing is None:
        spacing = np.ones(mu.ndim)
    nu = np.zeros_like(mu)
    mu0 = mu.ravel()[0]  # zeroth moment (total mass)
    scale = min(spacing)
    for powers in itertools.product(range(order + 1), repeat=mu.ndim):
        if sum(powers) < 2:
            # The normalization exponent is undefined for total order < 2.
            nu[powers] = np.nan
        else:
            # nu_pq = mu_pq / mu_00 ** ((p + q) / ndim + 1), with physical
            # scale factored out of mu_pq first.
            nu[powers] = (mu[powers] / scale ** sum(powers)) / (
                mu0 ** (sum(powers) / nu.ndim + 1)
            )
    return nu
def moments_hu(nu):
    """Calculate Hu's set of image moments (2D-only).

    This set of moments is translation, scale and rotation invariant.

    Parameters
    ----------
    nu : (M, M) array
        Normalized central image moments, where M must be >= 4.

    Returns
    -------
    nu : (7,) array
        Hu's set of image moments.

    References
    ----------
    .. [1] M. K. Hu, "Visual Pattern Recognition by Moment Invariants",
           IRE Trans. Info. Theory, vol. IT-8, pp. 179-187, 1962
    .. [2] https://en.wikipedia.org/wiki/Image_moment

    Examples
    --------
    >>> image = np.zeros((20, 20), dtype=np.float64)
    >>> image[13:17, 13:17] = 0.5
    >>> image[10:12, 10:12] = 1
    >>> mu = moments_central(image)
    >>> nu = moments_normalized(mu)
    >>> moments_hu(nu)
    array([0.74537037, 0.35116598, 0.10404918, 0.04064421, 0.00264312,
           0.02408546, 0.        ])
    """
    # The Cython kernel supports float32 and float64 only; promote anything
    # else (e.g. integer input) to float64.
    if nu.dtype == 'float32':
        work_dtype = np.float32
    else:
        work_dtype = np.float64
    return _moments_cy.moments_hu(nu.astype(work_dtype, copy=False))
def centroid(image, *, spacing=None):
    """Return the (weighted) centroid of an image.

    Parameters
    ----------
    image : array
        The input image.
    spacing : tuple of float, shape (ndim, )
        The pixel spacing along each axis of the image.

    Returns
    -------
    center : tuple of float, length ``image.ndim``
        The centroid of the (nonzero) pixels in ``image``.

    Examples
    --------
    >>> image = np.zeros((20, 20), dtype=np.float64)
    >>> image[13:17, 13:17] = 0.5
    >>> image[10:12, 10:12] = 1
    >>> centroid(image)
    array([13.16666667, 13.16666667])
    """
    ndim = image.ndim
    origin = (0,) * ndim
    # First-order raw moments about the origin.
    M = moments_central(image, center=origin, order=1, spacing=spacing)
    # One weighted coordinate sum per axis, indexed by the unit vectors.
    weighted_sums = M[tuple(np.eye(ndim, dtype=int))]
    # Divide by the total weight to obtain the centroid.
    return weighted_sums / M[origin]
def inertia_tensor(image, mu=None, *, spacing=None):
    """Compute the inertia tensor of the input image.

    Parameters
    ----------
    image : array
        The input image.
    mu : array, optional
        The pre-computed central moments of ``image``. If an application
        requires both the central moments and the inertia tensor (for
        example, `skimage.measure.regionprops`), it is more efficient to
        pre-compute them and pass them in.
    spacing : tuple of float, shape (ndim, )
        The pixel spacing along each axis of the image.

    Returns
    -------
    T : array, shape ``(image.ndim, image.ndim)``
        The inertia tensor of the input image. :math:`T_{i, j}` contains
        the covariance of image intensity along axes :math:`i` and :math:`j`.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Moment_of_inertia#Inertia_tensor
    .. [2] Bernd Jähne. Spatio-Temporal Image Processing: Theory and
           Scientific Applications. (Chapter 8: Tensor Methods) Springer, 1993.
    """
    if mu is None:
        # Second-order central moments suffice for the tensor.
        mu = moments_central(image, order=2, spacing=spacing)
    ndim = image.ndim
    mu0 = mu[(0,) * ndim]
    result = np.zeros((ndim, ndim), dtype=mu.dtype)

    # Indices of the pure second-order moments: ([2, 0], [0, 2]) in 2D,
    # ([2, 0, 0], [0, 2, 0], [0, 0, 2]) in 3D, etc.
    corners2 = tuple(2 * np.eye(ndim, dtype=int))
    second_order = mu[corners2]
    # I_ii is the sum of the second-order moments of every axis *except* i,
    # not the second-order moment of axis i itself. See
    # https://github.com/scikit-image/scikit-image/issues/3229 and
    # https://ocw.mit.edu/courses/aeronautics-and-astronautics/
    # 16-07-dynamics-fall-2009/lecture-notes/MIT16_07F09_Lec26.pdf
    result[np.diag_indices(ndim)] = (np.sum(second_order) - second_order) / mu0

    # Off-diagonal entries: negated mixed second moments, symmetric.
    for dims in itertools.combinations(range(ndim), 2):
        mixed_index = np.zeros(ndim, dtype=int)
        mixed_index[list(dims)] = 1
        covariance = -mu[tuple(mixed_index)] / mu0
        result[dims] = covariance
        result.T[dims] = covariance
    return result
def inertia_tensor_eigvals(image, mu=None, T=None, *, spacing=None):
    """Compute the eigenvalues of the inertia tensor of the image.

    The inertia tensor measures covariance of the image intensity along the
    image axes (see `inertia_tensor`). The relative magnitude of its
    eigenvalues is therefore a measure of the elongation of a (bright)
    object in the image.

    Parameters
    ----------
    image : array
        The input image.
    mu : array, optional
        The pre-computed central moments of ``image``.
    T : array, shape ``(image.ndim, image.ndim)``
        The pre-computed inertia tensor. If ``T`` is given, ``mu`` and
        ``image`` are ignored.
    spacing : tuple of float, shape (ndim, )
        The pixel spacing along each axis of the image.

    Returns
    -------
    eigvals : list of float, length ``image.ndim``
        The eigenvalues of the inertia tensor of ``image``, in descending
        order.

    Notes
    -----
    Computing the eigenvalues requires the inertia tensor of the input
    image. This is much faster if the central moments (``mu``) are
    provided, or, alternatively, the inertia tensor (``T``) directly.
    """
    if T is None:
        T = inertia_tensor(image, mu, spacing=spacing)
    vals = np.linalg.eigvalsh(T)
    # The tensor is positive semidefinite, but floating-point round-off can
    # produce eigenvalues that are very slightly negative; clamp them to
    # zero to avoid problems downstream.
    vals = np.clip(vals, 0, None, out=vals)
    return sorted(vals, reverse=True)

View File

@@ -1,157 +0,0 @@
"""Analytical transformations from raw image moments to central moments.
The expressions for the 2D central moments of order <=2 are often given in
textbooks. Expressions for higher orders and dimensions were generated in SymPy
using ``tools/precompute/moments_sympy.py`` in the GitHub repository.
"""
import itertools
import math
import numpy as np
def _moments_raw_to_central_fast(moments_raw):
    """Analytical formulae for 2D and 3D central moments of order < 4.

    `moments_raw_to_central` will automatically call this function when
    ndim < 4 and order < 4.

    Parameters
    ----------
    moments_raw : ndarray
        The raw moments.

    Returns
    -------
    moments_central : ndarray
        The central moments.

    Raises
    ------
    ValueError
        If ``moments_raw`` is not 2D or 3D, or its order is >= 4.
    """
    ndim = moments_raw.ndim
    # The moment array has order + 1 entries along each axis.
    order = moments_raw.shape[0] - 1
    float_dtype = moments_raw.dtype
    # convert to float64 during the computation for better accuracy
    moments_raw = moments_raw.astype(np.float64, copy=False)
    moments_central = np.zeros_like(moments_raw)
    if order >= 4 or ndim not in [2, 3]:
        raise ValueError(
            "This function only supports 2D or 3D moments of order < 4."
        )
    m = moments_raw
    if ndim == 2:
        # centroid components: cx = m10 / m00, cy = m01 / m00
        cx = m[1, 0] / m[0, 0]
        cy = m[0, 1] / m[0, 0]
        moments_central[0, 0] = m[0, 0]
        # Note: 1st order moments are both 0
        if order > 1:
            # 2nd order moments
            moments_central[1, 1] = m[1, 1] - cx*m[0, 1]
            moments_central[2, 0] = m[2, 0] - cx*m[1, 0]
            moments_central[0, 2] = m[0, 2] - cy*m[0, 1]
            if order > 2:
                # 3rd order moments
                moments_central[2, 1] = (m[2, 1] - 2*cx*m[1, 1] - cy*m[2, 0]
                                         + cx**2*m[0, 1] + cy*cx*m[1, 0])
                moments_central[1, 2] = (m[1, 2] - 2*cy*m[1, 1] - cx*m[0, 2]
                                         + 2*cy*cx*m[0, 1])
                moments_central[3, 0] = m[3, 0] - 3*cx*m[2, 0] + 2*cx**2*m[1, 0]
                moments_central[0, 3] = m[0, 3] - 3*cy*m[0, 2] + 2*cy**2*m[0, 1]
    else:
        # 3D case
        cx = m[1, 0, 0] / m[0, 0, 0]
        cy = m[0, 1, 0] / m[0, 0, 0]
        cz = m[0, 0, 1] / m[0, 0, 0]
        moments_central[0, 0, 0] = m[0, 0, 0]
        # Note: all first order moments are 0
        if order > 1:
            # 2nd order moments
            moments_central[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2]
            moments_central[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1]
            moments_central[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0]
            moments_central[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1]
            moments_central[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0]
            moments_central[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0]
            if order > 2:
                # 3rd order moments (expressions generated with SymPy; see
                # the module docstring)
                moments_central[0, 0, 3] = (2*cz**2*m[0, 0, 1]
                                            - 3*cz*m[0, 0, 2]
                                            + m[0, 0, 3])
                moments_central[0, 1, 2] = (-cy*m[0, 0, 2]
                                            + 2*cz*(cy*m[0, 0, 1] - m[0, 1, 1])
                                            + m[0, 1, 2])
                moments_central[0, 2, 1] = (cy**2*m[0, 0, 1] - 2*cy*m[0, 1, 1]
                                            + cz*(cy*m[0, 1, 0] - m[0, 2, 0])
                                            + m[0, 2, 1])
                moments_central[0, 3, 0] = (2*cy**2*m[0, 1, 0]
                                            - 3*cy*m[0, 2, 0]
                                            + m[0, 3, 0])
                moments_central[1, 0, 2] = (-cx*m[0, 0, 2]
                                            + 2*cz*(cx*m[0, 0, 1] - m[1, 0, 1])
                                            + m[1, 0, 2])
                moments_central[1, 1, 1] = (-cx*m[0, 1, 1]
                                            + cy*(cx*m[0, 0, 1] - m[1, 0, 1])
                                            + cz*(cx*m[0, 1, 0] - m[1, 1, 0])
                                            + m[1, 1, 1])
                moments_central[1, 2, 0] = (-cx*m[0, 2, 0]
                                            - 2*cy*(-cx*m[0, 1, 0] + m[1, 1, 0])
                                            + m[1, 2, 0])
                moments_central[2, 0, 1] = (cx**2*m[0, 0, 1]
                                            - 2*cx*m[1, 0, 1]
                                            + cz*(cx*m[1, 0, 0] - m[2, 0, 0])
                                            + m[2, 0, 1])
                moments_central[2, 1, 0] = (cx**2*m[0, 1, 0]
                                            - 2*cx*m[1, 1, 0]
                                            + cy*(cx*m[1, 0, 0] - m[2, 0, 0])
                                            + m[2, 1, 0])
                moments_central[3, 0, 0] = (2*cx**2*m[1, 0, 0]
                                            - 3*cx*m[2, 0, 0]
                                            + m[3, 0, 0])
    # cast back to the caller's dtype
    return moments_central.astype(float_dtype, copy=False)
def moments_raw_to_central(moments_raw):
    """Convert raw image moments to central image moments.

    Parameters
    ----------
    moments_raw : ndarray
        Raw moments; the array has ``order + 1`` entries along each of its
        ``ndim`` axes, with ``moments_raw[p, q, ...]`` the raw moment of
        multi-order ``(p, q, ...)``.

    Returns
    -------
    moments_central : ndarray
        Central image moments of the same shape. Entries whose total order
        exceeds ``order`` are left at zero.
    """
    ndim = moments_raw.ndim
    order = moments_raw.shape[0] - 1
    if ndim in [2, 3] and order < 4:
        # Closed-form expressions are much faster for the common cases.
        return _moments_raw_to_central_fast(moments_raw)
    m = moments_raw
    out = np.zeros_like(m)
    # Centroid along each axis: m[1, 0, ...] / m[0, 0, ...], etc.
    centers = tuple(m[tuple(np.eye(ndim, dtype=int))] / m[(0,) * ndim])
    if ndim == 2:
        # General 2D binomial expansion from
        # https://en.wikipedia.org/wiki/Image_moment#Central_moments
        cx, cy = centers
        for p in range(order + 1):
            for q in range(order + 1 - p):
                for i in range(p + 1):
                    row_term = math.comb(p, i)
                    row_term *= (-cx) ** (p - i)
                    for j in range(q + 1):
                        col_term = math.comb(q, j)
                        col_term *= (-cy) ** (q - j)
                        out[p, q] += row_term * col_term * m[i, j]
        return out
    # n-dimensional generalization of the same binomial expansion.
    # Iterate over every multi-order in [0, order]^ndim ...
    for orders in itertools.product(*((range(order + 1),) * ndim)):
        if sum(orders) > order:
            # ... skipping moments above the requested total order.
            continue
        # Accumulate every raw-moment term contributing to out[orders].
        for idxs in itertools.product(*[range(o + 1) for o in orders]):
            term = m[idxs]
            for target_order, c, idx in zip(orders, centers, idxs):
                term *= math.comb(target_order, idx)
                term *= (-c) ** (target_order - idx)
            out[orders] += term
    return out

View File

@@ -1,168 +0,0 @@
import numpy as np
from scipy import signal
def approximate_polygon(coords, tolerance):
    """Approximate a polygonal chain with the specified tolerance.

    Implements the Douglas-Peucker algorithm. The approximated polygon is
    always within the convex hull of the original polygon.

    Parameters
    ----------
    coords : (N, 2) array
        Coordinate array.
    tolerance : float
        Maximum distance from original points of the polygon to the
        approximated polygonal chain. If tolerance is 0, the original
        coordinate array is returned.

    Returns
    -------
    coords : (M, 2) array
        Approximated polygonal chain where M <= N.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
    """
    if tolerance <= 0:
        return coords

    n_points = coords.shape[0]
    keep = np.zeros(n_points, 'bool')
    # shared scratch buffer for point-to-segment distances
    dists = np.zeros(n_points)
    keep[0] = True
    keep[-1] = True

    stack = [(0, n_points - 1)]
    while stack:
        start, end = stack.pop()

        # current chord endpoints
        r0, c0 = coords[start, :]
        r1, c1 = coords[end, :]
        dr = r1 - r0
        dc = c1 - c0
        # Hesse normal form (angle, offset) of the chord's supporting line
        angle = -np.arctan2(dr, dc)
        offset = c0 * np.sin(angle) + r0 * np.cos(angle)

        inner = coords[start + 1:end, :]
        inner_dists = dists[start + 1:end]

        # vectors from each inner point to the chord endpoints
        dr0 = inner[:, 0] - r0
        dc0 = inner[:, 1] - c0
        dr1 = inner[:, 0] - r1
        dc1 = inner[:, 1] - c1
        # Projections onto the chord decide perpendicular vs. endpoint
        # (Euclidean) distance: both positive -> point lies beside the chord.
        proj_from_start = dr0 * dr + dc0 * dc
        proj_from_end = -dr1 * dr - dc1 * dc
        beside = np.logical_and(proj_from_start > 0, proj_from_end > 0)
        beyond = np.logical_not(beside)

        inner_dists[beside] = np.abs(
            inner[beside, 0] * np.cos(angle)
            + inner[beside, 1] * np.sin(angle)
            - offset
        )
        inner_dists[beyond] = np.minimum(
            # distance to the start point
            np.sqrt(dc0[beyond] ** 2 + dr0[beyond] ** 2),
            # distance to the end point
            np.sqrt(dc1[beyond] ** 2 + dr1[beyond] ** 2)
        )

        if np.any(inner_dists > tolerance):
            # keep the farthest point and recurse on both sub-chains
            split = start + np.argmax(inner_dists) + 1
            stack.append((split, end))
            stack.append((start, split))
            keep[split] = True

    return coords[keep, :]
# B-Spline subdivision
# Convolution masks used by `subdivide_polygon`; the weights are divided by
# 2 ** degree there before use.
_SUBDIVISION_MASKS = {
    # degree: (mask_even, mask_odd)
    # extracted from (degree + 2)th row of Pascal's triangle
    1: ([1, 1], [1, 1]),
    2: ([3, 1], [1, 3]),
    3: ([1, 6, 1], [0, 4, 4]),
    4: ([5, 10, 1], [1, 10, 5]),
    5: ([1, 15, 15, 1], [0, 6, 20, 6]),
    6: ([7, 35, 21, 1], [1, 21, 35, 7]),
    7: ([1, 28, 70, 28, 1], [0, 8, 56, 56, 8]),
}
def subdivide_polygon(coords, degree=2, preserve_ends=False):
"""Subdivision of polygonal curves using B-Splines.
Note that the resulting curve is always within the convex hull of the
original polygon. Circular polygons stay closed after subdivision.
Parameters
----------
coords : (N, 2) array
Coordinate array.
degree : {1, 2, 3, 4, 5, 6, 7}, optional
Degree of B-Spline. Default is 2.
preserve_ends : bool, optional
Preserve first and last coordinate of non-circular polygon. Default is
False.
Returns
-------
coords : (M, 2) array
Subdivided coordinate array.
References
----------
.. [1] http://mrl.nyu.edu/publications/subdiv-course2000/coursenotes00.pdf
"""
if degree not in _SUBDIVISION_MASKS:
raise ValueError("Invalid B-Spline degree. Only degree 1 - 7 is "
"supported.")
circular = np.all(coords[0, :] == coords[-1, :])
method = 'valid'
if circular:
# remove last coordinate because of wrapping
coords = coords[:-1, :]
# circular convolution by wrapping boundaries
method = 'same'
mask_even, mask_odd = _SUBDIVISION_MASKS[degree]
# divide by total weight
mask_even = np.array(mask_even, float) / (2 ** degree)
mask_odd = np.array(mask_odd, float) / (2 ** degree)
even = signal.convolve2d(coords.T, np.atleast_2d(mask_even), mode=method,
boundary='wrap')
odd = signal.convolve2d(coords.T, np.atleast_2d(mask_odd), mode=method,
boundary='wrap')
out = np.zeros((even.shape[1] + odd.shape[1], 2))
out[1::2] = even.T
out[::2] = odd.T
if circular:
# close polygon
out = np.vstack([out, out[0, :]])
if preserve_ends and not circular:
out = np.vstack([coords[0, :], out, coords[-1, :]])
return out

File diff suppressed because it is too large Load Diff

View File

@@ -1,328 +0,0 @@
from math import sqrt
import numpy as np
from scipy import ndimage as ndi
# 4-connected (von Neumann) and 8-connected (Moore) 2D structuring elements,
# used by `perimeter` below.
STREL_4 = np.array([[0, 1, 0],
                    [1, 1, 1],
                    [0, 1, 0]], dtype=np.uint8)
STREL_8 = np.ones((3, 3), dtype=np.uint8)

# Coefficients from
# Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of Discretized Sets
# - On the Choice of Adjacency in Homogeneous Lattices.
# In: Mecke K., Stoyan D. (eds) Morphology of Condensed Matter. Lecture Notes
# in Physics, vol 600. Springer, Berlin, Heidelberg.
#
# The value of coefficients correspond to the contributions to the Euler number
# of specific voxel configurations, which are themselves encoded thanks to a
# LUT. Computing the Euler number from the addition of the contributions of
# local configurations is possible thanks to an integral geometry formula
# (see the paper by Ohser et al. for more details).
EULER_COEFS2D_4 = [0, 1, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0]

EULER_COEFS2D_8 = [0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, -1, 0]

# 256-entry LUT for 26-connectivity in 3D; `euler_number` indexes it reversed
# for 6-connectivity.
EULER_COEFS3D_26 = np.array([0, 1, 1, 0, 1, 0, -2, -1,
                             1, -2, 0, -1, 0, -1, -1, 0,
                             1, 0, -2, -1, -2, -1, -1, -2,
                             -6, -3, -3, -2, -3, -2, 0, -1,
                             1, -2, 0, -1, -6, -3, -3, -2,
                             -2, -1, -1, -2, -3, 0, -2, -1,
                             0, -1, -1, 0, -3, -2, 0, -1,
                             -3, 0, -2, -1, 0, 1, 1, 0,
                             1, -2, -6, -3, 0, -1, -3, -2,
                             -2, -1, -3, 0, -1, -2, -2, -1,
                             0, -1, -3, -2, -1, 0, 0, -1,
                             -3, 0, 0, 1, -2, -1, 1, 0,
                             -2, -1, -3, 0, -3, 0, 0, 1,
                             -1, 4, 0, 3, 0, 3, 1, 2,
                             -1, -2, -2, -1, -2, -1, 1,
                             0, 0, 3, 1, 2, 1, 2, 2, 1,
                             1, -6, -2, -3, -2, -3, -1, 0,
                             0, -3, -1, -2, -1, -2, -2, -1,
                             -2, -3, -1, 0, -1, 0, 4, 3,
                             -3, 0, 0, 1, 0, 1, 3, 2,
                             0, -3, -1, -2, -3, 0, 0, 1,
                             -1, 0, 0, -1, -2, 1, -1, 0,
                             -1, -2, -2, -1, 0, 1, 3, 2,
                             -2, 1, -1, 0, 1, 2, 2, 1,
                             0, -3, -3, 0, -1, -2, 0, 1,
                             -1, 0, -2, 1, 0, -1, -1, 0,
                             -1, -2, 0, 1, -2, -1, 3, 2,
                             -2, 1, 1, 2, -1, 0, 2, 1,
                             -1, 0, -2, 1, -2, 1, 1, 2,
                             -2, 3, -1, 2, -1, 2, 0, 1,
                             0, -1, -1, 0, -1, 0, 2, 1,
                             -1, 2, 0, 1, 0, 1, 1, 0, ])
def euler_number(image, connectivity=None):
    """Calculate the Euler characteristic in binary image.

    For 2D objects, the Euler number is the number of objects minus the number
    of holes. For 3D objects, the Euler number is obtained as the number of
    objects plus the number of holes, minus the number of tunnels, or loops.

    Parameters
    ----------
    image: (N, M) ndarray or (N, M, D) ndarray.
        2D or 3D images.
        If image is not binary, all values strictly greater than zero
        are considered as the object.
    connectivity : int, optional
        Maximum number of orthogonal hops to consider a pixel/voxel
        as a neighbor.
        Accepted values are ranging from 1 to input.ndim. If ``None``, a full
        connectivity of ``input.ndim`` is used.
        4 or 8 neighborhoods are defined for 2D images (connectivity 1 and 2,
        respectively).
        6 or 26 neighborhoods are defined for 3D images, (connectivity 1 and 3,
        respectively). Connectivity 2 is not defined.

    Returns
    -------
    euler_number : int
        Euler characteristic of the set of all objects in the image.

    Notes
    -----
    The Euler characteristic is an integer number that describes the
    topology of the set of all objects in the input image. If object is
    4-connected, then background is 8-connected, and conversely.

    The computation of the Euler characteristic is based on an integral
    geometry formula in discretized space. In practice, a neighborhood
    configuration is constructed, and a LUT is applied for each
    configuration. The coefficients used are the ones of Ohser et al.

    It can be useful to compute the Euler characteristic for several
    connectivities. A large relative difference between results
    for different connectivities suggests that the image resolution
    (with respect to the size of objects and holes) is too low.

    References
    ----------
    .. [1] S. Rivollier. Analyse dimage geometrique et morphometrique par
           diagrammes de forme et voisinages adaptatifs generaux. PhD thesis,
           2010. Ecole Nationale Superieure des Mines de Saint-Etienne.
           https://tel.archives-ouvertes.fr/tel-00560838
    .. [2] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of
           Discretized Sets - On the Choice of Adjacency in Homogeneous
           Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed
           Matter. Lecture Notes in Physics, vol 600. Springer, Berlin,
           Heidelberg.

    Examples
    --------
    >>> import numpy as np
    >>> SAMPLE = np.zeros((100,100,100));
    >>> SAMPLE[40:60, 40:60, 40:60]=1
    >>> euler_number(SAMPLE) # doctest: +ELLIPSIS
    1...
    >>> SAMPLE[45:55,45:55,45:55] = 0;
    >>> euler_number(SAMPLE) # doctest: +ELLIPSIS
    2...
    >>> SAMPLE = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    ...                    [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    ...                    [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],
    ...                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
    ...                    [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
    >>> euler_number(SAMPLE)  # doctest:
    0
    >>> euler_number(SAMPLE, connectivity=1)  # doctest:
    2
    """
    # as image can be a label image, transform it to binary
    image = (image > 0).astype(int)
    # pad with background so border objects are handled correctly
    image = np.pad(image, pad_width=1, mode='constant')

    # check connectivity
    if connectivity is None:
        connectivity = image.ndim

    # config variable is an adjacency configuration. A coefficient given by
    # variable coefs is attributed to each configuration in order to get
    # the Euler characteristic.
    if image.ndim == 2:
        # powers of two encode each 2x2 pixel neighborhood as a 0-15 index
        config = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])
        if connectivity == 1:
            coefs = EULER_COEFS2D_4
        else:
            coefs = EULER_COEFS2D_8
        bins = 16
    else:  # 3D images
        if connectivity == 2:
            raise NotImplementedError(
                'For 3D images, Euler number is implemented '
                'for connectivities 1 and 3 only')

        # powers of two encode each 2x2x2 voxel neighborhood as a 0-255 index
        config = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                           [[0, 0, 0], [0, 1, 4], [0, 2, 8]],
                           [[0, 0, 0], [0, 16, 64], [0, 32, 128]]])
        if connectivity == 1:
            # 6-connectivity uses the reversed 26-connectivity LUT
            coefs = EULER_COEFS3D_26[::-1]
        else:
            coefs = EULER_COEFS3D_26
        bins = 256

    # XF has values in the 0-255 range in 3D, and in the 0-15 range in 2D,
    # with one unique value for each binary configuration of the
    # 27-voxel cube in 3D / 8-pixel square in 2D, up to symmetries
    XF = ndi.convolve(image, config, mode='constant', cval=0)
    # histogram of neighborhood configurations, then weighted sum via the LUT
    h = np.bincount(XF.ravel(), minlength=bins)

    if image.ndim == 2:
        return coefs @ h
    else:
        # each 3D configuration is counted 8 times, hence the 1/8 factor
        return int(0.125 * coefs @ h)
def perimeter(image, neighborhood=4):
    """Calculate total perimeter of all objects in binary image.

    Parameters
    ----------
    image : (N, M) ndarray
        2D binary image.
    neighborhood : 4 or 8, optional
        Neighborhood connectivity for border pixel determination. It is
        used to compute the contour. A higher neighborhood widens the
        border on which the perimeter is computed.

    Returns
    -------
    perimeter : float
        Total perimeter of all objects in binary image.

    References
    ----------
    .. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
           a Perimeter Estimator. The Queen's University of Belfast.
           http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc

    Examples
    --------
    >>> from skimage import data, util
    >>> from skimage.measure import label
    >>> # coins image (binary)
    >>> img_coins = data.coins() > 110
    >>> # total perimeter of all objects in the image
    >>> perimeter(img_coins, neighborhood=4)  # doctest: +ELLIPSIS
    7796.867...
    >>> perimeter(img_coins, neighborhood=8)  # doctest: +ELLIPSIS
    8806.268...
    """
    if image.ndim != 2:
        raise NotImplementedError('`perimeter` supports 2D images only')

    strel = STREL_4 if neighborhood == 4 else STREL_8
    image = image.astype(np.uint8)
    # the border is the set of object pixels removed by one erosion step
    eroded = ndi.binary_erosion(image, strel, border_value=0)
    border = image - eroded

    # per-configuration perimeter contributions (Benkrid & Crookes weights)
    weights = np.zeros(50, dtype=np.float64)
    weights[[5, 7, 15, 17, 25, 27]] = 1
    weights[[21, 33]] = sqrt(2)
    weights[[13, 23]] = (1 + sqrt(2)) / 2

    # the kernel encodes each border pixel's 4-neighborhood as a unique code
    kernel = np.array([[10, 2, 10],
                       [2, 1, 2],
                       [10, 2, 10]])
    coded = ndi.convolve(border, kernel, mode='constant', cval=0)

    # Summing weights[coded] directly would also work, but bincount + dot
    # was measured to be ~5x faster.
    histogram = np.bincount(coded.ravel(), minlength=50)
    return histogram @ weights
def perimeter_crofton(image, directions=4):
    """Calculate total Crofton perimeter of all objects in binary image.

    Parameters
    ----------
    image : (N, M) ndarray
        2D image. If image is not binary, all values strictly greater than
        zero are considered as the object.
    directions : 2 or 4, optional
        Number of directions used to approximate the Crofton perimeter. By
        default, 4 is used: it should be more accurate than 2.
        Computation time is the same in both cases.

    Returns
    -------
    perimeter : float
        Total perimeter of all objects in binary image.

    Notes
    -----
    This measure is based on Crofton formula [1], which is a measure from
    integral geometry. It is defined for general curve length evaluation via
    a double integral along all directions. In a discrete space, 2 or 4
    directions give a quite good approximation, 4 being more accurate than 2
    for more complex shapes.

    Similar to :func:`~.measure.perimeter`, this function returns an
    approximation of the perimeter in continuous space.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Crofton_formula
    .. [2] S. Rivollier. Analyse d'image geometrique et morphometrique par
           diagrammes de forme et voisinages adaptatifs generaux. PhD thesis,
           2010. Ecole Nationale Superieure des Mines de Saint-Etienne.
           https://tel.archives-ouvertes.fr/tel-00560838

    Examples
    --------
    >>> from skimage import data, util
    >>> from skimage.measure import label
    >>> # coins image (binary)
    >>> img_coins = data.coins() > 110
    >>> # total perimeter of all objects in the image
    >>> perimeter_crofton(img_coins, directions=2)  # doctest: +ELLIPSIS
    8144.578...
    >>> perimeter_crofton(img_coins, directions=4)  # doctest: +ELLIPSIS
    7837.077...
    """
    if image.ndim != 2:
        raise NotImplementedError(
            '`perimeter_crofton` supports 2D images only')

    # The image may be a label image: binarize it, then pad by one pixel so
    # objects touching the border are measured correctly.
    binary = np.pad((image > 0).astype(np.uint8), pad_width=1,
                    mode='constant')

    # Encode each 2x2 pixel configuration into a code in [0, 15].
    config_codes = ndi.convolve(binary,
                                np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]),
                                mode='constant', cval=0)
    code_histogram = np.bincount(config_codes.ravel(), minlength=16)

    # LUT mapping each 2x2 configuration to its perimeter contribution.
    sqrt2 = np.sqrt(2)
    if directions == 2:
        lut = [0, np.pi / 2, 0, 0, 0, np.pi / 2, 0, 0,
               np.pi / 2, np.pi, 0, 0, np.pi / 2, np.pi, 0, 0]
    else:
        lut = [0, np.pi / 4 * (1 + 1 / sqrt2),
               np.pi / (4 * sqrt2),
               np.pi / (2 * sqrt2), 0,
               np.pi / 4 * (1 + 1 / sqrt2),
               0, np.pi / (4 * sqrt2), np.pi / 4, np.pi / 2,
               np.pi / (4 * sqrt2), np.pi / (4 * sqrt2),
               np.pi / 4, np.pi / 2, 0, 0]
    return np.asarray(lut) @ code_histogram

View File

@@ -1,90 +0,0 @@
import numpy as np
from ..util import view_as_blocks
def block_reduce(image, block_size=2, func=np.sum, cval=0, func_kwargs=None):
    """Downsample image by applying function `func` to local blocks.

    This function is useful for max and mean pooling, for example.

    Parameters
    ----------
    image : ndarray
        N-dimensional input image.
    block_size : array_like or int
        Array containing down-sampling integer factor along each axis.
        Default block_size is 2.
    func : callable
        Function object which is used to calculate the return value for each
        local block. This function must implement an ``axis`` parameter.
        Primary functions are ``numpy.sum``, ``numpy.min``, ``numpy.max``,
        ``numpy.mean`` and ``numpy.median``. See also `func_kwargs`.
    cval : float
        Constant padding value if image is not perfectly divisible by the
        block size.
    func_kwargs : dict
        Keyword arguments passed to `func`. Notably useful for passing dtype
        argument to ``np.mean``. Takes dictionary of inputs, e.g.:
        ``func_kwargs={'dtype': np.float16})``.

    Returns
    -------
    image : ndarray
        Down-sampled image with same number of dimensions as input image.

    Examples
    --------
    >>> from skimage.measure import block_reduce
    >>> image = np.arange(3*3*4).reshape(3, 3, 4)
    >>> image # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]],
           [[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]],
           [[24, 25, 26, 27],
            [28, 29, 30, 31],
            [32, 33, 34, 35]]])
    >>> block_reduce(image, block_size=(3, 3, 1), func=np.mean)
    array([[[16., 17., 18., 19.]]])
    >>> image_max1 = block_reduce(image, block_size=(1, 3, 4), func=np.max)
    >>> image_max1 # doctest: +NORMALIZE_WHITESPACE
    array([[[11]],
           [[23]],
           [[35]]])
    >>> image_max2 = block_reduce(image, block_size=(3, 1, 4), func=np.max)
    >>> image_max2 # doctest: +NORMALIZE_WHITESPACE
    array([[[27],
            [31],
            [35]]])
    """
    # Normalize the block size to one factor per image axis.
    if np.isscalar(block_size):
        block_size = (block_size,) * image.ndim
    elif len(block_size) != image.ndim:
        raise ValueError("`block_size` must be a scalar or have "
                         "the same length as `image.shape`")

    if func_kwargs is None:
        func_kwargs = {}

    # Pad each axis at its end so every axis length becomes a multiple of
    # the corresponding block factor.
    pad_width = []
    for factor, extent in zip(block_size, image.shape):
        if factor < 1:
            raise ValueError("Down-sampling factors must be >= 1. Use "
                             "`skimage.transform.resize` to up-sample an "
                             "image.")
        remainder = extent % factor
        pad_width.append((0, factor - remainder if remainder else 0))

    image = np.pad(image, pad_width=pad_width, mode='constant',
                   constant_values=cval)

    # Each block becomes one output pixel: reduce over the trailing block
    # axes created by the view.
    blocked = view_as_blocks(image, block_size)
    block_axes = tuple(range(image.ndim, blocked.ndim))
    return func(blocked, axis=block_axes, **func_kwargs)

View File

@@ -1,41 +0,0 @@
from numpy import unique
from scipy.stats import entropy as scipy_entropy
def shannon_entropy(image, base=2):
    """Calculate the Shannon entropy of an image.

    The Shannon entropy is defined as S = -sum(pk * log(pk)),
    where pk are frequency/probability of pixels of value k.

    Parameters
    ----------
    image : (N, M) ndarray
        Grayscale input image.
    base : float, optional
        The logarithmic base to use.

    Returns
    -------
    entropy : float

    Notes
    -----
    The returned value is measured in bits or shannon (Sh) for base=2, natural
    unit (nat) for base=np.e and hartley (Hart) for base=10.

    References
    ----------
    .. [1] `https://en.wikipedia.org/wiki/Entropy_(information_theory) <https://en.wikipedia.org/wiki/Entropy_(information_theory)>`_
    .. [2] https://en.wiktionary.org/wiki/Shannon_entropy

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.measure import shannon_entropy
    >>> shannon_entropy(data.camera())
    7.231695011055706
    """
    # Count occurrences of each distinct pixel value; scipy's entropy
    # normalizes the counts to probabilities internally.
    value_counts = unique(image, return_counts=True)[1]
    return scipy_entropy(value_counts, base=base)

View File

@@ -1,899 +0,0 @@
import math
from warnings import warn
import numpy as np
from numpy.linalg import inv
from scipy import optimize, spatial
# Smallest representable increment at 1.0 (machine epsilon); used below to
# keep log() arguments strictly positive in _dynamic_max_trials.
_EPSILON = np.spacing(1)
def _check_data_dim(data, dim):
if data.ndim != 2 or data.shape[1] != dim:
raise ValueError(f"Input data must have shape (N, {dim}).")
def _check_data_atleast_2D(data):
if data.ndim < 2 or data.shape[1] < 2:
raise ValueError('Input data must be at least 2D.')
class BaseModel:
    """Common base for geometric fit models.

    Subclasses populate ``params`` via their ``estimate`` method; until
    then it is ``None``.
    """

    def __init__(self):
        # Filled in by subclass ``estimate`` implementations.
        self.params = None
class LineModelND(BaseModel):
    """Total least squares estimator for N-dimensional lines.

    In contrast to ordinary least squares line estimation, this estimator
    minimizes the orthogonal distances of points to the estimated line.

    Lines are defined by a point (origin) and a unit vector (direction)
    according to the following vector equation::

        X = origin + lambda * direction

    Attributes
    ----------
    params : tuple
        Line model parameters in the following order `origin`, `direction`.

    Examples
    --------
    >>> x = np.linspace(1, 2, 25)
    >>> y = 1.5 * x + 3
    >>> lm = LineModelND()
    >>> lm.estimate(np.stack([x, y], axis=-1))
    True
    >>> tuple(np.round(lm.params, 5))
    (array([1.5 , 5.25]), array([0.5547 , 0.83205]))
    >>> res = lm.residuals(np.stack([x, y], axis=-1))
    >>> np.abs(np.round(res, 9))
    array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
           0., 0., 0., 0., 0., 0., 0., 0.])
    >>> np.round(lm.predict_y(x[:5]), 3)
    array([4.5 , 4.562, 4.625, 4.688, 4.75 ])
    >>> np.round(lm.predict_x(y[:5]), 3)
    array([1. , 1.042, 1.083, 1.125, 1.167])
    """

    def estimate(self, data):
        """Estimate line model from data.

        This minimizes the sum of shortest (orthogonal) distances
        from the given data points to the estimated line.

        Parameters
        ----------
        data : (N, dim) array
            N points in a space of dimensionality dim >= 2.

        Returns
        -------
        success : bool
            True, if model estimation succeeds.
        """
        _check_data_atleast_2D(data)

        # The centroid lies on the total-least-squares line, so it is used
        # as the line's origin; center the data around it.
        origin = data.mean(axis=0)
        data = data - origin

        if data.shape[0] == 2:  # well determined
            direction = data[1] - data[0]
            norm = np.linalg.norm(direction)
            if norm != 0:  # this should not happen to be norm 0
                direction /= norm
        elif data.shape[0] > 2:  # over-determined
            # Note: with full_matrices=1 Python dies with joblib parallel_for.
            _, _, v = np.linalg.svd(data, full_matrices=False)
            # First right singular vector (largest singular value) spans the
            # direction of maximal variance, i.e. the line direction.
            direction = v[0]
        else:  # under-determined
            return False

        self.params = (origin, direction)
        return True

    def residuals(self, data, params=None):
        """Determine residuals of data to model.

        For each point, the shortest (orthogonal) distance to the line is
        returned. It is obtained by projecting the data onto the line.

        Parameters
        ----------
        data : (N, dim) array
            N points in a space of dimension dim.
        params : (2, ) array, optional
            Optional custom parameter set in the form (`origin`, `direction`).

        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        _check_data_atleast_2D(data)
        if params is None:
            if self.params is None:
                raise ValueError('Parameters cannot be None')
            params = self.params
        if len(params) != 2:
            raise ValueError('Parameters are defined by 2 sets.')

        origin, direction = params
        # Subtract each point's projection onto the line; what remains is
        # the orthogonal component, whose norm is the residual.
        res = (data - origin) - \
            ((data - origin) @ direction)[..., np.newaxis] * direction
        return np.linalg.norm(res, axis=1)

    def predict(self, x, axis=0, params=None):
        """Predict intersection of the estimated line model with a hyperplane
        orthogonal to a given axis.

        Parameters
        ----------
        x : (n, 1) array
            Coordinates along an axis.
        axis : int
            Axis orthogonal to the hyperplane intersecting the line.
        params : (2, ) array, optional
            Optional custom parameter set in the form (`origin`, `direction`).

        Returns
        -------
        data : (n, m) array
            Predicted coordinates.

        Raises
        ------
        ValueError
            If the line is parallel to the given axis.
        """
        if params is None:
            if self.params is None:
                raise ValueError('Parameters cannot be None')
            params = self.params
        if len(params) != 2:
            raise ValueError('Parameters are defined by 2 sets.')

        origin, direction = params
        if direction[axis] == 0:
            # line parallel to axis
            raise ValueError(f'Line parallel to axis {axis}')

        # Solve x = origin[axis] + l * direction[axis] for the line
        # parameter l, then evaluate the full line equation at l.
        l = (x - origin[axis]) / direction[axis]
        data = origin + l[..., np.newaxis] * direction
        return data

    def predict_x(self, y, params=None):
        """Predict x-coordinates for 2D lines using the estimated model.

        Alias for::

            predict(y, axis=1)[:, 0]

        Parameters
        ----------
        y : array
            y-coordinates.
        params : (2, ) array, optional
            Optional custom parameter set in the form (`origin`, `direction`).

        Returns
        -------
        x : array
            Predicted x-coordinates.
        """
        x = self.predict(y, axis=1, params=params)[:, 0]
        return x

    def predict_y(self, x, params=None):
        """Predict y-coordinates for 2D lines using the estimated model.

        Alias for::

            predict(x, axis=0)[:, 1]

        Parameters
        ----------
        x : array
            x-coordinates.
        params : (2, ) array, optional
            Optional custom parameter set in the form (`origin`, `direction`).

        Returns
        -------
        y : array
            Predicted y-coordinates.
        """
        y = self.predict(x, axis=0, params=params)[:, 1]
        return y
class CircleModel(BaseModel):
    """Total least squares estimator for 2D circles.

    The functional model of the circle is::

        r**2 = (x - xc)**2 + (y - yc)**2

    This estimator minimizes the squared distances from all points to the
    circle::

        min{ sum((r - sqrt((x_i - xc)**2 + (y_i - yc)**2))**2) }

    A minimum number of 3 points is required to solve for the parameters.

    Attributes
    ----------
    params : tuple
        Circle model parameters in the following order `xc`, `yc`, `r`.

    Notes
    -----
    The estimation is carried out using a 2D version of the spherical
    estimation given in [1]_.

    References
    ----------
    .. [1] Jekel, Charles F. Obtaining non-linear orthotropic material models
           for pvc-coated polyester via inverse bubble inflation.
           Thesis (MEng), Stellenbosch University, 2016. Appendix A, pp. 83-87.
           https://hdl.handle.net/10019.1/98627

    Examples
    --------
    >>> t = np.linspace(0, 2 * np.pi, 25)
    >>> xy = CircleModel().predict_xy(t, params=(2, 3, 4))
    >>> model = CircleModel()
    >>> model.estimate(xy)
    True
    >>> tuple(np.round(model.params, 5))
    (2.0, 3.0, 4.0)
    >>> res = model.residuals(xy)
    >>> np.abs(np.round(res, 9))
    array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
           0., 0., 0., 0., 0., 0., 0., 0.])
    """

    def estimate(self, data):
        """Estimate circle model from data using total least squares.

        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.

        Returns
        -------
        success : bool
            True, if model estimation succeeds.
        """
        _check_data_dim(data, dim=2)

        # to prevent integer overflow, cast data to float, if it isn't already
        float_type = np.promote_types(data.dtype, np.float32)
        data = data.astype(float_type, copy=False)

        # Adapted from a spherical estimator covered in a blog post by Charles
        # Jeckel (see also reference 1 above):
        # https://jekel.me/2015/Least-Squares-Sphere-Fit/
        # Solve the linear system A @ [xc, yc, c] = f in the least-squares
        # sense, where the circle equation has been linearized.
        A = np.append(data * 2,
                      np.ones((data.shape[0], 1), dtype=float_type),
                      axis=1)
        f = np.sum(data ** 2, axis=1)
        C, _, rank, _ = np.linalg.lstsq(A, f, rcond=None)

        # A rank-deficient system means the points do not determine a unique
        # circle (e.g. collinear or duplicated points).
        if rank != 3:
            warn("Input does not contain enough significant data points.")
            return False

        center = C[0:2]
        distances = spatial.minkowski_distance(center, data)
        # RMS distance from center to the points gives the radius estimate.
        r = np.sqrt(np.mean(distances ** 2))

        self.params = tuple(center) + (r,)
        return True

    def residuals(self, data):
        """Determine residuals of data to model.

        For each point the shortest distance to the circle is returned.

        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.

        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        _check_data_dim(data, dim=2)

        xc, yc, r = self.params

        x = data[:, 0]
        y = data[:, 1]

        # Signed radial distance: positive inside the circle, negative
        # outside; callers typically take the absolute value.
        return r - np.sqrt((x - xc)**2 + (y - yc)**2)

    def predict_xy(self, t, params=None):
        """Predict x- and y-coordinates using the estimated model.

        Parameters
        ----------
        t : array
            Angles in circle in radians. Angles start to count from positive
            x-axis to positive y-axis in a right-handed system.
        params : (3, ) array, optional
            Optional custom parameter set.

        Returns
        -------
        xy : (..., 2) array
            Predicted x- and y-coordinates.
        """
        if params is None:
            params = self.params
        xc, yc, r = params

        x = xc + r * np.cos(t)
        y = yc + r * np.sin(t)

        return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)
class EllipseModel(BaseModel):
    """Total least squares estimator for 2D ellipses.

    The functional model of the ellipse is::

        xt = xc + a*cos(theta)*cos(t) - b*sin(theta)*sin(t)
        yt = yc + a*sin(theta)*cos(t) + b*cos(theta)*sin(t)
        d = sqrt((x - xt)**2 + (y - yt)**2)

    where ``(xt, yt)`` is the closest point on the ellipse to ``(x, y)``. Thus
    d is the shortest distance from the point to the ellipse.

    The estimator is based on a least squares minimization. The optimal
    solution is computed directly, no iterations are required. This leads
    to a simple, stable and robust fitting method.

    The ``params`` attribute contains the parameters in the following order::

        xc, yc, a, b, theta

    Attributes
    ----------
    params : tuple
        Ellipse model parameters in the following order `xc`, `yc`, `a`, `b`,
        `theta`.

    Examples
    --------
    >>> xy = EllipseModel().predict_xy(np.linspace(0, 2 * np.pi, 25),
    ...                                params=(10, 15, 4, 8, np.deg2rad(30)))
    >>> ellipse = EllipseModel()
    >>> ellipse.estimate(xy)
    True
    >>> np.round(ellipse.params, 2)
    array([10. , 15. , 4. , 8. , 0.52])
    >>> np.round(abs(ellipse.residuals(xy)), 5)
    array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
           0., 0., 0., 0., 0., 0., 0., 0.])
    """

    def estimate(self, data):
        """Estimate circle model from data using total least squares.

        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.

        Returns
        -------
        success : bool
            True, if model estimation succeeds.

        References
        ----------
        .. [1] Halir, R.; Flusser, J. "Numerically stable direct least squares
               fitting of ellipses". In Proc. 6th International Conference in
               Central Europe on Computer Graphics and Visualization.
               WSCG (Vol. 98, pp. 125-132).
        """
        # Original Implementation: Ben Hammel, Nick Sullivan-Molina
        # another REFERENCE: [2] http://mathworld.wolfram.com/Ellipse.html
        _check_data_dim(data, dim=2)

        # to prevent integer overflow, cast data to float, if it isn't already
        float_type = np.promote_types(data.dtype, np.float32)
        data = data.astype(float_type, copy=False)

        x = data[:, 0]
        y = data[:, 1]

        # Quadratic part of design matrix [eqn. 15] from [1]
        D1 = np.vstack([x ** 2, x * y, y ** 2]).T
        # Linear part of design matrix [eqn. 16] from [1]
        D2 = np.vstack([x, y, np.ones_like(x)]).T

        # forming scatter matrix [eqn. 17] from [1]
        S1 = D1.T @ D1
        S2 = D1.T @ D2
        S3 = D2.T @ D2

        # Constraint matrix [eqn. 18]
        C1 = np.array([[0., 0., 2.], [0., -1., 0.], [2., 0., 0.]])

        try:
            # Reduced scatter matrix [eqn. 29]
            M = inv(C1) @ (S1 - S2 @ inv(S3) @ S2.T)
        except np.linalg.LinAlgError:  # LinAlgError: Singular matrix
            # Degenerate input (e.g. collinear points) — no ellipse fits.
            return False

        # M*|a b c >=l|a b c >. Find eigenvalues and eigenvectors
        # from this equation [eqn. 28]
        eig_vals, eig_vecs = np.linalg.eig(M)

        # eigenvector must meet constraint 4ac - b^2 to be valid.
        cond = 4 * np.multiply(eig_vecs[0, :], eig_vecs[2, :]) \
            - np.power(eig_vecs[1, :], 2)
        a1 = eig_vecs[:, (cond > 0)]
        # seeks for empty matrix
        if 0 in a1.shape or len(a1.ravel()) != 3:
            # Zero or multiple eigenvectors satisfy the ellipse constraint;
            # the fit is ambiguous, so report failure.
            return False
        a, b, c = a1.ravel()

        # |d f g> = -S3^(-1)*S2^(T)*|a b c> [eqn. 24]
        a2 = -inv(S3) @ S2.T @ a1
        d, f, g = a2.ravel()

        # eigenvectors are the coefficients of an ellipse in general form
        # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 (eqn. 15) from [2]
        b /= 2.
        d /= 2.
        f /= 2.

        # finding center of ellipse [eqn.19 and 20] from [2]
        x0 = (c * d - b * f) / (b ** 2. - a * c)
        y0 = (a * f - b * d) / (b ** 2. - a * c)

        # Find the semi-axes lengths [eqn. 21 and 22] from [2]
        numerator = a * f ** 2 + c * d ** 2 + g * b ** 2 \
            - 2 * b * d * f - a * c * g
        term = np.sqrt((a - c) ** 2 + 4 * b ** 2)
        denominator1 = (b ** 2 - a * c) * (term - (a + c))
        denominator2 = (b ** 2 - a * c) * (- term - (a + c))
        width = np.sqrt(2 * numerator / denominator1)
        height = np.sqrt(2 * numerator / denominator2)

        # angle of counterclockwise rotation of major-axis of ellipse
        # to x-axis [eqn. 23] from [2].
        phi = 0.5 * np.arctan((2. * b) / (a - c))
        if a > c:
            phi += 0.5 * np.pi

        # nan_to_num guards against degenerate axes; np.real drops spurious
        # imaginary parts from the eigen-decomposition.
        self.params = np.nan_to_num([x0, y0, width, height, phi]).tolist()
        self.params = [float(np.real(x)) for x in self.params]
        return True

    def residuals(self, data):
        """Determine residuals of data to model.

        For each point the shortest distance to the ellipse is returned.

        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.

        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        _check_data_dim(data, dim=2)

        xc, yc, a, b, theta = self.params

        ctheta = math.cos(theta)
        stheta = math.sin(theta)

        x = data[:, 0]
        y = data[:, 1]

        N = data.shape[0]

        def fun(t, xi, yi):
            # Squared distance between (xi, yi) and the ellipse point at
            # parameter t; minimized below to find the closest point.
            ct = math.cos(t)
            st = math.sin(t)
            xt = xc + a * ctheta * ct - b * stheta * st
            yt = yc + a * stheta * ct + b * ctheta * st
            return (xi - xt) ** 2 + (yi - yt) ** 2

        # def Dfun(t, xi, yi):
        #     ct = math.cos(t)
        #     st = math.sin(t)
        #     xt = xc + a * ctheta * ct - b * stheta * st
        #     yt = yc + a * stheta * ct + b * ctheta * st
        #     dfx_t = - 2 * (xi - xt) * (- a * ctheta * st
        #                                - b * stheta * ct)
        #     dfy_t = - 2 * (yi - yt) * (- a * stheta * st
        #                                + b * ctheta * ct)
        #     return [dfx_t + dfy_t]

        residuals = np.empty((N, ), dtype=np.float64)

        # initial guess for parameter t of closest point on ellipse
        t0 = np.arctan2(y - yc, x - xc) - theta

        # determine shortest distance to ellipse for each point
        for i in range(N):
            xi = x[i]
            yi = y[i]
            # faster without Dfun, because of the python overhead
            t, _ = optimize.leastsq(fun, t0[i], args=(xi, yi))
            residuals[i] = np.sqrt(fun(t, xi, yi))

        return residuals

    def predict_xy(self, t, params=None):
        """Predict x- and y-coordinates using the estimated model.

        Parameters
        ----------
        t : array
            Angles in circle in radians. Angles start to count from positive
            x-axis to positive y-axis in a right-handed system.
        params : (5, ) array, optional
            Optional custom parameter set.

        Returns
        -------
        xy : (..., 2) array
            Predicted x- and y-coordinates.
        """
        if params is None:
            params = self.params

        xc, yc, a, b, theta = params

        ct = np.cos(t)
        st = np.sin(t)
        ctheta = math.cos(theta)
        stheta = math.sin(theta)

        # Parametric ellipse equation with rotation theta.
        x = xc + a * ctheta * ct - b * stheta * st
        y = yc + a * stheta * ct + b * ctheta * st

        return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    """Determine number trials such that at least one outlier-free subset is
    sampled for the given inlier/outlier ratio.

    Parameters
    ----------
    n_inliers : int
        Number of inliers in the data.
    n_samples : int
        Total number of samples in the data.
    min_samples : int
        Minimum number of samples chosen randomly from original data.
    probability : float
        Probability (confidence) that one outlier-free sample is generated.

    Returns
    -------
    trials : int
        Number of trials.
    """
    # Zero requested confidence needs no trials at all.
    if probability == 0:
        return 0
    # With no inliers, no finite number of trials can succeed.
    if n_inliers == 0:
        return np.inf

    inlier_ratio = n_inliers / n_samples
    # Clamp both log arguments away from zero to avoid -inf / division
    # problems when probability -> 1 or inlier_ratio -> 1.
    numerator = max(_EPSILON, 1 - probability)
    denominator = max(_EPSILON, 1 - inlier_ratio ** min_samples)
    return np.ceil(np.log(numerator) / np.log(denominator))
def ransac(data, model_class, min_samples, residual_threshold,
           is_data_valid=None, is_model_valid=None,
           max_trials=100, stop_sample_num=np.inf, stop_residuals_sum=0,
           stop_probability=1, random_state=None, initial_inliers=None):
    """Fit a model to data with the RANSAC (random sample consensus) algorithm.

    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. Each iteration
    performs the following tasks:

    1. Select `min_samples` random samples from the original data and check
       whether the set of data is valid (see `is_data_valid`).
    2. Estimate a model to the random subset
       (`model_cls.estimate(*data[random_subset]`) and check whether the
       estimated model is valid (see `is_model_valid`).
    3. Classify all data as inliers or outliers by calculating the residuals
       to the estimated model (`model_cls.residuals(*data)`) - all data samples
       with residuals smaller than the `residual_threshold` are considered as
       inliers.
    4. Save estimated model as best model if number of inlier samples is
       maximal. In case the current estimated model has the same number of
       inliers, it is only considered as the best model if it has less sum of
       residuals.

    These steps are performed either a maximum number of times or until one of
    the special stop criteria are met. The final model is estimated using all
    inlier samples of the previously determined best model.

    Parameters
    ----------
    data : [list, tuple of] (N, ...) array
        Data set to which the model is fitted, where N is the number of data
        points and the remaining dimension are depending on model requirements.
        If the model class requires multiple input data arrays (e.g. source and
        destination coordinates of  ``skimage.transform.AffineTransform``),
        they can be optionally passed as tuple or list. Note, that in this case
        the functions ``estimate(*data)``, ``residuals(*data)``,
        ``is_model_valid(model, *random_data)`` and
        ``is_data_valid(*random_data)`` must all take each data array as
        separate arguments.
    model_class : object
        Object with the following object methods:

         * ``success = estimate(*data)``
         * ``residuals(*data)``

        where `success` indicates whether the model estimation succeeded
        (`True` or `None` for success, `False` for failure).
    min_samples : int in range (0, N)
        The minimum number of data points to fit a model to.
    residual_threshold : float larger than 0
        Maximum distance for a data point to be classified as an inlier.
    is_data_valid : function, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(*random_data)`.
    is_model_valid : function, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, *random_data)`, .
    max_trials : int, optional
        Maximum number of iterations for random sample selection.
    stop_sample_num : int, optional
        Stop iteration if at least this number of inliers are found.
    stop_residuals_sum : float, optional
        Stop iteration if sum of residuals is less than or equal to this
        threshold.
    stop_probability : float in range [0, 1], optional
        RANSAC iteration stops if at least one outlier-free set of the
        training data is sampled with ``probability >= stop_probability``,
        depending on the current best model's inlier ratio and the number
        of trials. This requires to generate at least N samples (trials):

            N >= log(1 - probability) / log(1 - e**m)

        where the probability (confidence) is typically set to a high value
        such as 0.99, e is the current fraction of inliers w.r.t. the
        total number of samples, and m is the min_samples value.
    random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is None the `numpy.random.Generator` singleton is
        used.
        If `random_state` is an int, a new ``Generator`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` instance then that
        instance is used.
    initial_inliers : array-like of bool, shape (N,), optional
        Initial samples selection for model estimation

    Returns
    -------
    model : object
        Best model with largest consensus set.
    inliers : (N, ) array
        Boolean mask of inliers classified as ``True``.

    References
    ----------
    .. [1] "RANSAC", Wikipedia, https://en.wikipedia.org/wiki/RANSAC

    Examples
    --------
    Generate ellipse data without tilt and add noise:

    >>> t = np.linspace(0, 2 * np.pi, 50)
    >>> xc, yc = 20, 30
    >>> a, b = 5, 10
    >>> x = xc + a * np.cos(t)
    >>> y = yc + b * np.sin(t)
    >>> data = np.column_stack([x, y])
    >>> rng = np.random.default_rng(203560)  # do not copy this value
    >>> data += rng.normal(size=data.shape)

    Add some faulty data:

    >>> data[0] = (100, 100)
    >>> data[1] = (110, 120)
    >>> data[2] = (120, 130)
    >>> data[3] = (140, 130)

    Estimate ellipse model using all available data:

    >>> model = EllipseModel()
    >>> model.estimate(data)
    True
    >>> np.round(model.params)  # doctest: +SKIP
    array([ 72.,  75.,  77.,  14.,   1.])

    Estimate ellipse model using RANSAC:

    >>> ransac_model, inliers = ransac(data, EllipseModel, 20, 3, max_trials=50)
    >>> abs(np.round(ransac_model.params))
    array([20., 30., 10.,  6.,  2.])
    >>> inliers  # doctest: +SKIP
    array([False, False, False, False,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True], dtype=bool)
    >>> sum(inliers) > 40
    True

    RANSAC can be used to robustly estimate a geometric
    transformation. In this section, we also show how to use a
    proportion of the total samples, rather than an absolute number.

    >>> from skimage.transform import SimilarityTransform
    >>> rng = np.random.default_rng()
    >>> src = 100 * rng.random((50, 2))
    >>> model0 = SimilarityTransform(scale=0.5, rotation=1,
    ...                              translation=(10, 20))
    >>> dst = model0(src)
    >>> dst[0] = (10000, 10000)
    >>> dst[1] = (-100, 100)
    >>> dst[2] = (50, 50)
    >>> ratio = 0.5  # use half of the samples
    >>> min_samples = int(ratio * len(src))
    >>> model, inliers = ransac((src, dst), SimilarityTransform, min_samples,
    ...                         10,
    ...                         initial_inliers=np.ones(len(src), dtype=bool))
    >>> inliers  # doctest: +SKIP
    array([False, False, False,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True])
    """
    best_inlier_num = 0
    best_inlier_residuals_sum = np.inf
    best_inliers = []
    validate_model = is_model_valid is not None
    validate_data = is_data_valid is not None

    random_state = np.random.default_rng(random_state)

    # in case data is not pair of input and output, make it like it
    if not isinstance(data, (tuple, list)):
        data = (data, )
    num_samples = len(data[0])

    if not (0 < min_samples < num_samples):
        raise ValueError(f"`min_samples` must be in range (0, {num_samples})")

    if residual_threshold < 0:
        raise ValueError("`residual_threshold` must be greater than zero")

    if max_trials < 0:
        raise ValueError("`max_trials` must be greater than zero")

    if not (0 <= stop_probability <= 1):
        raise ValueError("`stop_probability` must be in range [0, 1]")

    if initial_inliers is not None and len(initial_inliers) != num_samples:
        raise ValueError(
            f"RANSAC received a vector of initial inliers (length "
            f"{len(initial_inliers)}) that didn't match the number of "
            f"samples ({num_samples}). The vector of initial inliers should "
            f"have the same length as the number of samples and contain only "
            f"True (this sample is an initial inlier) and False (this one "
            f"isn't) values.")

    # for the first run use initial guess of inliers
    spl_idxs = (initial_inliers if initial_inliers is not None
                else random_state.choice(num_samples, min_samples,
                                         replace=False))

    # estimate model for current random sample set
    model = model_class()

    num_trials = 0
    # max_trials can be updated inside the loop, so this cannot be a for-loop
    while num_trials < max_trials:
        num_trials += 1

        # do sample selection according data pairs
        samples = [d[spl_idxs] for d in data]

        # for next iteration choose random sample set and be sure that
        # no samples repeat
        spl_idxs = random_state.choice(num_samples, min_samples, replace=False)

        # optional check if random sample set is valid
        if validate_data and not is_data_valid(*samples):
            continue

        success = model.estimate(*samples)
        # backwards compatibility: `None` from estimate counts as success
        if success is not None and not success:
            continue

        # optional check if estimated model is valid
        if validate_model and not is_model_valid(model, *samples):
            continue

        residuals = np.abs(model.residuals(*data))
        # consensus set / inliers
        inliers = residuals < residual_threshold
        # sum of squared residuals, used to break ties between models with
        # an equal number of inliers
        residuals_sum = residuals.dot(residuals)

        # choose as new best model if number of inliers is maximal
        inliers_count = np.count_nonzero(inliers)
        if (
            # more inliers
            inliers_count > best_inlier_num
            # same number of inliers but less "error" in terms of residuals
            or (inliers_count == best_inlier_num
                and residuals_sum < best_inlier_residuals_sum)):
            best_inlier_num = inliers_count
            best_inlier_residuals_sum = residuals_sum
            best_inliers = inliers
            # shrink the trial budget once the inlier ratio makes further
            # sampling unlikely to improve the model
            max_trials = min(max_trials,
                             _dynamic_max_trials(best_inlier_num,
                                                 num_samples,
                                                 min_samples,
                                                 stop_probability))

            # early exit when an explicit stop criterion is already met
            if (best_inlier_num >= stop_sample_num
                    or best_inlier_residuals_sum <= stop_residuals_sum):
                break

    # estimate final model using all inliers
    if any(best_inliers):
        # select inliers for each data array
        data_inliers = [d[best_inliers] for d in data]
        model.estimate(*data_inliers)
        if validate_model and not is_model_valid(model, *data_inliers):
            warn("Estimated model is not valid. Try increasing max_trials.")
    else:
        model = None
        best_inliers = None
        warn("No inliers found. Model not fitted")

    return model, best_inliers

View File

@@ -1,67 +0,0 @@
from ._pnpoly import _grid_points_in_poly, _points_in_poly
def grid_points_in_poly(shape, verts, binarize=True):
    """Test whether points on a specified grid are inside a polygon.

    For each ``(r, c)`` coordinate on a grid, i.e. ``(0, 0)``, ``(0, 1)``
    etc., test whether that point lies inside a polygon.

    You can control the output type with the `binarize` flag. Please refer
    to its documentation for further details.

    Parameters
    ----------
    shape : tuple (M, N)
        Shape of the grid.
    verts : (V, 2) array
        Specify the V vertices of the polygon, sorted either clockwise
        or anti-clockwise. The first point may (but does not need to be)
        duplicated.
    binarize: bool
        If `True`, the output of the function is a boolean mask.
        Otherwise, it is a labeled array. The labels are:
        O - outside, 1 - inside, 2 - vertex, 3 - edge.

    See Also
    --------
    points_in_poly

    Returns
    -------
    mask : (M, N) ndarray
        If `binarize` is True, the output is a boolean mask. True means the
        corresponding pixel falls inside the polygon.
        If `binarize` is False, the output is a labeled array, with pixels
        having a label between 0 and 3. The meaning of the values is:
        O - outside, 1 - inside, 2 - vertex, 3 - edge.
    """
    # The Cython helper labels every grid point 0-3; collapse the labels to
    # in/out when a boolean mask is requested (0 -> False, 1-3 -> True).
    labels = _grid_points_in_poly(shape, verts)
    if not binarize:
        return labels
    return labels.astype(bool)
def points_in_poly(points, verts):
    """Test whether each of a set of points lies inside a polygon.

    Parameters
    ----------
    points : (N, 2) array
        Input points, ``(x, y)``.
    verts : (M, 2) array
        Vertices of the polygon, sorted either clockwise or
        anti-clockwise. The first point may (but does not need to be)
        duplicated.

    See Also
    --------
    grid_points_in_poly

    Returns
    -------
    mask : (N,) array of bool
        True where the corresponding point falls inside the polygon.
    """
    # delegate to the compiled point-in-polygon kernel
    inside = _points_in_poly(points, verts)
    return inside

View File

@@ -1,167 +0,0 @@
import numpy as np
from scipy import ndimage as ndi
from .._shared.utils import _validate_interpolation_order, _fix_ndimage_mode
def profile_line(image, src, dst, linewidth=1,
                 order=None, mode='reflect', cval=0.0,
                 *, reduce_func=np.mean):
    """Return the intensity profile of an image measured along a scan line.

    Parameters
    ----------
    image : ndarray, shape (M, N[, C])
        The image, either grayscale (2D array) or multichannel
        (3D array, where the final axis contains the channel
        information).
    src : array_like, shape (2, )
        The coordinates of the start point of the scan line.
    dst : array_like, shape (2, )
        The coordinates of the end point of the scan line. The
        destination point is *included* in the profile, in contrast to
        standard numpy indexing.
    linewidth : int, optional
        Width of the scan, perpendicular to the line.
    order : int in {0, 1, 2, 3, 4, 5}, optional
        The order of the spline interpolation, default is 0 if
        image.dtype is bool and 1 otherwise. The order has to be in
        the range 0-5. See `skimage.transform.warp` for detail.
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        How to compute any values falling outside of the image.
    cval : float, optional
        If `mode` is 'constant', what constant value to use outside the
        image.
    reduce_func : callable, optional
        Function used to aggregate pixel values perpendicular to the
        line direction when `linewidth` > 1. If set to None the
        unreduced array is returned.

    Returns
    -------
    return_value : array
        The intensity profile along the scan line. The length of the
        profile is the ceil of the computed length of the scan line.

    Examples
    --------
    >>> x = np.array([[1, 1, 1, 2, 2, 2]])
    >>> img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)])
    >>> profile_line(img, (2, 1), (2, 4))
    array([1., 1., 2., 2.])
    >>> profile_line(img, (1, 0), (1, 6), cval=4)
    array([1., 1., 1., 2., 2., 2., 2.])

    The destination point is included in the profile, in contrast to
    standard numpy indexing, so ``(1, 5)`` reaches the full first row:

    >>> profile_line(img, (1, 0), (1, 5))
    array([1., 1., 1., 2., 2., 2.])

    With `linewidth` > 1 the perpendicular samples are reduced by
    `reduce_func`:

    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.mean)
    array([0.66666667, 0.66666667, 0.66666667, 1.33333333])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.sum)
    array([2, 2, 2, 4])

    The unreduced array is returned when `reduce_func` is None or when
    `reduce_func` acts on each pixel value individually:

    >>> profile_line(img, (1, 2), (4, 2), linewidth=3, order=0,
    ...              reduce_func=None)
    array([[1, 1, 2],
           [1, 1, 2],
           [1, 1, 2],
           [0, 0, 0]])
    """
    order = _validate_interpolation_order(image.dtype, order)
    mode = _fix_ndimage_mode(mode)
    coords = _line_profile_coordinates(src, dst, linewidth=linewidth)
    if image.ndim == 3:
        # multichannel: sample each channel separately, then move the
        # channel axis back to the end
        per_channel = [
            ndi.map_coordinates(image[..., ch], coords,
                                prefilter=order > 1, order=order,
                                mode=mode, cval=cval)
            for ch in range(image.shape[2])
        ]
        samples = np.transpose(np.asarray(per_channel), (1, 2, 0))
    else:
        samples = ndi.map_coordinates(image, coords, prefilter=order > 1,
                                      order=order, mode=mode, cval=cval)
    # with reduce_func=None the perpendicular axis comes out reversed;
    # flip it so the output orientation is consistent
    samples = np.flip(samples, axis=1)
    if reduce_func is None:
        return samples
    try:
        return reduce_func(samples, axis=1)
    except TypeError:
        # reduce_func does not accept an axis kwarg; apply it row-wise
        return np.apply_along_axis(reduce_func, arr=samples, axis=1)
def _line_profile_coordinates(src, dst, linewidth=1):
"""Return the coordinates of the profile of an image along a scan line.
Parameters
----------
src : 2-tuple of numeric scalar (float or int)
The start point of the scan line.
dst : 2-tuple of numeric scalar (float or int)
The end point of the scan line.
linewidth : int, optional
Width of the scan, perpendicular to the line
Returns
-------
coords : array, shape (2, N, C), float
The coordinates of the profile along the scan line. The length of the
profile is the ceil of the computed length of the scan line.
Notes
-----
This is a utility method meant to be used internally by skimage functions.
The destination point is included in the profile, in contrast to
standard numpy indexing.
"""
src_row, src_col = src = np.asarray(src, dtype=float)
dst_row, dst_col = dst = np.asarray(dst, dtype=float)
d_row, d_col = dst - src
theta = np.arctan2(d_row, d_col)
length = int(np.ceil(np.hypot(d_row, d_col) + 1))
# we add one above because we include the last point in the profile
# (in contrast to standard numpy indexing)
line_col = np.linspace(src_col, dst_col, length)
line_row = np.linspace(src_row, dst_row, length)
# we subtract 1 from linewidth to change from pixel-counting
# (make this line 3 pixels wide) to point distances (the
# distance between pixel centers)
col_width = (linewidth - 1) * np.sin(-theta) / 2
row_width = (linewidth - 1) * np.cos(theta) / 2
perp_rows = np.stack([np.linspace(row_i - row_width, row_i + row_width,
linewidth) for row_i in line_row])
perp_cols = np.stack([np.linspace(col_i - col_width, col_i + col_width,
linewidth) for col_i in line_col])
return np.stack([perp_rows, perp_cols])

View File

@@ -1,135 +0,0 @@
import numpy as np
from skimage.measure import block_reduce
from skimage._shared import testing
from skimage._shared.testing import assert_equal
def test_block_reduce_sum():
    """Default reduction (sum), with and without zero-padding."""
    img = np.arange(4 * 6).reshape(4, 6)
    assert_equal(np.array([[24, 42], [96, 114]]),
                 block_reduce(img, (2, 3)))
    # 5x8 does not divide evenly by (3, 3): padded with zeros
    img = np.arange(5 * 8).reshape(5, 8)
    assert_equal(np.array([[81, 108, 87], [174, 192, 138]]),
                 block_reduce(img, (3, 3)))
def test_block_reduce_mean():
    img = np.arange(4 * 6).reshape(4, 6)
    assert_equal(np.array([[4., 7.], [16., 19.]]),
                 block_reduce(img, (2, 3), func=np.mean))
    img = np.arange(5 * 8).reshape(5, 8)
    assert_equal(np.array([[14., 10.8], [8.5, 5.7]]),
                 block_reduce(img, (4, 5), func=np.mean))
def test_block_reduce_median():
    img = np.arange(4 * 6).reshape(4, 6)
    assert_equal(np.array([[4., 7.], [16., 19.]]),
                 block_reduce(img, (2, 3), func=np.median))
    img = np.arange(5 * 8).reshape(5, 8)
    assert_equal(np.array([[14., 6.5], [0., 0.]]),
                 block_reduce(img, (4, 5), func=np.median))
    # the median must ignore a single large outlier
    outlier = np.array([[1, 5, 5, 5], [5, 5, 5, 1000]])
    assert_equal(5, block_reduce(outlier, (2, 4), func=np.median))
def test_block_reduce_min():
    img = np.arange(4 * 6).reshape(4, 6)
    assert_equal(np.array([[0, 3], [12, 15]]),
                 block_reduce(img, (2, 3), func=np.min))
    # zero-padding makes every padded block's minimum zero
    img = np.arange(5 * 8).reshape(5, 8)
    assert_equal(np.array([[0, 0], [0, 0]]),
                 block_reduce(img, (4, 5), func=np.min))
def test_block_reduce_max():
    img = np.arange(4 * 6).reshape(4, 6)
    assert_equal(np.array([[8, 11], [20, 23]]),
                 block_reduce(img, (2, 3), func=np.max))
    img = np.arange(5 * 8).reshape(5, 8)
    assert_equal(np.array([[28, 31], [36, 39]]),
                 block_reduce(img, (4, 5), func=np.max))
def test_invalid_block_size():
    img = np.arange(4 * 6).reshape(4, 6)
    # block_size ndim must match the image ndim
    with testing.raises(ValueError):
        block_reduce(img, [1, 2, 3])
    # block sizes must be integral
    with testing.raises(ValueError):
        block_reduce(img, [1, 0.5])
def test_default_block_size():
    img = np.arange(4 * 6).reshape(4, 6)
    assert_equal(np.array([[0, 2, 4], [12, 14, 16]]),
                 block_reduce(img, func=np.min))
def test_scalar_block_size():
    img = np.arange(6 * 6).reshape(6, 6)
    reduced = block_reduce(img, 3, func=np.min)
    assert_equal(np.array([[0, 3], [18, 21]]), reduced)
    # a scalar block size is equivalent to the expanded tuple
    assert_equal(block_reduce(img, (3, 3), func=np.min), reduced)
def test_func_kwargs_same_dtype():
    """func_kwargs are forwarded to func (here: integer-dtype mean)."""
    img = np.array([[97, 123, 173, 227],
                    [217, 241, 221, 214],
                    [211, 11, 170, 53],
                    [214, 205, 101, 57]], dtype=np.uint8)
    reduced = block_reduce(img, (2, 2), func=np.mean,
                           func_kwargs={'dtype': np.uint8})
    expected = np.array([[41, 16], [32, 31]], dtype=np.uint8)
    assert_equal(reduced, expected)
    assert reduced.dtype == expected.dtype
def test_func_kwargs_different_dtype():
    """func_kwargs can change the output dtype (float64 -> float16)."""
    img = np.array([[0.45745366, 0.67479345, 0.20949775, 0.3147348],
                    [0.7209286, 0.88915504, 0.66153409, 0.07919526],
                    [0.04640037, 0.54008495, 0.34664343, 0.56152301],
                    [0.58085003, 0.80144708, 0.87844473, 0.29811511]],
                   dtype=np.float64)
    reduced = block_reduce(img, (2, 2), func=np.mean,
                           func_kwargs={'dtype': np.float16})
    expected = np.array([[0.6855, 0.3164], [0.4922, 0.521]],
                        dtype=np.float16)
    assert_equal(reduced, expected)
    assert reduced.dtype == expected.dtype

View File

@@ -1,54 +0,0 @@
from numpy.testing import assert_array_equal
from skimage.color import rgb2gray
from skimage.data import astronaut, cells3d
from skimage.filters import gaussian
from skimage.measure import blur_effect
def test_blur_effect():
    """The blur metric increases with the amount of Gaussian blur."""
    image = astronaut()
    sharp = blur_effect(image, channel_axis=-1)
    mild = blur_effect(gaussian(image, sigma=1, channel_axis=-1),
                       channel_axis=-1)
    strong = blur_effect(gaussian(image, sigma=4, channel_axis=-1),
                         channel_axis=-1)
    assert 0 <= sharp < 1
    assert sharp < mild < strong
def test_blur_effect_h_size():
    """The blur metric decreases as the re-blurring filter grows."""
    image = astronaut()
    small = blur_effect(image, h_size=3, channel_axis=-1)
    default = blur_effect(image, channel_axis=-1)  # default h_size is 11
    large = blur_effect(image, h_size=30, channel_axis=-1)
    assert 0 <= small < 1
    assert small > default > large
def test_blur_effect_channel_axis():
    """RGB input with channel_axis matches its grayscale version."""
    image = astronaut()
    rgb_metric = blur_effect(image, channel_axis=-1)
    gray_metric = blur_effect(rgb2gray(image))
    rgb_raw = blur_effect(image, channel_axis=-1, reduce_func=None)
    gray_raw = blur_effect(rgb2gray(image), reduce_func=None)
    assert 0 <= rgb_metric < 1
    assert rgb_metric == gray_metric
    assert_array_equal(rgb_raw, gray_raw)
def test_blur_effect_3d():
    """The blur metric also works on a 3D volume."""
    nuclei = cells3d()[:, 1, :, :]  # grab just the nuclei channel
    sharp = blur_effect(nuclei)
    mild = blur_effect(gaussian(nuclei, sigma=1))
    strong = blur_effect(gaussian(nuclei, sigma=4))
    assert 0 <= sharp < 1
    assert sharp < mild < strong

View File

@@ -1,295 +0,0 @@
import numpy as np
from skimage.measure import label
import skimage.measure._ccomp as ccomp
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
BG = 0 # background value
class TestConnectedComponents:
    """label() on a 2D multi-valued image."""
    def setup_method(self):
        # equal, connected values form one component
        self.x = np.array([
            [0, 0, 3, 2, 1, 9],
            [0, 1, 1, 9, 2, 9],
            [0, 0, 1, 9, 9, 9],
            [3, 1, 1, 5, 3, 0]])
        self.labels = np.array([
            [0, 0, 1, 2, 3, 4],
            [0, 5, 5, 4, 2, 4],
            [0, 0, 5, 4, 4, 4],
            [6, 5, 5, 7, 8, 0]])
        # Without a background there is no label 0: labelling starts at
        # 1, so every expected label shifts up by one.
        self.labels_nobg = self.labels + 1
        # The 0 in the lower-right corner is isolated -> new label.
        self.labels_nobg[-1, -1] = 10
        # Expected result when value 9 is the background (label 0).
        self.labels_bg_9 = self.labels_nobg.copy()
        self.labels_bg_9[self.x == 9] = 0
        # Labels above the one the value 9 would have received (5)
        # shift down by one.
        self.labels_bg_9[self.labels_bg_9 > 5] -= 1
    def test_basic(self):
        assert_array_equal(label(self.x), self.labels)
        # the input must not be modified in place
        assert self.x[0, 2] == 3
        # works when nothing matches the background value
        assert_array_equal(label(self.x, background=99), self.labels_nobg)
        # works with a non-zero background value
        assert_array_equal(label(self.x, background=9), self.labels_bg_9)
    def test_random(self):
        data = (np.random.rand(20, 30) * 5).astype(int)
        labeled = label(data)
        for lab in range(labeled.max()):
            values = data[labeled == lab]
            # each component must be constant in the source image
            assert np.all(values == values[0])
    def test_diag(self):
        img = np.array([[0, 0, 1],
                        [0, 1, 0],
                        [1, 0, 0]])
        # default connectivity joins the anti-diagonal into one region
        assert_array_equal(label(img), img)
    def test_4_vs_8(self):
        img = np.array([[0, 1],
                        [1, 0]], dtype=int)
        # 4-connectivity separates the diagonal pair ...
        assert_array_equal(label(img, connectivity=1),
                           [[0, 1],
                            [2, 0]])
        # ... 8-connectivity merges it
        assert_array_equal(label(img, connectivity=2),
                           [[0, 1],
                            [1, 0]])
    def test_background(self):
        img = np.array([[1, 0, 0],
                        [1, 1, 5],
                        [0, 0, 0]])
        expected = [[1, 0, 0],
                    [1, 1, 2],
                    [0, 0, 0]]
        # background=0 is the default, so both calls agree
        assert_array_equal(label(img), expected)
        assert_array_equal(label(img, background=0), expected)
    def test_background_two_regions(self):
        img = np.array([[0, 0, 6],
                        [0, 0, 6],
                        [5, 5, 5]])
        assert_array_equal(label(img, background=0),
                           [[0, 0, 1],
                            [0, 0, 1],
                            [2, 2, 2]])
    def test_background_one_region_center(self):
        img = np.array([[0, 0, 0],
                        [0, 1, 0],
                        [0, 0, 0]])
        assert_array_equal(label(img, connectivity=1, background=0),
                           [[0, 0, 0],
                            [0, 1, 0],
                            [0, 0, 0]])
    def test_return_num(self):
        img = np.array([[1, 0, 6],
                        [0, 0, 6],
                        [5, 5, 5]])
        assert_array_equal(label(img, return_num=True)[1], 3)
        # with no background, the zeros form a region of their own
        assert_array_equal(label(img, background=-1, return_num=True)[1], 4)
class TestConnectedComponents3d:
    """label() on a small 3D volume."""
    def setup_method(self):
        self.x = np.zeros((3, 4, 5), int)
        self.x[0] = np.array([[0, 3, 2, 1, 9],
                              [0, 1, 9, 2, 9],
                              [0, 1, 9, 9, 9],
                              [3, 1, 5, 3, 0]])
        self.x[1] = np.array([[3, 3, 2, 1, 9],
                              [0, 3, 9, 2, 1],
                              [0, 3, 3, 1, 1],
                              [3, 1, 3, 3, 0]])
        self.x[2] = np.array([[3, 3, 8, 8, 0],
                              [2, 3, 9, 8, 8],
                              [2, 3, 0, 8, 0],
                              [2, 1, 0, 0, 0]])
        # expected labels for the default call
        self.labels = np.zeros((3, 4, 5), int)
        self.labels[0] = np.array([[0, 1, 2, 3, 4],
                                   [0, 5, 4, 2, 4],
                                   [0, 5, 4, 4, 4],
                                   [1, 5, 6, 1, 0]])
        self.labels[1] = np.array([[1, 1, 2, 3, 4],
                                   [0, 1, 4, 2, 3],
                                   [0, 1, 1, 3, 3],
                                   [1, 5, 1, 1, 0]])
        self.labels[2] = np.array([[1, 1, 7, 7, 0],
                                   [8, 1, 4, 7, 7],
                                   [8, 1, 0, 7, 0],
                                   [8, 5, 0, 0, 0]])
    def test_basic(self):
        assert_array_equal(label(self.x), self.labels)
        # the input must survive labeling untouched
        assert self.x[0, 0, 2] == 2, \
            "Data was modified!"
    def test_random(self):
        data = (np.random.rand(20, 30) * 5).astype(int)
        labeled = label(data)
        for lab in range(labeled.max()):
            values = data[labeled == lab]
            # each component must be constant in the source image
            assert np.all(values == values[0])
    def test_diag(self):
        vol = np.zeros((3, 3, 3), int)
        vol[0, 2, 2] = 1
        vol[1, 1, 1] = 1
        vol[2, 0, 0] = 1
        # full (corner) connectivity keeps the space diagonal connected
        assert_array_equal(label(vol), vol)
    def test_4_vs_8(self):
        vol = np.zeros((2, 2, 2), int)
        vol[0, 1, 1] = 1
        vol[1, 0, 0] = 1
        face_only = vol.copy()
        face_only[1, 0, 0] = 2
        assert_array_equal(label(vol, connectivity=1), face_only)
        assert_array_equal(label(vol, connectivity=3), vol)
    def test_connectivity_1_vs_2(self):
        vol = np.zeros((2, 2, 2), int)
        vol[0, 1, 1] = 1
        vol[1, 0, 0] = 1
        face_only = vol.copy()
        face_only[1, 0, 0] = 2
        assert_array_equal(label(vol, connectivity=1), face_only)
        assert_array_equal(label(vol, connectivity=3), vol)
    def test_background(self):
        vol = np.zeros((2, 3, 3), int)
        vol[0] = np.array([[1, 0, 0],
                           [1, 0, 0],
                           [0, 0, 0]])
        vol[1] = np.array([[0, 0, 0],
                           [0, 1, 5],
                           [0, 0, 0]])
        # expected labels with no background (background=-1)
        lnb = vol.copy()
        lnb[0] = np.array([[1, 2, 2],
                           [1, 2, 2],
                           [2, 2, 2]])
        lnb[1] = np.array([[2, 2, 2],
                           [2, 1, 3],
                           [2, 2, 2]])
        # expected labels with the default zero background
        lb = vol.copy()
        lb[0] = np.array([[1, BG, BG],
                          [1, BG, BG],
                          [BG, BG, BG]])
        lb[1] = np.array([[BG, BG, BG],
                          [BG, 1, 2],
                          [BG, BG, BG]])
        assert_array_equal(label(vol), lb)
        assert_array_equal(label(vol, background=-1), lnb)
    def test_background_two_regions(self):
        vol = np.zeros((2, 3, 3), int)
        vol[0] = np.array([[0, 0, 6],
                           [0, 0, 6],
                           [5, 5, 5]])
        vol[1] = np.array([[6, 6, 0],
                           [5, 0, 0],
                           [0, 0, 0]])
        lb = vol.copy()
        lb[0] = np.array([[BG, BG, 1],
                          [BG, BG, 1],
                          [2, 2, 2]])
        lb[1] = np.array([[1, 1, BG],
                          [2, BG, BG],
                          [BG, BG, BG]])
        assert_array_equal(label(vol, background=0), lb)
    def test_background_one_region_center(self):
        vol = np.zeros((3, 3, 3), int)
        vol[1, 1, 1] = 1
        expected = np.ones_like(vol) * BG
        expected[1, 1, 1] = 1
        assert_array_equal(label(vol, connectivity=1, background=0),
                           expected)
    def test_return_num(self):
        img = np.array([[1, 0, 6],
                        [0, 0, 6],
                        [5, 5, 5]])
        assert_array_equal(label(img, return_num=True)[1], 3)
        assert_array_equal(label(img, background=-1, return_num=True)[1], 4)
    def test_1D(self):
        data = np.array((0, 1, 2, 2, 1, 1, 0, 0))
        expected = np.array((0, 1, 2, 2, 3, 3, 0, 0))
        n = len(data)
        # 1D labeling must be invariant to where the singleton axes sit
        for shape in ((n,),
                      (1, n), (n, 1),
                      (1, n, 1), (n, 1, 1), (1, 1, n)):
            labelled = label(data.reshape(shape))
            assert_array_equal(expected, labelled.flatten())
    def test_nd(self):
        # 4D input is not supported
        with testing.raises(NotImplementedError):
            label(np.ones((1, 2, 3, 4)))
class TestSupport:
    """Internal helpers of skimage.measure._ccomp."""
    def test_reshape(self):
        # reshape_array moves singleton axes first; undo restores them
        for dims in ((3, 1, 2), (1, 4, 5), (3, 1, 1), (2, 1), (1,)):
            dims = np.array(dims)
            n_singleton = sum(dims == 1)
            data = np.random.random(dims)
            reshaped, swaps = ccomp.reshape_array(data)
            # every singleton axis must now come first
            for axis in range(n_singleton):
                assert reshaped.shape[axis] == 1
            restored = ccomp.undo_reshape_array(reshaped, swaps)
            # the round trip must be lossless
            assert_array_equal(data, restored)

View File

@@ -1,100 +0,0 @@
import numpy as np
import pytest
from skimage.measure import (intersection_coeff, manders_coloc_coeff,
manders_overlap_coeff, pearson_corr_coeff)
def test_invalid_input():
    """Every coefficient rejects shape mismatches and non-boolean masks."""
    img1 = np.array([[i + j for j in range(4)] for i in range(4)])
    img2 = np.ones((3, 5, 6))  # deliberately different shape
    mask = np.array([[i <= 1 for i in range(5)] for _ in range(5)])
    non_binary_mask = np.array([[2 for __ in range(4)] for _ in range(4)])
    # pearson_corr_coeff
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        pearson_corr_coeff(img1, img1, mask)
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        pearson_corr_coeff(img1, img2)
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        pearson_corr_coeff(img1, img1, mask)
    with pytest.raises(ValueError, match=". array is not of dtype boolean"):
        pearson_corr_coeff(img1, img1, non_binary_mask)
    # manders_coloc_coeff
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        manders_coloc_coeff(img1, mask)
    with pytest.raises(ValueError, match=". array is not of dtype boolean"):
        manders_coloc_coeff(img1, non_binary_mask)
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        manders_coloc_coeff(img1, img1 > 0, mask)
    with pytest.raises(ValueError, match=". array is not of dtype boolean"):
        manders_coloc_coeff(img1, img1 > 0, non_binary_mask)
    # manders_overlap_coeff
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        manders_overlap_coeff(img1, img1, mask)
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        manders_overlap_coeff(img1, img2)
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        manders_overlap_coeff(img1, img1, mask)
    with pytest.raises(ValueError, match=". array is not of dtype boolean"):
        manders_overlap_coeff(img1, img1, non_binary_mask)
    # intersection_coeff
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        intersection_coeff(img1 > 2, img2 > 1, mask)
    with pytest.raises(ValueError, match=". array is not of dtype boolean"):
        intersection_coeff(img1, img2)
    with pytest.raises(ValueError, match=". must have the same dimensions"):
        intersection_coeff(img1 > 2, img1 > 1, mask)
    with pytest.raises(ValueError, match=". array is not of dtype boolean"):
        intersection_coeff(img1 > 2, img1 > 1, non_binary_mask)
def test_pcc():
    """PCC of an image with itself is exactly (1, 0)."""
    img1 = np.array([[i + j for j in range(4)] for i in range(4)])
    assert pearson_corr_coeff(img1, img1) == (1.0, 0.0)
    img2 = np.where(img1 <= 2, 0, img1)
    np.testing.assert_almost_equal(
        pearson_corr_coeff(img1, img2),
        (0.944911182523068, 3.5667540654536515e-08))
    # inside the ROI both images agree, so the coefficients must match
    roi = np.where(img1 <= 2, 0, 1)
    np.testing.assert_almost_equal(pearson_corr_coeff(img1, img1, roi),
                                   pearson_corr_coeff(img1, img2, roi))
def test_mcc():
    img = np.array([[j for j in range(4)] for i in range(4)])
    mask = np.array([[i <= 1 for j in range(4)] for i in range(4)])
    assert manders_coloc_coeff(img, mask) == 0.5
    # negative intensities (int or float) are rejected
    neg_int = np.where(img == 1, -1, img)
    neg_float = neg_int / 2.0
    with pytest.raises(ValueError):
        manders_coloc_coeff(neg_int, mask)
    with pytest.raises(ValueError):
        manders_coloc_coeff(neg_float, mask)
def test_moc():
    img1 = np.ones((4, 4))
    img2 = 2 * np.ones((4, 4))
    assert manders_overlap_coeff(img1, img2) == 1
    # negative intensities are rejected in either argument
    neg_int = np.where(img1 == 1, -1, img1)
    neg_float = neg_int / 2.0
    with pytest.raises(ValueError):
        manders_overlap_coeff(neg_int, img2)
    with pytest.raises(ValueError):
        manders_overlap_coeff(img1, neg_int)
    with pytest.raises(ValueError):
        manders_overlap_coeff(neg_float, img2)
    with pytest.raises(ValueError):
        manders_overlap_coeff(img1, neg_float)
    with pytest.raises(ValueError):
        manders_overlap_coeff(neg_float, neg_float)
def test_intersection_coefficient():
    half_cols = np.array([[j <= 1 for j in range(4)] for i in range(4)])
    half_rows = np.array([[i <= 1 for j in range(4)] for i in range(4)])
    everything = np.array([[1 for j in range(4)] for i in range(4)])
    # overlapping halves intersect over half of the first mask
    assert intersection_coeff(half_cols, half_rows) == 0.5
    assert intersection_coeff(half_cols, everything) == 1

View File

@@ -1,16 +0,0 @@
import numpy as np
from skimage.measure import shannon_entropy
from skimage._shared.testing import assert_almost_equal
def test_shannon_ones():
    """A constant image carries zero entropy."""
    img = np.ones((10, 10))
    assert_almost_equal(shannon_entropy(img, base=np.e), 0.0)
def test_shannon_all_unique():
    """64 equally likely values give log2(64) bits of entropy."""
    img = np.arange(64)
    assert_almost_equal(shannon_entropy(img, base=2),
                        np.log(64) / np.log(2))

View File

@@ -1,175 +0,0 @@
import numpy as np
from skimage.measure import find_contours
from skimage._shared.testing import assert_array_equal
import pytest
# binary test image: an L of zeros inside an 8x8 field of ones
a = np.ones((8, 8), dtype=np.float32)
a[1:-1, 1] = 0
a[1, 1:-1] = 0
# radial float test image on a 5x5 grid
x, y = np.mgrid[-1:1:5j, -1:1:5j]
r = np.sqrt(x**2 + y**2)
def test_binary():
    """A single closed contour around the L, orientation 'high'."""
    ref = [[6.0, 1.5], [5.0, 1.5], [4.0, 1.5], [3.0, 1.5], [2.0, 1.5],
           [1.5, 2.0], [1.5, 3.0], [1.5, 4.0], [1.5, 5.0], [1.5, 6.0],
           [1.0, 6.5], [0.5, 6.0], [0.5, 5.0], [0.5, 4.0], [0.5, 3.0],
           [0.5, 2.0], [0.5, 1.0], [1.0, 0.5], [2.0, 0.5], [3.0, 0.5],
           [4.0, 0.5], [5.0, 0.5], [6.0, 0.5], [6.5, 1.0], [6.0, 1.5]]
    contours = find_contours(a, 0.5, positive_orientation='high')
    assert len(contours) == 1
    assert_array_equal(contours[0][::-1], ref)
# target contour for the masked/NaN tests below
mask_contour = [
    [6.0, 0.5], [5.0, 0.5], [4.0, 0.5], [3.0, 0.5], [2.0, 0.5], [1.0, 0.5],
    [0.5, 1.0], [0.5, 2.0], [0.5, 3.0], [0.5, 4.0], [0.5, 5.0], [0.5, 6.0],
    [1.0, 6.5], [1.5, 6.0], [1.5, 5.0], [1.5, 4.0], [1.5, 3.0], [1.5, 2.0],
    [2.0, 1.5], [3.0, 1.5], [4.0, 1.5], [5.0, 1.5], [6.0, 1.5],
]
mask = np.ones((8, 8), dtype=bool)
# missing data that should leave a hole in the contour
mask[7, 0:3] = False
@pytest.mark.parametrize("level", [0.5, None])
def test_nodata(level):
    """NaNs in the input behave like masked-out pixels."""
    with_nans = np.copy(a)
    with_nans[~mask] = np.nan
    contours = find_contours(with_nans, level, positive_orientation='high')
    assert len(contours) == 1
    assert_array_equal(contours[0], mask_contour)
@pytest.mark.parametrize("level", [0.5, None])
def test_mask(level):
    """Missing data given as an explicit boolean mask."""
    contours = find_contours(a, level, positive_orientation='high',
                             mask=mask)
    assert len(contours) == 1
    assert_array_equal(contours[0], mask_contour)
@pytest.mark.parametrize("level", [0, None])
def test_mask_shape(level):
    wrong_shape = np.ones((8, 7), dtype=bool)
    with pytest.raises(ValueError, match='shape'):
        find_contours(a, level, mask=wrong_shape)
@pytest.mark.parametrize("level", [0, None])
def test_mask_dtype(level):
    wrong_dtype = np.ones((8, 8), dtype=np.uint8)
    with pytest.raises(TypeError, match='binary'):
        find_contours(a, level, mask=wrong_dtype)
def test_float():
    contours = find_contours(r, 0.5)
    assert len(contours) == 1
    assert_array_equal(contours[0], [[2., 3.],
                                     [1., 2.],
                                     [2., 1.],
                                     [3., 2.],
                                     [2., 3.]])
@pytest.mark.parametrize("level", [0.5, None])
def test_memory_order(level):
    # C- and F-contiguous inputs must behave identically
    assert len(find_contours(np.ascontiguousarray(r), level)) == 1
    assert len(find_contours(np.asfortranarray(r), level)) == 1
def test_invalid_input():
    with pytest.raises(ValueError):
        find_contours(r, 0.5, 'foo', 'bar')
    with pytest.raises(ValueError):
        find_contours(r[..., None], 0.5)
def test_level_default():
    # image with range [0.9, 0.91]; rely on the default level
    image = np.random.random((100, 100)) * 0.01 + 0.9
    contours = find_contours(image)
    # many contours should be found
    assert len(contours) > 1
@pytest.mark.parametrize("image", [
    [[0.13680, 0.11220, 0.0, 0.0, 0.0, 0.19417, 0.19417, 0.33701],
     [0.0, 0.15140, 0.10267, 0.0, np.nan, 0.14908, 0.18158, 0.19178],
     [0.0, 0.06949, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01860],
     [0.0, 0.06949, 0.0, 0.17852, 0.08469, 0.02135, 0.08198, np.nan],
     [0.0, 0.08244, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
     [0.12342, 0.21330, 0.0, np.nan, 0.01301, 0.04335, 0.0, 0.0]],
    [[0.08, -0.03, -0.17, -0.08, 0.24, 0.06, 0.17, -0.02],
     [0.12, 0., np.nan, 0.24, 0., -0.53, 0.26, 0.16],
     [0.39, 0., 0., 0., 0., -0.02, -0.3, 0.01],
     [0.28, -0.04, -0.03, 0.16, 0.12, 0.01, -0.87, 0.16],
     [0.26, 0.08, 0.08, 0.08, 0.12, 0.13, 0.11, 0.19],
     [0.27, 0.24, 0., 0.25, 0.32, 0.19, 0.26, 0.22]],
    [[-0.18, np.nan, np.nan, 0.22, -0.14, -0.23, -0.2, -0.17, -0.19, -0.24],
     [0., np.nan, np.nan, np.nan, -0.1, -0.24, -0.15, -0.02, -0.09, -0.21],
     [0.43, 0.19, np.nan, np.nan, -0.01, -0.2, -0.22, -0.18, -0.16, -0.07],
     [0.23, 0., np.nan, -0.06, -0.07, -0.21, -0.24, -0.25, -0.23, -0.13],
     [-0.05, -0.11, 0., 0.1, -0.19, -0.23, -0.23, -0.18, -0.19, -0.16],
     [-0.19, -0.05, 0.13, -0.08, -0.22, -0.23, -0.26, -0.15, -0.12, -0.13],
     [-0.2, -0.11, -0.11, -0.24, -0.29, -0.27, -0.35, -0.36, -0.27, -0.13],
     [-0.28, -0.33, -0.31, -0.36, -0.39, -0.37, -0.38, -0.32, -0.34, -0.2],
     [-0.28, -0.33, -0.39, -0.4, -0.42, -0.38, -0.35, -0.39, -0.35, -0.34],
     [-0.38, -0.35, -0.41, -0.42, -0.39, -0.36, -0.34, -0.36, -0.28, -0.34]]
])
def test_keyerror_fix(image):
    """Failing samples from issue #4830 must not raise."""
    find_contours(np.array(image, np.float32), 0)

View File

@@ -1,440 +0,0 @@
import numpy as np
from skimage._shared import testing
from skimage._shared._warnings import expected_warnings
from skimage._shared.testing import (arch32, assert_almost_equal,
assert_array_less, assert_equal, xfail)
from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac
from skimage.measure.fit import _dynamic_max_trials
from skimage.transform import AffineTransform
def test_line_model_predict():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_nd_invalid_input():
with testing.raises(ValueError):
LineModelND().predict_x(np.zeros(1))
with testing.raises(ValueError):
LineModelND().predict_y(np.zeros(1))
with testing.raises(ValueError):
LineModelND().predict_x(np.zeros(1), np.zeros(1))
with testing.raises(ValueError):
LineModelND().predict_y(np.zeros(1))
with testing.raises(ValueError):
LineModelND().predict_y(np.zeros(1), np.zeros(1))
assert not LineModelND().estimate(np.empty((1, 3)))
assert not LineModelND().estimate(np.empty((1, 2)))
with testing.raises(ValueError):
LineModelND().residuals(np.empty((1, 3)))
def test_line_model_nd_predict():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0.2, 0.8]))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_nd_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = (np.array([0, 0, 0], dtype='float'),
np.array([1, 1, 1], dtype='float')/np.sqrt(3))
# we scale the unit vector with a factor 10 when generating points on the
# line in order to compensate for the scale of the random noise
data0 = (model0.params[0] +
10 * np.arange(-100, 100)[..., np.newaxis] * model0.params[1])
# add gaussian noise to data
random_state = np.random.default_rng(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# assert_almost_equal(model_est.residuals(data0), np.zeros(len(data)), 1)
# test whether estimated parameters are correct
# we use the following geometric property: two aligned vectors have
# a cross-product equal to zero
# test if direction vectors are aligned
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1],
model_est.params[1])), 0, 1)
# test if origins are aligned with the direction
a = model_est.params[0] - model0.params[0]
if np.linalg.norm(a) > 0:
a /= np.linalg.norm(a)
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
def test_line_model_nd_residuals():
model = LineModelND()
model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0)
assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0)
assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10)
# test params argument in model.rediduals
data = np.array([[10, 0, 0]])
params = (np.array([0, 0, 0]), np.array([2, 0, 0]))
assert_equal(abs(model.residuals(data, params=params)), 30)
def test_circle_model_invalid_input():
with testing.raises(ValueError):
CircleModel().estimate(np.empty((5, 3)))
def test_circle_model_predict():
model = CircleModel()
r = 5
model.params = (0, 0, r)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
assert_almost_equal(xy, model.predict_xy(t))
def test_circle_model_estimate():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add gaussian noise to data
random_state = np.random.default_rng(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = CircleModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 0)
def test_circle_model_int_overflow():
xy = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]], dtype=np.int32)
xy += 500
model = CircleModel()
model.estimate(xy)
assert_almost_equal(model.params, [500, 500, 1])
def test_circle_model_residuals():
model = CircleModel()
model.params = (0, 0, 5)
assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))),
np.sqrt(2 * 6**2) - 5)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5)
def test_circle_model_insufficient_data():
model = CircleModel()
warning_message = ["Input does not contain enough significant data points."]
with expected_warnings(warning_message):
model.estimate(np.array([[1, 2], [3, 4]]))
with expected_warnings(warning_message):
model.estimate(np.ones((6, 2)))
with expected_warnings(warning_message):
model.estimate(np.array([[0, 0], [1, 1], [2, 2]]))
def test_ellipse_model_invalid_input():
with testing.raises(ValueError):
EllipseModel().estimate(np.empty((5, 3)))
def test_ellipse_model_predict():
model = EllipseModel()
model.params = (0, 0, 5, 10, 0)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
assert_almost_equal(xy, model.predict_xy(t))
def test_ellipse_model_estimate():
    """Noisy-sample estimation recovers the ellipse centre for many rotations."""
    for angle_deg in range(0, 180, 15):
        theta = np.deg2rad(angle_deg)
        true_model = EllipseModel()
        true_model.params = (10, 20, 15, 25, theta)
        clean = true_model.predict_xy(np.linspace(0, 2 * np.pi, 100))
        # corrupt the clean samples with gaussian noise
        rng = np.random.default_rng(1234)
        noisy = clean + rng.normal(size=clean.shape)
        fitted = EllipseModel()
        fitted.estimate(noisy)
        # the recovered centre should match the original one
        assert_almost_equal(true_model.params[:2], fitted.params[:2], 0)
        residuals = fitted.residuals(clean)
        assert_array_less(residuals, np.ones(residuals.shape))
def test_ellipse_model_estimate_from_data():
    """Regression test: fitting real measured points stays numerically stable.

    Negative or huge fitted parameters previously indicated an integer
    overflow in the estimation; see the assertions at the bottom.
    """
    # real-world measurement data (integer pixel coordinates)
    data = np.array([
        [264, 854], [265, 875], [268, 863], [270, 857], [275, 905], [285, 915],
        [305, 925], [324, 934], [335, 764], [336, 915], [345, 925], [345, 945],
        [354, 933], [355, 745], [364, 936], [365, 754], [375, 745], [375, 735],
        [385, 736], [395, 735], [394, 935], [405, 727], [415, 736], [415, 727],
        [425, 727], [426, 929], [435, 735], [444, 933], [445, 735], [455, 724],
        [465, 934], [465, 735], [475, 908], [475, 726], [485, 753], [485, 728],
        [492, 762], [495, 745], [491, 910], [493, 909], [499, 904], [505, 905],
        [504, 747], [515, 743], [516, 752], [524, 855], [525, 844], [525, 885],
        [533, 845], [533, 873], [535, 883], [545, 874], [543, 864], [553, 865],
        [553, 845], [554, 825], [554, 835], [563, 845], [565, 826], [563, 855],
        [563, 795], [565, 735], [573, 778], [572, 815], [574, 804], [575, 665],
        [575, 685], [574, 705], [574, 745], [575, 875], [572, 732], [582, 795],
        [579, 709], [583, 805], [583, 854], [586, 755], [584, 824], [585, 655],
        [581, 718], [586, 844], [585, 915], [587, 905], [594, 824], [593, 855],
        [590, 891], [594, 776], [596, 767], [593, 763], [603, 785], [604, 775],
        [603, 885], [605, 753], [605, 655], [606, 935], [603, 761], [613, 802],
        [613, 945], [613, 965], [615, 693], [617, 665], [623, 962], [624, 972],
        [625, 995], [633, 673], [633, 965], [633, 683], [633, 692], [633, 954],
        [634, 1016], [635, 664], [641, 804], [637, 999], [641, 956], [643, 946],
        [643, 926], [644, 975], [643, 655], [646, 705], [651, 664], [651, 984],
        [647, 665], [651, 715], [651, 725], [651, 734], [647, 809], [651, 825],
        [651, 873], [647, 900], [652, 917], [651, 944], [652, 742], [648, 811],
        [651, 994], [652, 783], [650, 911], [654, 879]], dtype=np.int32)
    # estimate parameters of real data
    model = EllipseModel()
    model.estimate(data)
    # estimated parameters smaller than 1000 means the fit is stable
    assert_array_less(model.params[:4], np.full(4, 1000))
    # test whether all parameters are more than 0. Negative values were the
    # result of an integer overflow
    assert_array_less(np.zeros(4), np.abs(model.params[:4]))
@xfail(condition=arch32,
       reason=('Known test failure on 32-bit platforms. See links for '
               'details: '
               'https://github.com/scikit-image/scikit-image/issues/3091 '
               'https://github.com/scikit-image/scikit-image/issues/2670'))
def test_ellipse_model_estimate_failers():
    """Degenerate point sets make estimate() report failure."""
    ellipse = EllipseModel()
    # all points identical
    assert not ellipse.estimate(np.ones((5, 2)))
    # nearly collinear points
    assert not ellipse.estimate(np.array([[50, 80], [51, 81], [52, 80]]))
def test_ellipse_model_residuals():
    """Residuals measure the shortest distance from a point to the ellipse."""
    model = EllipseModel()
    # axis-aligned ellipse centred at the origin, semi-axes (10, 5)
    # (the previous comment "vertical line through origin" was wrong)
    model.params = (0, 0, 10, 5, 0)
    assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0)
    assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0)
    assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5)
def test_ransac_shape():
    """RANSAC recovers circle parameters and flags the injected outliers."""
    true_model = CircleModel()
    true_model.params = (10, 12, 3)
    data = true_model.predict_xy(np.linspace(0, 2 * np.pi, 1000))
    # inject three gross outliers
    outlier_idx = (10, 30, 200)
    for idx, bad_point in zip(outlier_idx,
                              [(1000, 1000), (-50, 50), (-100, -10)]):
        data[idx, :] = bad_point
    # fit on the corrupted data
    model_est, inliers = ransac(data, CircleModel, 3, 5, random_state=1)
    # the fit must still match the generating model exactly
    assert_almost_equal(true_model.params, model_est.params)
    for idx in outlier_idx:
        assert idx not in inliers
def test_ransac_geometric():
    """RANSAC on point correspondences recovers an affine transform.

    The three corrupted correspondences must come back as the only
    outliers; ``inliers`` is a boolean mask over the input pairs.
    """
    rng = np.random.default_rng(12373240)
    # generate original data without noise
    src = 100 * rng.random((50, 2))
    model0 = AffineTransform(scale=(0.5, 0.3), rotation=1,
                             translation=(10, 20))
    dst = model0(src)
    # add some faulty data
    outliers = (0, 5, 20)
    dst[outliers[0]] = (10000, 10000)
    dst[outliers[1]] = (-100, 100)
    dst[outliers[2]] = (50, 50)
    # estimate parameters of corrupted data
    model_est, inliers = ransac((src, dst), AffineTransform, 2, 20,
                                random_state=rng)
    # test whether estimated parameters equal original parameters
    assert_almost_equal(model0.params, model_est.params)
    # idiomatic boolean negation instead of `inliers == False` (PEP 8 E712)
    assert np.all(np.nonzero(~inliers)[0] == outliers)
def test_ransac_is_data_valid():
    """If no sample ever passes is_data_valid, ransac returns (None, None)."""
    def needs_more_than_two(data):
        return data.shape[0] > 2

    with expected_warnings(["No inliers found"]):
        model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
                                is_data_valid=needs_more_than_two,
                                random_state=1)
    assert model is None
    assert inliers is None
def test_ransac_is_model_valid():
    """If every fitted model is rejected, ransac returns (None, None)."""
    def reject_all(model, data):
        return False

    with expected_warnings(["No inliers found"]):
        model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
                                is_model_valid=reject_all, random_state=1)
    assert model is None
    assert inliers is None
def test_ransac_dynamic_max_trials():
    """Check _dynamic_max_trials against hand-computed reference values.

    Arguments are (n_inliers, n_samples, min_samples, probability).
    """
    # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    #   Hartley, R.~I. and Zisserman, A., 2004,
    #   Multiple View Geometry in Computer Vision, Second Edition,
    #   Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
    assert_equal(_dynamic_max_trials(100, 100, 2, 1), 1)
    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    assert_equal(_dynamic_max_trials(95, 100, 2, 1), 16)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    assert_equal(_dynamic_max_trials(90, 100, 2, 1), 22)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    assert_equal(_dynamic_max_trials(70, 100, 2, 1), 54)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
    assert_equal(_dynamic_max_trials(50, 100, 2, 1), 126)
    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    assert_equal(_dynamic_max_trials(95, 100, 8, 1), 34)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    assert_equal(_dynamic_max_trials(90, 100, 8, 1), 65)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    assert_equal(_dynamic_max_trials(70, 100, 8, 1), 608)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    assert_equal(_dynamic_max_trials(50, 100, 8, 1), 9210)
    # e = 0%, min_samples = 5
    assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 5, 1), 360436504051)
def test_ransac_invalid_input():
    """Out-of-range arguments raise ValueError before any fitting happens."""
    data = np.zeros((10, 2))
    bad_kwargs = [
        # `residual_threshold` must be greater than zero
        dict(min_samples=2, residual_threshold=-0.5),
        # `max_trials` must be greater than zero
        dict(min_samples=2, residual_threshold=0, max_trials=-1),
        # `stop_probability` must be in range (0, 1)
        dict(min_samples=2, residual_threshold=0, stop_probability=-1),
        dict(min_samples=2, residual_threshold=0, stop_probability=1.01),
        # `min_samples` as ratio must be in range (0, nb)
        dict(min_samples=0, residual_threshold=0),
        dict(min_samples=10, residual_threshold=0),
        # `min_samples` must be greater than zero
        dict(min_samples=-1, residual_threshold=0),
    ]
    for kwargs in bad_kwargs:
        with testing.raises(ValueError):
            ransac(data, None, **kwargs)
def test_ransac_sample_duplicates():
    """Every random sample drawn by ransac must consist of unique points."""
    class UniquenessCheckingModel:
        """Dummy model that asserts each sample contains no duplicates."""

        def estimate(self, data):
            # every drawn data point must be distinct
            assert_equal(np.unique(data).size, data.size)
            return True

        def residuals(self, data):
            return np.ones(len(data), dtype=np.float64)

    # four unique points, forced through 10 iterations; check that no
    # iteration ever draws a duplicated data point
    with expected_warnings(["No inliers found"]):
        ransac(np.arange(4), UniquenessCheckingModel, min_samples=3,
               residual_threshold=0.0, max_trials=10)
def test_ransac_with_no_final_inliers():
    """A fit where nothing survives the threshold yields (None, None)."""
    points = np.random.rand(5, 2)
    with expected_warnings(['No inliers found. Model not fitted']):
        model, inliers = ransac(points, model_class=LineModelND,
                                min_samples=3, residual_threshold=0,
                                random_state=1523427)
    assert model is None
    assert inliers is None
def test_ransac_non_valid_best_model():
    """Regression test for GH issue #5572: a warning fires when the best
    candidate model fails the validity check."""
    max_tilt = 10 / 180 * np.pi

    def is_model_valid(model, *random_data) -> bool:
        # accept only models within 10 degrees of the vertical axis
        direction = model.params[1]
        tilt = abs(np.arccos(np.dot(direction, [0, 0, 1])))
        return tilt <= max_tilt

    rnd = np.random.RandomState(1)
    data = np.linspace([0, 0, 0], [0.3, 0, 1], 1000) + rnd.rand(1000, 3) - 0.5
    with expected_warnings(["Estimated model is not valid"]):
        ransac(data, LineModelND, min_samples=2, residual_threshold=0.3,
               max_trials=50, random_state=0,
               is_model_valid=is_model_valid)

View File

@@ -1,59 +0,0 @@
import pytest
import numpy as np
from skimage import data
from skimage.measure._label import _label_bool, label
from skimage.measure._ccomp import label_cython as clabel
from skimage._shared import testing
# In this testsuite, we ensure that the results provided by
# label_cython are identical to the one from _label_bool,
# which is based on ndimage.
def test_no_option():
    """Default labelling agrees between the ndimage and Cython backends."""
    blobs = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    testing.assert_equal(_label_bool(blobs), clabel(blobs))
def test_background():
    """Both backends agree for either choice of background label."""
    blobs = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    for bg in (0, 1):
        testing.assert_equal(_label_bool(blobs, background=bg),
                             clabel(blobs, background=bg))
def test_return_num():
    """return_num output matches between the two backends."""
    blobs = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    testing.assert_equal(_label_bool(blobs, return_num=True),
                         clabel(blobs, return_num=True))
def test_connectivity():
    """Valid connectivities agree across backends; invalid ones raise."""
    blobs = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    # 1..3 are the legal connectivities for a 3-D image
    for conn in (1, 2, 3):
        testing.assert_equal(_label_bool(blobs, connectivity=conn),
                             clabel(blobs, connectivity=conn))
    # out-of-range connectivities must be rejected by both backends
    for conn in (0, 4):
        with pytest.raises(ValueError):
            _label_bool(blobs, connectivity=conn)
        with pytest.raises(ValueError):
            clabel(blobs, connectivity=conn)
@pytest.mark.parametrize("dtype", [bool, int])
def test_zero_size(dtype):
    """Labelling an image with an empty axis keeps the shape, finds nothing."""
    empty_img = np.ones((300, 0, 300), dtype=dtype)
    labels, num = label(empty_img, return_num=True)
    assert labels.shape == empty_img.shape
    assert num == 0

View File

@@ -1,173 +0,0 @@
import numpy as np
import pytest
from numpy.testing import assert_allclose
from skimage.draw import ellipsoid, ellipsoid_stats
from skimage.measure import marching_cubes, mesh_surface_area
def test_marching_cubes_isotropic():
    """Surface area of an isotropic ellipsoid is reproduced within 1%."""
    volume = ellipsoid(6, 10, 16, levelset=True)
    _, surf = ellipsoid_stats(6, 10, 16)
    # classic (lorensen) and the default algorithm
    for kwargs in ({'method': 'lorensen'}, {}):
        verts, faces = marching_cubes(volume, 0., **kwargs)[:2]
        surf_calc = mesh_surface_area(verts, faces)
        # marching cubes always underestimates; accept up to 1% error
        assert surf > surf_calc > surf * 0.99
def test_marching_cubes_anisotropic():
    """Anisotropic spacing is honoured by both algorithms (1.5% tolerance)."""
    # spacing is given as an ndarray (not just a tuple) on purpose
    spacing = np.array([1., 10 / 6., 16 / 6.])
    volume = ellipsoid(6, 10, 16, spacing=spacing, levelset=True)
    _, surf = ellipsoid_stats(6, 10, 16)
    # classic (lorensen) and the default algorithm
    for kwargs in ({'method': 'lorensen'}, {}):
        verts, faces = marching_cubes(volume, 0., spacing=spacing,
                                      **kwargs)[:2]
        surf_calc = mesh_surface_area(verts, faces)
        # always underestimates; accept 1.5% error for anisotropic input
        assert surf > surf_calc > surf * 0.985
    # an empty mask is rejected
    with pytest.raises(ValueError):
        marching_cubes(volume, 0., spacing=spacing, mask=np.array([]))
    # spacing must combine with allow_degenerate=False without error
    marching_cubes(volume, 0, spacing=spacing, allow_degenerate=False)
def test_invalid_input():
    """Bad shapes, bad levels and bad method names all raise ValueError."""
    for method in ('lorensen', 'lewiner'):
        # too-thin volume
        with pytest.raises(ValueError):
            marching_cubes(np.zeros((2, 2, 1)), 0, method=method)
        # level outside the data range
        with pytest.raises(ValueError):
            marching_cubes(np.zeros((2, 2, 1)), 1, method=method)
        # spacing length does not match volume dimensionality
        with pytest.raises(ValueError):
            marching_cubes(np.ones((3, 3, 3)), 1, spacing=(1, 2),
                           method=method)
        # 2-D input is not supported
        with pytest.raises(ValueError):
            marching_cubes(np.zeros((20, 20)), 0, method=method)
    # invalid method name
    ellipsoid_isotropic = ellipsoid(6, 10, 16, levelset=True)
    with pytest.raises(ValueError):
        marching_cubes(ellipsoid_isotropic, 0., method='abcd')
def test_both_algs_same_result_ellipse():
    """On ambiguity-free data both algorithms produce the same mesh."""
    sphere_small = ellipsoid(1, 1, 1, levelset=True)
    mesh_default = marching_cubes(sphere_small, 0,
                                  allow_degenerate=False)[:2]
    mesh_lorensen = marching_cubes(sphere_small, 0, allow_degenerate=False,
                                   method='lorensen')[:2]
    # face/vertex ordering differs between the algorithms, so compare the
    # meshes order-invariantly
    assert _same_mesh(*mesh_default, *mesh_lorensen)
def _same_mesh(vertices1, faces1, vertices2, faces2, tol=1e-10):
""" Compare two meshes, using a certain tolerance and invariant to
the order of the faces.
"""
# Unwind vertices
triangles1 = vertices1[np.array(faces1)]
triangles2 = vertices2[np.array(faces2)]
# Sort vertices within each triangle
triang1 = [np.concatenate(sorted(t, key=lambda x:tuple(x)))
for t in triangles1]
triang2 = [np.concatenate(sorted(t, key=lambda x:tuple(x)))
for t in triangles2]
# Sort the resulting 9-element "tuples"
triang1 = np.array(sorted([tuple(x) for x in triang1]))
triang2 = np.array(sorted([tuple(x) for x in triang2]))
return (triang1.shape == triang2.shape and
np.allclose(triang1, triang2, 0, tol))
def test_both_algs_same_result_donut():
    """On a double torus the classic and default algorithms disagree.

    The volume has topological ambiguities that the two algorithms resolve
    differently, so their meshes are expected NOT to match.
    """
    # Performing this test on data that does not have ambiguities
    n = 48
    a, b = 2.5/n, -1.25
    vol = np.empty((n, n, n), 'float32')
    for iz in range(vol.shape[0]):
        for iy in range(vol.shape[1]):
            for ix in range(vol.shape[2]):
                # Double-torii formula by Thomas Lewiner
                z, y, x = float(iz)*a+b, float(iy)*a+b, float(ix)*a+b
                vol[iz,iy,ix] = ( (
                    (8*x)**2 + (8*y-2)**2 + (8*z)**2 + 16 - 1.85*1.85 ) * ( (8*x)**2 +
                    (8*y-2)**2 + (8*z)**2 + 16 - 1.85*1.85 ) - 64 * ( (8*x)**2 + (8*y-2)**2 )
                    ) * ( ( (8*x)**2 + ((8*y-2)+4)*((8*y-2)+4) + (8*z)**2 + 16 - 1.85*1.85 )
                    * ( (8*x)**2 + ((8*y-2)+4)*((8*y-2)+4) + (8*z)**2 + 16 - 1.85*1.85 ) -
                    64 * ( ((8*y-2)+4)*((8*y-2)+4) + (8*z)**2
                    ) ) + 1025
    vertices1, faces1 = marching_cubes(vol, 0, method='lorensen')[:2]
    vertices2, faces2 = marching_cubes(vol, 0)[:2]
    # Old and new alg are different
    assert not _same_mesh(vertices1, faces1, vertices2, faces2)
def test_masked_marching_cubes():
    """A partial mask restricts the mesh to the unmasked region."""
    levelset = ellipsoid(6, 10, 16, levelset=True)
    mask = np.ones_like(levelset, dtype=bool)
    # blank out a slab of rows and a slab of columns
    mask[:10, :, :] = False
    mask[:, :, 20:] = False
    verts, faces, _, _ = marching_cubes(levelset, 0, mask=mask)
    # reference area of the clipped surface
    assert_allclose(mesh_surface_area(verts, faces), 299.56878662109375,
                    rtol=.01)
def test_masked_marching_cubes_empty():
    """An empty mask array is rejected with ValueError."""
    levelset = ellipsoid(6, 10, 16, levelset=True)
    with pytest.raises(ValueError):
        marching_cubes(levelset, 0, mask=np.array([]))
def test_masked_marching_cubes_all_true():
    """An all-True mask must give the same mesh as passing no mask at all."""
    ellipsoid_scalar = ellipsoid(6, 10, 16, levelset=True)
    mask = np.ones_like(ellipsoid_scalar, dtype=bool)
    ver_m, faces_m, _, _ = marching_cubes(ellipsoid_scalar, 0, mask=mask)
    # BUG FIX: the reference mesh must be computed WITHOUT a mask —
    # previously both calls passed `mask`, so the test compared a result
    # with itself and could never fail.
    ver, faces, _, _ = marching_cubes(ellipsoid_scalar, 0)
    assert_allclose(ver_m, ver, rtol=.00001)
    assert_allclose(faces_m, faces, rtol=.00001)

View File

@@ -1,345 +0,0 @@
import itertools
import numpy as np
import pytest
from scipy import ndimage as ndi
from skimage import draw
from skimage._shared import testing
from skimage._shared.testing import (assert_allclose, assert_almost_equal,
assert_equal)
from skimage._shared.utils import _supported_float_type
from skimage.measure import (centroid, inertia_tensor, inertia_tensor_eigvals,
moments, moments_central, moments_coords,
moments_coords_central, moments_hu,
moments_normalized)
def compare_moments(m1, m2, thresh=1e-8):
    """Compare two moments arrays.

    Only entries whose order (sum of indices) does not exceed
    ``m1.shape[0] - 1`` are compared — entries below the diagonal exceed
    the requested order and are not filled in by the analytical code path.
    NaN entries must appear at identical positions in both arrays and are
    then excluded.  Remaining entries must agree to within ``thresh``
    relative to the largest absolute moment of ``m1``.
    """
    m1 = m1.copy()
    m2 = m2.copy()
    # NaNs must coincide position-wise; afterwards neutralise them so the
    # element-wise comparison below ignores them
    nan_idx1 = np.where(np.isnan(m1.ravel()))[0]
    nan_idx2 = np.where(np.isnan(m2.ravel()))[0]
    assert len(nan_idx1) == len(nan_idx2)
    assert np.all(nan_idx1 == nan_idx2)
    m1[np.isnan(m1)] = 0
    m2[np.isnan(m2)] = 0
    max_val = np.abs(m1[m1 != 0]).max()
    for orders in itertools.product(*([range(m1.shape[0])] * m1.ndim)):
        if sum(orders) > m1.shape[0] - 1:
            # beyond the requested order: zero out and skip
            m1[orders] = 0
            m2[orders] = 0
            continue
        # relative tolerance w.r.t. the largest moment magnitude
        assert abs(m1[orders] - m2[orders]) / max_val < thresh
@pytest.mark.parametrize('anisotropic', [False, True, None])
def test_moments(anisotropic):
    """Raw moments give the correct mass and (spacing-scaled) centroid."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[14, 14] = 1
    image[15, 15] = 1
    image[14, 15] = 0.5
    image[15, 14] = 0.5
    spacing = (1.4, 2) if anisotropic else (1, 1)
    if anisotropic is None:
        m = moments(image)
    else:
        m = moments(image, spacing=spacing)
    # zeroth moment is the total mass
    assert_equal(m[0, 0], 3)
    # first moments over mass give the centroid, scaled by the spacing
    assert_almost_equal(m[1, 0] / m[0, 0], 14.5 * spacing[0])
    assert_almost_equal(m[0, 1] / m[0, 0], 14.5 * spacing[1])
@pytest.mark.parametrize('anisotropic', [False, True, None])
def test_moments_central(anisotropic):
    """Central moments are translation invariant, with or without spacing.

    ``anisotropic=None`` exercises the code path with no spacing argument
    at all; True/False pass an explicit (an)isotropic spacing.
    """
    image = np.zeros((20, 20), dtype=np.float64)
    image[14, 14] = 1
    image[15, 15] = 1
    image[14, 15] = 0.5
    image[15, 14] = 0.5
    if anisotropic:
        spacing = (2, 1)
    else:
        spacing = (1, 1)
    if anisotropic is None:
        mu = moments_central(image, (14.5, 14.5))
        # check for proper centroid computation
        mu_calc_centroid = moments_central(image)
    else:
        mu = moments_central(image, (14.5 * spacing[0], 14.5 * spacing[1]),
                             spacing=spacing)
        # check for proper centroid computation
        mu_calc_centroid = moments_central(image, spacing=spacing)
    compare_moments(mu, mu_calc_centroid, thresh=1e-14)
    # shift image by dx=2, dy=2
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[16, 16] = 1
    image2[17, 17] = 1
    image2[16, 17] = 0.5
    image2[17, 16] = 0.5
    if anisotropic is None:
        mu2 = moments_central(image2, (14.5 + 2, 14.5 + 2))
    else:
        mu2 = moments_central(
            image2,
            ((14.5 + 2) * spacing[0], (14.5 + 2) * spacing[1]),
            spacing=spacing
        )
    # central moments must be translation invariant
    compare_moments(mu, mu2, thresh=1e-14)
def test_moments_coords():
    """Moments from a coordinate list match moments of the raster image."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    coords = np.array([[r, c] for r in range(13, 17)
                       for c in range(13, 17)], dtype=np.float64)
    assert_almost_equal(moments_coords(coords), moments(image))
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_moments_coords_dtype(dtype):
    """Output dtype follows the supported float type of the input."""
    expected_dtype = _supported_float_type(dtype)
    image = np.zeros((20, 20), dtype=dtype)
    image[13:17, 13:17] = 1
    mu_image = moments(image)
    assert mu_image.dtype == expected_dtype
    coords = np.array([[r, c] for r in range(13, 17)
                       for c in range(13, 17)], dtype=dtype)
    mu_coords = moments_coords(coords)
    assert mu_coords.dtype == expected_dtype
    # both computations must also agree numerically
    assert_almost_equal(mu_coords, mu_image)
def test_moments_central_coords():
    """Central moments from coordinates match the raster computation."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    mu_image = moments_central(image, (14.5, 14.5))
    coords = np.array([[r, c] for r in range(13, 17)
                       for c in range(13, 17)], dtype=np.float64)
    mu_coords = moments_coords_central(coords, (14.5, 14.5))
    assert_almost_equal(mu_coords, mu_image)
    # the default (computed) centroid must reproduce the explicit-centre
    # result
    assert_almost_equal(moments_coords_central(coords), mu_coords)
    # same square translated by (3, 3), still measured about the OLD centre
    image = np.zeros((20, 20), dtype=np.float64)
    image[16:20, 16:20] = 1
    mu_image = moments_central(image, (14.5, 14.5))
    coords = np.array([[r, c] for r in range(16, 20)
                       for c in range(16, 20)], dtype=np.float64)
    assert_almost_equal(moments_coords_central(coords, (14.5, 14.5)),
                        mu_image)
def test_moments_normalized():
    """Normalized moments are invariant to translation and scale."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    nu = moments_normalized(moments_central(image, (14.5, 14.5)))
    # shifted by (-2, -2), spatial extent halved, amplitude scaled by 0.7
    image2 = np.zeros((20, 20), dtype=np.float64)
    image2[11:13, 11:13] = 0.7
    nu2 = moments_normalized(moments_central(image2, (11.5, 11.5)))
    assert_almost_equal(nu, nu2, decimal=1)
@pytest.mark.parametrize('anisotropic', [False, True])
def test_moments_normalized_spacing(anisotropic):
    """Normalized moments are invariant to the absolute spacing scale."""
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1
    if anisotropic:
        spacing1, spacing2 = (1, 2), (2, 4)
    else:
        spacing1, spacing2 = (1, 1), (3, 3)
    nu = moments_normalized(moments_central(image, spacing=spacing1),
                            spacing=spacing1)
    nu2 = moments_normalized(moments_central(image, spacing=spacing2),
                             spacing=spacing2)
    # spacing2 is spacing1 scaled by a constant factor; results must agree
    compare_moments(nu, nu2)
def test_moments_normalized_3d():
    """3D normalized moments reflect the ellipsoid's axis lengths."""
    image = draw.ellipsoid(1, 1, 10)
    mu_image = moments_central(image)
    nu = moments_normalized(mu_image)
    # spread along the long axis dominates; the two short axes are symmetric
    assert nu[0, 0, 2] > nu[0, 2, 0]
    assert_almost_equal(nu[0, 2, 0], nu[2, 0, 0])
    # raster and coordinate computations must agree
    mu_coords = moments_coords_central(np.where(image))
    assert_almost_equal(mu_image, mu_coords)
@pytest.mark.parametrize('dtype', [np.uint8, np.int32, np.float32, np.float64])
@pytest.mark.parametrize('order', [1, 2, 3, 4])
@pytest.mark.parametrize('ndim', [2, 3, 4])
def test_analytical_moments_calculation(dtype, order, ndim):
    """Analytical central moments agree with the numerical code path."""
    shapes = {2: (256, 256), 3: (64, 64, 64)}
    shape = shapes.get(ndim, (16,) * ndim)
    rng = np.random.default_rng(1234)
    if np.dtype(dtype).kind in 'iu':
        x = rng.integers(0, 256, shape, dtype=dtype)
    else:
        x = rng.standard_normal(shape, dtype=dtype)
    # center=None triggers the analytical expressions ...
    analytical = moments_central(x, center=None, order=order)
    # ... while an explicit centroid bypasses them
    numerical = moments_central(x, center=centroid(x), order=order)
    # ensure numeric and analytical central moments are close
    thresh = 1e-4 if x.dtype == np.float32 else 1e-9
    compare_moments(analytical, numerical, thresh=thresh)
def test_moments_normalized_invalid():
    """Orders above the supported maximum are rejected with ValueError."""
    for order in (3, 4):
        with testing.raises(ValueError):
            moments_normalized(np.zeros((3, 3)), order)
def test_moments_hu():
    """Hu moments are invariant to translation, scale and rotation."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:15, 13:17] = 1
    nu = moments_normalized(moments_central(image, (13.5, 14.5)))
    hu = moments_hu(nu)
    # shift by (2, 3), scale by 0.5 and rotate by 90 degrees
    image2 = np.zeros((20, 20), dtype=np.float64)
    image2[11, 11:13] = 1
    image2 = image2.T
    nu2 = moments_normalized(moments_central(image2, (11.5, 11)))
    hu2 = moments_hu(nu2)
    assert_almost_equal(hu, hu2, decimal=1)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_moments_dtype(dtype):
    """The supported float dtype is preserved through the whole pipeline."""
    expected_dtype = _supported_float_type(dtype)
    image = np.zeros((20, 20), dtype=dtype)
    image[13:15, 13:17] = 1
    mu = moments_central(image, (13.5, 14.5))
    assert mu.dtype == expected_dtype
    nu = moments_normalized(mu)
    assert nu.dtype == expected_dtype
    assert moments_hu(nu).dtype == expected_dtype
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_centroid(dtype):
    """Intensity-weighted centroid, with precision matched to the dtype."""
    image = np.zeros((20, 20), dtype=dtype)
    image[14, 14:16] = 1
    image[15, 14:16] = 1 / 3
    # looser tolerances for the lower-precision float types
    rtols = {np.float16: 1e-3, np.float32: 1e-5}
    assert_allclose(centroid(image), (14.25, 14.5),
                    rtol=rtols.get(dtype, 1e-7))
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_inertia_tensor_2d(dtype):
    """Inertia tensor of an axis-aligned rectangle: dtype and anisotropy."""
    image = np.zeros((40, 40), dtype=dtype)
    image[15:25, 5:35] = 1  # big horizontal rectangle (aligned with axis 1)
    expected_dtype = _supported_float_type(image.dtype)
    T = inertia_tensor(image)
    assert T.dtype == expected_dtype
    # spread along axis 1 dominates, so T[0, 0] > T[1, 1]; symmetry of the
    # shape makes the off-diagonal term vanish
    assert T[0, 0] > T[1, 1]
    np.testing.assert_allclose(T[0, 1], 0)
    v0, v1 = inertia_tensor_eigvals(image, T=T)
    assert v0.dtype == expected_dtype
    assert v1.dtype == expected_dtype
    # 30x10 rectangle -> ratio of principal spreads is 3:1
    np.testing.assert_allclose(np.sqrt(v0/v1), 3, rtol=0.01, atol=0.05)
def test_inertia_tensor_3d():
    """Rotating an ellipsoid rotates its inertia-tensor principal axis."""
    image = draw.ellipsoid(10, 5, 3)
    T0 = inertia_tensor(image)
    eig0, V0 = np.linalg.eig(T0)
    # principal axis of ellipse = eigenvector of smallest eigenvalue
    v0 = V0[:, np.argmin(eig0)]
    # up to sign, the principal axis is the first coordinate axis
    assert np.allclose(v0, [1, 0, 0]) or np.allclose(-v0, [1, 0, 0])
    # rotate the volume by 30 degrees in the (0, 1) plane
    imrot = ndi.rotate(image.astype(float), 30, axes=(0, 1), order=1)
    Tr = inertia_tensor(imrot)
    eigr, Vr = np.linalg.eig(Tr)
    vr = Vr[:, np.argmin(eigr)]
    # Check that axis has rotated by expected amount
    pi, cos, sin = np.pi, np.cos, np.sin
    # 30-degree rotation matrix in the (0, 1) plane
    R = np.array([[ cos(pi/6), -sin(pi/6), 0],
                  [ sin(pi/6),  cos(pi/6), 0],
                  [         0,          0, 1]])
    expected_vr = R @ v0
    # eigenvectors are defined up to sign, so accept either orientation
    assert (np.allclose(vr, expected_vr, atol=1e-3, rtol=0.01) or
            np.allclose(-vr, expected_vr, atol=1e-3, rtol=0.01))
def test_inertia_tensor_eigvals():
    """Eigenvalues stay non-negative despite floating-point round-off."""
    # Floating point precision problems could make a positive
    # semidefinite matrix have an eigenvalue that is very slightly
    # negative. Check that we have caught and fixed this problem.
    image = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
    # mu = np.array([[3, 0, 98], [0, 14, 0], [2, 0, 98]])
    eigvals = inertia_tensor_eigvals(image=image)
    assert (min(eigvals) >= 0)

View File

@@ -1,49 +0,0 @@
import numpy as np
from skimage.measure import points_in_poly, grid_points_in_poly
from skimage._shared.testing import assert_array_equal
class TestNpnpoly():
    """Point-in-polygon membership for simple convex shapes."""

    def test_square(self):
        unit_square = np.array([[0, 0],
                                [0, 1],
                                [1, 1],
                                [1, 0]])
        # centre is inside, a point left of the square is not
        assert points_in_poly([[0.5, 0.5]], unit_square)[0]
        assert not points_in_poly([[-0.1, 0.1]], unit_square)[0]

    def test_triangle(self):
        triangle = np.array([[0, 0],
                             [1, 0],
                             [0.5, 0.75]])
        # just below the apex is inside; above/right of an edge is not
        assert points_in_poly([[0.5, 0.7]], triangle)[0]
        assert not points_in_poly([[0.5, 0.76]], triangle)[0]
        assert not points_in_poly([[0.7, 0.5]], triangle)[0]

    def test_type(self):
        # the result is a boolean array
        assert points_in_poly([[0, 0]], [[0, 0]]).dtype == bool
def test_grid_points_in_poly():
    """A right triangle over a 5x5 grid yields a lower-triangular mask."""
    triangle = np.array([[0, 0],
                         [5, 0],
                         [5, 5]])
    expected = np.tril(np.ones((5, 5), dtype=bool))
    assert_array_equal(grid_points_in_poly((5, 5), triangle), expected)
def test_grid_points_in_poly_binarize():
    """binarize=False returns labels instead of a boolean mask."""
    triangle = np.array([[0, 0],
                         [5, 0],
                         [5, 5]])
    # labels presumably distinguish outside/inside/vertex/edge points —
    # see the grid_points_in_poly docs for their exact meaning
    expected = np.array([[2, 0, 0, 0, 0],
                         [3, 3, 0, 0, 0],
                         [3, 1, 3, 0, 0],
                         [3, 1, 1, 3, 0],
                         [3, 1, 1, 1, 3]])
    assert_array_equal(grid_points_in_poly((5, 5), triangle, binarize=False),
                       expected)

View File

@@ -1,64 +0,0 @@
import numpy as np
from skimage.measure import approximate_polygon, subdivide_polygon
from skimage.measure._polygon import _SUBDIVISION_MASKS
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal, assert_equal
# closed square contour with side length 3, traced one unit step at a
# time; the first and last points coincide, so the polygon is circular
square = np.array([
    [0, 0], [0, 1], [0, 2], [0, 3],
    [1, 3], [2, 3], [3, 3],
    [3, 2], [3, 1], [3, 0],
    [2, 0], [1, 0], [0, 0]
])
def test_approximate_polygon():
    """Polygon approximation keeps corners and drops edge points."""
    # small tolerance keeps only the four corners (plus the closing point)
    out = approximate_polygon(square, 0.1)
    assert_array_equal(out, square[(0, 3, 6, 9, 12), :])
    # a large tolerance collapses the square further
    out = approximate_polygon(square, 2.2)
    assert_array_equal(out, square[(0, 6, 12), :])
    # works on an irregular subset of the contour as well
    out = approximate_polygon(square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :],
                              0.1)
    assert_array_equal(out, square[(0, 3, 6, 9, 12), :])
    # a non-positive tolerance is a no-op
    for tolerance in (-1, 0):
        assert_array_equal(approximate_polygon(square, tolerance), square)
def test_subdivide_polygon():
    """B-spline subdivision: circular, non-circular and preserved-ends modes.

    Each outer iteration feeds the previous subdivision results back in,
    checking the expected point-count growth for every supported degree.
    """
    new_square1 = square
    new_square2 = square[:-1]
    new_square3 = square[:-1]
    # test iterative subdivision
    for _ in range(10):
        square1, square2, square3 = new_square1, new_square2, new_square3
        # test different B-Spline degrees
        for degree in range(1, 7):
            mask_len = len(_SUBDIVISION_MASKS[degree][0])
            # test circular
            new_square1 = subdivide_polygon(square1, degree)
            assert_array_equal(new_square1[-1], new_square1[0])
            assert_equal(new_square1.shape[0],
                         2 * square1.shape[0] - 1)
            # test non-circular
            new_square2 = subdivide_polygon(square2, degree)
            assert_equal(new_square2.shape[0],
                         2 * (square2.shape[0] - mask_len + 1))
            # test non-circular, preserve_ends
            new_square3 = subdivide_polygon(square3, degree, True)
            assert_equal(new_square3[0], square3[0])
            assert_equal(new_square3[-1], square3[-1])
            assert_equal(new_square3.shape[0],
                         2 * (square3.shape[0] - mask_len + 2))
    # not supported B-Spline degree
    with testing.raises(ValueError):
        subdivide_polygon(square, 0)
    with testing.raises(ValueError):
        subdivide_polygon(square, 8)

View File

@@ -1,213 +0,0 @@
import numpy as np
from ..._shared.testing import assert_equal, assert_almost_equal
from ..profile import profile_line
# 10x10 ramp fixture: pixel value = 10 * row + column
image = np.arange(100).reshape((10, 10)).astype(float)
def test_horizontal_rightward():
    """Row profile from (0, 2) to (0, 8) reads columns 2..8."""
    prof = profile_line(image, (0, 2), (0, 8), order=0, mode='constant')
    assert_equal(prof, np.arange(2, 9))
def test_horizontal_leftward():
    """Reversing the endpoints reverses the profile."""
    prof = profile_line(image, (0, 8), (0, 2), order=0, mode='constant')
    assert_equal(prof, np.arange(8, 1, -1))
def test_vertical_downward():
    """Column profile from (2, 5) to (8, 5) increases by 10 per row."""
    prof = profile_line(image, (2, 5), (8, 5), order=0, mode='constant')
    assert_equal(prof, np.arange(25, 95, 10))
def test_vertical_upward():
    """Upward column profile decreases by 10 per sample."""
    prof = profile_line(image, (8, 5), (2, 5), order=0, mode='constant')
    assert_equal(prof, np.arange(85, 15, -10))
def test_45deg_right_downward():
    """Diagonal profile sampled with nearest-neighbour interpolation."""
    prof = profile_line(image, (2, 2), (8, 8), order=0, mode='constant')
    # repeats are due to aliasing using nearest neighbor interpolation.
    # to see this, imagine a diagonal line with markers every unit of
    # length traversing a checkerboard pattern of squares also of unit
    # length. Because the line is diagonal, sometimes more than one
    # marker will fall on the same checkerboard box.
    assert_almost_equal(prof,
                        np.array([22, 33, 33, 44, 55, 55, 66, 77, 77, 88]))
def test_45deg_right_downward_interpolated():
    """Linear interpolation removes the nearest-neighbour aliasing."""
    prof = profile_line(image, (2, 2), (8, 8), order=1, mode='constant')
    assert_almost_equal(prof, np.linspace(22, 88, 10))
def test_45deg_right_upward():
    # Diagonal up-and-to-the-right with linear interpolation.
    result = profile_line(image, (8, 2), (2, 8), order=1, mode='constant')
    assert_almost_equal(result, np.arange(82, 27, -6))
def test_45deg_left_upward():
    # Diagonal up-and-to-the-left with linear interpolation.
    result = profile_line(image, (8, 8), (2, 2), order=1, mode='constant')
    assert_almost_equal(result, np.arange(88, 21, -22. / 3))
def test_45deg_left_downward():
    # Diagonal down-and-to-the-left with linear interpolation.
    result = profile_line(image, (2, 8), (8, 2), order=1, mode='constant')
    assert_almost_equal(result, np.arange(28, 83, 6))
def test_pythagorean_triangle_right_downward():
    # Hypotenuse of a 6-8-10 right triangle, sampled nearest-neighbour.
    result = profile_line(image, (1, 1), (7, 9), order=0, mode='constant')
    expected = np.array([11, 22, 23, 33, 34, 45, 56, 57, 67, 68, 79])
    assert_equal(result, expected)
def test_pythagorean_triangle_right_downward_interpolated():
    # Same hypotenuse with linear interpolation: evenly spaced 11..79.
    result = profile_line(image, (1, 1), (7, 9), order=1, mode='constant')
    assert_almost_equal(result, np.linspace(11, 79, 11))
# 6x7 fixture: a bright 3-4-5 line (value 1.8) flanked immediately above
# and below by dimmer pixels (value 0.6); every other pixel is 0.  Chosen
# so a width-3 profile along the line averages to exactly 1.
pyth_image = np.zeros((6, 7), float)
line = ((1, 2, 2, 3, 3, 4), (1, 2, 3, 3, 4, 5))
below = ((2, 2, 3, 4, 4, 5), (0, 1, 2, 3, 4, 4))
above = ((0, 1, 1, 2, 3, 3), (2, 2, 3, 4, 5, 6))
for coords, value in ((line, 1.8), (below, 0.6), (above, 0.6)):
    pyth_image[coords] = value
def test_pythagorean_triangle_right_downward_linewidth():
    # Width-3 profile straddling the bright line averages the 1.8 centre
    # with its two 0.6 neighbours: (1.8 + 0.6 + 0.6) / 3 == 1.
    result = profile_line(pyth_image, (1, 1), (4, 5), linewidth=3, order=0,
                          mode='constant')
    assert_almost_equal(result, np.ones(6))
def test_pythagorean_triangle_right_upward_linewidth():
    # Same width-3 average, but on the vertically flipped fixture.
    flipped = pyth_image[::-1, :]
    result = profile_line(flipped, (4, 1), (1, 5),
                          linewidth=3, order=0, mode='constant')
    assert_almost_equal(result, np.ones(6))
def test_pythagorean_triangle_transpose_left_down_linewidth():
    # Same width-3 average on the transposed, column-reversed fixture.
    rotated = pyth_image.T[:, ::-1]
    result = profile_line(rotated, (1, 4), (5, 1),
                          linewidth=3, order=0, mode='constant')
    assert_almost_equal(result, np.ones(6))
def test_reduce_func_mean():
    # reduce_func=np.mean averages across the linewidth dimension.
    result = profile_line(pyth_image, (0, 1), (3, 1), linewidth=3, order=0,
                          reduce_func=np.mean, mode='reflect')
    assert_almost_equal(result, pyth_image[:4, :3].mean(axis=1))
def test_reduce_func_max():
    # reduce_func=np.max takes the maximum across the linewidth dimension.
    result = profile_line(pyth_image, (0, 1), (3, 1), linewidth=3, order=0,
                          reduce_func=np.max, mode='reflect')
    assert_almost_equal(result, pyth_image[:4, :3].max(axis=1))
def test_reduce_func_sum():
    # reduce_func=np.sum totals across the linewidth dimension.
    result = profile_line(pyth_image, (0, 1), (3, 1), linewidth=3, order=0,
                          reduce_func=np.sum, mode='reflect')
    assert_almost_equal(result, pyth_image[:4, :3].sum(axis=1))
def test_reduce_func_mean_linewidth_1():
    # With linewidth=1 the mean collapses a single sample, so the profile
    # is simply column 1 of the first four rows.
    result = profile_line(pyth_image, (0, 1), (3, 1), linewidth=1, order=0,
                          reduce_func=np.mean, mode='constant')
    assert_almost_equal(result, pyth_image[:4, 1])
def test_reduce_func_None_linewidth_1():
    # reduce_func=None skips reduction and returns the raw 2-D
    # (n_samples, linewidth) array — here a single-column matrix.
    result = profile_line(pyth_image, (1, 2), (4, 2), linewidth=1,
                          order=0, reduce_func=None, mode='constant')
    assert_almost_equal(result, pyth_image[1:5, 2, np.newaxis])
def test_reduce_func_None_linewidth_3():
    # With linewidth=3 and no reduction, the full 4x3 sample window
    # is returned unchanged.
    result = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3,
                          order=0, reduce_func=None, mode='constant')
    assert_almost_equal(result, pyth_image[1:5, 1:4])
def test_reduce_func_lambda_linewidth_3():
    # A non-reducing callable applied per sample row: x + x**2 keeps the
    # row shape, so the expected result is the same elementwise expression
    # over the whole sampled window.
    def reduce_func(x):
        return x + x ** 2
    result = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3, order=0,
                          reduce_func=reduce_func, mode='constant')
    window = pyth_image[1:5, 1:4]
    assert_almost_equal(result, window + window ** 2)
def test_reduce_func_sqrt_linewidth_3():
    # An elementwise callable (sqrt) applied per sample row is equivalent
    # to applying it to the whole sampled window at once.
    def reduce_func(x):
        return x ** 0.5
    result = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3,
                          order=0, reduce_func=reduce_func,
                          mode='constant')
    assert_almost_equal(result, pyth_image[1:5, 1:4] ** 0.5)
def test_reduce_func_sumofsqrt_linewidth_3():
    # A truly reducing callable: sum of square roots across each
    # width-3 sample row, yielding one scalar per profile point.
    def reduce_func(x):
        return np.sum(x ** 0.5)
    result = profile_line(pyth_image, (1, 2), (4, 2), linewidth=3, order=0,
                          reduce_func=reduce_func, mode='constant')
    expected = (pyth_image[1:5, 1:4] ** 0.5).sum(axis=1)
    assert_almost_equal(result, expected)
def test_oob_coodinates():
    # NOTE: "coodinates" typo is kept — the function name is the test's
    # public identifier.  Endpoints lie outside the image; mode='constant'
    # pads the out-of-bounds samples with zeros.
    pad = 2
    beyond = pyth_image.shape[0] + pad
    result = profile_line(pyth_image, (-pad, 2), (beyond, 2), linewidth=1,
                          order=0, reduce_func=None, mode='constant')
    expected = np.vstack([np.zeros((pad, 1)),
                          pyth_image[:, 2, np.newaxis],
                          np.zeros((pad + 1, 1))])
    assert_almost_equal(result, expected)
def test_bool_array_input():
    # A boolean mask image must be handled the same as its uint8 cast.
    shape = (200, 200)
    center_y, center_x = 150, 140
    radius = 20
    x, y = np.meshgrid(range(shape[1]), range(shape[0]))
    mask = (y - center_y) ** 2 + (x - center_x) ** 2 < radius ** 2

    # Profile from the disc centre outward at an arbitrary angle, long
    # enough (length 31 > radius) to cross the disc boundary.
    src = (center_y, center_x)
    phi = 4 * np.pi / 9.
    dst = (center_y + 31 * np.cos(phi), center_x + 31 * np.sin(phi))

    profile_u8 = profile_line(mask.astype(np.uint8), src, dst,
                              mode='reflect')
    profile_b = profile_line(mask, src, dst, mode='reflect')

    # Inside the disc both profiles read 1, and they agree everywhere.
    assert all(profile_u8[:radius] == 1)
    assert all(profile_b[:radius] == 1)
    assert all(profile_b == profile_u8)

File diff suppressed because it is too large Load Diff