Use a for loop to install conda packages

This commit is contained in:
ton
2023-04-16 11:03:27 +07:00
parent 49da9f29c1
commit 0c2b34d6f8
12168 changed files with 2656238 additions and 1 deletions

View File

@@ -0,0 +1,20 @@
from ._adapted_rand_error import adapted_rand_error
from ._contingency_table import contingency_table
from ._structural_similarity import structural_similarity
from ._variation_of_information import variation_of_information
from .set_metrics import hausdorff_distance, hausdorff_pair
from .simple_metrics import (mean_squared_error, normalized_mutual_information,
normalized_root_mse, peak_signal_noise_ratio)
# Public API of the metrics subpackage; controls `from skimage.metrics import *`.
__all__ = [
    "adapted_rand_error",
    "variation_of_information",
    "contingency_table",
    "mean_squared_error",
    "normalized_mutual_information",
    "normalized_root_mse",
    "peak_signal_noise_ratio",
    "structural_similarity",
    "hausdorff_distance",
    "hausdorff_pair",
]

View File

@@ -0,0 +1,97 @@
from .._shared.utils import check_shape_equality
from ._contingency_table import contingency_table
__all__ = ['adapted_rand_error']
def adapted_rand_error(image_true=None, image_test=None, *, table=None,
                       ignore_labels=(0,), alpha=0.5):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_

    Parameters
    ----------
    image_true : ndarray of int
        Ground-truth label image, same shape as im_test.
    image_test : ndarray of int
        Test image.
    table : scipy.sparse array in crs format, optional
        A contingency table built with skimage.evaluate.contingency_table.
        If None, it will be computed on the fly.
    ignore_labels : sequence of int, optional
        Labels to ignore. Any part of the true image labeled with any of these
        values will not be counted in the score.
    alpha : float, optional
        Relative weight given to precision and recall in the adapted Rand error
        calculation. Must be in the closed interval [0, 1].

    Returns
    -------
    are : float
        The adapted Rand error.
    prec : float
        The adapted Rand precision: this is the number of pairs of pixels that
        have the same label in the test label image *and* in the true image,
        divided by the number in the test image.
    rec : float
        The adapted Rand recall: this is the number of pairs of pixels that
        have the same label in the test label image *and* in the true image,
        divided by the number in the true image.

    Raises
    ------
    ValueError
        If ``alpha`` is outside the interval [0, 1].

    Notes
    -----
    Pixels with label 0 in the true segmentation are ignored in the score.
    The adapted Rand error is calculated as follows:
    :math:`1 - \frac{\sum_{ij} p_{ij}^{2}}{\alpha \sum_{k} s_{k}^{2} +
    (1-\alpha)\sum_{k} t_{k}^{2}}`,
    where :math:`p_{ij}` is the probability that a pixel has the same label
    in the test image *and* in the true image, :math:`t_{k}` is the
    probability that a pixel has label :math:`k` in the true image,
    and :math:`s_{k}` is the probability that a pixel has label :math:`k`
    in the test image.
    Default behavior is to weight precision and recall equally in the
    adapted Rand error calculation.
    When alpha = 0, adapted Rand error = recall.
    When alpha = 1, adapted Rand error = precision.

    References
    ----------
    .. [1] Arganda-Carreras I, Turaga SC, Berger DR, et al. (2015)
           Crowdsourcing the creation of image segmentation algorithms
           for connectomics. Front. Neuroanat. 9:142.
           :DOI:`10.3389/fnana.2015.00142`
    """
    # Validate cheap parameters *before* any expensive work: the original
    # implementation built the full contingency table and only then rejected
    # an out-of-range alpha.
    if alpha < 0.0 or alpha > 1.0:
        raise ValueError('alpha must be between 0 and 1')
    if image_test is not None and image_true is not None:
        check_shape_equality(image_true, image_test)
    if table is None:
        p_ij = contingency_table(image_true, image_test,
                                 ignore_labels=ignore_labels, normalize=False)
    else:
        p_ij = table
    # Sum of the joint distribution squared; subtracting the plain sum
    # removes the self-pairs (each pixel paired with itself).
    sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum()
    # Marginal counts per true label (a_i) and per test label (b_i).
    a_i = p_ij.sum(axis=1).A.ravel()
    b_i = p_ij.sum(axis=0).A.ravel()
    # Sum of squares of the test segment sizes (this is 2x the number of pairs
    # of pixels with the same label in im_test)
    sum_a2 = a_i @ a_i - a_i.sum()
    # Same for im_true
    sum_b2 = b_i @ b_i - b_i.sum()
    precision = sum_p_ij2 / sum_a2
    recall = sum_p_ij2 / sum_b2
    # F-score generalized by alpha; are = 1 - F.
    fscore = sum_p_ij2 / (alpha * sum_a2 + (1 - alpha) * sum_b2)
    are = 1. - fscore
    return are, precision, recall

View File

@@ -0,0 +1,39 @@
import scipy.sparse as sparse
import numpy as np
__all__ = ['contingency_table']
def contingency_table(im_true, im_test, *, ignore_labels=None,
                      normalize=False):
    """
    Return the contingency table for all regions in matched segmentations.

    Parameters
    ----------
    im_true : ndarray of int
        Ground-truth label image, same shape as im_test.
    im_test : ndarray of int
        Test image.
    ignore_labels : sequence of int, optional
        Labels to ignore. Any part of the true image labeled with any of these
        values will not be counted in the score.
    normalize : bool
        Determines if the contingency table is normalized by pixel count.

    Returns
    -------
    cont : scipy.sparse.csr_matrix
        A contingency table. `cont[i, j]` will equal the number of voxels
        labeled `i` in `im_true` and `j` in `im_test`.
    """
    ignore = [] if ignore_labels is None else ignore_labels
    labels_true = im_true.reshape(-1)
    labels_test = im_test.reshape(-1)
    # Each non-ignored pixel contributes weight 1 to cell (true, test);
    # ignored pixels contribute 0 and therefore drop out of the table.
    weights = np.isin(labels_true, ignore, invert=True).astype(float)
    if normalize:
        weights = weights / np.count_nonzero(weights)
    return sparse.coo_matrix((weights, (labels_true, labels_test))).tocsr()

View File

@@ -0,0 +1,275 @@
import functools
import numpy as np
from scipy.ndimage import uniform_filter
from .._shared import utils
from .._shared.filters import gaussian
from .._shared.utils import _supported_float_type, check_shape_equality, warn
from ..util.arraycrop import crop
from ..util.dtype import dtype_range
__all__ = ['structural_similarity']
def structural_similarity(im1, im2,
                          *,
                          win_size=None, gradient=False, data_range=None,
                          channel_axis=None,
                          gaussian_weights=False, full=False, **kwargs):
    """
    Compute the mean structural similarity index between two images.
    Please pay attention to the `data_range` parameter with floating-point images.
    Parameters
    ----------
    im1, im2 : ndarray
        Images. Any dimensionality with same shape.
    win_size : int or None, optional
        The side-length of the sliding window used in comparison. Must be an
        odd value. If `gaussian_weights` is True, this is ignored and the
        window size will depend on `sigma`.
    gradient : bool, optional
        If True, also return the gradient with respect to im2.
    data_range : float, optional
        The data range of the input image (distance between minimum and
        maximum possible values). By default, this is estimated from the image
        data type. This estimate may be wrong for floating-point image data.
        Therefore it is recommended to always pass this value explicitly
        (see note below).
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.
        .. versionadded:: 0.19
            ``channel_axis`` was added in 0.19.
    gaussian_weights : bool, optional
        If True, each patch has its mean and variance spatially weighted by a
        normalized Gaussian kernel of width sigma=1.5.
    full : bool, optional
        If True, also return the full structural similarity image.
    Other Parameters
    ----------------
    use_sample_covariance : bool
        If True, normalize covariances by N-1 rather than, N where N is the
        number of pixels within the sliding window.
    K1 : float
        Algorithm parameter, K1 (small constant, see [1]_).
    K2 : float
        Algorithm parameter, K2 (small constant, see [1]_).
    sigma : float
        Standard deviation for the Gaussian when `gaussian_weights` is True.
    Returns
    -------
    mssim : float
        The mean structural similarity index over the image.
    grad : ndarray
        The gradient of the structural similarity between im1 and im2 [2]_.
        This is only returned if `gradient` is set to True.
    S : ndarray
        The full SSIM image. This is only returned if `full` is set to True.
    Notes
    -----
    If `data_range` is not specified, the range is automatically guessed
    based on the image data type. However for floating-point image data, this
    estimate yields a result double the value of the desired range, as the
    `dtype_range` in `skimage.util.dtype.py` has defined intervals from -1 to
    +1. This yields an estimate of 2, instead of 1, which is most often
    required when working with image data (as negative light intensities are
    nonsensical). In case of working with YCbCr-like color data, note that
    these ranges are different per channel (Cb and Cr have double the range
    of Y), so one cannot calculate a channel-averaged SSIM with a single call
    to this function, as identical ranges are assumed for each channel.
    To match the implementation of Wang et al. [1]_, set `gaussian_weights`
    to True, `sigma` to 1.5, `use_sample_covariance` to False, and
    specify the `data_range` argument.
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_ssim`` to
        ``skimage.metrics.structural_similarity``.
    References
    ----------
    .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
       (2004). Image quality assessment: From error visibility to
       structural similarity. IEEE Transactions on Image Processing,
       13, 600-612.
       https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
       :DOI:`10.1109/TIP.2003.819861`
    .. [2] Avanaki, A. N. (2009). Exact global histogram specification
       optimized for structural similarity. Optical Review, 16, 613-621.
       :arxiv:`0901.0065`
       :DOI:`10.1007/s10043-009-0119-z`
    """
    check_shape_equality(im1, im2)
    float_type = _supported_float_type(im1.dtype)
    if channel_axis is not None:
        # Multichannel case: compute SSIM per channel by recursing with
        # channel_axis=None, then average the per-channel results.
        # loop over channels
        args = dict(win_size=win_size,
                    gradient=gradient,
                    data_range=data_range,
                    channel_axis=None,
                    gaussian_weights=gaussian_weights,
                    full=full)
        args.update(kwargs)
        nch = im1.shape[channel_axis]
        mssim = np.empty(nch, dtype=float_type)
        if gradient:
            # gradient/full outputs have the same shape as the input; each
            # channel slice is filled in by the recursive call below.
            G = np.empty(im1.shape, dtype=float_type)
        if full:
            S = np.empty(im1.shape, dtype=float_type)
        # normalize negative axis indices so slice_at_axis works
        channel_axis = channel_axis % im1.ndim
        _at = functools.partial(utils.slice_at_axis, axis=channel_axis)
        for ch in range(nch):
            ch_result = structural_similarity(im1[_at(ch)],
                                              im2[_at(ch)], **args)
            if gradient and full:
                mssim[ch], G[_at(ch)], S[_at(ch)] = ch_result
            elif gradient:
                mssim[ch], G[_at(ch)] = ch_result
            elif full:
                mssim[ch], S[_at(ch)] = ch_result
            else:
                mssim[ch] = ch_result
        mssim = mssim.mean()
        if gradient and full:
            return mssim, G, S
        elif gradient:
            return mssim, G
        elif full:
            return mssim, S
        else:
            return mssim
    # --- single-channel computation from here on ---
    # Algorithm constants from Wang et al. 2004 (via **kwargs overrides).
    K1 = kwargs.pop('K1', 0.01)
    K2 = kwargs.pop('K2', 0.03)
    sigma = kwargs.pop('sigma', 1.5)
    if K1 < 0:
        raise ValueError("K1 must be positive")
    if K2 < 0:
        raise ValueError("K2 must be positive")
    if sigma < 0:
        raise ValueError("sigma must be positive")
    use_sample_covariance = kwargs.pop('use_sample_covariance', True)
    if gaussian_weights:
        # Set to give an 11-tap filter with the default sigma of 1.5 to match
        # Wang et. al. 2004.
        truncate = 3.5
    if win_size is None:
        if gaussian_weights:
            # set win_size used by crop to match the filter size
            r = int(truncate * sigma + 0.5)  # radius as in ndimage
            win_size = 2 * r + 1
        else:
            win_size = 7   # backwards compatibility
    if np.any((np.asarray(im1.shape) - win_size) < 0):
        raise ValueError(
            'win_size exceeds image extent. '
            'Either ensure that your images are '
            'at least 7x7; or pass win_size explicitly '
            'in the function call, with an odd value '
            'less than or equal to the smaller side of your '
            'images. If your images are multichannel '
            '(with color channels), set channel_axis to '
            'the axis number corresponding to the channels.')
    if not (win_size % 2 == 1):
        raise ValueError('Window size must be odd.')
    if data_range is None:
        # Guess the data range from the dtype; forbidden for floats because
        # dtype_range would give 2 (interval [-1, 1]) where 1 is usually meant.
        if (np.issubdtype(im1.dtype, np.floating) or
                np.issubdtype(im2.dtype, np.floating)):
            raise ValueError(
                'Since image dtype is floating point, you must specify '
                'the data_range parameter. Please read the documentation '
                'carefully (including the note). It is recommended that '
                'you always specify the data_range anyway.')
        if im1.dtype != im2.dtype:
            warn("Inputs have mismatched dtypes. Setting data_range based on im1.dtype.",
                 stacklevel=2)
        dmin, dmax = dtype_range[im1.dtype.type]
        data_range = dmax - dmin
        if np.issubdtype(im1.dtype, np.integer) and (im1.dtype != np.uint8):
            warn("Setting data_range based on im1.dtype. " +
                 ("data_range = %.0f. " % data_range) +
                 "Please specify data_range explicitly to avoid mistakes.", stacklevel=2)
    ndim = im1.ndim
    # Choose the local-statistics filter: Gaussian-weighted or uniform window.
    if gaussian_weights:
        filter_func = gaussian
        filter_args = {'sigma': sigma, 'truncate': truncate, 'mode': 'reflect'}
    else:
        filter_func = uniform_filter
        filter_args = {'size': win_size}
    # ndimage filters need floating point data
    im1 = im1.astype(float_type, copy=False)
    im2 = im2.astype(float_type, copy=False)
    NP = win_size ** ndim
    # filter has already normalized by NP
    if use_sample_covariance:
        cov_norm = NP / (NP - 1)  # sample covariance
    else:
        cov_norm = 1.0  # population covariance to match Wang et. al. 2004
    # compute (weighted) means
    ux = filter_func(im1, **filter_args)
    uy = filter_func(im2, **filter_args)
    # compute (weighted) variances and covariances
    uxx = filter_func(im1 * im1, **filter_args)
    uyy = filter_func(im2 * im2, **filter_args)
    uxy = filter_func(im1 * im2, **filter_args)
    vx = cov_norm * (uxx - ux * ux)
    vy = cov_norm * (uyy - uy * uy)
    vxy = cov_norm * (uxy - ux * uy)
    # SSIM formula (Eq. 13 of Wang et al. 2004): S = (A1*A2) / (B1*B2).
    R = data_range
    C1 = (K1 * R) ** 2
    C2 = (K2 * R) ** 2
    A1, A2, B1, B2 = ((2 * ux * uy + C1,
                       2 * vxy + C2,
                       ux ** 2 + uy ** 2 + C1,
                       vx + vy + C2))
    D = B1 * B2
    S = (A1 * A2) / D
    # to avoid edge effects will ignore filter radius strip around edges
    pad = (win_size - 1) // 2
    # compute (weighted) mean of ssim. Use float64 for accuracy.
    mssim = crop(S, pad).mean(dtype=np.float64)
    if gradient:
        # The following is Eqs. 7-8 of Avanaki 2009.
        grad = filter_func(A1 / D, **filter_args) * im1
        grad += filter_func(-S / B2, **filter_args) * im2
        grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) / D,
                            **filter_args)
        grad *= (2 / im1.size)
        if full:
            return mssim, grad, S
        else:
            return mssim, grad
    else:
        if full:
            return mssim, S
        else:
            return mssim

View File

@@ -0,0 +1,136 @@
import numpy as np
import scipy.sparse as sparse
from ._contingency_table import contingency_table
from .._shared.utils import check_shape_equality
__all__ = ['variation_of_information']
def variation_of_information(image0=None, image1=None, *, table=None,
                             ignore_labels=()):
    """Return symmetric conditional entropies associated with the VI. [1]_

    The variation of information is defined as VI(X,Y) = H(X|Y) + H(Y|X).
    If X is the ground-truth segmentation, then H(X|Y) can be interpreted
    as the amount of under-segmentation and H(Y|X) as the amount
    of over-segmentation. In other words, a perfect over-segmentation
    will have H(X|Y)=0 and a perfect under-segmentation will have H(Y|X)=0.

    Parameters
    ----------
    image0, image1 : ndarray of int
        Label images / segmentations, must have same shape.
    table : scipy.sparse array in csr format, optional
        A contingency table built with skimage.evaluate.contingency_table.
        If None, it will be computed with skimage.evaluate.contingency_table.
        If given, the entropies will be computed from this table and any images
        will be ignored.
    ignore_labels : sequence of int, optional
        Labels to ignore. Any part of the true image labeled with any of these
        values will not be counted in the score.

    Returns
    -------
    vi : ndarray of float, shape (2,)
        The conditional entropies of image1|image0 and image0|image1.

    References
    ----------
    .. [1] Marina Meilă (2007), Comparing clusterings—an information based
           distance, Journal of Multivariate Analysis, Volume 98, Issue 5,
           Pages 873-895, ISSN 0047-259X, :DOI:`10.1016/j.jmva.2006.11.013`.
    """
    hxgy, hygx = _vi_tables(image0, image1, table=table,
                            ignore_labels=ignore_labels)
    # Order is (false splits, false merges): H(image1|image0), H(image0|image1).
    return np.array([hygx.sum(), hxgy.sum()])
def _xlogx(x):
"""Compute x * log_2(x).
We define 0 * log_2(0) = 0
Parameters
----------
x : ndarray or scipy.sparse.csc_matrix or csr_matrix
The input array.
Returns
-------
y : same type as x
Result of x * log_2(x).
"""
y = x.copy()
if isinstance(y, sparse.csc_matrix) or isinstance(y, sparse.csr_matrix):
z = y.data
else:
z = np.asarray(y) # ensure np.matrix converted to np.array
nz = z.nonzero()
z[nz] *= np.log2(z[nz])
return y
def _vi_tables(im_true, im_test, table=None, ignore_labels=()):
    """Compute probability tables used for calculating VI.

    Parameters
    ----------
    im_true, im_test : ndarray of int
        Input label images, any dimensionality.
    table : csr matrix, optional
        Pre-computed contingency table. If given, the images are only used
        for the shape check.
    ignore_labels : sequence of int, optional
        Labels to ignore when computing scores.

    Returns
    -------
    hxgy, hygx : ndarray of float
        Per-segment conditional entropies of ``im_true`` given ``im_test`` and
        vice-versa.
    """
    check_shape_equality(im_true, im_test)
    if table is None:
        # normalize, since it is an identity op if already done
        pxy = contingency_table(
            im_true, im_test,
            ignore_labels=ignore_labels, normalize=True
        )
    else:
        pxy = table
    # compute marginal probabilities, converting to 1D array
    px = np.ravel(pxy.sum(axis=1))
    py = np.ravel(pxy.sum(axis=0))
    # use sparse matrix linear algebra to compute VI
    # first, compute the inverse diagonal matrices
    px_inv = sparse.diags(_invert_nonzero(px))
    py_inv = sparse.diags(_invert_nonzero(py))
    # then, compute the entropies
    # H(Y|X) = -sum_x p(x) sum_y p(y|x) log2 p(y|x); px_inv @ pxy gives the
    # row-conditional distribution p(y|x), and _xlogx applies x*log2(x).
    hygx = -px @ _xlogx(px_inv @ pxy).sum(axis=1)
    # Symmetric computation for H(X|Y) using the column-conditional p(x|y).
    hxgy = -_xlogx(pxy @ py_inv).sum(axis=0) @ py
    # Return dense 1D arrays (sums over sparse matrices yield np.matrix).
    return list(map(np.asarray, [hxgy, hygx]))
def _invert_nonzero(arr):
"""Compute the inverse of the non-zero elements of arr, not changing 0.
Parameters
----------
arr : ndarray
Returns
-------
arr_inv : ndarray
Array containing the inverse of the non-zero elements of arr, and
zero elsewhere.
"""
arr_inv = arr.copy()
nz = np.nonzero(arr)
arr_inv[nz] = 1 / arr[nz]
return arr_inv

View File

@@ -0,0 +1,147 @@
import warnings
import numpy as np
from scipy.spatial import cKDTree
def hausdorff_distance(image0, image1, method="standard"):
    """Calculate the Hausdorff distance between nonzero elements of given images.

    Parameters
    ----------
    image0, image1 : ndarray
        Arrays where ``True`` represents a point that is included in a
        set of points. Both arrays must have the same shape.
    method : {'standard', 'modified'}, optional, default = 'standard'
        The method to use for calculating the Hausdorff distance.
        ``standard`` is the standard Hausdorff distance, while ``modified``
        is the modified Hausdorff distance.

    Returns
    -------
    distance : float
        The Hausdorff distance between coordinates of nonzero pixels in
        ``image0`` and ``image1``, using the Euclidean distance.

    Raises
    ------
    ValueError
        If ``method`` is not one of ``'standard'`` or ``'modified'``.

    Notes
    -----
    The Hausdorff distance [1]_ is the maximum distance between any point on
    ``image0`` and its nearest point on ``image1``, and vice-versa.
    The Modified Hausdorff Distance (MHD) has been shown to perform better
    than the directed Hausdorff Distance (HD) in the following work by
    Dubuisson et al. [2]_. The function calculates forward and backward
    mean distances and returns the largest of the two.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Hausdorff_distance
    .. [2] M. P. Dubuisson and A. K. Jain. A Modified Hausdorff distance for object
           matching. In ICPR94, pages A:566-568, Jerusalem, Israel, 1994.
           :DOI:`10.1109/ICPR.1994.576361`
           http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.8155

    Examples
    --------
    >>> points_a = (3, 0)
    >>> points_b = (6, 0)
    >>> shape = (7, 1)
    >>> image_a = np.zeros(shape, dtype=bool)
    >>> image_b = np.zeros(shape, dtype=bool)
    >>> image_a[points_a] = True
    >>> image_b[points_b] = True
    >>> hausdorff_distance(image_a, image_b)
    3.0
    """
    if method not in ('standard', 'modified'):
        raise ValueError(f'unrecognized method {method}')
    a_points = np.transpose(np.nonzero(image0))
    b_points = np.transpose(np.nonzero(image1))
    # Handle empty sets properly:
    # - if both sets are empty, the sets are identical: distance zero
    # - if only one set is empty, no finite correspondence exists: infinity
    # Return 0.0 (not int 0) so the documented float return type holds in
    # every branch.
    if len(a_points) == 0:
        return 0.0 if len(b_points) == 0 else np.inf
    elif len(b_points) == 0:
        return np.inf
    # Nearest-neighbor distances in both directions via KD-trees.
    fwd, bwd = (
        cKDTree(a_points).query(b_points, k=1)[0],
        cKDTree(b_points).query(a_points, k=1)[0],
    )
    if method == 'standard':  # standard Hausdorff distance
        return max(max(fwd), max(bwd))
    else:  # 'modified' Hausdorff distance (validated above)
        return max(np.mean(fwd), np.mean(bwd))
def hausdorff_pair(image0, image1):
    """Returns pair of points that are Hausdorff distance apart between nonzero
    elements of given images.

    The Hausdorff distance [1]_ is the maximum distance between any point on
    ``image0`` and its nearest point on ``image1``, and vice-versa.

    Parameters
    ----------
    image0, image1 : ndarray
        Arrays where ``True`` represents a point that is included in a
        set of points. Both arrays must have the same shape.

    Returns
    -------
    point_a, point_b : array
        A pair of points that have Hausdorff distance between them.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Hausdorff_distance

    Examples
    --------
    >>> points_a = (3, 0)
    >>> points_b = (6, 0)
    >>> shape = (7, 1)
    >>> image_a = np.zeros(shape, dtype=bool)
    >>> image_b = np.zeros(shape, dtype=bool)
    >>> image_a[points_a] = True
    >>> image_b[points_b] = True
    >>> hausdorff_pair(image_a, image_b)
    (array([3, 0]), array([6, 0]))
    """
    coords_a = np.transpose(np.nonzero(image0))
    coords_b = np.transpose(np.nonzero(image1))
    # If either of the sets are empty, there is no corresponding pair of points
    if len(coords_a) == 0 or len(coords_b) == 0:
        warnings.warn("One or both of the images is empty.", stacklevel=2)
        return (), ()
    # For every point in one set, find the distance to (and index of) its
    # nearest neighbor in the other set.
    dists_b_to_a, nearest_a_for_b = cKDTree(coords_a).query(coords_b)
    dists_a_to_b, nearest_b_for_a = cKDTree(coords_b).query(coords_a)
    # The Hausdorff pair is realized by whichever point lies farthest from
    # the opposite set, together with its nearest neighbor there.
    farthest_b = dists_b_to_a.argmax()
    farthest_a = dists_a_to_b.argmax()
    if dists_a_to_b[farthest_a] > dists_b_to_a[farthest_b]:
        return coords_a[farthest_a], coords_b[nearest_b_for_a[farthest_a]]
    return coords_a[nearest_a_for_b[farthest_b]], coords_b[farthest_b]

View File

@@ -0,0 +1,261 @@
import numpy as np
from scipy.stats import entropy
from ..util.dtype import dtype_range
from .._shared.utils import _supported_float_type, check_shape_equality, warn
# Public names exported by this simple-metrics module.
__all__ = ['mean_squared_error',
           'normalized_root_mse',
           'peak_signal_noise_ratio',
           'normalized_mutual_information',
           ]
def _as_floats(image0, image1):
    """
    Promote im1, im2 to nearest appropriate floating point precision.
    """
    # Pick one common float dtype suitable for both inputs, then convert.
    target_dtype = _supported_float_type([image0.dtype, image1.dtype])
    return (np.asarray(image0, dtype=target_dtype),
            np.asarray(image1, dtype=target_dtype))
def mean_squared_error(image0, image1):
    """
    Compute the mean-squared error between two images.

    Parameters
    ----------
    image0, image1 : ndarray
        Images. Any dimensionality, must have same shape.

    Returns
    -------
    mse : float
        The mean-squared error (MSE) metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_mse`` to
        ``skimage.metrics.mean_squared_error``.
    """
    check_shape_equality(image0, image1)
    im0, im1 = _as_floats(image0, image1)
    diff = im0 - im1
    # Accumulate in float64 for accuracy regardless of input precision.
    return np.mean(diff * diff, dtype=np.float64)
def normalized_root_mse(image_true, image_test, *, normalization='euclidean'):
    """
    Compute the normalized root mean-squared error (NRMSE) between two
    images.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as im_test.
    image_test : ndarray
        Test image.
    normalization : {'euclidean', 'min-max', 'mean'}, optional
        Controls the normalization method to use in the denominator of the
        NRMSE. There is no standard method of normalization across the
        literature [1]_. The methods available here are as follows:
        - 'euclidean' : normalize by the averaged Euclidean norm of
          ``im_true``::
              NRMSE = RMSE * sqrt(N) / || im_true ||
          where || . || denotes the Frobenius norm and ``N = im_true.size``.
          This result is equivalent to::
              NRMSE = || im_true - im_test || / || im_true ||.
        - 'min-max' : normalize by the intensity range of ``im_true``.
        - 'mean' : normalize by the mean of ``im_true``

    Returns
    -------
    nrmse : float
        The NRMSE metric.

    Raises
    ------
    ValueError
        If ``normalization`` is not one of the supported methods.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_nrmse`` to
        ``skimage.metrics.normalized_root_mse``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Root-mean-square_deviation
    """
    check_shape_equality(image_true, image_test)
    image_true, image_test = _as_floats(image_true, image_test)
    # Ensure that both 'Euclidean' and 'euclidean' match
    normalization = normalization.lower()
    if normalization == 'euclidean':
        denom = np.sqrt(np.mean((image_true * image_true), dtype=np.float64))
    elif normalization == 'min-max':
        denom = image_true.max() - image_true.min()
    elif normalization == 'mean':
        denom = image_true.mean()
    else:
        # The previous message referred to a nonexistent 'norm_type'
        # parameter; name the actual parameter and the offending value.
        raise ValueError(
            f"Unsupported normalization: {normalization!r}. "
            "Use one of 'euclidean', 'min-max' or 'mean'.")
    return np.sqrt(mean_squared_error(image_true, image_test)) / denom
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    """
    Compute the peak signal to noise ratio (PSNR) for an image.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as im_test.
    image_test : ndarray
        Test image.
    data_range : int, optional
        The data range of the input image (distance between minimum and
        maximum possible values). By default, this is estimated from the image
        data-type.

    Returns
    -------
    psnr : float
        The PSNR metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_psnr`` to
        ``skimage.metrics.peak_signal_noise_ratio``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    """
    check_shape_equality(image_true, image_test)
    if data_range is None:
        # Estimate the range from the dtype of the ground-truth image.
        if image_true.dtype != image_test.dtype:
            warn("Inputs have mismatched dtype. Setting data_range based on "
                 "image_true.")
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = np.min(image_true), np.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "image_true has intensity values outside the range expected "
                "for its data type. Please manually specify the data_range.")
        # With non-negative data the peak is dmax (255 for uint8, 1 for
        # float); otherwise use the full dtype interval.
        data_range = dmax if true_min >= 0 else dmax - dmin
    image_true, image_test = _as_floats(image_true, image_test)
    mse = mean_squared_error(image_true, image_test)
    return 10 * np.log10((data_range ** 2) / mse)
def _pad_to(arr, shape):
"""Pad an array with trailing zeros to a given target shape.
Parameters
----------
arr : ndarray
The input array.
shape : tuple
The target shape.
Returns
-------
padded : ndarray
The padded array.
Examples
--------
>>> _pad_to(np.ones((1, 1), dtype=int), (1, 3))
array([[1, 0, 0]])
"""
if not all(s >= i for s, i in zip(shape, arr.shape)):
raise ValueError(f'Target shape {shape} cannot be smaller than input'
f'shape {arr.shape} along any axis.')
padding = [(0, s-i) for s, i in zip(shape, arr.shape)]
return np.pad(arr, pad_width=padding, mode='constant', constant_values=0)
def normalized_mutual_information(image0, image1, *, bins=100):
    r"""Compute the normalized mutual information (NMI).

    The normalized mutual information of :math:`A` and :math:`B` is given by

    .. math::

        Y(A, B) = \frac{H(A) + H(B)}{H(A, B)}

    where :math:`H(X) := - \sum_{x \in X}{x \log x}` is the entropy.
    It was proposed to be useful in registering images by Colin Studholme and
    colleagues [1]_. It ranges from 1 (perfectly uncorrelated image values)
    to 2 (perfectly correlated image values, whether positively or negatively).

    Parameters
    ----------
    image0, image1 : ndarray
        Images to be compared. The two input images must have the same number
        of dimensions.
    bins : int or sequence of int, optional
        The number of bins along each axis of the joint histogram.

    Returns
    -------
    nmi : float
        The normalized mutual information between the two arrays, computed at
        the granularity given by ``bins``. Higher NMI implies more similar
        input images.

    Raises
    ------
    ValueError
        If the images don't have the same number of dimensions.

    Notes
    -----
    If the two input images are not the same shape, the smaller image is padded
    with zeros.

    References
    ----------
    .. [1] C. Studholme, D.L.G. Hill, & D.J. Hawkes (1999). An overlap
           invariant entropy measure of 3D medical image alignment.
           Pattern Recognition 32(1):71-86
           :DOI:`10.1016/S0031-3203(98)00091-0`
    """
    if image0.ndim != image1.ndim:
        raise ValueError(f'NMI requires images of same number of dimensions. '
                         f'Got {image0.ndim}D for `image0` and '
                         f'{image1.ndim}D for `image1`.')
    # Zero-pad the smaller image so the joint histogram can be taken over
    # arrays of identical shape.
    if image0.shape != image1.shape:
        max_shape = np.maximum(image0.shape, image1.shape)
        padded0 = _pad_to(image0, max_shape)
        padded1 = _pad_to(image1, max_shape)
    else:
        padded0, padded1 = image0, image1
    # Joint histogram of the two images; the bin edges are not needed.
    hist, _ = np.histogramdd(
        [np.reshape(padded0, -1), np.reshape(padded1, -1)],
        bins=bins,
        density=True,
    )
    H0 = entropy(np.sum(hist, axis=0))   # marginal entropy over image1 bins
    H1 = entropy(np.sum(hist, axis=1))   # marginal entropy over image0 bins
    H01 = entropy(np.reshape(hist, -1))  # joint entropy
    return (H0 + H1) / H01

View File

@@ -0,0 +1,58 @@
import numpy as np
import pytest
from skimage.metrics import (adapted_rand_error,
variation_of_information,
contingency_table)
from skimage._shared.testing import (assert_equal,
assert_almost_equal,
assert_array_equal)
def test_contingency_table():
    """A normalized contingency table holds per-pixel probabilities."""
    im_true = np.array([1, 2, 3, 4])
    im_test = np.array([1, 1, 8, 8])
    expected = np.zeros((5, 9))
    expected[1, 1] = expected[2, 1] = 0.25
    expected[3, 8] = expected[4, 8] = 0.25
    result = contingency_table(im_true, im_test, normalize=True).toarray()
    assert_array_equal(expected, result)
def test_vi():
    """Total variation of information for this pair is exactly 1 bit."""
    seg_true = np.array([1, 2, 3, 4])
    seg_test = np.array([1, 1, 8, 8])
    total_vi = np.sum(variation_of_information(seg_true, seg_test))
    assert_equal(total_vi, 1)
def test_vi_ignore_labels():
    """Pixels labeled with an ignored value do not contribute to the VI."""
    seg_a = np.array([[1, 0], [2, 3]], dtype='uint8')
    seg_b = np.array([[1, 1], [1, 0]], dtype='uint8')
    splits, merges = variation_of_information(seg_a, seg_b, ignore_labels=[0])
    assert (splits, merges) == (0, 2 / 3)
def test_are():
    """Adapted Rand error, precision and recall on a 2x2 example."""
    seg_true = np.array([[2, 1], [1, 2]])
    seg_test = np.array([[1, 2], [3, 1]])
    assert_almost_equal(adapted_rand_error(seg_true, seg_test),
                        (0.3333333, 0.5, 1.0))
    assert_almost_equal(adapted_rand_error(seg_true, seg_test, alpha=0),
                        (0, 0.5, 1.0))
    assert_almost_equal(adapted_rand_error(seg_true, seg_test, alpha=1),
                        (0.5, 0.5, 1.0))
    # alpha values outside [0, 1] must be rejected
    for bad_alpha in (1.01, -0.01):
        with pytest.raises(ValueError):
            adapted_rand_error(seg_true, seg_test, alpha=bad_alpha)

View File

@@ -0,0 +1,179 @@
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_array_equal
from scipy.spatial import distance
from skimage._shared._warnings import expected_warnings
from skimage.metrics import hausdorff_distance, hausdorff_pair
def test_hausdorff_empty():
    # NOTE(review): despite its name, ``non_empty`` contains no True pixels,
    # so every comparison below is between two empty point sets, for which
    # the Hausdorff distance is defined to be 0 — confirm the name is
    # intentional.
    empty = np.zeros((0, 2), dtype=bool)
    non_empty = np.zeros((3, 2), dtype=bool)
    assert hausdorff_distance(empty, non_empty) == 0.0  # standard Hausdorff
    assert (
        hausdorff_distance(empty, non_empty, method="modified") == 0.0
    )  # modified Hausdorff
    with expected_warnings(["One or both of the images is empty"]):
        assert_array_equal(hausdorff_pair(empty, non_empty), [(), ()])
    # Same checks with the argument order reversed.
    assert hausdorff_distance(non_empty, empty) == 0.0  # standard Hausdorff
    assert (
        hausdorff_distance(non_empty, empty, method="modified") == 0.0
    )  # modified Hausdorff
    with expected_warnings(["One or both of the images is empty"]):
        assert_array_equal(hausdorff_pair(non_empty, empty), [(), ()])
    # NOTE(review): the stanza below repeats the first (empty, non_empty)
    # checks verbatim — presumably unintentional duplication; confirm before
    # removing.
    assert hausdorff_distance(empty, non_empty) == 0.0  # standard Hausdorff
    assert (
        hausdorff_distance(empty, non_empty, method="modified") == 0.0
    )  # modified Hausdorff
    with expected_warnings(["One or both of the images is empty"]):
        assert_array_equal(hausdorff_pair(empty, non_empty), [(), ()])
def test_hausdorff_simple():
    """Distance and pair between two single-pixel images."""
    pt_a = (3, 0)
    pt_b = (6, 0)
    img_a = np.zeros((7, 1), dtype=bool)
    img_b = np.zeros((7, 1), dtype=bool)
    img_a[pt_a] = True
    img_b[pt_b] = True
    expected = np.sqrt(sum((u - v) ** 2 for u, v in zip(pt_a, pt_b)))
    pairwise = distance.cdist([pt_a], [pt_b])
    expected_mod = max(np.mean(np.min(pairwise, axis=0)),
                       np.mean(np.min(pairwise, axis=1)))
    assert_almost_equal(hausdorff_distance(img_a, img_b), expected)
    assert_array_equal(hausdorff_pair(img_a, img_b), (pt_a, pt_b))
    assert_almost_equal(
        hausdorff_distance(img_a, img_b, method="modified"), expected_mod)
@pytest.mark.parametrize("points_a", [(0, 0), (3, 0), (1, 4), (4, 1)])
@pytest.mark.parametrize("points_b", [(0, 0), (3, 0), (1, 4), (4, 1)])
def test_hausdorff_region_single(points_a, points_b):
    """Check Hausdorff metrics for every pairing of single points on a 5x5 grid."""
    shape = (5, 5)
    mask_a = np.zeros(shape, dtype=bool)
    mask_b = np.zeros(shape, dtype=bool)
    mask_a[points_a] = True
    mask_b[points_b] = True
    # Euclidean distance between the two lone pixels.
    expected = np.sqrt(sum((u - v) ** 2 for u, v in zip(points_a, points_b)))
    pairwise = distance.cdist([points_a], [points_b])
    expected_mod = max(np.mean(np.min(pairwise, axis=0)),
                       np.mean(np.min(pairwise, axis=1)))
    assert_almost_equal(hausdorff_distance(mask_a, mask_b), expected)
    assert_array_equal(hausdorff_pair(mask_a, mask_b), (points_a, points_b))
    assert_almost_equal(
        hausdorff_distance(mask_a, mask_b, method="modified"), expected_mod
    )
@pytest.mark.parametrize("points_a", [(5, 4), (4, 5), (3, 4), (4, 3)])
@pytest.mark.parametrize("points_b", [(6, 4), (2, 6), (2, 4), (4, 0)])
def test_hausdorff_region_different_points(points_a, points_b):
    """Hausdorff metrics for distinct single points on a 7x7 grid."""
    shape = (7, 7)
    mask_a = np.zeros(shape, dtype=bool)
    mask_b = np.zeros(shape, dtype=bool)
    mask_a[points_a] = True
    mask_b[points_b] = True
    # Euclidean distance between the two lone pixels.
    expected = np.sqrt(sum((u - v) ** 2 for u, v in zip(points_a, points_b)))
    pairwise = distance.cdist([points_a], [points_b])
    expected_mod = max(np.mean(np.min(pairwise, axis=0)),
                       np.mean(np.min(pairwise, axis=1)))
    assert_almost_equal(hausdorff_distance(mask_a, mask_b), expected)
    assert_array_equal(hausdorff_pair(mask_a, mask_b), (points_a, points_b))
    assert_almost_equal(
        hausdorff_distance(mask_a, mask_b, method="modified"), expected_mod
    )
def test_gallery():
    """Diamond-vs-kite example used in the documentation gallery.

    The diamond's four corners sit 10 px from the common center; the kite's
    corners sit 15 px away in x and 20 px away in y, giving a standard
    Hausdorff distance of 10 and a modified Hausdorff distance of 7.5.
    """
    shape = (60, 60)
    center_x = center_y = 30
    unit_x = [0, 1, 0, -1]
    unit_y = [1, 0, -1, 0]
    # 1st set of points: four corners of a diamond of radius 10.
    diamond_corners = [
        (center_x + 10 * ux, center_y + 10 * uy) for ux, uy in zip(unit_x, unit_y)
    ]
    # 2nd set of points: four corners of a kite stretched 15 px in x, 20 px in y.
    kite_corners = [
        (center_x + 15 * ux, center_y + 20 * uy) for ux, uy in zip(unit_x, unit_y)
    ]
    # Rasterize both point sets for the Hausdorff computation.
    coords_a = np.zeros(shape, dtype=bool)
    coords_b = np.zeros(shape, dtype=bool)
    for corner in diamond_corners:
        coords_a[corner] = True
    for corner in kite_corners:
        coords_b[corner] = True
    # 10 is the distance between the furthest tip of the kite and its closest
    # point on the diamond: the furthest anyone can be made to travel to reach
    # their nearest neighboring point in the other set.
    assert_almost_equal(hausdorff_distance(coords_a, coords_b), 10.0)
    # Two pairs of points ((30, 20), (30, 10)) or ((30, 40), (30, 50)) are
    # exactly the Hausdorff distance apart; accept either.
    hd_points = hausdorff_pair(coords_a, coords_b)
    assert (
        np.equal(hd_points, ((30, 20), (30, 10))).all()
        or np.equal(hd_points, ((30, 40), (30, 50))).all()
    )
    # The modified Hausdorff distance for this configuration is 7.5.
    assert_almost_equal(hausdorff_distance(coords_a, coords_b, method="modified"), 7.5)
@pytest.mark.parametrize("points_a", [(0, 0, 1), (0, 1, 0), (1, 0, 0)])
@pytest.mark.parametrize("points_b", [(0, 0, 2), (0, 2, 0), (2, 0, 0)])
def test_3d_hausdorff_region(points_a, points_b):
    """Hausdorff metrics also work on 3-D volumes."""
    shape = (3, 3, 3)
    mask_a = np.zeros(shape, dtype=bool)
    mask_b = np.zeros(shape, dtype=bool)
    mask_a[points_a] = True
    mask_b[points_b] = True
    # Euclidean distance between the two lone voxels.
    expected = np.sqrt(sum((u - v) ** 2 for u, v in zip(points_a, points_b)))
    pairwise = distance.cdist([points_a], [points_b])
    expected_mod = max(np.mean(np.min(pairwise, axis=0)),
                       np.mean(np.min(pairwise, axis=1)))
    assert_almost_equal(hausdorff_distance(mask_a, mask_b), expected)
    assert_array_equal(hausdorff_pair(mask_a, mask_b), (points_a, points_b))
    assert_almost_equal(
        hausdorff_distance(mask_a, mask_b, method="modified"), expected_mod
    )
def test_hausdorff_metrics_match():
    """The Hausdorff distance equals the Euclidean distance between its pair."""
    pt_a, pt_b = (3, 0), (6, 0)
    shape = (7, 1)
    mask_a = np.zeros(shape, dtype=bool)
    mask_b = np.zeros(shape, dtype=bool)
    mask_a[pt_a] = True
    mask_b[pt_b] = True
    assert_array_equal(hausdorff_pair(mask_a, mask_b), (pt_a, pt_b))
    euclidean_distance = distance.euclidean(pt_a, pt_b)
    assert_almost_equal(euclidean_distance, hausdorff_distance(mask_a, mask_b))

View File

@@ -0,0 +1,139 @@
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_almost_equal
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage.metrics import (peak_signal_noise_ratio, normalized_root_mse,
mean_squared_error, normalized_mutual_information)
# Seed the legacy global RNG so the shared noisy-camera fixture below is
# reproducible across test runs.
np.random.seed(5)
cam = data.camera()
sigma = 20.0
# Camera image corrupted with additive Gaussian noise (sigma=20), clipped
# back into [0, 255] and cast to the original (uint8) dtype.
cam_noisy = np.clip(cam + sigma * np.random.randn(*cam.shape), 0, 255)
cam_noisy = cam_noisy.astype(cam.dtype)
def test_PSNR_vs_IPOL():
    """Compare PSNR against the imdiff reference value from IPOL.

    Reference article and code: https://www.ipol.im/pub/art/2011/g_lmii/.

    Notes
    -----
    To regenerate the reference, save a local copy of cam_noisy:
    >>> from skimage import io
    >>> io.imsave('/tmp/cam_noisy.png', cam_noisy)
    then run:
    $ ./imdiff -m psnr <path to camera.png>/camera.png /tmp/cam_noisy.png
    Value for the current data.camera() computed by Gregory Lee (Sep 2020):
    https://github.com/scikit-image/scikit-image/pull/4913#issuecomment-700653165
    """
    reference_psnr = 22.409353363576034
    measured = peak_signal_noise_ratio(cam, cam_noisy)
    assert_almost_equal(measured, reference_psnr, decimal=4)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_PSNR_float(dtype):
    """PSNR on rescaled float inputs matches the uint8 result.

    Also checks mixed-precision inputs and the warning emitted when the two
    images' dtypes disagree and no ``data_range`` is given.

    Fix: the mismatched-dtype warning check was duplicated verbatim (same
    comment, same call, same assert); the second copy is removed.
    """
    p_uint8 = peak_signal_noise_ratio(cam, cam_noisy)
    camf = (cam / 255.).astype(dtype, copy=False)
    camf_noisy = (cam_noisy / 255.).astype(dtype, copy=False)
    p_float64 = peak_signal_noise_ratio(camf, camf_noisy, data_range=1)
    assert p_float64.dtype == np.float64
    # float16 carries less precision, so compare more loosely for it.
    decimal = 3 if dtype == np.float16 else 5
    assert_almost_equal(p_uint8, p_float64, decimal=decimal)
    # mixed precision inputs
    p_mixed = peak_signal_noise_ratio(cam / 255., np.float32(cam_noisy / 255.),
                                      data_range=1)
    assert_almost_equal(p_mixed, p_float64, decimal=decimal)
    # mismatched dtype results in a warning if data_range is unspecified
    with expected_warnings(['Inputs have mismatched dtype']):
        p_mixed = peak_signal_noise_ratio(cam / 255.,
                                          np.float32(cam_noisy / 255.))
    assert_almost_equal(p_mixed, p_float64, decimal=decimal)
def test_PSNR_errors():
    """peak_signal_noise_ratio rejects images with mismatched shapes."""
    with pytest.raises(ValueError):
        peak_signal_noise_ratio(cam, cam[:-1, :])  # shape mismatch
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_NRMSE(dtype):
    """NRMSE under each normalization mode, for several float dtypes."""
    x = np.ones(4, dtype=dtype)
    y = np.asarray([0., 2., 2., 2.], dtype=dtype)
    # The RMSE between x and y is exactly 1, so each NRMSE is 1/denominator.
    result = normalized_root_mse(y, x, normalization='mean')
    assert result.dtype == np.float64
    assert_equal(result, 1 / np.mean(y))
    assert_equal(normalized_root_mse(y, x, normalization='euclidean'),
                 1 / np.sqrt(3))
    assert_equal(normalized_root_mse(y, x, normalization='min-max'),
                 1 / (y.max() - y.min()))
    # mixed precision inputs are allowed
    assert_almost_equal(normalized_root_mse(y, np.float32(x),
                                            normalization='min-max'),
                        1 / (y.max() - y.min()))
def test_NRMSE_no_int_overflow():
    """uint8 inputs must not overflow: results match the float32 computation."""
    cam_f = cam.astype(np.float32)
    noisy_f = cam_noisy.astype(np.float32)
    assert_almost_equal(mean_squared_error(cam, cam_noisy),
                        mean_squared_error(cam_f, noisy_f))
    assert_almost_equal(normalized_root_mse(cam, cam_noisy),
                        normalized_root_mse(cam_f, noisy_f))
def test_NRMSE_errors():
    """normalized_root_mse rejects bad shapes and unknown normalization names."""
    x = np.ones(4)
    with pytest.raises(ValueError):
        normalized_root_mse(x[:-1], x)  # shape mismatch
    with pytest.raises(ValueError):
        normalized_root_mse(x, x, normalization='foo')  # invalid name
def test_nmi():
    """NMI is 2 for identical images and strictly lower for a noisy copy."""
    nmi_self = normalized_mutual_information(cam, cam)
    assert_almost_equal(nmi_self, 2)
    assert normalized_mutual_information(cam, cam_noisy) < nmi_self
def test_nmi_different_sizes():
    """NMI accepts input images of different shapes."""
    assert normalized_mutual_information(cam[:, :400], cam[:400, :]) > 1
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_nmi_random(dtype):
    """NMI of two independent random images is ~1 (no shared information)."""
    rng = np.random.default_rng()
    image1 = rng.random((100, 100)).astype(dtype)
    image2 = rng.random((100, 100)).astype(dtype)
    result = normalized_mutual_information(image1, image2, bins=10)
    assert result.dtype == np.float64
    assert_almost_equal(result, 1, decimal=2)
def test_nmi_random_3d():
    """NMI of two independent random 3-D volumes is ~1."""
    volume1, volume2 = np.random.random((2, 10, 100, 100))
    result = normalized_mutual_information(volume1, volume2, bins=10)
    assert_almost_equal(result, 1, decimal=2)

View File

@@ -0,0 +1,270 @@
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_almost_equal
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage._shared.utils import _supported_float_type
from skimage.metrics import structural_similarity
# Seed the legacy global RNG so the shared noisy-camera fixture below is
# reproducible across test runs.
np.random.seed(5)
cam = data.camera()
sigma = 20.0
# Camera image corrupted with additive Gaussian noise (sigma=20), clipped
# back into [0, 255] and cast to the original (uint8) dtype.
cam_noisy = np.clip(cam + sigma * np.random.randn(*cam.shape), 0, 255)
cam_noisy = cam_noisy.astype(cam.dtype)
# Re-seed so the per-test draws from np.random below are deterministic.
np.random.seed(1234)
def test_structural_similarity_patch_range():
    """Whole-image window: dissimilar images score low, identical images 1."""
    N = 51
    # X is drawn before Y to keep the global RNG stream order unchanged.
    X = (np.random.rand(N, N) * 255).astype(np.uint8)
    Y = (np.random.rand(N, N) * 255).astype(np.uint8)
    assert structural_similarity(X, Y, win_size=N) < 0.1
    assert_equal(structural_similarity(X, X, win_size=N), 1)
def test_structural_similarity_image():
    """SSIM sanity checks on a pair of random uint8 images."""
    N = 100
    X = (np.random.rand(N, N) * 255).astype(np.uint8)
    Y = (np.random.rand(N, N) * 255).astype(np.uint8)
    # An image compared with itself is perfectly similar.
    assert_equal(structural_similarity(X, X, win_size=3), 1)
    # Independent random images are very dissimilar.
    assert structural_similarity(X, Y, win_size=3) < 0.3
    assert structural_similarity(X, Y, win_size=11, gaussian_weights=True) < 0.3
    # full=True additionally returns the per-pixel similarity image; its mean
    # equals the scalar returned without full=True.
    mean_ssim, ssim_image = structural_similarity(X, Y, full=True)
    assert_equal(ssim_image.shape, X.shape)
    assert_equal(mean_ssim, structural_similarity(X, Y))
    # structural_similarity of image with itself should be 1.0
    assert_equal(structural_similarity(X, X), 1.0)
# Because a forced random seed can occasionally produce a pathological pair of
# images, test against a few seeds that are known to behave well.
@pytest.mark.parametrize('seed', [1, 2, 3, 5, 8, 13])
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_structural_similarity_grad(seed, dtype):
    """SSIM and its gradient are both tiny for independent random images.

    NOTE: this test is known to randomly fail on some systems (Mac OS X 10.6)
    and when tests run in parallel: the hard threshold on the gradient is
    often only slightly above the computed value, hence the fixed seeds.
    """
    N = 60
    rnd = np.random.default_rng(seed)
    # Draw X before Y so the RNG stream matches the reference ordering.
    X = rnd.random((N, N)).astype(dtype, copy=False) * 255
    Y = rnd.random((N, N)).astype(dtype, copy=False) * 255
    assert structural_similarity(X, Y, data_range=255) < 0.05
    grad_result = structural_similarity(X, Y, data_range=255, gradient=True)
    assert grad_result[0] < 0.05
    assert np.all(grad_result[1] < 0.05)
    # gradient=True and full=True together return (mean, gradient, image),
    # all in the float type matching the input dtype.
    mssim, grad, s = structural_similarity(
        X, Y, data_range=255, gradient=True, full=True)
    assert s.dtype == _supported_float_type(dtype)
    assert grad.dtype == _supported_float_type(dtype)
    assert np.all(grad < 0.05)
@pytest.mark.parametrize(
    'dtype', [np.uint8, np.int32, np.float16, np.float32, np.float64]
)
def test_structural_similarity_dtype(dtype):
    """SSIM of two independent random images is low for every supported dtype,
    and the result is always float64.

    Fix: the integer branch previously rescaled ``X`` twice
    (``Y = (X * 255).astype(np.uint8)``), so ``Y`` was derived from ``X``
    (uint8 wraparound) rather than being the independent random image.
    """
    N = 30
    X = np.random.rand(N, N)
    Y = np.random.rand(N, N)
    if np.dtype(dtype).kind in 'iub':
        data_range = 255.0
        X = (X * 255).astype(np.uint8)
        # Rescale Y from its own random values so the images stay independent.
        Y = (Y * 255).astype(np.uint8)
    else:
        data_range = 1.0
        X = X.astype(dtype, copy=False)
        Y = Y.astype(dtype, copy=False)

    S1 = structural_similarity(X, Y, data_range=data_range)
    assert S1.dtype == np.float64
    assert S1 < 0.1
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1])
def test_structural_similarity_multichannel(channel_axis):
    """Replicating an image across channels must reproduce the 2D SSIM."""
    N = 100
    X = (np.random.rand(N, N) * 255).astype(np.uint8)
    Y = (np.random.rand(N, N) * 255).astype(np.uint8)
    single_channel = structural_similarity(X, Y, win_size=3)
    # Replicate each image across three channels; the multichannel score
    # should be identical to the single-channel one.
    Xc = np.tile(X[..., np.newaxis], (1, 1, 3))
    Yc = np.tile(Y[..., np.newaxis], (1, 1, 3))
    # Move channels from the last position to the requested axis.
    Xc, Yc = (np.moveaxis(arr, -1, channel_axis) for arr in (Xc, Yc))
    multi_channel = structural_similarity(
        Xc, Yc, channel_axis=channel_axis, win_size=3)
    assert_almost_equal(single_channel, multi_channel)
    # full=True also returns a similarity image with the input shape.
    m, S3 = structural_similarity(Xc, Yc, channel_axis=channel_axis, full=True)
    assert_equal(S3.shape, Xc.shape)
    # gradient=True returns a gradient with the input shape.
    m, grad = structural_similarity(Xc, Yc, channel_axis=channel_axis,
                                    gradient=True)
    assert_equal(grad.shape, Xc.shape)
    # Both options at once.
    m, grad, S3 = structural_similarity(Xc, Yc,
                                        channel_axis=channel_axis,
                                        full=True,
                                        gradient=True)
    assert_equal(grad.shape, Xc.shape)
    assert_equal(S3.shape, Xc.shape)
    # win_size may not exceed any non-channel dimension.
    with pytest.raises(ValueError):
        structural_similarity(Xc, Yc, win_size=7, channel_axis=None)
@pytest.mark.parametrize('dtype', [np.uint8, np.float32, np.float64])
def test_structural_similarity_nD(dtype):
    """SSIM of independent random arrays is low in 1D through 4D.

    Fix: the size list was hard-coded to five dimensions (``[N, ] * 5``), so
    every loop iteration built the same 5-D arrays and the loop over ``ndim``
    never actually varied the dimensionality; use ``ndim`` instead.
    """
    # test 1D through 4D on small random arrays
    N = 10
    for ndim in range(1, 5):
        xsize = [N, ] * ndim
        X = (np.random.rand(*xsize) * 255).astype(dtype)
        Y = (np.random.rand(*xsize) * 255).astype(dtype)
        mssim = structural_similarity(X, Y, win_size=3, data_range=255.0)
        assert mssim.dtype == np.float64
        assert mssim < 0.05
def test_structural_similarity_multichannel_chelsea():
    """On a real color image, multichannel SSIM is the mean over channels."""
    Xc = data.chelsea()
    noise_sigma = 15.0
    Yc = np.clip(Xc + noise_sigma * np.random.randn(*Xc.shape), 0, 255)
    Yc = Yc.astype(Xc.dtype)
    combined = structural_similarity(Xc, Yc, channel_axis=-1)
    per_channel = [structural_similarity(Yc[..., c], Xc[..., c])
                   for c in range(Xc.shape[-1])]
    assert_almost_equal(combined, np.mean(per_channel))
    # structural_similarity of image with itself should be 1.0
    assert_equal(structural_similarity(Xc, Xc, channel_axis=-1), 1.0)
def test_gaussian_structural_similarity_vs_IPOL():
    """Compare Gaussian-weighted SSIM against the imdiff reference from IPOL.

    Reference article and code: https://www.ipol.im/pub/art/2011/g_lmii/.

    Notes
    -----
    To regenerate the reference, save a local copy of cam_noisy:
    >>> from skimage import io
    >>> io.imsave('/tmp/cam_noisy.png', cam_noisy)
    then run:
    $ ./imdiff -m mssim <path to camera.png>/camera.png /tmp/cam_noisy.png
    Value for the current data.camera() computed by Gregory Lee (Sep 2020):
    https://github.com/scikit-image/scikit-image/pull/4913#issuecomment-700653165
    """
    reference_mssim = 0.357959091663361
    assert cam.dtype == np.uint8
    assert cam_noisy.dtype == np.uint8
    measured = structural_similarity(cam, cam_noisy, gaussian_weights=True,
                                     use_sample_covariance=False)
    assert_almost_equal(measured, reference_mssim, decimal=3)
@pytest.mark.parametrize(
    'dtype', [np.uint8, np.int32, np.float16, np.float32, np.float64]
)
def test_mssim_vs_legacy(dtype):
    """SSIM with default options still matches the scikit-image 0.17 value."""
    legacy_value = 0.3674518327910367
    assert cam.dtype == np.uint8
    assert cam_noisy.dtype == np.uint8
    mssim = structural_similarity(cam.astype(dtype), cam_noisy.astype(dtype),
                                  data_range=255)
    assert_almost_equal(mssim, legacy_value)
def test_ssim_warns_about_data_range():
    """SSIM warns when data_range has to be inferred from a non-uint8 dtype."""
    mssim = structural_similarity(cam, cam_noisy)
    with expected_warnings(['Setting data_range based on im1.dtype']):
        mssim_uint16 = structural_similarity(cam.astype(np.uint16),
                                             cam_noisy.astype(np.uint16))
    # mssim_uint16 is wrong: the uint16 dtype of im1 led to an erroneous
    # inferred data_range — which is exactly why the warning is emitted.
    assert mssim_uint16 > 0.99
    with expected_warnings(['Setting data_range based on im1.dtype',
                            'Inputs have mismatched dtypes']):
        mssim_mixed = structural_similarity(cam, cam_noisy.astype(np.int32))
    # No warning once the user supplies data_range explicitly.
    mssim_mixed = structural_similarity(
        cam, cam_noisy.astype(np.float32), data_range=255)
    assert_almost_equal(mssim, mssim_mixed)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_structural_similarity_small_image(dtype):
    """Small images work with an explicit win_size; without one they raise."""
    X = np.zeros((5, 5), dtype=dtype)
    # win_size must be (a) odd and (b) no larger than the image's smaller side.
    for win in (3, 5):
        assert_equal(
            structural_similarity(X, X, win_size=win, data_range=1.0), 1.0)
    # Without a user-specified win_size, small images are rejected.
    with pytest.raises(ValueError):
        structural_similarity(X, X)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_structural_similarity_errors_on_float_without_data_range(dtype):
    """Floating-point input without an explicit data_range raises ValueError."""
    image = np.zeros((64, 64), dtype=dtype)
    with pytest.raises(ValueError):
        structural_similarity(image, image)
def test_invalid_input():
    """Invalid shapes, window sizes, and negative kwargs all raise ValueError."""
    X = np.zeros((9, 9), dtype=np.float64)
    Y = np.zeros((8, 8), dtype=np.float64)
    with pytest.raises(ValueError):
        structural_similarity(X, Y)  # size mismatch
    with pytest.raises(ValueError):
        structural_similarity(X, X, win_size=X.shape[0] + 1)  # window too big
    # These keyword arguments must be non-negative.
    for bad_kwarg in ({'K1': -0.1}, {'K2': -0.1}, {'sigma': -1.0}):
        with pytest.raises(ValueError):
            structural_similarity(X, X, **bad_kwarg)