Use a for loop to install conda packages

This commit is contained in:
ton
2023-04-16 11:03:27 +07:00
parent 49da9f29c1
commit 0c2b34d6f8
12168 changed files with 2656238 additions and 1 deletions

View File

@@ -0,0 +1,57 @@
# Public API of the `skimage.transform` subpackage: re-export the Hough,
# Radon, integral-image, geometric, warp, and pyramid routines from their
# implementation modules.
from .hough_transform import (hough_line, hough_line_peaks,
                              probabilistic_hough_line, hough_circle,
                              hough_circle_peaks, hough_ellipse)
from .radon_transform import (radon, iradon, iradon_sart,
                              order_angles_golden_ratio)
from .finite_radon_transform import frt2, ifrt2
from .integral import integral_image, integrate
from ._geometric import (estimate_transform,
                         matrix_transform, EuclideanTransform,
                         SimilarityTransform, AffineTransform,
                         ProjectiveTransform, FundamentalMatrixTransform,
                         EssentialMatrixTransform, PolynomialTransform,
                         PiecewiseAffineTransform)
from ._warps import (swirl, resize, rotate, rescale,
                     downscale_local_mean, warp, warp_coords, warp_polar,
                     resize_local_mean)
from .pyramids import (pyramid_reduce, pyramid_expand,
                       pyramid_gaussian, pyramid_laplacian)

# Names exported by `from skimage.transform import *`.
__all__ = ['hough_circle',
           'hough_ellipse',
           'hough_line',
           'probabilistic_hough_line',
           'hough_circle_peaks',
           'hough_line_peaks',
           'radon',
           'iradon',
           'iradon_sart',
           'order_angles_golden_ratio',
           'frt2',
           'ifrt2',
           'integral_image',
           'integrate',
           'warp',
           'warp_coords',
           'warp_polar',
           'estimate_transform',
           'matrix_transform',
           'EuclideanTransform',
           'SimilarityTransform',
           'AffineTransform',
           'ProjectiveTransform',
           'EssentialMatrixTransform',
           'FundamentalMatrixTransform',
           'PolynomialTransform',
           'PiecewiseAffineTransform',
           'swirl',
           'resize',
           'resize_local_mean',
           'rotate',
           'rescale',
           'downscale_local_mean',
           'pyramid_reduce',
           'pyramid_expand',
           'pyramid_gaussian',
           'pyramid_laplacian']

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,134 @@
"""
:author: Gary Ruben, 2009
:license: modified BSD
"""
__all__ = ["frt2", "ifrt2"]
import numpy as np
from numpy import roll, newaxis
def frt2(a):
    """Compute the 2-dimensional finite radon transform (FRT) of an
    n x n integer array.

    Parameters
    ----------
    a : array_like
        A 2-D square n x n integer array.

    Returns
    -------
    FRT : 2-D ndarray
        Finite Radon Transform array of (n+1) x n integer coefficients.

    See Also
    --------
    ifrt2 : The two-dimensional inverse FRT.

    Notes
    -----
    The FRT has a unique inverse if and only if n is prime. [FRT]
    The idea for this algorithm is due to Vlad Negnevitski.

    References
    ----------
    .. [FRT] A. Kingston and I. Svalbe, "Projective transforms on periodic
             discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
             and Electron Physics, 139 (2006)
    """
    if a.ndim != 2 or a.shape[0] != a.shape[1]:
        raise ValueError("Input must be a square, 2-D array")

    work = a.copy()
    n = work.shape[0]
    out = np.empty((n + 1, n), np.uint32)

    # Projection 0: plain column sums (no shear applied yet).
    out[0] = work.sum(axis=0)
    for m in range(1, n):
        # Shear the image: each pass rolls row r a further r places to
        # the left, so after m passes row r has moved m*r places.
        for r in range(1, n):
            work[r] = roll(work[r], -r)
        out[m] = work.sum(axis=0)
    # Final projection: row sums (unaffected by the row rolls above).
    out[n] = work.sum(axis=1)
    return out
def ifrt2(a):
"""Compute the 2-dimensional inverse finite radon transform (iFRT) for
an (n+1) x n integer array.
Parameters
----------
a : array_like
A 2-D (n+1) row x n column integer array.
Returns
-------
iFRT : 2-D n x n ndarray
Inverse Finite Radon Transform array of n x n integer coefficients.
See Also
--------
frt2 : The two-dimensional FRT
Notes
-----
The FRT has a unique inverse if and only if n is prime.
See [1]_ for an overview.
The idea for this algorithm is due to Vlad Negnevitski.
Examples
--------
>>> SIZE = 59
>>> img = np.tri(SIZE, dtype=np.int32)
Apply the Finite Radon Transform:
>>> f = frt2(img)
Apply the Inverse Finite Radon Transform to recover the input
>>> fi = ifrt2(f)
Check that it's identical to the original
>>> assert len(np.nonzero(img-fi)[0]) == 0
References
----------
.. [1] A. Kingston and I. Svalbe, "Projective transforms on periodic
discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
and Electron Physics, 139 (2006)
"""
if a.ndim != 2 or a.shape[0] != a.shape[1] + 1:
raise ValueError("Input must be an (n+1) row x n column, 2-D array")
ai = a.copy()[:-1]
n = ai.shape[1]
f = np.empty((n, n), np.uint32)
f[0] = ai.sum(axis=0)
for m in range(1, n):
# Rolls the pth row of ai right by p places.
for row in range(1, ai.shape[0]):
ai[row] = roll(ai[row], row)
f[m] = ai.sum(axis=0)
f += a[-1][newaxis].T
f = (f - ai[0].sum()) / n
return f

View File

@@ -0,0 +1,428 @@
import numpy as np
from scipy.spatial import cKDTree
from ._hough_transform import _hough_circle, _hough_ellipse, _hough_line
from ._hough_transform import _probabilistic_hough_line as _prob_hough_line
def hough_line_peaks(hspace, angles, dists, min_distance=9, min_angle=10,
                     threshold=None, num_peaks=np.inf):
    """Return peaks in a straight line Hough transform.

    Identifies the most prominent lines separated by a certain angle and
    distance in a Hough transform, using non-maximum suppression of
    different sizes along the distance and angle dimensions.

    Parameters
    ----------
    hspace : (N, M) array
        Hough space returned by the `hough_line` function.
    angles : (M,) array
        Angles returned by the `hough_line` function. Assumed to be
        continuous (`angles[-1] - angles[0] == PI`).
    dists : (N, ) array
        Distances returned by the `hough_line` function.
    min_distance : int, optional
        Minimum distance separating lines (maximum filter size for the
        first dimension of the Hough space).
    min_angle : int, optional
        Minimum angle separating lines (maximum filter size for the
        second dimension of the Hough space).
    threshold : float, optional
        Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
    num_peaks : int, optional
        Maximum number of peaks. When the number of peaks exceeds
        `num_peaks`, return `num_peaks` coordinates based on peak
        intensity.

    Returns
    -------
    accum, angles, dists : tuple of array
        Peak values in Hough space, angles and distances.
    """
    from ..feature.peak import _prominent_peaks

    # The angular suppression window cannot exceed the number of angle bins.
    angle_window = min(min_angle, hspace.shape[1])
    accum, angle_idx, dist_idx = _prominent_peaks(
        hspace,
        min_xdistance=angle_window,
        min_ydistance=min_distance,
        threshold=threshold,
        num_peaks=num_peaks,
    )
    if angle_idx.size == 0:
        # No peak found: return empty angle/distance arrays.
        return (accum, np.array([]), np.array([]))
    return (accum, angles[angle_idx], dists[dist_idx])
def hough_circle(image, radius, normalize=True, full_output=False):
    """Perform a circular Hough transform.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image with nonzero values representing edges.
    radius : scalar or sequence of scalars
        Radii at which to compute the Hough transform.
        Floats are converted to integers.
    normalize : boolean, optional (default True)
        Normalize the accumulator with the number of pixels used to
        draw the radius.
    full_output : boolean, optional (default False)
        Extend the output size by twice the largest radius in order to
        detect centers outside the input picture.

    Returns
    -------
    H : 3D ndarray (radius index, (M + 2R, N + 2R) ndarray)
        Hough transform accumulator for each radius.
        R designates the larger radius if full_output is True.
        Otherwise, R = 0.
    """
    # Accept a scalar or a sequence of radii; the accumulator backend
    # expects a 1-D integer (intp) array.
    radii = np.atleast_1d(np.asarray(radius)).astype(np.intp)
    return _hough_circle(image, radii,
                         normalize=normalize, full_output=full_output)
def hough_ellipse(image, threshold=4, accuracy=1, min_size=4, max_size=None):
    """Perform an elliptical Hough transform.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image with nonzero values representing edges.
    threshold : int, optional
        Accumulator threshold value.
    accuracy : double, optional
        Bin size on the minor axis used in the accumulator.
    min_size : int, optional
        Minimal major axis length.
    max_size : int, optional
        Maximal minor axis length.
        If None, the value is set to the half of the smaller
        image dimension.

    Returns
    -------
    result : ndarray with fields [(accumulator, yc, xc, a, b, orientation)].
        Where ``(yc, xc)`` is the center, ``(a, b)`` the major and minor
        axes, respectively. The `orientation` value follows
        `skimage.draw.ellipse_perimeter` convention.

    Notes
    -----
    The accuracy must be chosen to produce a peak in the accumulator
    distribution: a flat accumulator distribution with low values may be
    caused by a too low bin size.

    References
    ----------
    .. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
           method." Pattern Recognition, 2002. Proceedings. 16th International
           Conference on. Vol. 2. IEEE, 2002
    """
    # Thin wrapper around the compiled accumulator.
    params = dict(threshold=threshold, accuracy=accuracy,
                  min_size=min_size, max_size=max_size)
    return _hough_ellipse(image, **params)
def hough_line(image, theta=None):
    """Perform a straight line Hough transform.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image with nonzero values representing edges.
    theta : 1D ndarray of double, optional
        Angles at which to compute the transform, in radians.
        Defaults to a vector of 180 angles evenly spaced in the
        range [-pi/2, pi/2).

    Returns
    -------
    hspace : 2-D ndarray of uint64
        Hough transform accumulator.
    angles : ndarray
        Angles at which the transform is computed, in radians.
    distances : ndarray
        Distance values.

    Notes
    -----
    The origin is the top left corner of the original image.
    X and Y axis are horizontal and vertical edges respectively.
    The distance is the minimal algebraic distance from the origin
    to the detected line.
    The angle accuracy can be improved by decreasing the step size in
    the `theta` array.
    """
    if image.ndim != 2:
        raise ValueError('The input image `image` must be 2D.')
    # Default: 180 evenly spaced angles in [-pi/2, pi/2).
    angles = (np.linspace(-np.pi / 2, np.pi / 2, 180, endpoint=False)
              if theta is None else theta)
    return _hough_line(image, theta=angles)
def probabilistic_hough_line(image, threshold=10, line_length=50, line_gap=10,
                             theta=None, seed=None):
    """Return lines from a progressive probabilistic line Hough transform.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image with nonzero values representing edges.
    threshold : int, optional
        Threshold
    line_length : int, optional
        Minimum accepted length of detected lines.
        Increase the parameter to extract longer lines.
    line_gap : int, optional
        Maximum gap between pixels to still form a line.
        Increase the parameter to merge broken lines more aggressively.
    theta : 1D ndarray, dtype=double, optional
        Angles at which to compute the transform, in radians.
        Defaults to a vector of 180 angles evenly spaced in the
        range [-pi/2, pi/2).
    seed : int, optional
        Seed to initialize the random number generator.

    Returns
    -------
    lines : list
        List of lines identified, lines in format ((x0, y0), (x1, y1)),
        indicating line start and end.

    References
    ----------
    .. [1] C. Galamhos, J. Matas and J. Kittler, "Progressive probabilistic
           Hough transform for line detection", in IEEE Computer Society
           Conference on Computer Vision and Pattern Recognition, 1999.
    """
    if image.ndim != 2:
        raise ValueError('The input image `image` must be 2D.')
    # Default: 180 evenly spaced angles in [-pi/2, pi/2).
    angles = (np.linspace(-np.pi / 2, np.pi / 2, 180, endpoint=False)
              if theta is None else theta)
    return _prob_hough_line(image, threshold=threshold,
                            line_length=line_length, line_gap=line_gap,
                            theta=angles, seed=seed)
def hough_circle_peaks(hspaces, radii, min_xdistance=1, min_ydistance=1,
                       threshold=None, num_peaks=np.inf,
                       total_num_peaks=np.inf, normalize=False):
    """Return peaks in a circle Hough transform.

    Identifies most prominent circles separated by certain distances in given
    Hough spaces. Non-maximum suppression with different sizes is applied
    separately in the first and second dimension of the Hough space to
    identify peaks. For circles with different radius but close in distance,
    only the one with highest peak is kept.

    Parameters
    ----------
    hspaces : (N, M) array
        Hough spaces returned by the `hough_circle` function.
    radii : (M,) array
        Radii corresponding to Hough spaces.
    min_xdistance : int, optional
        Minimum distance separating centers in the x dimension.
    min_ydistance : int, optional
        Minimum distance separating centers in the y dimension.
    threshold : float, optional
        Minimum intensity of peaks in each Hough space.
        Default is `0.5 * max(hspace)`.
    num_peaks : int, optional
        Maximum number of peaks in each Hough space. When the
        number of peaks exceeds `num_peaks`, only `num_peaks`
        coordinates based on peak intensity are considered for the
        corresponding radius.
    total_num_peaks : int, optional
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.
    normalize : bool, optional
        If True, normalize the accumulator by the radius to sort the prominent
        peaks.

    Returns
    -------
    accum, cx, cy, rad : tuple of array
        Peak values in Hough space, x and y center coordinates and radii.

    Notes
    -----
    Circles with bigger radius have higher peaks in Hough space. If larger
    circles are preferred over smaller ones, `normalize` should be False.
    Otherwise, circles will be returned in the order of decreasing voting
    number.
    """
    from ..feature.peak import _prominent_peaks
    # Collect peaks per radius: each Hough slice contributes up to
    # `num_peaks` candidate centers.
    r = []
    cx = []
    cy = []
    accum = []
    for rad, hp in zip(radii, hspaces):
        h_p, x_p, y_p = _prominent_peaks(hp,
                                         min_xdistance=min_xdistance,
                                         min_ydistance=min_ydistance,
                                         threshold=threshold,
                                         num_peaks=num_peaks)
        r.extend((rad,)*len(h_p))
        cx.extend(x_p)
        cy.extend(y_p)
        accum.extend(h_p)
    r = np.array(r)
    cx = np.array(cx)
    cy = np.array(cy)
    accum = np.array(accum)
    # Sort candidates by (optionally radius-normalized) accumulator value.
    if normalize:
        s = np.argsort(accum / r)
    else:
        s = np.argsort(accum)
    # argsort is ascending; reverse to get strongest peaks first.
    accum_sorted, cx_sorted, cy_sorted, r_sorted = \
        accum[s][::-1], cx[s][::-1], cy[s][::-1], r[s][::-1]
    tnp = len(accum_sorted) if total_num_peaks == np.inf else total_num_peaks
    # Skip searching for neighboring circles
    # if default min_xdistance and min_ydistance are used
    # or if no peak was detected
    if (min_xdistance == 1 and min_ydistance == 1) or len(accum_sorted) == 0:
        return (accum_sorted[:tnp],
                cx_sorted[:tnp],
                cy_sorted[:tnp],
                r_sorted[:tnp])
    # For circles with centers too close, only keep the one with
    # the highest peak
    should_keep = label_distant_points(
        cx_sorted, cy_sorted, min_xdistance, min_ydistance, tnp
    )
    return (accum_sorted[should_keep],
            cx_sorted[should_keep],
            cy_sorted[should_keep],
            r_sorted[should_keep])
def label_distant_points(xs, ys, min_xdistance, min_ydistance, max_points):
    """Keep points that are separated by certain distance in each dimension.

    The first point is always accepted and all subsequent points are
    selected so that they are distant from all their preceding ones.

    Parameters
    ----------
    xs : array
        X coordinates of points.
    ys : array
        Y coordinates of points.
    min_xdistance : int
        Minimum distance separating points in the x dimension.
    min_ydistance : int
        Minimum distance separating points in the y dimension.
    max_points : int
        Max number of distant points to keep.

    Returns
    -------
    should_keep : array of bool
        A mask array for distant points to keep.
    """
    n = len(xs)
    suppressed = np.zeros(n, dtype=bool)
    # KDTree over the (x, y) pairs gives fast radius queries.
    tree = cKDTree(np.stack([xs, ys], axis=1))
    # Radius of the circle circumscribing the rectangular exclusion window.
    search_radius = np.hypot(min_xdistance, min_ydistance)
    n_kept = 0
    for i in range(n):
        if n_kept >= max_points:
            # Budget of points to keep exhausted: suppress the rest.
            suppressed[i] = True
            continue
        if suppressed[i]:
            continue
        # Candidates inside the circle; the per-axis checks below
        # enforce the actual rectangular window.
        for j in tree.query_ball_point((xs[i], ys[i]), search_radius):
            if (j > i
                    and abs(xs[j] - xs[i]) <= min_xdistance
                    and abs(ys[j] - ys[i]) <= min_ydistance):
                suppressed[j] = True
        n_kept += 1
    return ~suppressed

View File

@@ -0,0 +1,140 @@
import numpy as np
def integral_image(image, *, dtype=None):
    r"""Integral image / summed area table.

    The integral image contains the sum of all elements above and to the
    left of it, i.e.:

    .. math::

       S[m, n] = \sum_{i \leq m} \sum_{j \leq n} X[i, j]

    Parameters
    ----------
    image : ndarray
        Input image.

    Returns
    -------
    S : ndarray
        Integral image/summed area table of same shape as input image.

    Notes
    -----
    For better accuracy and to avoid potential overflow, the data type of
    the output may differ from the input's when the default dtype of None
    is used. For inputs with integer dtype, the behavior matches that for
    :func:`numpy.cumsum`. Floating point inputs will be promoted to at
    least double precision. The user can set `dtype` to override this
    behavior.

    References
    ----------
    .. [1] F.C. Crow, "Summed-area tables for texture mapping,"
           ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212.
    """
    if dtype is None and image.real.dtype.kind == 'f':
        # Accumulate floats in at least double precision for accuracy.
        dtype = np.promote_types(image.dtype, np.float64)

    # Running cumulative sum along each axis in turn yields the table.
    table = image
    for axis in range(image.ndim):
        table = table.cumsum(axis=axis, dtype=dtype)
    return table
def integrate(ii, start, end):
    """Use an integral image to integrate over a given window.

    Parameters
    ----------
    ii : ndarray
        Integral image.
    start : List of tuples, each tuple of length equal to dimension of `ii`
        Coordinates of top left corner of window(s).
        Each tuple in the list contains the starting row, col, ... index
        i.e `[(row_win1, col_win1, ...), (row_win2, col_win2,...), ...]`.
    end : List of tuples, each tuple of length equal to dimension of `ii`
        Coordinates of bottom right corner of window(s).
        Each tuple in the list containing the end row, col, ... index i.e
        `[(row_win1, col_win1, ...), (row_win2, col_win2, ...), ...]`.

    Returns
    -------
    S : scalar or ndarray
        Integral (sum) over the given window(s).

    Examples
    --------
    >>> arr = np.ones((5, 6), dtype=float)
    >>> ii = integral_image(arr)
    >>> integrate(ii, (1, 0), (1, 2))  # sum from (1, 0) to (1, 2)
    array([3.])
    >>> integrate(ii, [(1, 0), (3, 3)], [(1, 2), (4, 5)])
    array([3., 6.])
    """
    start = np.atleast_2d(np.asarray(start))
    end = np.atleast_2d(np.asarray(end))
    n_windows = start.shape[0]

    # Map negative indices to their positive equivalents.
    extent = np.asarray(ii.shape)
    start = np.where(start < 0, start + extent, start)
    end = np.where(end < 0, end + extent, end)

    if np.any(end - start < 0):
        raise IndexError('end coordinates must be greater or equal to start')

    # Inclusion-exclusion over the 2**ndim corners of each window.  For a
    # 2-D window from (1, 1) to (2, 2):
    #     S = + ii[2, 2] - ii[0, 2] - ii[2, 0] + ii[0, 0]
    # Each corner takes `end` along unmasked axes and `start - 1` along
    # masked axes; its sign is the parity of the mask.  A corner whose
    # masked `start - 1` coordinate is negative lies outside the image
    # and contributes zero.
    ndim = ii.ndim
    total = np.zeros(n_windows)
    for bits in range(2 ** ndim):
        mask = np.array([(bits >> axis) & 1 == 1 for axis in range(ndim)])
        sign = -1 if mask.sum() % 2 else 1
        corners = np.where(mask, start - 1, end)
        # Windows whose masked start-1 coordinate falls outside the image.
        invalid = np.any((start - 1) * mask < 0, axis=1)
        for w in range(n_windows):
            if not invalid[w]:
                total[w] += sign * ii[tuple(corners[w])]
    return total

View File

@@ -0,0 +1,356 @@
import math
import numpy as np
from .._shared.filters import gaussian
from .._shared.utils import convert_to_float
from ..transform import resize
def _smooth(image, sigma, mode, cval, channel_axis):
    """Return image with each channel smoothed by the Gaussian filter."""
    out = np.empty_like(image)
    if channel_axis is not None:
        # Normalize the axis index, then smooth the spatial dimensions
        # only; `gaussian` inserts a 0 sigma entry at `channel_axis`.
        channel_axis = channel_axis % image.ndim
        sigma = (sigma,) * (image.ndim - 1)
    gaussian(image, sigma, output=out, mode=mode, cval=cval,
             channel_axis=channel_axis)
    return out
def _check_factor(factor):
if factor <= 1:
raise ValueError('scale factor must be greater than 1')
def pyramid_reduce(image, downscale=2, sigma=None, order=1,
                   mode='reflect', cval=0,
                   preserve_range=False, *, channel_axis=None):
    """Smooth and then downsample image.

    Parameters
    ----------
    image : ndarray
        Input image.
    downscale : float, optional
        Downscale factor.
    sigma : float, optional
        Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
        corresponds to a filter mask twice the size of the scale factor
        that covers more than 99% of the Gaussian distribution.
    order : int, optional
        Order of splines used in interpolation of downsampling. See
        `skimage.transform.warp` for detail.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where cval is the value when mode is equal to 'constant'.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel)
        image. Otherwise, this parameter indicates which axis of the array
        corresponds to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    out : array
        Smoothed and downsampled float image.

    References
    ----------
    .. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
    """
    _check_factor(downscale)
    image = convert_to_float(image, preserve_range)

    if channel_axis is not None:
        channel_axis = channel_axis % image.ndim
    # Every spatial axis shrinks by `downscale`; the channel axis (if
    # any) keeps its extent.  With channel_axis=None no axis matches.
    out_shape = tuple(
        d if ax == channel_axis else math.ceil(d / float(downscale))
        for ax, d in enumerate(image.shape)
    )

    if sigma is None:
        # Filter mask twice the scale factor covers > 99% of the Gaussian.
        sigma = 2 * downscale / 6.0

    smoothed = _smooth(image, sigma, mode, cval, channel_axis)
    return resize(smoothed, out_shape, order=order, mode=mode, cval=cval,
                  anti_aliasing=False)
def pyramid_expand(image, upscale=2, sigma=None, order=1,
                   mode='reflect', cval=0,
                   preserve_range=False, *, channel_axis=None):
    """Upsample and then smooth image.

    Parameters
    ----------
    image : ndarray
        Input image.
    upscale : float, optional
        Upscale factor.
    sigma : float, optional
        Sigma for Gaussian filter. Default is `2 * upscale / 6.0` which
        corresponds to a filter mask twice the size of the scale factor
        that covers more than 99% of the Gaussian distribution.
    order : int, optional
        Order of splines used in interpolation of upsampling. See
        `skimage.transform.warp` for detail.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where cval is the value when mode is equal to 'constant'.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel)
        image. Otherwise, this parameter indicates which axis of the array
        corresponds to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    out : array
        Upsampled and smoothed float image.

    References
    ----------
    .. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
    """
    _check_factor(upscale)
    image = convert_to_float(image, preserve_range)

    if channel_axis is not None:
        channel_axis = channel_axis % image.ndim
    # Every spatial axis grows by `upscale`; the channel axis (if any)
    # keeps its extent.  With channel_axis=None no axis matches.
    out_shape = tuple(
        d if ax == channel_axis else math.ceil(upscale * d)
        for ax, d in enumerate(image.shape)
    )

    if sigma is None:
        # Filter mask twice the scale factor covers > 99% of the Gaussian.
        sigma = 2 * upscale / 6.0

    resized = resize(image, out_shape, order=order,
                     mode=mode, cval=cval, anti_aliasing=False)
    return _smooth(resized, sigma, mode, cval, channel_axis)
def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1,
                     mode='reflect', cval=0,
                     preserve_range=False, *, channel_axis=None):
    """Yield images of the Gaussian pyramid formed by the input image.

    Recursively applies the `pyramid_reduce` function to the image, and
    yields the downscaled images.

    Note that the first image of the pyramid will be the original, unscaled
    image. The total number of images is `max_layer + 1`. In case all
    layers are computed, the last image is either a one-pixel image or the
    image where the reduction does not change its shape.

    Parameters
    ----------
    image : ndarray
        Input image.
    max_layer : int, optional
        Number of layers for the pyramid. 0th layer is the original image.
        Default is -1 which builds all possible layers.
    downscale : float, optional
        Downscale factor.
    sigma : float, optional
        Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
        corresponds to a filter mask twice the size of the scale factor
        that covers more than 99% of the Gaussian distribution.
    order : int, optional
        Order of splines used in interpolation of downsampling. See
        `skimage.transform.warp` for detail.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where cval is the value when mode is equal to 'constant'.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel)
        image. Otherwise, this parameter indicates which axis of the array
        corresponds to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    pyramid : generator
        Generator yielding pyramid layers as float images.

    References
    ----------
    .. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
    """
    _check_factor(downscale)

    # Float cast keeps every layer of the pyramid in a consistent dtype.
    layer_image = convert_to_float(image, preserve_range)
    yield layer_image

    # Keep reducing until max_layer layers were produced, or until a
    # reduction no longer changes the image size.
    layer = 0
    while layer != max_layer:
        layer += 1
        reduced = pyramid_reduce(layer_image, downscale, sigma, order,
                                 mode, cval, channel_axis=channel_axis)
        if reduced.shape == layer_image.shape:
            # Downscaling changed nothing: the pyramid has bottomed out.
            break
        layer_image = reduced
        yield reduced
def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1,
                      mode='reflect', cval=0,
                      preserve_range=False, *, channel_axis=None):
    """Yield images of the laplacian pyramid formed by the input image.

    Each layer contains the difference between the downsampled and the
    downsampled, smoothed image::

        layer = resize(prev_layer) - smooth(resize(prev_layer))

    Note that the first image of the pyramid will be the difference between
    the original, unscaled image and its smoothed version. The total number
    of images is `max_layer + 1`. In case all layers are computed, the last
    image is either a one-pixel image or the image where the reduction does
    not change its shape.

    Parameters
    ----------
    image : ndarray
        Input image.
    max_layer : int, optional
        Number of layers for the pyramid. 0th layer is the original image.
        Default is -1 which builds all possible layers.
    downscale : float, optional
        Downscale factor.
    sigma : float, optional
        Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
        corresponds to a filter mask twice the size of the scale factor
        that covers more than 99% of the Gaussian distribution.
    order : int, optional
        Order of splines used in interpolation of downsampling. See
        `skimage.transform.warp` for detail.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where cval is the value when mode is equal to 'constant'.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel)
        image. Otherwise, this parameter indicates which axis of the array
        corresponds to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    pyramid : generator
        Generator yielding pyramid layers as float images.

    References
    ----------
    .. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
    .. [2] http://sepwww.stanford.edu/data/media/public/sep/morgan/texturematch/paper_html/node3.html
    """
    _check_factor(downscale)
    # cast to float for consistent data type in pyramid
    image = convert_to_float(image, preserve_range)
    if sigma is None:
        # automatically determine sigma which covers > 99% of distribution
        sigma = 2 * downscale / 6.0
    current_shape = image.shape
    smoothed_image = _smooth(image, sigma, mode, cval, channel_axis)
    # Layer 0: difference of the original image and its smoothed version.
    yield image - smoothed_image
    if channel_axis is not None:
        channel_axis = channel_axis % image.ndim
        # The channel axis must not count towards the layer limit below.
        shape_without_channels = list(current_shape)
        shape_without_channels.pop(channel_axis)
        shape_without_channels = tuple(shape_without_channels)
    else:
        shape_without_channels = current_shape
    # build downsampled images until max_layer is reached or downscale process
    # does not change image size
    if max_layer == -1:
        # Enough layers to reduce the largest spatial extent to size 1.
        max_layer = math.ceil(math.log(max(shape_without_channels), downscale))
    for layer in range(max_layer):
        if channel_axis is not None:
            # Shrink spatial axes only; the channel axis keeps its extent.
            out_shape = tuple(
                math.ceil(d / float(downscale)) if ax != channel_axis else d
                for ax, d in enumerate(current_shape)
            )
        else:
            out_shape = tuple(math.ceil(d / float(downscale))
                              for d in current_shape)
        resized_image = resize(smoothed_image, out_shape, order=order,
                               mode=mode, cval=cval, anti_aliasing=False)
        smoothed_image = _smooth(resized_image, sigma, mode, cval,
                                 channel_axis)
        current_shape = resized_image.shape
        yield resized_image - smoothed_image

View File

@@ -0,0 +1,502 @@
import numpy as np
from scipy.interpolate import interp1d
from scipy.constants import golden_ratio
from scipy.fft import fft, ifft, fftfreq, fftshift
from ._warps import warp
from ._radon_transform import sart_projection_update
from .._shared.utils import convert_to_float
from warnings import warn
from functools import partial
__all__ = ['radon', 'order_angles_golden_ratio', 'iradon', 'iradon_sart']
def radon(image, theta=None, circle=True, *, preserve_range=False):
    """
    Calculates the radon transform of an image given specified
    projection angles.
    Parameters
    ----------
    image : array_like
        Input image. The rotation axis will be located in the pixel with
        indices ``(image.shape[0] // 2, image.shape[1] // 2)``.
    theta : array_like, optional
        Projection angles (in degrees). If `None`, the value is set to
        np.arange(180).
    circle : boolean, optional
        Assume image is zero outside the inscribed circle, making the
        width of each projection (the first dimension of the sinogram)
        equal to ``min(image.shape)``.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
    Returns
    -------
    radon_image : ndarray
        Radon transform (sinogram). The tomography rotation axis will lie
        at the pixel index ``radon_image.shape[0] // 2`` along the 0th
        dimension of ``radon_image``.
    References
    ----------
    .. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
           Imaging", IEEE Press 1988.
    .. [2] B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
           the Discrete Radon Transform With Some Applications", Proceedings of
           the Fourth IEEE Region 10 International Conference, TENCON '89, 1989
    Notes
    -----
    Based on code of Justin K. Romberg
    (https://www.clear.rice.edu/elec431/projects96/DSP/bpanalysis.html)
    """
    if image.ndim != 2:
        raise ValueError('The input image must be 2-D')
    if theta is None:
        theta = np.arange(180)
    image = convert_to_float(image, preserve_range)
    if circle:
        shape_min = min(image.shape)
        radius = shape_min // 2
        img_shape = np.array(image.shape)
        # dtype=object keeps the two open grids from ``ogrid`` as separate
        # elements, so subtracting the per-axis center broadcasts correctly
        # even for non-square images before the squared distances are summed.
        coords = np.array(np.ogrid[:image.shape[0], :image.shape[1]],
                          dtype=object)
        dist = ((coords - img_shape // 2) ** 2).sum(0)
        outside_reconstruction_circle = dist > radius ** 2
        # Warn (don't fail) if the caller's promise of a zero exterior is
        # violated; the transform would silently lose that mass.
        if np.any(image[outside_reconstruction_circle]):
            warn('Radon transform: image must be zero outside the '
                 'reconstruction circle')
        # Crop image to make it square
        slices = tuple(slice(int(np.ceil(excess / 2)),
                             int(np.ceil(excess / 2) + shape_min))
                       if excess > 0 else slice(None)
                       for excess in (img_shape - shape_min))
        padded_image = image[slices]
    else:
        # Zero-pad so that the whole image stays inside the field of view
        # for every rotation angle; the diagonal is the worst case.
        diagonal = np.sqrt(2) * max(image.shape)
        pad = [int(np.ceil(diagonal - s)) for s in image.shape]
        new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]
        old_center = [s // 2 for s in image.shape]
        pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]
        pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]
        padded_image = np.pad(image, pad_width, mode='constant',
                              constant_values=0)
    # padded_image is always square
    if padded_image.shape[0] != padded_image.shape[1]:
        raise ValueError('padded_image must be a square')
    center = padded_image.shape[0] // 2
    radon_image = np.zeros((padded_image.shape[0], len(theta)),
                           dtype=image.dtype)
    for i, angle in enumerate(np.deg2rad(theta)):
        cos_a, sin_a = np.cos(angle), np.sin(angle)
        # Homogeneous rotation about the image center.  NOTE(review): skimage
        # ``warp`` interprets this matrix as the output->input (inverse)
        # coordinate map.  Summing the rotated image along axis 0 yields the
        # parallel projection for this angle.
        R = np.array([[cos_a, sin_a, -center * (cos_a + sin_a - 1)],
                      [-sin_a, cos_a, -center * (cos_a - sin_a - 1)],
                      [0, 0, 1]])
        rotated = warp(padded_image, R, clip=False)
        radon_image[:, i] = rotated.sum(0)
    return radon_image
def _sinogram_circle_to_square(sinogram):
diagonal = int(np.ceil(np.sqrt(2) * sinogram.shape[0]))
pad = diagonal - sinogram.shape[0]
old_center = sinogram.shape[0] // 2
new_center = diagonal // 2
pad_before = new_center - old_center
pad_width = ((pad_before, pad - pad_before), (0, 0))
return np.pad(sinogram, pad_width, mode='constant', constant_values=0)
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter.
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61.
Parameters
----------
size : int
filter size. Must be even.
filter_name : str
Filter used in frequency domain filtering. Filters available:
ramp, shepp-logan, cosine, hamming, hann. Assign None to use
no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),
np.arange(size / 2 - 1, 0, -2, dtype=int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.linspace(0, np.pi, size, endpoint=False)
cosine_filter = fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
def iradon(radon_image, theta=None, output_size=None,
           filter_name="ramp", interpolation="linear", circle=True,
           preserve_range=True):
    """Inverse radon transform.
    Reconstruct an image from the radon transform, using the filtered
    back projection algorithm.
    Parameters
    ----------
    radon_image : array
        Image containing radon transform (sinogram). Each column of
        the image corresponds to a projection along a different
        angle. The tomography rotation axis should lie at the pixel
        index ``radon_image.shape[0] // 2`` along the 0th dimension of
        ``radon_image``.
    theta : array_like, optional
        Reconstruction angles (in degrees). Default: m angles evenly spaced
        between 0 and 180 (if the shape of `radon_image` is (N, M)).
    output_size : int, optional
        Number of rows and columns in the reconstruction.
    filter_name : str, optional
        Filter used in frequency domain filtering. Ramp filter used by default.
        Filters available: ramp, shepp-logan, cosine, hamming, hann.
        Assign None to use no filter.
    interpolation : str, optional
        Interpolation method used in reconstruction. Methods available:
        'linear', 'nearest', and 'cubic' ('cubic' is slow).
    circle : boolean, optional
        Assume the reconstructed image is zero outside the inscribed circle.
        Also changes the default output_size to match the behaviour of
        ``radon`` called with ``circle=True``.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
    Returns
    -------
    reconstructed : ndarray
        Reconstructed image. The rotation axis will be located in the pixel
        with indices
        ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
    .. versionchanged:: 0.19
        In ``iradon``, ``filter`` argument is deprecated in favor of
        ``filter_name``.
    References
    ----------
    .. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
           Imaging", IEEE Press 1988.
    .. [2] B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
           the Discrete Radon Transform With Some Applications", Proceedings of
           the Fourth IEEE Region 10 International Conference, TENCON '89, 1989
    Notes
    -----
    It applies the Fourier slice theorem to reconstruct an image by
    multiplying the frequency domain of the filter with the FFT of the
    projection data. This algorithm is called filtered back projection.
    """
    if radon_image.ndim != 2:
        raise ValueError('The input image must be 2-D')
    if theta is None:
        # One projection per sinogram column, evenly spread over [0, 180).
        theta = np.linspace(0, 180, radon_image.shape[1], endpoint=False)
    angles_count = len(theta)
    if angles_count != radon_image.shape[1]:
        raise ValueError("The given ``theta`` does not match the number of "
                         "projections in ``radon_image``.")
    interpolation_types = ('linear', 'nearest', 'cubic')
    if interpolation not in interpolation_types:
        raise ValueError(f"Unknown interpolation: {interpolation}")
    filter_types = ('ramp', 'shepp-logan', 'cosine', 'hamming', 'hann', None)
    if filter_name not in filter_types:
        raise ValueError(f"Unknown filter: {filter_name}")
    radon_image = convert_to_float(radon_image, preserve_range)
    dtype = radon_image.dtype
    img_shape = radon_image.shape[0]
    if output_size is None:
        # If output size not specified, estimate from input radon image
        if circle:
            output_size = img_shape
        else:
            output_size = int(np.floor(np.sqrt((img_shape) ** 2 / 2.0)))
    if circle:
        # Pad the detector axis so it matches the circle=False geometry.
        radon_image = _sinogram_circle_to_square(radon_image)
        img_shape = radon_image.shape[0]
    # Resize image to next power of two (but no less than 64) for
    # Fourier analysis; speeds up Fourier and lessens artifacts
    projection_size_padded = max(64, int(2 ** np.ceil(np.log2(2 * img_shape))))
    pad_width = ((0, projection_size_padded - img_shape), (0, 0))
    img = np.pad(radon_image, pad_width, mode='constant', constant_values=0)
    # Apply filter in Fourier domain
    fourier_filter = _get_fourier_filter(projection_size_padded, filter_name)
    projection = fft(img, axis=0) * fourier_filter
    # Keep only the original (unpadded) detector samples after filtering.
    radon_filtered = np.real(ifft(projection, axis=0)[:img_shape, :])
    # Reconstruct image by interpolation
    reconstructed = np.zeros((output_size, output_size),
                             dtype=dtype)
    radius = output_size // 2
    xpr, ypr = np.mgrid[:output_size, :output_size] - radius
    x = np.arange(img_shape) - img_shape // 2
    for col, angle in zip(radon_filtered.T, np.deg2rad(theta)):
        # t is the detector coordinate of each output pixel for this angle;
        # back-project by sampling the filtered projection at t.
        t = ypr * np.cos(angle) - xpr * np.sin(angle)
        if interpolation == 'linear':
            # np.interp is much faster than interp1d for the common case.
            interpolant = partial(np.interp, xp=x, fp=col, left=0, right=0)
        else:
            interpolant = interp1d(x, col, kind=interpolation,
                                   bounds_error=False, fill_value=0)
        reconstructed += interpolant(t)
    if circle:
        # Zero everything outside the reconstruction circle, mirroring the
        # assumption made by ``radon(..., circle=True)``.
        out_reconstruction_circle = (xpr ** 2 + ypr ** 2) > radius ** 2
        reconstructed[out_reconstruction_circle] = 0.
    # Normalization factor from discretizing the inverse Radon integral.
    return reconstructed * np.pi / (2 * angles_count)
def order_angles_golden_ratio(theta):
    """Yield indices into `theta` in approximate golden-ratio order.

    Visiting projections with a golden-section angular increment reduces
    the amount of correlated information in subsequent projections.

    Parameters
    ----------
    theta : 1D array of floats
        Projection angles in degrees. Duplicate angles are not allowed.

    Yields
    ------
    index : int
        Index into ``theta``. Every non-negative integer below
        ``len(theta)`` is yielded exactly once.

    References
    ----------
    .. [1] Kohler, T. "A projection access scheme for iterative
           reconstruction based on the golden section." Nuclear Science
           Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
    .. [2] Winkelmann, Stefanie, et al. "An optimal radial profile order
           based on the Golden Ratio for time-resolved MRI."
           Medical Imaging, IEEE Transactions on 26.1 (2007): 68-76.
    """
    period = 180
    # ``pending`` holds the not-yet-yielded indices, sorted by angle, so
    # ``theta[pending]`` is always an ascending array of remaining angles.
    pending = list(np.argsort(theta))
    # Start from an arbitrary projection (the smallest angle).
    current = theta[pending[0]]
    yield pending.pop(0)
    # Subsequent angles advance by the golden-section increment.
    step = period / golden_ratio ** 2
    while pending:
        sorted_angles = theta[pending]
        current = (current + step) % period
        hi = np.searchsorted(sorted_angles, current)
        lo = hi - 1
        hi %= len(pending)
        gap_lo = abs(current - sorted_angles[lo])
        gap_hi = abs(current - sorted_angles[hi])
        # Compare signed circular distances and take the nearer neighbour.
        dist_lo = min(gap_lo % period, gap_lo % -period)
        dist_hi = min(gap_hi % period, gap_hi % -period)
        yield pending.pop(lo if dist_lo < dist_hi else hi)
def iradon_sart(radon_image, theta=None, image=None, projection_shifts=None,
                clip=None, relaxation=0.15, dtype=None):
    """Inverse radon transform.
    Reconstruct an image from the radon transform, using a single iteration of
    the Simultaneous Algebraic Reconstruction Technique (SART) algorithm.
    Parameters
    ----------
    radon_image : 2D array
        Image containing radon transform (sinogram). Each column of
        the image corresponds to a projection along a different angle. The
        tomography rotation axis should lie at the pixel index
        ``radon_image.shape[0] // 2`` along the 0th dimension of
        ``radon_image``.
    theta : 1D array, optional
        Reconstruction angles (in degrees). Default: m angles evenly spaced
        between 0 and 180 (if the shape of `radon_image` is (N, M)).
    image : 2D array, optional
        Image containing an initial reconstruction estimate. Shape of this
        array should be ``(radon_image.shape[0], radon_image.shape[0])``. The
        default is an array of zeros.
    projection_shifts : 1D array, optional
        Shift the projections contained in ``radon_image`` (the sinogram) by
        this many pixels before reconstructing the image. The i'th value
        defines the shift of the i'th column of ``radon_image``.
    clip : length-2 sequence of floats, optional
        Force all values in the reconstructed tomogram to lie in the range
        ``[clip[0], clip[1]]``
    relaxation : float, optional
        Relaxation parameter for the update step. A higher value can
        improve the convergence rate, but one runs the risk of instabilities.
        Values close to or higher than 1 are not recommended.
    dtype : dtype, optional
        Output data type, must be floating point. By default, if input
        data type is not float, input is cast to double, otherwise
        dtype is set to input data type.
    Returns
    -------
    reconstructed : ndarray
        Reconstructed image. The rotation axis will be located in the pixel
        with indices
        ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
    Notes
    -----
    Algebraic Reconstruction Techniques are based on formulating the tomography
    reconstruction problem as a set of linear equations. Along each ray,
    the projected value is the sum of all the values of the cross section along
    the ray. A typical feature of SART (and a few other variants of algebraic
    techniques) is that it samples the cross section at equidistant points
    along the ray, using linear interpolation between the pixel values of the
    cross section. The resulting set of linear equations are then solved using
    a slightly modified Kaczmarz method.
    When using SART, a single iteration is usually sufficient to obtain a good
    reconstruction. Further iterations will tend to enhance high-frequency
    information, but will also often increase the noise.
    References
    ----------
    .. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
           Imaging", IEEE Press 1988.
    .. [2] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction
           technique (SART): a superior implementation of the ART algorithm",
           Ultrasonic Imaging 6 pp 81--94 (1984)
    .. [3] S Kaczmarz, "Angenäherte auflösung von systemen linearer
           gleichungen", Bulletin International de l'Académie Polonaise des
           Sciences et des Lettres 35 pp 355--357 (1937)
    .. [4] Kohler, T. "A projection access scheme for iterative
           reconstruction based on the golden section." Nuclear Science
           Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
    .. [5] Kaczmarz' method, Wikipedia,
           https://en.wikipedia.org/wiki/Kaczmarz_method
    """
    if radon_image.ndim != 2:
        raise ValueError('radon_image must be two dimensional')
    # Resolve the working floating-point dtype: keep float inputs as-is,
    # cast everything else to double (with a warning).
    if dtype is None:
        if radon_image.dtype.char in 'fd':
            dtype = radon_image.dtype
        else:
            warn("Only floating point data type are valid for SART inverse "
                 "radon transform. Input data is cast to float. To disable "
                 "this warning, please cast image_radon to float.")
            dtype = np.dtype(float)
    elif np.dtype(dtype).char not in 'fd':
        raise ValueError("Only floating point data type are valid for inverse "
                         "radon transform.")
    dtype = np.dtype(dtype)
    radon_image = radon_image.astype(dtype, copy=False)
    # The reconstruction is square with side equal to the detector length.
    reconstructed_shape = (radon_image.shape[0], radon_image.shape[0])
    if theta is None:
        theta = np.linspace(0, 180, radon_image.shape[1],
                            endpoint=False, dtype=dtype)
    elif len(theta) != radon_image.shape[1]:
        raise ValueError(f'Shape of theta ({len(theta)}) does not match the '
                         f'number of projections ({radon_image.shape[1]})')
    else:
        theta = np.asarray(theta, dtype=dtype)
    if image is None:
        image = np.zeros(reconstructed_shape, dtype=dtype)
    elif image.shape != reconstructed_shape:
        raise ValueError(f'Shape of image ({image.shape}) does not match first dimension '
                         f'of radon_image ({reconstructed_shape})')
    elif image.dtype != dtype:
        warn(f'image dtype does not match output dtype: '
             f'image is cast to {dtype}')
        image = np.asarray(image, dtype=dtype)
    if projection_shifts is None:
        projection_shifts = np.zeros((radon_image.shape[1],), dtype=dtype)
    elif len(projection_shifts) != radon_image.shape[1]:
        raise ValueError(f'Shape of projection_shifts ({len(projection_shifts)}) does not match the '
                         f'number of projections ({radon_image.shape[1]})')
    else:
        projection_shifts = np.asarray(projection_shifts, dtype=dtype)
    if clip is not None:
        if len(clip) != 2:
            raise ValueError('clip must be a length-2 sequence')
        clip = np.asarray(clip, dtype=dtype)
    # One SART sweep: visit projections in golden-ratio order ([4]) and apply
    # each angle's correction, scaled by the relaxation factor.
    for angle_index in order_angles_golden_ratio(theta):
        image_update = sart_projection_update(image, theta[angle_index],
                                              radon_image[:, angle_index],
                                              projection_shifts[angle_index])
        image += relaxation * image_update
        if clip is not None:
            image = np.clip(image, clip[0], clip[1])
    return image

View File

@@ -0,0 +1,13 @@
import numpy as np
from skimage.transform import frt2, ifrt2
def test_frt():
    """``ifrt2`` exactly inverts ``frt2`` on a prime-sized integer image."""
    size = 59  # must be prime to ensure that the inverse is unique
    lower = np.tri(size, dtype=np.int32)
    image = lower + lower[::-1]
    restored = ifrt2(frt2(image))
    assert np.array_equal(image, restored)

View File

@@ -0,0 +1,819 @@
import re
import textwrap
import numpy as np
import pytest
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
from skimage.transform import (AffineTransform, EssentialMatrixTransform,
EuclideanTransform, FundamentalMatrixTransform,
PiecewiseAffineTransform, PolynomialTransform,
ProjectiveTransform, SimilarityTransform,
estimate_transform, matrix_transform)
from skimage.transform._geometric import (GeometricTransform,
_affine_matrix_from_vector,
_center_and_normalize_points,
_euler_rotation_matrix)
# Source control points (arbitrary sensor coordinates) shared by the
# transform-estimation tests below.
SRC = np.array([
    [-12.3705, -10.5075],
    [-10.7865, 15.4305],
    [8.6985, 10.8675],
    [11.4975, -9.5715],
    [7.8435, 7.4835],
    [-5.3325, 6.5025],
    [6.7905, -6.3765],
    [-6.1695, -0.8235],
])
# Destination control points paired one-to-one with SRC.
DST = np.array([
    [0, 0],
    [0, 5800],
    [4900, 5800],
    [4900, 0],
    [4479, 4580],
    [1176, 3660],
    [3754, 790],
    [1024, 1931],
])
def test_estimate_transform():
    """Factory accepts every supported transform name; unknown names raise."""
    for tform in ('euclidean', 'similarity', 'affine', 'projective',
                  'polynomial'):
        estimate_transform(tform, SRC[:2, :], DST[:2, :])
    with pytest.raises(ValueError):
        estimate_transform('foobar', SRC[:2, :], DST[:2, :])
def test_matrix_transform():
    """``matrix_transform`` with ``tform.params`` equals calling the transform."""
    tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
    assert_equal(tform(SRC), matrix_transform(SRC, tform.params))
def test_euclidean_estimation():
    """Exact and least-squares Euclidean fits produce a proper rotation block
    (params[0,0] == params[1,1], params[0,1] == -params[1,0])."""
    # exact solution
    tform = estimate_transform('euclidean', SRC[:2, :], SRC[:2, :] + 10)
    assert_almost_equal(tform(SRC[:2, :]), SRC[:2, :] + 10)
    assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
    assert_almost_equal(tform.params[0, 1], - tform.params[1, 0])
    # over-determined
    tform2 = estimate_transform('euclidean', SRC, DST)
    assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
    assert_almost_equal(tform2.params[0, 1], - tform2.params[1, 0])
    # via estimate method
    tform3 = EuclideanTransform()
    assert tform3.estimate(SRC, DST)
    assert_almost_equal(tform3.params, tform2.params)
def test_3d_euclidean_estimation():
    """Estimation recovers a random 3-D rigid motion (rotation, translation)
    from exact point correspondences."""
    src_points = np.random.rand(1000, 3)
    # Random transformation for testing
    angles = np.random.random((3,)) * 2 * np.pi - np.pi
    rotation_matrix = _euler_rotation_matrix(angles)
    translation_vector = np.random.random((3,))
    dst_points = []
    for pt in src_points:
        pt_r = pt.reshape(3, 1)
        dst = np.matmul(rotation_matrix, pt_r) + \
            translation_vector.reshape(3, 1)
        dst = dst.reshape(3)
        dst_points.append(dst)
    dst_points = np.array(dst_points)
    # estimating the transformation
    tform = EuclideanTransform(dimensionality=3)
    assert tform.estimate(src_points, dst_points)
    estimated_rotation = tform.rotation
    estimated_translation = tform.translation
    assert_almost_equal(estimated_rotation, rotation_matrix)
    assert_almost_equal(estimated_translation, translation_vector)
def test_euclidean_init():
    """Implicit parameters round-trip through the matrix constructor,
    including the rotation = 0 and rotation = 90deg special cases."""
    # init with implicit parameters
    rotation = 1
    translation = (1, 1)
    tform = EuclideanTransform(rotation=rotation, translation=translation)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
    # init with transformation matrix
    tform2 = EuclideanTransform(tform.params)
    assert_almost_equal(tform2.rotation, rotation)
    assert_almost_equal(tform2.translation, translation)
    # test special case for scale if rotation=0
    rotation = 0
    translation = (1, 1)
    tform = EuclideanTransform(rotation=rotation, translation=translation)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
    # test special case for scale if rotation=90deg
    rotation = np.pi / 2
    translation = (1, 1)
    tform = EuclideanTransform(rotation=rotation, translation=translation)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
def test_similarity_estimation():
    """Exact and least-squares similarity fits keep the scaled-rotation
    structure of the parameter matrix."""
    # exact solution
    tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
    assert_almost_equal(tform(SRC[:2, :]), DST[:2, :])
    assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
    assert_almost_equal(tform.params[0, 1], - tform.params[1, 0])
    # over-determined
    tform2 = estimate_transform('similarity', SRC, DST)
    assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
    assert_almost_equal(tform2.params[0, 1], - tform2.params[1, 0])
    # via estimate method
    tform3 = SimilarityTransform()
    assert tform3.estimate(SRC, DST)
    assert_almost_equal(tform3.params, tform2.params)
def test_3d_similarity_estimation():
    """Estimation recovers a random 3-D similarity transform (rotation,
    isotropic scale, translation) from exact point correspondences."""
    src_points = np.random.rand(1000, 3)
    # Random transformation for testing
    angles = np.random.random((3,)) * 2 * np.pi - np.pi
    # np.random.randint's lower bound is inclusive: drawing from [0, 20)
    # could yield scale == 0, which collapses every destination point onto
    # the translation vector and makes the estimation degenerate (flaky
    # test).  Draw from [1, 20) instead.
    scale = np.random.randint(1, 20)
    rotation_matrix = _euler_rotation_matrix(angles) * scale
    translation_vector = np.random.random((3,))
    # Apply the transform to all points at once: dst = (s * R) @ p + t.
    dst_points = (rotation_matrix @ src_points.T
                  + translation_vector[:, np.newaxis]).T
    # estimating the transformation
    tform = SimilarityTransform(dimensionality=3)
    assert tform.estimate(src_points, dst_points)
    estimated_rotation = tform.rotation
    estimated_translation = tform.translation
    estimated_scale = tform.scale
    assert_almost_equal(estimated_translation, translation_vector)
    assert_almost_equal(estimated_scale, scale)
    assert_almost_equal(estimated_rotation, rotation_matrix)
def test_similarity_init():
    """Similarity parameters round-trip through the matrix constructor,
    including rotation = 0, rotation = 90deg, and a nearly-90deg matrix."""
    # init with implicit parameters
    scale = 0.1
    rotation = 1
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale, rotation=rotation,
                                translation=translation)
    assert_almost_equal(tform.scale, scale)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
    # init with transformation matrix
    tform2 = SimilarityTransform(tform.params)
    assert_almost_equal(tform2.scale, scale)
    assert_almost_equal(tform2.rotation, rotation)
    assert_almost_equal(tform2.translation, translation)
    # test special case for scale if rotation=0
    scale = 0.1
    rotation = 0
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale, rotation=rotation,
                                translation=translation)
    assert_almost_equal(tform.scale, scale)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
    # test special case for scale if rotation=90deg
    scale = 0.1
    rotation = np.pi / 2
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale, rotation=rotation,
                                translation=translation)
    assert_almost_equal(tform.scale, scale)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
    # test special case for scale where the rotation isn't exactly 90deg,
    # but very close
    scale = 1.0
    rotation = np.pi / 2
    translation = (0, 0)
    params = np.array([[0, -1, 1.33226763e-15],
                       [1, 2.22044605e-16, -1.33226763e-15],
                       [0, 0, 1]])
    tform = SimilarityTransform(params)
    assert_almost_equal(tform.scale, scale)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
def test_affine_estimation():
    """Exact 3-point fit, least-squares fit, and the estimate() method agree."""
    # exact solution
    tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
    assert_almost_equal(tform(SRC[:3, :]), DST[:3, :])
    # over-determined
    tform2 = estimate_transform('affine', SRC, DST)
    assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    # via estimate method
    tform3 = AffineTransform()
    assert tform3.estimate(SRC, DST)
    assert_almost_equal(tform3.params, tform2.params)
def test_affine_init():
    """Affine parameters round-trip through the matrix constructor; scalar
    and tuple ``scale`` arguments are equivalent."""
    # init with implicit parameters
    scale = (0.1, 0.13)
    rotation = 1
    shear = 0.1
    translation = (1, 1)
    tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
                            translation=translation)
    assert_almost_equal(tform.scale, scale)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.shear, shear)
    assert_almost_equal(tform.translation, translation)
    # init with transformation matrix
    tform2 = AffineTransform(tform.params)
    assert_almost_equal(tform2.scale, scale)
    assert_almost_equal(tform2.rotation, rotation)
    assert_almost_equal(tform2.shear, shear)
    assert_almost_equal(tform2.translation, translation)
    # scalar vs. tuple scale arguments
    assert_almost_equal(AffineTransform(scale=0.5).scale,
                        AffineTransform(scale=(0.5, 0.5)).scale)
def test_piecewise_affine():
    """Forward and inverse piecewise-affine maps are exact on control points."""
    tform = PiecewiseAffineTransform()
    assert tform.estimate(SRC, DST)
    # make sure each single affine transform is exactly estimated
    assert_almost_equal(tform(SRC), DST)
    assert_almost_equal(tform.inverse(DST), SRC)
def test_fundamental_matrix_estimation():
    """Estimated fundamental matrix matches a COLMAP-computed reference."""
    src = np.array([1.839035, 1.924743, 0.543582, 0.375221,
                    0.473240, 0.142522, 0.964910, 0.598376,
                    0.102388, 0.140092, 15.994343, 9.622164,
                    0.285901, 0.430055, 0.091150, 0.254594]).reshape(-1, 2)
    dst = np.array([1.002114, 1.129644, 1.521742, 1.846002,
                    1.084332, 0.275134, 0.293328, 0.588992,
                    0.839509, 0.087290, 1.779735, 1.116857,
                    0.878616, 0.602447, 0.642616, 1.028681]).reshape(-1, 2)
    tform = estimate_transform('fundamental', src, dst)
    # Reference values obtained using COLMAP SfM library.
    tform_ref = np.array([[-0.217859, 0.419282, -0.0343075],
                          [-0.0717941, 0.0451643, 0.0216073],
                          [0.248062, -0.429478, 0.0221019]])
    assert_almost_equal(tform.params, tform_ref, 6)
def test_fundamental_matrix_residuals():
    """Residuals for a unit-translation epipolar geometry match by-hand values."""
    essential_matrix_tform = EssentialMatrixTransform(
        rotation=np.eye(3), translation=np.array([1, 0, 0]))
    tform = FundamentalMatrixTransform()
    tform.params = essential_matrix_tform.params
    src = np.array([[0, 0], [0, 0], [0, 0]])
    dst = np.array([[2, 0], [2, 1], [2, 2]])
    assert_almost_equal(tform.residuals(src, dst)**2, [0, 0.5, 2])
@pytest.mark.parametrize('array_like_input', [False, True])
def test_fundamental_matrix_forward(array_like_input):
    """Forward map yields the expected epipolar lines; plain lists/tuples are
    accepted in place of ndarrays."""
    if array_like_input:
        rotation = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        translation = (1, 0, 0)
    else:
        rotation = np.eye(3)
        translation = np.array([1, 0, 0])
    essential_matrix_tform = EssentialMatrixTransform(
        rotation=rotation, translation=translation)
    if array_like_input:
        params = [list(p) for p in essential_matrix_tform.params]
    else:
        params = essential_matrix_tform.params
    tform = FundamentalMatrixTransform(matrix=params)
    src = np.array([[0, 0], [0, 1], [1, 1]])
    assert_almost_equal(tform(src), [[0, -1, 0], [0, -1, 1], [0, -1, 1]])
def test_fundamental_matrix_inverse():
    """Inverse map yields the expected epipolar lines in the source image."""
    essential_matrix_tform = EssentialMatrixTransform(
        rotation=np.eye(3), translation=np.array([1, 0, 0]))
    tform = FundamentalMatrixTransform()
    tform.params = essential_matrix_tform.params
    src = np.array([[0, 0], [0, 1], [1, 1]])
    assert_almost_equal(tform.inverse(src),
                        [[0, 1, 0], [0, 1, -1], [0, 1, -1]])
def test_essential_matrix_init():
    """E = [t]x R: identity rotation with unit z-translation gives the
    expected skew-symmetric matrix."""
    tform = EssentialMatrixTransform(rotation=np.eye(3),
                                     translation=np.array([0, 0, 1]))
    assert_equal(tform.params,
                 np.array([0, -1, 0, 1, 0, 0, 0, 0, 0]).reshape(3, 3))
def test_essential_matrix_estimation():
    """Estimated essential matrix matches a COLMAP-computed reference."""
    src = np.array([1.839035, 1.924743, 0.543582, 0.375221,
                    0.473240, 0.142522, 0.964910, 0.598376,
                    0.102388, 0.140092, 15.994343, 9.622164,
                    0.285901, 0.430055, 0.091150, 0.254594]).reshape(-1, 2)
    dst = np.array([1.002114, 1.129644, 1.521742, 1.846002,
                    1.084332, 0.275134, 0.293328, 0.588992,
                    0.839509, 0.087290, 1.779735, 1.116857,
                    0.878616, 0.602447, 0.642616, 1.028681]).reshape(-1, 2)
    tform = estimate_transform('essential', src, dst)
    # Reference values obtained using COLMAP SfM library.
    tform_ref = np.array([[-0.0811666, 0.255449, -0.0478999],
                          [-0.192392, -0.0531675, 0.119547],
                          [0.177784, -0.22008, -0.015203]])
    assert_almost_equal(tform.params, tform_ref, 6)
def test_essential_matrix_forward():
    """Forward map of a unit-x-translation geometry gives known epipolar lines."""
    tform = EssentialMatrixTransform(rotation=np.eye(3),
                                     translation=np.array([1, 0, 0]))
    src = np.array([[0, 0], [0, 1], [1, 1]])
    assert_almost_equal(tform(src), [[0, -1, 0], [0, -1, 1], [0, -1, 1]])
def test_essential_matrix_inverse():
    """Inverse map of a unit-x-translation geometry gives known epipolar lines."""
    tform = EssentialMatrixTransform(rotation=np.eye(3),
                                     translation=np.array([1, 0, 0]))
    src = np.array([[0, 0], [0, 1], [1, 1]])
    assert_almost_equal(tform.inverse(src),
                        [[0, 1, 0], [0, 1, -1], [0, 1, -1]])
def test_essential_matrix_residuals():
    """Residuals for a unit-x-translation geometry match by-hand values."""
    tform = EssentialMatrixTransform(rotation=np.eye(3),
                                     translation=np.array([1, 0, 0]))
    src = np.array([[0, 0], [0, 0], [0, 0]])
    dst = np.array([[2, 0], [2, 1], [2, 2]])
    assert_almost_equal(tform.residuals(src, dst)**2, [0, 0.5, 2])
def test_projective_estimation():
    """Exact 4-point fit, least-squares fit, and the estimate() method agree."""
    # exact solution
    tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
    assert_almost_equal(tform(SRC[:4, :]), DST[:4, :])
    # over-determined
    tform2 = estimate_transform('projective', SRC, DST)
    assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    # via estimate method
    tform3 = ProjectiveTransform()
    assert tform3.estimate(SRC, DST)
    assert_almost_equal(tform3.params, tform2.params)
def test_projective_weighted_estimation():
    """Unit weights reproduce the unweighted fit; a duplicated point with a
    near-zero weight barely changes the result."""
    # Exact solution with same points, and unity weights
    tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
    tform_w = estimate_transform('projective',
                                 SRC[:4, :], DST[:4, :], np.ones(4))
    assert_almost_equal(tform.params, tform_w.params)
    # Over-determined solution with same points, and unity weights
    tform = estimate_transform('projective', SRC, DST)
    tform_w = estimate_transform('projective',
                                 SRC, DST, np.ones(SRC.shape[0]))
    assert_almost_equal(tform.params, tform_w.params)
    # Repeating a point, but setting its weight small, should give nearly
    # the same result.
    point_weights = np.ones(SRC.shape[0] + 1)
    point_weights[0] = 1.0e-15
    tform1 = estimate_transform('projective', SRC, DST)
    tform2 = estimate_transform('projective',
                                SRC[np.arange(-1, SRC.shape[0]), :],
                                DST[np.arange(-1, SRC.shape[0]), :],
                                point_weights)
    assert_almost_equal(tform1.params, tform2.params, decimal=3)
@pytest.mark.parametrize('array_like_input', [False, True])
def test_projective_init(array_like_input):
    """Estimated parameters round-trip through the matrix constructor,
    for both ndarray and plain-list inputs."""
    tform = estimate_transform('projective', SRC, DST)
    # init with transformation matrix
    if array_like_input:
        params = [list(p) for p in tform.params]
    else:
        params = tform.params
    tform2 = ProjectiveTransform(params)
    assert_almost_equal(tform2.params, tform.params)
def test_polynomial_estimation():
    """Order-10 polynomial fit maps SRC onto DST; estimate() method agrees."""
    # over-determined
    tform = estimate_transform('polynomial', SRC, DST, order=10)
    assert_almost_equal(tform(SRC), DST, 6)
    # via estimate method
    tform2 = PolynomialTransform()
    assert tform2.estimate(SRC, DST, order=10)
    assert_almost_equal(tform2.params, tform.params)
def test_polynomial_weighted_estimation():
    """Unity weights reproduce the unweighted polynomial fit; a near-zero
    weight effectively removes a (duplicated) point."""
    # Over-determined solution with same points, and unity weights
    tform = estimate_transform('polynomial', SRC, DST, order=10)
    tform_w = estimate_transform('polynomial',
                                 SRC,
                                 DST,
                                 order=10,
                                 weights=np.ones(SRC.shape[0]))
    assert_almost_equal(tform.params, tform_w.params)
    # Repeating a point, but setting its weight small, should give nearly
    # the same result.
    point_weights = np.ones(SRC.shape[0] + 1)
    point_weights[0] = 1.0e-15
    tform1 = estimate_transform('polynomial', SRC, DST, order=10)
    tform2 = estimate_transform('polynomial',
                                SRC[np.arange(-1, SRC.shape[0]), :],
                                DST[np.arange(-1, SRC.shape[0]), :],
                                order=10,
                                weights=point_weights)
    assert_almost_equal(tform1.params, tform2.params, decimal=4)
@pytest.mark.parametrize('array_like_input', [False, True])
def test_polynomial_init(array_like_input):
    """PolynomialTransform accepts its parameters as ndarray or lists."""
    reference = estimate_transform('polynomial', SRC, DST, order=10)
    if array_like_input:
        # Exercise the list-of-lists code path for the parameters.
        coeffs = [list(row) for row in reference.params]
    else:
        coeffs = reference.params
    rebuilt = PolynomialTransform(coeffs)
    assert_almost_equal(rebuilt.params, reference.params)
def test_polynomial_default_order():
    """Omitting ``order`` is equivalent to ``order=2``."""
    implicit = estimate_transform('polynomial', SRC, DST)
    explicit = estimate_transform('polynomial', SRC, DST, order=2)
    assert_almost_equal(explicit.params, implicit.params)
def test_polynomial_inverse():
    """A polynomial warp has no analytical inverse; calling it raises."""
    with pytest.raises(Exception):
        PolynomialTransform().inverse(0)
def test_union():
    """Composition via ``+`` multiplies matrices and composes inverses."""
    # Two similarities compose into one with combined scale and rotation.
    tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
    tform = tform1 + tform2
    assert_almost_equal(tform.params, tform3.params)
    # Mixing transform types degrades the result to ProjectiveTransform.
    tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
    tform = tform1 + tform2
    assert_almost_equal(tform.params, tform3.params)
    assert tform.__class__ == ProjectiveTransform
    # Composing a transform with its own inverse yields the identity.
    tform = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    assert_almost_equal((tform + tform.inverse).params, np.eye(3))
    # Composing with an inverse divides the scale / subtracts the angle.
    tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 * 1/0.1, rotation=0.3 - 0.9)
    tform = tform1 + tform2.inverse
    assert_almost_equal(tform.params, tform3.params)
def test_union_differing_types():
    """Composing with an incompatible transform type raises TypeError."""
    tform1 = SimilarityTransform()
    tform2 = PolynomialTransform()
    with pytest.raises(TypeError):
        tform1.__add__(tform2)
def test_geometric_tform():
    """GeometricTransform is abstract; also regression-test gh-3926
    (finite dst coords for homographies whose denominator can vanish)."""
    tform = GeometricTransform()
    # The base class implements none of the transform protocol.
    with pytest.raises(NotImplementedError):
        tform(0)
    with pytest.raises(NotImplementedError):
        tform.inverse(0)
    with pytest.raises(NotImplementedError):
        tform.__add__(0)
    # See gh-3926 for discussion details
    for i in range(20):
        # Generate random Homography
        H = np.random.rand(3, 3) * 100
        H[2, H[2] == 0] += np.finfo(float).eps
        H /= H[2, 2]
        # Craft some src coords
        src = np.array([
            [(H[2, 1] + 1) / -H[2, 0], 1],
            [1, (H[2, 0] + 1) / -H[2, 1]],
            [1, 1],
        ])
        # Prior to gh-3926, under the above circumstances,
        # destination coordinates could be returned with nan/inf values.
        tform = ProjectiveTransform(H)  # Construct the transform
        dst = tform(src)  # Obtain the dst coords
        # Ensure dst coords are finite numeric values
        assert(np.isfinite(dst).all())
def test_invalid_input():
    """Malformed matrices and parameter combinations raise ValueError."""
    # Wrong matrix shapes are rejected by every transform type.
    with pytest.raises(ValueError):
        ProjectiveTransform(np.zeros((2, 3)))
    with pytest.raises(ValueError):
        AffineTransform(np.zeros((2, 3)))
    with pytest.raises(ValueError):
        SimilarityTransform(np.zeros((2, 3)))
    with pytest.raises(ValueError):
        EuclideanTransform(np.zeros((2, 3)))
    # Supplying both a matrix and explicit parameters is ambiguous.
    with pytest.raises(ValueError):
        AffineTransform(matrix=np.zeros((2, 3)), scale=1)
    with pytest.raises(ValueError):
        SimilarityTransform(matrix=np.zeros((2, 3)), scale=1)
    with pytest.raises(ValueError):
        EuclideanTransform(
            matrix=np.zeros((2, 3)), translation=(0, 0))
    with pytest.raises(ValueError):
        PolynomialTransform(np.zeros((3, 3)))
    with pytest.raises(ValueError):
        FundamentalMatrixTransform(matrix=np.zeros((3, 2)))
    with pytest.raises(ValueError):
        EssentialMatrixTransform(matrix=np.zeros((3, 2)))
    # Rotation must be a valid 3x3 rotation matrix ...
    with pytest.raises(ValueError):
        EssentialMatrixTransform(rotation=np.zeros((3, 2)))
    with pytest.raises(ValueError):
        EssentialMatrixTransform(
            rotation=np.zeros((3, 3)))
    # ... and a rotation alone (without translation) is insufficient.
    with pytest.raises(ValueError):
        EssentialMatrixTransform(
            rotation=np.eye(3))
    # Translation must be a 3-vector of unit norm.  (The 2-vector case
    # was previously asserted twice verbatim; the duplicate is removed.)
    with pytest.raises(ValueError):
        EssentialMatrixTransform(rotation=np.eye(3),
                                 translation=np.zeros((2,)))
    with pytest.raises(ValueError):
        EssentialMatrixTransform(
            rotation=np.eye(3), translation=np.zeros((3,)))
def test_degenerate():
    """Degenerate point sets must fail estimation with all-nan params
    (regression tests for gh-3926 and gh-6207)."""
    # All-zero correspondences cannot determine any transform.
    src = dst = np.zeros((10, 2))
    tform = SimilarityTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))
    tform = EuclideanTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))
    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))
    tform = ProjectiveTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))
    # See gh-3926 for discussion details
    tform = ProjectiveTransform()
    for i in range(20):
        # Some random coordinates
        src = np.random.rand(4, 2) * 100
        dst = np.random.rand(4, 2) * 100
        # Degenerate the case by arranging points on a single line
        src[:, 1] = np.random.rand()
        # Prior to gh-3926, under the above circumstances,
        # a transform could be returned with nan values.
        assert(not tform.estimate(src, dst) or np.isfinite(tform.params).all())
    src = np.array([[0, 2, 0], [0, 2, 0], [0, 4, 0]])
    dst = np.array([[0, 1, 0], [0, 1, 0], [0, 3, 0]])
    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    # Prior to gh-6207, the above would set the parameters as the identity.
    assert np.all(np.isnan(tform.params))
    # The tessellation on the following points produces one degenerate affine
    # warp within PiecewiseAffineTransform.
    src = np.asarray([
        [0, 192, 256], [0, 256, 256], [5, 0, 192], [5, 64, 0], [5, 64, 64],
        [5, 64, 256], [5, 192, 192], [5, 256, 256], [0, 192, 256],
    ])
    dst = np.asarray([
        [0, 142, 206], [0, 206, 206], [5, -50, 142], [5, 14, 0], [5, 14, 64],
        [5, 14, 206], [5, 142, 142], [5, 206, 206], [0, 142, 206],
    ])
    tform = PiecewiseAffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.affines[4].params))  # degenerate affine
    # Only the degenerate sub-affine carries nan parameters.
    for idx, affine in enumerate(tform.affines):
        if idx != 4:
            assert not np.all(np.isnan(affine.params))
    for affine in tform.inverse_affines:
        assert not np.all(np.isnan(affine.params))
def test_normalize_degenerate_points():
    """Return nan matrix *of appropriate size* when point is repeated."""
    repeated = np.repeat([[73.42834308, 94.2977623]], 3, axis=0)
    matrix, normed = _center_and_normalize_points(repeated)
    # Shapes are preserved even though normalization is impossible ...
    assert matrix.shape == (3, 3)
    assert normed.shape == repeated.shape
    # ... and every entry is nan to flag the degenerate input.
    assert np.isnan(matrix).all()
    assert np.isnan(normed).all()
def test_projective_repr():
    """repr() of the identity transform matches the expected pattern,
    including the trailing ``at 0x...`` object address."""
    tform = ProjectiveTransform()
    want = re.escape(textwrap.dedent(
        '''
        <ProjectiveTransform(matrix=
            [[1., 0., 0.],
             [0., 1., 0.],
             [0., 0., 1.]]) at
        ''').strip()) + ' 0x[a-f0-9]+' + re.escape('>')
    # Hack the escaped regex to allow whitespace before each number for
    # compatibility with different numpy versions.
    want = want.replace('0\\.', ' *0\\.')
    want = want.replace('1\\.', ' *1\\.')
    assert re.match(want, repr(tform))
def test_projective_str():
    """str() of the identity transform shows its matrix (no address)."""
    tform = ProjectiveTransform()
    want = re.escape(textwrap.dedent(
        '''
        <ProjectiveTransform(matrix=
            [[1., 0., 0.],
             [0., 1., 0.],
             [0., 0., 1.]])>
        ''').strip())
    # Hack the escaped regex to allow whitespace before each number for
    # compatibility with different numpy versions.
    want = want.replace('0\\.', ' *0\\.')
    want = want.replace('1\\.', ' *1\\.')
    # NOTE: a leftover debug ``print(want)`` was removed here.
    assert re.match(want, str(tform))
def _assert_least_squares(tf, src, dst):
baseline = np.sum((tf(src) - dst) ** 2)
for i in range(tf.params.size):
for update in [0.001, -0.001]:
params = np.copy(tf.params)
params.flat[i] += update
new_tf = tf.__class__(matrix=params)
new_ssq = np.sum((new_tf(src) - dst) ** 2)
assert new_ssq > baseline
@pytest.mark.parametrize('array_like_input', [False, True])
def test_estimate_affine_3d(array_like_input):
    """A 3D affine is recovered from noisy correspondences, is a
    least-squares optimum, and accepts array-like (list) inputs."""
    ndim = 3
    src = np.random.random((25, ndim)) * 2 ** np.arange(7, 7 + ndim)
    matrix = np.array([
        [4.8, 0.1, 0.2, 25],
        [0.0, 1.0, 0.1, 30],
        [0.0, 0.0, 1.0, -2],
        [0.0, 0.0, 0.0, 1.]
    ])
    if array_like_input:
        # list of lists for matrix and src coords
        src = [list(c) for c in src]
        matrix = [list(c) for c in matrix]
    tf = AffineTransform(matrix=matrix)
    dst = tf(src)
    dst_noisy = dst + np.random.random((25, ndim))
    if array_like_input:
        # list of lists for destination coords
        dst = [list(c) for c in dst]
    tf2 = AffineTransform(dimensionality=ndim)
    assert tf2.estimate(src, dst_noisy)
    # we check rot/scale/etc more tightly than translation because translation
    # estimation is on the 1 pixel scale
    matrix = np.asarray(matrix)
    assert_almost_equal(tf2.params[:, :-1], matrix[:, :-1], decimal=2)
    assert_almost_equal(tf2.params[:, -1], matrix[:, -1], decimal=0)
    _assert_least_squares(tf2, src, dst_noisy)
def test_fundamental_3d_not_implemented():
    """FundamentalMatrixTransform supports only two dimensions."""
    # Explicit dimensionality and an implicit 4x4 matrix both fail.
    with pytest.raises(NotImplementedError):
        FundamentalMatrixTransform(dimensionality=3)
    with pytest.raises(NotImplementedError):
        FundamentalMatrixTransform(np.eye(4))
def test_array_protocol():
    """np.array() on a transform yields its parameter matrix."""
    matrix = np.eye(4)
    tform = ProjectiveTransform(matrix)
    assert_equal(np.array(tform), matrix)
    # The dtype argument must be honoured as well.
    assert_equal(np.array(tform, dtype=int), matrix.astype(int))
def test_affine_transform_from_linearized_parameters():
    """An affine can be rebuilt from its linearized (flattened) top rows."""
    mat = np.concatenate(
        (np.random.random((3, 4)), np.eye(4)[-1:]), axis=0
    )
    # Drop the constant last row and flatten the remainder.
    v = mat[:-1].ravel()
    mat_from_v = _affine_matrix_from_vector(v)
    tf = AffineTransform(matrix=mat_from_v)
    assert_equal(np.array(tf), mat)
    # incorrect number of parameters
    with pytest.raises(ValueError):
        _ = _affine_matrix_from_vector(v[:-1])
    with pytest.raises(ValueError):
        _ = AffineTransform(matrix=v[:-1])
def test_affine_params_nD_error():
    """Scalar parameters are rejected for a 3D affine transform."""
    with pytest.raises(ValueError):
        AffineTransform(scale=5, dimensionality=3)
def test_euler_rotation():
    """The Euler-angle rotation matrix rotates a vector as expected."""
    v = [0, 10, 0]
    angles = np.radians([90, 45, 45])
    # Hand-computed result for the angles above (1 decimal place).
    expected = [-5, -5, 7.1]
    R = _euler_rotation_matrix(angles)
    assert_almost_equal(R @ v, expected, decimal=1)
def test_euclidean_param_defaults():
    """Omitted Euclidean parameters default to zero rotation/translation,
    and invalid parameter combinations raise ValueError."""
    # 2D rotation is 0 when only translation is given
    tf = EuclideanTransform(translation=(5, 5))
    assert np.array(tf)[0, 1] == 0
    # off diagonals are 0 when only translation is given
    tf = EuclideanTransform(translation=(4, 5, 9), dimensionality=3)
    assert_equal(np.array(tf)[[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]], 0)
    with pytest.raises(ValueError):
        # specifying parameters for D>3 is not supported
        _ = EuclideanTransform(translation=(5, 6, 7, 8), dimensionality=4)
    with pytest.raises(ValueError):
        # incorrect number of angles for given dimensionality
        _ = EuclideanTransform(rotation=(4, 8), dimensionality=3)
    # translation is 0 when rotation is given
    tf = EuclideanTransform(rotation=np.pi * np.arange(3), dimensionality=3)
    assert_equal(np.array(tf)[:-1, 3], 0)
def test_similarity_transform_params():
    """Parameter validation and isotropic scaling for SimilarityTransform."""
    # Explicit parameters are unsupported above three dimensions.
    with pytest.raises(ValueError):
        SimilarityTransform(translation=(4, 5, 6, 7), dimensionality=4)
    # A scalar scale applies uniformly to every 3D axis.
    scaled = SimilarityTransform(scale=4, dimensionality=3)
    assert_equal(scaled([[1, 1, 1]]), [[4, 4, 4]])
def test_euler_angle_consistency():
    """Euclidean and Similarity share the same 3D Euler-angle rotation.

    A fixed seed is used so the test is reproducible (previously the
    angles came from the unseeded global ``np.random`` state).
    """
    rng = np.random.default_rng(0)
    angles = rng.random((3,)) * 2 * np.pi - np.pi
    euclid = EuclideanTransform(rotation=angles, dimensionality=3)
    similar = SimilarityTransform(rotation=angles, dimensionality=3)
    assert_array_almost_equal(euclid, similar)
def test_2D_only_implementations():
    """3D polynomial transforms and 3D rotation/shear are unsupported."""
    with pytest.raises(NotImplementedError):
        PolynomialTransform(dimensionality=3)
    affine3d = AffineTransform(dimensionality=3)
    # The rotation and shear properties are 2D-only.
    with pytest.raises(NotImplementedError):
        affine3d.rotation
    with pytest.raises(NotImplementedError):
        affine3d.shear

View File

@@ -0,0 +1,576 @@
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_equal
from skimage import data, transform
from skimage._shared.testing import test_parallel
from skimage.draw import circle_perimeter, ellipse_perimeter, line
@test_parallel()
def test_hough_line():
    """Peak of the Hough accumulator recovers a drawn line's (dist, theta)."""
    # Generate a test image
    img = np.zeros((100, 150), dtype=int)
    rr, cc = line(60, 130, 80, 10)
    img[rr, cc] = 1
    out, angles, d = transform.hough_line(img)
    # Locate the accumulator maximum and map it back to line parameters.
    y, x = np.where(out == out.max())
    dist = d[y[0]]
    theta = angles[x[0]]
    assert_almost_equal(dist, 80.0, 1)
    assert_almost_equal(theta, 1.41, 1)
def test_hough_line_angles():
    """The returned angle axis mirrors the ``theta`` argument's length."""
    img = np.zeros((10, 10))
    img[0, 0] = 1
    thetas = np.linspace(0, 360, 10)
    out, angles, d = transform.hough_line(img, thetas)
    assert_equal(len(angles), 10)
def test_hough_line_bad_input():
    """hough_line rejects non-2D images with ValueError."""
    signal = np.zeros(100)
    signal[10] = 1
    # A 1D array is not a valid image for the Hough transform.
    with pytest.raises(ValueError):
        transform.hough_line(signal, np.linspace(0, 360, 10))
def test_probabilistic_hough():
    """Probabilistic Hough recovers the two diagonals of an X pattern."""
    # Generate a test image
    img = np.zeros((100, 100), dtype=int)
    for i in range(25, 75):
        img[100 - i, i] = 100
        img[i, i] = 100
    # decrease default theta sampling because similar orientations may confuse
    # as mentioned in article of Galambos et al
    theta = np.linspace(0, np.pi, 45)
    lines = transform.probabilistic_hough_line(
        img, threshold=10, line_length=10, line_gap=1, theta=theta)
    # sort the lines according to the x-axis
    sorted_lines = []
    for ln in lines:
        ln = list(ln)
        ln.sort(key=lambda x: x[0])
        sorted_lines.append(ln)
    # Both diagonals must be among the detected segments.
    assert([(25, 75), (74, 26)] in sorted_lines)
    assert([(25, 25), (74, 74)] in sorted_lines)
    # Execute with default theta
    transform.probabilistic_hough_line(img, line_length=10, line_gap=3)
def test_probabilistic_hough_seed():
    """A fixed seed gives a deterministic number of detected lines."""
    # Load image that is likely to give a randomly varying number of lines
    image = data.checkerboard()
    # Use constant seed to ensure a deterministic output
    lines = transform.probabilistic_hough_line(image, threshold=50,
                                               line_length=50, line_gap=1,
                                               seed=41537233)
    assert len(lines) == 56
def test_probabilistic_hough_bad_input():
    """probabilistic_hough_line rejects non-2D images with ValueError."""
    signal = np.zeros(100)
    signal[10] = 1
    # A 1D array is not a valid image input.
    with pytest.raises(ValueError):
        transform.probabilistic_hough_line(signal)
def test_hough_line_peaks():
    """hough_line_peaks finds exactly one peak for a single drawn line."""
    img = np.zeros((100, 150), dtype=int)
    rr, cc = line(60, 130, 80, 10)
    img[rr, cc] = 1
    hspace, angles, dists = transform.hough_line(img)
    accum, theta, dist = transform.hough_line_peaks(hspace, angles, dists)
    # One line drawn, one peak expected, with the known parameters.
    assert_equal(len(dist), 1)
    assert_almost_equal(dist[0], 81.0, 1)
    assert_almost_equal(theta[0], 1.41, 1)
def test_hough_line_peaks_ordered():
    """Peaks are returned in decreasing accumulator order (PR #1421)."""
    # Regression test per PR #1421
    testim = np.zeros((256, 64), dtype=bool)
    # Lines of decreasing length -> decreasing accumulator values.
    testim[50:100, 20] = True
    testim[20:225, 25] = True
    testim[15:35, 50] = True
    testim[1:-1, 58] = True
    hough_space, angles, dists = transform.hough_line(testim)
    hspace, _, _ = transform.hough_line_peaks(hough_space, angles, dists)
    assert hspace[0] > hspace[1]
def test_hough_line_peaks_single_line():
    """A lone horizontal line yields one peak with the expected params."""
    # Regression test for gh-6187, gh-4129
    # create an empty test image
    img = np.zeros((100, 100), dtype=bool)
    # draw a horizontal line into our test image
    img[30, :] = 1
    hough_space, angles, dist = transform.hough_line(img)
    best_h_space, best_angles, best_dist = transform.hough_line_peaks(
        hough_space, angles, dist
    )
    assert len(best_angles) == 1
    assert len(best_dist) == 1
    # A horizontal line at row 30 corresponds to theta=-pi/2, dist=-30.
    expected_angle = -np.pi / 2
    expected_dist = -30
    assert abs(best_angles[0] - expected_angle) < 0.01
    assert abs(best_dist[0] - expected_dist) < 0.01
def test_hough_line_peaks_dist():
    """min_distance merges peaks of two nearby parallel lines."""
    img = np.zeros((100, 100), dtype=bool)
    img[:, 30] = True
    img[:, 40] = True
    hspace, angles, dists = transform.hough_line(img)
    # The lines are 10 pixels apart: both survive at min_distance=5,
    # only one at min_distance=15.
    near = transform.hough_line_peaks(hspace, angles, dists, min_distance=5)
    assert len(near[0]) == 2
    far = transform.hough_line_peaks(hspace, angles, dists, min_distance=15)
    assert len(far[0]) == 1
def test_hough_line_peaks_angle():
    """Run the shared min_angle checks (kept in a reusable helper)."""
    check_hough_line_peaks_angle()
def check_hough_line_peaks_angle():
    """min_angle suppression across several theta samplings.

    Two perpendicular lines: both survive with min_angle=45 but only one
    with min_angle=90, regardless of the theta range used.
    """
    img = np.zeros((100, 100), dtype=bool)
    img[:, 0] = True
    img[0, :] = True
    # Default theta sampling.
    hspace, angles, dists = transform.hough_line(img)
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_angle=45)[0]) == 2
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_angle=90)[0]) == 1
    # Explicit theta in [0, pi].
    theta = np.linspace(0, np.pi, 100)
    hspace, angles, dists = transform.hough_line(img, theta)
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_angle=45)[0]) == 2
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_angle=90)[0]) == 1
    # Shifted theta range to exercise wrap-around handling.
    theta = np.linspace(np.pi / 3, 4. / 3 * np.pi, 100)
    hspace, angles, dists = transform.hough_line(img, theta)
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_angle=45)[0]) == 2
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_angle=90)[0]) == 1
def test_hough_line_peaks_num():
    """num_peaks caps the number of returned peaks."""
    img = np.zeros((100, 100), dtype=bool)
    img[:, 30] = True
    img[:, 40] = True
    hspace, angles, dists = transform.hough_line(img)
    # With suppression disabled, num_peaks=1 still keeps only one peak.
    peaks = transform.hough_line_peaks(hspace, angles, dists,
                                       min_distance=0, min_angle=0,
                                       num_peaks=1)
    assert len(peaks[0]) == 1
def test_hough_line_peaks_zero_input():
    """An empty image yields no peaks instead of failing."""
    img = np.zeros((100, 100), dtype='uint8')
    theta = np.linspace(0, np.pi, 100)
    hspace, angles, dists = transform.hough_line(img, theta)
    _, found_angles, _ = transform.hough_line_peaks(hspace, angles, dists)
    assert_equal(found_angles, np.array([]))
def test_hough_line_peaks_single_angle():
    """Regression test for gh-4814: a single tested angle used to raise
    IndexError inside hough_line_peaks.

    The input image is now seeded so the regression test is reproducible
    (previously it drew from the unseeded global ``np.random`` state).
    """
    rng = np.random.default_rng(4814)
    img = rng.random((100, 100))
    tested_angles = np.array([np.pi / 2])
    h, theta, d = transform.hough_line(img, theta=tested_angles)
    # Must not raise, whatever peaks are found.
    accum, angles, dists = transform.hough_line_peaks(h, theta, d, threshold=2)
@test_parallel()
def test_hough_circle():
    """The accumulator maximum lands on the drawn circle's center."""
    # Prepare picture
    img = np.zeros((120, 100), dtype=int)
    radius = 20
    x_0, y_0 = (99, 50)
    y, x = circle_perimeter(y_0, x_0, radius)
    img[x, y] = 1
    # Scalar, list and ndarray radius arguments are all equivalent.
    out1 = transform.hough_circle(img, radius)
    out2 = transform.hough_circle(img, [radius])
    assert_equal(out1, out2)
    out = transform.hough_circle(img, np.array([radius], dtype=np.intp))
    assert_equal(out, out1)
    x, y = np.where(out[0] == out[0].max())
    assert_equal(x[0], x_0)
    assert_equal(y[0], y_0)
def test_hough_circle_extended():
    """full_output pads the accumulator so off-image centers are found."""
    # Prepare picture
    # The circle center is outside the image
    img = np.zeros((100, 100), dtype=int)
    radius = 20
    x_0, y_0 = (-5, 50)
    y, x = circle_perimeter(y_0, x_0, radius)
    # Only the in-image part of the perimeter can be drawn.
    img[x[np.where(x > 0)], y[np.where(x > 0)]] = 1
    out = transform.hough_circle(img, np.array([radius], dtype=np.intp),
                                 full_output=True)
    x, y = np.where(out[0] == out[0].max())
    # Offset for x_0, y_0
    assert_equal(x[0], x_0 + radius)
    assert_equal(y[0], y_0 + radius)
def test_hough_circle_peaks():
    """Both drawn circles are recovered with their centers and radii."""
    x_0, y_0, rad_0 = (99, 50, 20)
    img = np.zeros((120, 100), dtype=int)
    y, x = circle_perimeter(y_0, x_0, rad_0)
    img[x, y] = 1
    x_1, y_1, rad_1 = (49, 60, 30)
    y, x = circle_perimeter(y_1, x_1, rad_1)
    img[x, y] = 1
    radii = [rad_0, rad_1]
    hspaces = transform.hough_circle(img, radii)
    out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=1,
                                       min_ydistance=1, threshold=None,
                                       num_peaks=np.inf,
                                       total_num_peaks=np.inf)
    s = np.argsort(out[3])  # sort by radii
    assert_equal(out[1][s], np.array([y_0, y_1]))
    assert_equal(out[2][s], np.array([x_0, x_1]))
    assert_equal(out[3][s], np.array([rad_0, rad_1]))
def test_hough_circle_peaks_total_peak():
    """total_num_peaks=1 keeps only the strongest circle."""
    img = np.zeros((120, 100), dtype=int)
    x_0, y_0, rad_0 = (99, 50, 20)
    y, x = circle_perimeter(y_0, x_0, rad_0)
    img[x, y] = 1
    x_1, y_1, rad_1 = (49, 60, 30)
    y, x = circle_perimeter(y_1, x_1, rad_1)
    img[x, y] = 1
    radii = [rad_0, rad_1]
    hspaces = transform.hough_circle(img, radii)
    out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=1,
                                       min_ydistance=1, threshold=None,
                                       num_peaks=np.inf, total_num_peaks=1)
    # The bigger circle has the higher raw peak and wins.
    assert_equal(out[1][0], np.array([y_1, ]))
    assert_equal(out[2][0], np.array([x_1, ]))
    assert_equal(out[3][0], np.array([rad_1, ]))
def test_hough_circle_peaks_min_distance():
    """Spatially close, weaker circles are suppressed by min_*distance."""
    x_0, y_0, rad_0 = (50, 50, 20)
    img = np.zeros((120, 100), dtype=int)
    y, x = circle_perimeter(y_0, x_0, rad_0)
    img[x, y] = 1
    x_1, y_1, rad_1 = (60, 60, 30)
    y, x = circle_perimeter(y_1, x_1, rad_1)
    # Add noise and create an imperfect circle to lower the peak in Hough space
    y[::2] += 1
    x[::2] += 1
    img[x, y] = 1
    x_2, y_2, rad_2 = (70, 70, 20)
    y, x = circle_perimeter(y_2, x_2, rad_2)
    # Add noise and create an imperfect circle to lower the peak in Hough space
    y[::2] += 1
    x[::2] += 1
    img[x, y] = 1
    radii = [rad_0, rad_1, rad_2]
    hspaces = transform.hough_circle(img, radii)
    out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=15,
                                       min_ydistance=15, threshold=None,
                                       num_peaks=np.inf,
                                       total_num_peaks=np.inf,
                                       normalize=True)
    # The second circle is too close to the first one
    # and has a weaker peak in Hough space due to imperfectness.
    # Therefore it got removed.
    assert_equal(out[1], np.array([y_0, y_2]))
    assert_equal(out[2], np.array([x_0, x_2]))
    assert_equal(out[3], np.array([rad_0, rad_2]))
def test_hough_circle_peaks_total_peak_and_min_distance():
    """Distance suppression and total_num_peaks combine to filter peaks."""
    img = np.zeros((120, 120), dtype=int)
    cx = cy = [40, 50, 60, 70, 80]
    radii = range(20, 30, 2)
    for i in range(len(cx)):
        y, x = circle_perimeter(cy[i], cx[i], radii[i])
        img[x, y] = 1
    hspaces = transform.hough_circle(img, radii)
    out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=15,
                                       min_ydistance=15, threshold=None,
                                       num_peaks=np.inf,
                                       total_num_peaks=2,
                                       normalize=True)
    # 2nd (4th) circle is removed as it is close to 1st (3rd) oneself.
    # 5th is removed as total_num_peaks = 2
    assert_equal(out[1], np.array(cy[:4:2]))
    assert_equal(out[2], np.array(cx[:4:2]))
    assert_equal(out[3], np.array(radii[:4:2]))
def test_hough_circle_peaks_normalize():
    """Without normalization the larger circle wins on raw peak height."""
    x_0, y_0, rad_0 = (50, 50, 20)
    img = np.zeros((120, 100), dtype=int)
    y, x = circle_perimeter(y_0, x_0, rad_0)
    img[x, y] = 1
    x_1, y_1, rad_1 = (60, 60, 30)
    y, x = circle_perimeter(y_1, x_1, rad_1)
    img[x, y] = 1
    radii = [rad_0, rad_1]
    hspaces = transform.hough_circle(img, radii)
    out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=15,
                                       min_ydistance=15, threshold=None,
                                       num_peaks=np.inf,
                                       total_num_peaks=np.inf,
                                       normalize=False)
    # Two perfect circles are close but the second one is bigger.
    # Therefore, it is picked due to its high peak.
    assert_equal(out[1], np.array([y_1]))
    assert_equal(out[2], np.array([x_1]))
    assert_equal(out[3], np.array([rad_1]))
def test_hough_ellipse_zero_angle():
    """An axis-aligned ellipse is recovered with zero orientation."""
    img = np.zeros((25, 25), dtype=int)
    rx = 6
    ry = 8
    x0 = 12
    y0 = 15
    angle = 0
    rr, cc = ellipse_perimeter(y0, x0, ry, rx)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=9)
    # Result rows are (accumulator, yc, xc, a, b, orientation).
    best = result[-1]
    assert_equal(best[1], y0)
    assert_equal(best[2], x0)
    assert_almost_equal(best[3], ry, decimal=1)
    assert_almost_equal(best[4], rx, decimal=1)
    assert_equal(best[5], angle)
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle1():
    """Rotated ellipse (ry > rx, positive angle) is recovered."""
    # ry > rx, angle in [0:pi/2]
    img = np.zeros((30, 24), dtype=int)
    rx = 6
    ry = 12
    x0 = 10
    y0 = 15
    angle = np.pi / 1.35
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    # Strongest candidate = highest accumulator value after sorting.
    result.sort(order='accumulator')
    best = result[-1]
    assert_almost_equal(best[1] / 100., y0 / 100., decimal=1)
    assert_almost_equal(best[2] / 100., x0 / 100., decimal=1)
    assert_almost_equal(best[3] / 10., ry / 10., decimal=1)
    assert_almost_equal(best[4] / 100., rx / 100., decimal=1)
    assert_almost_equal(best[5], angle, decimal=1)
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle2():
    """Rotated ellipse (ry < rx, positive angle) is recovered."""
    # ry < rx, angle in [0:pi/2]
    img = np.zeros((30, 24), dtype=int)
    rx = 12
    ry = 6
    x0 = 10
    y0 = 15
    angle = np.pi / 1.35
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    # Strongest candidate = highest accumulator value after sorting.
    result.sort(order='accumulator')
    best = result[-1]
    assert_almost_equal(best[1] / 100., y0 / 100., decimal=1)
    assert_almost_equal(best[2] / 100., x0 / 100., decimal=1)
    assert_almost_equal(best[3] / 10., ry / 10., decimal=1)
    assert_almost_equal(best[4] / 100., rx / 100., decimal=1)
    assert_almost_equal(best[5], angle, decimal=1)
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle3():
    """Rotated ellipse (ry < rx, angle in the second quadrant): the
    detected parameters re-draw the original perimeter."""
    # ry < rx, angle in [pi/2:pi]
    img = np.zeros((30, 24), dtype=int)
    rx = 12
    ry = 6
    x0 = 10
    y0 = 15
    angle = np.pi / 1.35 + np.pi / 2.
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    result.sort(order='accumulator')
    best = result[-1]
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle4():
    """Rotated ellipse (ry < rx, angle beyond pi): the detected
    parameters re-draw the original perimeter."""
    # ry < rx, angle in [pi:3pi/4]
    img = np.zeros((30, 24), dtype=int)
    rx = 12
    ry = 6
    x0 = 10
    y0 = 15
    angle = np.pi / 1.35 + np.pi
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    result.sort(order='accumulator')
    best = result[-1]
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle1():
    """Rotated ellipse (ry > rx, negative angle): the detected
    parameters re-draw the original perimeter."""
    # ry > rx, angle in [0:-pi/2]
    img = np.zeros((30, 24), dtype=int)
    rx = 6
    ry = 12
    x0 = 10
    y0 = 15
    angle = - np.pi / 1.35
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    result.sort(order='accumulator')
    best = result[-1]
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle2():
    """Rotated ellipse (ry < rx, negative angle): the detected
    parameters re-draw the original perimeter."""
    # ry < rx, angle in [0:-pi/2]
    img = np.zeros((30, 24), dtype=int)
    rx = 12
    ry = 6
    x0 = 10
    y0 = 15
    angle = - np.pi / 1.35
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    result.sort(order='accumulator')
    best = result[-1]
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle3():
    """Rotated ellipse (ry < rx, angle below -pi/2): the detected
    parameters re-draw the original perimeter."""
    # ry < rx, angle in [-pi/2:-pi]
    img = np.zeros((30, 24), dtype=int)
    rx = 12
    ry = 6
    x0 = 10
    y0 = 15
    angle = - np.pi / 1.35 - np.pi / 2.
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    result.sort(order='accumulator')
    best = result[-1]
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle4():
    """Rotated ellipse (ry < rx, angle below -pi): the detected
    parameters re-draw the original perimeter."""
    # ry < rx, angle in [-pi:-3pi/4]
    img = np.zeros((30, 24), dtype=int)
    rx = 12
    ry = 6
    x0 = 10
    y0 = 15
    angle = - np.pi / 1.35 - np.pi
    rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
    img[rr, cc] = 1
    result = transform.hough_ellipse(img, threshold=15, accuracy=3)
    result.sort(order='accumulator')
    best = result[-1]
    # Check if I re-draw the ellipse, points are the same!
    # ie check API compatibility between hough_ellipse and ellipse_perimeter
    rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
                                 orientation=best[5])
    assert_equal(rr, rr2)
    assert_equal(cc, cc2)
def test_hough_ellipse_all_black_img():
    """An all-black image yields an empty (0, 6) result table."""
    blank = np.zeros((100, 100))
    result = transform.hough_ellipse(blank)
    assert result.shape == (0, 6)

View File

@@ -0,0 +1,64 @@
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from skimage.transform import integral_image, integrate
# Deterministic module-level fixture: a seeded random uint8 image and its
# integral image, shared by the integrate() tests below.
np.random.seed(0)
x = (np.random.rand(50, 50) * 255).astype(np.uint8)
s = integral_image(x)
@pytest.mark.parametrize(
    'dtype', [np.float16, np.float32, np.float64, np.uint8, np.int32]
)
@pytest.mark.parametrize('dtype_as_kwarg', [False, True])
def test_integral_image_validity(dtype, dtype_as_kwarg):
    """Output dtype promotion rules and bottom-right corner-sum value."""
    rstate = np.random.default_rng(1234)
    dtype_kwarg = dtype if dtype_as_kwarg else None
    y = (rstate.random((20, 20)) * 255).astype(dtype)
    out = integral_image(y, dtype=dtype_kwarg)
    if y.dtype.kind == 'f':
        if dtype_as_kwarg:
            # An explicit float dtype is honoured as-is.
            assert out.dtype == dtype
            rtol = 1e-3 if dtype == np.float16 else 1e-7
            assert_allclose(out[-1, -1], y.sum(dtype=np.float64), rtol=rtol)
        else:
            # Floats are promoted to float64 by default.
            assert out.dtype == np.float64
            assert_allclose(out[-1, -1], y.sum(dtype=np.float64))
    else:
        # Integer kinds (signed/unsigned) are preserved.
        assert out.dtype.kind == y.dtype.kind
        if not (dtype_as_kwarg and dtype == np.uint8):
            # omit check for dtype=uint8 case as it will overflow
            assert_equal(out[-1, -1], y.sum())
def test_integrate_basic():
    """integrate() over rectangular windows equals the direct slice sums."""
    windows = (
        ((12, 10), (23, 19), x[12:24, 10:20]),
        ((0, 0), (19, 19), x[:20, :20]),
        ((0, 10), (19, 19), x[:20, 10:20]),
        ((10, 0), (19, 19), x[10:20, :20]),
    )
    for start, end, patch in windows:
        assert_equal(patch.sum(), integrate(s, start, end))
def test_integrate_single():
    """A degenerate 1x1 window returns the single pixel value."""
    for r, c in ((0, 0), (10, 10)):
        assert_equal(x[r, c], integrate(s, (r, c), (r, c)))
def test_vectorized_integrate():
    """integrate() accepts vectors of window corners in a single call."""
    r0 = np.array([12, 0, 0, 10, 0, 10, 30])
    c0 = np.array([10, 0, 10, 0, 0, 10, 31])
    r1 = np.array([23, 19, 19, 19, 0, 10, 49])
    c1 = np.array([19, 19, 19, 19, 0, 10, 49])
    expected = np.array([x[12:24, 10:20].sum(),
                         x[:20, :20].sum(),
                         x[:20, 10:20].sum(),
                         x[10:20, :20].sum(),
                         x[0, 0],
                         x[10, 10],
                         x[30:, 31:].sum()])
    # Pair the corner coordinates with zip instead of indexing by
    # position (range(len(...)) anti-idiom).
    start_pts = list(zip(r0, c0))
    end_pts = list(zip(r1, c1))
    assert_equal(expected, integrate(s, start_pts, end_pts))

View File

@@ -0,0 +1,212 @@
import math
import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal
from skimage import data
from skimage._shared.utils import _supported_float_type
from skimage.transform import pyramids
# Shared fixtures: an RGB test image and its first (red) channel as gray.
image = data.astronaut()
image_gray = image[..., 0]
@pytest.mark.parametrize('channel_axis', [0, 1, -1])
def test_pyramid_reduce_rgb(channel_axis):
    """RGB reduce halves spatial dims and preserves the channel axis."""
    image = data.astronaut()
    rows, cols, dim = image.shape
    # Move the channel axis to the position under test.
    image = np.moveaxis(image, source=-1, destination=channel_axis)
    out_ = pyramids.pyramid_reduce(image, downscale=2,
                                   channel_axis=channel_axis)
    out = np.moveaxis(out_, channel_axis, -1)
    assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_reduce_gray():
    """Grayscale reduce halves the shape; value range follows
    ``preserve_range``."""
    rows, cols = image_gray.shape
    out1 = pyramids.pyramid_reduce(image_gray, downscale=2,
                                   channel_axis=None)
    assert_array_equal(out1.shape, (rows / 2, cols / 2))
    # np.ptp(arr) instead of arr.ptp(): the ndarray method was removed
    # in NumPy 2.0.
    assert_almost_equal(np.ptp(out1), 1.0, decimal=2)
    out2 = pyramids.pyramid_reduce(image_gray, downscale=2,
                                   channel_axis=None, preserve_range=True)
    assert_almost_equal(np.ptp(out2) / np.ptp(image_gray), 1.0, decimal=2)
def test_pyramid_reduce_gray_defaults():
    """Default arguments behave like downscale=2 on grayscale input."""
    rows, cols = image_gray.shape
    out1 = pyramids.pyramid_reduce(image_gray)
    assert_array_equal(out1.shape, (rows / 2, cols / 2))
    # np.ptp(arr) instead of arr.ptp(): the ndarray method was removed
    # in NumPy 2.0.
    assert_almost_equal(np.ptp(out1), 1.0, decimal=2)
    out2 = pyramids.pyramid_reduce(image_gray, preserve_range=True)
    assert_almost_equal(np.ptp(out2) / np.ptp(image_gray), 1.0, decimal=2)
def test_pyramid_reduce_nd():
    """pyramid_reduce halves every axis for 1D through 4D inputs."""
    for ndim in (1, 2, 3, 4):
        cube = np.random.randn(*((8,) * ndim))
        reduced = pyramids.pyramid_reduce(cube, downscale=2,
                                          channel_axis=None)
        assert_array_equal(reduced.shape, np.asarray(cube.shape) / 2)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_pyramid_expand_rgb(channel_axis):
    """Expanding a multichannel image doubles spatial axes, keeps channels."""
    img = data.astronaut()
    nrows, ncols, nchannels = img.shape
    img = np.moveaxis(img, source=-1, destination=channel_axis)
    expanded = pyramids.pyramid_expand(img, upscale=2,
                                       channel_axis=channel_axis)
    want = [nrows * 2, ncols * 2]
    want.insert(channel_axis % img.ndim, nchannels)
    assert_array_equal(expanded.shape, want)
def test_pyramid_expand_gray():
    """Grayscale expand doubles both axes."""
    nrows, ncols = image_gray.shape
    expanded = pyramids.pyramid_expand(image_gray, upscale=2)
    assert_array_equal(expanded.shape, (nrows * 2, ncols * 2))
def test_pyramid_expand_nd():
    """Expand doubles every axis for 1- to 4-dimensional inputs."""
    for ndim in range(1, 5):
        img = np.random.randn(*((4,) * ndim))
        expanded = pyramids.pyramid_expand(img, upscale=2,
                                           channel_axis=None)
        assert_array_equal(expanded.shape, np.asarray(img.shape) * 2)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_build_gaussian_pyramid_rgb(channel_axis):
    """Every gaussian level halves the spatial axes and keeps channels."""
    img = data.astronaut()
    nrows, ncols, nchannels = img.shape
    img = np.moveaxis(img, source=-1, destination=channel_axis)
    levels = pyramids.pyramid_gaussian(img, downscale=2,
                                       channel_axis=channel_axis)
    for depth, level in enumerate(levels):
        want = [nrows / 2 ** depth, ncols / 2 ** depth]
        want.insert(channel_axis % img.ndim, nchannels)
        assert level.shape == tuple(want)
def test_build_gaussian_pyramid_gray():
    """Every grayscale gaussian level halves both axes."""
    nrows, ncols = image_gray.shape
    levels = pyramids.pyramid_gaussian(image_gray, downscale=2,
                                       channel_axis=None)
    for depth, level in enumerate(levels):
        assert_array_equal(level.shape,
                           (nrows / 2 ** depth, ncols / 2 ** depth))
def test_build_gaussian_pyramid_gray_defaults():
    """Default arguments produce the same halving behavior."""
    nrows, ncols = image_gray.shape
    for depth, level in enumerate(pyramids.pyramid_gaussian(image_gray)):
        assert_array_equal(level.shape,
                           (nrows / 2 ** depth, ncols / 2 ** depth))
def test_build_gaussian_pyramid_nd():
    """Gaussian pyramid halves every axis per level for 1-4D inputs."""
    for ndim in range(1, 5):
        img = np.random.randn(*((8,) * ndim))
        base_shape = np.asarray(img.shape)
        levels = pyramids.pyramid_gaussian(img, downscale=2,
                                           channel_axis=None)
        for depth, level in enumerate(levels):
            assert_array_equal(level.shape, base_shape / 2 ** depth)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_build_laplacian_pyramid_rgb(channel_axis):
    """Every laplacian level halves the spatial axes and keeps channels."""
    img = data.astronaut()
    nrows, ncols, nchannels = img.shape
    img = np.moveaxis(img, source=-1, destination=channel_axis)
    levels = pyramids.pyramid_laplacian(img, downscale=2,
                                        channel_axis=channel_axis)
    for depth, level in enumerate(levels):
        want = [nrows / 2 ** depth, ncols / 2 ** depth]
        want.insert(channel_axis % img.ndim, nchannels)
        assert level.shape == tuple(want)
def test_build_laplacian_pyramid_defaults():
    """Default laplacian pyramid halves both axes per level."""
    nrows, ncols = image_gray.shape
    for depth, level in enumerate(pyramids.pyramid_laplacian(image_gray)):
        assert_array_equal(level.shape,
                           (nrows / 2 ** depth, ncols / 2 ** depth))
def test_build_laplacian_pyramid_nd():
    """Laplacian pyramid halves every axis per level for 1-4D inputs."""
    for ndim in range(1, 5):
        img = np.random.randn(*((16,) * ndim))
        base_shape = np.asarray(img.shape)
        levels = pyramids.pyramid_laplacian(img, downscale=2,
                                            channel_axis=None)
        for depth, level in enumerate(levels):
            assert_array_equal(level.shape, base_shape / 2 ** depth)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_laplacian_pyramid_max_layers(channel_axis):
    """The laplacian pyramid stops exactly when all spatial axes reach 1.

    For several downscale factors, check that the number of generated
    levels is ``max_layer + 1`` and that only the final level has all
    spatial axes collapsed to size 1.

    NOTE(review): the ``channel_axis is None`` branch below is currently
    unreachable — None is not in the parametrize list; confirm whether it
    should be added.
    """
    for downscale in [2, 3, 5, 7]:
        if channel_axis is None:
            shape = (32, 8)
            shape_without_channels = shape
        else:
            # Insert a 5-channel axis at the requested position.
            shape_without_channels = (32, 8)
            ndim = len(shape_without_channels) + 1
            n_channels = 5
            shape = list(shape_without_channels)
            shape.insert(channel_axis % ndim, n_channels)
            shape = tuple(shape)
        img = np.ones(shape)
        pyramid = pyramids.pyramid_laplacian(img, downscale=downscale,
                                             channel_axis=channel_axis)
        # Expected number of reductions before all spatial axes hit 1.
        max_layer = math.ceil(math.log(max(shape_without_channels), downscale))
        for layer, out in enumerate(pyramid):
            if channel_axis is None:
                out_shape_without_channels = out.shape
            else:
                # The channel axis must never be downscaled.
                assert out.shape[channel_axis] == n_channels
                out_shape_without_channels = list(out.shape)
                out_shape_without_channels.pop(channel_axis)
                out_shape_without_channels = tuple(out_shape_without_channels)
            if layer < max_layer:
                # should not reach all axes as size 1 prior to final level
                assert max(out_shape_without_channels) > 1
        # total number of images is max_layer + 1
        assert_equal(max_layer, layer)
        # final layer should be size 1 on all axes
        assert out_shape_without_channels == (1, 1)
def test_check_factor():
    """Scale factors that are not strictly greater than 1 are rejected."""
    for bad_factor in (0.99, -2):
        with pytest.raises(ValueError):
            pyramids._check_factor(bad_factor)
@pytest.mark.parametrize(
    'dtype', ['float16', 'float32', 'float64', 'uint8', 'int64']
)
@pytest.mark.parametrize(
    'pyramid_func', [pyramids.pyramid_gaussian, pyramids.pyramid_laplacian]
)
def test_pyramid_dtype_support(pyramid_func, dtype):
    """Every pyramid level uses the supported float type for the input."""
    img = np.random.randn(32, 8).astype(dtype)
    expected_dtype = _supported_float_type(dtype)
    for level in pyramid_func(img):
        assert level.dtype == expected_dtype

View File

@@ -0,0 +1,493 @@
import itertools
import numpy as np
import pytest
from skimage._shared._dependency_checks import has_mpl
from skimage._shared._warnings import expected_warnings
from skimage._shared.testing import test_parallel
from skimage._shared.utils import _supported_float_type, convert_to_float
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon, iradon_sart, rescale
# Shared test fixture: Shepp-Logan phantom shrunk 4x in each dimension
# (slice by 2, then rescale by 0.5) to keep the round-trip tests fast.
PHANTOM = shepp_logan_phantom()[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1,
                  mode='constant', anti_aliasing=False, channel_axis=None)
def _debug_plot(original, result, sinogram=None):
    """Plot original vs. reconstruction (plus optional sinogram and a
    difference image); only called when a test's ``debug`` flag is True.

    NOTE: the sinogram subplot lives in the ``else`` branch — with
    ``sp = 130`` there is no subplot ``sp + 0``.
    """
    from matplotlib import pyplot as plt
    imkwargs = dict(cmap='gray', interpolation='nearest')
    if sinogram is None:
        # 1x3 layout: original, result, difference.
        plt.figure(figsize=(15, 6))
        sp = 130
    else:
        # 2x2 layout: sinogram, original, result, difference.
        plt.figure(figsize=(11, 11))
        sp = 221
        plt.subplot(sp + 0)
        plt.imshow(sinogram, aspect='auto', **imkwargs)
    plt.subplot(sp + 1)
    plt.imshow(original, **imkwargs)
    plt.subplot(sp + 2)
    plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
    plt.subplot(sp + 3)
    plt.imshow(result - original, **imkwargs)
    plt.colorbar()
    plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def test_iradon_bias_circular_phantom():
    """A uniform circular phantom reconstructs with a tiny mean bias."""
    pixels = 128
    coords = np.arange(-pixels / 2, pixels / 2) + 0.5
    xx, yy = np.meshgrid(coords, coords)
    disc = xx ** 2 + yy ** 2 <= (pixels / 4) ** 2
    theta = np.linspace(0., 180., max(disc.shape), endpoint=False)
    sinogram = radon(disc, theta=theta)
    reconstruction_fbp = iradon(sinogram, theta=theta)
    # Bias = absolute mean of the reconstruction error.
    bias = np.abs(np.mean(reconstruction_fbp - disc))
    assert bias < 5e-5
def check_radon_center(shape, circle, dtype, preserve_range):
    """A centered point source must project to a flat horizontal line."""
    img = np.zeros(shape, dtype=dtype)
    img[shape[0] // 2, shape[1] // 2] = 1.
    theta = np.linspace(0., 180., max(shape), endpoint=False)
    sinogram = radon(img, theta=theta, circle=circle,
                     preserve_range=preserve_range)
    assert sinogram.dtype == _supported_float_type(sinogram.dtype)
    # Row index of the peak must be identical for every angle.
    peak_rows = np.argmax(sinogram, axis=0)
    print(peak_rows)
    assert np.std(peak_rows) < 1e-6
@pytest.mark.parametrize("shape", [(16, 16), (17, 17)])
@pytest.mark.parametrize("circle", [False, True])
@pytest.mark.parametrize(
    "dtype", [np.float64, np.float32, np.float16, np.uint8, bool]
)
@pytest.mark.parametrize("preserve_range", [False, True])
def test_radon_center(shape, circle, dtype, preserve_range):
    """Square images: centered point projects to a flat sinogram line."""
    check_radon_center(shape, circle, dtype, preserve_range)
@pytest.mark.parametrize("shape", [(32, 16), (33, 17)])
@pytest.mark.parametrize("circle", [False])
@pytest.mark.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@pytest.mark.parametrize("preserve_range", [False, True])
def test_radon_center_rectangular(shape, circle, dtype, preserve_range):
    """Rectangular images (circle=False only): same flat-line property."""
    check_radon_center(shape, circle, dtype, preserve_range)
def check_iradon_center(size, theta, circle):
    """Reconstructions from ``theta`` and ``theta + 180`` must be equal.

    Builds a single-projection sinogram with one hot pixel at the
    rotation center and reconstructs it at the angle and its opposite.
    """
    debug = False
    # Create a test sinogram corresponding to a single projection
    # with a single non-zero pixel at the rotation center
    if circle:
        sinogram = np.zeros((size, 1), dtype=float)
        sinogram[size // 2, 0] = 1.
    else:
        # circle=False sinograms span the image diagonal.
        diagonal = int(np.ceil(np.sqrt(2) * size))
        sinogram = np.zeros((diagonal, 1), dtype=float)
        sinogram[sinogram.shape[0] // 2, 0] = 1.
    maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
    print('shape of generated sinogram', sinogram.shape)
    print('maximum in generated sinogram', maxpoint)
    # Compare reconstructions for theta=angle and theta=angle + 180;
    # these should be exactly equal
    reconstruction = iradon(sinogram, theta=[theta], circle=circle)
    reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
                                     circle=circle)
    print('rms deviance:',
          np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
    if debug and has_mpl:
        # Visual inspection aid; disabled by default.
        import matplotlib.pyplot as plt
        imkwargs = dict(cmap='gray', interpolation='nearest')
        plt.figure()
        plt.subplot(221)
        plt.imshow(sinogram, **imkwargs)
        plt.subplot(222)
        plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
        plt.subplot(223)
        plt.imshow(reconstruction, **imkwargs)
        plt.subplot(224)
        plt.imshow(reconstruction_opposite, **imkwargs)
        plt.show()
    assert np.allclose(reconstruction, reconstruction_opposite)
# Parameter grids combined by itertools.product in test_iradon_center.
sizes_for_test_iradon_center = [16, 17]
thetas_for_test_iradon_center = [0, 90]
circles_for_test_iradon_center = [False, True]
@pytest.mark.parametrize(
    "size, theta, circle",
    itertools.product(sizes_for_test_iradon_center,
                      thetas_for_test_iradon_center,
                      circles_for_test_iradon_center)
)
def test_iradon_center(size, theta, circle):
    """Reconstructions at an angle and its opposite must match exactly."""
    check_iradon_center(size, theta, circle)
def check_radon_iradon(interpolation_type, filter_type):
    """Round-trip radon -> iradon error stays within a per-filter bound."""
    debug = False
    image = PHANTOM
    reconstructed = iradon(radon(image, circle=False),
                           filter_name=filter_type,
                           interpolation=interpolation_type,
                           circle=False)
    delta = np.mean(np.abs(image - reconstructed))
    print('\n\tmean error:', delta)
    if debug and has_mpl:
        _debug_plot(image, reconstructed)
    # Sharper filters (ramp, shepp-logan) get a tighter tolerance.
    if filter_type not in ('ramp', 'shepp-logan'):
        allowed_delta = 0.05
    elif interpolation_type == 'nearest':
        allowed_delta = 0.03
    else:
        allowed_delta = 0.025
    assert delta < allowed_delta
# All (interpolation, filter) pairs exercised by test_radon_iradon.
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
radon_iradon_inputs = list(itertools.product(interpolation_types,
                                             filter_types))
# cubic interpolation is slow; only run one test for it
radon_iradon_inputs.append(('cubic', 'shepp-logan'))
@pytest.mark.parametrize(
    "interpolation_type, filter_type", radon_iradon_inputs
)
def test_radon_iradon(interpolation_type, filter_type):
    """Round-trip error stays within the per-filter tolerance."""
    check_radon_iradon(interpolation_type, filter_type)
def test_iradon_angles():
    """Reconstruction quality degrades with fewer projection angles.

    Reconstructs a synthetic image from 200 and from 80 projections and
    checks that the 80-projection error is strictly larger.
    """
    size = 100
    # Synthetic data
    image = np.tri(size) + np.tri(size)[::-1]
    # Large number of projections: a good quality is expected
    nb_angles = 200
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_200 = radon(image, theta=theta, circle=False)
    reconstructed = iradon(radon_image_200, circle=False)
    delta_200 = np.mean(abs(_rescale_intensity(image) -
                            _rescale_intensity(reconstructed)))
    assert delta_200 < 0.03
    # Lower number of projections
    nb_angles = 80
    # BUG FIX: theta must be regenerated for the reduced angle count;
    # previously the 200-angle theta was reused, making radon_image_80
    # identical to radon_image_200 (so delta_80 > delta_200 could not hold).
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_80 = radon(image, theta=theta, circle=False)
    # Test whether the sum of all projections is approximately the same
    s = radon_image_80.sum(axis=0)
    assert np.allclose(s, s[0], rtol=0.01)
    reconstructed = iradon(radon_image_80, circle=False)
    delta_80 = np.mean(abs(image / np.max(image) -
                           reconstructed / np.max(reconstructed)))
    # Loss of quality when the number of projections is reduced
    assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
    """For a single hot pixel, the reconstruction peak must coincide with
    the input peak."""
    debug = False
    theta = np.arange(180)
    image = np.zeros(shape, dtype=float)
    image[slices] = 1.
    sinogram = radon(image, theta, circle=False)
    reconstructed = iradon(sinogram, theta, circle=False)
    print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
    if debug and has_mpl:
        _debug_plot(image, reconstructed, sinogram)
    if image.sum() == 1:
        peak_in = np.unravel_index(np.argmax(image), image.shape)
        peak_out = np.unravel_index(np.argmax(reconstructed), image.shape)
        assert peak_out == peak_in
# Minimal image sizes exercised by test_radon_iradon_minimal.
shapes = [(3, 3), (4, 4), (5, 5)]
def generate_test_data_for_radon_iradon_minimal(shapes):
    """Yield ``(shape, (row, col))`` pairs covering the 3x3 neighbourhood
    around the centre pixel of every shape."""
    for shape in shapes:
        r_mid, c_mid = shape[0] // 2, shape[1] // 2
        for coord in itertools.product((r_mid - 1, r_mid, r_mid + 1),
                                       (c_mid - 1, c_mid, c_mid + 1)):
            yield shape, coord
@pytest.mark.parametrize(
    "shape, coordinate",
    generate_test_data_for_radon_iradon_minimal(shapes)
)
def test_radon_iradon_minimal(shape, coordinate):
    """Hot-pixel round trips for tiny images around the centre pixel."""
    check_radon_iradon_minimal(shape, coordinate)
def test_reconstruct_with_wrong_angles():
    """iradon must raise when sinogram width and theta length disagree."""
    sino = radon(np.zeros((3, 3)), theta=[0, 1, 2], circle=False)
    # Matching number of angles reconstructs without error.
    iradon(sino, theta=[0, 1, 2], circle=False)
    # One angle too many must raise.
    with pytest.raises(ValueError):
        iradon(sino, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
    """circle=True warns on non-circular input; circularly symmetric data
    yields identical projections; random data conserves per-angle mass."""
    # Data outside the reconstruction circle triggers a warning.
    with expected_warnings(['reconstruction circle']):
        radon(np.ones((10, 10)), circle=True)
    # Synthetic data, circular symmetry: every projection looks the same.
    shape = (61, 79)
    rows, cols = np.ogrid[0:shape[0], 0:shape[1]]
    dist = np.sqrt((rows - shape[0] // 2) ** 2 + (cols - shape[1] // 2) ** 2)
    radius = min(shape) // 2
    image = _rescale_intensity(np.clip(radius - dist, 0, np.inf))
    angles = np.linspace(0, 180, min(shape), endpoint=False)
    sinogram = radon(image, theta=angles, circle=True)
    assert np.all(sinogram.std(axis=1) < 1e-2)
    # Synthetic data, random: total projected mass is angle-independent.
    image = _random_circle(shape)
    sinogram = radon(image, theta=angles, circle=True)
    mass = sinogram.sum(axis=0)
    average_mass = mass.mean()
    relative_error = np.abs(mass - average_mass) / average_mass
    print(relative_error.max(), relative_error.mean())
    assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
    """Padding a circle=True sinogram to square must keep the peak position."""
    from skimage.transform.radon_transform import _sinogram_circle_to_square

    def argmax_shape(a):
        return np.unravel_index(np.argmax(a), a.shape)

    image = _random_circle((size, size))
    theta = np.linspace(0., 180., size, False)
    sinogram_circle = radon(image, theta, circle=True)
    print('\n\targmax of circle:', argmax_shape(sinogram_circle))
    sinogram_square = radon(image, theta, circle=False)
    print('\targmax of square:', argmax_shape(sinogram_square))
    padded = _sinogram_circle_to_square(sinogram_circle)
    print('\targmax of circle to square:',
          argmax_shape(padded))
    error = abs(sinogram_square - padded)
    print(np.mean(error), np.max(error))
    assert argmax_shape(sinogram_square) == argmax_shape(padded)
@pytest.mark.parametrize("size", (50, 51))
def test_sinogram_circle_to_square(size):
    """Peak position survives circle-to-square sinogram padding."""
    check_sinogram_circle_to_square(size)
def check_radon_iradon_circle(interpolation, shape, output_size):
    """circle=True and cropped/masked circle=False reconstructions agree.

    Parameters
    ----------
    interpolation : str
        Interpolation passed to `iradon`.
    shape : tuple of int
        Shape of the synthetic test image.
    output_size : int or None
        Requested reconstruction size.
    """
    # Forward and inverse radon on synthetic data
    image = _random_circle(shape)
    radius = min(shape) // 2
    sinogram_rectangle = radon(image, circle=False)
    reconstruction_rectangle = iradon(sinogram_rectangle,
                                      output_size=output_size,
                                      interpolation=interpolation,
                                      circle=False)
    sinogram_circle = radon(image, circle=True)
    reconstruction_circle = iradon(sinogram_circle,
                                   output_size=output_size,
                                   interpolation=interpolation,
                                   circle=True)
    # Crop rectangular reconstruction to match circle=True reconstruction
    width = reconstruction_circle.shape[0]
    excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
    s = np.s_[excess:width + excess, excess:width + excess]
    reconstruction_rectangle = reconstruction_rectangle[s]
    # Find the reconstruction circle, set reconstruction to zero outside
    c0, c1 = np.ogrid[0:width, 0:width]
    r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
    reconstruction_rectangle[r > radius] = 0.
    print(reconstruction_circle.shape)
    print(reconstruction_rectangle.shape)
    # BUG FIX: the np.allclose result was previously discarded, so this
    # check asserted nothing. If the default tolerance proves too strict,
    # loosen with explicit rtol/atol rather than dropping the assert.
    assert np.allclose(reconstruction_rectangle, reconstruction_circle)
# if adding more shapes to test data, you might want to look at commit d0f2bac3f
# Parameter grids combined by itertools.product in test_radon_iradon_circle.
shapes_radon_iradon_circle = ((61, 79), )
interpolations = ('nearest', 'linear')
output_sizes = (None,
                min(shapes_radon_iradon_circle[0]),
                max(shapes_radon_iradon_circle[0]),
                97)
@pytest.mark.parametrize(
    "shape, interpolation, output_size",
    itertools.product(shapes_radon_iradon_circle, interpolations, output_sizes)
)
def test_radon_iradon_circle(shape, interpolation, output_size):
    """circle=True and cropped circle=False reconstructions agree."""
    check_radon_iradon_circle(interpolation, shape, output_size)
def test_order_angles_golden_ratio():
    """Golden-ratio angle ordering must visit each angle exactly once."""
    from skimage.transform.radon_transform import order_angles_golden_ratio
    np.random.seed(1231)
    lengths = [1, 4, 10, 180]
    # 'length' replaces the ambiguous single-letter name 'l' (PEP 8 / E741).
    for length in lengths:
        theta_ordered = np.linspace(0, 180, length, endpoint=False)
        theta_random = np.random.uniform(0, 180, length)
        for theta in (theta_random, theta_ordered):
            # list(...) instead of a redundant [x for x in ...] comprehension.
            indices = list(order_angles_golden_ratio(theta))
            # no duplicate indices allowed
            assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
    """SART reconstruction converges on full and missing-wedge data.

    Checks that one SART iteration, a second iteration seeded with the
    first result, a clipped iteration, and a shift-compensated
    reconstruction all stay within (progressively tighter) error bounds.
    The missing-wedge case is allowed twice the error (error_factor=2).
    """
    debug = False
    image = rescale(PHANTOM, 0.8, mode='reflect',
                    channel_axis=None, anti_aliasing=False)
    theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
    theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
    for theta, error_factor in ((theta_ordered, 1.),
                                (theta_missing_wedge, 2.)):
        sinogram = radon(image, theta, circle=True)
        reconstructed = iradon_sart(sinogram, theta)
        if debug and has_mpl:
            # Visual inspection aid; disabled by default.
            from matplotlib import pyplot as plt
            plt.figure()
            plt.subplot(221)
            plt.imshow(image, interpolation='nearest')
            plt.subplot(222)
            plt.imshow(sinogram, interpolation='nearest')
            plt.subplot(223)
            plt.imshow(reconstructed, interpolation='nearest')
            plt.subplot(224)
            plt.imshow(reconstructed - image, interpolation='nearest')
            plt.show()
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration) =', delta)
        assert delta < 0.02 * error_factor
        # A second iteration seeded with the first result must improve.
        reconstructed = iradon_sart(sinogram, theta, reconstructed)
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (2 iterations) =', delta)
        assert delta < 0.014 * error_factor
        # Clipping the reconstruction to [0, 1] also tightens the error.
        reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration, clip) =', delta)
        assert delta < 0.018 * error_factor
        # Shift each projection randomly, then reconstruct while
        # compensating with projection_shifts.
        np.random.seed(1239867)
        shifts = np.random.uniform(-3, 3, sinogram.shape[1])
        x = np.arange(sinogram.shape[0])
        sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
                                                sinogram[:, i])
                                      for i in range(sinogram.shape[1])]).T
        reconstructed = iradon_sart(sinogram_shifted, theta,
                                    projection_shifts=shifts)
        if debug and has_mpl:
            from matplotlib import pyplot as plt
            plt.figure()
            plt.subplot(221)
            plt.imshow(image, interpolation='nearest')
            plt.subplot(222)
            plt.imshow(sinogram_shifted, interpolation='nearest')
            plt.subplot(223)
            plt.imshow(reconstructed, interpolation='nearest')
            plt.subplot(224)
            plt.imshow(reconstructed - image, interpolation='nearest')
            plt.show()
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration, shifted sinogram) =', delta)
        assert delta < 0.022 * error_factor
@pytest.mark.parametrize("preserve_range", [True, False])
def test_iradon_dtype(preserve_range):
    """iradon promotes integer sinograms to float64 and keeps float dtypes."""
    sinogram = np.zeros((16, 1), dtype=int)
    sinogram[8, 0] = 1.
    cases = ((sinogram, np.dtype('float64')),
             (sinogram.astype('float64'), np.dtype('float64')),
             (sinogram.astype('float32'), np.dtype('float32')))
    for sino, expected_dtype in cases:
        result = iradon(sino, theta=[0], preserve_range=preserve_range)
        assert result.dtype == expected_dtype
def test_radon_dtype():
    """radon preserves float32 and float64 input dtypes."""
    img64 = convert_to_float(PHANTOM, False)
    img32 = img64.astype(np.float32)
    assert radon(img64).dtype == img64.dtype
    assert radon(img32).dtype == img32.dtype
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_iradon_sart_dtype(dtype):
    """iradon_sart casts int input with a warning, keeps float dtypes,
    and honours an explicit output dtype."""
    sinogram_int = np.zeros((16, 1), dtype=int)
    sinogram_int[8, 0] = 1.
    sinogram64 = sinogram_int.astype('float64')
    sinogram32 = sinogram_int.astype('float32')
    # Integer input is cast to float64 and warns.
    with expected_warnings(['Input data is cast to float']):
        assert iradon_sart(sinogram_int, theta=[0]).dtype == 'float64'
    # Float inputs keep their dtype.
    assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype
    assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype
    # An explicit dtype always wins, whatever the input dtype.
    for sino in (sinogram_int, sinogram32, sinogram64):
        assert iradon_sart(sino, theta=[0], dtype=dtype).dtype == dtype
def test_iradon_sart_wrong_dtype():
    """Non-float output dtypes must be rejected."""
    with pytest.raises(ValueError):
        iradon_sart(np.zeros((16, 1)), dtype=int)

File diff suppressed because it is too large Load Diff