update
This commit is contained in:
0
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__init__.py
vendored
Normal file
0
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__init__.py
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_block.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_block.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_blur_effect.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_blur_effect.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_ccomp.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_ccomp.cpython-312.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_entropy.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_entropy.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_find_contours.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_find_contours.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_fit.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_fit.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_label.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_label.cpython-312.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_moments.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_moments.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_pnpoly.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_pnpoly.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_polygon.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_polygon.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_profile.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_profile.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_regionprops.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/__pycache__/test_regionprops.cpython-312.pyc
vendored
Normal file
Binary file not shown.
130
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_block.py
vendored
Normal file
130
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_block.py
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
import numpy as np
|
||||
from skimage.measure import block_reduce
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_equal
|
||||
|
||||
|
||||
def test_block_reduce_sum():
|
||||
image1 = np.arange(4 * 6).reshape(4, 6)
|
||||
out1 = block_reduce(image1, (2, 3))
|
||||
expected1 = np.array([[24, 42], [96, 114]])
|
||||
assert_equal(expected1, out1)
|
||||
|
||||
image2 = np.arange(5 * 8).reshape(5, 8)
|
||||
out2 = block_reduce(image2, (3, 3))
|
||||
expected2 = np.array([[81, 108, 87], [174, 192, 138]])
|
||||
assert_equal(expected2, out2)
|
||||
|
||||
|
||||
def test_block_reduce_mean():
|
||||
image1 = np.arange(4 * 6).reshape(4, 6)
|
||||
out1 = block_reduce(image1, (2, 3), func=np.mean)
|
||||
expected1 = np.array([[4.0, 7.0], [16.0, 19.0]])
|
||||
assert_equal(expected1, out1)
|
||||
|
||||
image2 = np.arange(5 * 8).reshape(5, 8)
|
||||
out2 = block_reduce(image2, (4, 5), func=np.mean)
|
||||
expected2 = np.array([[14.0, 10.8], [8.5, 5.7]])
|
||||
assert_equal(expected2, out2)
|
||||
|
||||
|
||||
def test_block_reduce_median():
|
||||
image1 = np.arange(4 * 6).reshape(4, 6)
|
||||
out1 = block_reduce(image1, (2, 3), func=np.median)
|
||||
expected1 = np.array([[4.0, 7.0], [16.0, 19.0]])
|
||||
assert_equal(expected1, out1)
|
||||
|
||||
image2 = np.arange(5 * 8).reshape(5, 8)
|
||||
out2 = block_reduce(image2, (4, 5), func=np.median)
|
||||
expected2 = np.array([[14.0, 6.5], [0.0, 0.0]])
|
||||
assert_equal(expected2, out2)
|
||||
|
||||
image3 = np.array([[1, 5, 5, 5], [5, 5, 5, 1000]])
|
||||
out3 = block_reduce(image3, (2, 4), func=np.median)
|
||||
assert_equal(5, out3)
|
||||
|
||||
|
||||
def test_block_reduce_min():
|
||||
image1 = np.arange(4 * 6).reshape(4, 6)
|
||||
out1 = block_reduce(image1, (2, 3), func=np.min)
|
||||
expected1 = np.array([[0, 3], [12, 15]])
|
||||
assert_equal(expected1, out1)
|
||||
|
||||
image2 = np.arange(5 * 8).reshape(5, 8)
|
||||
out2 = block_reduce(image2, (4, 5), func=np.min)
|
||||
expected2 = np.array([[0, 0], [0, 0]])
|
||||
assert_equal(expected2, out2)
|
||||
|
||||
|
||||
def test_block_reduce_max():
|
||||
image1 = np.arange(4 * 6).reshape(4, 6)
|
||||
out1 = block_reduce(image1, (2, 3), func=np.max)
|
||||
expected1 = np.array([[8, 11], [20, 23]])
|
||||
assert_equal(expected1, out1)
|
||||
|
||||
image2 = np.arange(5 * 8).reshape(5, 8)
|
||||
out2 = block_reduce(image2, (4, 5), func=np.max)
|
||||
expected2 = np.array([[28, 31], [36, 39]])
|
||||
assert_equal(expected2, out2)
|
||||
|
||||
|
||||
def test_invalid_block_size():
|
||||
image = np.arange(4 * 6).reshape(4, 6)
|
||||
|
||||
with testing.raises(ValueError):
|
||||
block_reduce(image, [1, 2, 3])
|
||||
with testing.raises(ValueError):
|
||||
block_reduce(image, [1, 0.5])
|
||||
|
||||
|
||||
def test_default_block_size():
|
||||
image = np.arange(4 * 6).reshape(4, 6)
|
||||
out = block_reduce(image, func=np.min)
|
||||
expected = np.array([[0, 2, 4], [12, 14, 16]])
|
||||
assert_equal(expected, out)
|
||||
|
||||
|
||||
def test_scalar_block_size():
|
||||
image = np.arange(6 * 6).reshape(6, 6)
|
||||
out = block_reduce(image, 3, func=np.min)
|
||||
expected1 = np.array([[0, 3], [18, 21]])
|
||||
assert_equal(expected1, out)
|
||||
expected2 = block_reduce(image, (3, 3), func=np.min)
|
||||
assert_equal(expected2, out)
|
||||
|
||||
|
||||
def test_func_kwargs_same_dtype():
|
||||
image = np.array(
|
||||
[
|
||||
[97, 123, 173, 227],
|
||||
[217, 241, 221, 214],
|
||||
[211, 11, 170, 53],
|
||||
[214, 205, 101, 57],
|
||||
],
|
||||
dtype=np.uint8,
|
||||
)
|
||||
|
||||
out = block_reduce(image, (2, 2), func=np.mean, func_kwargs={'dtype': np.uint8})
|
||||
expected = np.array([[41, 16], [32, 31]], dtype=np.uint8)
|
||||
|
||||
assert_equal(out, expected)
|
||||
assert out.dtype == expected.dtype
|
||||
|
||||
|
||||
def test_func_kwargs_different_dtype():
|
||||
image = np.array(
|
||||
[
|
||||
[0.45745366, 0.67479345, 0.20949775, 0.3147348],
|
||||
[0.7209286, 0.88915504, 0.66153409, 0.07919526],
|
||||
[0.04640037, 0.54008495, 0.34664343, 0.56152301],
|
||||
[0.58085003, 0.80144708, 0.87844473, 0.29811511],
|
||||
],
|
||||
dtype=np.float64,
|
||||
)
|
||||
|
||||
out = block_reduce(image, (2, 2), func=np.mean, func_kwargs={'dtype': np.float16})
|
||||
expected = np.array([[0.6855, 0.3164], [0.4922, 0.521]], dtype=np.float16)
|
||||
|
||||
assert_equal(out, expected)
|
||||
assert out.dtype == expected.dtype
|
||||
52
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_blur_effect.py
vendored
Normal file
52
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_blur_effect.py
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
from numpy.testing import assert_array_equal
|
||||
|
||||
from skimage.color import rgb2gray
|
||||
from skimage.data import astronaut, cells3d
|
||||
from skimage.filters import gaussian
|
||||
from skimage.measure import blur_effect
|
||||
|
||||
|
||||
def test_blur_effect():
|
||||
"""Test that the blur metric increases with more blurring."""
|
||||
image = astronaut()
|
||||
B0 = blur_effect(image, channel_axis=-1)
|
||||
B1 = blur_effect(gaussian(image, sigma=1, channel_axis=-1), channel_axis=-1)
|
||||
B2 = blur_effect(gaussian(image, sigma=4, channel_axis=-1), channel_axis=-1)
|
||||
assert 0 <= B0 < 1
|
||||
assert B0 < B1 < B2
|
||||
|
||||
|
||||
def test_blur_effect_h_size():
|
||||
"""Test that the blur metric decreases with increasing size of the
|
||||
re-blurring filter.
|
||||
"""
|
||||
image = astronaut()
|
||||
B0 = blur_effect(image, h_size=3, channel_axis=-1)
|
||||
B1 = blur_effect(image, channel_axis=-1) # default h_size is 11
|
||||
B2 = blur_effect(image, h_size=30, channel_axis=-1)
|
||||
assert 0 <= B0 < 1
|
||||
assert B0 > B1 > B2
|
||||
|
||||
|
||||
def test_blur_effect_channel_axis():
|
||||
"""Test that passing an RGB image is equivalent to passing its grayscale
|
||||
version.
|
||||
"""
|
||||
image = astronaut()
|
||||
B0 = blur_effect(image, channel_axis=-1)
|
||||
B1 = blur_effect(rgb2gray(image))
|
||||
B0_arr = blur_effect(image, channel_axis=-1, reduce_func=None)
|
||||
B1_arr = blur_effect(rgb2gray(image), reduce_func=None)
|
||||
assert 0 <= B0 < 1
|
||||
assert B0 == B1
|
||||
assert_array_equal(B0_arr, B1_arr)
|
||||
|
||||
|
||||
def test_blur_effect_3d():
|
||||
"""Test that the blur metric works on a 3D image."""
|
||||
image_3d = cells3d()[:, 1, :, :] # grab just the nuclei
|
||||
B0 = blur_effect(image_3d)
|
||||
B1 = blur_effect(gaussian(image_3d, sigma=1))
|
||||
B2 = blur_effect(gaussian(image_3d, sigma=4))
|
||||
assert 0 <= B0 < 1
|
||||
assert B0 < B1 < B2
|
||||
252
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_ccomp.py
vendored
Normal file
252
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_ccomp.py
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
import numpy as np
|
||||
|
||||
from skimage.measure import label
|
||||
import skimage.measure._ccomp as ccomp
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
|
||||
BG = 0 # background value
|
||||
|
||||
|
||||
class TestConnectedComponents:
|
||||
def setup_method(self):
|
||||
self.x = np.array(
|
||||
[
|
||||
[0, 0, 3, 2, 1, 9],
|
||||
[0, 1, 1, 9, 2, 9],
|
||||
[0, 0, 1, 9, 9, 9],
|
||||
[3, 1, 1, 5, 3, 0],
|
||||
]
|
||||
)
|
||||
|
||||
self.labels = np.array(
|
||||
[
|
||||
[0, 0, 1, 2, 3, 4],
|
||||
[0, 5, 5, 4, 2, 4],
|
||||
[0, 0, 5, 4, 4, 4],
|
||||
[6, 5, 5, 7, 8, 0],
|
||||
]
|
||||
)
|
||||
|
||||
# No background - there is no label 0, instead, labelling starts with 1
|
||||
# and all labels are incremented by 1.
|
||||
self.labels_nobg = self.labels + 1
|
||||
# The 0 at lower right corner is isolated, so it should get a new label
|
||||
self.labels_nobg[-1, -1] = 10
|
||||
|
||||
# We say that background value is 9 (and bg label is 0)
|
||||
self.labels_bg_9 = self.labels_nobg.copy()
|
||||
self.labels_bg_9[self.x == 9] = 0
|
||||
# Then, where there was the label 5, we now expect 4 etc.
|
||||
# (we assume that the label of value 9 would normally be 5)
|
||||
self.labels_bg_9[self.labels_bg_9 > 5] -= 1
|
||||
|
||||
def test_basic(self):
|
||||
assert_array_equal(label(self.x), self.labels)
|
||||
|
||||
# Make sure data wasn't modified
|
||||
assert self.x[0, 2] == 3
|
||||
|
||||
# Check that everything works if there is no background
|
||||
assert_array_equal(label(self.x, background=99), self.labels_nobg)
|
||||
# Check that everything works if background value != 0
|
||||
assert_array_equal(label(self.x, background=9), self.labels_bg_9)
|
||||
|
||||
def test_random(self):
|
||||
x = (np.random.rand(20, 30) * 5).astype(int)
|
||||
labels = label(x)
|
||||
|
||||
n = labels.max()
|
||||
for i in range(n):
|
||||
values = x[labels == i]
|
||||
assert np.all(values == values[0])
|
||||
|
||||
def test_diag(self):
|
||||
x = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
|
||||
assert_array_equal(label(x), x)
|
||||
|
||||
def test_4_vs_8(self):
|
||||
x = np.array([[0, 1], [1, 0]], dtype=int)
|
||||
|
||||
assert_array_equal(label(x, connectivity=1), [[0, 1], [2, 0]])
|
||||
assert_array_equal(label(x, connectivity=2), [[0, 1], [1, 0]])
|
||||
|
||||
def test_background(self):
|
||||
x = np.array([[1, 0, 0], [1, 1, 5], [0, 0, 0]])
|
||||
|
||||
assert_array_equal(label(x), [[1, 0, 0], [1, 1, 2], [0, 0, 0]])
|
||||
|
||||
assert_array_equal(label(x, background=0), [[1, 0, 0], [1, 1, 2], [0, 0, 0]])
|
||||
|
||||
def test_background_two_regions(self):
|
||||
x = np.array([[0, 0, 6], [0, 0, 6], [5, 5, 5]])
|
||||
|
||||
res = label(x, background=0)
|
||||
assert_array_equal(res, [[0, 0, 1], [0, 0, 1], [2, 2, 2]])
|
||||
|
||||
def test_background_one_region_center(self):
|
||||
x = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
|
||||
|
||||
assert_array_equal(
|
||||
label(x, connectivity=1, background=0), [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
|
||||
)
|
||||
|
||||
def test_return_num(self):
|
||||
x = np.array([[1, 0, 6], [0, 0, 6], [5, 5, 5]])
|
||||
|
||||
assert_array_equal(label(x, return_num=True)[1], 3)
|
||||
|
||||
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
|
||||
|
||||
|
||||
class TestConnectedComponents3d:
|
||||
def setup_method(self):
|
||||
self.x = np.zeros((3, 4, 5), int)
|
||||
self.x[0] = np.array(
|
||||
[[0, 3, 2, 1, 9], [0, 1, 9, 2, 9], [0, 1, 9, 9, 9], [3, 1, 5, 3, 0]]
|
||||
)
|
||||
|
||||
self.x[1] = np.array(
|
||||
[[3, 3, 2, 1, 9], [0, 3, 9, 2, 1], [0, 3, 3, 1, 1], [3, 1, 3, 3, 0]]
|
||||
)
|
||||
|
||||
self.x[2] = np.array(
|
||||
[[3, 3, 8, 8, 0], [2, 3, 9, 8, 8], [2, 3, 0, 8, 0], [2, 1, 0, 0, 0]]
|
||||
)
|
||||
|
||||
self.labels = np.zeros((3, 4, 5), int)
|
||||
|
||||
self.labels[0] = np.array(
|
||||
[[0, 1, 2, 3, 4], [0, 5, 4, 2, 4], [0, 5, 4, 4, 4], [1, 5, 6, 1, 0]]
|
||||
)
|
||||
|
||||
self.labels[1] = np.array(
|
||||
[[1, 1, 2, 3, 4], [0, 1, 4, 2, 3], [0, 1, 1, 3, 3], [1, 5, 1, 1, 0]]
|
||||
)
|
||||
|
||||
self.labels[2] = np.array(
|
||||
[[1, 1, 7, 7, 0], [8, 1, 4, 7, 7], [8, 1, 0, 7, 0], [8, 5, 0, 0, 0]]
|
||||
)
|
||||
|
||||
def test_basic(self):
|
||||
labels = label(self.x)
|
||||
assert_array_equal(labels, self.labels)
|
||||
|
||||
assert self.x[0, 0, 2] == 2, "Data was modified!"
|
||||
|
||||
def test_random(self):
|
||||
x = (np.random.rand(20, 30) * 5).astype(int)
|
||||
labels = label(x)
|
||||
|
||||
n = labels.max()
|
||||
for i in range(n):
|
||||
values = x[labels == i]
|
||||
assert np.all(values == values[0])
|
||||
|
||||
def test_diag(self):
|
||||
x = np.zeros((3, 3, 3), int)
|
||||
x[0, 2, 2] = 1
|
||||
x[1, 1, 1] = 1
|
||||
x[2, 0, 0] = 1
|
||||
assert_array_equal(label(x), x)
|
||||
|
||||
def test_4_vs_8(self):
|
||||
x = np.zeros((2, 2, 2), int)
|
||||
x[0, 1, 1] = 1
|
||||
x[1, 0, 0] = 1
|
||||
label4 = x.copy()
|
||||
label4[1, 0, 0] = 2
|
||||
assert_array_equal(label(x, connectivity=1), label4)
|
||||
assert_array_equal(label(x, connectivity=3), x)
|
||||
|
||||
def test_connectivity_1_vs_2(self):
|
||||
x = np.zeros((2, 2, 2), int)
|
||||
x[0, 1, 1] = 1
|
||||
x[1, 0, 0] = 1
|
||||
label1 = x.copy()
|
||||
label1[1, 0, 0] = 2
|
||||
assert_array_equal(label(x, connectivity=1), label1)
|
||||
assert_array_equal(label(x, connectivity=3), x)
|
||||
|
||||
def test_background(self):
|
||||
x = np.zeros((2, 3, 3), int)
|
||||
x[0] = np.array([[1, 0, 0], [1, 0, 0], [0, 0, 0]])
|
||||
x[1] = np.array([[0, 0, 0], [0, 1, 5], [0, 0, 0]])
|
||||
|
||||
lnb = x.copy()
|
||||
lnb[0] = np.array([[1, 2, 2], [1, 2, 2], [2, 2, 2]])
|
||||
lnb[1] = np.array([[2, 2, 2], [2, 1, 3], [2, 2, 2]])
|
||||
lb = x.copy()
|
||||
lb[0] = np.array([[1, BG, BG], [1, BG, BG], [BG, BG, BG]])
|
||||
lb[1] = np.array([[BG, BG, BG], [BG, 1, 2], [BG, BG, BG]])
|
||||
|
||||
assert_array_equal(label(x), lb)
|
||||
assert_array_equal(label(x, background=-1), lnb)
|
||||
|
||||
def test_background_two_regions(self):
|
||||
x = np.zeros((2, 3, 3), int)
|
||||
x[0] = np.array([[0, 0, 6], [0, 0, 6], [5, 5, 5]])
|
||||
x[1] = np.array([[6, 6, 0], [5, 0, 0], [0, 0, 0]])
|
||||
lb = x.copy()
|
||||
lb[0] = np.array([[BG, BG, 1], [BG, BG, 1], [2, 2, 2]])
|
||||
lb[1] = np.array([[1, 1, BG], [2, BG, BG], [BG, BG, BG]])
|
||||
|
||||
res = label(x, background=0)
|
||||
assert_array_equal(res, lb)
|
||||
|
||||
def test_background_one_region_center(self):
|
||||
x = np.zeros((3, 3, 3), int)
|
||||
x[1, 1, 1] = 1
|
||||
|
||||
lb = np.ones_like(x) * BG
|
||||
lb[1, 1, 1] = 1
|
||||
|
||||
assert_array_equal(label(x, connectivity=1, background=0), lb)
|
||||
|
||||
def test_return_num(self):
|
||||
x = np.array([[1, 0, 6], [0, 0, 6], [5, 5, 5]])
|
||||
|
||||
assert_array_equal(label(x, return_num=True)[1], 3)
|
||||
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
|
||||
|
||||
def test_1D(self):
|
||||
x = np.array((0, 1, 2, 2, 1, 1, 0, 0))
|
||||
xlen = len(x)
|
||||
y = np.array((0, 1, 2, 2, 3, 3, 0, 0))
|
||||
reshapes = (
|
||||
(xlen,),
|
||||
(1, xlen),
|
||||
(xlen, 1),
|
||||
(1, xlen, 1),
|
||||
(xlen, 1, 1),
|
||||
(1, 1, xlen),
|
||||
)
|
||||
for reshape in reshapes:
|
||||
x2 = x.reshape(reshape)
|
||||
labelled = label(x2)
|
||||
assert_array_equal(y, labelled.flatten())
|
||||
|
||||
def test_nd(self):
|
||||
x = np.ones((1, 2, 3, 4))
|
||||
with testing.raises(NotImplementedError):
|
||||
label(x)
|
||||
|
||||
|
||||
class TestSupport:
|
||||
def test_reshape(self):
|
||||
shapes_in = ((3, 1, 2), (1, 4, 5), (3, 1, 1), (2, 1), (1,))
|
||||
for shape in shapes_in:
|
||||
shape = np.array(shape)
|
||||
numones = sum(shape == 1)
|
||||
inp = np.random.random(shape)
|
||||
|
||||
fixed, swaps = ccomp.reshape_array(inp)
|
||||
shape2 = fixed.shape
|
||||
# now check that all ones are at the beginning
|
||||
for i in range(numones):
|
||||
assert shape2[i] == 1
|
||||
|
||||
back = ccomp.undo_reshape_array(fixed, swaps)
|
||||
# check that the undo works as expected
|
||||
assert_array_equal(inp, back)
|
||||
110
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_colocalization.py
vendored
Normal file
110
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_colocalization.py
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
from skimage.measure import (
|
||||
intersection_coeff,
|
||||
manders_coloc_coeff,
|
||||
manders_overlap_coeff,
|
||||
pearson_corr_coeff,
|
||||
)
|
||||
|
||||
|
||||
def test_invalid_input():
|
||||
# images are not same size
|
||||
img1 = np.array([[i + j for j in range(4)] for i in range(4)])
|
||||
img2 = np.ones((3, 5, 6))
|
||||
mask = np.array([[i <= 1 for i in range(5)] for _ in range(5)])
|
||||
non_binary_mask = np.array([[2 for __ in range(4)] for _ in range(4)])
|
||||
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
pearson_corr_coeff(img1, img1, mask)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
pearson_corr_coeff(img1, img2)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
pearson_corr_coeff(img1, img1, mask)
|
||||
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
|
||||
pearson_corr_coeff(img1, img1, non_binary_mask)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
manders_coloc_coeff(img1, mask)
|
||||
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
|
||||
manders_coloc_coeff(img1, non_binary_mask)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
manders_coloc_coeff(img1, img1 > 0, mask)
|
||||
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
|
||||
manders_coloc_coeff(img1, img1 > 0, non_binary_mask)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
manders_overlap_coeff(img1, img1, mask)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
manders_overlap_coeff(img1, img2)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
manders_overlap_coeff(img1, img1, mask)
|
||||
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
|
||||
manders_overlap_coeff(img1, img1, non_binary_mask)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
intersection_coeff(img1 > 2, img2 > 1, mask)
|
||||
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
|
||||
intersection_coeff(img1, img2)
|
||||
with pytest.raises(ValueError, match=". must have the same dimensions"):
|
||||
intersection_coeff(img1 > 2, img1 > 1, mask)
|
||||
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
|
||||
intersection_coeff(img1 > 2, img1 > 1, non_binary_mask)
|
||||
|
||||
|
||||
def test_pcc():
|
||||
# simple example
|
||||
img1 = np.array([[i + j for j in range(4)] for i in range(4)])
|
||||
np.testing.assert_almost_equal(
|
||||
pearson_corr_coeff(img1, img1), (1.0, 0.0), decimal=14
|
||||
)
|
||||
|
||||
img2 = np.where(img1 <= 2, 0, img1)
|
||||
np.testing.assert_almost_equal(
|
||||
pearson_corr_coeff(img1, img2), (0.944911182523068, 3.5667540654536515e-08)
|
||||
)
|
||||
|
||||
# change background of roi and see if values are same
|
||||
roi = np.where(img1 <= 2, 0, 1)
|
||||
np.testing.assert_almost_equal(
|
||||
pearson_corr_coeff(img1, img1, roi), pearson_corr_coeff(img1, img2, roi)
|
||||
)
|
||||
|
||||
|
||||
def test_mcc():
|
||||
img1 = np.array([[j for j in range(4)] for i in range(4)])
|
||||
mask = np.array([[i <= 1 for j in range(4)] for i in range(4)])
|
||||
assert manders_coloc_coeff(img1, mask) == 0.5
|
||||
|
||||
# test negative values
|
||||
img_negativeint = np.where(img1 == 1, -1, img1)
|
||||
img_negativefloat = img_negativeint / 2.0
|
||||
with pytest.raises(ValueError):
|
||||
manders_coloc_coeff(img_negativeint, mask)
|
||||
with pytest.raises(ValueError):
|
||||
manders_coloc_coeff(img_negativefloat, mask)
|
||||
|
||||
|
||||
def test_moc():
|
||||
img1 = np.ones((4, 4))
|
||||
img2 = 2 * np.ones((4, 4))
|
||||
assert manders_overlap_coeff(img1, img2) == 1
|
||||
|
||||
# test negative values
|
||||
img_negativeint = np.where(img1 == 1, -1, img1)
|
||||
img_negativefloat = img_negativeint / 2.0
|
||||
with pytest.raises(ValueError):
|
||||
manders_overlap_coeff(img_negativeint, img2)
|
||||
with pytest.raises(ValueError):
|
||||
manders_overlap_coeff(img1, img_negativeint)
|
||||
with pytest.raises(ValueError):
|
||||
manders_overlap_coeff(img_negativefloat, img2)
|
||||
with pytest.raises(ValueError):
|
||||
manders_overlap_coeff(img1, img_negativefloat)
|
||||
with pytest.raises(ValueError):
|
||||
manders_overlap_coeff(img_negativefloat, img_negativefloat)
|
||||
|
||||
|
||||
def test_intersection_coefficient():
|
||||
img1_mask = np.array([[j <= 1 for j in range(4)] for i in range(4)])
|
||||
img2_mask = np.array([[i <= 1 for j in range(4)] for i in range(4)])
|
||||
img3_mask = np.array([[1 for j in range(4)] for i in range(4)])
|
||||
assert intersection_coeff(img1_mask, img2_mask) == 0.5
|
||||
assert intersection_coeff(img1_mask, img3_mask) == 1
|
||||
16
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_entropy.py
vendored
Normal file
16
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_entropy.py
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
import numpy as np
|
||||
from skimage.measure import shannon_entropy
|
||||
|
||||
from skimage._shared.testing import assert_almost_equal
|
||||
|
||||
|
||||
def test_shannon_ones():
|
||||
img = np.ones((10, 10))
|
||||
res = shannon_entropy(img, base=np.e)
|
||||
assert_almost_equal(res, 0.0)
|
||||
|
||||
|
||||
def test_shannon_all_unique():
|
||||
img = np.arange(64)
|
||||
res = shannon_entropy(img, base=2)
|
||||
assert_almost_equal(res, np.log(64) / np.log(2))
|
||||
181
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_find_contours.py
vendored
Normal file
181
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_find_contours.py
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
import numpy as np
|
||||
from skimage.measure import find_contours
|
||||
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
import pytest
|
||||
|
||||
|
||||
a = np.ones((8, 8), dtype=np.float32)
|
||||
a[1:-1, 1] = 0
|
||||
a[1, 1:-1] = 0
|
||||
|
||||
x, y = np.mgrid[-1:1:5j, -1:1:5j]
|
||||
r = np.sqrt(x**2 + y**2)
|
||||
|
||||
|
||||
def test_binary():
|
||||
ref = [
|
||||
[6.0, 1.5],
|
||||
[5.0, 1.5],
|
||||
[4.0, 1.5],
|
||||
[3.0, 1.5],
|
||||
[2.0, 1.5],
|
||||
[1.5, 2.0],
|
||||
[1.5, 3.0],
|
||||
[1.5, 4.0],
|
||||
[1.5, 5.0],
|
||||
[1.5, 6.0],
|
||||
[1.0, 6.5],
|
||||
[0.5, 6.0],
|
||||
[0.5, 5.0],
|
||||
[0.5, 4.0],
|
||||
[0.5, 3.0],
|
||||
[0.5, 2.0],
|
||||
[0.5, 1.0],
|
||||
[1.0, 0.5],
|
||||
[2.0, 0.5],
|
||||
[3.0, 0.5],
|
||||
[4.0, 0.5],
|
||||
[5.0, 0.5],
|
||||
[6.0, 0.5],
|
||||
[6.5, 1.0],
|
||||
[6.0, 1.5],
|
||||
]
|
||||
|
||||
contours = find_contours(a, 0.5, positive_orientation='high')
|
||||
assert len(contours) == 1
|
||||
assert_array_equal(contours[0][::-1], ref)
|
||||
|
||||
|
||||
# target contour for mask tests
|
||||
mask_contour = [
|
||||
[6.0, 0.5],
|
||||
[5.0, 0.5],
|
||||
[4.0, 0.5],
|
||||
[3.0, 0.5],
|
||||
[2.0, 0.5],
|
||||
[1.0, 0.5],
|
||||
[0.5, 1.0],
|
||||
[0.5, 2.0],
|
||||
[0.5, 3.0],
|
||||
[0.5, 4.0],
|
||||
[0.5, 5.0],
|
||||
[0.5, 6.0],
|
||||
[1.0, 6.5],
|
||||
[1.5, 6.0],
|
||||
[1.5, 5.0],
|
||||
[1.5, 4.0],
|
||||
[1.5, 3.0],
|
||||
[1.5, 2.0],
|
||||
[2.0, 1.5],
|
||||
[3.0, 1.5],
|
||||
[4.0, 1.5],
|
||||
[5.0, 1.5],
|
||||
[6.0, 1.5],
|
||||
]
|
||||
|
||||
mask = np.ones((8, 8), dtype=bool)
|
||||
# Some missing data that should result in a hole in the contour:
|
||||
mask[7, 0:3] = False
|
||||
|
||||
|
||||
@pytest.mark.parametrize("level", [0.5, None])
|
||||
def test_nodata(level):
|
||||
# Test missing data via NaNs in input array
|
||||
b = np.copy(a)
|
||||
b[~mask] = np.nan
|
||||
contours = find_contours(b, level, positive_orientation='high')
|
||||
assert len(contours) == 1
|
||||
assert_array_equal(contours[0], mask_contour)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("level", [0.5, None])
|
||||
def test_mask(level):
|
||||
# Test missing data via explicit masking
|
||||
contours = find_contours(a, level, positive_orientation='high', mask=mask)
|
||||
assert len(contours) == 1
|
||||
assert_array_equal(contours[0], mask_contour)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("level", [0, None])
|
||||
def test_mask_shape(level):
|
||||
bad_mask = np.ones((8, 7), dtype=bool)
|
||||
with pytest.raises(ValueError, match='shape'):
|
||||
find_contours(a, level, mask=bad_mask)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("level", [0, None])
|
||||
def test_mask_dtype(level):
|
||||
bad_mask = np.ones((8, 8), dtype=np.uint8)
|
||||
with pytest.raises(TypeError, match='binary'):
|
||||
find_contours(a, level, mask=bad_mask)
|
||||
|
||||
|
||||
def test_float():
|
||||
contours = find_contours(r, 0.5)
|
||||
assert len(contours) == 1
|
||||
assert_array_equal(
|
||||
contours[0], [[2.0, 3.0], [1.0, 2.0], [2.0, 1.0], [3.0, 2.0], [2.0, 3.0]]
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("level", [0.5, None])
|
||||
def test_memory_order(level):
|
||||
contours = find_contours(np.ascontiguousarray(r), level)
|
||||
assert len(contours) == 1
|
||||
|
||||
contours = find_contours(np.asfortranarray(r), level)
|
||||
assert len(contours) == 1
|
||||
|
||||
|
||||
def test_invalid_input():
|
||||
with pytest.raises(ValueError):
|
||||
find_contours(r, 0.5, 'foo', 'bar')
|
||||
with pytest.raises(ValueError):
|
||||
find_contours(r[..., None], 0.5)
|
||||
|
||||
|
||||
def test_level_default():
|
||||
# image with range [0.9, 0.91]
|
||||
image = np.random.random((100, 100)) * 0.01 + 0.9
|
||||
contours = find_contours(image) # use default level
|
||||
# many contours should be found
|
||||
assert len(contours) > 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"image",
|
||||
[
|
||||
[
|
||||
[0.13680, 0.11220, 0.0, 0.0, 0.0, 0.19417, 0.19417, 0.33701],
|
||||
[0.0, 0.15140, 0.10267, 0.0, np.nan, 0.14908, 0.18158, 0.19178],
|
||||
[0.0, 0.06949, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01860],
|
||||
[0.0, 0.06949, 0.0, 0.17852, 0.08469, 0.02135, 0.08198, np.nan],
|
||||
[0.0, 0.08244, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
|
||||
[0.12342, 0.21330, 0.0, np.nan, 0.01301, 0.04335, 0.0, 0.0],
|
||||
],
|
||||
[
|
||||
[0.08, -0.03, -0.17, -0.08, 0.24, 0.06, 0.17, -0.02],
|
||||
[0.12, 0.0, np.nan, 0.24, 0.0, -0.53, 0.26, 0.16],
|
||||
[0.39, 0.0, 0.0, 0.0, 0.0, -0.02, -0.3, 0.01],
|
||||
[0.28, -0.04, -0.03, 0.16, 0.12, 0.01, -0.87, 0.16],
|
||||
[0.26, 0.08, 0.08, 0.08, 0.12, 0.13, 0.11, 0.19],
|
||||
[0.27, 0.24, 0.0, 0.25, 0.32, 0.19, 0.26, 0.22],
|
||||
],
|
||||
[
|
||||
[-0.18, np.nan, np.nan, 0.22, -0.14, -0.23, -0.2, -0.17, -0.19, -0.24],
|
||||
[0.0, np.nan, np.nan, np.nan, -0.1, -0.24, -0.15, -0.02, -0.09, -0.21],
|
||||
[0.43, 0.19, np.nan, np.nan, -0.01, -0.2, -0.22, -0.18, -0.16, -0.07],
|
||||
[0.23, 0.0, np.nan, -0.06, -0.07, -0.21, -0.24, -0.25, -0.23, -0.13],
|
||||
[-0.05, -0.11, 0.0, 0.1, -0.19, -0.23, -0.23, -0.18, -0.19, -0.16],
|
||||
[-0.19, -0.05, 0.13, -0.08, -0.22, -0.23, -0.26, -0.15, -0.12, -0.13],
|
||||
[-0.2, -0.11, -0.11, -0.24, -0.29, -0.27, -0.35, -0.36, -0.27, -0.13],
|
||||
[-0.28, -0.33, -0.31, -0.36, -0.39, -0.37, -0.38, -0.32, -0.34, -0.2],
|
||||
[-0.28, -0.33, -0.39, -0.4, -0.42, -0.38, -0.35, -0.39, -0.35, -0.34],
|
||||
[-0.38, -0.35, -0.41, -0.42, -0.39, -0.36, -0.34, -0.36, -0.28, -0.34],
|
||||
],
|
||||
],
|
||||
)
|
||||
def test_keyerror_fix(image):
|
||||
"""Failing samples from issue #4830"""
|
||||
find_contours(np.array(image, np.float32), 0)
|
||||
693
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_fit.py
vendored
Normal file
693
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_fit.py
vendored
Normal file
@@ -0,0 +1,693 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
from skimage._shared.testing import (
|
||||
arch32,
|
||||
assert_almost_equal,
|
||||
assert_array_less,
|
||||
assert_equal,
|
||||
xfail,
|
||||
assert_stacklevel,
|
||||
)
|
||||
from skimage.measure import CircleModel, EllipseModel, LineModelND, ransac
|
||||
from skimage.measure.fit import _dynamic_max_trials
|
||||
from skimage.transform import AffineTransform
|
||||
|
||||
|
||||
def test_line_model_predict():
|
||||
model = LineModelND()
|
||||
model.params = ((0, 0), (1, 1))
|
||||
x = np.arange(-10, 10)
|
||||
y = model.predict_y(x)
|
||||
assert_almost_equal(x, model.predict_x(y))
|
||||
|
||||
|
||||
def test_line_model_nd_invalid_input():
    """Calls on an unparameterized or under-sized LineModelND must fail."""
    # predict_* without params set raises
    with testing.raises(ValueError):
        LineModelND().predict_x(np.zeros(1))

    with testing.raises(ValueError):
        LineModelND().predict_y(np.zeros(1))

    with testing.raises(ValueError):
        LineModelND().predict_x(np.zeros(1), np.zeros(1))

    # NOTE(review): duplicate of the predict_y check above — possibly meant
    # to exercise a different argument combination; confirm intent.
    with testing.raises(ValueError):
        LineModelND().predict_y(np.zeros(1))

    with testing.raises(ValueError):
        LineModelND().predict_y(np.zeros(1), np.zeros(1))

    # a single point cannot determine a line -> estimate returns False
    assert not LineModelND().estimate(np.empty((1, 3)))
    assert not LineModelND().estimate(np.empty((1, 2)))

    with testing.raises(ValueError):
        LineModelND().residuals(np.empty((1, 3)))
|
||||
|
||||
|
||||
def test_line_model_nd_predict():
    """predict_x and predict_y are mutually consistent for a tilted line."""
    line = LineModelND()
    line.params = (np.array([0, 0]), np.array([0.2, 0.8]))
    xs = np.arange(-10, 10)
    ys = line.predict_y(xs)
    assert_almost_equal(xs, line.predict_x(ys))
|
||||
|
||||
|
||||
def test_line_model_nd_estimate():
    """Estimating from noisy 3D points recovers the generating line."""
    # generate original data without noise
    model0 = LineModelND()
    model0.params = (
        np.array([0, 0, 0], dtype='float'),
        np.array([1, 1, 1], dtype='float') / np.sqrt(3),
    )
    # we scale the unit vector with a factor 10 when generating points on the
    # line in order to compensate for the scale of the random noise
    data0 = (
        model0.params[0] + 10 * np.arange(-100, 100)[..., np.newaxis] * model0.params[1]
    )

    # add gaussian noise to data
    rng = np.random.default_rng(1234)
    data = data0 + rng.normal(size=data0.shape)

    # estimate parameters of noisy data
    model_est = LineModelND()
    model_est.estimate(data)
    # assert_almost_equal(model_est.residuals(data0), np.zeros(len(data)), 1)

    # test whether estimated parameters are correct
    # we use the following geometric property: two aligned vectors have
    # a cross-product equal to zero
    # test if direction vectors are aligned
    assert_almost_equal(
        np.linalg.norm(np.cross(model0.params[1], model_est.params[1])), 0, 1
    )
    # test if origins are aligned with the direction
    a = model_est.params[0] - model0.params[0]
    if np.linalg.norm(a) > 0:
        a /= np.linalg.norm(a)
    assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
|
||||
|
||||
|
||||
def test_line_model_nd_residuals():
    """Residuals are distances from the line; `params` may override."""
    model = LineModelND()
    model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
    # points on the z-axis have zero residual
    assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0)
    assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0)
    assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10)
    # test params argument in model.residuals
    # NOTE(review): the direction vector here is not normalized; the expected
    # value 30 (instead of 10) presumably reflects that — confirm intended.
    data = np.array([[10, 0, 0]])
    params = (np.array([0, 0, 0]), np.array([2, 0, 0]))
    assert_equal(abs(model.residuals(data, params=params)), 30)
|
||||
|
||||
|
||||
def test_circle_model_invalid_input():
    """CircleModel.estimate requires (N, 2) data; (N, 3) must raise."""
    with testing.raises(ValueError):
        CircleModel().estimate(np.empty((5, 3)))
|
||||
|
||||
|
||||
def test_circle_model_predict():
    """predict_xy at quarter-turn angles lands on the axis points."""
    circle = CircleModel()
    radius = 5
    circle.params = (0, 0, radius)
    angles = np.arange(0, 2 * np.pi, np.pi / 2)

    expected = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
    assert_almost_equal(expected, circle.predict_xy(angles))
|
||||
|
||||
|
||||
def test_circle_model_estimate():
    """CircleModel recovers center and radius from noisy circle points."""
    # generate original data without noise
    model0 = CircleModel()
    model0.params = (10, 12, 3)
    t = np.linspace(0, 2 * np.pi, 1000)
    data0 = model0.predict_xy(t)

    # add gaussian noise to data
    rng = np.random.default_rng(1234)
    data = data0 + rng.normal(size=data0.shape)

    # estimate parameters of noisy data
    model_est = CircleModel()
    model_est.estimate(data)

    # test whether estimated parameters almost equal original parameters
    assert_almost_equal(model0.params, model_est.params, 0)
|
||||
|
||||
|
||||
def test_circle_model_int_overflow():
    """Fitting int32 data far from the origin must not overflow internally."""
    xy = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]], dtype=np.int32)
    xy += 500

    model = CircleModel()
    model.estimate(xy)

    # unit circle shifted to (500, 500); wrong values would indicate overflow
    assert_almost_equal(model.params, [500, 500, 1])
|
||||
|
||||
|
||||
def test_circle_model_residuals():
    """Residuals measure radial distance of a point from the circle."""
    model = CircleModel()
    model.params = (0, 0, 5)
    assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0)
    assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))), np.sqrt(2 * 6**2) - 5)
    assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5)
|
||||
|
||||
|
||||
def test_circle_model_insufficient_data():
    """Degenerate inputs warn and estimation fails gracefully."""
    model = CircleModel()
    warning_message = ["Input does not contain enough significant data points."]
    # two points cannot determine a circle
    with expected_warnings(warning_message):
        model.estimate(np.array([[1, 2], [3, 4]]))

    # three collinear points are also insufficient
    with expected_warnings(warning_message):
        model.estimate(np.array([[0, 0], [1, 1], [2, 2]]))

    warning_message = (
        "Standard deviation of data is too small to estimate "
        "circle with meaningful precision."
    )
    # all-identical points: zero variance -> RuntimeWarning, estimate False
    with pytest.warns(RuntimeWarning, match=warning_message) as _warnings:
        assert not model.estimate(np.ones((6, 2)))
    assert_stacklevel(_warnings)
    assert len(_warnings) == 1
|
||||
|
||||
|
||||
def test_circle_model_estimate_from_small_scale_data():
    """Estimation stays numerically stable for circles at ~1e-90 scale."""
    params = np.array([1.23e-90, 2.34e-90, 3.45e-100], dtype=np.float64)
    angles = np.array(
        [0.107, 0.407, 1.108, 1.489, 2.216, 2.768,
         3.183, 3.969, 4.840, 5.387, 5.792, 6.139],
        dtype=np.float64,
    )
    data = CircleModel().predict_xy(angles, params=params)
    model = CircleModel()
    # assert that far small scale data can be estimated
    assert model.estimate(data.astype(np.float64))
    # test whether the predicted parameters are close to the original ones
    assert_almost_equal(params, model.params)
|
||||
|
||||
|
||||
def test_ellipse_model_invalid_input():
    """EllipseModel.estimate requires (N, 2) data; (N, 3) must raise."""
    with testing.raises(ValueError):
        EllipseModel().estimate(np.empty((5, 3)))
|
||||
|
||||
|
||||
def test_ellipse_model_predict():
    """predict_xy hits the semi-axis endpoints at quarter-turn angles."""
    ellipse = EllipseModel()
    ellipse.params = (0, 0, 5, 10, 0)
    angles = np.arange(0, 2 * np.pi, np.pi / 2)

    expected = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
    assert_almost_equal(expected, ellipse.predict_xy(angles))
|
||||
|
||||
|
||||
def test_ellipse_model_estimate():
    """Center is recovered from noisy ellipse points at many orientations."""
    for angle in range(0, 180, 15):
        rad = np.deg2rad(angle)
        # generate original data without noise
        model0 = EllipseModel()
        model0.params = (10, 20, 15, 25, rad)
        t = np.linspace(0, 2 * np.pi, 100)
        data0 = model0.predict_xy(t)

        # add gaussian noise to data
        rng = np.random.default_rng(1234)
        data = data0 + rng.normal(size=data0.shape)

        # estimate parameters of noisy data
        model_est = EllipseModel()
        model_est.estimate(data)

        # test whether estimated center almost equals the original one
        assert_almost_equal(model0.params[:2], model_est.params[:2], 0)
        # residuals against the noise-free data must all stay below 1
        res = model_est.residuals(data0)
        assert_array_less(res, np.ones(res.shape))
|
||||
|
||||
|
||||
def test_ellipse_parameter_stability():
    """The fit should be modified so that a > b"""

    for angle in np.arange(0, 180 + 1, 1):
        # generate rotation matrix
        theta = np.deg2rad(angle)
        c = np.cos(theta)
        s = np.sin(theta)
        R = np.array([[c, -s], [s, c]])

        # generate points on ellipse
        t = np.linspace(0, 2 * np.pi, 20)
        a = 100
        b = 50
        points = np.array([a * np.cos(t), b * np.sin(t)])
        points = R @ points

        # fit model to points
        ellipse_model = EllipseModel()
        ellipse_model.estimate(points.T)
        _, _, a_prime, b_prime, theta_prime = ellipse_model.params

        # the major axis (a) must come first and the angle must be preserved
        assert_almost_equal(theta_prime, theta)
        assert_almost_equal(a_prime, a)
        assert_almost_equal(b_prime, b)
|
||||
|
||||
|
||||
def test_ellipse_model_estimate_from_data():
    """Fit to real int32 edge-point data must stay numerically stable."""
    data = np.array(
        [
            [264, 854], [265, 875], [268, 863], [270, 857], [275, 905], [285, 915],
            [305, 925], [324, 934], [335, 764], [336, 915], [345, 925], [345, 945],
            [354, 933], [355, 745], [364, 936], [365, 754], [375, 745], [375, 735],
            [385, 736], [395, 735], [394, 935], [405, 727], [415, 736], [415, 727],
            [425, 727], [426, 929], [435, 735], [444, 933], [445, 735], [455, 724],
            [465, 934], [465, 735], [475, 908], [475, 726], [485, 753], [485, 728],
            [492, 762], [495, 745], [491, 910], [493, 909], [499, 904], [505, 905],
            [504, 747], [515, 743], [516, 752], [524, 855], [525, 844], [525, 885],
            [533, 845], [533, 873], [535, 883], [545, 874], [543, 864], [553, 865],
            [553, 845], [554, 825], [554, 835], [563, 845], [565, 826], [563, 855],
            [563, 795], [565, 735], [573, 778], [572, 815], [574, 804], [575, 665],
            [575, 685], [574, 705], [574, 745], [575, 875], [572, 732], [582, 795],
            [579, 709], [583, 805], [583, 854], [586, 755], [584, 824], [585, 655],
            [581, 718], [586, 844], [585, 915], [587, 905], [594, 824], [593, 855],
            [590, 891], [594, 776], [596, 767], [593, 763], [603, 785], [604, 775],
            [603, 885], [605, 753], [605, 655], [606, 935], [603, 761], [613, 802],
            [613, 945], [613, 965], [615, 693], [617, 665], [623, 962], [624, 972],
            [625, 995], [633, 673], [633, 965], [633, 683], [633, 692], [633, 954],
            [634, 1016], [635, 664], [641, 804], [637, 999], [641, 956], [643, 946],
            [643, 926], [644, 975], [643, 655], [646, 705], [651, 664], [651, 984],
            [647, 665], [651, 715], [651, 725], [651, 734], [647, 809], [651, 825],
            [651, 873], [647, 900], [652, 917], [651, 944], [652, 742], [648, 811],
            [651, 994], [652, 783], [650, 911], [654, 879],
        ],
        dtype=np.int32,
    )

    # estimate parameters of real data
    model = EllipseModel()
    model.estimate(data)

    # test whether estimated parameters are smaller than 1000, i.e. stable
    assert_array_less(model.params[:4], np.full(4, 1000))

    # test whether all parameters are more than 0. Negative values were the
    # result of an integer overflow
    assert_array_less(np.zeros(4), np.abs(model.params[:4]))
|
||||
|
||||
|
||||
def test_ellipse_model_estimate_from_far_shifted_data():
    """A tiny ellipse centered near (1e6, 2e6) must still be recoverable."""
    params = np.array([1e6, 2e6, 0.5, 0.1, 0.5], dtype=np.float64)
    angles = np.array(
        [0.107, 0.407, 1.108, 1.489, 2.216, 2.768,
         3.183, 3.969, 4.840, 5.387, 5.792, 6.139],
        dtype=np.float64,
    )
    data = EllipseModel().predict_xy(angles, params=params)
    model = EllipseModel()
    # assert that far shifted data can be estimated
    assert model.estimate(data.astype(np.float64))
    # test whether the predicted parameters are close to the original ones
    assert_almost_equal(params, model.params)
|
||||
|
||||
|
||||
@xfail(
    condition=arch32,
    reason=(
        'Known test failure on 32-bit platforms. See links for '
        'details: '
        'https://github.com/scikit-image/scikit-image/issues/3091 '
        'https://github.com/scikit-image/scikit-image/issues/2670'
    ),
)
def test_ellipse_model_estimate_failers():
    """Degenerate data warns (RuntimeWarning) and estimate returns False."""
    # estimate parameters of real data
    model = EllipseModel()
    warning_message = (
        "Standard deviation of data is too small to estimate "
        "ellipse with meaningful precision."
    )
    # all-identical points: zero variance
    with pytest.warns(RuntimeWarning, match=warning_message) as _warnings:
        assert not model.estimate(np.ones((6, 2)))
    assert_stacklevel(_warnings)
    assert len(_warnings) == 1

    # three nearly collinear points cannot determine an ellipse
    assert not model.estimate(np.array([[50, 80], [51, 81], [52, 80]]))
|
||||
|
||||
|
||||
def test_ellipse_model_residuals():
    """Residuals are distances from the ellipse boundary."""
    model = EllipseModel()
    # axis-aligned ellipse centered at origin: a=10 along x, b=5 along y
    model.params = (0, 0, 10, 5, 0)
    assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0)
    assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0)
    # (0, 10) lies 5 above the top of the ellipse
    assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5)
|
||||
|
||||
|
||||
def test_ransac_shape():
    """RANSAC on a circle with 3 planted outliers recovers the true model
    and marks the planted points as non-inliers.
    """
    # generate original data without noise
    model0 = CircleModel()
    model0.params = (10, 12, 3)
    t = np.linspace(0, 2 * np.pi, 1000)
    data0 = model0.predict_xy(t)

    # add some faulty data
    outliers = (10, 30, 200)
    data0[outliers[0], :] = (1000, 1000)
    data0[outliers[1], :] = (-50, 50)
    data0[outliers[2], :] = (-100, -10)

    # estimate parameters of corrupted data
    # (a second, identical call whose result was discarded has been removed)
    model_est, inliers = ransac(data0, CircleModel, 3, 5, rng=1)

    # test whether estimated parameters equal original parameters
    assert_almost_equal(model0.params, model_est.params)
    for outlier in outliers:
        # `inliers` is a boolean mask; index it directly. The previous
        # `outlier not in inliers` membership test on a bool array was
        # vacuously true for any index > 1 and could never fail.
        assert not inliers[outlier]
|
||||
|
||||
|
||||
def test_ransac_geometric():
    """RANSAC estimates an affine transform despite 3 planted outliers."""
    rng = np.random.default_rng(12373240)

    # generate original data without noise
    src = 100 * rng.random((50, 2))
    model0 = AffineTransform(scale=(0.5, 0.3), rotation=1, translation=(10, 20))
    dst = model0(src)

    # add some faulty data
    outliers = (0, 5, 20)
    dst[outliers[0]] = (10000, 10000)
    dst[outliers[1]] = (-100, 100)
    dst[outliers[2]] = (50, 50)

    # estimate parameters of corrupted data
    model_est, inliers = ransac((src, dst), AffineTransform, 2, 20, rng=rng)

    # test whether estimated parameters equal original parameters
    assert_almost_equal(model0.params, model_est.params)
    # exactly the planted outliers should be flagged as non-inliers;
    # use `~inliers` rather than comparing a bool array against False (E712)
    assert np.all(np.nonzero(~inliers)[0] == outliers)
|
||||
|
||||
|
||||
def test_ransac_is_data_valid():
    """If `is_data_valid` rejects every sample, ransac returns (None, None)."""

    def is_data_valid(data):
        # requires more than 2 points, but min_samples=2 -> always rejected
        return data.shape[0] > 2

    with expected_warnings(["No inliers found"]):
        model, inliers = ransac(
            np.empty((10, 2)),
            LineModelND,
            2,
            np.inf,
            is_data_valid=is_data_valid,
            rng=1,
        )
    assert_equal(model, None)
    assert_equal(inliers, None)
|
||||
|
||||
|
||||
def test_ransac_is_model_valid():
    """If `is_model_valid` rejects every model, ransac returns (None, None)."""

    def is_model_valid(model, data):
        # unconditionally reject every candidate model
        return False

    with expected_warnings(["No inliers found"]):
        model, inliers = ransac(
            np.empty((10, 2)),
            LineModelND,
            2,
            np.inf,
            is_model_valid=is_model_valid,
            rng=1,
        )
    assert_equal(model, None)
    assert_equal(inliers, None)
|
||||
|
||||
|
||||
def test_ransac_dynamic_max_trials():
    """Check computed trial counts against the published RANSAC table."""
    # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    # Hartley, R.~I. and Zisserman, A., 2004,
    # Multiple View Geometry in Computer Vision, Second Edition,
    # Cambridge University Press, ISBN: 0521540518

    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
    assert_equal(_dynamic_max_trials(100, 100, 2, 1), 1)

    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    assert_equal(_dynamic_max_trials(95, 100, 2, 1), 16)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    assert_equal(_dynamic_max_trials(90, 100, 2, 1), 22)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    assert_equal(_dynamic_max_trials(70, 100, 2, 1), 54)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
    assert_equal(_dynamic_max_trials(50, 100, 2, 1), 126)

    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    assert_equal(_dynamic_max_trials(95, 100, 8, 1), 34)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    assert_equal(_dynamic_max_trials(90, 100, 8, 1), 65)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    assert_equal(_dynamic_max_trials(70, 100, 8, 1), 608)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    assert_equal(_dynamic_max_trials(50, 100, 8, 1), 9210)

    # degenerate stop probabilities with min_samples = 5:
    # probability 0 requires no trials; probability 1 a practically
    # unbounded number (the original "e = 0%" comment here was misleading)
    assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 5, 1), 360436504051)
|
||||
|
||||
|
||||
def test_ransac_invalid_input():
    """Out-of-range arguments to `ransac` must raise ValueError."""
    # `residual_threshold` must be greater than zero
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2, residual_threshold=-0.5)
    # `max_trials` must be greater than zero
    with testing.raises(ValueError):
        ransac(
            np.zeros((10, 2)), None, min_samples=2, residual_threshold=0, max_trials=-1
        )
    # `stop_probability` must be in range (0, 1)
    with testing.raises(ValueError):
        ransac(
            np.zeros((10, 2)),
            None,
            min_samples=2,
            residual_threshold=0,
            stop_probability=-1,
        )
    # `stop_probability` must be in range (0, 1)
    with testing.raises(ValueError):
        ransac(
            np.zeros((10, 2)),
            None,
            min_samples=2,
            residual_threshold=0,
            stop_probability=1.01,
        )
    # `min_samples` as ratio must be in range (0, nb)
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=0, residual_threshold=0)
    # `min_samples` as ratio must be in range (0, nb]
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=11, residual_threshold=0)
    # `min_samples` must be greater than zero
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=-1, residual_threshold=0)
|
||||
|
||||
|
||||
def test_ransac_sample_duplicates():
    """Each RANSAC sample must draw distinct data points (no replacement)."""

    class DummyModel:
        """Dummy model to check for duplicates."""

        def estimate(self, data):
            # Assert that all data points are unique.
            assert_equal(np.unique(data).size, data.size)
            return True

        def residuals(self, data):
            # constant residuals above the threshold -> never any inliers
            return np.ones(len(data), dtype=np.float64)

    # Create dataset with four unique points. Force 10 iterations
    # and check that there are no duplicated data points.
    data = np.arange(4)
    with expected_warnings(["No inliers found"]):
        ransac(data, DummyModel, min_samples=3, residual_threshold=0.0, max_trials=10)
|
||||
|
||||
|
||||
def test_ransac_with_no_final_inliers():
    """When no consensus set survives, ransac warns and returns (None, None)."""
    # NOTE(review): data is unseeded; with residual_threshold=0 random points
    # should never all be inliers, but consider seeding for determinism.
    data = np.random.rand(5, 2)
    with expected_warnings(['No inliers found. Model not fitted']):
        model, inliers = ransac(
            data,
            model_class=LineModelND,
            min_samples=3,
            residual_threshold=0,
            rng=1523427,
        )
    assert inliers is None
    assert model is None
|
||||
|
||||
|
||||
def test_ransac_non_valid_best_model():
    """Example from GH issue #5572"""

    def is_model_valid(model, *random_data) -> bool:
        """Allow models with a maximum of 10 degree tilt from the vertical"""
        tilt = abs(np.arccos(np.dot(model.params[1], [0, 0, 1])))
        return tilt <= (10 / 180 * np.pi)

    # noisy, mostly-vertical line: some candidate models exceed the tilt limit
    rng = np.random.RandomState(1)
    data = np.linspace([0, 0, 0], [0.3, 0, 1], 1000) + rng.rand(1000, 3) - 0.5
    # ransac must warn (not crash) when the best model fails is_model_valid
    with expected_warnings(["Estimated model is not valid"]):
        ransac(
            data,
            LineModelND,
            min_samples=2,
            residual_threshold=0.3,
            max_trials=50,
            rng=0,
            is_model_valid=is_model_valid,
        )
|
||||
59
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_label.py
vendored
Normal file
59
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_label.py
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
from skimage import data
|
||||
from skimage.measure._label import _label_bool, label
|
||||
from skimage.measure._ccomp import label_cython as clabel
|
||||
|
||||
from skimage._shared import testing
|
||||
|
||||
# In this testsuite, we ensure that the results provided by
|
||||
# label_cython are identical to the one from _label_bool,
|
||||
# which is based on ndimage.
|
||||
|
||||
|
||||
def test_no_option():
    """label_cython matches the ndimage-based _label_bool with defaults."""
    img = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    l_ndi = _label_bool(img)
    l_cy = clabel(img)
    testing.assert_equal(l_ndi, l_cy)
|
||||
|
||||
|
||||
def test_background():
    """Both labeling backends agree for background values 0 and 1."""
    img = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    for bg in (0, 1):
        l_ndi = _label_bool(img, background=bg)
        l_cy = clabel(img, background=bg)
        testing.assert_equal(l_ndi, l_cy)
|
||||
|
||||
|
||||
def test_return_num():
    """Both backends agree on (labels, num) when return_num=True."""
    img = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    l_ndi = _label_bool(img, return_num=True)
    l_cy = clabel(img, return_num=True)
    testing.assert_equal(l_ndi, l_cy)
|
||||
|
||||
|
||||
def test_connectivity():
    """Backends agree for valid connectivities; invalid ones raise."""
    img = data.binary_blobs(length=128, blob_size_fraction=0.15, n_dim=3)
    for c in (1, 2, 3):
        l_ndi = _label_bool(img, connectivity=c)
        l_cy = clabel(img, connectivity=c)
        testing.assert_equal(l_ndi, l_cy)

    # connectivity must lie in [1, ndim]; 0 and 4 are out of range for 3D
    for c in (0, 4):
        with pytest.raises(ValueError):
            l_ndi = _label_bool(img, connectivity=c)
        with pytest.raises(ValueError):
            l_cy = clabel(img, connectivity=c)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("dtype", [bool, int])
def test_zero_size(dtype):
    """Labeling a zero-sized array keeps the shape and yields num == 0."""
    img = np.ones((300, 0, 300), dtype=dtype)
    lab, num = label(img, return_num=True)

    assert lab.shape == img.shape
    assert num == 0
|
||||
184
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_marching_cubes.py
vendored
Normal file
184
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_marching_cubes.py
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_allclose
|
||||
|
||||
from skimage.draw import ellipsoid, ellipsoid_stats
|
||||
from skimage.measure import marching_cubes, mesh_surface_area
|
||||
|
||||
|
||||
def test_marching_cubes_isotropic():
    """Both methods reproduce an ellipsoid's surface area within 1%."""
    ellipsoid_isotropic = ellipsoid(6, 10, 16, levelset=True)
    _, surf = ellipsoid_stats(6, 10, 16)

    # Classic
    verts, faces = marching_cubes(ellipsoid_isotropic, 0.0, method='lorensen')[:2]
    surf_calc = mesh_surface_area(verts, faces)
    # Test within 1% tolerance for isotropic. Will always underestimate.
    assert surf > surf_calc and surf_calc > surf * 0.99

    # Lewiner
    verts, faces = marching_cubes(ellipsoid_isotropic, 0.0)[:2]
    surf_calc = mesh_surface_area(verts, faces)
    # Test within 1% tolerance for isotropic. Will always underestimate.
    assert surf > surf_calc and surf_calc > surf * 0.99
|
||||
|
||||
|
||||
def test_marching_cubes_anisotropic():
    """Surface area stays within 1.5% with anisotropic voxel spacing."""
    # test spacing as numpy array (and not just tuple)
    spacing = np.array([1.0, 10 / 6.0, 16 / 6.0])
    ellipsoid_anisotropic = ellipsoid(6, 10, 16, spacing=spacing, levelset=True)
    _, surf = ellipsoid_stats(6, 10, 16)

    # Classic
    verts, faces = marching_cubes(
        ellipsoid_anisotropic, 0.0, spacing=spacing, method='lorensen'
    )[:2]
    surf_calc = mesh_surface_area(verts, faces)
    # Test within 1.5% tolerance for anisotropic. Will always underestimate.
    assert surf > surf_calc and surf_calc > surf * 0.985

    # Lewiner
    verts, faces = marching_cubes(ellipsoid_anisotropic, 0.0, spacing=spacing)[:2]
    surf_calc = mesh_surface_area(verts, faces)
    # Test within 1.5% tolerance for anisotropic. Will always underestimate.
    assert surf > surf_calc and surf_calc > surf * 0.985

    # Test marching cube with mask: an empty mask must raise
    with pytest.raises(ValueError):
        verts, faces = marching_cubes(
            ellipsoid_anisotropic, 0.0, spacing=spacing, mask=np.array([])
        )[:2]

    # Test spacing together with allow_degenerate=False
    marching_cubes(ellipsoid_anisotropic, 0, spacing=spacing, allow_degenerate=False)
|
||||
|
||||
|
||||
def test_invalid_input():
    """Bad volumes, levels, spacings, and method names must raise."""
    # Classic
    with pytest.raises(ValueError):
        marching_cubes(np.zeros((2, 2, 1)), 0, method='lorensen')
    with pytest.raises(ValueError):
        marching_cubes(np.zeros((2, 2, 1)), 1, method='lorensen')
    with pytest.raises(ValueError):
        marching_cubes(np.ones((3, 3, 3)), 1, spacing=(1, 2), method='lorensen')
    with pytest.raises(ValueError):
        marching_cubes(np.zeros((20, 20)), 0, method='lorensen')

    # Lewiner
    with pytest.raises(ValueError):
        marching_cubes(np.zeros((2, 2, 1)), 0)
    with pytest.raises(ValueError):
        marching_cubes(np.zeros((2, 2, 1)), 1)
    with pytest.raises(ValueError):
        marching_cubes(np.ones((3, 3, 3)), 1, spacing=(1, 2))
    with pytest.raises(ValueError):
        marching_cubes(np.zeros((20, 20)), 0)

    # invalid method name
    ellipsoid_isotropic = ellipsoid(6, 10, 16, levelset=True)
    with pytest.raises(ValueError):
        marching_cubes(ellipsoid_isotropic, 0.0, method='abcd')
|
||||
|
||||
|
||||
def test_both_algs_same_result_ellipse():
    """Lorensen and Lewiner agree on an unambiguous small sphere."""
    # Performing this test on data that does not have ambiguities
    sphere_small = ellipsoid(1, 1, 1, levelset=True)

    vertices1, faces1 = marching_cubes(sphere_small, 0, allow_degenerate=False)[:2]
    vertices2, faces2 = marching_cubes(
        sphere_small, 0, allow_degenerate=False, method='lorensen'
    )[:2]

    # Order is different, best we can do is test equal shape and same
    # vertices present
    assert _same_mesh(vertices1, faces1, vertices2, faces2)
|
||||
|
||||
|
||||
def _same_mesh(vertices1, faces1, vertices2, faces2, tol=1e-10):
|
||||
"""Compare two meshes, using a certain tolerance and invariant to
|
||||
the order of the faces.
|
||||
"""
|
||||
# Unwind vertices
|
||||
triangles1 = vertices1[np.array(faces1)]
|
||||
triangles2 = vertices2[np.array(faces2)]
|
||||
# Sort vertices within each triangle
|
||||
triang1 = [np.concatenate(sorted(t, key=lambda x: tuple(x))) for t in triangles1]
|
||||
triang2 = [np.concatenate(sorted(t, key=lambda x: tuple(x))) for t in triangles2]
|
||||
# Sort the resulting 9-element "tuples"
|
||||
triang1 = np.array(sorted([tuple(x) for x in triang1]))
|
||||
triang2 = np.array(sorted([tuple(x) for x in triang2]))
|
||||
return triang1.shape == triang2.shape and np.allclose(triang1, triang2, 0, tol)
|
||||
|
||||
|
||||
def test_both_algs_same_result_donut():
    """Lorensen and Lewiner are asserted to DIFFER on this double torus."""
    # NOTE(review): the comment below appears copied from the ellipse test;
    # here the final assertion expects the two meshes NOT to match — confirm.
    # Performing this test on data that does not have ambiguities
    n = 48
    a, b = 2.5 / n, -1.25

    vol = np.empty((n, n, n), 'float32')
    for iz in range(vol.shape[0]):
        for iy in range(vol.shape[1]):
            for ix in range(vol.shape[2]):
                # Double-torii formula by Thomas Lewiner
                z, y, x = float(iz) * a + b, float(iy) * a + b, float(ix) * a + b
                vol[iz, iy, ix] = (
                    ((8 * x) ** 2 + (8 * y - 2) ** 2 + (8 * z) ** 2 + 16 - 1.85 * 1.85)
                    * (
                        (8 * x) ** 2
                        + (8 * y - 2) ** 2
                        + (8 * z) ** 2
                        + 16
                        - 1.85 * 1.85
                    )
                    - 64 * ((8 * x) ** 2 + (8 * y - 2) ** 2)
                ) * (
                    (
                        (8 * x) ** 2
                        + ((8 * y - 2) + 4) * ((8 * y - 2) + 4)
                        + (8 * z) ** 2
                        + 16
                        - 1.85 * 1.85
                    )
                    * (
                        (8 * x) ** 2
                        + ((8 * y - 2) + 4) * ((8 * y - 2) + 4)
                        + (8 * z) ** 2
                        + 16
                        - 1.85 * 1.85
                    )
                    - 64 * (((8 * y - 2) + 4) * ((8 * y - 2) + 4) + (8 * z) ** 2)
                ) + 1025

    vertices1, faces1 = marching_cubes(vol, 0, method='lorensen')[:2]
    vertices2, faces2 = marching_cubes(vol, 0)[:2]

    # Old and new alg are different
    assert not _same_mesh(vertices1, faces1, vertices2, faces2)
|
||||
|
||||
|
||||
def test_masked_marching_cubes():
    """Masking out part of the volume yields the expected partial surface."""
    ellipsoid_scalar = ellipsoid(6, 10, 16, levelset=True)
    mask = np.ones_like(ellipsoid_scalar, dtype=bool)
    # exclude two slabs of the volume from the mesh
    mask[:10, :, :] = False
    mask[:, :, 20:] = False
    ver, faces, _, _ = marching_cubes(ellipsoid_scalar, 0, mask=mask)
    area = mesh_surface_area(ver, faces)

    # reference value for this particular mask configuration
    assert_allclose(area, 299.56878662109375, rtol=0.01)
|
||||
|
||||
|
||||
def test_masked_marching_cubes_empty():
    """An empty mask array must raise ValueError."""
    ellipsoid_scalar = ellipsoid(6, 10, 16, levelset=True)
    mask = np.array([])
    with pytest.raises(ValueError):
        _ = marching_cubes(ellipsoid_scalar, 0, mask=mask)
|
||||
|
||||
|
||||
def test_masked_marching_cubes_all_true():
    """An all-True mask must reproduce the unmasked result exactly."""
    ellipsoid_scalar = ellipsoid(6, 10, 16, levelset=True)
    mask = np.ones_like(ellipsoid_scalar, dtype=bool)
    ver_m, faces_m, _, _ = marching_cubes(ellipsoid_scalar, 0, mask=mask)
    # Bug fix: the reference run must be UNMASKED — previously both calls
    # passed the same mask, so the comparison was vacuous and could never
    # catch a divergence between the masked and unmasked code paths.
    ver, faces, _, _ = marching_cubes(ellipsoid_scalar, 0)
    assert_allclose(ver_m, ver, rtol=0.00001)
    assert_allclose(faces_m, faces, rtol=0.00001)
|
||||
360
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_moments.py
vendored
Normal file
360
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_moments.py
vendored
Normal file
@@ -0,0 +1,360 @@
|
||||
import itertools
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from scipy import ndimage as ndi
|
||||
|
||||
from skimage import draw
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_allclose, assert_almost_equal, assert_equal
|
||||
from skimage._shared.utils import _supported_float_type
|
||||
from skimage.measure import (
|
||||
centroid,
|
||||
inertia_tensor,
|
||||
inertia_tensor_eigvals,
|
||||
moments,
|
||||
moments_central,
|
||||
moments_coords,
|
||||
moments_coords_central,
|
||||
moments_hu,
|
||||
moments_normalized,
|
||||
)
|
||||
|
||||
|
||||
def compare_moments(m1, m2, thresh=1e-8):
    """Assert that two moments arrays agree to within a relative tolerance.

    Compares only values in the upper-left triangle of m1, m2 since values
    below the diagonal exceed the specified order and are not computed when
    the analytical computation is used.

    Also, the first-order central moments will be exactly zero with the
    analytical calculation, but will not be zero due to limited floating
    point precision when using a numerical computation. Here we just specify
    the tolerance as a fraction of the maximum absolute value in the moments
    array.

    Parameters
    ----------
    m1, m2 : ndarray
        Moments arrays of identical shape.
    thresh : float, optional
        Maximum allowed per-entry difference relative to ``max(|m1|)``.
    """
    m1 = m1.copy()
    m2 = m2.copy()

    # make sure location of any NaN values match and then ignore the NaN
    # values in the subsequent comparisons
    nan_idx1 = np.where(np.isnan(m1.ravel()))[0]
    nan_idx2 = np.where(np.isnan(m2.ravel()))[0]
    assert len(nan_idx1) == len(nan_idx2)
    assert np.all(nan_idx1 == nan_idx2)
    m1[np.isnan(m1)] = 0
    m2[np.isnan(m2)] = 0

    # Robustness fix: an all-zero m1 previously raised ValueError from an
    # empty `.max()`. Fall back to 1.0 so the check degrades gracefully to
    # an absolute-difference comparison.
    nonzero = np.abs(m1[m1 != 0])
    max_val = nonzero.max() if nonzero.size else 1.0
    for orders in itertools.product(*((range(m1.shape[0]),) * m1.ndim)):
        if sum(orders) > m1.shape[0] - 1:
            # entries beyond the requested order are not computed analytically
            m1[orders] = 0
            m2[orders] = 0
            continue
        abs_diff = abs(m1[orders] - m2[orders])
        rel_diff = abs_diff / max_val
        assert rel_diff < thresh
|
||||
|
||||
|
||||
@pytest.mark.parametrize('anisotropic', [False, True, None])
def test_moments(anisotropic):
    """Raw moments recover total mass and centroid, with/without spacing."""
    image = np.zeros((20, 20), dtype=np.float64)
    # 2x2 cluster of weights totalling 3, centred at (14.5, 14.5)
    image[14, 14] = image[15, 15] = 1
    image[14, 15] = image[15, 14] = 0.5
    spacing = (1.4, 2) if anisotropic else (1, 1)
    if anisotropic is None:
        m = moments(image)
    else:
        m = moments(image, spacing=spacing)
    assert_equal(m[0, 0], 3)
    assert_almost_equal(m[1, 0] / m[0, 0], 14.5 * spacing[0])
    assert_almost_equal(m[0, 1] / m[0, 0], 14.5 * spacing[1])
|
||||
|
||||
|
||||
@pytest.mark.parametrize('anisotropic', [False, True, None])
def test_moments_central(anisotropic):
    """Central moments are translation invariant; default centroid matches."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[14, 14] = image[15, 15] = 1
    image[14, 15] = image[15, 14] = 0.5
    spacing = (2, 1) if anisotropic else (1, 1)
    if anisotropic is None:
        mu = moments_central(image, (14.5, 14.5))
        # check for proper centroid computation
        mu_calc_centroid = moments_central(image)
    else:
        mu = moments_central(
            image, (14.5 * spacing[0], 14.5 * spacing[1]), spacing=spacing
        )
        # check for proper centroid computation
        mu_calc_centroid = moments_central(image, spacing=spacing)

    compare_moments(mu, mu_calc_centroid, thresh=1e-14)

    # same cluster shifted by dx=2, dy=2
    shifted = np.zeros((20, 20), dtype=np.double)
    shifted[16, 16] = shifted[17, 17] = 1
    shifted[16, 17] = shifted[17, 16] = 0.5
    if anisotropic is None:
        mu2 = moments_central(shifted, (16.5, 16.5))
    else:
        mu2 = moments_central(
            shifted, (16.5 * spacing[0], 16.5 * spacing[1]), spacing=spacing
        )
    # central moments must be translation invariant
    compare_moments(mu, mu2, thresh=1e-14)
|
||||
|
||||
|
||||
def test_moments_coords():
    """Moments computed from a coordinate list match the raster result."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    expected = moments(image)

    pts = np.array(
        [[r, c] for r in range(13, 17) for c in range(13, 17)], dtype=np.float64
    )
    assert_almost_equal(moments_coords(pts), expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_moments_coords_dtype(dtype):
    """Output dtype follows the supported float type of the input."""
    image = np.zeros((20, 20), dtype=dtype)
    image[13:17, 13:17] = 1

    out_dtype = _supported_float_type(dtype)
    mu_image = moments(image)
    assert mu_image.dtype == out_dtype

    pts = np.array(
        [[r, c] for r in range(13, 17) for c in range(13, 17)], dtype=dtype
    )
    mu_coords = moments_coords(pts)
    assert mu_coords.dtype == out_dtype

    # both code paths must also agree numerically
    assert_almost_equal(mu_coords, mu_image)
|
||||
|
||||
|
||||
def test_moments_central_coords():
    """Central moments of coordinates match the image-based computation."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    mu_image = moments_central(image, (14.5, 14.5))

    pts = np.array(
        [[r, c] for r in range(13, 17) for c in range(13, 17)], dtype=np.float64
    )
    mu_coords = moments_coords_central(pts, (14.5, 14.5))
    assert_almost_equal(mu_coords, mu_image)

    # omitting the center must compute the centroid internally
    assert_almost_equal(moments_coords_central(pts), mu_coords)

    # same square shifted by dx=3, dy=3, measured about the original center
    image = np.zeros((20, 20), dtype=np.float64)
    image[16:20, 16:20] = 1
    mu_image = moments_central(image, (14.5, 14.5))

    pts = np.array(
        [[r, c] for r in range(16, 20) for c in range(16, 20)], dtype=np.float64
    )
    assert_almost_equal(moments_coords_central(pts, (14.5, 14.5)), mu_image)
|
||||
|
||||
|
||||
def test_moments_normalized():
    """Normalized moments are invariant to translation and scale."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    nu = moments_normalized(moments_central(image, (14.5, 14.5)))

    # shift by dx=-2, dy=-2, halve the non-zero extent, scale amplitude by 0.7
    image2 = np.zeros((20, 20), dtype=np.float64)
    image2[11:13, 11:13] = 0.7
    nu2 = moments_normalized(moments_central(image2, (11.5, 11.5)))

    assert_almost_equal(nu, nu2, decimal=1)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('anisotropic', [False, True])
def test_moments_normalized_spacing(anisotropic):
    """Normalized moments are invariant to the absolute scale of spacing."""
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1

    if anisotropic:
        spacing1, spacing2 = (1, 2), (2, 4)
    else:
        spacing1, spacing2 = (1, 1), (3, 3)

    nu = moments_normalized(
        moments_central(image, spacing=spacing1), spacing=spacing1
    )
    nu2 = moments_normalized(
        moments_central(image, spacing=spacing2), spacing=spacing2
    )

    compare_moments(nu, nu2)
|
||||
|
||||
|
||||
def test_moments_normalized_3d():
    """3D normalized moments reflect an ellipsoid's axis elongation."""
    image = draw.ellipsoid(1, 1, 10)
    mu_image = moments_central(image)
    nu = moments_normalized(mu_image)
    # elongated along the last axis; symmetric in the first two
    assert nu[0, 0, 2] > nu[0, 2, 0]
    assert_almost_equal(nu[0, 2, 0], nu[2, 0, 0])

    coords = np.where(image)
    assert_almost_equal(mu_image, moments_coords_central(coords))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.uint8, np.int32, np.float32, np.float64])
@pytest.mark.parametrize('order', [1, 2, 3, 4])
@pytest.mark.parametrize('ndim', [2, 3, 4])
def test_analytical_moments_calculation(dtype, order, ndim):
    """Analytical central moments agree with the numerical code path."""
    special_shapes = {2: (256, 256), 3: (64, 64, 64)}
    shape = special_shapes.get(ndim, (16,) * ndim)
    rng = np.random.default_rng(1234)
    if np.dtype(dtype).kind in 'iu':
        x = rng.integers(0, 256, shape, dtype=dtype)
    else:
        x = rng.standard_normal(shape, dtype=dtype)
    # setting center=None will use the analytical expressions
    m1 = moments_central(x, center=None, order=order)
    # providing explicit centroid will bypass the analytical code path
    m2 = moments_central(x, center=centroid(x), order=order)
    # ensure numeric and analytical central moments are close
    # TODO: np 2 failed w/ thresh = 1e-4
    thresh = 1.5e-4 if x.dtype == np.float32 else 1e-9
    compare_moments(m1, m2, thresh=thresh)
|
||||
|
||||
|
||||
def test_moments_normalized_invalid():
    """Orders outside the supported range are rejected with ValueError."""
    for order in (3, 4):
        with testing.raises(ValueError):
            moments_normalized(np.zeros((3, 3)), order)
|
||||
|
||||
|
||||
def test_moments_hu():
    """Hu moments are invariant to translation, scale and 90-degree rotation."""
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:15, 13:17] = 1
    hu = moments_hu(moments_normalized(moments_central(image, (13.5, 14.5))))

    # shift image by dx=2, dy=3, scale by 0.5 and rotate by 90deg
    image2 = np.zeros((20, 20), dtype=np.float64)
    image2[11, 11:13] = 1
    image2 = image2.T
    hu2 = moments_hu(moments_normalized(moments_central(image2, (11.5, 11))))

    assert_almost_equal(hu, hu2, decimal=1)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_moments_dtype(dtype):
    """The whole moments pipeline preserves the supported float type."""
    image = np.zeros((20, 20), dtype=dtype)
    image[13:15, 13:17] = 1

    out_dtype = _supported_float_type(dtype)

    mu = moments_central(image, (13.5, 14.5))
    assert mu.dtype == out_dtype

    nu = moments_normalized(mu)
    assert nu.dtype == out_dtype

    hu = moments_hu(nu)
    assert hu.dtype == out_dtype
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_centroid(dtype):
    """Weighted centroid is correct within a dtype-dependent tolerance."""
    image = np.zeros((20, 20), dtype=dtype)
    image[14, 14:16] = 1
    image[15, 14:16] = 1 / 3
    image_centroid = centroid(image)
    # lower precision dtypes get a looser tolerance
    rtols = {np.float16: 1e-3, np.float32: 1e-5}
    rtol = rtols.get(dtype, 1e-7)
    assert_allclose(image_centroid, (14.25, 14.5), rtol=rtol)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_inertia_tensor_2d(dtype):
    """Inertia tensor of an axis-aligned rectangle has the expected structure."""
    image = np.zeros((40, 40), dtype=dtype)
    image[15:25, 5:35] = 1  # big horizontal rectangle (aligned with axis 1)
    out_dtype = _supported_float_type(image.dtype)

    T = inertia_tensor(image)
    assert T.dtype == out_dtype
    # mass spreads further along axis 1, and the off-diagonal term vanishes
    assert T[0, 0] > T[1, 1]
    np.testing.assert_allclose(T[0, 1], 0)

    v0, v1 = inertia_tensor_eigvals(image, T=T)
    assert v0.dtype == out_dtype
    assert v1.dtype == out_dtype
    # aspect ratio 30:10 -> sqrt of eigenvalue ratio ~ 3
    np.testing.assert_allclose(np.sqrt(v0 / v1), 3, rtol=0.01, atol=0.05)
|
||||
|
||||
|
||||
def test_inertia_tensor_3d():
    """The principal axis of an ellipsoid follows a rotation of the volume."""
    image = draw.ellipsoid(10, 5, 3)
    T0 = inertia_tensor(image)
    eig0, V0 = np.linalg.eig(T0)
    # principal axis of ellipse = eigenvector of smallest eigenvalue
    v0 = V0[:, np.argmin(eig0)]

    assert np.allclose(v0, [1, 0, 0]) or np.allclose(-v0, [1, 0, 0])

    imrot = ndi.rotate(image.astype(float), 30, axes=(0, 1), order=1)
    eigr, Vr = np.linalg.eig(inertia_tensor(imrot))
    vr = Vr[:, np.argmin(eigr)]

    # Check that axis has rotated by expected amount (sign is arbitrary)
    angle = np.pi / 6
    R = np.array(
        [
            [np.cos(angle), -np.sin(angle), 0],
            [np.sin(angle), np.cos(angle), 0],
            [0, 0, 1],
        ]
    )
    expected_vr = R @ v0
    tol = dict(atol=1e-3, rtol=0.01)
    assert np.allclose(vr, expected_vr, **tol) or np.allclose(-vr, expected_vr, **tol)
|
||||
|
||||
|
||||
def test_inertia_tensor_eigvals():
    """Eigenvalues stay non-negative despite floating-point round-off.

    Floating point precision problems could make a positive semidefinite
    matrix have an eigenvalue that is very slightly negative. Check that we
    have caught and fixed this problem.
    """
    image = np.zeros((3, 15), dtype=int)
    image[0, 0] = 1
    image[1, 7] = 1
    image[2, 14] = 1
    # mu = np.array([[3, 0, 98], [0, 14, 0], [2, 0, 98]])
    eigvals = inertia_tensor_eigvals(image=image)
    assert min(eigvals) >= 0
|
||||
44
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_pnpoly.py
vendored
Normal file
44
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_pnpoly.py
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
import numpy as np
|
||||
from skimage.measure import points_in_poly, grid_points_in_poly
|
||||
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
|
||||
|
||||
class TestNpnpoly:
    """Point-in-polygon membership tests for simple convex shapes."""

    def test_square(self):
        vertices = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])
        # center is inside; a point left of the square is not
        assert points_in_poly([[0.5, 0.5]], vertices)[0]
        assert not points_in_poly([[-0.1, 0.1]], vertices)[0]

    def test_triangle(self):
        vertices = np.array([[0, 0], [1, 0], [0.5, 0.75]])
        # just below the apex is inside; above or beside it is not
        assert points_in_poly([[0.5, 0.7]], vertices)[0]
        assert not points_in_poly([[0.5, 0.76]], vertices)[0]
        assert not points_in_poly([[0.7, 0.5]], vertices)[0]

    def test_type(self):
        # the result is a boolean array
        assert points_in_poly([[0, 0]], [[0, 0]]).dtype == bool
|
||||
|
||||
|
||||
def test_grid_points_in_poly():
    """A right triangle rasterizes to a lower-triangular boolean grid."""
    triangle = np.array([[0, 0], [5, 0], [5, 5]])
    expected = np.tril(np.ones((5, 5), dtype=bool))
    assert_array_equal(grid_points_in_poly((5, 5), triangle), expected)
|
||||
|
||||
|
||||
def test_grid_points_in_poly_binarize():
    """With binarize=False the grid carries per-point labels, not booleans."""
    triangle = np.array([[0, 0], [5, 0], [5, 5]])

    expected = np.array(
        [
            [2, 0, 0, 0, 0],
            [3, 3, 0, 0, 0],
            [3, 1, 3, 0, 0],
            [3, 1, 1, 3, 0],
            [3, 1, 1, 1, 3],
        ]
    )

    assert_array_equal(grid_points_in_poly((5, 5), triangle, binarize=False), expected)
|
||||
72
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_polygon.py
vendored
Normal file
72
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_polygon.py
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
import numpy as np
|
||||
from skimage.measure import approximate_polygon, subdivide_polygon
|
||||
from skimage.measure._polygon import _SUBDIVISION_MASKS
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_array_equal, assert_equal
|
||||
|
||||
|
||||
# A closed 3x3 square traced one unit step per vertex; the first point is
# repeated at the end so the polygon is explicitly closed.
square = np.array(
    [[0, 0], [0, 1], [0, 2], [0, 3],
     [1, 3], [2, 3], [3, 3],
     [3, 2], [3, 1], [3, 0],
     [2, 0], [1, 0], [0, 0]]
)
|
||||
|
||||
|
||||
def test_approximate_polygon():
    """Polygon approximation keeps corners and drops collinear vertices."""
    out = approximate_polygon(square, 0.1)
    assert_array_equal(out, square[(0, 3, 6, 9, 12), :])

    # a large tolerance keeps only the extreme corners
    out = approximate_polygon(square, 2.2)
    assert_array_equal(out, square[(0, 6, 12), :])

    # a subsampled outline still approximates to the same corners
    out = approximate_polygon(square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :], 0.1)
    assert_array_equal(out, square[(0, 3, 6, 9, 12), :])

    # non-positive tolerance is a no-op
    for tolerance in (-1, 0):
        assert_array_equal(approximate_polygon(square, tolerance), square)
|
||||
|
||||
|
||||
def test_subdivide_polygon():
    """Check size/closure invariants of B-spline polygon subdivision.

    Runs ten rounds of subdivision over three variants of the square
    outline (circular, non-circular, non-circular with preserved ends),
    for every supported B-spline degree.
    """
    new_square1 = square
    new_square2 = square[:-1]
    new_square3 = square[:-1]
    # test iterative subdivision
    for _ in range(10):
        # feed the previous round's output back in as this round's input
        square1, square2, square3 = new_square1, new_square2, new_square3
        # test different B-Spline degrees
        for degree in range(1, 7):
            mask_len = len(_SUBDIVISION_MASKS[degree][0])
            # test circular: output stays closed and nearly doubles in size
            new_square1 = subdivide_polygon(square1, degree)
            assert_array_equal(new_square1[-1], new_square1[0])
            assert_equal(new_square1.shape[0], 2 * square1.shape[0] - 1)
            # test non-circular: size shrinks by the subdivision mask length
            new_square2 = subdivide_polygon(square2, degree)
            assert_equal(new_square2.shape[0], 2 * (square2.shape[0] - mask_len + 1))
            # test non-circular, preserve_ends: endpoints are kept verbatim
            new_square3 = subdivide_polygon(square3, degree, True)
            assert_equal(new_square3[0], square3[0])
            assert_equal(new_square3[-1], square3[-1])

            assert_equal(new_square3.shape[0], 2 * (square3.shape[0] - mask_len + 2))

    # not supported B-Spline degree
    with testing.raises(ValueError):
        subdivide_polygon(square, 0)
    with testing.raises(ValueError):
        subdivide_polygon(square, 8)
|
||||
284
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_profile.py
vendored
Normal file
284
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_profile.py
vendored
Normal file
@@ -0,0 +1,284 @@
|
||||
import numpy as np
|
||||
|
||||
from ..._shared.testing import assert_equal, assert_almost_equal
|
||||
from ..profile import profile_line
|
||||
|
||||
# 10x10 float test image whose pixel value encodes its raveled index
# (value at (r, c) is 10*r + c), so profiles are easy to predict.
image = np.arange(100, dtype=float).reshape((10, 10))
|
||||
|
||||
|
||||
def test_horizontal_rightward():
    """Left-to-right scan along row 0 samples columns 2..8."""
    prof = profile_line(image, (0, 2), (0, 8), order=0, mode='constant')
    assert_equal(prof, np.arange(2, 9))
|
||||
|
||||
|
||||
def test_horizontal_leftward():
    """Right-to-left scan along row 0 samples columns 8 down to 2."""
    prof = profile_line(image, (0, 8), (0, 2), order=0, mode='constant')
    assert_equal(prof, np.arange(8, 1, -1))
|
||||
|
||||
|
||||
def test_vertical_downward():
    """Top-to-bottom scan along column 5 advances one row (+10) per sample."""
    prof = profile_line(image, (2, 5), (8, 5), order=0, mode='constant')
    assert_equal(prof, np.arange(25, 95, 10))
|
||||
|
||||
|
||||
def test_vertical_upward():
    """Bottom-to-top scan along column 5 steps back one row (-10) per sample."""
    prof = profile_line(image, (8, 5), (2, 5), order=0, mode='constant')
    assert_equal(prof, np.arange(85, 15, -10))
|
||||
|
||||
|
||||
def test_45deg_right_downward():
    """Nearest-neighbour sampling on a 45-degree line shows aliasing repeats."""
    prof = profile_line(image, (2, 2), (8, 8), order=0, mode='constant')
    # Repeats are due to aliasing under nearest-neighbour interpolation:
    # picture a diagonal line with markers every unit of length crossing a
    # checkerboard of unit squares -- more than one marker can land in the
    # same square.
    expected = np.array([22, 33, 33, 44, 55, 55, 66, 77, 77, 88])
    assert_almost_equal(prof, expected)
|
||||
|
||||
|
||||
def test_45deg_right_downward_interpolated():
    """Linear interpolation along the diagonal yields evenly spaced values."""
    prof = profile_line(image, (2, 2), (8, 8), order=1, mode='constant')
    assert_almost_equal(prof, np.linspace(22, 88, 10))
|
||||
|
||||
|
||||
def test_45deg_right_upward():
    """Interpolated diagonal scan toward the upper-right decreases by 6."""
    prof = profile_line(image, (8, 2), (2, 8), order=1, mode='constant')
    assert_almost_equal(prof, np.arange(82, 27, -6))
|
||||
|
||||
|
||||
def test_45deg_left_upward():
    """Interpolated diagonal scan toward the upper-left decreases by 22/3."""
    prof = profile_line(image, (8, 8), (2, 2), order=1, mode='constant')
    assert_almost_equal(prof, np.arange(88, 21, -22.0 / 3))
|
||||
|
||||
|
||||
def test_45deg_left_downward():
    """Interpolated diagonal scan toward the lower-left increases by 6."""
    prof = profile_line(image, (2, 8), (8, 2), order=1, mode='constant')
    assert_almost_equal(prof, np.arange(28, 83, 6))
|
||||
|
||||
|
||||
def test_pythagorean_triangle_right_downward():
    """Nearest-neighbour sampling along a 3-4-5 slope hits the expected pixels."""
    prof = profile_line(image, (1, 1), (7, 9), order=0, mode='constant')
    expected = np.array([11, 22, 23, 33, 34, 45, 56, 57, 67, 68, 79])
    assert_equal(prof, expected)
|
||||
|
||||
|
||||
def test_pythagorean_triangle_right_downward_interpolated():
    """Linear interpolation along a 3-4-5 slope gives evenly spaced values."""
    prof = profile_line(image, (1, 1), (7, 9), order=1, mode='constant')
    assert_almost_equal(prof, np.linspace(11, 79, 11))
|
||||
|
||||
|
||||
# 6x7 fixture: a bright (1.8) band along a 3-4-5 diagonal flanked above and
# below by dimmer (0.6) bands, so a linewidth-3 profile along the band
# averages to exactly 1 at every sample.
pyth_image = np.zeros((6, 7), float)
line = ((1, 2, 2, 3, 3, 4), (1, 2, 3, 3, 4, 5))
below = ((2, 2, 3, 4, 4, 5), (0, 1, 2, 3, 4, 4))
above = ((0, 1, 1, 2, 3, 3), (2, 2, 3, 4, 5, 6))
pyth_image[line] = 1.8
pyth_image[below] = pyth_image[above] = 0.6
|
||||
|
||||
|
||||
def test_pythagorean_triangle_right_downward_linewidth():
    """A width-3 profile along the bright band averages to 1 at each sample."""
    prof = profile_line(
        pyth_image, (1, 1), (4, 5), linewidth=3, order=0, mode='constant'
    )
    assert_almost_equal(prof, np.ones(6))
|
||||
|
||||
|
||||
def test_pythagorean_triangle_right_upward_linewidth():
    """Same width-3 invariant holds on the vertically flipped fixture."""
    prof = profile_line(
        pyth_image[::-1, :], (4, 1), (1, 5), linewidth=3, order=0, mode='constant'
    )
    assert_almost_equal(prof, np.ones(6))
|
||||
|
||||
|
||||
def test_pythagorean_triangle_transpose_left_down_linewidth():
    """Same width-3 invariant holds on the transposed, flipped fixture."""
    prof = profile_line(
        pyth_image.T[:, ::-1], (1, 4), (5, 1), linewidth=3, order=0, mode='constant'
    )
    assert_almost_equal(prof, np.ones(6))
|
||||
|
||||
|
||||
def test_reduce_func_mean():
    """np.mean across a width-3 band equals the row-wise mean of those columns."""
    prof = profile_line(
        pyth_image,
        (0, 1),
        (3, 1),
        linewidth=3,
        order=0,
        reduce_func=np.mean,
        mode='reflect',
    )
    assert_almost_equal(prof, pyth_image[:4, :3].mean(1))
|
||||
|
||||
|
||||
def test_reduce_func_max():
    """np.max across a width-3 band equals the row-wise max of those columns."""
    prof = profile_line(
        pyth_image,
        (0, 1),
        (3, 1),
        linewidth=3,
        order=0,
        reduce_func=np.max,
        mode='reflect',
    )
    assert_almost_equal(prof, pyth_image[:4, :3].max(1))
|
||||
|
||||
|
||||
def test_reduce_func_sum():
    """np.sum across a width-3 band equals the row-wise sum of those columns."""
    prof = profile_line(
        pyth_image,
        (0, 1),
        (3, 1),
        linewidth=3,
        order=0,
        reduce_func=np.sum,
        mode='reflect',
    )
    assert_almost_equal(prof, pyth_image[:4, :3].sum(1))
|
||||
|
||||
|
||||
def test_reduce_func_mean_linewidth_1():
    """With linewidth=1, reducing a single sample returns the sample itself."""
    prof = profile_line(
        pyth_image,
        (0, 1),
        (3, 1),
        linewidth=1,
        order=0,
        reduce_func=np.mean,
        mode='constant',
    )
    assert_almost_equal(prof, pyth_image[:4, 1])
|
||||
|
||||
|
||||
def test_reduce_func_None_linewidth_1():
    """reduce_func=None returns raw samples as a (n, linewidth) array."""
    prof = profile_line(
        pyth_image,
        (1, 2),
        (4, 2),
        linewidth=1,
        order=0,
        reduce_func=None,
        mode='constant',
    )
    assert_almost_equal(prof, pyth_image[1:5, 2, np.newaxis])
|
||||
|
||||
|
||||
def test_reduce_func_None_linewidth_3():
    """reduce_func=None with linewidth=3 returns the unreduced 3-wide band."""
    prof = profile_line(
        pyth_image,
        (1, 2),
        (4, 2),
        linewidth=3,
        order=0,
        reduce_func=None,
        mode='constant',
    )
    assert_almost_equal(prof, pyth_image[1:5, 1:4])
|
||||
|
||||
|
||||
def test_reduce_func_lambda_linewidth_3():
    """A custom element-wise reduce_func is applied along the width axis."""

    def reduce_func(x):
        # element-wise: returns an array, not a scalar
        return x + x**2

    prof = profile_line(
        pyth_image,
        (1, 2),
        (4, 2),
        linewidth=3,
        order=0,
        reduce_func=reduce_func,
        mode='constant',
    )
    expected = np.apply_along_axis(reduce_func, arr=pyth_image[1:5, 1:4], axis=1)
    assert_almost_equal(prof, expected)
|
||||
|
||||
|
||||
def test_reduce_func_sqrt_linewidth_3():
    """An element-wise square-root reduce_func is applied along the width axis."""

    def reduce_func(x):
        return x**0.5

    prof = profile_line(
        pyth_image,
        (1, 2),
        (4, 2),
        linewidth=3,
        order=0,
        reduce_func=reduce_func,
        mode='constant',
    )
    expected = np.apply_along_axis(reduce_func, arr=pyth_image[1:5, 1:4], axis=1)
    assert_almost_equal(prof, expected)
|
||||
|
||||
|
||||
def test_reduce_func_sumofsqrt_linewidth_3():
    """A scalar-returning reduce_func collapses each width-3 slice to a number."""

    def reduce_func(x):
        return np.sum(x**0.5)

    prof = profile_line(
        pyth_image,
        (1, 2),
        (4, 2),
        linewidth=3,
        order=0,
        reduce_func=reduce_func,
        mode='constant',
    )
    expected = np.apply_along_axis(reduce_func, arr=pyth_image[1:5, 1:4], axis=1)
    assert_almost_equal(prof, expected)
|
||||
|
||||
|
||||
def test_oob_coodinates():
    """Endpoints outside the image are zero-padded in 'constant' mode."""
    offset = 2
    idx = pyth_image.shape[0] + offset
    prof = profile_line(
        pyth_image,
        (-offset, 2),
        (idx, 2),
        linewidth=1,
        order=0,
        reduce_func=None,
        mode='constant',
    )
    # zeros before the image, the real column, then zeros after it
    expected = np.vstack(
        [np.zeros((offset, 1)), pyth_image[:, 2, np.newaxis], np.zeros((offset + 1, 1))]
    )
    assert_almost_equal(prof, expected)
|
||||
|
||||
|
||||
def test_bool_array_input():
    """A boolean image profiles identically to its uint8 equivalent."""
    shape = (200, 200)
    center_x, center_y = (140, 150)
    radius = 20
    x, y = np.meshgrid(range(shape[1]), range(shape[0]))
    # filled disc of True values
    mask = (y - center_y) ** 2 + (x - center_x) ** 2 < radius**2

    src = (center_y, center_x)
    phi = 4 * np.pi / 9.0
    dst = (center_y + 31 * np.cos(phi), center_x + 31 * np.sin(phi))

    profile_u8 = profile_line(mask.astype(np.uint8), src, dst, mode='reflect')
    assert all(profile_u8[:radius] == 1)

    profile_b = profile_line(mask, src, dst, mode='reflect')
    assert all(profile_b[:radius] == 1)

    assert all(profile_b == profile_u8)
|
||||
1536
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_regionprops.py
vendored
Normal file
1536
.CondaPkg/env/Lib/site-packages/skimage/measure/tests/test_regionprops.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user