using for loop to install conda package
This commit is contained in:
0
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__init__.py
vendored
Normal file
0
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__init__.py
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__pycache__/test_join.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__pycache__/test_join.cpython-311.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__pycache__/test_slic.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/__pycache__/test_slic.cpython-311.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
124
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_active_contour_model.py
vendored
Normal file
124
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_active_contour_model.py
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_equal, assert_allclose
|
||||
|
||||
from skimage import data
|
||||
from skimage._shared.utils import _supported_float_type
|
||||
from skimage.color import rgb2gray
|
||||
from skimage.filters import gaussian
|
||||
from skimage.segmentation import active_contour
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
|
||||
def test_periodic_reference(dtype):
|
||||
img = data.astronaut()
|
||||
img = rgb2gray(img)
|
||||
s = np.linspace(0, 2*np.pi, 400)
|
||||
r = 100 + 100*np.sin(s)
|
||||
c = 220 + 100*np.cos(s)
|
||||
init = np.array([r, c]).T
|
||||
img_smooth = gaussian(img, 3, preserve_range=False).astype(dtype, copy=False)
|
||||
snake = active_contour(img_smooth, init, alpha=0.015, beta=10,
|
||||
w_line=0, w_edge=1, gamma=0.001)
|
||||
assert snake.dtype == _supported_float_type(dtype)
|
||||
refr = [98, 99, 100, 101, 102, 103, 104, 105, 106, 108]
|
||||
refc = [299, 298, 298, 298, 298, 297, 297, 296, 296, 295]
|
||||
assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
|
||||
assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_fixed_reference(dtype):
|
||||
img = data.text()
|
||||
r = np.linspace(136, 50, 100)
|
||||
c = np.linspace(5, 424, 100)
|
||||
init = np.array([r, c]).T
|
||||
image_smooth = gaussian(img, 1, preserve_range=False).astype(dtype, copy=False)
|
||||
snake = active_contour(image_smooth, init, boundary_condition='fixed',
|
||||
alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
|
||||
assert snake.dtype == _supported_float_type(dtype)
|
||||
refr = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
|
||||
refc = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
|
||||
assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
|
||||
assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_free_reference(dtype):
|
||||
img = data.text()
|
||||
r = np.linspace(70, 40, 100)
|
||||
c = np.linspace(5, 424, 100)
|
||||
init = np.array([r, c]).T
|
||||
img_smooth = gaussian(img, 3, preserve_range=False).astype(dtype, copy=False)
|
||||
snake = active_contour(img_smooth, init, boundary_condition='free',
|
||||
alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
|
||||
assert snake.dtype == _supported_float_type(dtype)
|
||||
refr = [76, 76, 75, 74, 73, 72, 71, 70, 69, 69]
|
||||
refc = [10, 13, 16, 19, 23, 26, 29, 32, 36, 39]
|
||||
assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
|
||||
assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_RGB(dtype):
|
||||
img = gaussian(data.text(), 1, preserve_range=False)
|
||||
imgR = np.zeros((img.shape[0], img.shape[1], 3), dtype=dtype)
|
||||
imgG = np.zeros((img.shape[0], img.shape[1], 3), dtype=dtype)
|
||||
imgRGB = np.zeros((img.shape[0], img.shape[1], 3), dtype=dtype)
|
||||
imgR[:, :, 0] = img
|
||||
imgG[:, :, 1] = img
|
||||
imgRGB[:, :, :] = img[:, :, None]
|
||||
r = np.linspace(136, 50, 100)
|
||||
c = np.linspace(5, 424, 100)
|
||||
init = np.array([r, c]).T
|
||||
snake = active_contour(imgR, init, boundary_condition='fixed',
|
||||
alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
|
||||
float_dtype = _supported_float_type(dtype)
|
||||
assert snake.dtype == float_dtype
|
||||
refr = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
|
||||
refc = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
|
||||
assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
|
||||
assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
|
||||
snake = active_contour(imgG, init, boundary_condition='fixed',
|
||||
alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
|
||||
assert snake.dtype == float_dtype
|
||||
assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
|
||||
assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
|
||||
snake = active_contour(imgRGB, init, boundary_condition='fixed',
|
||||
alpha=0.1, beta=1.0, w_line=-5/3., w_edge=0,
|
||||
gamma=0.1)
|
||||
assert snake.dtype == float_dtype
|
||||
assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
|
||||
assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
|
||||
|
||||
|
||||
def test_end_points():
|
||||
img = data.astronaut()
|
||||
img = rgb2gray(img)
|
||||
s = np.linspace(0, 2*np.pi, 400)
|
||||
r = 100 + 100*np.sin(s)
|
||||
c = 220 + 100*np.cos(s)
|
||||
init = np.array([r, c]).T
|
||||
snake = active_contour(gaussian(img, 3), init,
|
||||
boundary_condition='periodic', alpha=0.015, beta=10,
|
||||
w_line=0, w_edge=1, gamma=0.001, max_num_iter=100)
|
||||
assert np.sum(np.abs(snake[0, :]-snake[-1, :])) < 2
|
||||
snake = active_contour(gaussian(img, 3), init,
|
||||
boundary_condition='free', alpha=0.015, beta=10,
|
||||
w_line=0, w_edge=1, gamma=0.001, max_num_iter=100)
|
||||
assert np.sum(np.abs(snake[0, :]-snake[-1, :])) > 2
|
||||
snake = active_contour(gaussian(img, 3), init,
|
||||
boundary_condition='fixed', alpha=0.015, beta=10,
|
||||
w_line=0, w_edge=1, gamma=0.001, max_num_iter=100)
|
||||
assert_allclose(snake[0, :], [r[0], c[0]], atol=1e-5)
|
||||
|
||||
|
||||
def test_bad_input():
|
||||
img = np.zeros((10, 10))
|
||||
r = np.linspace(136, 50, 100)
|
||||
c = np.linspace(5, 424, 100)
|
||||
init = np.array([r, c]).T
|
||||
with pytest.raises(ValueError):
|
||||
active_contour(img, init, boundary_condition='wrong')
|
||||
with pytest.raises(ValueError):
|
||||
active_contour(img, init, max_num_iter=-15)
|
||||
137
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_boundaries.py
vendored
Normal file
137
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_boundaries.py
vendored
Normal file
@@ -0,0 +1,137 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_array_equal, assert_allclose
|
||||
|
||||
from skimage._shared.utils import _supported_float_type
|
||||
from skimage.segmentation import find_boundaries, mark_boundaries
|
||||
|
||||
|
||||
white = (1, 1, 1)
|
||||
|
||||
|
||||
def test_find_boundaries():
|
||||
image = np.zeros((10, 10), dtype=np.uint8)
|
||||
image[2:7, 2:7] = 1
|
||||
|
||||
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
|
||||
|
||||
result = find_boundaries(image)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
def test_find_boundaries_bool():
|
||||
image = np.zeros((5, 5), dtype=bool)
|
||||
image[2:5, 2:5] = True
|
||||
|
||||
ref = np.array([[False, False, False, False, False],
|
||||
[False, False, True, True, True],
|
||||
[False, True, True, True, True],
|
||||
[False, True, True, False, False],
|
||||
[False, True, True, False, False]], dtype=bool)
|
||||
result = find_boundaries(image)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', [np.uint8, np.float16, np.float32, np.float64]
|
||||
)
|
||||
def test_mark_boundaries(dtype):
|
||||
image = np.zeros((10, 10), dtype=dtype)
|
||||
label_image = np.zeros((10, 10), dtype=np.uint8)
|
||||
label_image[2:7, 2:7] = 1
|
||||
|
||||
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
|
||||
|
||||
marked = mark_boundaries(image, label_image, color=white, mode='thick')
|
||||
assert marked.dtype == _supported_float_type(dtype)
|
||||
result = np.mean(marked, axis=-1)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
ref = np.array([[0, 2, 2, 2, 2, 2, 2, 2, 0, 0],
|
||||
[2, 2, 1, 1, 1, 1, 1, 2, 2, 0],
|
||||
[2, 1, 1, 1, 1, 1, 1, 1, 2, 0],
|
||||
[2, 1, 1, 2, 2, 2, 1, 1, 2, 0],
|
||||
[2, 1, 1, 2, 0, 2, 1, 1, 2, 0],
|
||||
[2, 1, 1, 2, 2, 2, 1, 1, 2, 0],
|
||||
[2, 1, 1, 1, 1, 1, 1, 1, 2, 0],
|
||||
[2, 2, 1, 1, 1, 1, 1, 2, 2, 0],
|
||||
[0, 2, 2, 2, 2, 2, 2, 2, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
|
||||
marked = mark_boundaries(image, label_image, color=white,
|
||||
outline_color=(2, 2, 2), mode='thick')
|
||||
result = np.mean(marked, axis=-1)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
def test_mark_boundaries_bool():
|
||||
image = np.zeros((10, 10), dtype=bool)
|
||||
label_image = np.zeros((10, 10), dtype=np.uint8)
|
||||
label_image[2:7, 2:7] = 1
|
||||
|
||||
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
|
||||
|
||||
marked = mark_boundaries(image, label_image, color=white, mode='thick')
|
||||
result = np.mean(marked, axis=-1)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
|
||||
def test_mark_boundaries_subpixel(dtype):
|
||||
labels = np.array([[0, 0, 0, 0],
|
||||
[0, 0, 5, 0],
|
||||
[0, 1, 5, 0],
|
||||
[0, 0, 5, 0],
|
||||
[0, 0, 0, 0]], dtype=np.uint8)
|
||||
np.random.seed(0)
|
||||
image = np.round(np.random.rand(*labels.shape), 2)
|
||||
image = image.astype(dtype, copy=False)
|
||||
marked = mark_boundaries(image, labels, color=white, mode='subpixel')
|
||||
assert marked.dtype == _supported_float_type(dtype)
|
||||
marked_proj = np.round(np.mean(marked, axis=-1), 2)
|
||||
|
||||
ref_result = np.array(
|
||||
[[ 0.55, 0.63, 0.72, 0.69, 0.6 , 0.55, 0.54],
|
||||
[ 0.45, 0.58, 0.72, 1. , 1. , 1. , 0.69],
|
||||
[ 0.42, 0.54, 0.65, 1. , 0.44, 1. , 0.89],
|
||||
[ 0.69, 1. , 1. , 1. , 0.69, 1. , 0.83],
|
||||
[ 0.96, 1. , 0.38, 1. , 0.79, 1. , 0.53],
|
||||
[ 0.89, 1. , 1. , 1. , 0.38, 1. , 0.16],
|
||||
[ 0.57, 0.78, 0.93, 1. , 0.07, 1. , 0.09],
|
||||
[ 0.2 , 0.52, 0.92, 1. , 1. , 1. , 0.54],
|
||||
[ 0.02, 0.35, 0.83, 0.9 , 0.78, 0.81, 0.87]])
|
||||
assert_allclose(marked_proj, ref_result, atol=0.01)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('mode', ['thick', 'inner', 'outer', 'subpixel'])
|
||||
def test_boundaries_constant_image(mode):
|
||||
"""A constant-valued image has not boundaries."""
|
||||
ones = np.ones((8, 8), dtype=int)
|
||||
b = find_boundaries(ones, mode=mode)
|
||||
assert np.all(b == 0)
|
||||
98
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_chan_vese.py
vendored
Normal file
98
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_chan_vese.py
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_array_equal
|
||||
|
||||
from skimage._shared.utils import _supported_float_type
|
||||
from skimage.segmentation import chan_vese
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_chan_vese_flat_level_set(dtype):
|
||||
# because the algorithm evolves the level set around the
|
||||
# zero-level, it the level-set has no zero level, the algorithm
|
||||
# will not produce results in theory. However, since a continuous
|
||||
# approximation of the delta function is used, the algorithm
|
||||
# still affects the entirety of the level-set. Therefore with
|
||||
# infinite time, the segmentation will still converge.
|
||||
img = np.zeros((10, 10), dtype=dtype)
|
||||
img[3:6, 3:6] = 1
|
||||
ls = np.full((10, 10), 1000, dtype=dtype)
|
||||
result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set=ls)
|
||||
assert_array_equal(result.astype(float), np.ones((10, 10)))
|
||||
result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set=-ls)
|
||||
assert_array_equal(result.astype(float), np.zeros((10, 10)))
|
||||
|
||||
|
||||
def test_chan_vese_small_disk_level_set():
|
||||
img = np.zeros((10, 10))
|
||||
img[3:6, 3:6] = 1
|
||||
result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set="small disk")
|
||||
assert_array_equal(result.astype(float), img)
|
||||
|
||||
|
||||
def test_chan_vese_simple_shape():
|
||||
img = np.zeros((10, 10))
|
||||
img[3:6, 3:6] = 1
|
||||
result = chan_vese(img, mu=0.0, tol=1e-8).astype(float)
|
||||
assert_array_equal(result, img)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', [np.uint8, np.float16, np.float32, np.float64]
|
||||
)
|
||||
def test_chan_vese_extended_output(dtype):
|
||||
img = np.zeros((10, 10), dtype=dtype)
|
||||
img[3:6, 3:6] = 1
|
||||
result = chan_vese(img, mu=0.0, tol=1e-8, extended_output=True)
|
||||
float_dtype = _supported_float_type(dtype)
|
||||
assert result[1].dtype == float_dtype
|
||||
assert all(arr.dtype == float_dtype for arr in result[2])
|
||||
assert_array_equal(len(result), 3)
|
||||
|
||||
|
||||
def test_chan_vese_remove_noise():
|
||||
ref = np.zeros((10, 10))
|
||||
ref[1:6, 1:6] = np.array([[0, 1, 1, 1, 0],
|
||||
[1, 1, 1, 1, 1],
|
||||
[1, 1, 1, 1, 1],
|
||||
[1, 1, 1, 1, 1],
|
||||
[0, 1, 1, 1, 0]])
|
||||
img = ref.copy()
|
||||
img[8, 3] = 1
|
||||
result = chan_vese(img, mu=0.3, tol=1e-3, max_num_iter=100, dt=10,
|
||||
init_level_set="disk").astype(float)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
def test_chan_vese_incorrect_image_type():
|
||||
img = np.zeros((10, 10, 3))
|
||||
ls = np.zeros((10, 9))
|
||||
with pytest.raises(ValueError):
|
||||
chan_vese(img, mu=0.0, init_level_set=ls)
|
||||
|
||||
|
||||
def test_chan_vese_gap_closing():
|
||||
ref = np.zeros((20, 20))
|
||||
ref[8:15, :] = np.ones((7, 20))
|
||||
img = ref.copy()
|
||||
img[:, 6] = np.zeros(20)
|
||||
result = chan_vese(img, mu=0.7, tol=1e-3, max_num_iter=1000, dt=1000,
|
||||
init_level_set="disk").astype(float)
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
def test_chan_vese_incorrect_level_set():
|
||||
img = np.zeros((10, 10))
|
||||
ls = np.zeros((10, 9))
|
||||
with pytest.raises(ValueError):
|
||||
chan_vese(img, mu=0.0, init_level_set=ls)
|
||||
with pytest.raises(ValueError):
|
||||
chan_vese(img, mu=0.0, init_level_set="a")
|
||||
|
||||
|
||||
def test_chan_vese_blank_image():
|
||||
img = np.zeros((10, 10))
|
||||
level_set = np.random.rand(10, 10)
|
||||
ref = level_set > 0
|
||||
result = chan_vese(img, mu=0.0, tol=0.0, init_level_set=level_set)
|
||||
assert_array_equal(result, ref)
|
||||
228
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_clear_border.py
vendored
Normal file
228
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_clear_border.py
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
import numpy as np
|
||||
from skimage.segmentation import clear_border
|
||||
|
||||
from skimage._shared.testing import assert_array_equal, assert_
|
||||
|
||||
|
||||
def test_clear_border():
|
||||
image = np.array(
|
||||
[[0, 0, 0, 0, 0, 0, 0, 1, 0],
|
||||
[1, 1, 0, 0, 1, 0, 0, 1, 0],
|
||||
[1, 1, 0, 1, 0, 1, 0, 0, 0],
|
||||
[0, 0, 0, 1, 1, 1, 1, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
|
||||
|
||||
# test default case
|
||||
result = clear_border(image.copy())
|
||||
ref = image.copy()
|
||||
ref[1:3, 0:2] = 0
|
||||
ref[0:2, -2] = 0
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
# test buffer
|
||||
result = clear_border(image.copy(), 1)
|
||||
assert_array_equal(result, np.zeros(result.shape))
|
||||
|
||||
# test background value
|
||||
result = clear_border(image.copy(), buffer_size=1, bgval=2)
|
||||
assert_array_equal(result, 2 * np.ones_like(image))
|
||||
|
||||
# test mask
|
||||
mask = np.array([[0, 0, 1, 1, 1, 1, 1, 1, 1],
|
||||
[0, 0, 1, 1, 1, 1, 1, 1, 1],
|
||||
[1, 1, 1, 1, 1, 1, 1, 1, 1],
|
||||
[1, 1, 1, 1, 1, 1, 1, 1, 1],
|
||||
[1, 1, 1, 1, 1, 1, 1, 1, 1],
|
||||
[1, 1, 1, 1, 1, 1, 1, 1, 1]]).astype(bool)
|
||||
result = clear_border(image.copy(), mask=mask)
|
||||
ref = image.copy()
|
||||
ref[1:3, 0:2] = 0
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
|
||||
def test_clear_border_3d():
|
||||
image = np.array([
|
||||
[[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[1, 0, 0, 0]],
|
||||
[[0, 0, 0, 0],
|
||||
[0, 1, 1, 0],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]],
|
||||
])
|
||||
# test default case
|
||||
result = clear_border(image.copy())
|
||||
ref = image.copy()
|
||||
ref[0, 3, 0] = 0
|
||||
assert_array_equal(result, ref)
|
||||
|
||||
# test buffer
|
||||
result = clear_border(image.copy(), 1)
|
||||
assert_array_equal(result, np.zeros(result.shape))
|
||||
|
||||
# test background value
|
||||
result = clear_border(image.copy(), buffer_size=1, bgval=2)
|
||||
assert_array_equal(result, 2 * np.ones_like(image))
|
||||
|
||||
|
||||
def test_clear_border_non_binary():
|
||||
image = np.array([[1, 2, 3, 1, 2],
|
||||
[3, 3, 5, 4, 2],
|
||||
[3, 4, 5, 4, 2],
|
||||
[3, 3, 2, 1, 2]])
|
||||
|
||||
result = clear_border(image)
|
||||
expected = np.array([[0, 0, 0, 0, 0],
|
||||
[0, 0, 5, 4, 0],
|
||||
[0, 4, 5, 4, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
|
||||
assert_array_equal(result, expected)
|
||||
assert_(not np.all(image == result))
|
||||
|
||||
|
||||
def test_clear_border_non_binary_3d():
|
||||
image3d = np.array(
|
||||
[[[1, 2, 3, 1, 2],
|
||||
[3, 3, 3, 4, 2],
|
||||
[3, 4, 3, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
[[1, 2, 3, 1, 2],
|
||||
[3, 3, 5, 4, 2],
|
||||
[3, 4, 5, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
[[1, 2, 3, 1, 2],
|
||||
[3, 3, 3, 4, 2],
|
||||
[3, 4, 3, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
])
|
||||
|
||||
result = clear_border(image3d)
|
||||
expected = np.array(
|
||||
[[[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0, 0],
|
||||
[0, 0, 5, 0, 0],
|
||||
[0, 0, 5, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
])
|
||||
|
||||
assert_array_equal(result, expected)
|
||||
assert_(not np.all(image3d == result))
|
||||
|
||||
|
||||
def test_clear_border_non_binary_inplace():
|
||||
image = np.array([[1, 2, 3, 1, 2],
|
||||
[3, 3, 5, 4, 2],
|
||||
[3, 4, 5, 4, 2],
|
||||
[3, 3, 2, 1, 2]])
|
||||
|
||||
result = clear_border(image, out=image)
|
||||
expected = np.array([[0, 0, 0, 0, 0],
|
||||
[0, 0, 5, 4, 0],
|
||||
[0, 4, 5, 4, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
|
||||
assert_array_equal(result, expected)
|
||||
assert_array_equal(image, result)
|
||||
|
||||
|
||||
def test_clear_border_non_binary_inplace_3d():
|
||||
image3d = np.array(
|
||||
[[[1, 2, 3, 1, 2],
|
||||
[3, 3, 3, 4, 2],
|
||||
[3, 4, 3, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
[[1, 2, 3, 1, 2],
|
||||
[3, 3, 5, 4, 2],
|
||||
[3, 4, 5, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
[[1, 2, 3, 1, 2],
|
||||
[3, 3, 3, 4, 2],
|
||||
[3, 4, 3, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
])
|
||||
|
||||
result = clear_border(image3d, out=image3d)
|
||||
expected = np.array(
|
||||
[[[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0, 0],
|
||||
[0, 0, 5, 0, 0],
|
||||
[0, 0, 5, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
])
|
||||
|
||||
assert_array_equal(result, expected)
|
||||
assert_array_equal(image3d, result)
|
||||
|
||||
|
||||
def test_clear_border_non_binary_out():
|
||||
image = np.array([[1, 2, 3, 1, 2],
|
||||
[3, 3, 5, 4, 2],
|
||||
[3, 4, 5, 4, 2],
|
||||
[3, 3, 2, 1, 2]])
|
||||
out = np.empty_like(image)
|
||||
result = clear_border(image, out=out)
|
||||
expected = np.array([[0, 0, 0, 0, 0],
|
||||
[0, 0, 5, 4, 0],
|
||||
[0, 4, 5, 4, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
|
||||
assert_array_equal(result, expected)
|
||||
assert_array_equal(out, result)
|
||||
|
||||
|
||||
def test_clear_border_non_binary_out_3d():
|
||||
image3d = np.array(
|
||||
[[[1, 2, 3, 1, 2],
|
||||
[3, 3, 3, 4, 2],
|
||||
[3, 4, 3, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
[[1, 2, 3, 1, 2],
|
||||
[3, 3, 5, 4, 2],
|
||||
[3, 4, 5, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
[[1, 2, 3, 1, 2],
|
||||
[3, 3, 3, 4, 2],
|
||||
[3, 4, 3, 4, 2],
|
||||
[3, 3, 2, 1, 2]],
|
||||
])
|
||||
out = np.empty_like(image3d)
|
||||
|
||||
result = clear_border(image3d, out=out)
|
||||
expected = np.array(
|
||||
[[[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0, 0],
|
||||
[0, 0, 5, 0, 0],
|
||||
[0, 0, 5, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
[[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]],
|
||||
])
|
||||
|
||||
assert_array_equal(result, expected)
|
||||
assert_array_equal(out, result)
|
||||
184
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_expand_labels.py
vendored
Normal file
184
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_expand_labels.py
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
from scipy import ndimage as ndi
|
||||
from skimage import data
|
||||
|
||||
import numpy as np
|
||||
|
||||
from skimage import measure
|
||||
from skimage.segmentation._expand_labels import expand_labels
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
|
||||
SAMPLE1D = np.array([0, 0, 4, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0])
|
||||
SAMPLE1D_EXPANDED_3 = np.array([4, 4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0])
|
||||
|
||||
# Some pixels are important edge cases with undefined behaviour:
|
||||
# these are the pixels that are at the same distance from
|
||||
# multiple labels. Ideally the label would be chosen at random
|
||||
# to avoid bias, but as we are relying on the index map returned
|
||||
# by the scipy.ndimage distance transform, what actually happens
|
||||
# is determined by the upstream implementation of the distance
|
||||
# tansform, thus we don't give any guarantees for the edge case pixels.
|
||||
#
|
||||
# Regardless, it seems prudent to have a test including an edge case
|
||||
# so we can detect whether future upstream changes in scipy.ndimage
|
||||
# modify the behaviour.
|
||||
|
||||
EDGECASE1D = np.array([0, 0, 4, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0])
|
||||
EDGECASE1D_EXPANDED_3 = np.array([4, 4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0])
|
||||
|
||||
SAMPLE2D = np.array(
|
||||
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
|
||||
)
|
||||
|
||||
SAMPLE2D_EXPANDED_3 = np.array(
|
||||
[[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
|
||||
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
|
||||
[1, 1, 1, 1, 1, 1, 1, 0, 0, 2, 0],
|
||||
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
|
||||
[1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2],
|
||||
[1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
|
||||
[1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2],
|
||||
[1, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2],
|
||||
[0, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0]]
|
||||
)
|
||||
|
||||
# non-integer expansion
|
||||
SAMPLE2D_EXPANDED_1_5 = np.array(
|
||||
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
|
||||
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
|
||||
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
|
||||
[1, 1, 1, 1, 1, 0, 0, 0, 2, 2, 2],
|
||||
[1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2],
|
||||
[0, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
|
||||
|
||||
|
||||
EDGECASE2D = np.array(
|
||||
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 0, 2, 2, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 0, 2, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]
|
||||
)
|
||||
|
||||
EDGECASE2D_EXPANDED_4 = np.array(
|
||||
[[1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 0],
|
||||
[1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2],
|
||||
[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2],
|
||||
[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 0],
|
||||
[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 0]])
|
||||
|
||||
SAMPLE3D = np.array(
|
||||
[[[0, 0, 0, 0],
|
||||
[0, 3, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]],
|
||||
|
||||
[[0, 0, 0, 0],
|
||||
[0, 3, 3, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]],
|
||||
|
||||
[[0, 0, 0, 0],
|
||||
[0, 3, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 5, 0]],
|
||||
|
||||
[[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 5, 0]]])
|
||||
|
||||
SAMPLE3D_EXPANDED_2 =np.array(
|
||||
[[[3, 3, 3, 3],
|
||||
[3, 3, 3, 3],
|
||||
[3, 3, 3, 3],
|
||||
[0, 3, 5, 0]],
|
||||
|
||||
[[3, 3, 3, 3],
|
||||
[3, 3, 3, 3],
|
||||
[3, 3, 3, 3],
|
||||
[0, 5, 5, 5]],
|
||||
|
||||
[[3, 3, 3, 3],
|
||||
[3, 3, 3, 3],
|
||||
[3, 3, 5, 5],
|
||||
[5, 5, 5, 5]],
|
||||
|
||||
[[3, 3, 3, 0],
|
||||
[3, 3, 3, 0],
|
||||
[3, 3, 5, 5],
|
||||
[5, 5, 5, 5]]])
|
||||
|
||||
SAMPLE_EDGECASE_BEHAVIOUR = np.array([[0, 1, 0, 0], [2, 0, 0, 0], [0, 3, 0, 0]])
|
||||
|
||||
@testing.parametrize(
|
||||
"input_array, expected_output, expand_distance",
|
||||
[
|
||||
(SAMPLE1D, SAMPLE1D_EXPANDED_3, 3),
|
||||
(SAMPLE2D, SAMPLE2D_EXPANDED_3, 3),
|
||||
(SAMPLE2D, SAMPLE2D_EXPANDED_1_5, 1.5),
|
||||
(EDGECASE1D, EDGECASE1D_EXPANDED_3, 3),
|
||||
(EDGECASE2D, EDGECASE2D_EXPANDED_4, 4),
|
||||
(SAMPLE3D, SAMPLE3D_EXPANDED_2, 2)
|
||||
]
|
||||
)
|
||||
def test_expand_labels(input_array, expected_output, expand_distance):
|
||||
expanded = expand_labels(input_array, expand_distance)
|
||||
assert_array_equal(expanded, expected_output)
|
||||
|
||||
|
||||
@testing.parametrize('ndim', [2, 3])
|
||||
@testing.parametrize('distance', range(6))
|
||||
def test_binary_blobs(ndim, distance):
|
||||
"""Check some invariants with label expansion.
|
||||
|
||||
- New labels array should exactly contain the original labels array.
|
||||
- Distance to old labels array within new labels should never exceed input
|
||||
distance.
|
||||
- Distance beyond the expanded labels should always exceed the input
|
||||
distance.
|
||||
"""
|
||||
img = data.binary_blobs(length=64, blob_size_fraction=0.05, n_dim=ndim)
|
||||
labels = measure.label(img)
|
||||
expanded = expand_labels(labels, distance=distance)
|
||||
original_mask = labels != 0
|
||||
assert_array_equal(labels[original_mask], expanded[original_mask])
|
||||
expanded_only_mask = (expanded - labels).astype(bool)
|
||||
distance_map = ndi.distance_transform_edt(~original_mask)
|
||||
expanded_distances = distance_map[expanded_only_mask]
|
||||
if expanded_distances.size > 0:
|
||||
assert np.all(expanded_distances <= distance)
|
||||
beyond_expanded_distances = distance_map[~expanded.astype(bool)]
|
||||
if beyond_expanded_distances.size > 0:
|
||||
assert np.all(beyond_expanded_distances > distance)
|
||||
|
||||
|
||||
def test_edge_case_behaviour():
|
||||
""" Check edge case behavior to detect upstream changes
|
||||
|
||||
For edge cases where a pixel has the same distance to several regions,
|
||||
lexicographical order seems to determine which region gets to expand
|
||||
into this pixel given the current upstream behaviour in
|
||||
scipy.ndimage.distance_map_edt.
|
||||
|
||||
As a result, we expect different results when transposing the array.
|
||||
If this test fails, something has changed upstream.
|
||||
"""
|
||||
expanded = expand_labels(SAMPLE_EDGECASE_BEHAVIOUR, 1)
|
||||
expanded_transpose = expand_labels(SAMPLE_EDGECASE_BEHAVIOUR.T, 1)
|
||||
assert not np.all(expanded == expanded_transpose.T)
|
||||
85
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_felzenszwalb.py
vendored
Normal file
85
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_felzenszwalb.py
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
import numpy as np
|
||||
from skimage import data
|
||||
from skimage.segmentation import felzenszwalb
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import (assert_greater, test_parallel,
|
||||
assert_equal, assert_array_equal,
|
||||
assert_warns, assert_no_warnings)
|
||||
|
||||
|
||||
@test_parallel()
|
||||
def test_grey():
|
||||
# very weak tests.
|
||||
img = np.zeros((20, 21))
|
||||
img[:10, 10:] = 0.2
|
||||
img[10:, :10] = 0.4
|
||||
img[10:, 10:] = 0.6
|
||||
seg = felzenszwalb(img, sigma=0)
|
||||
# we expect 4 segments:
|
||||
assert_equal(len(np.unique(seg)), 4)
|
||||
# that mostly respect the 4 regions:
|
||||
for i in range(4):
|
||||
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
|
||||
assert_greater(hist[i], 40)
|
||||
|
||||
|
||||
def test_minsize():
|
||||
# single-channel:
|
||||
img = data.coins()[20:168, 0:128]
|
||||
for min_size in np.arange(10, 100, 10):
|
||||
segments = felzenszwalb(img, min_size=min_size, sigma=3)
|
||||
counts = np.bincount(segments.ravel())
|
||||
# actually want to test greater or equal.
|
||||
assert_greater(counts.min() + 1, min_size)
|
||||
# multi-channel:
|
||||
coffee = data.coffee()[::4, ::4]
|
||||
for min_size in np.arange(10, 100, 10):
|
||||
segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
|
||||
counts = np.bincount(segments.ravel())
|
||||
# actually want to test greater or equal.
|
||||
assert_greater(counts.min() + 1, min_size)
|
||||
|
||||
|
||||
@testing.parametrize('channel_axis', [0, -1])
|
||||
def test_3D(channel_axis):
|
||||
grey_img = np.zeros((10, 10))
|
||||
rgb_img = np.zeros((10, 10, 3))
|
||||
three_d_img = np.zeros((10, 10, 10))
|
||||
|
||||
rgb_img = np.moveaxis(rgb_img, -1, channel_axis)
|
||||
with assert_no_warnings():
|
||||
felzenszwalb(grey_img, channel_axis=-1)
|
||||
felzenszwalb(grey_img, channel_axis=None)
|
||||
felzenszwalb(rgb_img, channel_axis=channel_axis)
|
||||
with assert_warns(RuntimeWarning):
|
||||
felzenszwalb(three_d_img, channel_axis=channel_axis)
|
||||
with testing.raises(ValueError):
|
||||
felzenszwalb(rgb_img, channel_axis=None)
|
||||
felzenszwalb(three_d_img, channel_axis=None)
|
||||
|
||||
|
||||
def test_color():
|
||||
# very weak tests.
|
||||
img = np.zeros((20, 21, 3))
|
||||
img[:10, :10, 0] = 1
|
||||
img[10:, :10, 1] = 1
|
||||
img[10:, 10:, 2] = 1
|
||||
seg = felzenszwalb(img, sigma=0)
|
||||
# we expect 4 segments:
|
||||
assert_equal(len(np.unique(seg)), 4)
|
||||
assert_array_equal(seg[:10, :10], 0)
|
||||
assert_array_equal(seg[10:, :10], 2)
|
||||
assert_array_equal(seg[:10, 10:], 1)
|
||||
assert_array_equal(seg[10:, 10:], 3)
|
||||
|
||||
|
||||
def test_merging():
|
||||
# test region merging in the post-processing step
|
||||
img = np.array([[0, 0.3], [0.7, 1]])
|
||||
# With scale=0, only the post-processing is performed.
|
||||
seg = felzenszwalb(img, scale=0, sigma=0, min_size=2)
|
||||
# we expect 2 segments:
|
||||
assert_equal(len(np.unique(seg)), 2)
|
||||
assert_array_equal(seg[0, :], 0)
|
||||
assert_array_equal(seg[1, :], 1)
|
||||
211
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_join.py
vendored
Normal file
211
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_join.py
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
import numpy as np
|
||||
from skimage.segmentation import join_segmentations, relabel_sequential
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
import pytest
|
||||
|
||||
|
||||
def test_join_segmentations():
|
||||
s1 = np.array([[0, 0, 1, 1],
|
||||
[0, 2, 1, 1],
|
||||
[2, 2, 2, 1]])
|
||||
s2 = np.array([[0, 1, 1, 0],
|
||||
[0, 1, 1, 0],
|
||||
[0, 1, 1, 1]])
|
||||
|
||||
# test correct join
|
||||
# NOTE: technically, equality to j_ref is not required, only that there
|
||||
# is a one-to-one mapping between j and j_ref. I don't know of an easy way
|
||||
# to check this (i.e. not as error-prone as the function being tested)
|
||||
j = join_segmentations(s1, s2)
|
||||
j_ref = np.array([[0, 1, 3, 2],
|
||||
[0, 5, 3, 2],
|
||||
[4, 5, 5, 3]])
|
||||
assert_array_equal(j, j_ref)
|
||||
|
||||
# test correct exception when arrays are different shapes
|
||||
s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]])
|
||||
with testing.raises(ValueError):
|
||||
join_segmentations(s1, s3)
|
||||
|
||||
|
||||
def _check_maps(ar, ar_relab, fw, inv):
|
||||
assert_array_equal(fw[ar], ar_relab)
|
||||
assert_array_equal(inv[ar_relab], ar)
|
||||
|
||||
|
||||
def test_relabel_sequential_offset1():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42])
|
||||
ar_relab, fw, inv = relabel_sequential(ar)
|
||||
_check_maps(ar, ar_relab, fw, inv)
|
||||
ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4])
|
||||
assert_array_equal(ar_relab, ar_relab_ref)
|
||||
fw_ref = np.zeros(100, int)
|
||||
fw_ref[1] = 1
|
||||
fw_ref[5] = 2
|
||||
fw_ref[8] = 3
|
||||
fw_ref[42] = 4
|
||||
fw_ref[99] = 5
|
||||
assert_array_equal(fw, fw_ref)
|
||||
inv_ref = np.array([0, 1, 5, 8, 42, 99])
|
||||
assert_array_equal(inv, inv_ref)
|
||||
|
||||
|
||||
def test_relabel_sequential_offset5():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42])
|
||||
ar_relab, fw, inv = relabel_sequential(ar, offset=5)
|
||||
_check_maps(ar, ar_relab, fw, inv)
|
||||
ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8])
|
||||
assert_array_equal(ar_relab, ar_relab_ref)
|
||||
fw_ref = np.zeros(100, int)
|
||||
fw_ref[1] = 5
|
||||
fw_ref[5] = 6
|
||||
fw_ref[8] = 7
|
||||
fw_ref[42] = 8
|
||||
fw_ref[99] = 9
|
||||
assert_array_equal(fw, fw_ref)
|
||||
inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
|
||||
assert_array_equal(inv, inv_ref)
|
||||
|
||||
|
||||
def test_relabel_sequential_offset5_with0():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0])
|
||||
ar_relab, fw, inv = relabel_sequential(ar, offset=5)
|
||||
_check_maps(ar, ar_relab, fw, inv)
|
||||
ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
|
||||
assert_array_equal(ar_relab, ar_relab_ref)
|
||||
fw_ref = np.zeros(100, int)
|
||||
fw_ref[1] = 5
|
||||
fw_ref[5] = 6
|
||||
fw_ref[8] = 7
|
||||
fw_ref[42] = 8
|
||||
fw_ref[99] = 9
|
||||
assert_array_equal(fw, fw_ref)
|
||||
inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
|
||||
assert_array_equal(inv, inv_ref)
|
||||
|
||||
|
||||
def test_relabel_sequential_dtype():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.uint8)
|
||||
ar_relab, fw, inv = relabel_sequential(ar, offset=5)
|
||||
_check_maps(ar.astype(int), ar_relab, fw, inv)
|
||||
ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
|
||||
assert_array_equal(ar_relab, ar_relab_ref)
|
||||
fw_ref = np.zeros(100, int)
|
||||
fw_ref[1] = 5
|
||||
fw_ref[5] = 6
|
||||
fw_ref[8] = 7
|
||||
fw_ref[42] = 8
|
||||
fw_ref[99] = 9
|
||||
assert_array_equal(fw, fw_ref)
|
||||
inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
|
||||
assert_array_equal(inv, inv_ref)
|
||||
|
||||
|
||||
def test_relabel_sequential_signed_overflow():
|
||||
imax = np.iinfo(np.int32).max
|
||||
labels = np.array([0, 1, 99, 42, 42], dtype=np.int32)
|
||||
output, fw, inv = relabel_sequential(labels, offset=imax)
|
||||
reference = np.array([0, imax, imax + 2, imax + 1, imax + 1],
|
||||
dtype=np.uint32)
|
||||
assert_array_equal(output, reference)
|
||||
assert output.dtype == reference.dtype
|
||||
|
||||
|
||||
def test_very_large_labels():
|
||||
imax = np.iinfo(np.int64).max
|
||||
labels = np.array([0, 1, imax, 42, 42], dtype=np.int64)
|
||||
output, fw, inv = relabel_sequential(labels, offset=imax)
|
||||
assert np.max(output) == imax + 2
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', (np.byte, np.short, np.intc, int,
|
||||
np.longlong, np.ubyte, np.ushort,
|
||||
np.uintc, np.uint, np.ulonglong))
|
||||
@pytest.mark.parametrize('data_already_sequential', (False, True))
|
||||
def test_relabel_sequential_int_dtype_stability(data_already_sequential,
|
||||
dtype):
|
||||
if data_already_sequential:
|
||||
ar = np.array([1, 3, 0, 2, 5, 4], dtype=dtype)
|
||||
else:
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=dtype)
|
||||
assert all(a.dtype == dtype for a in relabel_sequential(ar))
|
||||
|
||||
|
||||
def test_relabel_sequential_int_dtype_overflow():
|
||||
ar = np.array([1, 3, 0, 2, 5, 4], dtype=np.uint8)
|
||||
offset = 254
|
||||
ar_relab, fw, inv = relabel_sequential(ar, offset=offset)
|
||||
_check_maps(ar, ar_relab, fw, inv)
|
||||
assert all(a.dtype == np.uint16 for a in (ar_relab, fw))
|
||||
assert inv.dtype == ar.dtype
|
||||
ar_relab_ref = np.where(ar > 0, ar.astype(int) + offset - 1, 0)
|
||||
assert_array_equal(ar_relab, ar_relab_ref)
|
||||
|
||||
|
||||
def test_relabel_sequential_negative_values():
|
||||
ar = np.array([1, 1, 5, -5, 8, 99, 42, 0])
|
||||
with pytest.raises(ValueError):
|
||||
relabel_sequential(ar)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('offset', (0, -3))
|
||||
@pytest.mark.parametrize('data_already_sequential', (False, True))
|
||||
def test_relabel_sequential_nonpositive_offset(data_already_sequential,
|
||||
offset):
|
||||
if data_already_sequential:
|
||||
ar = np.array([1, 3, 0, 2, 5, 4])
|
||||
else:
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0])
|
||||
with pytest.raises(ValueError):
|
||||
relabel_sequential(ar, offset=offset)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('offset', (1, 5))
|
||||
@pytest.mark.parametrize('with0', (False, True))
|
||||
@pytest.mark.parametrize('input_starts_at_offset', (False, True))
|
||||
def test_relabel_sequential_already_sequential(offset, with0,
|
||||
input_starts_at_offset):
|
||||
if with0:
|
||||
ar = np.array([1, 3, 0, 2, 5, 4])
|
||||
else:
|
||||
ar = np.array([1, 3, 2, 5, 4])
|
||||
if input_starts_at_offset:
|
||||
ar[ar > 0] += offset - 1
|
||||
ar_relab, fw, inv = relabel_sequential(ar, offset=offset)
|
||||
_check_maps(ar, ar_relab, fw, inv)
|
||||
if input_starts_at_offset:
|
||||
ar_relab_ref = ar
|
||||
else:
|
||||
ar_relab_ref = np.where(ar > 0, ar + offset - 1, 0)
|
||||
assert_array_equal(ar_relab, ar_relab_ref)
|
||||
|
||||
|
||||
def test_incorrect_input_dtype():
|
||||
labels = np.array([0, 2, 2, 1, 1, 8], dtype=float)
|
||||
with testing.raises(TypeError):
|
||||
_ = relabel_sequential(labels)
|
||||
|
||||
|
||||
def test_arraymap_call():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
|
||||
relabeled, fw, inv = relabel_sequential(ar)
|
||||
testing.assert_array_equal(relabeled, fw(ar))
|
||||
testing.assert_array_equal(ar, inv(relabeled))
|
||||
|
||||
|
||||
def test_arraymap_len():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
|
||||
relabeled, fw, inv = relabel_sequential(ar)
|
||||
assert len(fw) == 100
|
||||
assert len(fw) == len(np.array(fw))
|
||||
assert len(inv) == 6
|
||||
assert len(inv) == len(np.array(inv))
|
||||
|
||||
|
||||
def test_arraymap_set():
|
||||
ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
|
||||
relabeled, fw, inv = relabel_sequential(ar)
|
||||
fw[72] = 6
|
||||
assert fw[72] == 6
|
||||
141
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_morphsnakes.py
vendored
Normal file
141
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_morphsnakes.py
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_array_equal
|
||||
|
||||
from skimage.segmentation import (disk_level_set,
|
||||
inverse_gaussian_gradient,
|
||||
morphological_chan_vese,
|
||||
morphological_geodesic_active_contour)
|
||||
|
||||
|
||||
def gaussian_blob():
|
||||
coords = np.mgrid[-5:6, -5:6]
|
||||
sqrdistances = (coords ** 2).sum(0)
|
||||
return np.exp(-sqrdistances / 10)
|
||||
|
||||
|
||||
def test_morphsnakes_incorrect_image_shape():
|
||||
img = np.zeros((10, 10, 3))
|
||||
ls = np.zeros((10, 9))
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
morphological_chan_vese(img, num_iter=1, init_level_set=ls)
|
||||
with pytest.raises(ValueError):
|
||||
morphological_geodesic_active_contour(img, num_iter=1,
|
||||
init_level_set=ls)
|
||||
|
||||
|
||||
def test_morphsnakes_incorrect_ndim():
|
||||
img = np.zeros((4, 4, 4, 4))
|
||||
ls = np.zeros((4, 4, 4, 4))
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
morphological_chan_vese(img, num_iter=1, init_level_set=ls)
|
||||
with pytest.raises(ValueError):
|
||||
morphological_geodesic_active_contour(img, num_iter=1,
|
||||
init_level_set=ls)
|
||||
|
||||
|
||||
def test_morphsnakes_black():
|
||||
img = np.zeros((11, 11))
|
||||
ls = disk_level_set(img.shape, center=(5, 5), radius=3)
|
||||
|
||||
ref_zeros = np.zeros(img.shape, dtype=np.int8)
|
||||
ref_ones = np.ones(img.shape, dtype=np.int8)
|
||||
|
||||
acwe_ls = morphological_chan_vese(img, num_iter=6, init_level_set=ls)
|
||||
assert_array_equal(acwe_ls, ref_zeros)
|
||||
|
||||
gac_ls = morphological_geodesic_active_contour(img, num_iter=6,
|
||||
init_level_set=ls)
|
||||
assert_array_equal(gac_ls, ref_zeros)
|
||||
|
||||
gac_ls2 = morphological_geodesic_active_contour(img, num_iter=6,
|
||||
init_level_set=ls,
|
||||
balloon=1, threshold=-1,
|
||||
smoothing=0)
|
||||
assert_array_equal(gac_ls2, ref_ones)
|
||||
|
||||
assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8
|
||||
|
||||
|
||||
def test_morphsnakes_simple_shape_chan_vese():
|
||||
img = gaussian_blob()
|
||||
ls1 = disk_level_set(img.shape, center=(5, 5), radius=3)
|
||||
ls2 = disk_level_set(img.shape, center=(5, 5), radius=6)
|
||||
|
||||
acwe_ls1 = morphological_chan_vese(img, num_iter=10, init_level_set=ls1)
|
||||
acwe_ls2 = morphological_chan_vese(img, num_iter=10, init_level_set=ls2)
|
||||
|
||||
assert_array_equal(acwe_ls1, acwe_ls2)
|
||||
|
||||
assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8
|
||||
|
||||
|
||||
def test_morphsnakes_simple_shape_geodesic_active_contour():
|
||||
img = (disk_level_set((11, 11), center=(5, 5), radius=3.5)).astype(float)
|
||||
gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0)
|
||||
ls = disk_level_set(img.shape, center=(5, 5), radius=6)
|
||||
|
||||
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
|
||||
dtype=np.int8)
|
||||
|
||||
gac_ls = morphological_geodesic_active_contour(gimg, num_iter=10,
|
||||
init_level_set=ls,
|
||||
balloon=-1)
|
||||
assert_array_equal(gac_ls, ref)
|
||||
assert gac_ls.dtype == np.int8
|
||||
|
||||
|
||||
def test_init_level_sets():
|
||||
image = np.zeros((6, 6))
|
||||
checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard')
|
||||
checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1],
|
||||
[0, 0, 0, 0, 0, 1],
|
||||
[0, 0, 0, 0, 0, 1],
|
||||
[0, 0, 0, 0, 0, 1],
|
||||
[0, 0, 0, 0, 0, 1],
|
||||
[1, 1, 1, 1, 1, 0]], dtype=np.int8)
|
||||
|
||||
disk_ls = morphological_geodesic_active_contour(image, 0, 'disk')
|
||||
disk_ref = np.array([[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1],
|
||||
[0, 1, 1, 1, 1, 1],
|
||||
[0, 1, 1, 1, 1, 1],
|
||||
[0, 0, 1, 1, 1, 0]], dtype=np.int8)
|
||||
|
||||
assert_array_equal(checkerboard_ls, checkerboard_ref)
|
||||
assert_array_equal(disk_ls, disk_ref)
|
||||
|
||||
|
||||
def test_morphsnakes_3d():
|
||||
image = np.zeros((7, 7, 7))
|
||||
|
||||
evolution = []
|
||||
|
||||
def callback(x):
|
||||
evolution.append(x.sum())
|
||||
|
||||
ls = morphological_chan_vese(image, 5, 'disk',
|
||||
iter_callback=callback)
|
||||
|
||||
# Check that the initial disk level set is correct
|
||||
assert evolution[0] == 81
|
||||
|
||||
# Check that the final level set is correct
|
||||
assert ls.sum() == 0
|
||||
|
||||
# Check that the contour is shrinking at every iteration
|
||||
for v1, v2 in zip(evolution[:-1], evolution[1:]):
|
||||
assert v1 >= v2
|
||||
58
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_quickshift.py
vendored
Normal file
58
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_quickshift.py
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
import numpy as np
|
||||
from skimage.segmentation import quickshift
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import (assert_greater, test_parallel,
|
||||
assert_equal, assert_array_equal)
|
||||
|
||||
@test_parallel()
|
||||
@testing.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_grey(dtype):
|
||||
rnd = np.random.default_rng(0)
|
||||
img = np.zeros((20, 21))
|
||||
img[:10, 10:] = 0.2
|
||||
img[10:, :10] = 0.4
|
||||
img[10:, 10:] = 0.6
|
||||
img += 0.05 * rnd.normal(size=img.shape)
|
||||
img = img.astype(dtype, copy=False)
|
||||
seg = quickshift(img, kernel_size=2, max_dist=3, random_seed=0,
|
||||
convert2lab=False, sigma=0)
|
||||
# we expect 4 segments:
|
||||
assert_equal(len(np.unique(seg)), 4)
|
||||
# that mostly respect the 4 regions:
|
||||
for i in range(4):
|
||||
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
|
||||
assert_greater(hist[i], 20)
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float32, np.float64])
|
||||
@testing.parametrize('channel_axis', [-3, -2, -1, 0, 1, 2])
|
||||
def test_color(dtype, channel_axis):
|
||||
rnd = np.random.default_rng(583428449)
|
||||
img = np.zeros((20, 21, 3))
|
||||
img[:10, :10, 0] = 1
|
||||
img[10:, :10, 1] = 1
|
||||
img[10:, 10:, 2] = 1
|
||||
img += 0.01 * rnd.normal(size=img.shape)
|
||||
img[img > 1] = 1
|
||||
img[img < 0] = 0
|
||||
img = img.astype(dtype, copy=False)
|
||||
|
||||
img = np.moveaxis(img, source=-1, destination=channel_axis)
|
||||
seg = quickshift(img, random_seed=0, max_dist=30, kernel_size=10, sigma=0,
|
||||
channel_axis=channel_axis)
|
||||
# we expect 4 segments:
|
||||
assert_equal(len(np.unique(seg)), 4)
|
||||
assert_array_equal(seg[:10, :10], 1)
|
||||
assert_array_equal(seg[10:, :10], 3)
|
||||
assert_array_equal(seg[:10, 10:], 0)
|
||||
assert_array_equal(seg[10:, 10:], 2)
|
||||
|
||||
seg2 = quickshift(img, kernel_size=1, max_dist=2, random_seed=0,
|
||||
convert2lab=False, sigma=0,
|
||||
channel_axis=channel_axis)
|
||||
# very oversegmented:
|
||||
assert len(np.unique(seg2)) > 10
|
||||
# still don't cross lines
|
||||
assert (seg2[9, :] != seg2[10, :]).all()
|
||||
assert (seg2[:, 9] != seg2[:, 10]).all()
|
||||
512
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_random_walker.py
vendored
Normal file
512
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_random_walker.py
vendored
Normal file
@@ -0,0 +1,512 @@
|
||||
import numpy as np
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
from skimage._shared.testing import xfail, arch32
|
||||
from skimage.segmentation import random_walker
|
||||
from skimage.transform import resize
|
||||
|
||||
PYAMG_MISSING_WARNING = r'pyamg|\A\Z'
|
||||
|
||||
|
||||
def make_2d_syntheticdata(lx, ly=None):
|
||||
if ly is None:
|
||||
ly = lx
|
||||
np.random.seed(1234)
|
||||
data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
|
||||
small_l = int(lx // 5)
|
||||
data[lx // 2 - small_l:lx // 2 + small_l,
|
||||
ly // 2 - small_l:ly // 2 + small_l] = 1
|
||||
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
|
||||
ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (
|
||||
0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))
|
||||
data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0
|
||||
seeds = np.zeros_like(data)
|
||||
seeds[lx // 5, ly // 5] = 1
|
||||
seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2
|
||||
return data, seeds
|
||||
|
||||
|
||||
def make_3d_syntheticdata(lx, ly=None, lz=None):
|
||||
if ly is None:
|
||||
ly = lx
|
||||
if lz is None:
|
||||
lz = lx
|
||||
np.random.seed(1234)
|
||||
data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
|
||||
small_l = int(lx // 5)
|
||||
data[lx // 2 - small_l:lx // 2 + small_l,
|
||||
ly // 2 - small_l:ly // 2 + small_l,
|
||||
lz // 2 - small_l:lz // 2 + small_l] = 1
|
||||
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
|
||||
ly // 2 - small_l + 1:ly // 2 + small_l - 1,
|
||||
lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
|
||||
# make a hole
|
||||
hole_size = np.max([1, small_l // 8])
|
||||
data[lx // 2 - small_l,
|
||||
ly // 2 - hole_size:ly // 2 + hole_size,
|
||||
lz // 2 - hole_size:lz // 2 + hole_size] = 0
|
||||
seeds = np.zeros_like(data)
|
||||
seeds[lx // 5, ly // 5, lz // 5] = 1
|
||||
seeds[lx // 2 + small_l // 4,
|
||||
ly // 2 - small_l // 4,
|
||||
lz // 2 - small_l // 4] = 2
|
||||
return data, seeds
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float16, np.float32, np.float64])
|
||||
def test_2d_bf(dtype):
|
||||
lx = 70
|
||||
ly = 100
|
||||
|
||||
# have to use a smaller beta to avoid warning with lower precision input
|
||||
beta = 90 if dtype == np.float64 else 25
|
||||
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
data = data.astype(dtype, copy=False)
|
||||
labels_bf = random_walker(data, labels, beta=beta, mode='bf')
|
||||
assert (labels_bf[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
full_prob_bf = random_walker(data, labels, beta=beta, mode='bf', return_full_prob=True)
|
||||
assert (full_prob_bf[1, 25:45, 40:60] >=
|
||||
full_prob_bf[0, 25:45, 40:60]).all()
|
||||
assert data.shape == labels.shape
|
||||
# Now test with more than two labels
|
||||
labels[55, 80] = 3
|
||||
full_prob_bf = random_walker(data, labels, beta=beta, mode='bf', return_full_prob=True)
|
||||
assert (full_prob_bf[1, 25:45, 40:60] >=
|
||||
full_prob_bf[0, 25:45, 40:60]).all()
|
||||
assert len(full_prob_bf) == 3
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float16, np.float32, np.float64])
|
||||
def test_2d_cg(dtype):
|
||||
lx = 70
|
||||
ly = 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
data = data.astype(dtype, copy=False)
|
||||
with expected_warnings(['"cg" mode']):
|
||||
labels_cg = random_walker(data, labels, beta=90, mode='cg')
|
||||
assert (labels_cg[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
with expected_warnings(['"cg" mode']):
|
||||
full_prob = random_walker(data, labels, beta=90, mode='cg',
|
||||
return_full_prob=True)
|
||||
assert (full_prob[1, 25:45, 40:60] >=
|
||||
full_prob[0, 25:45, 40:60]).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float16, np.float32, np.float64])
|
||||
def test_2d_cg_mg(dtype):
|
||||
lx = 70
|
||||
ly = 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
data = data.astype(dtype, copy=False)
|
||||
anticipated_warnings = [f'scipy.sparse.sparsetools|{PYAMG_MISSING_WARNING}']
|
||||
with expected_warnings(anticipated_warnings):
|
||||
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
|
||||
assert (labels_cg_mg[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
with expected_warnings(anticipated_warnings):
|
||||
full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
|
||||
return_full_prob=True)
|
||||
assert (full_prob[1, 25:45, 40:60] >=
|
||||
full_prob[0, 25:45, 40:60]).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float16, np.float32, np.float64])
|
||||
def test_2d_cg_j(dtype):
|
||||
lx = 70
|
||||
ly = 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
data = data.astype(dtype, copy=False)
|
||||
labels_cg = random_walker(data, labels, beta=90, mode='cg_j')
|
||||
assert (labels_cg[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
full_prob = random_walker(data, labels, beta=90, mode='cg_j', return_full_prob=True)
|
||||
assert (full_prob[1, 25:45, 40:60] >= full_prob[0, 25:45, 40:60]).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
def test_types():
|
||||
lx = 70
|
||||
ly = 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
data = 255 * (data - data.min()) // (data.max() - data.min())
|
||||
data = data.astype(np.uint8)
|
||||
with expected_warnings([PYAMG_MISSING_WARNING]):
|
||||
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
|
||||
assert (labels_cg_mg[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
def test_reorder_labels():
|
||||
lx = 70
|
||||
ly = 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
labels[labels == 2] = 4
|
||||
labels_bf = random_walker(data, labels, beta=90, mode='bf')
|
||||
assert (labels_bf[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
def test_2d_inactive():
|
||||
lx = 70
|
||||
ly = 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
labels[10:20, 10:20] = -1
|
||||
labels[46:50, 33:38] = -2
|
||||
labels = random_walker(data, labels, beta=90)
|
||||
assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
def test_2d_laplacian_size():
|
||||
# test case from: https://github.com/scikit-image/scikit-image/issues/5034
|
||||
# The markers here were modified from the ones in the original issue to
|
||||
# avoid a singular matrix, but still reproduce the issue.
|
||||
data = np.asarray([[12823, 12787, 12710],
|
||||
[12883, 13425, 12067],
|
||||
[11934, 11929, 12309]])
|
||||
markers = np.asarray([[0, -1, 2],
|
||||
[0, -1, 0],
|
||||
[1, 0, -1]])
|
||||
expected_labels = np.asarray([[1, -1, 2],
|
||||
[1, -1, 2],
|
||||
[1, 1, -1]])
|
||||
labels = random_walker(data, markers, beta=10)
|
||||
np.testing.assert_array_equal(labels, expected_labels)
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_3d(dtype):
|
||||
n = 30
|
||||
lx, ly, lz = n, n, n
|
||||
data, labels = make_3d_syntheticdata(lx, ly, lz)
|
||||
data = data.astype(dtype, copy=False)
|
||||
with expected_warnings(['"cg" mode']):
|
||||
labels = random_walker(data, labels, mode='cg')
|
||||
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
def test_3d_inactive():
|
||||
n = 30
|
||||
lx, ly, lz = n, n, n
|
||||
data, labels = make_3d_syntheticdata(lx, ly, lz)
|
||||
labels[5:25, 26:29, 26:29] = -1
|
||||
with expected_warnings(['"cg" mode|CObject type']):
|
||||
labels = random_walker(data, labels, mode='cg')
|
||||
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
|
||||
assert data.shape == labels.shape
|
||||
|
||||
|
||||
@testing.parametrize('channel_axis', [0, 1, -1])
|
||||
@testing.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_multispectral_2d(dtype, channel_axis):
|
||||
lx, ly = 70, 100
|
||||
data, labels = make_2d_syntheticdata(lx, ly)
|
||||
data = data.astype(dtype, copy=False)
|
||||
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
|
||||
|
||||
data = np.moveaxis(data, -1, channel_axis)
|
||||
with expected_warnings(['"cg" mode',
|
||||
'The probability range is outside']):
|
||||
multi_labels = random_walker(data, labels, mode='cg',
|
||||
channel_axis=channel_axis)
|
||||
data = np.moveaxis(data, channel_axis, -1)
|
||||
|
||||
assert data[..., 0].shape == labels.shape
|
||||
with expected_warnings(['"cg" mode']):
|
||||
random_walker(data[..., 0], labels, mode='cg')
|
||||
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
|
||||
assert data[..., 0].shape == labels.shape
|
||||
|
||||
|
||||
@testing.parametrize('dtype', [np.float32, np.float64])
|
||||
def test_multispectral_3d(dtype):
|
||||
n = 30
|
||||
lx, ly, lz = n, n, n
|
||||
data, labels = make_3d_syntheticdata(lx, ly, lz)
|
||||
data = data.astype(dtype, copy=False)
|
||||
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
|
||||
with expected_warnings(['"cg" mode']):
|
||||
multi_labels = random_walker(data, labels, mode='cg', channel_axis=-1)
|
||||
assert data[..., 0].shape == labels.shape
|
||||
with expected_warnings(['"cg" mode']):
|
||||
single_labels = random_walker(data[..., 0], labels, mode='cg')
|
||||
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
|
||||
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
|
||||
assert data[..., 0].shape == labels.shape
|
||||
|
||||
|
||||
def test_spacing_0():
|
||||
n = 30
|
||||
lx, ly, lz = n, n, n
|
||||
data, _ = make_3d_syntheticdata(lx, ly, lz)
|
||||
|
||||
# Rescale `data` along Z axis
|
||||
data_aniso = np.zeros((n, n, n // 2))
|
||||
for i, yz in enumerate(data):
|
||||
data_aniso[i, :, :] = resize(yz, (n, n // 2),
|
||||
mode='constant',
|
||||
anti_aliasing=False)
|
||||
|
||||
# Generate new labels
|
||||
small_l = int(lx // 5)
|
||||
labels_aniso = np.zeros_like(data_aniso)
|
||||
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
|
||||
labels_aniso[lx // 2 + small_l // 4,
|
||||
ly // 2 - small_l // 4,
|
||||
lz // 4 - small_l // 8] = 2
|
||||
|
||||
# Test with `spacing` kwarg
|
||||
with expected_warnings(['"cg" mode']):
|
||||
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
|
||||
spacing=(1., 1., 0.5))
|
||||
|
||||
assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
|
||||
|
||||
|
||||
@xfail(condition=arch32,
|
||||
reason=('Known test failure on 32-bit platforms. See links for '
|
||||
'details: '
|
||||
'https://github.com/scikit-image/scikit-image/issues/3091 '
|
||||
'https://github.com/scikit-image/scikit-image/issues/3092'))
|
||||
def test_spacing_1():
|
||||
n = 30
|
||||
lx, ly, lz = n, n, n
|
||||
data, _ = make_3d_syntheticdata(lx, ly, lz)
|
||||
|
||||
# Rescale `data` along Y axis
|
||||
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
|
||||
data_aniso = np.zeros((n, n * 2, n))
|
||||
for i, yz in enumerate(data):
|
||||
data_aniso[i, :, :] = resize(yz, (n * 2, n),
|
||||
mode='constant',
|
||||
anti_aliasing=False)
|
||||
|
||||
# Generate new labels
|
||||
small_l = int(lx // 5)
|
||||
labels_aniso = np.zeros_like(data_aniso)
|
||||
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
|
||||
labels_aniso[lx // 2 + small_l // 4,
|
||||
ly - small_l // 2,
|
||||
lz // 2 - small_l // 4] = 2
|
||||
|
||||
# Test with `spacing` kwarg
|
||||
# First, anisotropic along Y
|
||||
with expected_warnings(['"cg" mode']):
|
||||
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
|
||||
spacing=(1., 2., 1.))
|
||||
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
|
||||
|
||||
# Rescale `data` along X axis
|
||||
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
|
||||
data_aniso = np.zeros((n, n * 2, n))
|
||||
for i in range(data.shape[1]):
|
||||
data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
|
||||
mode='constant',
|
||||
anti_aliasing=False)
|
||||
|
||||
# Generate new labels
|
||||
small_l = int(lx // 5)
|
||||
labels_aniso2 = np.zeros_like(data_aniso)
|
||||
labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
|
||||
labels_aniso2[lx - small_l // 2,
|
||||
ly // 2 + small_l // 4,
|
||||
lz // 2 - small_l // 4] = 2
|
||||
|
||||
# Anisotropic along X
|
||||
with expected_warnings(['"cg" mode']):
|
||||
labels_aniso2 = random_walker(data_aniso,
|
||||
labels_aniso2,
|
||||
mode='cg', spacing=(2., 1., 1.))
|
||||
assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
|
||||
|
||||
|
||||
def test_trivial_cases():
|
||||
# When all voxels are labeled
|
||||
img = np.ones((10, 10))
|
||||
labels = np.ones((10, 10))
|
||||
|
||||
with expected_warnings(["Returning provided labels"]):
|
||||
pass_through = random_walker(img, labels)
|
||||
np.testing.assert_array_equal(pass_through, labels)
|
||||
|
||||
# When all voxels are labeled AND return_full_prob is True
|
||||
labels[:, :5] = 3
|
||||
expected = np.concatenate(((labels == 1)[..., np.newaxis],
|
||||
(labels == 3)[..., np.newaxis]), axis=2)
|
||||
with expected_warnings(["Returning provided labels"]):
|
||||
test = random_walker(img, labels, return_full_prob=True)
|
||||
np.testing.assert_array_equal(test, expected)
|
||||
|
||||
# Unlabeled voxels not connected to seed, so nothing can be done
|
||||
img = np.full((10, 10), False)
|
||||
object_A = np.array([(6,7), (6,8), (7,7), (7,8)])
|
||||
object_B = np.array([(3,1), (4,1), (2,2), (3,2), (4,2), (2,3), (3,3)])
|
||||
for x, y in np.vstack((object_A, object_B)):
|
||||
img[y][x] = True
|
||||
|
||||
markers = np.zeros((10, 10), dtype=np.int8)
|
||||
for x, y in object_B:
|
||||
markers[y][x] = 1
|
||||
|
||||
markers[img == 0] = -1
|
||||
with expected_warnings(["All unlabeled pixels are isolated"]):
|
||||
output_labels = random_walker(img, markers)
|
||||
assert np.all(output_labels[markers == 1] == 1)
|
||||
# Here 0-labeled pixels could not be determined (no connection to seed)
|
||||
assert np.all(output_labels[markers == 0] == -1)
|
||||
with expected_warnings(["All unlabeled pixels are isolated"]):
|
||||
test = random_walker(img, markers, return_full_prob=True)
|
||||
|
||||
|
||||
def test_length2_spacing():
    """Smoke test: a spacing tuple matching the image ndim (2) is accepted.

    Completing without an exception (warnings are fine) is the pass
    criterion; no output values are checked.
    """
    np.random.seed(42)
    image = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
    seeds = np.zeros((10, 10), dtype=np.uint8)
    seeds[2, 4] = 1
    seeds[6, 8] = 4
    random_walker(image, seeds, spacing=(1., 2.))
|
||||
|
||||
|
||||
def test_bad_inputs():
    """Invalid arguments must raise ValueError.

    Covers: too few / too many image dimensions, a spacing tuple of the
    wrong length, and an unknown solver mode.
    """
    # 1-D input is rejected, with and without a channel axis.
    image = np.ones(10)
    seeds = np.arange(10)
    for kwargs in ({}, {'channel_axis': -1}):
        with testing.raises(ValueError):
            random_walker(image, seeds, **kwargs)

    # 5-D input is likewise rejected.
    np.random.seed(42)
    image = np.random.normal(size=(3, 3, 3, 3, 3))
    seeds = np.arange(3 ** 5).reshape(image.shape)
    for kwargs in ({}, {'channel_axis': -1}):
        with testing.raises(ValueError):
            random_walker(image, seeds, **kwargs)

    # A 2-D image needs a length-2 spacing, not length-1.
    image = np.random.normal(size=(10, 10))
    seeds = np.zeros((10, 10))
    seeds[2, 4] = 2
    seeds[6, 8] = 5
    with testing.raises(ValueError):
        random_walker(image, seeds, spacing=(1,))

    # Unknown solver mode.
    image = np.random.normal(size=(10, 10))
    seeds = np.zeros((10, 10))
    with testing.raises(ValueError):
        random_walker(image, seeds, mode='bad')
|
||||
|
||||
|
||||
def test_isolated_seeds():
    """A seed with no unlabeled neighbours keeps its label.

    In the marker image, -1 excludes a pixel, 0 marks it for
    classification, and positive integers are seeds; the seed at (1, 1)
    is entirely surrounded by excluded pixels.
    """
    np.random.seed(0)
    a = np.random.random((7, 7))
    mask = - np.ones(a.shape)
    # This pixel is an isolated seed
    mask[1, 1] = 1
    # Unlabeled pixels
    mask[3:, 3:] = 0
    # Seeds connected to unlabeled pixels
    mask[4, 4] = 2
    mask[6, 6] = 1

    # Test that no error is raised, and that labels of isolated seeds are OK
    with expected_warnings(['The probability range is outside']):
        res = random_walker(a, mask)
    assert res[1, 1] == 1
    with expected_warnings(['The probability range is outside']):
        res = random_walker(a, mask, return_full_prob=True)
    # Probability of label 1 at the isolated seed is exactly 1, label 2 is 0.
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0
|
||||
|
||||
|
||||
def test_isolated_area():
    """An unlabeled pixel with no path to any seed stays labeled 0.

    Same layout as ``test_isolated_seeds``, but the isolated pixel at
    (1, 1) is unlabeled (0) instead of being a seed.
    """
    np.random.seed(0)
    a = np.random.random((7, 7))
    mask = - np.ones(a.shape)
    # This pixel is an isolated unlabeled pixel (no connection to a seed)
    mask[1, 1] = 0
    # Unlabeled pixels
    mask[3:, 3:] = 0
    # Seeds connected to unlabeled pixels
    mask[4, 4] = 2
    mask[6, 6] = 1

    # Test that no error is raised, and that the isolated area stays at 0
    with expected_warnings(['The probability range is outside']):
        res = random_walker(a, mask)
    assert res[1, 1] == 0
    with expected_warnings(['The probability range is outside']):
        res = random_walker(a, mask, return_full_prob=True)
    # Both label probabilities are 0 at the unreachable pixel.
    assert res[0, 1, 1] == 0
    assert res[1, 1, 1] == 0
|
||||
|
||||
|
||||
def test_prob_tol():
    """Out-of-range probability warning and the knobs that silence it.

    The isolated seed makes the default solve emit a probability-range
    warning; lowering ``beta``, loosening ``prob_tol``, or tightening
    ``tol`` each avoid it while preserving the seed's probabilities.
    """
    np.random.seed(0)
    a = np.random.random((7, 7))
    mask = - np.ones(a.shape)
    # This pixel is an isolated seed
    mask[1, 1] = 1
    # Unlabeled pixels
    mask[3:, 3:] = 0
    # Seeds connected to unlabeled pixels
    mask[4, 4] = 2
    mask[6, 6] = 1

    with expected_warnings(['The probability range is outside']):
        res = random_walker(a, mask, return_full_prob=True)

    # Lower beta, no warning is expected.
    res = random_walker(a, mask, return_full_prob=True, beta=10)
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0

    # Being more prob_tol tolerant, no warning is expected.
    res = random_walker(a, mask, return_full_prob=True, prob_tol=1e-1)
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0

    # Reduced tol, no warning is expected.
    res = random_walker(a, mask, return_full_prob=True, tol=1e-9)
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0
|
||||
|
||||
|
||||
def test_umfpack_import():
    """UmfpackContext mirrors the availability of scikit-umfpack."""
    from skimage.segmentation import random_walker_segmentation

    context = random_walker_segmentation.UmfpackContext
    try:
        import scikits.umfpack  # noqa: F401
    except ImportError:
        # Without scikit-umfpack the module-level symbol must be None.
        assert context is None
    else:
        # With scikit-umfpack installed it must have been resolved.
        assert context is not None
|
||||
|
||||
|
||||
def test_empty_labels():
    """A label image without any positive seed raises ValueError."""
    image = np.random.random((5, 5))
    seeds = np.zeros((5, 5), dtype=int)

    # All zeros: nothing to propagate from.
    with testing.raises(ValueError, match="No seeds provided"):
        random_walker(image, seeds)

    # Excluded pixels (-1) are not seeds either.
    seeds[1, 1] = -1
    with testing.raises(ValueError, match="No seeds provided"):
        random_walker(image, seeds)

    # A single positive seed is enough to run without error.
    seeds[3, 3] = 1
    random_walker(image, seeds)
|
||||
568
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_slic.py
vendored
Normal file
568
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_slic.py
vendored
Normal file
@@ -0,0 +1,568 @@
|
||||
from itertools import product
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_equal
|
||||
|
||||
from skimage import data, filters, img_as_float
|
||||
from skimage._shared.testing import test_parallel, expected_warnings
|
||||
from skimage.segmentation import slic
|
||||
|
||||
|
||||
@test_parallel()
def test_color_2d():
    """SLIC on a noisy RGB image with four colour quadrants."""
    rng = np.random.default_rng(0)
    image = np.zeros((20, 21, 3))
    # One pure colour channel per quadrant (the fourth stays black).
    image[:10, :10, 0] = 1
    image[10:, :10, 1] = 1
    image[10:, 10:, 2] = 1
    image += 0.01 * rng.normal(size=image.shape)
    np.clip(image, 0, 1, out=image)
    seg = slic(image, n_segments=4, sigma=0, enforce_connectivity=False,
               start_label=0)

    # One segment per quadrant, with a deterministic label layout.
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, image.shape[:-1])
    for quadrant, label in [(seg[:10, :10], 0), (seg[:10, 10:], 1),
                            (seg[10:, :10], 2), (seg[10:, 10:], 3)]:
        assert_equal(quadrant, label)
|
||||
|
||||
|
||||
def test_multichannel_2d():
    """SLIC on a noisy 8-channel image with four distinct quadrants."""
    rnd = np.random.default_rng(0)
    img = np.zeros((20, 20, 8))
    # Each quadrant activates a different pair of channels.
    img[:10, :10, 0:2] = 1
    img[:10, 10:, 2:4] = 1
    img[10:, :10, 4:6] = 1
    img[10:, 10:, 6:8] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    img = np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, enforce_connectivity=False, start_label=0)

    # we expect 4 segments
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape[:-1])
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)
|
||||
|
||||
|
||||
def test_gray_2d():
    """SLIC on a noisy grayscale image (channel_axis=None), four regions."""
    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21))
    # Three gray quadrants plus the zero background quadrant.
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=4, compactness=1,
               channel_axis=None, convert2lab=False, start_label=0)

    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape)
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)
|
||||
|
||||
|
||||
def test_gray2d_default_channel_axis():
    """A 2-D image needs channel_axis=None; the default (-1) must raise."""
    image = np.zeros((20, 21))
    image[:10, :10] = 0.33
    expected_msg = "channel_axis=-1 indicates multichannel"
    with pytest.raises(ValueError, match=expected_msg):
        slic(image)
    # Explicitly declaring the image as single-channel works.
    slic(image, channel_axis=None)
|
||||
|
||||
|
||||
def _check_segment_labels(seg1, seg2, allowed_mismatch_ratio=0.1):
|
||||
size = seg1.size
|
||||
ndiff = np.sum(seg1 != seg2)
|
||||
assert (ndiff / size) < allowed_mismatch_ratio
|
||||
|
||||
|
||||
def test_slic_consistency_across_image_magnitude():
    """Segmentation should not depend on the image's dtype, scale or offset."""
    # verify that images of various scales across integer and float dtypes
    # give the same segmentation result
    img_uint8 = data.cat()[:256, :128]
    img_uint16 = 256 * img_uint8.astype(np.uint16)
    img_float32 = img_as_float(img_uint8)
    img_float32_norm = img_float32 / img_float32.max()
    img_float32_offset = img_float32 + 1000

    seg1 = slic(img_uint8)
    seg2 = slic(img_uint16)
    seg3 = slic(img_float32)
    seg4 = slic(img_float32_norm)
    seg5 = slic(img_float32_offset)

    np.testing.assert_array_equal(seg1, seg2)
    np.testing.assert_array_equal(seg1, seg3)
    # Assert that offset has no impact on result
    np.testing.assert_array_equal(seg4, seg5)
    # Floating point cases can have mismatch due to floating point error:
    # exact match was observed on x86_64, but mismatches were seen on i686.
    # For now just verify that a similar number of superpixels are present in
    # each case.
    n_seg1 = seg1.max()
    n_seg4 = seg4.max()
    assert abs(n_seg1 - n_seg4) / n_seg1 < 0.5
|
||||
|
||||
|
||||
def test_color_3d():
    """SLIC on a noisy 3-D RGB volume split into 8 colour octants."""
    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21, 22, 3))
    # Build one (low, high) slice pair per spatial axis; their product
    # enumerates the 8 octants of the volume.
    slices = []
    for dim_size in img.shape[:-1]:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    # One corner of the RGB cube per octant.
    colors = list(product(*(([0, 1],) * 3)))
    for s, c in zip(slices, colors):
        img[s] = c
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=8, start_label=0)

    assert_equal(len(np.unique(seg)), 8)
    for s, c in zip(slices, range(8)):
        assert_equal(seg[s], c)
|
||||
|
||||
|
||||
def test_gray_3d():
    """SLIC on a noisy grayscale volume with 8 constant-intensity octants."""
    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21, 22))
    # Build one (low, high) slice pair per axis; their product enumerates
    # the 8 octants of the volume.
    slices = []
    for dim_size in img.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    # 8 evenly spaced gray levels in [0, 1], one per octant.
    shades = np.arange(0, 1.000001, 1.0 / 7)
    for s, sh in zip(slices, shades):
        img[s] = sh
    img += 0.001 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=8, compactness=1,
               channel_axis=None, convert2lab=False, start_label=0)

    assert_equal(len(np.unique(seg)), 8)
    for s, c in zip(slices, range(8)):
        assert_equal(seg[s], c)
|
||||
|
||||
|
||||
def test_list_sigma():
    """A per-axis sigma list whose length differs from the image ndim warns."""
    rnd = np.random.default_rng(0)
    img = np.array([[1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1]], float)
    img += 0.1 * rnd.normal(size=img.shape)
    result_sigma = np.array([[0, 0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1, 1]], int)
    # Three sigma values for a 2-D image: a warning is expected.
    with expected_warnings(["Input image is 2D: sigma number of "
                            "elements must be 2"]):
        seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1],
                         channel_axis=None, start_label=0)
    assert_equal(seg_sigma, result_sigma)
|
||||
|
||||
|
||||
def test_spacing():
    """Anisotropic spacing changes which axis the segment boundary follows."""
    rnd = np.random.default_rng(0)
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], float)
    result_non_spaced = np.array([[0, 0, 0, 1, 1],
                                  [0, 0, 1, 1, 1]], int)
    result_spaced = np.array([[0, 0, 0, 0, 0],
                              [1, 1, 1, 1, 1]], int)
    img += 0.1 * rnd.normal(size=img.shape)
    seg_non_spaced = slic(img, n_segments=2, sigma=0, channel_axis=None,
                          compactness=1.0, start_label=0)
    # A very large row spacing forces the split between the two rows.
    seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[500, 1],
                      compactness=1.0, channel_axis=None, start_label=0)
    assert_equal(seg_non_spaced, result_non_spaced)
    assert_equal(seg_spaced, result_spaced)
|
||||
|
||||
|
||||
def test_invalid_lab_conversion():
    """convert2lab=True on a non-RGB multichannel image must raise."""
    # With channel_axis=-1 the trailing axis (5 samples) is treated as
    # channels, which is not a 3-channel RGB layout.
    image = 1 + np.array([[1, 1, 1, 0, 0],
                          [1, 1, 0, 0, 0]], float)
    with pytest.raises(ValueError):
        slic(image, channel_axis=-1, convert2lab=True, start_label=0)
|
||||
|
||||
|
||||
def test_enforce_connectivity():
    """enforce_connectivity merges stray pixels into their main segment."""
    img = np.array([[0, 0, 0, 1, 1, 1],
                    [1, 0, 0, 1, 1, 0],
                    [0, 0, 0, 1, 1, 0]], float)

    segments_connected = slic(img, 2, compactness=0.0001,
                              enforce_connectivity=True,
                              convert2lab=False, start_label=0,
                              channel_axis=None)
    segments_disconnected = slic(img, 2, compactness=0.0001,
                                 enforce_connectivity=False,
                                 convert2lab=False, start_label=0,
                                 channel_axis=None)

    # Make sure nothing fatal occurs (e.g. buffer overflow) at low values of
    # max_size_factor
    segments_connected_low_max = slic(img, 2, compactness=0.0001,
                                      enforce_connectivity=True,
                                      convert2lab=False,
                                      max_size_factor=0.8,
                                      start_label=0,
                                      channel_axis=None)

    # With connectivity enforced the outlier pixels join the larger regions.
    result_connected = np.array([[0, 0, 0, 1, 1, 1],
                                 [0, 0, 0, 1, 1, 1],
                                 [0, 0, 0, 1, 1, 1]], float)

    # Without it the labels follow the raw intensities exactly.
    result_disconnected = np.array([[0, 0, 0, 1, 1, 1],
                                    [1, 0, 0, 1, 1, 0],
                                    [0, 0, 0, 1, 1, 0]], float)

    assert_equal(segments_connected, result_connected)
    assert_equal(segments_disconnected, result_disconnected)
    assert_equal(segments_connected_low_max, result_connected)
|
||||
|
||||
|
||||
def test_slic_zero():
    """slic_zero=True variant of test_color_2d: same four quadrants."""
    # Same as test_color_2d but with slic_zero=True
    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, n_segments=4, sigma=0, slic_zero=True, start_label=0)

    # we expect 4 segments
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape[:-1])
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)
|
||||
|
||||
|
||||
def test_more_segments_than_pixels():
    """Requesting more segments than pixels gives one label per pixel."""
    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    # 500 segments requested for only 20 * 21 = 420 pixels.
    seg = slic(img, sigma=0, n_segments=500, compactness=1,
               channel_axis=None, convert2lab=False, start_label=0)
    assert np.all(seg.ravel() == np.arange(seg.size))
|
||||
|
||||
|
||||
def test_color_2d_mask():
    """Masked SLIC on the RGB quadrant image: label 0 outside the mask."""
    rnd = np.random.default_rng(0)
    # Mask excludes a 2-pixel border all around.
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, sigma=0, enforce_connectivity=False,
               mask=msk)

    # we expect 4 segments + masked area
    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape[:-1])
    # segments
    assert_equal(seg[2:10, 2:10], 1)
    assert_equal(seg[10:-2, 2:10], 4)
    assert_equal(seg[2:10, 10:-2], 2)
    assert_equal(seg[10:-2, 10:-2], 3)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)
|
||||
|
||||
|
||||
def test_multichannel_2d_mask():
    """Masked SLIC on the 8-channel quadrant image: label 0 outside mask."""
    rnd = np.random.default_rng(0)
    # Mask excludes a 2-pixel border all around.
    msk = np.zeros((20, 20))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 20, 8))
    # Each quadrant activates a different pair of channels.
    img[:10, :10, 0:2] = 1
    img[:10, 10:, 2:4] = 1
    img[10:, :10, 4:6] = 1
    img[10:, 10:, 6:8] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, enforce_connectivity=False,
               mask=msk)

    # we expect 4 segments + masked area
    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape[:-1])
    # segments
    assert_equal(seg[2:10, 2:10], 2)
    assert_equal(seg[2:10, 10:-2], 1)
    assert_equal(seg[10:-2, 2:10], 4)
    assert_equal(seg[10:-2, 10:-2], 3)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)
|
||||
|
||||
|
||||
def test_gray_2d_mask():
    """Masked grayscale SLIC; in-mask labels start at 1, 0 outside."""
    rnd = np.random.default_rng(0)
    # Mask excludes a 2-pixel border all around.
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, sigma=0, n_segments=4, compactness=1,
               channel_axis=None, convert2lab=False, mask=msk)

    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape)
    # segments
    assert_equal(seg[2:10, 2:10], 1)
    assert_equal(seg[2:10, 10:-2], 2)
    assert_equal(seg[10:-2, 2:10], 3)
    assert_equal(seg[10:-2, 10:-2], 4)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)
|
||||
|
||||
|
||||
def test_list_sigma_mask():
    """Per-axis sigma list of the correct length (2) with a mask."""
    rnd = np.random.default_rng(0)
    # Mask excludes the first and last column.
    msk = np.zeros((2, 6))
    msk[:, 1:-1] = 1
    img = np.array([[1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1]], float)
    img += 0.1 * rnd.normal(size=img.shape)
    result_sigma = np.array([[0, 1, 1, 2, 2, 0],
                             [0, 1, 1, 2, 2, 0]], int)
    seg_sigma = slic(img, n_segments=2, sigma=[50, 1],
                     channel_axis=None, mask=msk)
    assert_equal(seg_sigma, result_sigma)
|
||||
|
||||
|
||||
def test_spacing_mask():
    """Anisotropic spacing with a mask still flips the split axis."""
    rnd = np.random.default_rng(0)
    # Mask excludes the first and last column.
    msk = np.zeros((2, 5))
    msk[:, 1:-1] = 1
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], float)
    result_non_spaced = np.array([[0, 1, 1, 2, 0],
                                  [0, 1, 2, 2, 0]], int)
    result_spaced = np.array([[0, 1, 1, 1, 0],
                              [0, 2, 2, 2, 0]], int)
    img += 0.1 * rnd.normal(size=img.shape)
    seg_non_spaced = slic(img, n_segments=2, sigma=0, channel_axis=None,
                          compactness=1.0, mask=msk)
    # A large row spacing forces the boundary between the two rows.
    seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[50, 1],
                      compactness=1.0, channel_axis=None, mask=msk)
    assert_equal(seg_non_spaced, result_non_spaced)
    assert_equal(seg_spaced, result_spaced)
|
||||
|
||||
|
||||
def test_enforce_connectivity_mask():
    """enforce_connectivity combined with a mask; masked pixels stay 0."""
    # Mask excludes the first and last column.
    msk = np.zeros((3, 6))
    msk[:, 1:-1] = 1
    img = np.array([[0, 0, 0, 1, 1, 1],
                    [1, 0, 0, 1, 1, 0],
                    [0, 0, 0, 1, 1, 0]], float)

    segments_connected = slic(img, 2, compactness=0.0001,
                              enforce_connectivity=True,
                              convert2lab=False, mask=msk, channel_axis=None)
    segments_disconnected = slic(img, 2, compactness=0.0001,
                                 enforce_connectivity=False,
                                 convert2lab=False, mask=msk, channel_axis=None)

    # Make sure nothing fatal occurs (e.g. buffer overflow) at low values of
    # max_size_factor
    segments_connected_low_max = slic(img, 2, compactness=0.0001,
                                      enforce_connectivity=True,
                                      convert2lab=False,
                                      max_size_factor=0.8, mask=msk,
                                      channel_axis=None)

    # The outlier pixels lie outside the mask, so both settings agree here.
    result_connected = np.array([[0, 1, 1, 2, 2, 0],
                                 [0, 1, 1, 2, 2, 0],
                                 [0, 1, 1, 2, 2, 0]], float)

    result_disconnected = np.array([[0, 1, 1, 2, 2, 0],
                                    [0, 1, 1, 2, 2, 0],
                                    [0, 1, 1, 2, 2, 0]], float)

    assert_equal(segments_connected, result_connected)
    assert_equal(segments_disconnected, result_disconnected)
    assert_equal(segments_connected_low_max, result_connected)
|
||||
|
||||
|
||||
def test_slic_zero_mask():
    """slic_zero=True with a mask: 4 segments inside, label 0 outside."""

    rnd = np.random.default_rng(0)
    # Mask excludes a 2-pixel border all around.
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, sigma=0, slic_zero=True,
               mask=msk)

    # we expect 4 segments + masked area
    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape[:-1])
    # segments
    assert_equal(seg[2:10, 2:10], 1)
    assert_equal(seg[2:10, 10:-2], 2)
    assert_equal(seg[10:-2, 2:10], 3)
    assert_equal(seg[10:-2, 10:-2], 4)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)
|
||||
|
||||
|
||||
def test_more_segments_than_pixels_mask():
    """More segments than masked pixels: one label per in-mask pixel,
    starting at 1 (0 is reserved for the masked-out area)."""
    rnd = np.random.default_rng(0)
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, sigma=0, n_segments=500, compactness=1,
               channel_axis=None, convert2lab=False, mask=msk)

    expected = np.arange(seg[2:-2, 2:-2].size) + 1
    assert np.all(seg[2:-2, 2:-2].ravel() == expected)
|
||||
|
||||
|
||||
def test_color_3d_mask():
    """Masked SLIC on the 3-D RGB octant volume; labels 1..8 inside."""

    # Mask excludes a 2-voxel border all around.
    msk = np.zeros((20, 21, 22))
    msk[2:-2, 2:-2, 2:-2] = 1

    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21, 22, 3))
    # Build one (low, high) slice pair per spatial axis; their product
    # enumerates the 8 octants of the volume.
    slices = []
    for dim_size in msk.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    # One corner of the RGB cube per octant.
    colors = list(product(*(([0, 1],) * 3)))
    for s, c in zip(slices, colors):
        img[s] = c
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)

    seg = slic(img, sigma=0, n_segments=8, mask=msk)

    # we expect 8 segments + masked area
    assert_equal(len(np.unique(seg)), 9)
    for s, c in zip(slices, range(1, 9)):
        assert_equal(seg[s][2:-2, 2:-2, 2:-2], c)
|
||||
|
||||
|
||||
def test_gray_3d_mask():
    """Masked grayscale SLIC on the 8-octant volume; labels 1..8 inside."""

    # Mask excludes a 2-voxel border all around.
    msk = np.zeros((20, 21, 22))
    msk[2:-2, 2:-2, 2:-2] = 1

    rnd = np.random.default_rng(0)
    img = np.zeros((20, 21, 22))
    # Build one (low, high) slice pair per axis; their product enumerates
    # the 8 octants of the volume.
    slices = []
    for dim_size in img.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    # 8 evenly spaced gray levels, one per octant.
    shades = np.linspace(0, 1, 8)
    for s, sh in zip(slices, shades):
        img[s] = sh
    img += 0.001 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, sigma=0, n_segments=8, channel_axis=None,
               convert2lab=False, mask=msk)

    # we expect 8 segments + masked area
    assert_equal(len(np.unique(seg)), 9)
    for s, c in zip(slices, range(1, 9)):
        assert_equal(seg[s][2:-2, 2:-2, 2:-2], c)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "dtype", ['float16', 'float32', 'float64', 'uint8', 'int']
)
def test_dtype_support(dtype):
    """slic accepts common float and integer dtypes without error."""
    image = np.random.rand(28, 28).astype(dtype)
    # Smoke test: completing without an exception is the pass criterion.
    slic(image, start_label=1, channel_axis=None)
|
||||
|
||||
|
||||
def test_start_label_fix():
    """Tests the fix for a bug producing a label < start_label (gh-6240).

    For the v0.19.1 release, the image and `slic` call as below result in two
    non-contiguous regions with value 0 despite `start_label=1`. We verify
    that the minimum label is now `start_label` as expected.
    """
    # generate bumpy data that gives an unexpected label prior to the bug fix
    rng = np.random.default_rng(9)
    bumps = rng.standard_normal((8, 13)) > 0
    image = filters.gaussian(bumps, sigma=1)

    start_label = 1
    superpixels = slic(image, start_label=start_label, channel_axis=None,
                       n_segments=6, compactness=0.01,
                       enforce_connectivity=True, max_num_iter=10)
    assert superpixels.min() == start_label
|
||||
|
||||
|
||||
def test_raises_ValueError_if_input_has_NaN():
    """A NaN anywhere in the image must raise; masking it out must not.

    Uses ``np.nan`` rather than the ``np.NaN`` alias, which was removed
    in NumPy 2.0 and raises AttributeError there.
    """
    img = np.zeros((4, 5), dtype=float)
    img[2, 3] = np.nan
    with pytest.raises(ValueError):
        slic(img, channel_axis=None)

    # Excluding the non-finite pixel via the mask makes the input valid.
    mask = ~np.isnan(img)
    slic(img, mask=mask, channel_axis=None)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("inf", [-np.inf, np.inf])
def test_raises_ValueError_if_input_has_inf(inf):
    """An infinite pixel must raise; masking it out makes the input valid."""
    image = np.zeros((4, 5), dtype=float)
    image[2, 3] = inf
    with pytest.raises(ValueError):
        slic(image, channel_axis=None)

    # Restricting the computation to the finite pixels is accepted.
    finite_mask = np.isfinite(image)
    slic(image, mask=finite_mask, channel_axis=None)
|
||||
498
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_watershed.py
vendored
Normal file
498
.CondaPkg/env/Lib/site-packages/skimage/segmentation/tests/test_watershed.py
vendored
Normal file
@@ -0,0 +1,498 @@
|
||||
"""test_watershed.py - tests the watershed function
|
||||
"""
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from scipy import ndimage as ndi
|
||||
|
||||
from skimage._shared.filters import gaussian
|
||||
from skimage.measure import label
|
||||
|
||||
from .._watershed import watershed
|
||||
|
||||
# Tolerance for the squared-error comparisons performed by diff() below.
eps = 1e-12
# Synthetic 21x16 grayscale test image: two dark basins (minima of value 1)
# separated by a brighter ridge, on a uniform 255 background.
blob = np.array([[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
                 [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
                 [255, 255, 255, 255, 255, 204, 204, 204, 204, 204, 204, 255, 255, 255, 255, 255],
                 [255, 255, 255, 204, 204, 183, 153, 153, 153, 153, 183, 204, 204, 255, 255, 255],
                 [255, 255, 204, 183, 153, 141, 111, 103, 103, 111, 141, 153, 183, 204, 255, 255],
                 [255, 255, 204, 153, 111, 94, 72, 52, 52, 72, 94, 111, 153, 204, 255, 255],
                 [255, 255, 204, 153, 111, 72, 39, 1, 1, 39, 72, 111, 153, 204, 255, 255],
                 [255, 255, 204, 183, 141, 111, 72, 39, 39, 72, 111, 141, 183, 204, 255, 255],
                 [255, 255, 255, 204, 183, 141, 111, 72, 72, 111, 141, 183, 204, 255, 255, 255],
                 [255, 255, 255, 255, 204, 183, 141, 94, 94, 141, 183, 204, 255, 255, 255, 255],
                 [255, 255, 255, 255, 255, 204, 153, 103, 103, 153, 204, 255, 255, 255, 255, 255],
                 [255, 255, 255, 255, 204, 183, 141, 94, 94, 141, 183, 204, 255, 255, 255, 255],
                 [255, 255, 255, 204, 183, 141, 111, 72, 72, 111, 141, 183, 204, 255, 255, 255],
                 [255, 255, 204, 183, 141, 111, 72, 39, 39, 72, 111, 141, 183, 204, 255, 255],
                 [255, 255, 204, 153, 111, 72, 39, 1, 1, 39, 72, 111, 153, 204, 255, 255],
                 [255, 255, 204, 153, 111, 94, 72, 52, 52, 72, 94, 111, 153, 204, 255, 255],
                 [255, 255, 204, 183, 153, 141, 111, 103, 103, 111, 141, 153, 183, 204, 255, 255],
                 [255, 255, 255, 204, 204, 183, 153, 153, 153, 153, 183, 204, 204, 255, 255, 255],
                 [255, 255, 255, 255, 255, 204, 204, 204, 204, 204, 204, 255, 255, 255, 255, 255],
                 [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
                 [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]])
|
||||
|
||||
|
||||
def diff(a, b):
    """Root-sum-of-squares distance between two label arrays.

    Entries of ``b`` where ``a`` is zero are zeroed out first (in place
    when ``b`` is already an ndarray), and a pair of empty arrays
    compares as identical (0.0).
    """
    a = np.asarray(a)
    b = np.asarray(b)
    if (0 in a.shape) and (0 in b.shape):
        return 0.0
    # Ignore positions that a marks as background.
    b[a == 0] = 0
    complex_types = (np.complex64, np.complex128)
    if a.dtype in complex_types or b.dtype in complex_types:
        ca = np.asarray(a, np.complex128)
        cb = np.asarray(b, np.complex128)
        total = ((ca.real - cb.real) ** 2).sum()
        total += ((ca.imag - cb.imag) ** 2).sum()
    else:
        fa = a.astype(np.float64)
        fb = b.astype(np.float64)
        total = ((fa - fb) ** 2).sum()
    return math.sqrt(total)
|
||||
|
||||
|
||||
class TestWatershed(unittest.TestCase):
|
||||
eight = np.ones((3, 3), bool)
|
||||
|
||||
def test_watershed01(self):
|
||||
"watershed 1"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[ -1, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 1, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0]],
|
||||
np.int8)
|
||||
out = watershed(data, markers, self.eight)
|
||||
expected = np.array([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]])
|
||||
error = diff(expected, out)
|
||||
assert error < eps
|
||||
|
||||
def test_watershed02(self):
|
||||
"watershed 2"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
||||
out = watershed(data, markers)
|
||||
error = diff([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, 1, 1, 1, -1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, -1, 1, 1, 1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed03(self):
|
||||
"watershed 3"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 2, 0, 3, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, -1]], np.int8)
|
||||
out = watershed(data, markers)
|
||||
error = diff([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, 0, 2, 0, 3, 0, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 0, 2, 0, 3, 0, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed04(self):
    "watershed 4"
    # Same image as watershed 3, but segmented with 8-connectivity
    # (self.eight): diagonal steps let labels 2/3 reach the corners,
    # so the whole square (minus the central ridge) gets labeled.
    data = np.array([[0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 1, 1, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0],
                     [0, 1, 1, 1, 1, 1, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]], np.uint8)
    markers = np.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 2, 0, 3, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, -1]], np.int8)
    out = watershed(data, markers, self.eight)
    error = diff([[-1, -1, -1, -1, -1, -1, -1],
                  [-1, 2, 2, 0, 3, 3, -1],
                  [-1, 2, 2, 0, 3, 3, -1],
                  [-1, 2, 2, 0, 3, 3, -1],
                  [-1, 2, 2, 0, 3, 3, -1],
                  [-1, 2, 2, 0, 3, 3, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1]], out)
    self.assertTrue(error < eps)
def test_watershed05(self):
    "watershed 5"
    # Same as watershed 4 but with marker labels 2 and 3 swapped: the
    # result must follow marker position, not label magnitude.
    data = np.array([[0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 1, 1, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0],
                     [0, 1, 1, 1, 1, 1, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]], np.uint8)
    markers = np.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 3, 0, 2, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, -1]], np.int8)
    out = watershed(data, markers, self.eight)
    error = diff([[-1, -1, -1, -1, -1, -1, -1],
                  [-1, 3, 3, 0, 2, 2, -1],
                  [-1, 3, 3, 0, 2, 2, -1],
                  [-1, 3, 3, 0, 2, 2, -1],
                  [-1, 3, 3, 0, 2, 2, -1],
                  [-1, 3, 3, 0, 2, 2, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1]], out)
    self.assertTrue(error < eps)
def test_watershed06(self):
    "watershed 6"
    # An open-topped basin bounded by 1-ridges; a single marker inside
    # floods the whole interior, the -1 marker claims the rest.
    data = np.array([[0, 1, 0, 0, 0, 1, 0],
                     [0, 1, 0, 0, 0, 1, 0],
                     [0, 1, 0, 0, 0, 1, 0],
                     [0, 1, 1, 1, 1, 1, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]], np.uint8)
    markers = np.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [-1, 0, 0, 0, 0, 0, 0]], np.int8)
    out = watershed(data, markers, self.eight)
    error = diff([[-1, 1, 1, 1, 1, 1, -1],
                  [-1, 1, 1, 1, 1, 1, -1],
                  [-1, 1, 1, 1, 1, 1, -1],
                  [-1, 1, 1, 1, 1, 1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, -1, -1]], out)
    self.assertTrue(error < eps)
def test_watershed07(self):
    "A regression test of a competitive case that failed"
    data = blob
    mask = data != 255
    markers = np.zeros(data.shape, int)
    markers[6, 7], markers[14, 7] = 1, 2
    labeled = watershed(data, markers, self.eight, mask=mask)
    # The two objects should be the same size, except possibly for the
    # border region
    size1, size2 = np.sum(labeled == 1), np.sum(labeled == 2)
    self.assertTrue(abs(size1 - size2) <= 6)
def test_watershed08(self):
    "The border pixels + an edge are all the same value"
    data = blob.copy()
    data[10, 7:9] = 141
    mask = data != 255
    markers = np.zeros(data.shape, int)
    markers[6, 7], markers[14, 7] = 1, 2
    labeled = watershed(data, markers, self.eight, mask=mask)
    # The two objects should be the same size, except possibly for the
    # border region
    size1, size2 = np.sum(labeled == 1), np.sum(labeled == 2)
    self.assertTrue(abs(size1 - size2) <= 6)
def test_watershed09(self):
    """Test on an image of reasonable size

    This is here both for timing (does it take forever?) and to
    ensure that the memory constraints are reasonable
    """
    shape = (1000, 1000)
    image = np.zeros(shape)
    markers = np.zeros(shape, int)
    coords = np.random.uniform(0, 1000, (100, 2)).astype(int)
    # Seed 100 random peaks, each with its own marker label 1..100.
    for idx, (x, y) in enumerate(coords, start=1):
        image[x, y] = 1
        markers[x, y] = idx

    image = gaussian(image, 4, mode='reflect')
    watershed(image, markers, self.eight)
    ndi.watershed_ift(image.astype(np.uint16), markers, self.eight)
def test_watershed10(self):
    "watershed 10"
    # Flat image: labels expand symmetrically from the four corner
    # seeds and split the plane into quadrants.
    image = np.ones((4, 4), np.uint8)
    seeds = np.array([[1, 0, 0, 2],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0],
                      [3, 0, 0, 4]], np.int8)
    result = watershed(image, seeds, self.eight)
    expected = [[1, 1, 2, 2],
                [1, 1, 2, 2],
                [3, 3, 4, 4],
                [3, 3, 4, 4]]
    self.assertTrue(diff(expected, result) < eps)
def test_watershed11(self):
    '''Make sure that all points on this plateau are assigned to closest seed'''
    # https://github.com/scikit-image/scikit-image/issues/803
    #
    # Make sure that no point in a level image is farther away
    # from its seed than any other
    #
    image = np.zeros((21, 21))
    markers = np.zeros((21, 21), int)
    markers[5, 5] = 1
    markers[5, 10] = 2
    markers[10, 5] = 3
    markers[10, 10] = 4

    structure = np.array([[False, True, False],
                          [True, True, True],
                          [False, True, False]])
    out = watershed(image, markers, structure)
    i, j = np.mgrid[0:21, 0:21]
    # BUG FIX: the original wrote np.sqrt((i-i0)**2, (j-j0)**2) -- the
    # second positional argument to np.sqrt is its `out=` buffer, so the
    # j-term was silently discarded and only the i-distance was tested.
    # The two squared terms must be summed before the square root.
    d = np.dstack(
        [np.sqrt((i.astype(float) - i0) ** 2 + (j.astype(float) - j0) ** 2)
         for i0, j0 in ((5, 5), (5, 10), (10, 5), (10, 10))])
    dmin = np.min(d, 2)
    # Every pixel's assigned seed must be (one of) the nearest seed(s).
    self.assertTrue(np.all(d[i, j, out[i, j] - 1] == dmin))
def test_watershed12(self):
    "The watershed line"
    # Grayscale blob image; seeds are the zero-valued minima, and
    # watershed_line=True must leave a 0-valued ridge between regions.
    data = np.array([[203, 255, 203, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153],
                     [203, 255, 203, 153, 153, 153, 102, 102, 102, 102, 102, 102, 153, 153, 153, 153],
                     [203, 255, 203, 203, 153, 153, 102, 102, 77, 0, 102, 102, 153, 153, 203, 203],
                     [203, 255, 255, 203, 153, 153, 153, 102, 102, 102, 102, 153, 153, 203, 203, 255],
                     [203, 203, 255, 203, 203, 203, 153, 153, 153, 153, 153, 153, 203, 203, 255, 255],
                     [153, 203, 255, 255, 255, 203, 203, 203, 203, 203, 203, 203, 203, 255, 255, 203],
                     [153, 203, 203, 203, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 203, 203],
                     [153, 153, 153, 203, 203, 203, 203, 203, 255, 203, 203, 203, 203, 203, 203, 153],
                     [102, 102, 153, 153, 153, 153, 203, 203, 255, 203, 203, 255, 203, 153, 153, 153],
                     [102, 102, 102, 102, 102, 153, 203, 255, 255, 203, 203, 203, 203, 153, 102, 153],
                     [102, 51, 51, 102, 102, 153, 203, 255, 203, 203, 153, 153, 153, 153, 102, 153],
                     [ 77, 51, 51, 102, 153, 153, 203, 255, 203, 203, 203, 153, 102, 102, 102, 153],
                     [ 77, 0, 51, 102, 153, 203, 203, 255, 203, 255, 203, 153, 102, 51, 102, 153],
                     [ 77, 0, 51, 102, 153, 203, 255, 255, 203, 203, 203, 153, 102, 0, 102, 153],
                     [102, 0, 51, 102, 153, 203, 255, 203, 203, 153, 153, 153, 102, 102, 102, 153],
                     [102, 102, 102, 102, 153, 203, 255, 203, 153, 153, 153, 153, 153, 153, 153, 153]])
    markerbin = (data==0)
    marker = label(markerbin)
    ws = watershed(data, marker, connectivity=2, watershed_line=True)
    # Label 0 is the watershed line itself (area 34); the three basins
    # each cover 74 pixels.
    for lab, area in zip(range(4), [34,74,74,74]):
        self.assertTrue(np.sum(ws == lab) == area)
def test_compact_watershed():
    """Compactness keeps the region boundary at the image step."""
    image = np.zeros((5, 6))
    image[:, 3:] = 1
    seeds = np.zeros((5, 6), dtype=int)
    seeds[2, 0] = 1
    seeds[2, 3] = 2

    # With compactness, each seed keeps its own half of the image.
    compact = watershed(image, seeds, compactness=0.01)
    expected = np.array([[1, 1, 1, 2, 2, 2]] * 5, dtype=int)
    np.testing.assert_equal(compact, expected)

    # Without compactness, seed 1 floods everything except seed 2's row.
    normal = watershed(image, seeds)
    expected = np.ones(image.shape, dtype=int)
    expected[2, 3:] = 2
    np.testing.assert_equal(normal, expected)
def test_numeric_seed_watershed():
    """Test that passing just the number of seeds to watershed works."""
    image = np.zeros((5, 6))
    image[:, 3:] = 1
    # `markers=2` asks watershed to place two seeds automatically.
    compact = watershed(image, 2, compactness=0.01)
    expected = np.array([[1, 1, 1, 1, 2, 2]] * 5, dtype=np.int32)
    np.testing.assert_equal(compact, expected)
def test_incorrect_markers_shape():
    """A markers array whose shape differs from the image is rejected."""
    with pytest.raises(ValueError):
        watershed(np.ones((5, 6)), np.ones((5, 7)))
def test_incorrect_mask_shape():
    """A mask whose shape differs from the image is rejected."""
    with pytest.raises(ValueError):
        watershed(np.ones((5, 6)), markers=4, mask=np.ones((5, 7)))
def test_markers_in_mask():
    """Automatically-placed markers must all fall inside the mask."""
    mask = blob != 255
    labels = watershed(blob, 25, connectivity=2, mask=mask)
    # There should be no markers where the mask is false
    assert np.all(labels[~mask] == 0)
def test_no_markers():
    """With no markers given, watershed finds the two minima itself."""
    mask = blob != 255
    labels = watershed(blob, mask=mask)
    assert np.max(labels) == 2
def test_connectivity():
    """
    Watershed segmentation should output different result for
    different connectivity
    when markers are calculated where None is supplied.
    Issue = 5084
    """
    # Generate a dummy BrightnessTemperature image
    # (four overlapping/touching discs of different radii).
    x, y = np.indices((406, 270))
    x1, y1, x2, y2, x3, y3, x4, y4 = 200, 208, 300, 120, 100, 100, 340, 208
    r1, r2, r3, r4 = 100, 50, 40, 80
    mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
    mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
    mask_circle3 = (x - x3)**2 + (y - y3)**2 < r3**2
    mask_circle4 = (x - x4)**2 + (y - y4)**2 < r4**2
    image = np.logical_or(mask_circle1, mask_circle2)
    image = np.logical_or(image, mask_circle3)
    image = np.logical_or(image, mask_circle4)

    # calculate distance in discrete increase
    # (quantize the EDT to 12-unit steps so plateaus form)
    DummyBT = ndi.distance_transform_edt(image)
    DummyBT_dis = np.around(DummyBT / 12, decimals = 0)*12
    # calculate the mask
    Img_mask = np.where(DummyBT_dis == 0, 0, 1)

    # segments for connectivity 1 and 2
    # (markers=None, so watershed derives them from local minima of the
    # inverted distance image -- connectivity changes which minima merge)
    labels_c1 = watershed(200 - DummyBT_dis, mask=Img_mask, connectivity=1,
                          compactness=0.01)
    labels_c2 = watershed(200 - DummyBT_dis, mask=Img_mask, connectivity=2,
                          compactness=0.01)

    # assertions
    assert np.unique(labels_c1).shape[0] == 6
    assert np.unique(labels_c2).shape[0] == 5

    # checking via area of each individual segment.
    for lab, area in zip(range(6), [61824, 3653, 20467, 11097, 1301, 11278]):
        assert np.sum(labels_c1 == lab) == area

    for lab, area in zip(range(5), [61824, 3653, 20466, 12386, 11291]):
        assert np.sum(labels_c2 == lab) == area
Reference in New Issue
Block a user