update
This commit is contained in:
@@ -1,19 +0,0 @@
|
||||
"""Functionality with an experimental API. Although you can count on the
|
||||
functions in this package being around in the future, the API may change with
|
||||
any version update **and will not follow the skimage two-version deprecation
|
||||
path**. Therefore, use the functions herein with care, and do not use them in
|
||||
production code that will depend on updated skimage versions.
|
||||
"""
|
||||
|
||||
from .manual_segmentation import manual_polygon_segmentation
|
||||
from .manual_segmentation import manual_lasso_segmentation
|
||||
from .trainable_segmentation import fit_segmenter, predict_segmenter, TrainableSegmenter
|
||||
|
||||
|
||||
__all__ = [
|
||||
"manual_lasso_segmentation",
|
||||
"manual_polygon_segmentation",
|
||||
"fit_segmenter",
|
||||
"predict_segmenter",
|
||||
"TrainableSegmenter",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,7 +0,0 @@
|
||||
# Remove this package in the release after v0.20

# Importing this module always fails: the submodule was relocated.  Raising
# at import time gives users a pointer to the new location.
raise ModuleNotFoundError(
    "The `skimage.future.graph` submodule was moved to `skimage.graph` in "
    "v0.20. `ncut` was removed in favor of the identical function "
    "`cut_normalized`. Please update your import paths accordingly."
)
|
||||
Binary file not shown.
@@ -1,227 +0,0 @@
|
||||
from functools import reduce
import numpy as np
from ..draw import polygon
from .._shared.version_requirements import require


# Matplotlib mouse-button codes as reported by ``MouseEvent.button``.
LEFT_CLICK = 1
RIGHT_CLICK = 3
|
||||
|
||||
|
||||
def _mask_from_vertices(vertices, shape, label):
    """Rasterize a polygon given by ``(x, y)`` vertices into an int mask.

    Pixels inside the polygon are set to ``label``; all others stay 0.
    """
    out = np.zeros(shape, dtype=int)
    # Vertices arrive as (x, y) = (col, row); skimage.draw.polygon wants
    # rows first, then columns.
    rows = [y for _, y in vertices]
    cols = [x for x, _ in vertices]
    rr, cc = polygon(rows, cols, shape)
    out[rr, cc] = label
    return out
|
||||
|
||||
|
||||
@require("matplotlib", ">=3.3")
|
||||
def _draw_polygon(ax, vertices, alpha=0.4):
|
||||
from matplotlib.patches import Polygon
|
||||
from matplotlib.collections import PatchCollection
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
polygon = Polygon(vertices, closed=True)
|
||||
p = PatchCollection([polygon], match_original=True, alpha=alpha)
|
||||
polygon_object = ax.add_collection(p)
|
||||
plt.draw()
|
||||
return polygon_object
|
||||
|
||||
|
||||
@require("matplotlib", ">=3.3")
|
||||
def manual_polygon_segmentation(image, alpha=0.4, return_all=False):
|
||||
"""Return a label image based on polygon selections made with the mouse.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N[, 3]) array
|
||||
Grayscale or RGB image.
|
||||
|
||||
alpha : float, optional
|
||||
Transparency value for polygons drawn over the image.
|
||||
|
||||
return_all : bool, optional
|
||||
If True, an array containing each separate polygon drawn is returned.
|
||||
(The polygons may overlap.) If False (default), latter polygons
|
||||
"overwrite" earlier ones where they overlap.
|
||||
|
||||
Returns
|
||||
-------
|
||||
labels : array of int, shape ([Q, ]M, N)
|
||||
The segmented regions. If mode is `'separate'`, the leading dimension
|
||||
of the array corresponds to the number of regions that the user drew.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Use left click to select the vertices of the polygon
|
||||
and right click to confirm the selection once all vertices are selected.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, future, io
|
||||
>>> camera = data.camera()
|
||||
>>> mask = future.manual_polygon_segmentation(camera) # doctest: +SKIP
|
||||
>>> io.imshow(mask) # doctest: +SKIP
|
||||
>>> io.show() # doctest: +SKIP
|
||||
"""
|
||||
import matplotlib
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
list_of_vertex_lists = []
|
||||
polygons_drawn = []
|
||||
|
||||
temp_list = []
|
||||
preview_polygon_drawn = []
|
||||
|
||||
if image.ndim not in (2, 3):
|
||||
raise ValueError('Only 2D grayscale or RGB images are supported.')
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
fig.subplots_adjust(bottom=0.2)
|
||||
ax.imshow(image, cmap="gray")
|
||||
ax.set_axis_off()
|
||||
|
||||
def _undo(*args, **kwargs):
|
||||
if list_of_vertex_lists:
|
||||
list_of_vertex_lists.pop()
|
||||
# Remove last polygon from list of polygons...
|
||||
last_poly = polygons_drawn.pop()
|
||||
# ... then from the plot
|
||||
last_poly.remove()
|
||||
fig.canvas.draw_idle()
|
||||
|
||||
undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075])
|
||||
undo_button = matplotlib.widgets.Button(undo_pos, '\u27F2')
|
||||
undo_button.on_clicked(_undo)
|
||||
|
||||
def _extend_polygon(event):
|
||||
# Do not record click events outside axis or in undo button
|
||||
if event.inaxes is None or event.inaxes is undo_pos:
|
||||
return
|
||||
# Do not record click events when toolbar is active
|
||||
if ax.get_navigate_mode():
|
||||
return
|
||||
|
||||
if event.button == LEFT_CLICK: # Select vertex
|
||||
temp_list.append([event.xdata, event.ydata])
|
||||
# Remove previously drawn preview polygon if any.
|
||||
if preview_polygon_drawn:
|
||||
poly = preview_polygon_drawn.pop()
|
||||
poly.remove()
|
||||
|
||||
# Preview polygon with selected vertices.
|
||||
polygon = _draw_polygon(ax, temp_list, alpha=(alpha / 1.4))
|
||||
preview_polygon_drawn.append(polygon)
|
||||
|
||||
elif event.button == RIGHT_CLICK: # Confirm the selection
|
||||
if not temp_list:
|
||||
return
|
||||
|
||||
# Store the vertices of the polygon as shown in preview.
|
||||
# Redraw polygon and store it in polygons_drawn so that
|
||||
# `_undo` works correctly.
|
||||
list_of_vertex_lists.append(temp_list[:])
|
||||
polygon_object = _draw_polygon(ax, temp_list, alpha=alpha)
|
||||
polygons_drawn.append(polygon_object)
|
||||
|
||||
# Empty the temporary variables.
|
||||
preview_poly = preview_polygon_drawn.pop()
|
||||
preview_poly.remove()
|
||||
del temp_list[:]
|
||||
|
||||
plt.draw()
|
||||
|
||||
fig.canvas.mpl_connect('button_press_event', _extend_polygon)
|
||||
|
||||
plt.show(block=True)
|
||||
|
||||
labels = (_mask_from_vertices(vertices, image.shape[:2], i)
|
||||
for i, vertices in enumerate(list_of_vertex_lists, start=1))
|
||||
if return_all:
|
||||
return np.stack(labels)
|
||||
else:
|
||||
return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2]))
|
||||
|
||||
|
||||
@require("matplotlib", ">=3.3")
|
||||
def manual_lasso_segmentation(image, alpha=0.4, return_all=False):
|
||||
"""Return a label image based on freeform selections made with the mouse.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N[, 3]) array
|
||||
Grayscale or RGB image.
|
||||
|
||||
alpha : float, optional
|
||||
Transparency value for polygons drawn over the image.
|
||||
|
||||
return_all : bool, optional
|
||||
If True, an array containing each separate polygon drawn is returned.
|
||||
(The polygons may overlap.) If False (default), latter polygons
|
||||
"overwrite" earlier ones where they overlap.
|
||||
|
||||
Returns
|
||||
-------
|
||||
labels : array of int, shape ([Q, ]M, N)
|
||||
The segmented regions. If mode is `'separate'`, the leading dimension
|
||||
of the array corresponds to the number of regions that the user drew.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Press and hold the left mouse button to draw around each object.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, future, io
|
||||
>>> camera = data.camera()
|
||||
>>> mask = future.manual_lasso_segmentation(camera) # doctest: +SKIP
|
||||
>>> io.imshow(mask) # doctest: +SKIP
|
||||
>>> io.show() # doctest: +SKIP
|
||||
"""
|
||||
import matplotlib
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
list_of_vertex_lists = []
|
||||
polygons_drawn = []
|
||||
|
||||
if image.ndim not in (2, 3):
|
||||
raise ValueError('Only 2D grayscale or RGB images are supported.')
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
fig.subplots_adjust(bottom=0.2)
|
||||
ax.imshow(image, cmap="gray")
|
||||
ax.set_axis_off()
|
||||
|
||||
def _undo(*args, **kwargs):
|
||||
if list_of_vertex_lists:
|
||||
list_of_vertex_lists.pop()
|
||||
# Remove last polygon from list of polygons...
|
||||
last_poly = polygons_drawn.pop()
|
||||
# ... then from the plot
|
||||
last_poly.remove()
|
||||
fig.canvas.draw_idle()
|
||||
|
||||
undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075])
|
||||
undo_button = matplotlib.widgets.Button(undo_pos, '\u27F2')
|
||||
undo_button.on_clicked(_undo)
|
||||
|
||||
def _on_lasso_selection(vertices):
|
||||
if len(vertices) < 3:
|
||||
return
|
||||
list_of_vertex_lists.append(vertices)
|
||||
polygon_object = _draw_polygon(ax, vertices, alpha=alpha)
|
||||
polygons_drawn.append(polygon_object)
|
||||
plt.draw()
|
||||
|
||||
matplotlib.widgets.LassoSelector(ax, _on_lasso_selection)
|
||||
|
||||
plt.show(block=True)
|
||||
|
||||
labels = (_mask_from_vertices(vertices, image.shape[:2], i)
|
||||
for i, vertices in enumerate(list_of_vertex_lists, start=1))
|
||||
if return_all:
|
||||
return np.stack(labels)
|
||||
else:
|
||||
return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2]))
|
||||
Binary file not shown.
Binary file not shown.
@@ -1,144 +0,0 @@
|
||||
from functools import partial
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from scipy import spatial
|
||||
|
||||
from skimage.future import fit_segmenter, predict_segmenter, TrainableSegmenter
|
||||
from skimage.feature import multiscale_basic_features
|
||||
|
||||
|
||||
class DummyNNClassifier:
    """Minimal 1-nearest-neighbour classifier with a scikit-learn-like API.

    ``fit`` memorizes the training set in a KD-tree; ``predict`` returns the
    label of the closest training sample for each query row.
    """

    def fit(self, X, labels):
        self.X = X
        self.labels = labels
        self.tree = spatial.cKDTree(self.X)

    def predict(self, X):
        n_expected = self.X.shape[1]
        n_got = X.shape[1]
        # mimic check in scikit-learn for number of features
        if n_got != n_expected:
            raise ValueError(
                f"Expected {n_expected} features but got {n_got}."
            )
        _, nearest_neighbors = self.tree.query(X)
        return self.labels[nearest_neighbors]
|
||||
|
||||
|
||||
def test_trainable_segmentation_singlechannel():
    """1-NN segmentation recovers the two halves of a noisy grayscale image."""
    image = np.zeros((20, 20))
    image[:10] = 1
    image += 0.05 * np.random.randn(*image.shape)

    # Annotate only two rows per class; everything else stays unlabeled (0).
    annotations = np.zeros_like(image, dtype=np.uint8)
    annotations[:2] = 1
    annotations[-2:] = 2

    compute_features = partial(
        multiscale_basic_features,
        edges=False,
        texture=False,
        sigma_min=0.5,
        sigma_max=2,
    )
    feats = compute_features(image)
    trained = fit_segmenter(annotations, feats, DummyNNClassifier())
    segmentation = predict_segmenter(feats, trained)

    assert np.all(segmentation[:10] == 1)
    assert np.all(segmentation[10:] == 2)
|
||||
|
||||
|
||||
def test_trainable_segmentation_multichannel():
    """Same as the single-channel test, but with an RGB-like image."""
    image = np.zeros((20, 20, 3))
    image[:10] = 1
    image += 0.05 * np.random.randn(*image.shape)

    # Labels are per-pixel, so drop the channel axis for the annotation map.
    annotations = np.zeros_like(image[..., 0], dtype=np.uint8)
    annotations[:2] = 1
    annotations[-2:] = 2

    feats = multiscale_basic_features(
        image,
        edges=False,
        texture=False,
        sigma_min=0.5,
        sigma_max=2,
        channel_axis=-1,
    )
    trained = fit_segmenter(annotations, feats, DummyNNClassifier())
    segmentation = predict_segmenter(feats, trained)

    assert np.all(segmentation[:10] == 1)
    assert np.all(segmentation[10:] == 2)
|
||||
|
||||
|
||||
def test_trainable_segmentation_predict():
    """Predicting with mismatched features raises a ValueError with a hint."""
    image = np.zeros((20, 20))
    image[:10] = 1
    image += 0.05 * np.random.randn(*image.shape)

    annotations = np.zeros_like(image, dtype=np.uint8)
    annotations[:2] = 1
    annotations[-2:] = 2

    compute_features = partial(
        multiscale_basic_features,
        edges=False,
        texture=False,
        sigma_min=0.5,
        sigma_max=2,
    )
    trained = fit_segmenter(annotations, compute_features(image), DummyNNClassifier())

    # Features with the wrong layout must be rejected, and the message
    # should mention the likely cause.
    bogus_features = np.random.random((5, 20, 20))
    with pytest.raises(ValueError) as err:
        _ = predict_segmenter(bogus_features, trained)
    assert 'type of features' in str(err.value)
|
||||
|
||||
|
||||
def test_trainable_segmentation_oo():
    """Test the object-oriented interface using the TrainableSegmenter class."""
    image = np.zeros((20, 20))
    image[:10] = 1
    image += 0.05 * np.random.randn(*image.shape)

    annotations = np.zeros_like(image, dtype=np.uint8)
    annotations[:2] = 1
    annotations[-2:] = 2

    classifier = DummyNNClassifier()
    compute_features = partial(
        multiscale_basic_features,
        edges=False,
        texture=False,
        sigma_min=0.5,
        sigma_max=2,
    )
    segmenter = TrainableSegmenter(clf=classifier, features_func=compute_features)
    segmenter.fit(image, annotations)

    # model has been fitted
    np.testing.assert_array_almost_equal(
        classifier.labels, annotations[annotations > 0]
    )

    segmentation = segmenter.predict(image)
    assert np.all(segmentation[:10] == 1)
    assert np.all(segmentation[10:] == 2)

    # test multichannel model
    stacked = np.stack((image, image.T), axis=-1)
    segmenter = TrainableSegmenter(
        clf=classifier,
        features_func=partial(multiscale_basic_features, channel_axis=-1),
    )
    segmenter.fit(stacked, annotations)

    # model has been fitted
    np.testing.assert_array_almost_equal(
        classifier.labels, annotations[annotations > 0]
    )

    segmentation = segmenter.predict(stacked)
    assert np.all(segmentation[:10] == 1)
    assert np.all(segmentation[10:] == 2)

    # test wrong number of dimensions
    with pytest.raises(ValueError):
        segmenter.predict(np.expand_dims(stacked, axis=-1))

    # test wrong number of channels
    with pytest.raises(ValueError):
        segmenter.predict(np.concatenate([stacked] * 2, axis=-1))
|
||||
@@ -1,162 +0,0 @@
|
||||
from skimage.feature import multiscale_basic_features

# scikit-learn is an optional dependency: record whether it is available and
# provide a stand-in NotFittedError so `except NotFittedError:` below still
# resolves when it is absent.
try:
    from sklearn.exceptions import NotFittedError
    from sklearn.ensemble import RandomForestClassifier

    has_sklearn = True
except ImportError:
    has_sklearn = False

    # Fallback with the same name, so code catching it works either way.
    class NotFittedError(Exception):
        pass
|
||||
|
||||
|
||||
class TrainableSegmenter:
    """Estimator for classifying pixels.

    Parameters
    ----------
    clf : classifier object, optional
        classifier object, exposing a ``fit`` and a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier.
    features_func : function, optional
        function computing features on all pixels of the image, to be passed
        to the classifier. The output should be of shape
        ``(m_features, *labels.shape)``. If None,
        :func:`skimage.feature.multiscale_basic_features` is used.

    Methods
    -------
    compute_features
    fit
    predict
    """

    def __init__(self, clf=None, features_func=None):
        if clf is None:
            if has_sklearn:
                self.clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
            else:
                # BUG FIX: the two message fragments previously concatenated
                # without a separating space ("instanceto").
                raise ImportError(
                    "Please install scikit-learn or pass a classifier instance "
                    "to TrainableSegmenter."
                )
        else:
            self.clf = clf
        self.features_func = features_func

    def compute_features(self, image):
        """Compute and store ``self.features`` for ``image``.

        Falls back to :func:`skimage.feature.multiscale_basic_features` when
        no ``features_func`` was provided at construction time.
        """
        if self.features_func is None:
            self.features_func = multiscale_basic_features
        self.features = self.features_func(image)

    def fit(self, image, labels):
        """Train classifier using partially labeled (annotated) image.

        Parameters
        ----------
        image : ndarray
            Input image, which can be grayscale or multichannel, and must have a
            number of dimensions compatible with ``self.features_func``.
        labels : ndarray of ints
            Labeled array of shape compatible with ``image`` (same shape for a
            single-channel image). Labels >= 1 correspond to the training set and
            label 0 to unlabeled pixels to be segmented.
        """
        self.compute_features(image)
        # fit_segmenter trains self.clf in place; the return value is the
        # same object, so it need not be re-assigned here.
        fit_segmenter(labels, self.features, self.clf)

    def predict(self, image):
        """Segment new image using trained internal classifier.

        Parameters
        ----------
        image : ndarray
            Input image, which can be grayscale or multichannel, and must have a
            number of dimensions compatible with ``self.features_func``.

        Raises
        ------
        NotFittedError if ``self.clf`` has not been fitted yet (use ``self.fit``).
        """
        if self.features_func is None:
            self.features_func = multiscale_basic_features
        features = self.features_func(image)
        return predict_segmenter(features, self.clf)
|
||||
|
||||
|
||||
def fit_segmenter(labels, features, clf):
    """Segmentation using labeled parts of the image and a classifier.

    Parameters
    ----------
    labels : ndarray of ints
        Image of labels. Labels >= 1 correspond to the training set and
        label 0 to unlabeled pixels to be segmented.
    features : ndarray
        Array of features, with the last dimension corresponding to the number
        of features, and the other dimensions corresponding to ``labels.shape``.
    clf : classifier object
        classifier object, exposing a ``fit`` and a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier.

    Returns
    -------
    clf : classifier object
        classifier trained on ``labels``
    """
    # NOTE: the docstring previously claimed the *first* dimension of
    # ``features`` was the feature axis; the boolean indexing below requires
    # the spatial (``labels.shape``) dimensions to come first and the feature
    # axis last, consistent with ``predict_segmenter``.
    mask = labels > 0
    # Keep only annotated pixels; label 0 means "unlabeled".
    training_data = features[mask]
    training_labels = labels[mask].ravel()
    clf.fit(training_data, training_labels)
    return clf
|
||||
|
||||
|
||||
def predict_segmenter(features, clf):
    """Segmentation of images using a pretrained classifier.

    Parameters
    ----------
    features : ndarray
        Array of features, with the last dimension corresponding to the number
        of features, and the other dimensions are compatible with the shape of
        the image to segment, or a flattened image.
    clf : classifier object
        trained classifier object, exposing a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier. The
        classifier must be already trained, for example with
        :func:`skimage.segmentation.fit_segmenter`.

    Returns
    -------
    output : ndarray
        Labeled array, built from the prediction of the classifier.

    Raises
    ------
    NotFittedError
        If ``clf`` has not been trained yet (use ``fit_segmenter``).
    ValueError
        If the features do not match what the classifier was trained on.
    """
    sh = features.shape
    # The classifier expects a 2D (n_samples, n_features) array, so flatten
    # the spatial dimensions; already-2D input is passed through unchanged.
    if features.ndim > 2:
        features = features.reshape((-1, sh[-1]))

    try:
        predicted_labels = clf.predict(features)
    except NotFittedError as err:
        # BUG FIX: the two message fragments previously concatenated without
        # a separating space ("firstfor"); also chain the original error.
        raise NotFittedError(
            "You must train the classifier `clf` first "
            "for example with the `fit_segmenter` function."
        ) from err
    except ValueError as err:
        if err.args and 'x must consist of vectors of length' in err.args[0]:
            # KDTree-style feature-length mismatch: append a hint about the
            # likely cause while preserving the original message and cause.
            raise ValueError(
                err.args[0] + '\n' +
                "Maybe you did not use the same type of features for training the classifier."
            ) from err
        else:
            # Bare re-raise preserves the original traceback.
            raise
    # Restore the spatial shape of the image (or of the flattened image).
    output = predicted_labels.reshape(sh[:-1])
    return output
|
||||
Reference in New Issue
Block a user