This commit is contained in:
ton
2023-10-05 00:01:27 +07:00
parent 1541297f6d
commit 4a987d90c5
12169 changed files with 502 additions and 2656459 deletions

View File

@@ -1,26 +0,0 @@
# Public API of the ``skimage.graph`` subpackage: re-export graph
# construction, graph-cut, hierarchical-merge, RAG, and path-finding
# helpers from their private implementation modules.
from ._graph import pixel_graph, central_pixel
from ._graph_cut import cut_threshold, cut_normalized
from ._graph_merge import merge_hierarchical
from ._rag import rag_mean_color, RAG, show_rag, rag_boundary
from .spath import shortest_path
from .mcp import (
    MCP, MCP_Geometric, MCP_Connect, MCP_Flexible, route_through_array
)

# Names exported by ``from skimage.graph import *``.
__all__ = [
    'pixel_graph',
    'central_pixel',
    'shortest_path',
    'MCP',
    'MCP_Geometric',
    'MCP_Connect',
    'MCP_Flexible',
    'route_through_array',
    'rag_mean_color',
    'rag_boundary',
    'cut_threshold',
    'cut_normalized',
    'merge_hierarchical',
    'RAG',
]

View File

@@ -1,201 +0,0 @@
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from ..morphology._util import _raveled_offsets_and_distances
from ..util._map_array import map_array
def _weighted_abs_diff(values0, values1, distances):
"""A default edge function for complete image graphs.
A pixel graph on an image with no edge values and no mask is a very
boring regular lattice, so we define a default edge weight to be the
absolute difference between values *weighted* by the distance
between them.
Parameters
----------
values0 : array
The pixel values for each node.
values1 : array
The pixel values for each neighbor.
distances : array
The distance between each node and its neighbor.
Returns
-------
edge_values : array of float
The computed values: abs(values0 - values1) * distances.
"""
return np.abs(values0 - values1) * distances
def pixel_graph(
    image, *, mask=None, edge_function=None, connectivity=1, spacing=None
):
    """Create an adjacency graph of pixels in an image.

    Pixels where the mask is True are nodes in the returned graph, and they
    are connected by edges to their neighbors according to the connectivity
    parameter. By default, the *value* of an edge when a mask is given, or
    when the image is itself the mask, is the euclidean distance between the
    pixels.

    However, if an int- or float-valued image is given with no mask, the
    value of the edges is the absolute difference in intensity between
    adjacent pixels, weighted by the euclidean distance.

    Parameters
    ----------
    image : array
        The input image. If the image is of type bool, it will be used as the
        mask as well.
    mask : array of bool
        Which pixels to use. If None, the graph for the whole image is used.
    edge_function : callable
        A function taking an array of pixel values, and an array of neighbor
        pixel values, and an array of distances, and returning a value for
        the edge. If no function is given, the value of an edge is just the
        distance.
    connectivity : int
        The square connectivity of the pixel neighborhood: the number of
        orthogonal steps allowed to consider a pixel a neighbor. See
        `scipy.ndimage.generate_binary_structure` for details.
    spacing : tuple of float
        The spacing between pixels along each axis.

    Returns
    -------
    graph : scipy.sparse.csr_matrix
        A sparse adjacency matrix in which entry (i, j) is 1 if nodes i and j
        are neighbors, 0 otherwise.
    nodes : array of int
        The nodes of the graph. These correspond to the raveled indices of
        the nonzero pixels in the mask.
    """
    # A boolean image doubles as its own mask.
    if image.dtype == bool and mask is None:
        mask = image

    if mask is None and edge_function is None:
        mask = np.ones_like(image, dtype=bool)
        edge_function = _weighted_abs_diff

    # Strategy: we are going to build the (i, j, data) arrays of a scipy
    # sparse COO matrix, then convert to CSR (which is fast).
    # - grab the raveled IDs of the foreground (mask == True) parts of the
    #   image **in the padded space**.
    # - broadcast them together with the raveled offsets to their neighbors.
    #   This gives us for each foreground pixel a list of neighbors (that
    #   may or may not be selected by the mask). (We also track the
    #   *distance* to each neighbor.)
    # - select "valid" entries in the neighbors and distance arrays by
    #   indexing into the mask, which we can do since these are raveled
    #   indices.
    # - use np.repeat() to repeat each source index according to the number
    #   of neighbors selected by the mask it has. Each of these repeated
    #   indices will be lined up with its neighbor, i.e. **this is the i
    #   array** of the COO format matrix.
    # - use the mask as a boolean index to get a 1D view of the selected
    #   neighbors. **This is the j array.**
    # - by default, the same boolean indexing can be applied to the distances
    #   to each neighbor, to give the **data array.** Optionally, a
    #   provided edge function can be computed on the pixel values and the
    #   distances to give a different value for the edges.
    # Note, we use map_array to map the raveled coordinates in the padded
    # image to the ones in the original image, and those are the returned
    # nodes.

    # Padding with False guarantees every in-mask pixel has a full set of
    # raveled neighbor offsets without falling off the array edge.
    padded = np.pad(mask, 1, mode='constant', constant_values=False)
    nodes_padded = np.flatnonzero(padded)
    neighbor_offsets_padded, distances_padded = _raveled_offsets_and_distances(
        padded.shape, connectivity=connectivity, spacing=spacing
    )
    neighbors_padded = nodes_padded[:, np.newaxis] + neighbor_offsets_padded
    neighbor_distances_full = np.broadcast_to(
        distances_padded, neighbors_padded.shape
    )
    nodes = np.flatnonzero(mask)
    nodes_sequential = np.arange(nodes.size)
    # neighbors outside the mask get mapped to 0, which is a valid index,
    # BUT, they will be masked out in the next step.
    neighbors = map_array(neighbors_padded, nodes_padded, nodes)
    neighbors_mask = padded.reshape(-1)[neighbors_padded]
    num_neighbors = np.sum(neighbors_mask, axis=1)
    indices = np.repeat(nodes, num_neighbors)
    indices_sequential = np.repeat(nodes_sequential, num_neighbors)
    neighbor_indices = neighbors[neighbors_mask]
    neighbor_distances = neighbor_distances_full[neighbors_mask]
    neighbor_indices_sequential = map_array(
        neighbor_indices, nodes, nodes_sequential
    )

    if edge_function is None:
        # Default edge value: the euclidean distance between the pixels.
        data = neighbor_distances
    else:
        image_r = image.reshape(-1)
        data = edge_function(
            image_r[indices], image_r[neighbor_indices], neighbor_distances
        )

    m = nodes_sequential.size
    mat = sparse.coo_matrix(
        (data, (indices_sequential, neighbor_indices_sequential)),
        shape=(m, m)
    )
    graph = mat.tocsr()
    return graph, nodes
def central_pixel(graph, nodes=None, shape=None, partition_size=100):
    """Find the pixel with the highest closeness centrality.

    Closeness centrality is the inverse of the total sum of shortest
    distances from a node to every other node.

    Parameters
    ----------
    graph : scipy.sparse.csr_matrix
        The sparse matrix representation of the graph.
    nodes : array of int
        The raveled index of each node in graph in the image. If not
        provided, the returned value will be the index in the input graph.
    shape : tuple of int
        The shape of the image in which the nodes are embedded. If provided,
        the returned coordinates are a NumPy multi-index of the same
        dimensionality as the input shape. Otherwise, the returned
        coordinate is the raveled index provided in `nodes`.
    partition_size : int
        This function computes the shortest path distance between every pair
        of nodes in the graph, which can produce a very large (N*N) matrix.
        As a simple performance tweak, distances are computed in batches of
        `partition_size` source nodes, needing only partition_size*N memory
        at a time.

    Returns
    -------
    position : int or tuple of int
        If shape is given, the coordinate of the central pixel in the image.
        Otherwise, the raveled index of that pixel.
    distances : array of float
        The total sum of distances from each node to each other reachable
        node.
    """
    n = graph.shape[0]
    if nodes is None:
        nodes = np.arange(n)

    # Batch the all-pairs shortest-path computation to bound peak memory.
    num_splits = 1 if partition_size is None else max(2, n // partition_size)
    partial_totals = []
    for batch in np.array_split(np.arange(n), num_splits):
        paths = csgraph.shortest_path(graph, directed=False, indices=batch)
        # Unreachable pairs come back as inf; clamp them to a large finite
        # value so the row sums stay usable.
        partial_totals.append(np.sum(np.nan_to_num(paths), axis=1))
    total_shortest_path_len = np.concatenate(partial_totals)

    # Ignore nodes with a zero distance sum (isolated/self-only) when
    # picking the most central node.
    reachable = np.flatnonzero(total_shortest_path_len)
    best = reachable[np.argmin(total_shortest_path_len[reachable])]
    raveled_index = nodes[best]

    if shape is not None:
        central = np.unravel_index(raveled_index, shape)
    else:
        central = raveled_index
    return central, total_shortest_path_len

View File

@@ -1,302 +0,0 @@
import networkx as nx
import numpy as np
from . import _ncut
from . import _ncut_cy
from scipy.sparse import linalg
def cut_threshold(labels, rag, thresh, in_place=True):
    """Combine regions separated by weight less than threshold.

    Given an image's labels and its RAG, produce new labels by merging every
    group of regions whose connecting edges all weigh less than `thresh`.

    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The region adjacency graph.
    thresh : float
        The threshold. Regions connected by edges with smaller weights are
        combined.
    in_place : bool
        If set, modifies `rag` in place, removing the edges with weights
        less than `thresh`. If set to `False` the function makes a copy of
        `rag` before proceeding.

    Returns
    -------
    out : ndarray
        The new labelled array.

    Examples
    --------
    >>> from skimage import data, segmentation, graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img)
    >>> rag = graph.rag_mean_color(img, labels)
    >>> new_labels = graph.cut_threshold(labels, rag, 10)

    References
    ----------
    .. [1] Alain Tremeau and Philippe Colantoni
           "Regions Adjacency Graph Applied To Color Image Segmentation"
           :DOI:`10.1109/83.841950`
    """
    if not in_place:
        rag = rag.copy()

    # Collect edges first: removing them while iterating raises an error.
    heavy_edges = [
        (u, v) for u, v, attrs in rag.edges(data=True)
        if attrs['weight'] >= thresh
    ]
    rag.remove_edges_from(heavy_edges)

    # Every connected component of the pruned graph collapses to one output
    # label; build a lookup table from old labels to component indices.
    forward_map = np.arange(labels.max() + 1, dtype=labels.dtype)
    for new_label, component in enumerate(nx.connected_components(rag)):
        for node in component:
            for old_label in rag.nodes[node]['labels']:
                forward_map[old_label] = new_label

    return forward_map[labels]
def cut_normalized(labels, rag, thresh=0.001, num_cuts=10, in_place=True,
                   max_edge=1.0,
                   *,
                   random_state=None,
                   ):
    """Perform Normalized Graph cut on the Region Adjacency Graph.

    Given an image's labels and its similarity RAG, recursively perform a
    2-way normalized cut on it. All nodes belonging to a subgraph that
    cannot be cut further are assigned a unique label in the output.

    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The region adjacency graph.
    thresh : float
        The threshold. A subgraph won't be further subdivided if the value
        of the N-cut exceeds `thresh`.
    num_cuts : int
        The number of N-cuts to perform before determining the optimal one.
    in_place : bool
        If set, modifies `rag` in place. For each node `n` the function will
        set a new attribute ``rag.nodes[n]['ncut label']``.
    max_edge : float, optional
        The maximum possible value of an edge in the RAG. This corresponds
        to an edge between identical regions. This is used to put self
        edges in the RAG.
    random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is None the `numpy.random.Generator` singleton is
        used.
        If `random_state` is an int, a new ``Generator`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` instance then that
        instance is used.
        The `random_state` is used for the starting point
        of `scipy.sparse.linalg.eigsh`.

    Returns
    -------
    out : ndarray
        The new labeled array.

    Examples
    --------
    >>> from skimage import data, segmentation, graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img)
    >>> rag = graph.rag_mean_color(img, labels, mode='similarity')
    >>> new_labels = graph.cut_normalized(labels, rag)

    References
    ----------
    .. [1] Shi, J.; Malik, J., "Normalized cuts and image segmentation",
           Pattern Analysis and Machine Intelligence,
           IEEE Transactions on, vol. 22, no. 8, pp. 888-905, August 2000.
    """
    rng = np.random.default_rng(random_state)
    if not in_place:
        rag = rag.copy()

    # A region is maximally similar to itself: add self-loops at max_edge.
    for node in rag.nodes():
        rag.add_edge(node, node, weight=max_edge)

    _ncut_relabel(rag, thresh, num_cuts, rng)

    # Translate each node's 'ncut label' attribute into a label image.
    forward_map = np.zeros(labels.max() + 1, dtype=labels.dtype)
    for _, attrs in rag.nodes(data=True):
        forward_map[attrs['labels']] = attrs['ncut label']

    return forward_map[labels]
def partition_by_cut(cut, rag):
    """Compute resulting subgraphs from given bi-partition.

    Parameters
    ----------
    cut : array
        An array of booleans. Elements set to `True` belong to one set.
    rag : RAG
        The Region Adjacency Graph.

    Returns
    -------
    sub1, sub2 : RAG
        The two resulting subgraphs from the bi-partition.
    """
    # `cut` is aligned with the ordering of `rag.nodes()`, because the `D`
    # and `W` matrices it was derived from were built with that ordering
    # (via nx.to_scipy_sparse_matrix).
    # Example:
    #   rag.nodes() = [3, 7, 9, 13]
    #   cut         = [True, False, True, False]
    #   nodes1      = [3, 9]
    #   nodes2      = [7, 13]
    nodes1 = [node for keep, node in zip(cut, rag.nodes()) if keep]
    nodes2 = [node for keep, node in zip(cut, rag.nodes()) if not keep]
    return rag.subgraph(nodes1), rag.subgraph(nodes2)
def get_min_ncut(ev, d, w, num_cuts):
    """Threshold an eigenvector evenly, to determine minimum ncut.

    Parameters
    ----------
    ev : array
        The eigenvector to threshold.
    d : ndarray
        The diagonal matrix of the graph.
    w : ndarray
        The weight matrix of the graph.
    num_cuts : int
        The number of evenly spaced thresholds to check for.

    Returns
    -------
    mask : array
        The array of booleans which denotes the bi-partition.
    mcut : float
        The value of the minimum ncut.
    """
    best_mask = np.zeros_like(ev, dtype=bool)
    best_cost = np.inf

    lo = ev.min()
    hi = ev.max()
    # A (near-)constant eigenvector means the graph cannot be subdivided
    # further; report the trivial partition (whole graph vs. empty set)
    # with infinite cost.
    if np.allclose(lo, hi):
        return best_mask, best_cost

    # Refer Shi & Malik 2001, Section 3.1.3, Page 892:
    # test evenly spaced thresholds and keep the cheapest cut.
    for threshold in np.linspace(lo, hi, num_cuts, endpoint=False):
        candidate = ev > threshold
        cost = _ncut.ncut_cost(candidate, d, w)
        if cost < best_cost:
            best_mask = candidate
            best_cost = cost

    return best_mask, best_cost
def _label_all(rag, attr_name):
"""Assign a unique integer to the given attribute in the RAG.
This function assumes that all labels in `rag` are unique. It
picks up a random label from them and assigns it to the `attr_name`
attribute of all the nodes.
rag : RAG
The Region Adjacency Graph.
attr_name : string
The attribute to which a unique integer is assigned.
"""
node = min(rag.nodes())
new_label = rag.nodes[node]['labels'][0]
for n, d in rag.nodes(data=True):
d[attr_name] = new_label
def _ncut_relabel(rag, thresh, num_cuts, random_state):
    """Perform Normalized Graph cut on the Region Adjacency Graph.

    Recursively partition the graph into 2, until further subdivision
    yields a cut greater than `thresh` or such a cut cannot be computed.
    For such a subgraph, indices to labels of all its nodes map to a single
    unique value.

    Parameters
    ----------
    rag : RAG
        The region adjacency graph.
    thresh : float
        The threshold. A subgraph won't be further subdivided if the
        value of the N-cut exceeds `thresh`.
    num_cuts : int
        The number of N-cuts to perform before determining the optimal one.
    random_state : `numpy.random.Generator`
        Provides initial values for eigenvalue solver.
    """
    d, w = _ncut.DW_matrices(rag)
    m = w.shape[0]

    # A graph of 1 or 2 nodes cannot be meaningfully cut; label it whole.
    if m > 2:
        d2 = d.copy()
        # Since d is diagonal, we can directly operate on its data:
        # take the inverse of the square root.
        d2.data = np.reciprocal(np.sqrt(d2.data, out=d2.data), out=d2.data)

        # Normalized Laplacian D^(-1/2) (D - W) D^(-1/2).
        # Refer Shi & Malik 2001, Equation 7, Page 891.
        A = d2 * (d - w) * d2
        # Initialize the vector to ensure reproducibility.
        v0 = random_state.random(A.shape[0])
        vals, vectors = linalg.eigsh(A, which='SM', v0=v0,
                                     k=min(100, m - 2))

        # Pick second smallest eigenvector.
        # Refer Shi & Malik 2001, Section 3.2.3, Page 893.
        vals, vectors = np.real(vals), np.real(vectors)
        index2 = _ncut_cy.argmin2(vals)
        ev = vectors[:, index2]

        cut_mask, mcut = get_min_ncut(ev, d, w, num_cuts)
        if (mcut < thresh):
            # Sub divide and perform N-cut again on each half.
            # Refer Shi & Malik 2001, Section 3.2.5, Page 893.
            sub1, sub2 = partition_by_cut(cut_mask, rag)

            _ncut_relabel(sub1, thresh, num_cuts, random_state)
            _ncut_relabel(sub2, thresh, num_cuts, random_state)
            return

    # The N-cut wasn't small enough, or could not be computed.
    # The remaining graph is a region.
    # Assign `ncut label` by picking any label from the existing nodes, since
    # `labels` are unique, `new_label` is also unique.
    _label_all(rag, 'ncut label')

View File

@@ -1,137 +0,0 @@
import numpy as np
import heapq
def _revalidate_node_edges(rag, node, heap_list):
    """Handle validation and invalidation of edges incident to a node.

    This function invalidates all existing edges incident on `node` and
    inserts new items in `heap_list` updated with the valid weights.

    Parameters
    ----------
    rag : RAG
        The Region Adjacency Graph.
    node : int
        The id of the node whose incident edges are to be
        validated/invalidated.
    heap_list : list
        The list containing the existing heap of edges.
    """
    # networkx updates the data dictionary if an edge already exists. That
    # would mean repositioning entries in the heap whenever a weight is
    # updated; instead, we mark the old heap entry invalid and push a fresh
    # one with the current weight.
    for nbr in rag.neighbors(node):
        data = rag[node][nbr]
        try:
            # invalidate edges incident on `dst`, they have new weights
            data['heap item'][3] = False
            _invalidate_edge(rag, node, nbr)
        except KeyError:
            # will handle the case where the edge did not exist in the
            # existing graph (no 'heap item' key yet)
            pass

        wt = data['weight']
        heap_item = [wt, node, nbr, True]
        data['heap item'] = heap_item
        heapq.heappush(heap_list, heap_item)
def _rename_node(graph, node_id, copy_id):
    """Give node `node_id` in `graph` the new id `copy_id`.

    Attributes and all incident edge weights are carried over to the new
    node before the old one is removed.
    """
    graph._add_node_silent(copy_id)
    graph.nodes[copy_id].update(graph.nodes[node_id])

    for neighbor in graph.neighbors(node_id):
        weight = graph[node_id][neighbor]['weight']
        graph.add_edge(neighbor, copy_id, {'weight': weight})

    graph.remove_node(node_id)
def _invalidate_edge(graph, n1, n2):
""" Invalidates the edge (n1, n2) in the heap. """
graph[n1][n2]['heap item'][3] = False
def merge_hierarchical(labels, rag, thresh, rag_copy, in_place_merge,
                       merge_func, weight_func):
    """Perform hierarchical merging of a RAG.

    Greedily merges the most similar pair of nodes until no edges lower than
    `thresh` remain.

    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The Region Adjacency Graph.
    thresh : float
        Regions connected by an edge with weight smaller than `thresh` are
        merged.
    rag_copy : bool
        If set, the RAG is copied before modifying.
    in_place_merge : bool
        If set, the nodes are merged in place. Otherwise, a new node is
        created for each merge.
    merge_func : callable
        This function is called before merging two nodes. For the RAG
        `graph` while merging `src` and `dst`, it is called as follows
        ``merge_func(graph, src, dst)``.
    weight_func : callable
        The function to compute the new weights of the nodes adjacent to the
        merged node. This is directly supplied as the argument `weight_func`
        to `merge_nodes`.

    Returns
    -------
    out : ndarray
        The new labeled array.
    """
    if rag_copy:
        rag = rag.copy()

    # Build a min-heap of [weight, u, v, valid] entries. Each edge's data
    # dict keeps a reference to its own heap entry so it can be invalidated
    # in place instead of being repositioned in the heap.
    edge_heap = []
    for n1, n2, data in rag.edges(data=True):
        # Push a valid edge in the heap
        wt = data['weight']
        heap_item = [wt, n1, n2, True]
        heapq.heappush(edge_heap, heap_item)

        # Reference to the heap item in the graph
        data['heap item'] = heap_item

    # Pop the cheapest edge until the cheapest remaining edge reaches thresh.
    while len(edge_heap) > 0 and edge_heap[0][0] < thresh:
        _, n1, n2, valid = heapq.heappop(edge_heap)

        # Ensure popped edge is valid, if not, the edge is discarded
        if valid:
            # Invalidate all neighbors of `src` before it is deleted;
            # their weights will change after the merge.
            for nbr in rag.neighbors(n1):
                _invalidate_edge(rag, n1, nbr)

            for nbr in rag.neighbors(n2):
                _invalidate_edge(rag, n2, nbr)

            if not in_place_merge:
                next_id = rag.next_id()
                _rename_node(rag, n2, next_id)
                src, dst = n1, next_id
            else:
                src, dst = n1, n2

            merge_func(rag, src, dst)
            new_id = rag.merge_nodes(src, dst, weight_func)
            # Push fresh, valid heap entries for the merged node's edges.
            _revalidate_node_edges(rag, new_id, edge_heap)

    # Map every original label to the index of the surviving node that
    # absorbed it.
    label_map = np.arange(labels.max() + 1)
    for ix, (n, d) in enumerate(rag.nodes(data=True)):
        for label in d['labels']:
            label_map[label] = ix

    return label_map[labels]

View File

@@ -1,64 +0,0 @@
import networkx as nx
import numpy as np
from scipy import sparse
from . import _ncut_cy
def DW_matrices(graph):
    """Return the diagonal and weight matrices of a graph.

    Parameters
    ----------
    graph : RAG
        A Region Adjacency Graph.

    Returns
    -------
    D : csc_matrix
        The diagonal matrix of the graph. ``D[i, i]`` is the sum of weights
        of all edges incident on `i`. All other entries are `0`.
    W : csc_matrix
        The weight matrix of the graph. ``W[i, j]`` is the weight of the
        edge joining `i` to `j`.
    """
    # CSC-formatted input is the most efficient layout for sparse eigsh.
    W = nx.to_scipy_sparse_array(graph, format='csc')
    degree_sums = W.sum(axis=0)
    D = sparse.dia_matrix((degree_sums, 0), shape=W.shape).tocsc()
    return D, W
def ncut_cost(cut, D, W):
    """Return the N-cut cost of a bi-partition of a graph.

    Parameters
    ----------
    cut : ndarray
        The mask for the nodes in the graph. Nodes corresponding to a `True`
        value are in one set.
    D : csc_matrix
        The diagonal matrix of the graph.
    W : csc_matrix
        The weight matrix of the graph.

    Returns
    -------
    cost : float
        The cost of performing the N-cut.

    References
    ----------
    .. [1] Normalized Cuts and Image Segmentation, Jianbo Shi and
           Jitendra Malik, IEEE Transactions on Pattern Analysis and Machine
           Intelligence, Page 889, Equation 2.
    """
    mask = np.array(cut)
    cross_weight = _ncut_cy.cut_cost(mask, W)

    # D stores exactly one diagonal entry per node, so boolean-indexing its
    # `data` attribute sums each side's total association directly.
    assoc_in = D.data[mask].sum()
    assoc_out = D.data[~mask].sum()

    return (cross_weight / assoc_in) + (cross_weight / assoc_out)

View File

@@ -1,556 +0,0 @@
import networkx as nx
import numpy as np
from scipy import ndimage as ndi
from scipy import sparse
import math
from .. import measure, segmentation, util, color
from .._shared.version_requirements import require
def _edge_generator_from_csr(csr_matrix):
"""Yield weighted edge triples for use by NetworkX from a CSR matrix.
This function is a straight rewrite of
`networkx.convert_matrix._csr_gen_triples`. Since that is a private
function, it is safer to include our own here.
Parameters
----------
csr_matrix : scipy.sparse.csr_matrix
The input matrix. An edge (i, j, w) will be yielded if there is a
data value for coordinates (i, j) in the matrix, even if that value
is 0.
Yields
------
i, j, w : (int, int, float) tuples
Each value `w` in the matrix along with its coordinates (i, j).
Examples
--------
>>> dense = np.eye(2, dtype=float)
>>> csr = sparse.csr_matrix(dense)
>>> edges = _edge_generator_from_csr(csr)
>>> list(edges)
[(0, 0, 1.0), (1, 1, 1.0)]
"""
nrows = csr_matrix.shape[0]
values = csr_matrix.data
indptr = csr_matrix.indptr
col_indices = csr_matrix.indices
for i in range(nrows):
for j in range(indptr[i], indptr[i + 1]):
yield i, col_indices[j], values[j]
def min_weight(graph, src, dst, n):
    """Callback to handle merging nodes by choosing minimum weight.

    Returns a dictionary with `"weight"` set as either the weight between
    (`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when
    both exist.

    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    n : int
        A neighbor of `src` or `dst` or both.

    Returns
    -------
    data : dict
        A dict with the `"weight"` attribute set to the weight between
        (`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two
        when both exist.
    """
    # An absent edge behaves as infinitely heavy, so min() picks whichever
    # of the two edges actually exists.
    absent = {'weight': np.inf}
    weight_src = graph[n].get(src, absent)['weight']
    weight_dst = graph[n].get(dst, absent)['weight']
    return {'weight': min(weight_src, weight_dst)}
def _add_edge_filter(values, graph):
"""Create edge in `graph` between central element of `values` and the rest.
Add an edge between the middle element in `values` and
all other elements of `values` into `graph`. ``values[len(values) // 2]``
is expected to be the central value of the footprint used.
Parameters
----------
values : array
The array to process.
graph : RAG
The graph to add edges in.
Returns
-------
0 : float
Always returns 0. The return value is required so that `generic_filter`
can put it in the output array, but it is ignored by this filter.
"""
values = values.astype(int)
center = values[len(values) // 2]
for value in values:
if value != center and not graph.has_edge(center, value):
graph.add_edge(center, value)
return 0.
class RAG(nx.Graph):
    """
    The Region Adjacency Graph (RAG) of an image, subclasses
    `networkx.Graph <http://networkx.github.io/documentation/latest/reference/classes/graph.html>`_

    Parameters
    ----------
    label_image : array of int
        An initial segmentation, with each region labeled as a different
        integer. Every unique value in ``label_image`` will correspond to
        a node in the graph.
    connectivity : int in {1, ..., ``label_image.ndim``}, optional
        The connectivity between pixels in ``label_image``. For a 2D image,
        a connectivity of 1 corresponds to immediate neighbors up, down,
        left, and right, while a connectivity of 2 also includes diagonal
        neighbors. See `scipy.ndimage.generate_binary_structure`.
    data : networkx Graph specification, optional
        Initial or additional edges to pass to the NetworkX Graph
        constructor. See `networkx.Graph`. Valid edge specifications
        include edge list (list of tuples), NumPy arrays, and SciPy
        sparse matrices.
    **attr : keyword arguments, optional
        Additional attributes to add to the graph.
    """

    def __init__(self, label_image=None, connectivity=1, data=None, **attr):
        super().__init__(data, **attr)
        # `max_id` tracks the largest node id seen so far, so that
        # `next_id` can hand out fresh, unused ids.
        if self.number_of_nodes() == 0:
            self.max_id = 0
        else:
            self.max_id = max(self.nodes())

        if label_image is not None:
            fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
            # In the next ``ndi.generic_filter`` function, the kwarg
            # ``output`` is used to provide a strided array with a single
            # 64-bit floating point number, to which the function repeatedly
            # writes. This is done because even if we don't care about the
            # output, without this, a float array of the same shape as the
            # input image will be created and that could be expensive in
            # memory consumption.
            output = np.broadcast_to(1., label_image.shape)
            output.setflags(write=True)
            # The filter callback builds the edges as a side effect while
            # scanning the label image.
            ndi.generic_filter(
                label_image,
                function=_add_edge_filter,
                footprint=fp,
                mode='nearest',
                output=output,
                extra_arguments=(self,))

    def merge_nodes(self, src, dst, weight_func=min_weight, in_place=True,
                    extra_arguments=[], extra_keywords={}):
        """Merge node `src` and `dst`.

        The new combined node is adjacent to all the neighbors of `src`
        and `dst`. `weight_func` is called to decide the weight of edges
        incident on the new node.

        Parameters
        ----------
        src, dst : int
            Nodes to be merged.
        weight_func : callable, optional
            Function to decide the attributes of edges incident on the new
            node. For each neighbor `n` for `src` and `dst`, `weight_func`
            will be called as follows: `weight_func(src, dst, n,
            *extra_arguments, **extra_keywords)`. `src`, `dst` and `n` are
            IDs of vertices in the RAG object which is in turn a subclass of
            `networkx.Graph`. It is expected to return a dict of attributes
            of the resulting edge.
        in_place : bool, optional
            If set to `True`, the merged node has the id `dst`, else merged
            node has a new id which is returned.
        extra_arguments : sequence, optional
            The sequence of extra positional arguments passed to
            `weight_func`.
        extra_keywords : dictionary, optional
            The dict of keyword arguments passed to the `weight_func`.

        Returns
        -------
        id : int
            The id of the new node.

        Notes
        -----
        If `in_place` is `False` the resulting node has a new id, rather
        than `dst`.
        """
        src_nbrs = set(self.neighbors(src))
        dst_nbrs = set(self.neighbors(dst))
        # Neighbors of the merged node, excluding the two merged endpoints.
        neighbors = (src_nbrs | dst_nbrs) - {src, dst}

        if in_place:
            new = dst
        else:
            new = self.next_id()
            self.add_node(new)

        # Let `weight_func` decide each re-attached edge's attributes.
        for neighbor in neighbors:
            data = weight_func(self, src, dst, neighbor, *extra_arguments,
                               **extra_keywords)
            self.add_edge(neighbor, new, attr_dict=data)

        # The merged node owns the union of both regions' labels.
        self.nodes[new]['labels'] = (self.nodes[src]['labels'] +
                                     self.nodes[dst]['labels'])
        self.remove_node(src)

        if not in_place:
            self.remove_node(dst)

        return new

    def add_node(self, n, attr_dict=None, **attr):
        """Add node `n` while updating the maximum node id.

        .. seealso:: :func:`networkx.Graph.add_node`."""
        if attr_dict is None:  # compatibility with old networkx
            attr_dict = attr
        else:
            attr_dict.update(attr)
        super().add_node(n, **attr_dict)
        self.max_id = max(n, self.max_id)

    def add_edge(self, u, v, attr_dict=None, **attr):
        """Add an edge between `u` and `v` while updating max node id.

        .. seealso:: :func:`networkx.Graph.add_edge`."""
        if attr_dict is None:  # compatibility with old networkx
            attr_dict = attr
        else:
            attr_dict.update(attr)
        super().add_edge(u, v, **attr_dict)
        self.max_id = max(u, v, self.max_id)

    def copy(self):
        """Copy the graph with its max node id.

        .. seealso:: :func:`networkx.Graph.copy`."""
        g = super().copy()
        g.max_id = self.max_id
        return g

    def fresh_copy(self):
        """Return a fresh copy graph with the same data structure.

        A fresh copy has no nodes, edges or graph attributes. It is
        the same data structure as the current graph. This method is
        typically used to create an empty version of the graph.

        This is required when subclassing Graph with networkx v2 and
        does not cause problems for v1. Here is more detail from
        the networkx migrating from 1.x to 2.x document::

            With the new GraphViews (SubGraph, ReversedGraph, etc)
            you can't assume that ``G.__class__()`` will create a new
            instance of the same graph type as ``G``. In fact, the
            call signature for ``__class__`` differs depending on
            whether ``G`` is a view or a base class. For v2.x you
            should use ``G.fresh_copy()`` to create a null graph of
            the correct type---ready to fill with nodes and edges.
        """
        return RAG()

    def next_id(self):
        """Returns the `id` for the new node to be inserted.

        The current implementation returns one more than the maximum `id`.

        Returns
        -------
        id : int
            The `id` of the new node to be inserted.
        """
        return self.max_id + 1

    def _add_node_silent(self, n):
        """Add node `n` without updating the maximum node id.

        This is a convenience method used internally.

        .. seealso:: :func:`networkx.Graph.add_node`."""
        super().add_node(n)
def rag_mean_color(image, labels, connectivity=2, mode='distance',
                   sigma=255.0):
    """Compute the Region Adjacency Graph using mean colors.

    Given an image and its initial segmentation, this method constructs the
    corresponding Region Adjacency Graph (RAG). Each node in the RAG
    represents a set of pixels within `image` with the same label in
    `labels`. The weight between two adjacent regions represents how similar
    or dissimilar two regions are depending on the `mode` parameter.

    Parameters
    ----------
    image : ndarray, shape(M, N, [..., P,] 3)
        Input image.
    labels : ndarray, shape(M, N, [..., P])
        The labelled image. This should have one dimension less than
        `image`. If `image` has dimensions `(M, N, 3)` `labels` should have
        dimensions `(M, N)`.
    connectivity : int, optional
        Pixels with a squared distance less than `connectivity` from each
        other are considered adjacent. It can range from 1 to `labels.ndim`.
        Its behavior is the same as the `connectivity` parameter in
        ``scipy.ndimage.generate_binary_structure``.
    mode : {'distance', 'similarity'}, optional
        The strategy to assign edge weights.

        'distance' : The weight between two adjacent regions is
        :math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the
        mean colors of the two regions. It represents the Euclidean
        distance in their average color.

        'similarity' : The weight between two adjacent regions is
        :math:`e^{-d^2/sigma}` where :math:`d=|c_1 - c_2|`, where
        :math:`c_1` and :math:`c_2` are the mean colors of the two
        regions. It represents how similar two regions are.
    sigma : float, optional
        Used for computation when `mode` is "similarity". It governs how
        close to each other two colors should be, for their corresponding
        edge weight to be significant. A very large value of `sigma` could
        make any two colors behave as though they were similar.

    Returns
    -------
    out : RAG
        The region adjacency graph.

    Raises
    ------
    ValueError
        If `mode` is neither 'distance' nor 'similarity'.

    Examples
    --------
    >>> from skimage import data, segmentation, graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img)
    >>> rag = graph.rag_mean_color(img, labels)

    References
    ----------
    .. [1] Alain Tremeau and Philippe Colantoni
           "Regions Adjacency Graph Applied To Color Image Segmentation"
           :DOI:`10.1109/83.841950`
    """
    # Validate `mode` up front: previously an invalid mode was only detected
    # inside the per-edge loop, so a graph with no edges silently accepted it.
    if mode not in ('distance', 'similarity'):
        raise ValueError(f"The mode '{mode}' is not recognised")
    graph = RAG(labels, connectivity=connectivity)
    # Initialize per-region accumulators.
    for n in graph:
        graph.nodes[n].update({'labels': [n],
                               'pixel count': 0,
                               'total color': np.array([0, 0, 0],
                                                       dtype=np.float64)})
    # Accumulate pixel counts and color sums region by region.
    for index in np.ndindex(labels.shape):
        current = labels[index]
        graph.nodes[current]['pixel count'] += 1
        graph.nodes[current]['total color'] += image[index]
    for n in graph:
        graph.nodes[n]['mean color'] = (graph.nodes[n]['total color'] /
                                        graph.nodes[n]['pixel count'])
    # Edge weight: Euclidean distance between mean colors, or a Gaussian
    # similarity of that distance.
    for x, y, d in graph.edges(data=True):
        diff = graph.nodes[x]['mean color'] - graph.nodes[y]['mean color']
        diff = np.linalg.norm(diff)
        if mode == 'similarity':
            d['weight'] = math.e ** (-(diff ** 2) / sigma)
        else:
            d['weight'] = diff
    return graph
def rag_boundary(labels, edge_map, connectivity=2):
    """Compute a RAG based on region boundaries.

    Given an image's initial segmentation and its edge map this method
    constructs the corresponding Region Adjacency Graph (RAG). Each node in
    the RAG represents a set of pixels within the image with the same label
    in `labels`. The weight between two adjacent regions is the average
    value in `edge_map` along their boundary.

    Parameters
    ----------
    labels : ndarray
        The labelled image.
    edge_map : ndarray
        This should have the same shape as that of `labels`. For all pixels
        along the boundary between 2 adjacent regions, the average value of
        the corresponding pixels in `edge_map` is the edge weight between
        them.
    connectivity : int, optional
        Pixels with a squared distance less than `connectivity` from each
        other are considered adjacent. It can range from 1 to `labels.ndim`.
        Its behavior is the same as the `connectivity` parameter in
        `scipy.ndimage.generate_binary_structure`.

    Returns
    -------
    rag : RAG
        The region adjacency graph. Each edge carries a ``weight`` (mean
        `edge_map` value along the shared boundary) and a ``count`` (number
        of contributing boundary pixel pairs).

    Examples
    --------
    >>> from skimage import data, segmentation, filters, color, graph
    >>> img = data.chelsea()
    >>> labels = segmentation.slic(img)
    >>> edge_map = filters.sobel(color.rgb2gray(img))
    >>> rag = graph.rag_boundary(labels, edge_map)
    """
    conn = ndi.generate_binary_structure(labels.ndim, connectivity)
    # A pixel lies on a boundary if grey erosion/dilation changes its label.
    eroded = ndi.grey_erosion(labels, footprint=conn)
    dilated = ndi.grey_dilation(labels, footprint=conn)
    boundaries0 = (eroded != labels)
    boundaries1 = (dilated != labels)
    # Pair the smaller label with the larger one at each boundary pixel.
    labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
    labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
    n = np.max(labels_large) + 1
    # use a dummy broadcast array as data for RAG
    ones = np.broadcast_to(1., labels_small.shape)
    count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)),
                                     dtype=int, shape=(n, n)).tocsr()
    data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1]))
    data_coo = sparse.coo_matrix((data, (labels_small, labels_large)))
    graph_matrix = data_coo.tocsr()
    # Mean boundary value = summed edge_map values / number of pairs.
    graph_matrix.data /= count_matrix.data
    rag = RAG()
    rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix),
                                weight='weight')
    rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix),
                                weight='count')
    for n in rag.nodes():
        rag.nodes[n].update({'labels': [n]})
    return rag
@require("matplotlib", ">=3.3")
def show_rag(labels, rag, image, border_color='black', edge_width=1.5,
             edge_cmap='magma', img_cmap='bone', in_place=True, ax=None):
    """Show a Region Adjacency Graph on an image.

    Given a labelled image and its corresponding RAG, show the nodes and
    edges of the RAG on the image with the specified colors. Edges are
    displayed between the centroid of the 2 adjacent regions in the image.

    Parameters
    ----------
    labels : ndarray, shape (M, N)
        The labelled image.
    rag : RAG
        The Region Adjacency Graph.
    image : ndarray, shape (M, N[, 3])
        Input image. If `colormap` is `None`, the image should be in RGB
        format.
    border_color : color spec, optional
        Color with which the borders between regions are drawn.
    edge_width : float, optional
        The thickness with which the RAG edges are drawn.
    edge_cmap : :py:class:`matplotlib.colors.Colormap`, optional
        Any matplotlib colormap with which the edges are drawn.
    img_cmap : :py:class:`matplotlib.colors.Colormap`, optional
        Any matplotlib colormap with which the image is draw. If set to
        `None` the image is drawn as it is.
    in_place : bool, optional
        If set, the RAG is modified in place. For each node `n` the
        function will set a new attribute ``rag.nodes[n]['centroid']``.
    ax : :py:class:`matplotlib.axes.Axes`, optional
        The axes to draw on. If not specified, new axes are created and
        drawn on.

    Returns
    -------
    lc : :py:class:`matplotlib.collections.LineCollection`
        A collection of lines that represent the edges of the graph. It
        can be passed to the :meth:`matplotlib.figure.Figure.colorbar`
        function.

    Examples
    --------
    >>> from skimage import data, segmentation, graph
    >>> import matplotlib.pyplot as plt
    >>>
    >>> img = data.coffee()
    >>> labels = segmentation.slic(img)
    >>> g = graph.rag_mean_color(img, labels)
    >>> lc = graph.show_rag(labels, g, img)
    >>> cbar = plt.colorbar(lc)
    """
    # Imported lazily so matplotlib stays an optional dependency.
    from matplotlib import colors
    from matplotlib import pyplot as plt
    from matplotlib.collections import LineCollection
    if not in_place:
        rag = rag.copy()
    if ax is None:
        fig, ax = plt.subplots()
    out = util.img_as_float(image, force_copy=True)
    if img_cmap is None:
        if image.ndim < 3 or image.shape[2] not in [3, 4]:
            msg = 'If colormap is `None`, an RGB or RGBA image should be given'
            raise ValueError(msg)
        # Ignore the alpha channel
        out = image[:, :, :3]
    else:
        img_cmap = plt.get_cmap(img_cmap)
        out = color.rgb2gray(image)
        # Ignore the alpha channel
        out = img_cmap(out)[:, :, :3]
    edge_cmap = plt.get_cmap(edge_cmap)
    # Handling the case where one node has multiple labels:
    # relabel so that every node maps to a single consecutive id.
    # offset is 1 so that regionprops does not ignore 0
    offset = 1
    map_array = np.arange(labels.max() + 1)
    for n, d in rag.nodes(data=True):
        for label in d['labels']:
            map_array[label] = offset
        offset += 1
    rag_labels = map_array[labels]
    regions = measure.regionprops(rag_labels)
    # regionprops iterates in increasing label order, matching node order.
    for (n, data), region in zip(rag.nodes(data=True), regions):
        data['centroid'] = tuple(map(int, region['centroid']))
    cc = colors.ColorConverter()
    if border_color is not None:
        border_color = cc.to_rgb(border_color)
        out = segmentation.mark_boundaries(out, rag_labels, color=border_color)
    ax.imshow(out)
    # Defining the end points of the edges
    # The tuple[::-1] syntax reverses a tuple as matplotlib uses (x,y)
    # convention while skimage uses (row, column)
    lines = [[rag.nodes[n1]['centroid'][::-1], rag.nodes[n2]['centroid'][::-1]]
             for (n1, n2) in rag.edges()]
    lc = LineCollection(lines, linewidths=edge_width, cmap=edge_cmap)
    edge_weights = [d['weight'] for x, y, d in rag.edges(data=True)]
    lc.set_array(np.array(edge_weights))
    ax.add_collection(lc)
    return lc

View File

@@ -1,89 +0,0 @@
from ._mcp import MCP, MCP_Geometric, MCP_Connect, MCP_Flexible # noqa: F401
def route_through_array(array, start, end, fully_connected=True,
                        geometric=True):
    """Find a minimum-cost path through `array` between two points.

    Thin convenience wrapper around the MCP and MCP_Geometric classes;
    see their documentation for an explanation of the path-finding
    algorithm.

    Parameters
    ----------
    array : ndarray
        Array of costs.
    start : iterable
        n-d index into `array` defining the starting point.
    end : iterable
        n-d index into `array` defining the end point.
    fully_connected : bool (optional)
        If True, diagonal moves are permitted, if False, only axial moves.
    geometric : bool (optional)
        If True, the MCP_Geometric class is used to calculate costs, if
        False, the MCP base class is used. See the class documentation
        for an explanation of the differences between MCP and
        MCP_Geometric.

    Returns
    -------
    path : list
        List of n-d index tuples defining the path from `start` to `end`.
    cost : float
        Cost of the path. If `geometric` is False, the cost of the path
        is the sum of the values of `array` along the path. If
        `geometric` is True, a finer computation is made (see the
        documentation of the MCP_Geometric class).

    See Also
    --------
    MCP, MCP_Geometric

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.graph import route_through_array
    >>> image = np.array([[1, 3], [10, 12]])
    >>> # Forbid diagonal steps
    >>> route_through_array(image, [0, 0], [1, 1], fully_connected=False)
    ([(0, 0), (0, 1), (1, 1)], 9.5)
    >>> # Now allow diagonal steps: the path goes directly start -> end
    >>> route_through_array(image, [0, 0], [1, 1])
    ([(0, 0), (1, 1)], 9.19238815542512)
    >>> # Cost is the sum of array values along the path (16 = 1 + 3 + 12)
    >>> route_through_array(image, [0, 0], [1, 1], fully_connected=False,
    ...                     geometric=False)
    ([(0, 0), (0, 1), (1, 1)], 16.0)
    """
    start, end = tuple(start), tuple(end)
    finder_cls = MCP_Geometric if geometric else MCP
    finder = finder_cls(array, fully_connected=fully_connected)
    cumulative_costs, _ = finder.find_costs([start], [end])
    return finder.traceback(end), cumulative_costs[end]

View File

@@ -1,81 +0,0 @@
import numpy as np
from . import _spath
def shortest_path(arr, reach=1, axis=-1, output_indexlist=False):
    """Find the shortest path through an n-d array from one side to another.

    Parameters
    ----------
    arr : ndarray of float64
        Array of costs to traverse.
    reach : int, optional
        By default (``reach = 1``), the shortest path can only move
        one row up or down for every step it moves forward (i.e.,
        the path gradient is limited to 1). `reach` defines the
        number of elements that can be skipped along each non-axis
        dimension at each step.
    axis : int, optional
        The axis along which the path must always move forward (default -1)
    output_indexlist : bool, optional
        See return value `p` for explanation.

    Returns
    -------
    p : iterable of int
        For each step along `axis`, the coordinate of the shortest path.
        If `output_indexlist` is True, then the path is returned as a list
        of n-d tuples that index into `arr`. If False, then the path is
        returned as an array listing the coordinates of the path along the
        non-axis dimensions for each step along the axis dimension. That
        is, `p.shape == (arr.shape[axis], arr.ndim-1)` except that p is
        squeezed before returning so if `arr.ndim == 2`, then
        `p.shape == (arr.shape[axis],)`
    cost : float
        Cost of path. This is the absolute sum of all the
        differences along the path.
    """
    # First: calculate the valid moves from any given position. Basically,
    # always move +1 along the given axis, and then can move anywhere within
    # a grid defined by the reach.
    if axis < 0:
        axis += arr.ndim
    offset_ind_shape = (2 * reach + 1,) * (arr.ndim - 1)
    offset_indices = np.indices(offset_ind_shape) - reach
    # Insert the mandatory +1 step along `axis` into every offset.
    offset_indices = np.insert(offset_indices, axis,
                               np.ones(offset_ind_shape), axis=0)
    offset_size = np.multiply.reduce(offset_ind_shape)
    offsets = np.reshape(offset_indices, (arr.ndim, offset_size), order='F').T
    # Valid starting positions are anywhere on the hyperplane defined by
    # position 0 on the given axis. Ending positions are anywhere on the
    # hyperplane at position -1 along the same.
    non_axis_shape = arr.shape[:axis] + arr.shape[axis + 1:]
    non_axis_indices = np.indices(non_axis_shape)
    non_axis_size = np.multiply.reduce(non_axis_shape)
    start_indices = np.insert(non_axis_indices, axis,
                              np.zeros(non_axis_shape), axis=0)
    starts = np.reshape(start_indices, (arr.ndim, non_axis_size), order='F').T
    end_indices = np.insert(non_axis_indices, axis,
                            np.full(non_axis_shape, -1,
                                    dtype=non_axis_indices.dtype), axis=0)
    ends = np.reshape(end_indices, (arr.ndim, non_axis_size), order='F').T
    # Find the minimum-cost path to one of the end-points
    m = _spath.MCP_Diff(arr, offsets=offsets)
    costs, traceback = m.find_costs(starts, ends, find_all_ends=False)
    # Figure out which end-point was found
    # (find_all_ends=False stops at the first reachable end: the one with
    # finite cost).
    for end in ends:
        cost = costs[tuple(end)]
        if cost != np.inf:
            break
    traceback = m.traceback(end)
    if not output_indexlist:
        # Drop the axis coordinate (implicit: one step per position) and
        # return only the non-axis coordinates.
        traceback = np.array(traceback)
        traceback = np.concatenate([traceback[:, :axis],
                                    traceback[:, axis + 1:]], axis=1)
        traceback = np.squeeze(traceback)
    return traceback, cost

View File

@@ -1,45 +0,0 @@
import numpy as np
import skimage.graph.mcp as mcp
from skimage._shared.testing import assert_array_equal
# Uniform unit-cost grid shared by the tests in this module.
a = np.ones((8, 8), dtype=np.float32)

# Expected cumulative costs: a ramp increasing with the column index
# (for seeds along the left edge) ...
horizontal_ramp = np.tile(np.arange(8, dtype=float), (8, 1))
# ... and a ramp increasing with the row index (for seeds on the top edge).
vertical_ramp = horizontal_ramp.T.copy()
def test_anisotropy():
    """Costs must scale with the per-axis `sampling` (anisotropic spacing)."""
    # Seeds along the left edge create a horizontal ramp of costs;
    # seeds along the top edge create a vertical ramp.
    seeds_for_horizontal = [(row, 0) for row in range(8)]
    seeds_for_vertical = [(0, col) for col in range(8)]
    for sy in range(1, 5):
        for sx in range(1, 5):
            sampling = sy, sx
            # Trace horizontally
            m1 = mcp.MCP_Geometric(a, sampling=sampling, fully_connected=True)
            costs1, _ = m1.find_costs(seeds_for_horizontal)
            # Trace vertically
            m2 = mcp.MCP_Geometric(a, sampling=sampling, fully_connected=True)
            costs2, _ = m2.find_costs(seeds_for_vertical)
            # Each ramp scales linearly with the sampling along its axis.
            assert_array_equal(costs1, horizontal_ramp * sx)
            assert_array_equal(costs2, vertical_ramp * sy)

View File

@@ -1,75 +0,0 @@
import numpy as np
import skimage.graph.mcp as mcp
# import stentseg.graph._mcp as mcp
from skimage._shared.testing import assert_array_equal
a = np.ones((8, 8), dtype=np.float32)
count = 0
class MCP(mcp.MCP_Connect):
    """MCP_Connect subclass that records every connection between fronts
    and remembers the cheapest meeting point per pair of seed ids."""

    def _reset(self):
        """Reset the id map and the connection bookkeeping."""
        mcp.MCP_Connect._reset(self)
        self._conn = {}      # (id1, id2) -> list of all meeting positions
        self._bestconn = {}  # (id1, id2) -> (cost, pos1, pos2) of cheapest

    def create_connection(self, id1, id2, pos1, pos2, cost1, cost2):
        """Record a meeting of two fronts; keep the cheapest per pair."""
        # Normalize so keys/positions are independent of argument order.
        # NOTE: renamed from ``hash``, which shadowed the builtin.
        key = min(id1, id2), max(id1, id2)
        val = min(pos1, pos2), max(pos1, pos2)
        cost = min(cost1, cost2)
        # Add to total list
        self._conn.setdefault(key, []).append(val)
        # Keep track of connection with lowest cost
        curcost = self._bestconn.get(key, (np.inf,))[0]
        if cost < curcost:
            self._bestconn[key] = (cost,) + val
def test_connections():
    """Three fronts must connect pairwise, each pair exactly once, at the
    expected meeting positions."""
    # Create MCP object with three seed points
    mcp = MCP(a)
    costs, traceback = mcp.find_costs([(1, 1), (7, 7), (1, 7)])
    # Test that all three seed points are connected
    connections = set(mcp._conn.keys())
    assert (0, 1) in connections
    assert (1, 2) in connections
    assert (0, 2) in connections
    # Test that any two neighbors have only been connected once
    for position_tuples in mcp._conn.values():
        n1 = len(position_tuples)
        n2 = len(set(position_tuples))
        assert n1 == n2
    # For seed 0 and 1
    cost, pos1, pos2 = mcp._bestconn[(0, 1)]
    # Test meeting points
    assert (pos1, pos2) == ((3, 3), (4, 4))
    # Test the whole path (forward half + reversed backward half)
    path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2)))
    assert_array_equal(
        path, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)])
    # For seed 1 and 2
    cost, pos1, pos2 = mcp._bestconn[(1, 2)]
    # Test meeting points
    assert (pos1, pos2) == ((3, 7), (4, 7))
    # Test the whole path
    path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2)))
    assert_array_equal(
        path, [(1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)])
    # For seed 0 and 2
    cost, pos1, pos2 = mcp._bestconn[(0, 2)]
    # Test meeting points
    assert (pos1, pos2) == ((1, 3), (1, 4))
    # Test the whole path
    path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2)))
    assert_array_equal(
        path, [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)])

View File

@@ -1,53 +0,0 @@
import numpy as np
import skimage.graph.mcp as mcp
from skimage._shared.testing import assert_array_equal
a = np.ones((8, 8), dtype=np.float32)
a[1::2] *= 2.0
class FlexibleMCP(mcp.MCP_Flexible):
    """MCP subclass with a constant travel cost (independent of the cost
    array) that halts the front once it is more than four steps from the
    seed point."""

    def _reset(self):
        mcp.MCP_Flexible._reset(self)
        # Raveled per-pixel step count from the seed; filled by update_node.
        self._distance = np.zeros((8, 8), dtype=np.float32).ravel()

    def goal_reached(self, index, cumcost):
        # 2 means "goal reached, stop expanding here"; 0 means "continue".
        return 2 if self._distance[index] > 4 else 0

    def travel_cost(self, index, new_index, offset_length):
        # Constant cost regardless of the underlying array values.
        return 1.0

    def examine_neighbor(self, index, new_index, offset_length):
        pass  # not exercised by these tests

    def update_node(self, index, new_index, offset_length):
        self._distance[new_index] = self._distance[index] + 1
def test_flexible():
    """The travel_cost and goal_reached hooks shape the resulting cost map."""
    # Create MCP and do a traceback
    mcp = FlexibleMCP(a)
    costs, traceback = mcp.find_costs([(0, 0)])
    # Check that inner part is correct. This basically
    # tests whether travel_cost works.
    assert_array_equal(costs[:4, :4], [[1, 2, 3, 4],
                                       [2, 2, 3, 4],
                                       [3, 3, 3, 4],
                                       [4, 4, 4, 4]])
    # Test that the algorithm stopped at the right distance.
    # Note that some of the costs are filled in but not yet frozen,
    # so we take a bit of margin
    assert np.all(costs[-2:, :] == np.inf)
    assert np.all(costs[:, -2:] == np.inf)

View File

@@ -1,50 +0,0 @@
import time
import random
import skimage.graph.heap as heap
from skimage._shared.testing import test_parallel
@test_parallel()
def test_heap():
    """Exercise both heap variants (fast-update and plain) on 100k items."""
    for fast_update in (True, False):
        _test_heap(100000, fast_update)
def _test_heap(n, fast_update):
    """Push/pop `n` values (with duplicates, double-pushes, and random
    removals) and verify the heap yields them in nondecreasing order.

    Returns the elapsed wall-clock time of the heap operations.
    """
    # generate random numbers with duplicates
    random.seed(0)
    a = [random.uniform(1.0, 100.0) for i in range(n // 2)]
    a = a + a
    t0 = time.perf_counter()
    # insert in heap with random removals
    if fast_update:
        h = heap.FastUpdateBinaryHeap(128, n)
    else:
        h = heap.BinaryHeap(128)
    for i in range(len(a)):
        h.push(a[i], i)
        if a[i] < 25:
            # double-push same ref sometimes to test fast update codepaths
            h.push(2 * a[i], i)
        if 25 < a[i] < 50:
            # pop some to test random removal
            h.pop()
    # pop from heap until exhausted (pop on empty heap raises IndexError)
    b = []
    while True:
        try:
            b.append(h.pop()[0])
        except IndexError:
            break
    t1 = time.perf_counter()
    # verify: values must come out in sorted (nondecreasing) order
    for i in range(1, len(b)):
        assert(b[i] >= b[i - 1])
    return t1 - t0

View File

@@ -1,163 +0,0 @@
import numpy as np
import skimage.graph.mcp as mcp
from skimage._shared.testing import (assert_array_equal, assert_almost_equal,
parametrize)
from skimage._shared._warnings import expected_warnings
# Deterministic runs for the random smoke tests below.
np.random.seed(0)

# Cost array: all ones with a zero-cost "channel" along row 1 and column 1.
a = np.ones((8, 8), dtype=np.float32)
a[1:7, 1] = 0.0
a[1, 1:7] = 0.0

# Appended to expected-warning patterns so the warning becomes optional
# (the alternative branch matches the empty string).
warning_optional = r'|\A\Z'
def test_basic():
    """Costs flow along the zero-cost channel; traceback follows it."""
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(a, fully_connected=True)
        costs, traceback = m.find_costs([(1, 6)])
        return_path = m.traceback((7, 2))
    assert_array_equal(costs,
                       [[1., 1., 1., 1., 1., 1., 1., 1.],
                        [1., 0., 0., 0., 0., 0., 0., 1.],
                        [1., 0., 1., 1., 1., 1., 1., 1.],
                        [1., 0., 1., 2., 2., 2., 2., 2.],
                        [1., 0., 1., 2., 3., 3., 3., 3.],
                        [1., 0., 1., 2., 3., 4., 4., 4.],
                        [1., 0., 1., 2., 3., 4., 5., 5.],
                        [1., 1., 1., 2., 3., 4., 5., 6.]])
    assert_array_equal(return_path,
                       [(1, 6),
                        (1, 5),
                        (1, 4),
                        (1, 3),
                        (1, 2),
                        (2, 1),
                        (3, 1),
                        (4, 1),
                        (5, 1),
                        (6, 1),
                        (7, 2)])
def test_neg_inf():
    """Negative and infinite costs both mark pixels as impassable, giving
    identical cost maps and paths."""
    expected_costs = np.where(a == 1, np.inf, 0)
    expected_path = [(1, 6),
                     (1, 5),
                     (1, 4),
                     (1, 3),
                     (1, 2),
                     (2, 1),
                     (3, 1),
                     (4, 1),
                     (5, 1),
                     (6, 1)]
    test_neg = np.where(a == 1, -1, 0)
    test_inf = np.where(a == 1, np.inf, 0)
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(test_neg, fully_connected=True)
        costs, traceback = m.find_costs([(1, 6)])
        return_path = m.traceback((6, 1))
    assert_array_equal(costs, expected_costs)
    assert_array_equal(return_path, expected_path)
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(test_inf, fully_connected=True)
        costs, traceback = m.find_costs([(1, 6)])
        return_path = m.traceback((6, 1))
    assert_array_equal(costs, expected_costs)
    assert_array_equal(return_path, expected_path)
def test_route():
    """route_through_array with geometric costs follows the channel."""
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        return_path, cost = mcp.route_through_array(a, (1, 6), (7, 2),
                                                    geometric=True)
    # Geometric cost of stepping off the zero channel at the final move.
    assert_almost_equal(cost, np.sqrt(2) / 2)
    assert_array_equal(return_path,
                       [(1, 6),
                        (1, 5),
                        (1, 4),
                        (1, 3),
                        (1, 2),
                        (2, 1),
                        (3, 1),
                        (4, 1),
                        (5, 1),
                        (6, 1),
                        (7, 2)])
def test_no_diagonal():
    """With axial moves only, the path must take an extra corner step."""
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(a, fully_connected=False)
        costs, traceback = m.find_costs([(1, 6)])
        return_path = m.traceback((7, 2))
    assert_array_equal(costs,
                       [[2., 1., 1., 1., 1., 1., 1., 2.],
                        [1., 0., 0., 0., 0., 0., 0., 1.],
                        [1., 0., 1., 1., 1., 1., 1., 2.],
                        [1., 0., 1., 2., 2., 2., 2., 3.],
                        [1., 0., 1., 2., 3., 3., 3., 4.],
                        [1., 0., 1., 2., 3., 4., 4., 5.],
                        [1., 0., 1., 2., 3., 4., 5., 6.],
                        [2., 1., 2., 3., 4., 5., 6., 7.]])
    assert_array_equal(return_path,
                       [(1, 6),
                        (1, 5),
                        (1, 4),
                        (1, 3),
                        (1, 2),
                        (1, 1),
                        (2, 1),
                        (3, 1),
                        (4, 1),
                        (5, 1),
                        (6, 1),
                        (7, 1),
                        (7, 2)])
def test_offsets():
    """Custom offsets restrict moves to the supplied displacements and are
    exposed on the MCP instance."""
    # Only downward moves (row +1), with horizontal reach up to 9 columns.
    offsets = [(1, i) for i in range(10)] + [(1, -i) for i in range(1, 10)]
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(a, offsets=offsets)
        costs, traceback = m.find_costs([(1, 6)])
    # -2: unreachable, -1: seed, >=0: index into `offsets`.
    assert_array_equal(traceback,
                       [[-2, -2, -2, -2, -2, -2, -2, -2],
                        [-2, -2, -2, -2, -2, -2, -1, -2],
                        [15, 14, 13, 12, 11, 10, 0, 1],
                        [10, 0, 1, 2, 3, 4, 5, 6],
                        [10, 0, 1, 2, 3, 4, 5, 6],
                        [10, 0, 1, 2, 3, 4, 5, 6],
                        [10, 0, 1, 2, 3, 4, 5, 6],
                        [10, 0, 1, 2, 3, 4, 5, 6]])
    assert hasattr(m, "offsets")
    assert_array_equal(offsets, m.offsets)
@parametrize("shape", [(100, 100), (5, 8, 13, 17)] * 5)
def test_crashing(shape):
    """Smoke test: MCP must not crash on random arrays of various shapes."""
    _test_random(shape)
def _test_random(shape):
    # Just tests for crashing -- not for correctness.
    a = np.random.rand(*shape).astype(np.float32)
    # Seeds: both corners plus one random interior point.
    starts = [[0] * len(shape), [-1] * len(shape),
              (np.random.rand(len(shape)) * shape).astype(int)]
    ends = [(np.random.rand(len(shape)) * shape).astype(int)
            for i in range(4)]
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(a, fully_connected=True)
        costs, offsets = m.find_costs(starts)
        # Traceback from arbitrary points after an unrestricted search.
        for point in [(np.random.rand(len(shape)) * shape).astype(int)
                      for i in range(4)]:
            m.traceback(point)
        # Re-run with explicit end points after a reset.
        m._reset()
        m.find_costs(starts, ends)
        for end in ends:
            m.traceback(end)
    return a, costs, offsets

View File

@@ -1,52 +0,0 @@
import numpy as np
from skimage.graph._graph import pixel_graph, central_pixel
# 3x3 mask with four True pixels: one corner plus an L-shaped cluster.
mask = np.array(
    [[True, False, False],
     [False, True, True],
     [False, True, False]]
)
# Random values used to exercise edge functions; shape matches the mask.
image = np.random.default_rng().random(mask.shape)
def test_small_graph():
    """Four masked pixels with connectivity 2 yield 8 directed edges."""
    g, n = pixel_graph(mask, connectivity=2)
    assert g.shape == (4, 4)
    # Each undirected edge is stored in both directions.
    assert len(g.data) == 8
    # Axial neighbors are distance 1, diagonal neighbors sqrt(2).
    np.testing.assert_allclose(np.unique(g.data), [1, np.sqrt(2)])
    np.testing.assert_array_equal(n, [0, 4, 5, 7])
def test_central_pixel():
    """central_pixel returns the node minimizing total path distance."""
    g, n = pixel_graph(mask, connectivity=2)
    px, ds = central_pixel(g, n, shape=mask.shape)
    np.testing.assert_array_equal(px, (1, 1))
    s2 = np.sqrt(2)
    np.testing.assert_allclose(ds, [s2*3 + 2, s2 + 2, s2*2 + 2, s2*2 + 2])
    # test raveled coordinate
    px, _ = central_pixel(g, n)
    assert px == 4
    # test no nodes given
    px, _ = central_pixel(g)
    assert px == 1
def test_edge_function():
    """A custom edge function receives both endpoint values and distances."""
    def absdiff_plus_distance(values_src, values_dst, dists):
        return np.abs(values_src - values_dst) + dists

    g, nodes = pixel_graph(
        image, mask=mask, connectivity=2, edge_function=absdiff_plus_distance
    )
    root2 = np.sqrt(2)
    # Diagonal neighbor: distance sqrt(2); axial neighbor: distance 1.
    np.testing.assert_allclose(g[0, 1],
                               np.abs(image[0, 0] - image[1, 1]) + root2)
    np.testing.assert_allclose(g[1, 2],
                               np.abs(image[1, 1] - image[1, 2]) + 1)
    np.testing.assert_array_equal(nodes, [0, 4, 5, 7])
def test_default_edge_func():
    """Without mask/edge_function, weights are |value diff| * spacing."""
    g, n = pixel_graph(image, spacing=np.array([0.78, 0.78]))
    num_edges = len(g.data) // 2  # each edge appears in both directions
    assert num_edges == 12  # lattice in a (3, 3) grid
    np.testing.assert_almost_equal(
        g[0, 1], 0.78 * np.abs(image[0, 0] - image[0, 1])
    )
    np.testing.assert_array_equal(n, np.arange(image.size))

View File

@@ -1,256 +0,0 @@
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from skimage import graph
from skimage import segmentation, data
from skimage._shared import testing
def max_edge(g, src, dst, n):
    """Weight callback: return the larger of the weights of the edges
    n--src and n--dst (missing edges count as -inf)."""
    missing = {'weight': -np.inf}
    weight_src = g[n].get(src, missing)['weight']
    weight_dst = g[n].get(dst, missing)['weight']
    return {'weight': max(weight_src, weight_dst)}
def test_rag_merge():
    """merge_nodes combines labels; conflicting edge weights resolve to the
    minimum by default, or via a custom `weight_func`."""
    g = graph.RAG()
    # The old networkx API accepted the attribute dict positionally.
    for i in range(5):
        g.add_node(i, {'labels': [i]})
    g.add_edge(0, 1, {'weight': 10})
    g.add_edge(1, 2, {'weight': 20})
    g.add_edge(2, 3, {'weight': 30})
    g.add_edge(3, 0, {'weight': 40})
    g.add_edge(0, 2, {'weight': 50})
    g.add_edge(3, 4, {'weight': 60})
    gc = g.copy()
    # We merge nodes and ensure that the minimum weight is chosen
    # when there is a conflict.
    g.merge_nodes(0, 2)
    assert g.adj[1][2]['weight'] == 10
    assert g.adj[2][3]['weight'] == 30
    # We specify `max_edge` as `weight_func` as ensure that maximum
    # weight is chosen in case on conflict
    gc.merge_nodes(0, 2, weight_func=max_edge)
    assert gc.adj[1][2]['weight'] == 20
    assert gc.adj[2][3]['weight'] == 40
    # Collapse the whole graph into one node.
    g.merge_nodes(1, 4)
    g.merge_nodes(2, 3)
    n = g.merge_nodes(3, 4, in_place=False)
    assert sorted(g.nodes[n]['labels']) == list(range(5))
    assert list(g.edges()) == []
@pytest.mark.parametrize(
    "in_place", [True, False],
)
def test_rag_merge_gh5360(in_place):
    """Regression test for gh-5360 (gallery example plot_rag.py)."""
    # Add another test case covering the gallery example plot_rag.py.
    # See bug report at gh-5360.
    g = graph.RAG()
    g.add_edge(1, 2, weight=10)
    g.add_edge(2, 3, weight=20)
    g.add_edge(3, 4, weight=30)
    g.add_edge(4, 1, weight=40)
    g.add_edge(1, 3, weight=50)
    for n in g.nodes():
        g.nodes[n]['labels'] = [n]
    gc = g.copy()
    # New node ID is chosen if in_place=False
    merged_id = 3 if in_place is True else 5
    g.merge_nodes(1, 3, in_place=in_place)
    assert g.adj[merged_id][2]['weight'] == 10
    assert g.adj[merged_id][4]['weight'] == 30
    gc.merge_nodes(1, 3, weight_func=max_edge, in_place=in_place)
    assert gc.adj[merged_id][2]['weight'] == 20
    assert gc.adj[merged_id][4]['weight'] == 40
def test_threshold_cut():
    """Thresholding the RAG merges the two bright and the two dark regions."""
    # Four quadrants: two near-white and two near-black.
    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1
    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3
    rag = graph.rag_mean_color(img, labels)
    new_labels = graph.cut_threshold(labels, rag, 10, in_place=False)
    # Two labels
    assert new_labels.max() == 1
    new_labels = graph.cut_threshold(labels, rag, 10)
    # Two labels
    assert new_labels.max() == 1
def test_cut_normalized():
    """Normalized cut separates the bright half from the dark half."""
    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1
    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3
    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Two labels
    assert new_labels.max() == 1
    new_labels = graph.cut_normalized(labels, rag)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    assert new_labels.max() == 1
def test_rag_error():
    """An unknown `mode` must raise ValueError."""
    img = np.zeros((10, 10, 3), dtype='uint8')
    labels = np.zeros((10, 10), dtype='uint8')
    labels[:5, :] = 0
    labels[5:, :] = 1
    with testing.raises(ValueError):
        graph.rag_mean_color(img, labels,
                             2, 'non existent mode')
def _weight_mean_color(graph, src, dst, n):
diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff}
def _pre_merge_mean_color(graph, src, dst):
graph.nodes[dst]['total color'] += graph.nodes[src]['total color']
graph.nodes[dst]['pixel count'] += graph.nodes[src]['pixel count']
graph.nodes[dst]['mean color'] = (graph.nodes[dst]['total color'] /
graph.nodes[dst]['pixel count'])
def merge_hierarchical_mean_color(labels, rag, thresh, rag_copy=True,
                                  in_place_merge=False):
    """Hierarchically merge `rag` using the mean-color callbacks above."""
    return graph.merge_hierarchical(
        labels, rag, thresh, rag_copy, in_place_merge,
        _pre_merge_mean_color, _weight_mean_color)
def test_rag_hierarchical():
    """Hierarchical merging joins the two similar left-hand regions while
    keeping the right-hand region separate."""
    img = np.zeros((8, 8, 3), dtype='uint8')
    labels = np.zeros((8, 8), dtype='uint8')
    img[:, :, :] = 31
    labels[:, :] = 1
    img[0:4, 0:4, :] = 10, 10, 10
    labels[0:4, 0:4] = 2
    img[4:, 0:4, :] = 20, 20, 20
    labels[4:, 0:4] = 3
    g = graph.rag_mean_color(img, labels)
    g2 = g.copy()
    thresh = 20  # more than 11*sqrt(3) but less than 21*sqrt(3)
    result = merge_hierarchical_mean_color(labels, g, thresh)
    assert(np.all(result[:, :4] == result[0, 0]))
    assert(np.all(result[:, 4:] == result[-1, -1]))
    result = merge_hierarchical_mean_color(labels, g2, thresh,
                                           in_place_merge=True)
    assert(np.all(result[:, :4] == result[0, 0]))
    assert(np.all(result[:, 4:] == result[-1, -1]))
    result = graph.cut_threshold(labels, g, thresh)
    assert np.all(result == result[0, 0])
def test_ncut_stable_subgraph():
    """ Test to catch an error thrown when subgraph has all equal edges. """
    # Uniform black image split into two labelled halves.
    img = np.zeros((100, 100, 3), dtype='uint8')
    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 1
    labels[:50, 50:] = 2
    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Identical regions must collapse into a single label.
    assert new_labels.max() == 0
def test_reproducibility():
    """ensure cut_normalized returns the same output for the same input,
    when specifying random_state
    """
    img = data.coffee()
    labels1 = segmentation.slic(
        img, compactness=30, n_segments=400, start_label=0)
    g = graph.rag_mean_color(img, labels1, mode='similarity')
    results = [None] * 4
    # Run four times with the same seed; all outputs must be identical.
    for i in range(len(results)):
        results[i] = graph.cut_normalized(
            labels1, g, in_place=False, thresh=1e-3, random_state=1234)
    for i in range(len(results) - 1):
        assert_array_equal(results[i], results[i + 1])
def test_generic_rag_2d():
    """Adjacency in a 2-d RAG follows the requested connectivity."""
    labels = np.array([[1, 2], [3, 4]], dtype=np.uint8)
    # Connectivity 1: axial neighbors only.
    g = graph.RAG(labels)
    assert g.has_edge(1, 2) and g.has_edge(2, 4) and not g.has_edge(1, 4)
    # Connectivity 2: diagonal neighbors too.
    h = graph.RAG(labels, connectivity=2)
    assert h.has_edge(1, 2) and h.has_edge(1, 4) and h.has_edge(2, 3)
def test_generic_rag_3d():
    """Adjacency in a 3-d RAG follows the requested connectivity."""
    labels = np.arange(8, dtype=np.uint8).reshape((2, 2, 2))
    # Connectivity 1: face neighbors only.
    g = graph.RAG(labels)
    assert g.has_edge(0, 1) and g.has_edge(1, 3) and not g.has_edge(0, 3)
    # Connectivity 2: edge neighbors too, but not the opposite corner.
    h = graph.RAG(labels, connectivity=2)
    assert h.has_edge(0, 1) and h.has_edge(0, 3) and not h.has_edge(0, 7)
    # Connectivity 3: corner neighbors as well.
    k = graph.RAG(labels, connectivity=3)
    assert k.has_edge(0, 1) and k.has_edge(1, 2) and k.has_edge(2, 5)
def test_rag_boundary():
    """Edge weights equal the mean edge-map value along region boundaries."""
    labels = np.zeros((16, 16), dtype='uint8')
    edge_map = np.zeros_like(labels, dtype=float)
    # Horizontal boundary has value 0.5, vertical boundary 1.0.
    edge_map[8, :] = 0.5
    edge_map[:, 8] = 1.0
    labels[:8, :8] = 1
    labels[:8, 8:] = 2
    labels[8:, :8] = 3
    labels[8:, 8:] = 4
    g = graph.rag_boundary(labels, edge_map, connectivity=1)
    assert set(g.nodes()) == {1, 2, 3, 4}
    assert set(g.edges()) == {(1, 2), (1, 3), (2, 4), (3, 4)}
    assert g[1][3]['weight'] == 0.25
    assert g[2][4]['weight'] == 0.34375
    assert g[1][3]['count'] == 16

View File

@@ -1,32 +0,0 @@
import numpy as np
import skimage.graph.spath as spath
from skimage._shared.testing import assert_equal, assert_array_equal
def test_basic():
    """The default path (reach=1) follows the cheapest adjacent cells."""
    x = np.array([[1, 1, 3],
                  [0, 2, 0],
                  [4, 3, 1]])
    path, cost = spath.shortest_path(x)
    assert_array_equal(path, [0, 0, 1])
    assert_equal(cost, 1)
def test_reach():
    """A larger reach allows skipping rows, finding a cheaper path."""
    x = np.array([[1, 1, 3],
                  [0, 2, 0],
                  [4, 3, 1]])
    path, cost = spath.shortest_path(x, reach=2)
    assert_array_equal(path, [0, 0, 2])
    assert_equal(cost, 0)
def test_non_square():
    """shortest_path works on non-square arrays."""
    x = np.array([[1, 1, 1, 1, 5, 5, 5],
                  [5, 0, 0, 5, 9, 1, 1],
                  [0, 5, 1, 0, 5, 5, 0],
                  [6, 1, 1, 5, 0, 0, 1]])
    path, cost = spath.shortest_path(x, reach=2)
    assert_array_equal(path, [2, 1, 1, 2, 3, 3, 2])
    assert_equal(cost, 0)