using for loop to install conda package
This commit is contained in:
298
.CondaPkg/env/Lib/site-packages/scipy/sparse/__init__.py
vendored
Normal file
298
.CondaPkg/env/Lib/site-packages/scipy/sparse/__init__.py
vendored
Normal file
@@ -0,0 +1,298 @@
|
||||
"""
|
||||
=====================================
|
||||
Sparse matrices (:mod:`scipy.sparse`)
|
||||
=====================================
|
||||
|
||||
.. currentmodule:: scipy.sparse
|
||||
|
||||
SciPy 2-D sparse array package for numeric data.
|
||||
|
||||
.. note::
|
||||
|
||||
This package is switching to an array interface, compatible with
|
||||
NumPy arrays, from the older matrix interface. We recommend that
|
||||
you use the array objects (`bsr_array`, `coo_array`, etc.) for
|
||||
all new work.
|
||||
|
||||
When using the array interface, please note that:
|
||||
|
||||
- ``x * y`` no longer performs matrix multiplication, but
|
||||
element-wise multiplication (just like with NumPy arrays). To
|
||||
make code work with both arrays and matrices, use ``x @ y`` for
|
||||
matrix multiplication.
|
||||
- Operations such as `sum`, that used to produce dense matrices, now
|
||||
produce arrays, whose multiplication behavior differs similarly.
|
||||
- Sparse arrays currently must be two-dimensional. This also means
|
||||
that all *slicing* operations on these objects must produce
|
||||
two-dimensional results, or they will result in an error. This
|
||||
will be addressed in a future version.
|
||||
|
||||
The construction utilities (`eye`, `kron`, `random`, `diags`, etc.)
|
||||
have not yet been ported, but their results can be wrapped into arrays::
|
||||
|
||||
A = csr_array(eye(3))
|
||||
|
||||
Contents
|
||||
========
|
||||
|
||||
Sparse array classes
|
||||
--------------------
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
bsr_array - Block Sparse Row array
|
||||
coo_array - A sparse array in COOrdinate format
|
||||
csc_array - Compressed Sparse Column array
|
||||
csr_array - Compressed Sparse Row array
|
||||
dia_array - Sparse array with DIAgonal storage
|
||||
dok_array - Dictionary Of Keys based sparse array
|
||||
lil_array - Row-based list of lists sparse array
|
||||
|
||||
Sparse matrix classes
|
||||
---------------------
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
bsr_matrix - Block Sparse Row matrix
|
||||
coo_matrix - A sparse matrix in COOrdinate format
|
||||
csc_matrix - Compressed Sparse Column matrix
|
||||
csr_matrix - Compressed Sparse Row matrix
|
||||
dia_matrix - Sparse matrix with DIAgonal storage
|
||||
dok_matrix - Dictionary Of Keys based sparse matrix
|
||||
lil_matrix - Row-based list of lists sparse matrix
|
||||
spmatrix - Sparse matrix base class
|
||||
|
||||
Functions
|
||||
---------
|
||||
|
||||
Building sparse matrices:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
eye - Sparse MxN matrix whose k-th diagonal is all ones
|
||||
identity - Identity matrix in sparse format
|
||||
kron - kronecker product of two sparse matrices
|
||||
kronsum - kronecker sum of sparse matrices
|
||||
diags - Return a sparse matrix from diagonals
|
||||
spdiags - Return a sparse matrix from diagonals
|
||||
block_diag - Build a block diagonal sparse matrix
|
||||
tril - Lower triangular portion of a matrix in sparse format
|
||||
triu - Upper triangular portion of a matrix in sparse format
|
||||
bmat - Build a sparse matrix from sparse sub-blocks
|
||||
hstack - Stack sparse matrices horizontally (column wise)
|
||||
vstack - Stack sparse matrices vertically (row wise)
|
||||
rand - Random values in a given shape
|
||||
random - Random values in a given shape
|
||||
|
||||
Save and load sparse matrices:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
save_npz - Save a sparse matrix to a file using ``.npz`` format.
|
||||
load_npz - Load a sparse matrix from a file using ``.npz`` format.
|
||||
|
||||
Sparse matrix tools:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
find
|
||||
|
||||
Identifying sparse matrices:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
issparse
|
||||
isspmatrix
|
||||
isspmatrix_csc
|
||||
isspmatrix_csr
|
||||
isspmatrix_bsr
|
||||
isspmatrix_lil
|
||||
isspmatrix_dok
|
||||
isspmatrix_coo
|
||||
isspmatrix_dia
|
||||
|
||||
Submodules
|
||||
----------
|
||||
|
||||
.. autosummary::
|
||||
|
||||
csgraph - Compressed sparse graph routines
|
||||
linalg - sparse linear algebra routines
|
||||
|
||||
Exceptions
|
||||
----------
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
SparseEfficiencyWarning
|
||||
SparseWarning
|
||||
|
||||
|
||||
Usage information
|
||||
=================
|
||||
|
||||
There are seven available sparse matrix types:
|
||||
|
||||
1. csc_matrix: Compressed Sparse Column format
|
||||
2. csr_matrix: Compressed Sparse Row format
|
||||
3. bsr_matrix: Block Sparse Row format
|
||||
4. lil_matrix: List of Lists format
|
||||
5. dok_matrix: Dictionary of Keys format
|
||||
6. coo_matrix: COOrdinate format (aka IJV, triplet format)
|
||||
7. dia_matrix: DIAgonal format
|
||||
|
||||
To construct a matrix efficiently, use either dok_matrix or lil_matrix.
|
||||
The lil_matrix class supports basic slicing and fancy indexing with a
|
||||
similar syntax to NumPy arrays. As illustrated below, the COO format
|
||||
may also be used to efficiently construct matrices. Despite their
|
||||
similarity to NumPy arrays, it is **strongly discouraged** to use NumPy
|
||||
functions directly on these matrices because NumPy may not properly convert
|
||||
them for computations, leading to unexpected (and incorrect) results. If you
|
||||
do want to apply a NumPy function to these matrices, first check if SciPy has
|
||||
its own implementation for the given sparse matrix class, or **convert the
|
||||
sparse matrix to a NumPy array** (e.g., using the `toarray()` method of the
|
||||
class) first before applying the method.
|
||||
|
||||
To perform manipulations such as multiplication or inversion, first
|
||||
convert the matrix to either CSC or CSR format. The lil_matrix format is
|
||||
row-based, so conversion to CSR is efficient, whereas conversion to CSC
|
||||
is less so.
|
||||
|
||||
All conversions among the CSR, CSC, and COO formats are efficient,
|
||||
linear-time operations.
|
||||
|
||||
Matrix vector product
|
||||
---------------------
|
||||
To do a vector product between a sparse matrix and a vector simply use
|
||||
the matrix `dot` method, as described in its docstring:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import csr_matrix
|
||||
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
|
||||
>>> v = np.array([1, 0, -1])
|
||||
>>> A.dot(v)
|
||||
array([ 1, -3, -1], dtype=int64)
|
||||
|
||||
.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices,
|
||||
therefore using it will result on unexpected results or errors.
|
||||
The corresponding dense array should be obtained first instead:
|
||||
|
||||
>>> np.dot(A.toarray(), v)
|
||||
array([ 1, -3, -1], dtype=int64)
|
||||
|
||||
but then all the performance advantages would be lost.
|
||||
|
||||
The CSR format is specially suitable for fast matrix vector products.
|
||||
|
||||
Example 1
|
||||
---------
|
||||
Construct a 1000x1000 lil_matrix and add some values to it:
|
||||
|
||||
>>> from scipy.sparse import lil_matrix
|
||||
>>> from scipy.sparse.linalg import spsolve
|
||||
>>> from numpy.linalg import solve, norm
|
||||
>>> from numpy.random import rand
|
||||
|
||||
>>> A = lil_matrix((1000, 1000))
|
||||
>>> A[0, :100] = rand(100)
|
||||
>>> A[1, 100:200] = A[0, :100]
|
||||
>>> A.setdiag(rand(1000))
|
||||
|
||||
Now convert it to CSR format and solve A x = b for x:
|
||||
|
||||
>>> A = A.tocsr()
|
||||
>>> b = rand(1000)
|
||||
>>> x = spsolve(A, b)
|
||||
|
||||
Convert it to a dense matrix and solve, and check that the result
|
||||
is the same:
|
||||
|
||||
>>> x_ = solve(A.toarray(), b)
|
||||
|
||||
Now we can compute norm of the error with:
|
||||
|
||||
>>> err = norm(x-x_)
|
||||
>>> err < 1e-10
|
||||
True
|
||||
|
||||
It should be small :)
|
||||
|
||||
|
||||
Example 2
|
||||
---------
|
||||
|
||||
Construct a matrix in COO format:
|
||||
|
||||
>>> from scipy import sparse
|
||||
>>> from numpy import array
|
||||
>>> I = array([0,3,1,0])
|
||||
>>> J = array([0,3,1,2])
|
||||
>>> V = array([4,5,7,9])
|
||||
>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
|
||||
|
||||
Notice that the indices do not need to be sorted.
|
||||
|
||||
Duplicate (i,j) entries are summed when converting to CSR or CSC.
|
||||
|
||||
>>> I = array([0,0,1,3,1,0,0])
|
||||
>>> J = array([0,2,1,3,1,0,0])
|
||||
>>> V = array([1,1,1,1,1,1,1])
|
||||
>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
|
||||
|
||||
This is useful for constructing finite-element stiffness and mass matrices.
|
||||
|
||||
Further details
|
||||
---------------
|
||||
|
||||
CSR column indices are not necessarily sorted. Likewise for CSC row
|
||||
indices. Use the .sorted_indices() and .sort_indices() methods when
|
||||
sorted indices are required (e.g., when passing data to other libraries).
|
||||
|
||||
"""
|
||||
|
||||
# Original code by Travis Oliphant.
|
||||
# Modified and extended by Ed Schofield, Robert Cimrman,
|
||||
# Nathan Bell, and Jake Vanderplas.
|
||||
|
||||
import warnings as _warnings
|
||||
|
||||
from ._base import *
|
||||
from ._csr import *
|
||||
from ._csc import *
|
||||
from ._lil import *
|
||||
from ._dok import *
|
||||
from ._coo import *
|
||||
from ._dia import *
|
||||
from ._bsr import *
|
||||
from ._construct import *
|
||||
from ._extract import *
|
||||
from ._matrix_io import *
|
||||
|
||||
from ._arrays import (
|
||||
csr_array, csc_array, lil_array, dok_array, coo_array, dia_array, bsr_array
|
||||
)
|
||||
|
||||
# For backward compatibility with v0.19.
|
||||
from . import csgraph
|
||||
|
||||
# Deprecated namespaces, to be removed in v2.0.0
|
||||
from . import (
|
||||
base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract,
|
||||
lil, sparsetools, sputils
|
||||
)
|
||||
|
||||
__all__ = [s for s in dir() if not s.startswith('_')]
|
||||
|
||||
# Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15
|
||||
_warnings.filterwarnings('ignore', message='the matrix subclass is not the recommended way')
|
||||
|
||||
from scipy._lib._testutils import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_arrays.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_arrays.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_base.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_base.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_bsr.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_bsr.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_compressed.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_compressed.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_construct.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_construct.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_coo.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_coo.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_csc.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_csc.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_csr.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_csr.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_data.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_data.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_dia.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_dia.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_dok.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_dok.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_extract.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_extract.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_index.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_index.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_lil.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_lil.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_sputils.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/_sputils.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/base.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/base.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/bsr.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/bsr.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/compressed.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/compressed.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/construct.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/construct.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/coo.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/coo.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/csc.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/csc.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/csr.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/csr.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/data.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/data.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/dia.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/dia.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/dok.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/dok.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/extract.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/extract.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/lil.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/lil.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/sputils.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/__pycache__/sputils.cpython-311.pyc
vendored
Normal file
Binary file not shown.
98
.CondaPkg/env/Lib/site-packages/scipy/sparse/_arrays.py
vendored
Normal file
98
.CondaPkg/env/Lib/site-packages/scipy/sparse/_arrays.py
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
from ._bsr import bsr_matrix
|
||||
from ._coo import coo_matrix
|
||||
from ._csc import csc_matrix
|
||||
from ._csr import csr_matrix
|
||||
from ._dia import dia_matrix
|
||||
from ._dok import dok_matrix
|
||||
from ._lil import lil_matrix
|
||||
|
||||
|
||||
class _sparray:
|
||||
"""This class provides a base class for all sparse arrays.
|
||||
|
||||
It cannot be instantiated. Most of the work is provided by subclasses.
|
||||
"""
|
||||
_is_array = True
|
||||
|
||||
@property
|
||||
def _bsr_container(self):
|
||||
return bsr_array
|
||||
|
||||
@property
|
||||
def _coo_container(self):
|
||||
return coo_array
|
||||
|
||||
@property
|
||||
def _csc_container(self):
|
||||
return csc_array
|
||||
|
||||
@property
|
||||
def _csr_container(self):
|
||||
return csr_array
|
||||
|
||||
@property
|
||||
def _dia_container(self):
|
||||
return dia_array
|
||||
|
||||
@property
|
||||
def _dok_container(self):
|
||||
return dok_array
|
||||
|
||||
@property
|
||||
def _lil_container(self):
|
||||
return lil_array
|
||||
|
||||
# Restore elementwise multiplication
|
||||
def __mul__(self, *args, **kwargs):
|
||||
return self.multiply(*args, **kwargs)
|
||||
|
||||
def __rmul__(self, *args, **kwargs):
|
||||
return self.multiply(*args, **kwargs)
|
||||
|
||||
# Restore elementwise power
|
||||
def __pow__(self, *args, **kwargs):
|
||||
return self.power(*args, **kwargs)
|
||||
|
||||
|
||||
def _matrix_doc_to_array(docstr):
|
||||
# For opimized builds with stripped docstrings
|
||||
if docstr is None:
|
||||
return None
|
||||
return docstr.replace('matrix', 'array').replace('matrices', 'arrays')
|
||||
|
||||
|
||||
class bsr_array(_sparray, bsr_matrix):
|
||||
pass
|
||||
|
||||
|
||||
class coo_array(_sparray, coo_matrix):
|
||||
pass
|
||||
|
||||
|
||||
class csc_array(_sparray, csc_matrix):
|
||||
pass
|
||||
|
||||
|
||||
class csr_array(_sparray, csr_matrix):
|
||||
pass
|
||||
|
||||
|
||||
class dia_array(_sparray, dia_matrix):
|
||||
pass
|
||||
|
||||
|
||||
class dok_array(_sparray, dok_matrix):
|
||||
pass
|
||||
|
||||
|
||||
class lil_array(_sparray, lil_matrix):
|
||||
pass
|
||||
|
||||
|
||||
bsr_array.__doc__ = _matrix_doc_to_array(bsr_matrix.__doc__)
|
||||
coo_array.__doc__ = _matrix_doc_to_array(coo_matrix.__doc__)
|
||||
csc_array.__doc__ = _matrix_doc_to_array(csc_matrix.__doc__)
|
||||
csr_array.__doc__ = _matrix_doc_to_array(csr_matrix.__doc__)
|
||||
dia_array.__doc__ = _matrix_doc_to_array(dia_matrix.__doc__)
|
||||
dok_array.__doc__ = _matrix_doc_to_array(dok_matrix.__doc__)
|
||||
lil_array.__doc__ = _matrix_doc_to_array(lil_matrix.__doc__)
|
||||
1331
.CondaPkg/env/Lib/site-packages/scipy/sparse/_base.py
vendored
Normal file
1331
.CondaPkg/env/Lib/site-packages/scipy/sparse/_base.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
721
.CondaPkg/env/Lib/site-packages/scipy/sparse/_bsr.py
vendored
Normal file
721
.CondaPkg/env/Lib/site-packages/scipy/sparse/_bsr.py
vendored
Normal file
@@ -0,0 +1,721 @@
|
||||
"""Compressed Block Sparse Row matrix format"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
|
||||
|
||||
from warnings import warn
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._data import _data_matrix, _minmax_mixin
|
||||
from ._compressed import _cs_matrix
|
||||
from ._base import isspmatrix, _formats, spmatrix
|
||||
from ._sputils import (isshape, getdtype, getdata, to_native, upcast,
|
||||
get_index_dtype, check_shape)
|
||||
from . import _sparsetools
|
||||
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,
|
||||
bsr_matmat, bsr_transpose, bsr_sort_indices,
|
||||
bsr_tocsr)
|
||||
|
||||
|
||||
class bsr_matrix(_cs_matrix, _minmax_mixin):
|
||||
"""Block Sparse Row matrix
|
||||
|
||||
This can be instantiated in several ways:
|
||||
bsr_matrix(D, [blocksize=(R,C)])
|
||||
where D is a dense matrix or 2-D ndarray.
|
||||
|
||||
bsr_matrix(S, [blocksize=(R,C)])
|
||||
with another sparse matrix S (equivalent to S.tobsr())
|
||||
|
||||
bsr_matrix((M, N), [blocksize=(R,C), dtype])
|
||||
to construct an empty matrix with shape (M, N)
|
||||
dtype is optional, defaulting to dtype='d'.
|
||||
|
||||
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
|
||||
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
|
||||
|
||||
bsr_matrix((data, indices, indptr), [shape=(M, N)])
|
||||
is the standard BSR representation where the block column
|
||||
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
|
||||
and their corresponding block values are stored in
|
||||
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
|
||||
supplied, the matrix dimensions are inferred from the index arrays.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
dtype : dtype
|
||||
Data type of the matrix
|
||||
shape : 2-tuple
|
||||
Shape of the matrix
|
||||
ndim : int
|
||||
Number of dimensions (this is always 2)
|
||||
nnz
|
||||
Number of stored values, including explicit zeros
|
||||
data
|
||||
Data array of the matrix
|
||||
indices
|
||||
BSR format index array
|
||||
indptr
|
||||
BSR format index pointer array
|
||||
blocksize
|
||||
Block size of the matrix
|
||||
has_sorted_indices
|
||||
Whether indices are sorted
|
||||
|
||||
Notes
|
||||
-----
|
||||
Sparse matrices can be used in arithmetic operations: they support
|
||||
addition, subtraction, multiplication, division, and matrix power.
|
||||
|
||||
**Summary of BSR format**
|
||||
|
||||
The Block Compressed Row (BSR) format is very similar to the Compressed
|
||||
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
|
||||
sub matrices like the last example below. Block matrices often arise in
|
||||
vector-valued finite element discretizations. In such cases, BSR is
|
||||
considerably more efficient than CSR and CSC for many sparse arithmetic
|
||||
operations.
|
||||
|
||||
**Blocksize**
|
||||
|
||||
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
|
||||
That is, R and C must satisfy the relationship ``M % R = 0`` and
|
||||
``N % C = 0``.
|
||||
|
||||
If no blocksize is specified, a simple heuristic is applied to determine
|
||||
an appropriate blocksize.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import bsr_matrix
|
||||
>>> import numpy as np
|
||||
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
|
||||
array([[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]], dtype=int8)
|
||||
|
||||
>>> row = np.array([0, 0, 1, 2, 2, 2])
|
||||
>>> col = np.array([0, 2, 2, 0, 1, 2])
|
||||
>>> data = np.array([1, 2, 3 ,4, 5, 6])
|
||||
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
|
||||
array([[1, 0, 2],
|
||||
[0, 0, 3],
|
||||
[4, 5, 6]])
|
||||
|
||||
>>> indptr = np.array([0, 2, 3, 6])
|
||||
>>> indices = np.array([0, 2, 2, 0, 1, 2])
|
||||
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
|
||||
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
|
||||
array([[1, 1, 0, 0, 2, 2],
|
||||
[1, 1, 0, 0, 2, 2],
|
||||
[0, 0, 0, 0, 3, 3],
|
||||
[0, 0, 0, 0, 3, 3],
|
||||
[4, 4, 5, 5, 6, 6],
|
||||
[4, 4, 5, 5, 6, 6]])
|
||||
|
||||
"""
|
||||
format = 'bsr'
|
||||
|
||||
def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
|
||||
_data_matrix.__init__(self)
|
||||
|
||||
if isspmatrix(arg1):
|
||||
if isspmatrix_bsr(arg1) and copy:
|
||||
arg1 = arg1.copy()
|
||||
else:
|
||||
arg1 = arg1.tobsr(blocksize=blocksize)
|
||||
self._set_self(arg1)
|
||||
|
||||
elif isinstance(arg1,tuple):
|
||||
if isshape(arg1):
|
||||
# it's a tuple of matrix dimensions (M,N)
|
||||
self._shape = check_shape(arg1)
|
||||
M,N = self.shape
|
||||
# process blocksize
|
||||
if blocksize is None:
|
||||
blocksize = (1,1)
|
||||
else:
|
||||
if not isshape(blocksize):
|
||||
raise ValueError('invalid blocksize=%s' % blocksize)
|
||||
blocksize = tuple(blocksize)
|
||||
self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
|
||||
|
||||
R,C = blocksize
|
||||
if (M % R) != 0 or (N % C) != 0:
|
||||
raise ValueError('shape must be multiple of blocksize')
|
||||
|
||||
# Select index dtype large enough to pass array and
|
||||
# scalar parameters to sparsetools
|
||||
idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
|
||||
self.indices = np.zeros(0, dtype=idx_dtype)
|
||||
self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
|
||||
|
||||
elif len(arg1) == 2:
|
||||
# (data,(row,col)) format
|
||||
self._set_self(
|
||||
self._coo_container(arg1, dtype=dtype, shape=shape).tobsr(
|
||||
blocksize=blocksize
|
||||
)
|
||||
)
|
||||
|
||||
elif len(arg1) == 3:
|
||||
# (data,indices,indptr) format
|
||||
(data, indices, indptr) = arg1
|
||||
|
||||
# Select index dtype large enough to pass array and
|
||||
# scalar parameters to sparsetools
|
||||
maxval = 1
|
||||
if shape is not None:
|
||||
maxval = max(shape)
|
||||
if blocksize is not None:
|
||||
maxval = max(maxval, max(blocksize))
|
||||
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval,
|
||||
check_contents=True)
|
||||
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
|
||||
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
|
||||
self.data = getdata(data, copy=copy, dtype=dtype)
|
||||
if self.data.ndim != 3:
|
||||
raise ValueError(
|
||||
'BSR data must be 3-dimensional, got shape=%s' % (
|
||||
self.data.shape,))
|
||||
if blocksize is not None:
|
||||
if not isshape(blocksize):
|
||||
raise ValueError('invalid blocksize=%s' % (blocksize,))
|
||||
if tuple(blocksize) != self.data.shape[1:]:
|
||||
raise ValueError('mismatching blocksize=%s vs %s' % (
|
||||
blocksize, self.data.shape[1:]))
|
||||
else:
|
||||
raise ValueError('unrecognized bsr_matrix constructor usage')
|
||||
else:
|
||||
# must be dense
|
||||
try:
|
||||
arg1 = np.asarray(arg1)
|
||||
except Exception as e:
|
||||
raise ValueError("unrecognized form for"
|
||||
" %s_matrix constructor" % self.format) from e
|
||||
arg1 = self._coo_container(
|
||||
arg1, dtype=dtype
|
||||
).tobsr(blocksize=blocksize)
|
||||
self._set_self(arg1)
|
||||
|
||||
if shape is not None:
|
||||
self._shape = check_shape(shape)
|
||||
else:
|
||||
if self.shape is None:
|
||||
# shape not already set, try to infer dimensions
|
||||
try:
|
||||
M = len(self.indptr) - 1
|
||||
N = self.indices.max() + 1
|
||||
except Exception as e:
|
||||
raise ValueError('unable to infer matrix dimensions') from e
|
||||
else:
|
||||
R,C = self.blocksize
|
||||
self._shape = check_shape((M*R,N*C))
|
||||
|
||||
if self.shape is None:
|
||||
if shape is None:
|
||||
# TODO infer shape here
|
||||
raise ValueError('need to infer shape')
|
||||
else:
|
||||
self._shape = check_shape(shape)
|
||||
|
||||
if dtype is not None:
|
||||
self.data = self.data.astype(dtype, copy=False)
|
||||
|
||||
self.check_format(full_check=False)
|
||||
|
||||
def check_format(self, full_check=True):
|
||||
"""check whether the matrix format is valid
|
||||
|
||||
*Parameters*:
|
||||
full_check:
|
||||
True - rigorous check, O(N) operations : default
|
||||
False - basic check, O(1) operations
|
||||
|
||||
"""
|
||||
M,N = self.shape
|
||||
R,C = self.blocksize
|
||||
|
||||
# index arrays should have integer data types
|
||||
if self.indptr.dtype.kind != 'i':
|
||||
warn("indptr array has non-integer dtype (%s)"
|
||||
% self.indptr.dtype.name)
|
||||
if self.indices.dtype.kind != 'i':
|
||||
warn("indices array has non-integer dtype (%s)"
|
||||
% self.indices.dtype.name)
|
||||
|
||||
idx_dtype = get_index_dtype((self.indices, self.indptr))
|
||||
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
|
||||
self.indices = np.asarray(self.indices, dtype=idx_dtype)
|
||||
self.data = to_native(self.data)
|
||||
|
||||
# check array shapes
|
||||
if self.indices.ndim != 1 or self.indptr.ndim != 1:
|
||||
raise ValueError("indices, and indptr should be 1-D")
|
||||
if self.data.ndim != 3:
|
||||
raise ValueError("data should be 3-D")
|
||||
|
||||
# check index pointer
|
||||
if (len(self.indptr) != M//R + 1):
|
||||
raise ValueError("index pointer size (%d) should be (%d)" %
|
||||
(len(self.indptr), M//R + 1))
|
||||
if (self.indptr[0] != 0):
|
||||
raise ValueError("index pointer should start with 0")
|
||||
|
||||
# check index and data arrays
|
||||
if (len(self.indices) != len(self.data)):
|
||||
raise ValueError("indices and data should have the same size")
|
||||
if (self.indptr[-1] > len(self.indices)):
|
||||
raise ValueError("Last value of index pointer should be less than "
|
||||
"the size of index and data arrays")
|
||||
|
||||
self.prune()
|
||||
|
||||
if full_check:
|
||||
# check format validity (more expensive)
|
||||
if self.nnz > 0:
|
||||
if self.indices.max() >= N//C:
|
||||
raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
|
||||
if self.indices.min() < 0:
|
||||
raise ValueError("column index values must be >= 0")
|
||||
if np.diff(self.indptr).min() < 0:
|
||||
raise ValueError("index pointer values must form a "
|
||||
"non-decreasing sequence")
|
||||
|
||||
# if not self.has_sorted_indices():
|
||||
# warn('Indices were not in sorted order. Sorting indices.')
|
||||
# self.sort_indices(check_first=False)
|
||||
|
||||
def _get_blocksize(self):
|
||||
return self.data.shape[1:]
|
||||
blocksize = property(fget=_get_blocksize)
|
||||
|
||||
def getnnz(self, axis=None):
    # Per-axis counts are not implemented for BSR (entries are stored in
    # R x C blocks that straddle several rows/columns).
    if axis is not None:
        raise NotImplementedError("getnnz over an axis is not implemented "
                                  "for BSR format")
    R,C = self.blocksize
    # indptr[-1] is the number of stored blocks; each holds R*C values.
    # NOTE(review): counts every stored value, including explicit zeros
    # inside partially-filled blocks.
    return int(self.indptr[-1] * R * C)

getnnz.__doc__ = spmatrix.getnnz.__doc__
|
||||
|
||||
def __repr__(self):
    """Return a summary string, e.g.
    <4x4 sparse matrix of type '<class 'numpy.float64'>'
        with 8 stored elements (blocksize = 2x2) in Block Sparse Row format>
    """
    # _formats maps the format code ('bsr') to its human-readable name.
    format = _formats[self.getformat()][1]
    return ("<%dx%d sparse matrix of type '%s'\n"
            "\twith %d stored elements (blocksize = %dx%d) in %s format>" %
            (self.shape + (self.dtype.type, self.nnz) + self.blocksize +
             (format,)))
|
||||
|
||||
def diagonal(self, k=0):
    rows, cols = self.shape
    # A diagonal entirely outside the matrix is empty.
    if k <= -rows or k >= cols:
        return np.empty(0, dtype=self.data.dtype)
    R, C = self.blocksize
    # Length of the k-th diagonal of a (rows x cols) matrix.
    y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
                 dtype=upcast(self.dtype))
    # C routine walks the stored blocks and scatters the diagonal
    # entries they contain into y.
    _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
                              self.indptr, self.indices,
                              np.ravel(self.data), y)
    return y

diagonal.__doc__ = spmatrix.diagonal.__doc__
|
||||
|
||||
##########################
|
||||
# NotImplemented methods #
|
||||
##########################
|
||||
|
||||
def __getitem__(self,key):
    # Indexing/slicing is not supported for BSR; convert to CSR/CSC first.
    raise NotImplementedError
|
||||
|
||||
def __setitem__(self,key,val):
    # Item assignment is not supported for BSR; convert to LIL/CSR first.
    raise NotImplementedError
|
||||
|
||||
######################
|
||||
# Arithmetic methods #
|
||||
######################
|
||||
|
||||
def _add_dense(self, other):
    """Add a dense array by delegating to the COO representation."""
    as_coo = self.tocoo(copy=False)
    return as_coo._add_dense(other)
|
||||
|
||||
def _mul_vector(self, other):
    """Sparse matrix @ dense 1-D vector product."""
    M,N = self.shape
    R,C = self.blocksize

    # Output dtype must hold products of both operand dtypes.
    result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))

    # C kernel: for each stored block, result[block rows] += block @ other.
    bsr_matvec(M//R, N//C, R, C,
               self.indptr, self.indices, self.data.ravel(),
               other, result)

    return result
|
||||
|
||||
def _mul_multivector(self,other):
    """Sparse matrix @ dense 2-D matrix product (several column vectors)."""
    R,C = self.blocksize
    M,N = self.shape
    n_vecs = other.shape[1]  # number of column vectors

    result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))

    # C kernel handles all n_vecs columns in one pass over the blocks;
    # assumes `other` is C-contiguous (ravel order) — TODO confirm callers.
    bsr_matvecs(M//R, N//C, n_vecs, R, C,
                self.indptr, self.indices, self.data.ravel(),
                other.ravel(), result.ravel())

    return result
|
||||
|
||||
def _mul_sparse_matrix(self, other):
    """Sparse @ sparse product, computed block-wise in BSR format."""
    M, K1 = self.shape
    K2, N = other.shape

    R,n = self.blocksize

    # Pick a column blocksize for `other` that is compatible with ours:
    # keep other's own column blocking when it is BSR, otherwise use 1.
    if isspmatrix_bsr(other):
        C = other.blocksize[1]
    else:
        C = 1

    # Local import avoids a circular import at module load time.
    from ._csr import isspmatrix_csr

    if isspmatrix_csr(other) and n == 1:
        other = other.tobsr(blocksize=(n,C), copy=False)  # lightweight conversion
    else:
        other = other.tobsr(blocksize=(n,C))

    # First pass: count result blocks so the output arrays can be
    # allocated with a wide-enough index dtype.
    idx_dtype = get_index_dtype((self.indptr, self.indices,
                                 other.indptr, other.indices))

    bnnz = csr_matmat_maxnnz(M//R, N//C,
                             self.indptr.astype(idx_dtype),
                             self.indices.astype(idx_dtype),
                             other.indptr.astype(idx_dtype),
                             other.indices.astype(idx_dtype))

    # Re-derive the index dtype now that the true block count is known.
    idx_dtype = get_index_dtype((self.indptr, self.indices,
                                 other.indptr, other.indices),
                                maxval=bnnz)
    indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
    indices = np.empty(bnnz, dtype=idx_dtype)
    data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))

    # Second pass: fill in the result's structure and block values.
    bsr_matmat(bnnz, M//R, N//C, R, C, n,
               self.indptr.astype(idx_dtype),
               self.indices.astype(idx_dtype),
               np.ravel(self.data),
               other.indptr.astype(idx_dtype),
               other.indices.astype(idx_dtype),
               np.ravel(other.data),
               indptr,
               indices,
               data)

    data = data.reshape(-1,R,C)

    # TODO eliminate zeros

    return self._bsr_container(
        (data, indices, indptr), shape=(M, N), blocksize=(R, C)
    )
|
||||
|
||||
######################
|
||||
# Conversion methods #
|
||||
######################
|
||||
|
||||
def tobsr(self, blocksize=None, copy=False):
    """Convert this matrix into Block Sparse Row Format.

    With copy=False, the data/indices may be shared between this
    matrix and the resultant bsr_matrix.

    If blocksize=(R, C) is provided, it will be used for determining
    block size of the bsr_matrix.
    """
    # A different blocksize requires re-blocking; route through CSR.
    if blocksize not in (None, self.blocksize):
        return self.tocsr().tobsr(blocksize=blocksize)
    # Already in the requested blocking; honour the copy flag.
    return self.copy() if copy else self
|
||||
|
||||
def tocsr(self, copy=False):
    M, N = self.shape
    R, C = self.blocksize
    nnz = self.nnz
    # Indices must be able to address both column numbers (up to N) and
    # entry positions (up to nnz).
    idx_dtype = get_index_dtype((self.indptr, self.indices),
                                maxval=max(nnz, N))
    indptr = np.empty(M + 1, dtype=idx_dtype)
    indices = np.empty(nnz, dtype=idx_dtype)
    data = np.empty(nnz, dtype=upcast(self.dtype))

    # C routine expands each R x C block into individual CSR entries.
    bsr_tocsr(M // R,  # n_brow
              N // C,  # n_bcol
              R, C,
              self.indptr.astype(idx_dtype, copy=False),
              self.indices.astype(idx_dtype, copy=False),
              self.data,
              indptr,
              indices,
              data)
    return self._csr_container((data, indices, indptr), shape=self.shape)

tocsr.__doc__ = spmatrix.tocsr.__doc__
|
||||
|
||||
def tocsc(self, copy=False):
    # Go through CSR; only the final conversion needs to honour `copy`.
    intermediate = self.tocsr(copy=False)
    return intermediate.tocsc(copy=copy)

tocsc.__doc__ = spmatrix.tocsc.__doc__
|
||||
|
||||
def tocoo(self, copy=True):
    """Convert this matrix to COOrdinate format.

    When copy=False the data array will be shared between
    this matrix and the resultant coo_matrix.
    """

    M,N = self.shape
    R,C = self.blocksize

    # Number of stored blocks per block-row.
    indptr_diff = np.diff(self.indptr)
    if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
        # Check for potential overflow when narrowing to intp.
        indptr_diff_limited = indptr_diff.astype(np.intp)
        if np.any(indptr_diff_limited != indptr_diff):
            raise ValueError("Matrix too big to convert")
        indptr_diff = indptr_diff_limited

    # Row index of each block's top row, repeated once per stored block,
    # then expanded to every (r, c) position inside the block.
    row = (R * np.arange(M//R)).repeat(indptr_diff)
    row = row.repeat(R*C).reshape(-1,R,C)
    row += np.tile(np.arange(R).reshape(-1,1), (1,C))
    row = row.reshape(-1)

    # Column index: each block-column index expanded to its C columns.
    col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
    col += np.tile(np.arange(C), (R,1))
    col = col.reshape(-1)

    data = self.data.reshape(-1)

    if copy:
        data = data.copy()

    return self._coo_container(
        (data, (row, col)), shape=self.shape
    )
|
||||
|
||||
def toarray(self, order=None, out=None):
    # COO provides the dense conversion; no intermediate copy is needed.
    as_coo = self.tocoo(copy=False)
    return as_coo.toarray(order=order, out=out)

toarray.__doc__ = spmatrix.toarray.__doc__
|
||||
|
||||
def transpose(self, axes=None, copy=False):
    if axes is not None:
        raise ValueError(("Sparse matrices do not support "
                          "an 'axes' parameter because swapping "
                          "dimensions is the only logical permutation."))

    R, C = self.blocksize
    M, N = self.shape
    # Number of stored R x C blocks.
    NBLK = self.nnz//(R*C)

    if self.nnz == 0:
        # Empty matrix: construct the transposed-shape result directly.
        return self._bsr_container((N, M), blocksize=(C, R),
                                   dtype=self.dtype, copy=copy)

    indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
    indices = np.empty(NBLK, dtype=self.indices.dtype)
    # Blocks of the transpose are C x R.
    data = np.empty((NBLK, C, R), dtype=self.data.dtype)

    # C routine reorders the blocks and transposes each one.
    bsr_transpose(M//R, N//C, R, C,
                  self.indptr, self.indices, self.data.ravel(),
                  indptr, indices, data.ravel())

    return self._bsr_container((data, indices, indptr),
                               shape=(N, M), copy=copy)

transpose.__doc__ = spmatrix.transpose.__doc__
|
||||
|
||||
##############################################################
|
||||
# methods that examine or modify the internal data structure #
|
||||
##############################################################
|
||||
|
||||
def eliminate_zeros(self):
    """Remove zero elements in-place."""

    if not self.nnz:
        return  # nothing to do

    R,C = self.blocksize
    M,N = self.shape

    # A block is kept iff it contains at least one nonzero entry.
    mask = (self.data != 0).reshape(-1,R*C).sum(axis=1)  # nonzero blocks

    nonzero_blocks = mask.nonzero()[0]

    # Compact the surviving blocks to the front of the data array.
    self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]

    # modifies self.indptr and self.indices *in place*
    _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
                                     self.indices, mask)
    # Drop the now-unused tail of the index/data arrays.
    self.prune()
|
||||
|
||||
def sum_duplicates(self):
    """Eliminate duplicate matrix entries by adding them together

    This is an *in place* operation.
    """
    if self.has_canonical_format:
        return
    # Sorting places duplicate block-column indices next to each other.
    self.sort_indices()
    R, C = self.blocksize
    M, N = self.shape

    # port of _sparsetools.csr_sum_duplicates
    n_row = M // R
    nnz = 0          # number of unique blocks written so far
    row_end = 0
    for i in range(n_row):
        jj = row_end
        row_end = self.indptr[i+1]
        while jj < row_end:
            j = self.indices[jj]
            # NOTE: x is a *view* into self.data; the += below accumulates
            # duplicate blocks in place before x is copied to slot nnz.
            x = self.data[jj]
            jj += 1
            while jj < row_end and self.indices[jj] == j:
                x += self.data[jj]
                jj += 1
            self.indices[nnz] = j
            self.data[nnz] = x
            nnz += 1
        self.indptr[i+1] = nnz

    self.prune()  # nnz may have changed
    self.has_canonical_format = True
|
||||
|
||||
def sort_indices(self):
    """Sort the indices of this matrix *in place*
    """
    if self.has_sorted_indices:
        return

    R,C = self.blocksize
    M,N = self.shape

    # C routine reorders indices and their data blocks together.
    bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())

    self.has_sorted_indices = True
|
||||
|
||||
def prune(self):
    """ Remove empty space after all non-zero elements.
    """

    R,C = self.blocksize
    M,N = self.shape

    if len(self.indptr) != M//R + 1:
        raise ValueError("index pointer has invalid length")

    # Number of stored blocks according to the index pointer.
    bnnz = self.indptr[-1]

    if len(self.indices) < bnnz:
        raise ValueError("indices array has too few elements")
    if len(self.data) < bnnz:
        raise ValueError("data array has too few elements")

    # Trim any excess capacity past the last stored block.
    self.data = self.data[:bnnz]
    self.indices = self.indices[:bnnz]
|
||||
|
||||
# utility functions
|
||||
def _binopt(self, other, op, in_shape=None, out_shape=None):
    """Apply the binary operation fn to two sparse matrices."""

    # Ideally we'd take the GCDs of the blocksize dimensions
    # and explode self and other to match.
    other = self.__class__(other, blocksize=self.blocksize)

    # e.g. bsr_plus_bsr, etc.
    fn = getattr(_sparsetools, self.format + op + self.format)

    R,C = self.blocksize

    # Worst case: no overlap between the operands' block patterns.
    max_bnnz = len(self.data) + len(other.data)
    idx_dtype = get_index_dtype((self.indptr, self.indices,
                                 other.indptr, other.indices),
                                maxval=max_bnnz)
    indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
    indices = np.empty(max_bnnz, dtype=idx_dtype)

    # Comparison ops produce boolean output; all others upcast operands.
    bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
    if op in bool_ops:
        data = np.empty(R*C*max_bnnz, dtype=np.bool_)
    else:
        data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))

    fn(self.shape[0]//R, self.shape[1]//C, R, C,
       self.indptr.astype(idx_dtype),
       self.indices.astype(idx_dtype),
       self.data,
       other.indptr.astype(idx_dtype),
       other.indices.astype(idx_dtype),
       np.ravel(other.data),
       indptr,
       indices,
       data)

    # Trim to the blocks actually produced by the kernel.
    actual_bnnz = indptr[-1]
    indices = indices[:actual_bnnz]
    data = data[:R*C*actual_bnnz]

    # Copy when the result is much smaller than the allocation so the
    # oversized buffers can be released.
    if actual_bnnz < max_bnnz/2:
        indices = indices.copy()
        data = data.copy()

    data = data.reshape(-1,R,C)

    return self.__class__((data, indices, indptr), shape=self.shape)
|
||||
|
||||
# needed by _data_matrix
|
||||
def _with_data(self, data, copy=True):
    """Return a matrix with the same sparsity structure as self but
    holding *data*.  By default the structure arrays (.indptr and
    .indices) are copied.
    """
    if copy:
        indices = self.indices.copy()
        indptr = self.indptr.copy()
    else:
        indices = self.indices
        indptr = self.indptr
    return self.__class__((data, indices, indptr),
                          shape=self.shape, dtype=data.dtype)
|
||||
|
||||
# # these functions are used by the parent class
|
||||
# # to remove redudancy between bsc_matrix and bsr_matrix
|
||||
# def _swap(self,x):
|
||||
# """swap the members of x if this is a column-oriented matrix
|
||||
# """
|
||||
# return (x[0],x[1])
|
||||
|
||||
|
||||
def isspmatrix_bsr(x):
    """Is x of a bsr_matrix type?

    Parameters
    ----------
    x
        object to check for being a bsr matrix

    Returns
    -------
    bool
        True if x is a bsr matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(bsr_matrix([[5]]))
    True

    >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(csr_matrix([[5]]))
    False
    """
    # Local import avoids a circular import at module load time.
    from ._arrays import bsr_array
    # Idiomatic single isinstance with a tuple instead of chained `or`.
    return isinstance(x, (bsr_matrix, bsr_array))
|
||||
1318
.CondaPkg/env/Lib/site-packages/scipy/sparse/_compressed.py
vendored
Normal file
1318
.CondaPkg/env/Lib/site-packages/scipy/sparse/_compressed.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
947
.CondaPkg/env/Lib/site-packages/scipy/sparse/_construct.py
vendored
Normal file
947
.CondaPkg/env/Lib/site-packages/scipy/sparse/_construct.py
vendored
Normal file
@@ -0,0 +1,947 @@
|
||||
"""Functions to construct sparse matrices
|
||||
"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
|
||||
'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
|
||||
|
||||
import numbers
|
||||
from functools import partial
|
||||
import numpy as np
|
||||
|
||||
from scipy._lib._util import check_random_state, rng_integers
|
||||
from ._sputils import upcast, get_index_dtype, isscalarlike
|
||||
|
||||
from ._sparsetools import csr_hstack
|
||||
from ._csr import csr_matrix
|
||||
from ._csc import csc_matrix
|
||||
from ._bsr import bsr_matrix
|
||||
from ._coo import coo_matrix
|
||||
from ._dia import dia_matrix
|
||||
|
||||
from ._base import issparse
|
||||
|
||||
|
||||
def spdiags(data, diags, m=None, n=None, format=None):
    """
    Return a sparse matrix from diagonals.

    Parameters
    ----------
    data : array_like
        Matrix diagonals stored row-wise
    diags : sequence of int or an int
        Diagonals to set: k = 0 is the main diagonal, k > 0 the kth
        upper diagonal, k < 0 the kth lower diagonal.
    m, n : int, tuple, optional
        Shape of the result. If `n` is None and `m` is a given tuple,
        the shape is this tuple. If omitted, the matrix is square and
        its shape is len(data[0]).
    format : str, optional
        Format of the result. By default (format=None) an appropriate
        sparse matrix format is returned. This choice is subject to change.

    See Also
    --------
    diags : more convenient form of this function
    dia_matrix : the sparse DIAgonal format.
    """
    # Resolve the output shape: square by default, or unpack an (m, n)
    # tuple passed through the `m` argument.
    if m is None and n is None:
        m = n = len(data[0])
    elif n is None:
        m, n = m
    result = dia_matrix((data, diags), shape=(m, n))
    return result.asformat(format)
|
||||
|
||||
|
||||
def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
    """
    Construct a sparse matrix from diagonals.

    Parameters
    ----------
    diagonals : sequence of array_like
        Sequence of arrays containing the matrix diagonals,
        corresponding to `offsets`.
    offsets : sequence of int or an int, optional
        Diagonals to set: k = 0 is the main diagonal (default),
        k > 0 the kth upper diagonal, k < 0 the kth lower diagonal.
    shape : tuple of int, optional
        Shape of the result. If omitted, a square matrix large enough
        to contain the diagonals is returned.
    format : {"dia", "csr", "csc", "lil", ...}, optional
        Matrix format of the result. By default (format=None) an
        appropriate sparse matrix format is returned. This choice is
        subject to change.
    dtype : dtype, optional
        Data type of the matrix.

    See Also
    --------
    spdiags : construct matrix from diagonals

    Notes
    -----
    Unlike `spdiags`, the result is the sparse equivalent of::

        np.diag(diagonals[0], offsets[0])
        + ...
        + np.diag(diagonals[k], offsets[k])

    Scalar values are broadcast along their diagonal (shape must then
    be given).  Repeated diagonal offsets are disallowed.

    .. versionadded:: 0.11
    """
    # A scalar offset means a single diagonal, in which case `diagonals`
    # may be a bare sequence of values rather than a sequence of arrays.
    if isscalarlike(offsets):
        if len(diagonals) == 0 or isscalarlike(diagonals[0]):
            diagonals = [np.atleast_1d(diagonals)]
        else:
            raise ValueError("Different number of diagonals and offsets.")
    else:
        diagonals = [np.atleast_1d(diag) for diag in diagonals]

    offsets = np.atleast_1d(offsets)

    if len(diagonals) != len(offsets):
        raise ValueError("Different number of diagonals and offsets.")

    # Infer a square shape just large enough for the first diagonal.
    if shape is None:
        m = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (m, m)

    if dtype is None:
        dtype = np.common_type(*diagonals)

    m, n = shape

    # Width of the DIA data array: room for the longest stored diagonal
    # at its column position (DIA stores diagonals aligned by column).
    M = max(min(m + offset, n - offset) + max(0, offset)
            for offset in offsets)
    M = max(0, M)
    data_arr = np.zeros((len(offsets), M), dtype=dtype)

    K = min(m, n)

    for j, diagonal in enumerate(diagonals):
        offset = offsets[j]
        k = max(0, offset)
        length = min(m + offset, n - offset, K)
        if length < 0:
            raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
        try:
            # NumPy broadcasting handles scalar (length-1) diagonals.
            data_arr[j, k:k + length] = diagonal[..., :length]
        except ValueError as e:
            if len(diagonal) != length and len(diagonal) != 1:
                raise ValueError(
                    "Diagonal length (index %d: %d at offset %d) does not "
                    "agree with matrix size (%d, %d)." % (
                        j, len(diagonal), offset, m, n)) from e
            raise

    return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
|
||||
|
||||
|
||||
def identity(n, dtype='d', format=None):
    """Identity matrix in sparse format

    Returns an identity matrix with shape (n,n) using a given
    sparse format and dtype.

    Parameters
    ----------
    n : int
        Shape of the identity matrix.
    dtype : dtype, optional
        Data type of the matrix
    format : str, optional
        Sparse format of the result, e.g., format="csr", etc.

    Examples
    --------
    >>> from scipy.sparse import identity
    >>> identity(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    # The identity is just a square `eye` with the main diagonal set.
    return eye(n, n, dtype=dtype, format=format)
|
||||
|
||||
|
||||
def eye(m, n=None, k=0, dtype=float, format=None):
    """Sparse matrix with ones on diagonal

    Returns a sparse (m x n) matrix where the kth diagonal
    is all ones and everything else is zeros.

    Parameters
    ----------
    m : int
        Number of rows in the matrix.
    n : int, optional
        Number of columns. Default: `m`.
    k : int, optional
        Diagonal to place ones on. Default: 0 (main diagonal).
    dtype : dtype, optional
        Data type of the matrix.
    format : str, optional
        Sparse format of the result, e.g., format="csr", etc.

    Examples
    --------
    >>> from scipy import sparse
    >>> sparse.eye(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    if n is None:
        n = m
    m, n = int(m), int(n)

    # Fast paths: the main diagonal of a square matrix can be built
    # directly in CSR/CSC/COO without going through the DIA format.
    if m == n and k == 0:
        if format in ('csr', 'csc'):
            idx_dtype = get_index_dtype(maxval=n)
            indptr = np.arange(n + 1, dtype=idx_dtype)
            indices = np.arange(n, dtype=idx_dtype)
            data = np.ones(n, dtype=dtype)
            cls = csr_matrix if format == 'csr' else csc_matrix
            return cls((data, indices, indptr), (n, n))
        if format == 'coo':
            idx_dtype = get_index_dtype(maxval=n)
            row = np.arange(n, dtype=idx_dtype)
            col = np.arange(n, dtype=idx_dtype)
            data = np.ones(n, dtype=dtype)
            return coo_matrix((data, (row, col)), (n, n))

    # General case: build the single diagonal in DIA and convert.
    diag = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
    return spdiags(diag, k, m, n).asformat(format)
|
||||
|
||||
|
||||
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ----------
    A : sparse or dense matrix
        first matrix of the product
    B : sparse or dense matrix
        second matrix of the product
    format : str, optional
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker product in a sparse matrix format

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import sparse
    >>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
    >>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
    >>> sparse.kron(A, B).toarray()
    array([[ 0,  0,  2,  4],
           [ 0,  0,  6,  8],
           [ 5, 10,  0,  0],
           [15, 20,  0,  0]])

    >>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
    array([[ 0,  0,  2,  4],
           [ 0,  0,  6,  8],
           [ 5, 10,  0,  0],
           [15, 20,  0,  0]])

    """
    B = coo_matrix(B)

    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
        # B is fairly dense, use BSR
        A = csr_matrix(A,copy=True)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape).asformat(format)

        # Each nonzero A[i, j] becomes the dense block A[i, j] * B.
        B = B.toarray()
        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
        data = data * B

        return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
    else:
        # use COO
        A = coo_matrix(A)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape).asformat(format)

        # expand entries of a into blocks
        row = A.row.repeat(B.nnz)
        col = A.col.repeat(B.nnz)
        data = A.data.repeat(B.nnz)

        # Widen indices before scaling to avoid int32 overflow.
        if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max:
            row = row.astype(np.int64)
            col = col.astype(np.int64)

        row *= B.shape[0]
        col *= B.shape[1]

        # increment block indices
        row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
        row += B.row
        col += B.col
        row,col = row.reshape(-1),col.reshape(-1)

        # compute block entries
        data = data.reshape(-1,B.nnz) * B.data
        data = data.reshape(-1)

        return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
|
||||
|
||||
|
||||
def kronsum(A, B, format=None):
    """kronecker sum of sparse matrices A and B

    Kronecker sum of two sparse matrices is a sum of two Kronecker
    products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
    and B has shape (n,n) and I_m and I_n are identity matrices
    of shape (m,m) and (n,n), respectively.

    Parameters
    ----------
    A
        square matrix
    B
        square matrix
    format : str
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker sum in a sparse matrix format
    """
    A = coo_matrix(A)
    B = coo_matrix(B)

    # The Kronecker sum is only defined for square operands.
    if A.shape[0] != A.shape[1]:
        raise ValueError('A is not square')
    if B.shape[0] != B.shape[1]:
        raise ValueError('B is not square')

    dtype = upcast(A.dtype, B.dtype)

    left = kron(eye(B.shape[0], dtype=dtype), A, format=format)
    right = kron(B, eye(A.shape[0], dtype=dtype), format=format)

    # left + right may produce a different format, so convert explicitly.
    return (left + right).asformat(format)
|
||||
|
||||
|
||||
def _compressed_sparse_stack(blocks, axis):
    """
    Stacking fast path for CSR/CSC matrices
    (i) vstack for CSR, (ii) hstack for CSC.
    """
    other_axis = 1 if axis == 0 else 0
    data = np.concatenate([b.data for b in blocks])
    # All blocks must agree on the non-stacking dimension.
    constant_dim = blocks[0].shape[other_axis]
    idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
                                maxval=max(data.size, constant_dim))
    indices = np.empty(data.size, dtype=idx_dtype)
    indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
    last_indptr = idx_dtype(0)
    sum_dim = 0        # rows/cols written so far along the stacking axis
    sum_indices = 0    # index entries written so far
    for b in blocks:
        if b.shape[other_axis] != constant_dim:
            raise ValueError(f'incompatible dimensions for axis {other_axis}')
        indices[sum_indices:sum_indices+b.indices.size] = b.indices
        sum_indices += b.indices.size
        # Shift this block's indptr by the running entry count.
        idxs = slice(sum_dim, sum_dim + b.shape[axis])
        indptr[idxs] = b.indptr[:-1]
        indptr[idxs] += last_indptr
        sum_dim += b.shape[axis]
        last_indptr += b.indptr[-1]
    indptr[-1] = last_indptr
    if axis == 0:
        return csr_matrix((data, indices, indptr),
                          shape=(sum_dim, constant_dim))
    else:
        return csc_matrix((data, indices, indptr),
                          shape=(constant_dim, sum_dim))
|
||||
|
||||
|
||||
def _stack_along_minor_axis(blocks, axis):
    """
    Stacking fast path for CSR/CSC matrices along the minor axis
    (i) hstack for CSR, (ii) vstack for CSC.
    """
    n_blocks = len(blocks)
    if n_blocks == 0:
        raise ValueError('Missing block matrices')

    if n_blocks == 1:
        return blocks[0]

    # check for incompatible dimensions
    other_axis = 1 if axis == 0 else 0
    other_axis_dims = set(b.shape[other_axis] for b in blocks)
    if len(other_axis_dims) > 1:
        raise ValueError(f'Mismatching dimensions along axis {other_axis}: '
                         f'{other_axis_dims}')
    constant_dim, = other_axis_dims

    # Do the stacking
    indptr_list = [b.indptr for b in blocks]
    data_cat = np.concatenate([b.data for b in blocks])

    # Need to check if any indices/indptr, would be too large post-
    # concatenation for np.int32:
    # - The max value of indices is the output array's stacking-axis length - 1
    # - The max value in indptr is the number of non-zero entries. This is
    #   exceedingly unlikely to require int64, but is checked out of an
    #   abundance of caution.
    sum_dim = sum(b.shape[axis] for b in blocks)
    nnz = sum(len(b.indices) for b in blocks)
    idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz))
    stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype)
    if data_cat.size > 0:
        indptr_cat = np.concatenate(indptr_list).astype(idx_dtype)
        indices_cat = (np.concatenate([b.indices for b in blocks])
                       .astype(idx_dtype))
        indptr = np.empty(constant_dim + 1, dtype=idx_dtype)
        indices = np.empty_like(indices_cat)
        data = np.empty_like(data_cat)
        # C routine interleaves the blocks' rows (CSR) / columns (CSC).
        csr_hstack(n_blocks, constant_dim, stack_dim_cat,
                   indptr_cat, indices_cat, data_cat,
                   indptr, indices, data)
    else:
        # All blocks empty: the result has no stored entries.
        indptr = np.zeros(constant_dim + 1, dtype=idx_dtype)
        indices = np.empty(0, dtype=idx_dtype)
        data = np.empty(0, dtype=data_cat.dtype)

    if axis == 0:
        return csc_matrix((data, indices, indptr),
                          shape=(sum_dim, constant_dim))
    else:
        return csr_matrix((data, indices, indptr),
                          shape=(constant_dim, sum_dim))
|
||||
|
||||
|
||||
def hstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices horizontally (column wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : str
        sparse format of the result (e.g., "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    See Also
    --------
    vstack : stack sparse matrices vertically (row wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, hstack
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> hstack([A,B]).toarray()
    array([[1, 2, 5],
           [3, 4, 6]])

    """
    # Horizontal stacking is a block matrix with one row of blocks.
    row_of_blocks = [list(blocks)]
    return bmat(row_of_blocks, format=format, dtype=dtype)
|
||||
|
||||
|
||||
def vstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices vertically (row wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : str, optional
        sparse format of the result (e.g., "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    See Also
    --------
    hstack : stack sparse matrices horizontally (column wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, vstack
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5, 6]])
    >>> vstack([A, B]).toarray()
    array([[1, 2],
           [3, 4],
           [5, 6]])

    """
    # Vertical stacking is block assembly with one block per grid row.
    grid = [[blk] for blk in blocks]
    return bmat(grid, format=format, dtype=dtype)
|
||||
|
||||
|
||||
def bmat(blocks, format=None, dtype=None):
    """
    Build a sparse matrix from sparse sub-blocks

    Parameters
    ----------
    blocks : array_like
        Grid of sparse matrices with compatible shapes.
        An entry of None implies an all-zero matrix.
    format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
        The sparse format of the result (e.g. "csr"). By default an
        appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    bmat : sparse matrix

    See Also
    --------
    block_diag, diags

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, bmat
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> C = coo_matrix([[7]])
    >>> bmat([[A, B], [None, C]]).toarray()
    array([[1, 2, 5],
           [3, 4, 6],
           [0, 0, 7]])

    >>> bmat([[A, None], [None, C]]).toarray()
    array([[1, 2, 0],
           [3, 4, 0],
           [0, 0, 7]])

    """

    # 2-D object array: blocks[i, j] is the sub-matrix in grid cell (i, j).
    blocks = np.asarray(blocks, dtype='object')

    if blocks.ndim != 2:
        raise ValueError('blocks must be 2-D')

    M,N = blocks.shape

    # check for fast path cases
    # If every block is already CSR (resp. CSC) and the caller either did
    # not request a format or requested that same one, the stacking can be
    # done directly on the compressed representation, skipping the generic
    # COO-based assembly below.
    if (format in (None, 'csr') and all(isinstance(b, csr_matrix)
                                        for b in blocks.flat)):
        if N > 1:
            # stack along columns (axis 1):
            blocks = [[_stack_along_minor_axis(blocks[b, :], 1)]
                      for b in range(M)] # must have shape: (M, 1)
            blocks = np.asarray(blocks, dtype='object')

        # stack along rows (axis 0):
        A = _compressed_sparse_stack(blocks[:, 0], 0)
        if dtype is not None:
            A = A.astype(dtype)
        return A
    elif (format in (None, 'csc') and all(isinstance(b, csc_matrix)
                                          for b in blocks.flat)):
        if M > 1:
            # stack along rows (axis 0):
            blocks = [[_stack_along_minor_axis(blocks[:, b], 0)
                       for b in range(N)]] # must have shape: (1, N)
            blocks = np.asarray(blocks, dtype='object')

        # stack along columns (axis 1):
        A = _compressed_sparse_stack(blocks[0, :], 1)
        if dtype is not None:
            A = A.astype(dtype)
        return A

    # Generic path: convert blocks to COO and splice their triplets
    # together with per-row/per-column offsets.
    block_mask = np.zeros(blocks.shape, dtype=bool)
    # Row height of each grid row / column width of each grid column;
    # 0 means "not yet determined" (grid lines made only of None keep 0,
    # i.e. contribute no rows/columns to the result).
    brow_lengths = np.zeros(M, dtype=np.int64)
    bcol_lengths = np.zeros(N, dtype=np.int64)

    # convert everything to COO format
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = coo_matrix(blocks[i,j])
                blocks[i,j] = A
                block_mask[i,j] = True

                # First block seen in this grid row fixes the row height;
                # later blocks must agree.
                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                elif brow_lengths[i] != A.shape[0]:
                    msg = (f'blocks[{i},:] has incompatible row dimensions. '
                           f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, '
                           f'expected {brow_lengths[i]}.')
                    raise ValueError(msg)

                # Same consistency check for the grid column width.
                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                elif bcol_lengths[j] != A.shape[1]:
                    msg = (f'blocks[:,{j}] has incompatible column '
                           f'dimensions. '
                           f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, '
                           f'expected {bcol_lengths[j]}.')
                    raise ValueError(msg)

    nnz = sum(block.nnz for block in blocks[block_mask])
    if dtype is None:
        all_dtypes = [blk.dtype for blk in blocks[block_mask]]
        dtype = upcast(*all_dtypes) if all_dtypes else None

    # Cumulative offsets of each grid row/column in the result matrix.
    row_offsets = np.append(0, np.cumsum(brow_lengths))
    col_offsets = np.append(0, np.cumsum(bcol_lengths))

    shape = (row_offsets[-1], col_offsets[-1])

    data = np.empty(nnz, dtype=dtype)
    idx_dtype = get_index_dtype(maxval=max(shape))
    row = np.empty(nnz, dtype=idx_dtype)
    col = np.empty(nnz, dtype=idx_dtype)

    # Fill data/row/col by walking the non-None blocks in C order,
    # shifting each block's indices by its grid offsets.
    nnz = 0
    ii, jj = np.nonzero(block_mask)
    for i, j in zip(ii, jj):
        B = blocks[i, j]
        idx = slice(nnz, nnz + B.nnz)
        data[idx] = B.data
        # np.add with out= writes the shifted indices in place, in the
        # (possibly wider) index dtype of the result.
        np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype)
        np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype)
        nnz += B.nnz

    return coo_matrix((data, (row, col)), shape=shape).asformat(format)
|
||||
|
||||
|
||||
def block_diag(mats, format=None, dtype=None):
    """
    Build a block diagonal sparse matrix from provided matrices.

    Parameters
    ----------
    mats : sequence of matrices
        Input matrices.
    format : str, optional
        The sparse format of the result (e.g., "csr"). If not given, the matrix
        is returned in "coo" format.
    dtype : dtype specifier, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    res : sparse matrix

    Notes
    -----

    .. versionadded:: 0.11.0

    See Also
    --------
    bmat, diags

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, block_diag
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> C = coo_matrix([[7]])
    >>> block_diag((A, B, C)).toarray()
    array([[1, 2, 0, 0],
           [3, 4, 0, 0],
           [0, 0, 5, 0],
           [0, 0, 6, 0],
           [0, 0, 0, 7]])

    """
    # Collect the COO triplets of every block, shifted so each block
    # starts where the previous one ended along the diagonal.
    rows = []
    cols = []
    values = []
    row_offset = 0
    col_offset = 0
    for block in mats:
        # Lists and bare scalars are promoted to sparse first so that
        # .shape is available below.
        if isinstance(block, (list, numbers.Number)):
            block = coo_matrix(block)
        nr, nc = block.shape
        if issparse(block):
            block = block.tocoo()
            rows.append(block.row + row_offset)
            cols.append(block.col + col_offset)
            values.append(block.data)
        else:
            # Dense input: emit every position (including explicit zeros),
            # decoding C-order flat indices into (row, col) pairs.
            flat = np.arange(nr * nc)
            rows.append(flat // nc + row_offset)
            cols.append(flat % nc + col_offset)
            values.append(block.ravel())
        row_offset += nr
        col_offset += nc
    coo = coo_matrix((np.concatenate(values),
                      (np.concatenate(rows), np.concatenate(cols))),
                     shape=(row_offset, col_offset),
                     dtype=dtype)
    return coo.asformat(format)
|
||||
|
||||
|
||||
def random(m, n, density=0.01, format='coo', dtype=None,
           random_state=None, data_rvs=None):
    """Generate a sparse matrix of the given shape and density with randomly
    distributed values.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real, optional
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str, optional
        sparse matrix format.
    dtype : dtype, optional
        type of the returned matrix values.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        This random state will be used
        for sampling the sparsity structure, but not necessarily for sampling
        the values of the structurally nonzero entries of the matrix.
    data_rvs : callable, optional
        Samples a requested number of random values.
        This function should take a single argument specifying the length
        of the ndarray that it will return. The structurally nonzero entries
        of the sparse random matrix will be taken from the array sampled
        by this function. By default, uniform [0, 1) random values will be
        sampled using the same random state as is used for sampling
        the sparsity structure.

    Returns
    -------
    res : sparse matrix

    Notes
    -----
    Only float types are supported for now.

    Examples
    --------
    >>> from scipy.sparse import random
    >>> from scipy import stats
    >>> from numpy.random import default_rng
    >>> rng = default_rng()
    >>> rvs = stats.poisson(25, loc=10).rvs
    >>> S = random(3, 4, density=0.25, random_state=rng, data_rvs=rvs)
    >>> S.A
    array([[ 36.,   0.,  33.,   0.],   # random
           [  0.,   0.,   0.,   0.],
           [  0.,   0.,  36.,   0.]])

    >>> from scipy.sparse import random
    >>> from scipy.stats import rv_continuous
    >>> class CustomDistribution(rv_continuous):
    ...     def _rvs(self,  size=None, random_state=None):
    ...         return random_state.standard_normal(size)
    >>> X = CustomDistribution(seed=rng)
    >>> Y = X()  # get a frozen version of the distribution
    >>> S = random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs)
    >>> S.A
    array([[ 0.        ,  0.        ,  0.        ,  0.        ],   # random
           [ 0.13569738,  1.9467163 , -0.81205367,  0.        ],
           [ 0.        ,  0.        ,  0.        ,  0.        ]])

    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    dtype = np.dtype(dtype)

    mn = m * n

    # Pick an index dtype wide enough for the flat index space [0, m*n).
    tp = np.intc
    if mn > np.iinfo(tp).max:
        tp = np.int64

    if mn > np.iinfo(tp).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(tp).max)

    # Number of non zero values
    k = int(round(density * m * n))

    random_state = check_random_state(random_state)

    if data_rvs is None:
        if np.issubdtype(dtype, np.integer):
            # Integer output: sample uniformly over the dtype's full range.
            def data_rvs(n):
                return rng_integers(random_state,
                                    np.iinfo(dtype).min,
                                    np.iinfo(dtype).max,
                                    n,
                                    dtype=dtype)
        elif np.issubdtype(dtype, np.complexfloating):
            # Complex output: independent uniform real and imaginary parts.
            def data_rvs(n):
                return (random_state.uniform(size=n) +
                        random_state.uniform(size=n) * 1j)
        else:
            data_rvs = partial(random_state.uniform, 0., 1.)

    # Sample k distinct flat positions out of the m*n cells.
    ind = random_state.choice(mn, size=k, replace=False)

    # Decode column-major flat indices: ind == j*m + i.  Exact integer
    # divmod replaces the previous float expression
    # ``np.floor(ind * 1. / m)``, which loses precision once indices
    # exceed 2**53 and could then map a sample to the wrong (i, j) cell.
    j, i = np.divmod(ind, m)
    j = j.astype(tp, copy=False)
    i = i.astype(tp, copy=False)
    vals = data_rvs(k).astype(dtype, copy=False)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format,
                                                             copy=False)
|
||||
|
||||
|
||||
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    """Generate a sparse matrix of the given shape and density with uniformly
    distributed values.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real, optional
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str, optional
        sparse matrix format.
    dtype : dtype, optional
        type of the returned matrix values.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    res : sparse matrix

    Notes
    -----
    Only float types are supported for now.

    See Also
    --------
    scipy.sparse.random : Similar function that allows a user-specified random
        data source.

    Examples
    --------
    >>> from scipy.sparse import rand
    >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
    >>> matrix
    <3x4 sparse matrix of type '<class 'numpy.float64'>'
       with 3 stored elements in Compressed Sparse Row format>
    >>> matrix.toarray()
    array([[0.05641158, 0.        , 0.        , 0.65088847],
           [0.        , 0.        , 0.        , 0.14286682],
           [0.        , 0.        , 0.        , 0.        ]])

    """
    # Convenience alias: `random` with its default uniform data sampler.
    return random(m, n, density=density, format=format,
                  dtype=dtype, random_state=random_state)
|
||||
614
.CondaPkg/env/Lib/site-packages/scipy/sparse/_coo.py
vendored
Normal file
614
.CondaPkg/env/Lib/site-packages/scipy/sparse/_coo.py
vendored
Normal file
@@ -0,0 +1,614 @@
|
||||
""" A sparse matrix in COOrdinate or 'triplet' format"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['coo_matrix', 'isspmatrix_coo']
|
||||
|
||||
from warnings import warn
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
|
||||
from ._base import isspmatrix, SparseEfficiencyWarning, spmatrix
|
||||
from ._data import _data_matrix, _minmax_mixin
|
||||
from ._sputils import (upcast, upcast_char, to_native, isshape, getdtype,
|
||||
getdata, get_index_dtype, downcast_intp_index,
|
||||
check_shape, check_reshape_kwargs)
|
||||
|
||||
import operator
|
||||
|
||||
|
||||
class coo_matrix(_data_matrix, _minmax_mixin):
|
||||
"""
|
||||
A sparse matrix in COOrdinate format.
|
||||
|
||||
Also known as the 'ijv' or 'triplet' format.
|
||||
|
||||
This can be instantiated in several ways:
|
||||
coo_matrix(D)
|
||||
with a dense matrix D
|
||||
|
||||
coo_matrix(S)
|
||||
with another sparse matrix S (equivalent to S.tocoo())
|
||||
|
||||
coo_matrix((M, N), [dtype])
|
||||
to construct an empty matrix with shape (M, N)
|
||||
dtype is optional, defaulting to dtype='d'.
|
||||
|
||||
coo_matrix((data, (i, j)), [shape=(M, N)])
|
||||
to construct from three arrays:
|
||||
1. data[:] the entries of the matrix, in any order
|
||||
2. i[:] the row indices of the matrix entries
|
||||
3. j[:] the column indices of the matrix entries
|
||||
|
||||
Where ``A[i[k], j[k]] = data[k]``. When shape is not
|
||||
specified, it is inferred from the index arrays
|
||||
|
||||
Attributes
|
||||
----------
|
||||
dtype : dtype
|
||||
Data type of the matrix
|
||||
shape : 2-tuple
|
||||
Shape of the matrix
|
||||
ndim : int
|
||||
Number of dimensions (this is always 2)
|
||||
nnz
|
||||
Number of stored values, including explicit zeros
|
||||
data
|
||||
COO format data array of the matrix
|
||||
row
|
||||
COO format row index array of the matrix
|
||||
col
|
||||
COO format column index array of the matrix
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
Sparse matrices can be used in arithmetic operations: they support
|
||||
addition, subtraction, multiplication, division, and matrix power.
|
||||
|
||||
Advantages of the COO format
|
||||
- facilitates fast conversion among sparse formats
|
||||
- permits duplicate entries (see example)
|
||||
- very fast conversion to and from CSR/CSC formats
|
||||
|
||||
Disadvantages of the COO format
|
||||
- does not directly support:
|
||||
+ arithmetic operations
|
||||
+ slicing
|
||||
|
||||
Intended Usage
|
||||
- COO is a fast format for constructing sparse matrices
|
||||
- Once a matrix has been constructed, convert to CSR or
|
||||
CSC format for fast arithmetic and matrix vector operations
|
||||
- By default when converting to CSR or CSC format, duplicate (i,j)
|
||||
entries will be summed together. This facilitates efficient
|
||||
construction of finite element matrices and the like. (see example)
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
>>> # Constructing an empty matrix
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import coo_matrix
|
||||
>>> coo_matrix((3, 4), dtype=np.int8).toarray()
|
||||
array([[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]], dtype=int8)
|
||||
|
||||
>>> # Constructing a matrix using ijv format
|
||||
>>> row = np.array([0, 3, 1, 0])
|
||||
>>> col = np.array([0, 3, 1, 2])
|
||||
>>> data = np.array([4, 5, 7, 9])
|
||||
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
|
||||
array([[4, 0, 9, 0],
|
||||
[0, 7, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 5]])
|
||||
|
||||
>>> # Constructing a matrix with duplicate indices
|
||||
>>> row = np.array([0, 0, 1, 3, 1, 0, 0])
|
||||
>>> col = np.array([0, 2, 1, 3, 1, 0, 0])
|
||||
>>> data = np.array([1, 1, 1, 1, 1, 1, 1])
|
||||
>>> coo = coo_matrix((data, (row, col)), shape=(4, 4))
|
||||
>>> # Duplicate indices are maintained until implicitly or explicitly summed
|
||||
>>> np.max(coo.data)
|
||||
1
|
||||
>>> coo.toarray()
|
||||
array([[3, 0, 1, 0],
|
||||
[0, 2, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 1]])
|
||||
|
||||
"""
|
||||
format = 'coo'
|
||||
|
||||
def __init__(self, arg1, shape=None, dtype=None, copy=False):
|
||||
_data_matrix.__init__(self)
|
||||
|
||||
if isinstance(arg1, tuple):
|
||||
if isshape(arg1):
|
||||
M, N = arg1
|
||||
self._shape = check_shape((M, N))
|
||||
idx_dtype = get_index_dtype(maxval=max(M, N))
|
||||
data_dtype = getdtype(dtype, default=float)
|
||||
self.row = np.array([], dtype=idx_dtype)
|
||||
self.col = np.array([], dtype=idx_dtype)
|
||||
self.data = np.array([], dtype=data_dtype)
|
||||
self.has_canonical_format = True
|
||||
else:
|
||||
try:
|
||||
obj, (row, col) = arg1
|
||||
except (TypeError, ValueError) as e:
|
||||
raise TypeError('invalid input format') from e
|
||||
|
||||
if shape is None:
|
||||
if len(row) == 0 or len(col) == 0:
|
||||
raise ValueError('cannot infer dimensions from zero '
|
||||
'sized index arrays')
|
||||
M = operator.index(np.max(row)) + 1
|
||||
N = operator.index(np.max(col)) + 1
|
||||
self._shape = check_shape((M, N))
|
||||
else:
|
||||
# Use 2 steps to ensure shape has length 2.
|
||||
M, N = shape
|
||||
self._shape = check_shape((M, N))
|
||||
|
||||
idx_dtype = get_index_dtype(maxval=max(self.shape))
|
||||
self.row = np.array(row, copy=copy, dtype=idx_dtype)
|
||||
self.col = np.array(col, copy=copy, dtype=idx_dtype)
|
||||
self.data = getdata(obj, copy=copy, dtype=dtype)
|
||||
self.has_canonical_format = False
|
||||
else:
|
||||
if isspmatrix(arg1):
|
||||
if isspmatrix_coo(arg1) and copy:
|
||||
self.row = arg1.row.copy()
|
||||
self.col = arg1.col.copy()
|
||||
self.data = arg1.data.copy()
|
||||
self._shape = check_shape(arg1.shape)
|
||||
else:
|
||||
coo = arg1.tocoo()
|
||||
self.row = coo.row
|
||||
self.col = coo.col
|
||||
self.data = coo.data
|
||||
self._shape = check_shape(coo.shape)
|
||||
self.has_canonical_format = False
|
||||
else:
|
||||
#dense argument
|
||||
M = np.atleast_2d(np.asarray(arg1))
|
||||
|
||||
if M.ndim != 2:
|
||||
raise TypeError('expected dimension <= 2 array or matrix')
|
||||
|
||||
self._shape = check_shape(M.shape)
|
||||
if shape is not None:
|
||||
if check_shape(shape) != self._shape:
|
||||
raise ValueError('inconsistent shapes: %s != %s' %
|
||||
(shape, self._shape))
|
||||
|
||||
self.row, self.col = M.nonzero()
|
||||
self.data = M[self.row, self.col]
|
||||
self.has_canonical_format = True
|
||||
|
||||
if dtype is not None:
|
||||
self.data = self.data.astype(dtype, copy=False)
|
||||
|
||||
self._check()
|
||||
|
||||
def reshape(self, *args, **kwargs):
|
||||
shape = check_shape(args, self.shape)
|
||||
order, copy = check_reshape_kwargs(kwargs)
|
||||
|
||||
# Return early if reshape is not required
|
||||
if shape == self.shape:
|
||||
if copy:
|
||||
return self.copy()
|
||||
else:
|
||||
return self
|
||||
|
||||
nrows, ncols = self.shape
|
||||
|
||||
if order == 'C':
|
||||
# Upcast to avoid overflows: the coo_matrix constructor
|
||||
# below will downcast the results to a smaller dtype, if
|
||||
# possible.
|
||||
dtype = get_index_dtype(maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1)))
|
||||
|
||||
flat_indices = np.multiply(ncols, self.row, dtype=dtype) + self.col
|
||||
new_row, new_col = divmod(flat_indices, shape[1])
|
||||
elif order == 'F':
|
||||
dtype = get_index_dtype(maxval=(nrows * max(0, ncols - 1) + max(0, nrows - 1)))
|
||||
|
||||
flat_indices = np.multiply(nrows, self.col, dtype=dtype) + self.row
|
||||
new_col, new_row = divmod(flat_indices, shape[0])
|
||||
else:
|
||||
raise ValueError("'order' must be 'C' or 'F'")
|
||||
|
||||
# Handle copy here rather than passing on to the constructor so that no
|
||||
# copy will be made of new_row and new_col regardless
|
||||
if copy:
|
||||
new_data = self.data.copy()
|
||||
else:
|
||||
new_data = self.data
|
||||
|
||||
return self.__class__((new_data, (new_row, new_col)),
|
||||
shape=shape, copy=False)
|
||||
|
||||
reshape.__doc__ = spmatrix.reshape.__doc__
|
||||
|
||||
def getnnz(self, axis=None):
|
||||
if axis is None:
|
||||
nnz = len(self.data)
|
||||
if nnz != len(self.row) or nnz != len(self.col):
|
||||
raise ValueError('row, column, and data array must all be the '
|
||||
'same length')
|
||||
|
||||
if self.data.ndim != 1 or self.row.ndim != 1 or \
|
||||
self.col.ndim != 1:
|
||||
raise ValueError('row, column, and data arrays must be 1-D')
|
||||
|
||||
return int(nnz)
|
||||
|
||||
if axis < 0:
|
||||
axis += 2
|
||||
if axis == 0:
|
||||
return np.bincount(downcast_intp_index(self.col),
|
||||
minlength=self.shape[1])
|
||||
elif axis == 1:
|
||||
return np.bincount(downcast_intp_index(self.row),
|
||||
minlength=self.shape[0])
|
||||
else:
|
||||
raise ValueError('axis out of bounds')
|
||||
|
||||
getnnz.__doc__ = spmatrix.getnnz.__doc__
|
||||
|
||||
def _check(self):
|
||||
""" Checks data structure for consistency """
|
||||
|
||||
# index arrays should have integer data types
|
||||
if self.row.dtype.kind != 'i':
|
||||
warn("row index array has non-integer dtype (%s) "
|
||||
% self.row.dtype.name)
|
||||
if self.col.dtype.kind != 'i':
|
||||
warn("col index array has non-integer dtype (%s) "
|
||||
% self.col.dtype.name)
|
||||
|
||||
idx_dtype = get_index_dtype(maxval=max(self.shape))
|
||||
self.row = np.asarray(self.row, dtype=idx_dtype)
|
||||
self.col = np.asarray(self.col, dtype=idx_dtype)
|
||||
self.data = to_native(self.data)
|
||||
|
||||
if self.nnz > 0:
|
||||
if self.row.max() >= self.shape[0]:
|
||||
raise ValueError('row index exceeds matrix dimensions')
|
||||
if self.col.max() >= self.shape[1]:
|
||||
raise ValueError('column index exceeds matrix dimensions')
|
||||
if self.row.min() < 0:
|
||||
raise ValueError('negative row index found')
|
||||
if self.col.min() < 0:
|
||||
raise ValueError('negative column index found')
|
||||
|
||||
def transpose(self, axes=None, copy=False):
|
||||
if axes is not None:
|
||||
raise ValueError(("Sparse matrices do not support "
|
||||
"an 'axes' parameter because swapping "
|
||||
"dimensions is the only logical permutation."))
|
||||
|
||||
M, N = self.shape
|
||||
return self.__class__((self.data, (self.col, self.row)),
|
||||
shape=(N, M), copy=copy)
|
||||
|
||||
transpose.__doc__ = spmatrix.transpose.__doc__
|
||||
|
||||
def resize(self, *shape):
|
||||
shape = check_shape(shape)
|
||||
new_M, new_N = shape
|
||||
M, N = self.shape
|
||||
|
||||
if new_M < M or new_N < N:
|
||||
mask = np.logical_and(self.row < new_M, self.col < new_N)
|
||||
if not mask.all():
|
||||
self.row = self.row[mask]
|
||||
self.col = self.col[mask]
|
||||
self.data = self.data[mask]
|
||||
|
||||
self._shape = shape
|
||||
|
||||
resize.__doc__ = spmatrix.resize.__doc__
|
||||
|
||||
def toarray(self, order=None, out=None):
|
||||
"""See the docstring for `spmatrix.toarray`."""
|
||||
B = self._process_toarray_args(order, out)
|
||||
fortran = int(B.flags.f_contiguous)
|
||||
if not fortran and not B.flags.c_contiguous:
|
||||
raise ValueError("Output array must be C or F contiguous")
|
||||
M,N = self.shape
|
||||
coo_todense(M, N, self.nnz, self.row, self.col, self.data,
|
||||
B.ravel('A'), fortran)
|
||||
return B
|
||||
|
||||
def tocsc(self, copy=False):
|
||||
"""Convert this matrix to Compressed Sparse Column format
|
||||
|
||||
Duplicate entries will be summed together.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy import array
|
||||
>>> from scipy.sparse import coo_matrix
|
||||
>>> row = array([0, 0, 1, 3, 1, 0, 0])
|
||||
>>> col = array([0, 2, 1, 3, 1, 0, 0])
|
||||
>>> data = array([1, 1, 1, 1, 1, 1, 1])
|
||||
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
|
||||
>>> A.toarray()
|
||||
array([[3, 0, 1, 0],
|
||||
[0, 2, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 1]])
|
||||
|
||||
"""
|
||||
if self.nnz == 0:
|
||||
return self._csc_container(self.shape, dtype=self.dtype)
|
||||
else:
|
||||
M,N = self.shape
|
||||
idx_dtype = get_index_dtype((self.col, self.row),
|
||||
maxval=max(self.nnz, M))
|
||||
row = self.row.astype(idx_dtype, copy=False)
|
||||
col = self.col.astype(idx_dtype, copy=False)
|
||||
|
||||
indptr = np.empty(N + 1, dtype=idx_dtype)
|
||||
indices = np.empty_like(row, dtype=idx_dtype)
|
||||
data = np.empty_like(self.data, dtype=upcast(self.dtype))
|
||||
|
||||
coo_tocsr(N, M, self.nnz, col, row, self.data,
|
||||
indptr, indices, data)
|
||||
|
||||
x = self._csc_container((data, indices, indptr), shape=self.shape)
|
||||
if not self.has_canonical_format:
|
||||
x.sum_duplicates()
|
||||
return x
|
||||
|
||||
def tocsr(self, copy=False):
|
||||
"""Convert this matrix to Compressed Sparse Row format
|
||||
|
||||
Duplicate entries will be summed together.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from numpy import array
|
||||
>>> from scipy.sparse import coo_matrix
|
||||
>>> row = array([0, 0, 1, 3, 1, 0, 0])
|
||||
>>> col = array([0, 2, 1, 3, 1, 0, 0])
|
||||
>>> data = array([1, 1, 1, 1, 1, 1, 1])
|
||||
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
|
||||
>>> A.toarray()
|
||||
array([[3, 0, 1, 0],
|
||||
[0, 2, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 1]])
|
||||
|
||||
"""
|
||||
if self.nnz == 0:
|
||||
return self._csr_container(self.shape, dtype=self.dtype)
|
||||
else:
|
||||
M,N = self.shape
|
||||
idx_dtype = get_index_dtype((self.row, self.col),
|
||||
maxval=max(self.nnz, N))
|
||||
row = self.row.astype(idx_dtype, copy=False)
|
||||
col = self.col.astype(idx_dtype, copy=False)
|
||||
|
||||
indptr = np.empty(M + 1, dtype=idx_dtype)
|
||||
indices = np.empty_like(col, dtype=idx_dtype)
|
||||
data = np.empty_like(self.data, dtype=upcast(self.dtype))
|
||||
|
||||
coo_tocsr(M, N, self.nnz, row, col, self.data,
|
||||
indptr, indices, data)
|
||||
|
||||
x = self._csr_container((data, indices, indptr), shape=self.shape)
|
||||
if not self.has_canonical_format:
|
||||
x.sum_duplicates()
|
||||
return x
|
||||
|
||||
def tocoo(self, copy=False):
|
||||
if copy:
|
||||
return self.copy()
|
||||
else:
|
||||
return self
|
||||
|
||||
tocoo.__doc__ = spmatrix.tocoo.__doc__
|
||||
|
||||
def todia(self, copy=False):
|
||||
self.sum_duplicates()
|
||||
ks = self.col - self.row # the diagonal for each nonzero
|
||||
diags, diag_idx = np.unique(ks, return_inverse=True)
|
||||
|
||||
if len(diags) > 100:
|
||||
# probably undesired, should todia() have a maxdiags parameter?
|
||||
warn("Constructing a DIA matrix with %d diagonals "
|
||||
"is inefficient" % len(diags), SparseEfficiencyWarning)
|
||||
|
||||
#initialize and fill in data array
|
||||
if self.data.size == 0:
|
||||
data = np.zeros((0, 0), dtype=self.dtype)
|
||||
else:
|
||||
data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
|
||||
data[diag_idx, self.col] = self.data
|
||||
|
||||
return self._dia_container((data, diags), shape=self.shape)
|
||||
|
||||
todia.__doc__ = spmatrix.todia.__doc__
|
||||
|
||||
def todok(self, copy=False):
|
||||
self.sum_duplicates()
|
||||
dok = self._dok_container((self.shape), dtype=self.dtype)
|
||||
dok._update(zip(zip(self.row,self.col),self.data))
|
||||
|
||||
return dok
|
||||
|
||||
todok.__doc__ = spmatrix.todok.__doc__
|
||||
|
||||
def diagonal(self, k=0):
|
||||
rows, cols = self.shape
|
||||
if k <= -rows or k >= cols:
|
||||
return np.empty(0, dtype=self.data.dtype)
|
||||
diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
|
||||
dtype=self.dtype)
|
||||
diag_mask = (self.row + k) == self.col
|
||||
|
||||
if self.has_canonical_format:
|
||||
row = self.row[diag_mask]
|
||||
data = self.data[diag_mask]
|
||||
else:
|
||||
row, _, data = self._sum_duplicates(self.row[diag_mask],
|
||||
self.col[diag_mask],
|
||||
self.data[diag_mask])
|
||||
diag[row + min(k, 0)] = data
|
||||
|
||||
return diag
|
||||
|
||||
diagonal.__doc__ = _data_matrix.diagonal.__doc__
|
||||
|
||||
def _setdiag(self, values, k):
|
||||
M, N = self.shape
|
||||
if values.ndim and not len(values):
|
||||
return
|
||||
idx_dtype = self.row.dtype
|
||||
|
||||
# Determine which triples to keep and where to put the new ones.
|
||||
full_keep = self.col - self.row != k
|
||||
if k < 0:
|
||||
max_index = min(M+k, N)
|
||||
if values.ndim:
|
||||
max_index = min(max_index, len(values))
|
||||
keep = np.logical_or(full_keep, self.col >= max_index)
|
||||
new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
|
||||
new_col = np.arange(max_index, dtype=idx_dtype)
|
||||
else:
|
||||
max_index = min(M, N-k)
|
||||
if values.ndim:
|
||||
max_index = min(max_index, len(values))
|
||||
keep = np.logical_or(full_keep, self.row >= max_index)
|
||||
new_row = np.arange(max_index, dtype=idx_dtype)
|
||||
new_col = np.arange(k, k + max_index, dtype=idx_dtype)
|
||||
|
||||
# Define the array of data consisting of the entries to be added.
|
||||
if values.ndim:
|
||||
new_data = values[:max_index]
|
||||
else:
|
||||
new_data = np.empty(max_index, dtype=self.dtype)
|
||||
new_data[:] = values
|
||||
|
||||
# Update the internal structure.
|
||||
self.row = np.concatenate((self.row[keep], new_row))
|
||||
self.col = np.concatenate((self.col[keep], new_col))
|
||||
self.data = np.concatenate((self.data[keep], new_data))
|
||||
self.has_canonical_format = False
|
||||
|
||||
# needed by _data_matrix
|
||||
def _with_data(self,data,copy=True):
|
||||
"""Returns a matrix with the same sparsity structure as self,
|
||||
but with different data. By default the index arrays
|
||||
(i.e. .row and .col) are copied.
|
||||
"""
|
||||
if copy:
|
||||
return self.__class__((data, (self.row.copy(), self.col.copy())),
|
||||
shape=self.shape, dtype=data.dtype)
|
||||
else:
|
||||
return self.__class__((data, (self.row, self.col)),
|
||||
shape=self.shape, dtype=data.dtype)
|
||||
|
||||
def sum_duplicates(self):
|
||||
"""Eliminate duplicate matrix entries by adding them together
|
||||
|
||||
This is an *in place* operation
|
||||
"""
|
||||
if self.has_canonical_format:
|
||||
return
|
||||
summed = self._sum_duplicates(self.row, self.col, self.data)
|
||||
self.row, self.col, self.data = summed
|
||||
self.has_canonical_format = True
|
||||
|
||||
def _sum_duplicates(self, row, col, data):
|
||||
# Assumes (data, row, col) not in canonical format.
|
||||
if len(data) == 0:
|
||||
return row, col, data
|
||||
order = np.lexsort((row, col))
|
||||
row = row[order]
|
||||
col = col[order]
|
||||
data = data[order]
|
||||
unique_mask = ((row[1:] != row[:-1]) |
|
||||
(col[1:] != col[:-1]))
|
||||
unique_mask = np.append(True, unique_mask)
|
||||
row = row[unique_mask]
|
||||
col = col[unique_mask]
|
||||
unique_inds, = np.nonzero(unique_mask)
|
||||
data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
|
||||
return row, col, data
|
||||
|
||||
def eliminate_zeros(self):
|
||||
"""Remove zero entries from the matrix
|
||||
|
||||
This is an *in place* operation
|
||||
"""
|
||||
mask = self.data != 0
|
||||
self.data = self.data[mask]
|
||||
self.row = self.row[mask]
|
||||
self.col = self.col[mask]
|
||||
|
||||
#######################
|
||||
# Arithmetic handlers #
|
||||
#######################
|
||||
|
||||
def _add_dense(self, other):
|
||||
if other.shape != self.shape:
|
||||
raise ValueError('Incompatible shapes ({} and {})'
|
||||
.format(self.shape, other.shape))
|
||||
dtype = upcast_char(self.dtype.char, other.dtype.char)
|
||||
result = np.array(other, dtype=dtype, copy=True)
|
||||
fortran = int(result.flags.f_contiguous)
|
||||
M, N = self.shape
|
||||
coo_todense(M, N, self.nnz, self.row, self.col, self.data,
|
||||
result.ravel('A'), fortran)
|
||||
return self._container(result, copy=False)
|
||||
|
||||
def _mul_vector(self, other):
|
||||
#output array
|
||||
result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
|
||||
other.dtype.char))
|
||||
coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
|
||||
return result
|
||||
|
||||
def _mul_multivector(self, other):
|
||||
result = np.zeros((other.shape[1], self.shape[0]),
|
||||
dtype=upcast_char(self.dtype.char, other.dtype.char))
|
||||
for i, col in enumerate(other.T):
|
||||
coo_matvec(self.nnz, self.row, self.col, self.data, col, result[i])
|
||||
return result.T.view(type=type(other))
|
||||
|
||||
|
||||
def isspmatrix_coo(x):
|
||||
"""Is x of coo_matrix type?
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x
|
||||
object to check for being a coo matrix
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if x is a coo matrix, False otherwise
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import coo_matrix, isspmatrix_coo
|
||||
>>> isspmatrix_coo(coo_matrix([[5]]))
|
||||
True
|
||||
|
||||
>>> from scipy.sparse import coo_matrix, csr_matrix, isspmatrix_coo
|
||||
>>> isspmatrix_coo(csr_matrix([[5]]))
|
||||
False
|
||||
"""
|
||||
from ._arrays import coo_array
|
||||
return isinstance(x, coo_matrix) or isinstance(x, coo_array)
|
||||
260
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csc.py
vendored
Normal file
260
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csc.py
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
"""Compressed Sparse Column matrix format"""
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['csc_matrix', 'isspmatrix_csc']
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._base import spmatrix
|
||||
from ._sparsetools import csc_tocsr, expandptr
|
||||
from ._sputils import upcast, get_index_dtype
|
||||
|
||||
from ._compressed import _cs_matrix
|
||||
|
||||
|
||||
class csc_matrix(_cs_matrix):
|
||||
"""
|
||||
Compressed Sparse Column matrix
|
||||
|
||||
This can be instantiated in several ways:
|
||||
|
||||
csc_matrix(D)
|
||||
with a dense matrix or rank-2 ndarray D
|
||||
|
||||
csc_matrix(S)
|
||||
with another sparse matrix S (equivalent to S.tocsc())
|
||||
|
||||
csc_matrix((M, N), [dtype])
|
||||
to construct an empty matrix with shape (M, N)
|
||||
dtype is optional, defaulting to dtype='d'.
|
||||
|
||||
csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
|
||||
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
|
||||
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
|
||||
|
||||
csc_matrix((data, indices, indptr), [shape=(M, N)])
|
||||
is the standard CSC representation where the row indices for
|
||||
column i are stored in ``indices[indptr[i]:indptr[i+1]]``
|
||||
and their corresponding values are stored in
|
||||
``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
|
||||
not supplied, the matrix dimensions are inferred from
|
||||
the index arrays.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
dtype : dtype
|
||||
Data type of the matrix
|
||||
shape : 2-tuple
|
||||
Shape of the matrix
|
||||
ndim : int
|
||||
Number of dimensions (this is always 2)
|
||||
nnz
|
||||
Number of stored values, including explicit zeros
|
||||
data
|
||||
Data array of the matrix
|
||||
indices
|
||||
CSC format index array
|
||||
indptr
|
||||
CSC format index pointer array
|
||||
has_sorted_indices
|
||||
Whether indices are sorted
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
Sparse matrices can be used in arithmetic operations: they support
|
||||
addition, subtraction, multiplication, division, and matrix power.
|
||||
|
||||
Advantages of the CSC format
|
||||
- efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
|
||||
- efficient column slicing
|
||||
- fast matrix vector products (CSR, BSR may be faster)
|
||||
|
||||
Disadvantages of the CSC format
|
||||
- slow row slicing operations (consider CSR)
|
||||
- changes to the sparsity structure are expensive (consider LIL or DOK)
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> csc_matrix((3, 4), dtype=np.int8).toarray()
|
||||
array([[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]], dtype=int8)
|
||||
|
||||
>>> row = np.array([0, 2, 2, 0, 1, 2])
|
||||
>>> col = np.array([0, 0, 1, 2, 2, 2])
|
||||
>>> data = np.array([1, 2, 3, 4, 5, 6])
|
||||
>>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
|
||||
array([[1, 0, 4],
|
||||
[0, 0, 5],
|
||||
[2, 3, 6]])
|
||||
|
||||
>>> indptr = np.array([0, 2, 3, 6])
|
||||
>>> indices = np.array([0, 2, 2, 0, 1, 2])
|
||||
>>> data = np.array([1, 2, 3, 4, 5, 6])
|
||||
>>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
|
||||
array([[1, 0, 4],
|
||||
[0, 0, 5],
|
||||
[2, 3, 6]])
|
||||
|
||||
"""
|
||||
format = 'csc'
|
||||
|
||||
def transpose(self, axes=None, copy=False):
|
||||
if axes is not None:
|
||||
raise ValueError(("Sparse matrices do not support "
|
||||
"an 'axes' parameter because swapping "
|
||||
"dimensions is the only logical permutation."))
|
||||
|
||||
M, N = self.shape
|
||||
|
||||
return self._csr_container((self.data, self.indices,
|
||||
self.indptr), (N, M), copy=copy)
|
||||
|
||||
transpose.__doc__ = spmatrix.transpose.__doc__
|
||||
|
||||
def __iter__(self):
|
||||
yield from self.tocsr()
|
||||
|
||||
def tocsc(self, copy=False):
|
||||
if copy:
|
||||
return self.copy()
|
||||
else:
|
||||
return self
|
||||
|
||||
tocsc.__doc__ = spmatrix.tocsc.__doc__
|
||||
|
||||
def tocsr(self, copy=False):
|
||||
M,N = self.shape
|
||||
idx_dtype = get_index_dtype((self.indptr, self.indices),
|
||||
maxval=max(self.nnz, N))
|
||||
indptr = np.empty(M + 1, dtype=idx_dtype)
|
||||
indices = np.empty(self.nnz, dtype=idx_dtype)
|
||||
data = np.empty(self.nnz, dtype=upcast(self.dtype))
|
||||
|
||||
csc_tocsr(M, N,
|
||||
self.indptr.astype(idx_dtype),
|
||||
self.indices.astype(idx_dtype),
|
||||
self.data,
|
||||
indptr,
|
||||
indices,
|
||||
data)
|
||||
|
||||
A = self._csr_container(
|
||||
(data, indices, indptr),
|
||||
shape=self.shape, copy=False
|
||||
)
|
||||
A.has_sorted_indices = True
|
||||
return A
|
||||
|
||||
tocsr.__doc__ = spmatrix.tocsr.__doc__
|
||||
|
||||
def nonzero(self):
|
||||
# CSC can't use _cs_matrix's .nonzero method because it
|
||||
# returns the indices sorted for self transposed.
|
||||
|
||||
# Get row and col indices, from _cs_matrix.tocoo
|
||||
major_dim, minor_dim = self._swap(self.shape)
|
||||
minor_indices = self.indices
|
||||
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
|
||||
expandptr(major_dim, self.indptr, major_indices)
|
||||
row, col = self._swap((major_indices, minor_indices))
|
||||
|
||||
# Remove explicit zeros
|
||||
nz_mask = self.data != 0
|
||||
row = row[nz_mask]
|
||||
col = col[nz_mask]
|
||||
|
||||
# Sort them to be in C-style order
|
||||
ind = np.argsort(row, kind='mergesort')
|
||||
row = row[ind]
|
||||
col = col[ind]
|
||||
|
||||
return row, col
|
||||
|
||||
nonzero.__doc__ = _cs_matrix.nonzero.__doc__
|
||||
|
||||
def getrow(self, i):
|
||||
"""Returns a copy of row i of the matrix, as a (1 x n)
|
||||
CSR matrix (row vector).
|
||||
"""
|
||||
M, N = self.shape
|
||||
i = int(i)
|
||||
if i < 0:
|
||||
i += M
|
||||
if i < 0 or i >= M:
|
||||
raise IndexError('index (%d) out of range' % i)
|
||||
return self._get_submatrix(minor=i).tocsr()
|
||||
|
||||
def getcol(self, i):
|
||||
"""Returns a copy of column i of the matrix, as a (m x 1)
|
||||
CSC matrix (column vector).
|
||||
"""
|
||||
M, N = self.shape
|
||||
i = int(i)
|
||||
if i < 0:
|
||||
i += N
|
||||
if i < 0 or i >= N:
|
||||
raise IndexError('index (%d) out of range' % i)
|
||||
return self._get_submatrix(major=i, copy=True)
|
||||
|
||||
def _get_intXarray(self, row, col):
|
||||
return self._major_index_fancy(col)._get_submatrix(minor=row)
|
||||
|
||||
def _get_intXslice(self, row, col):
|
||||
if col.step in (1, None):
|
||||
return self._get_submatrix(major=col, minor=row, copy=True)
|
||||
return self._major_slice(col)._get_submatrix(minor=row)
|
||||
|
||||
def _get_sliceXint(self, row, col):
|
||||
if row.step in (1, None):
|
||||
return self._get_submatrix(major=col, minor=row, copy=True)
|
||||
return self._get_submatrix(major=col)._minor_slice(row)
|
||||
|
||||
def _get_sliceXarray(self, row, col):
|
||||
return self._major_index_fancy(col)._minor_slice(row)
|
||||
|
||||
def _get_arrayXint(self, row, col):
|
||||
return self._get_submatrix(major=col)._minor_index_fancy(row)
|
||||
|
||||
def _get_arrayXslice(self, row, col):
|
||||
return self._major_slice(col)._minor_index_fancy(row)
|
||||
|
||||
# these functions are used by the parent class (_cs_matrix)
|
||||
# to remove redudancy between csc_matrix and csr_matrix
|
||||
def _swap(self, x):
|
||||
"""swap the members of x if this is a column-oriented matrix
|
||||
"""
|
||||
return x[1], x[0]
|
||||
|
||||
|
||||
def isspmatrix_csc(x):
|
||||
"""Is x of csc_matrix type?
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x
|
||||
object to check for being a csc matrix
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if x is a csc matrix, False otherwise
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix, isspmatrix_csc
|
||||
>>> isspmatrix_csc(csc_matrix([[5]]))
|
||||
True
|
||||
|
||||
>>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
|
||||
>>> isspmatrix_csc(csr_matrix([[5]]))
|
||||
False
|
||||
"""
|
||||
from ._arrays import csc_array
|
||||
return isinstance(x, csc_matrix) or isinstance(x, csc_array)
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csparsetools.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csparsetools.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csparsetools.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csparsetools.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
357
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csr.py
vendored
Normal file
357
.CondaPkg/env/Lib/site-packages/scipy/sparse/_csr.py
vendored
Normal file
@@ -0,0 +1,357 @@
|
||||
"""Compressed Sparse Row matrix format"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['csr_matrix', 'isspmatrix_csr']
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._base import spmatrix
|
||||
from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks,
|
||||
get_csr_submatrix)
|
||||
from ._sputils import upcast, get_index_dtype
|
||||
|
||||
from ._compressed import _cs_matrix
|
||||
|
||||
|
||||
class csr_matrix(_cs_matrix):
|
||||
"""
|
||||
Compressed Sparse Row matrix
|
||||
|
||||
This can be instantiated in several ways:
|
||||
csr_matrix(D)
|
||||
with a dense matrix or rank-2 ndarray D
|
||||
|
||||
csr_matrix(S)
|
||||
with another sparse matrix S (equivalent to S.tocsr())
|
||||
|
||||
csr_matrix((M, N), [dtype])
|
||||
to construct an empty matrix with shape (M, N)
|
||||
dtype is optional, defaulting to dtype='d'.
|
||||
|
||||
csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
|
||||
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
|
||||
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
|
||||
|
||||
csr_matrix((data, indices, indptr), [shape=(M, N)])
|
||||
is the standard CSR representation where the column indices for
|
||||
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
|
||||
corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
|
||||
If the shape parameter is not supplied, the matrix dimensions
|
||||
are inferred from the index arrays.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
dtype : dtype
|
||||
Data type of the matrix
|
||||
shape : 2-tuple
|
||||
Shape of the matrix
|
||||
ndim : int
|
||||
Number of dimensions (this is always 2)
|
||||
nnz
|
||||
Number of stored values, including explicit zeros
|
||||
data
|
||||
CSR format data array of the matrix
|
||||
indices
|
||||
CSR format index array of the matrix
|
||||
indptr
|
||||
CSR format index pointer array of the matrix
|
||||
has_sorted_indices
|
||||
Whether indices are sorted
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
Sparse matrices can be used in arithmetic operations: they support
|
||||
addition, subtraction, multiplication, division, and matrix power.
|
||||
|
||||
Advantages of the CSR format
|
||||
- efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
|
||||
- efficient row slicing
|
||||
- fast matrix vector products
|
||||
|
||||
Disadvantages of the CSR format
|
||||
- slow column slicing operations (consider CSC)
|
||||
- changes to the sparsity structure are expensive (consider LIL or DOK)
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import csr_matrix
|
||||
>>> csr_matrix((3, 4), dtype=np.int8).toarray()
|
||||
array([[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]], dtype=int8)
|
||||
|
||||
>>> row = np.array([0, 0, 1, 2, 2, 2])
|
||||
>>> col = np.array([0, 2, 2, 0, 1, 2])
|
||||
>>> data = np.array([1, 2, 3, 4, 5, 6])
|
||||
>>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
|
||||
array([[1, 0, 2],
|
||||
[0, 0, 3],
|
||||
[4, 5, 6]])
|
||||
|
||||
>>> indptr = np.array([0, 2, 3, 6])
|
||||
>>> indices = np.array([0, 2, 2, 0, 1, 2])
|
||||
>>> data = np.array([1, 2, 3, 4, 5, 6])
|
||||
>>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
|
||||
array([[1, 0, 2],
|
||||
[0, 0, 3],
|
||||
[4, 5, 6]])
|
||||
|
||||
Duplicate entries are summed together:
|
||||
|
||||
>>> row = np.array([0, 1, 2, 0])
|
||||
>>> col = np.array([0, 1, 1, 0])
|
||||
>>> data = np.array([1, 2, 4, 8])
|
||||
>>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
|
||||
array([[9, 0, 0],
|
||||
[0, 2, 0],
|
||||
[0, 4, 0]])
|
||||
|
||||
As an example of how to construct a CSR matrix incrementally,
|
||||
the following snippet builds a term-document matrix from texts:
|
||||
|
||||
>>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
|
||||
>>> indptr = [0]
|
||||
>>> indices = []
|
||||
>>> data = []
|
||||
>>> vocabulary = {}
|
||||
>>> for d in docs:
|
||||
... for term in d:
|
||||
... index = vocabulary.setdefault(term, len(vocabulary))
|
||||
... indices.append(index)
|
||||
... data.append(1)
|
||||
... indptr.append(len(indices))
|
||||
...
|
||||
>>> csr_matrix((data, indices, indptr), dtype=int).toarray()
|
||||
array([[2, 1, 0, 0],
|
||||
[0, 1, 1, 1]])
|
||||
|
||||
"""
|
||||
format = 'csr'
|
||||
|
||||
def transpose(self, axes=None, copy=False):
|
||||
if axes is not None:
|
||||
raise ValueError(("Sparse matrices do not support "
|
||||
"an 'axes' parameter because swapping "
|
||||
"dimensions is the only logical permutation."))
|
||||
|
||||
M, N = self.shape
|
||||
return self._csc_container((self.data, self.indices,
|
||||
self.indptr), shape=(N, M), copy=copy)
|
||||
|
||||
transpose.__doc__ = spmatrix.transpose.__doc__
|
||||
|
||||
def tolil(self, copy=False):
|
||||
lil = self._lil_container(self.shape, dtype=self.dtype)
|
||||
|
||||
self.sum_duplicates()
|
||||
ptr,ind,dat = self.indptr,self.indices,self.data
|
||||
rows, data = lil.rows, lil.data
|
||||
|
||||
for n in range(self.shape[0]):
|
||||
start = ptr[n]
|
||||
end = ptr[n+1]
|
||||
rows[n] = ind[start:end].tolist()
|
||||
data[n] = dat[start:end].tolist()
|
||||
|
||||
return lil
|
||||
|
||||
tolil.__doc__ = spmatrix.tolil.__doc__
|
||||
|
||||
def tocsr(self, copy=False):
|
||||
if copy:
|
||||
return self.copy()
|
||||
else:
|
||||
return self
|
||||
|
||||
tocsr.__doc__ = spmatrix.tocsr.__doc__
|
||||
|
||||
def tocsc(self, copy=False):
|
||||
idx_dtype = get_index_dtype((self.indptr, self.indices),
|
||||
maxval=max(self.nnz, self.shape[0]))
|
||||
indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
|
||||
indices = np.empty(self.nnz, dtype=idx_dtype)
|
||||
data = np.empty(self.nnz, dtype=upcast(self.dtype))
|
||||
|
||||
csr_tocsc(self.shape[0], self.shape[1],
|
||||
self.indptr.astype(idx_dtype),
|
||||
self.indices.astype(idx_dtype),
|
||||
self.data,
|
||||
indptr,
|
||||
indices,
|
||||
data)
|
||||
|
||||
A = self._csc_container((data, indices, indptr), shape=self.shape)
|
||||
A.has_sorted_indices = True
|
||||
return A
|
||||
|
||||
tocsc.__doc__ = spmatrix.tocsc.__doc__
|
||||
|
||||
def tobsr(self, blocksize=None, copy=True):
|
||||
if blocksize is None:
|
||||
from ._spfuncs import estimate_blocksize
|
||||
return self.tobsr(blocksize=estimate_blocksize(self))
|
||||
|
||||
elif blocksize == (1,1):
|
||||
arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
|
||||
return self._bsr_container(arg1, shape=self.shape, copy=copy)
|
||||
|
||||
else:
|
||||
R,C = blocksize
|
||||
M,N = self.shape
|
||||
|
||||
if R < 1 or C < 1 or M % R != 0 or N % C != 0:
|
||||
raise ValueError('invalid blocksize %s' % blocksize)
|
||||
|
||||
blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)
|
||||
|
||||
idx_dtype = get_index_dtype((self.indptr, self.indices),
|
||||
maxval=max(N//C, blks))
|
||||
indptr = np.empty(M//R+1, dtype=idx_dtype)
|
||||
indices = np.empty(blks, dtype=idx_dtype)
|
||||
data = np.zeros((blks,R,C), dtype=self.dtype)
|
||||
|
||||
csr_tobsr(M, N, R, C,
|
||||
self.indptr.astype(idx_dtype),
|
||||
self.indices.astype(idx_dtype),
|
||||
self.data,
|
||||
indptr, indices, data.ravel())
|
||||
|
||||
return self._bsr_container(
|
||||
(data, indices, indptr), shape=self.shape
|
||||
)
|
||||
|
||||
tobsr.__doc__ = spmatrix.tobsr.__doc__
|
||||
|
||||
# these functions are used by the parent class (_cs_matrix)
|
||||
# to remove redundancy between csc_matrix and csr_matrix
|
||||
def _swap(self, x):
|
||||
"""swap the members of x if this is a column-oriented matrix
|
||||
"""
|
||||
return x
|
||||
|
||||
def __iter__(self):
|
||||
indptr = np.zeros(2, dtype=self.indptr.dtype)
|
||||
shape = (1, self.shape[1])
|
||||
i0 = 0
|
||||
for i1 in self.indptr[1:]:
|
||||
indptr[1] = i1 - i0
|
||||
indices = self.indices[i0:i1]
|
||||
data = self.data[i0:i1]
|
||||
yield self.__class__(
|
||||
(data, indices, indptr), shape=shape, copy=True
|
||||
)
|
||||
i0 = i1
|
||||
|
||||
def getrow(self, i):
|
||||
"""Returns a copy of row i of the matrix, as a (1 x n)
|
||||
CSR matrix (row vector).
|
||||
"""
|
||||
M, N = self.shape
|
||||
i = int(i)
|
||||
if i < 0:
|
||||
i += M
|
||||
if i < 0 or i >= M:
|
||||
raise IndexError('index (%d) out of range' % i)
|
||||
indptr, indices, data = get_csr_submatrix(
|
||||
M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N)
|
||||
return self.__class__((data, indices, indptr), shape=(1, N),
|
||||
dtype=self.dtype, copy=False)
|
||||
|
||||
def getcol(self, i):
|
||||
"""Returns a copy of column i of the matrix, as a (m x 1)
|
||||
CSR matrix (column vector).
|
||||
"""
|
||||
M, N = self.shape
|
||||
i = int(i)
|
||||
if i < 0:
|
||||
i += N
|
||||
if i < 0 or i >= N:
|
||||
raise IndexError('index (%d) out of range' % i)
|
||||
indptr, indices, data = get_csr_submatrix(
|
||||
M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1)
|
||||
return self.__class__((data, indices, indptr), shape=(M, 1),
|
||||
dtype=self.dtype, copy=False)
|
||||
|
||||
def _get_intXarray(self, row, col):
|
||||
return self.getrow(row)._minor_index_fancy(col)
|
||||
|
||||
def _get_intXslice(self, row, col):
|
||||
if col.step in (1, None):
|
||||
return self._get_submatrix(row, col, copy=True)
|
||||
# TODO: uncomment this once it's faster:
|
||||
# return self.getrow(row)._minor_slice(col)
|
||||
|
||||
M, N = self.shape
|
||||
start, stop, stride = col.indices(N)
|
||||
|
||||
ii, jj = self.indptr[row:row+2]
|
||||
row_indices = self.indices[ii:jj]
|
||||
row_data = self.data[ii:jj]
|
||||
|
||||
if stride > 0:
|
||||
ind = (row_indices >= start) & (row_indices < stop)
|
||||
else:
|
||||
ind = (row_indices <= start) & (row_indices > stop)
|
||||
|
||||
if abs(stride) > 1:
|
||||
ind &= (row_indices - start) % stride == 0
|
||||
|
||||
row_indices = (row_indices[ind] - start) // stride
|
||||
row_data = row_data[ind]
|
||||
row_indptr = np.array([0, len(row_indices)])
|
||||
|
||||
if stride < 0:
|
||||
row_data = row_data[::-1]
|
||||
row_indices = abs(row_indices[::-1])
|
||||
|
||||
shape = (1, max(0, int(np.ceil(float(stop - start) / stride))))
|
||||
return self.__class__((row_data, row_indices, row_indptr), shape=shape,
|
||||
dtype=self.dtype, copy=False)
|
||||
|
||||
def _get_sliceXint(self, row, col):
|
||||
if row.step in (1, None):
|
||||
return self._get_submatrix(row, col, copy=True)
|
||||
return self._major_slice(row)._get_submatrix(minor=col)
|
||||
|
||||
def _get_sliceXarray(self, row, col):
|
||||
return self._major_slice(row)._minor_index_fancy(col)
|
||||
|
||||
def _get_arrayXint(self, row, col):
|
||||
return self._major_index_fancy(row)._get_submatrix(minor=col)
|
||||
|
||||
def _get_arrayXslice(self, row, col):
|
||||
if col.step not in (1, None):
|
||||
col = np.arange(*col.indices(self.shape[1]))
|
||||
return self._get_arrayXarray(row, col)
|
||||
return self._major_index_fancy(row)._get_submatrix(minor=col)
|
||||
|
||||
|
||||
def isspmatrix_csr(x):
|
||||
"""Is x of csr_matrix type?
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x
|
||||
object to check for being a csr matrix
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if x is a csr matrix, False otherwise
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csr_matrix, isspmatrix_csr
|
||||
>>> isspmatrix_csr(csr_matrix([[5]]))
|
||||
True
|
||||
|
||||
>>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
|
||||
>>> isspmatrix_csr(csc_matrix([[5]]))
|
||||
False
|
||||
"""
|
||||
from ._arrays import csr_array
|
||||
return isinstance(x, csr_matrix) or isinstance(x, csr_array)
|
||||
402
.CondaPkg/env/Lib/site-packages/scipy/sparse/_data.py
vendored
Normal file
402
.CondaPkg/env/Lib/site-packages/scipy/sparse/_data.py
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
"""Base class for sparse matrice with a .data attribute
|
||||
|
||||
subclasses must provide a _with_data() method that
|
||||
creates a new matrix with the same sparsity pattern
|
||||
as self but with a different data array
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._base import spmatrix, _ufuncs_with_fixed_point_at_zero
|
||||
from ._sputils import isscalarlike, validateaxis, matrix
|
||||
|
||||
__all__ = []
|
||||
|
||||
|
||||
# TODO implement all relevant operations
|
||||
# use .data.__methods__() instead of /=, *=, etc.
|
||||
class _data_matrix(spmatrix):
|
||||
def __init__(self):
|
||||
spmatrix.__init__(self)
|
||||
|
||||
def _get_dtype(self):
|
||||
return self.data.dtype
|
||||
|
||||
def _set_dtype(self, newtype):
|
||||
self.data.dtype = newtype
|
||||
dtype = property(fget=_get_dtype, fset=_set_dtype)
|
||||
|
||||
def _deduped_data(self):
|
||||
if hasattr(self, 'sum_duplicates'):
|
||||
self.sum_duplicates()
|
||||
return self.data
|
||||
|
||||
def __abs__(self):
|
||||
return self._with_data(abs(self._deduped_data()))
|
||||
|
||||
def __round__(self, ndigits=0):
|
||||
return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
|
||||
|
||||
def _real(self):
|
||||
return self._with_data(self.data.real)
|
||||
|
||||
def _imag(self):
|
||||
return self._with_data(self.data.imag)
|
||||
|
||||
def __neg__(self):
|
||||
if self.dtype.kind == 'b':
|
||||
raise NotImplementedError('negating a sparse boolean '
|
||||
'matrix is not supported')
|
||||
return self._with_data(-self.data)
|
||||
|
||||
def __imul__(self, other): # self *= other
|
||||
if isscalarlike(other):
|
||||
self.data *= other
|
||||
return self
|
||||
else:
|
||||
return NotImplemented
|
||||
|
||||
def __itruediv__(self, other): # self /= other
|
||||
if isscalarlike(other):
|
||||
recip = 1.0 / other
|
||||
self.data *= recip
|
||||
return self
|
||||
else:
|
||||
return NotImplemented
|
||||
|
||||
def astype(self, dtype, casting='unsafe', copy=True):
|
||||
dtype = np.dtype(dtype)
|
||||
if self.dtype != dtype:
|
||||
return self._with_data(
|
||||
self._deduped_data().astype(dtype, casting=casting, copy=copy),
|
||||
copy=copy)
|
||||
elif copy:
|
||||
return self.copy()
|
||||
else:
|
||||
return self
|
||||
|
||||
astype.__doc__ = spmatrix.astype.__doc__
|
||||
|
||||
def conj(self, copy=True):
|
||||
if np.issubdtype(self.dtype, np.complexfloating):
|
||||
return self._with_data(self.data.conj(), copy=copy)
|
||||
elif copy:
|
||||
return self.copy()
|
||||
else:
|
||||
return self
|
||||
|
||||
conj.__doc__ = spmatrix.conj.__doc__
|
||||
|
||||
def copy(self):
|
||||
return self._with_data(self.data.copy(), copy=True)
|
||||
|
||||
copy.__doc__ = spmatrix.copy.__doc__
|
||||
|
||||
def count_nonzero(self):
|
||||
return np.count_nonzero(self._deduped_data())
|
||||
|
||||
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
|
||||
|
||||
def power(self, n, dtype=None):
|
||||
"""
|
||||
This function performs element-wise power.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
n : n is a scalar
|
||||
|
||||
dtype : If dtype is not specified, the current dtype will be preserved.
|
||||
"""
|
||||
if not isscalarlike(n):
|
||||
raise NotImplementedError("input is not scalar")
|
||||
|
||||
data = self._deduped_data()
|
||||
if dtype is not None:
|
||||
data = data.astype(dtype)
|
||||
return self._with_data(data ** n)
|
||||
|
||||
###########################
|
||||
# Multiplication handlers #
|
||||
###########################
|
||||
|
||||
def _mul_scalar(self, other):
|
||||
return self._with_data(self.data * other)
|
||||
|
||||
|
||||
# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
|
||||
for npfunc in _ufuncs_with_fixed_point_at_zero:
|
||||
name = npfunc.__name__
|
||||
|
||||
def _create_method(op):
|
||||
def method(self):
|
||||
result = op(self._deduped_data())
|
||||
return self._with_data(result, copy=True)
|
||||
|
||||
method.__doc__ = ("Element-wise %s.\n\n"
|
||||
"See `numpy.%s` for more information." % (name, name))
|
||||
method.__name__ = name
|
||||
|
||||
return method
|
||||
|
||||
setattr(_data_matrix, name, _create_method(npfunc))
|
||||
|
||||
|
||||
def _find_missing_index(ind, n):
|
||||
for k, a in enumerate(ind):
|
||||
if k != a:
|
||||
return k
|
||||
|
||||
k += 1
|
||||
if k < n:
|
||||
return k
|
||||
else:
|
||||
return -1
|
||||
|
||||
|
||||
class _minmax_mixin:
|
||||
"""Mixin for min and max methods.
|
||||
|
||||
These are not implemented for dia_matrix, hence the separate class.
|
||||
"""
|
||||
|
||||
def _min_or_max_axis(self, axis, min_or_max):
|
||||
N = self.shape[axis]
|
||||
if N == 0:
|
||||
raise ValueError("zero-size array to reduction operation")
|
||||
M = self.shape[1 - axis]
|
||||
|
||||
mat = self.tocsc() if axis == 0 else self.tocsr()
|
||||
mat.sum_duplicates()
|
||||
|
||||
major_index, value = mat._minor_reduce(min_or_max)
|
||||
not_full = np.diff(mat.indptr)[major_index] < N
|
||||
value[not_full] = min_or_max(value[not_full], 0)
|
||||
|
||||
mask = value != 0
|
||||
major_index = np.compress(mask, major_index)
|
||||
value = np.compress(mask, value)
|
||||
|
||||
if axis == 0:
|
||||
return self._coo_container(
|
||||
(value, (np.zeros(len(value)), major_index)),
|
||||
dtype=self.dtype, shape=(1, M)
|
||||
)
|
||||
else:
|
||||
return self._coo_container(
|
||||
(value, (major_index, np.zeros(len(value)))),
|
||||
dtype=self.dtype, shape=(M, 1)
|
||||
)
|
||||
|
||||
def _min_or_max(self, axis, out, min_or_max):
|
||||
if out is not None:
|
||||
raise ValueError(("Sparse matrices do not support "
|
||||
"an 'out' parameter."))
|
||||
|
||||
validateaxis(axis)
|
||||
|
||||
if axis is None:
|
||||
if 0 in self.shape:
|
||||
raise ValueError("zero-size array to reduction operation")
|
||||
|
||||
zero = self.dtype.type(0)
|
||||
if self.nnz == 0:
|
||||
return zero
|
||||
m = min_or_max.reduce(self._deduped_data().ravel())
|
||||
if self.nnz != np.prod(self.shape):
|
||||
m = min_or_max(zero, m)
|
||||
return m
|
||||
|
||||
if axis < 0:
|
||||
axis += 2
|
||||
|
||||
if (axis == 0) or (axis == 1):
|
||||
return self._min_or_max_axis(axis, min_or_max)
|
||||
else:
|
||||
raise ValueError("axis out of range")
|
||||
|
||||
def _arg_min_or_max_axis(self, axis, op, compare):
|
||||
if self.shape[axis] == 0:
|
||||
raise ValueError("Can't apply the operation along a zero-sized "
|
||||
"dimension.")
|
||||
|
||||
if axis < 0:
|
||||
axis += 2
|
||||
|
||||
zero = self.dtype.type(0)
|
||||
|
||||
mat = self.tocsc() if axis == 0 else self.tocsr()
|
||||
mat.sum_duplicates()
|
||||
|
||||
ret_size, line_size = mat._swap(mat.shape)
|
||||
ret = np.zeros(ret_size, dtype=int)
|
||||
|
||||
nz_lines, = np.nonzero(np.diff(mat.indptr))
|
||||
for i in nz_lines:
|
||||
p, q = mat.indptr[i:i + 2]
|
||||
data = mat.data[p:q]
|
||||
indices = mat.indices[p:q]
|
||||
am = op(data)
|
||||
m = data[am]
|
||||
if compare(m, zero) or q - p == line_size:
|
||||
ret[i] = indices[am]
|
||||
else:
|
||||
zero_ind = _find_missing_index(indices, line_size)
|
||||
if m == zero:
|
||||
ret[i] = min(am, zero_ind)
|
||||
else:
|
||||
ret[i] = zero_ind
|
||||
|
||||
if axis == 1:
|
||||
ret = ret.reshape(-1, 1)
|
||||
|
||||
return matrix(ret)
|
||||
|
||||
def _arg_min_or_max(self, axis, out, op, compare):
|
||||
if out is not None:
|
||||
raise ValueError("Sparse matrices do not support "
|
||||
"an 'out' parameter.")
|
||||
|
||||
validateaxis(axis)
|
||||
|
||||
if axis is None:
|
||||
if 0 in self.shape:
|
||||
raise ValueError("Can't apply the operation to "
|
||||
"an empty matrix.")
|
||||
|
||||
if self.nnz == 0:
|
||||
return 0
|
||||
else:
|
||||
zero = self.dtype.type(0)
|
||||
mat = self.tocoo()
|
||||
mat.sum_duplicates()
|
||||
am = op(mat.data)
|
||||
m = mat.data[am]
|
||||
|
||||
if compare(m, zero):
|
||||
# cast to Python int to avoid overflow
|
||||
# and RuntimeError
|
||||
return int(mat.row[am])*mat.shape[1] + int(mat.col[am])
|
||||
else:
|
||||
size = np.prod(mat.shape)
|
||||
if size == mat.nnz:
|
||||
return am
|
||||
else:
|
||||
ind = mat.row * mat.shape[1] + mat.col
|
||||
zero_ind = _find_missing_index(ind, size)
|
||||
if m == zero:
|
||||
return min(zero_ind, am)
|
||||
else:
|
||||
return zero_ind
|
||||
|
||||
return self._arg_min_or_max_axis(axis, op, compare)
|
||||
|
||||
def max(self, axis=None, out=None):
    """
    Return the maximum of the matrix or maximum along an axis.
    This takes all elements into account, not just the non-zero ones.

    Parameters
    ----------
    axis : {-2, -1, 0, 1, None} optional
        Axis along which the maximum is computed. The default (None)
        computes the maximum over all the matrix elements, returning
        a scalar.

    out : None, optional
        This argument is in the signature *solely* for NumPy
        compatibility reasons. Do not pass in anything except for
        the default value, as this argument is not used.

    Returns
    -------
    amax : coo_matrix or scalar
        Maximum of `a`. If `axis` is None, the result is a scalar value.
        If `axis` is given, the result is a sparse.coo_matrix of dimension
        ``a.ndim - 1``.

    See Also
    --------
    min : The minimum value of a sparse matrix along a given axis.
    numpy.matrix.max : NumPy's implementation of 'max' for matrices

    """
    # Delegate to the shared min/max machinery with the element-wise
    # maximum as the reduction operator.
    reducer = np.maximum
    return self._min_or_max(axis, out, reducer)
|
||||
|
||||
def min(self, axis=None, out=None):
    """
    Return the minimum of the matrix or minimum along an axis.
    This takes all elements into account, not just the non-zero ones.

    Parameters
    ----------
    axis : {-2, -1, 0, 1, None} optional
        Axis along which the minimum is computed. The default is to
        compute the minimum over all the matrix elements, returning
        a scalar (i.e., `axis` = `None`).

    out : None, optional
        This argument is in the signature *solely* for NumPy
        compatibility reasons. Do not pass in anything except for
        the default value, as this argument is not used.

    Returns
    -------
    amin : coo_matrix or scalar
        Minimum of `a`. If `axis` is None, the result is a scalar value.
        If `axis` is given, the result is a sparse.coo_matrix of dimension
        ``a.ndim - 1``.

    See Also
    --------
    max : The maximum value of a sparse matrix along a given axis.
    numpy.matrix.min : NumPy's implementation of 'min' for matrices

    """
    # Delegate to the shared min/max machinery with the element-wise
    # minimum as the reduction operator.
    return self._min_or_max(axis, out, np.minimum)
|
||||
|
||||
def argmax(self, axis=None, out=None):
    """Return indices of maximum elements along an axis.

    Implicit zero elements are also taken into account. If there are
    several maximum values, the index of the first occurrence is returned.

    Parameters
    ----------
    axis : {-2, -1, 0, 1, None}, optional
        Axis along which the argmax is computed. If None (default), index
        of the maximum element in the flatten data is returned.
    out : None, optional
        This argument is in the signature *solely* for NumPy
        compatibility reasons. Do not pass in anything except for
        the default value, as this argument is not used.

    Returns
    -------
    ind : numpy.matrix or int
        Indices of maximum elements. If matrix, its size along `axis` is 1.
    """
    # Shared helper handles the axis/flattened cases; np.greater tells it
    # when a stored value beats the implicit zeros.
    selector, comparator = np.argmax, np.greater
    return self._arg_min_or_max(axis, out, selector, comparator)
|
||||
|
||||
def argmin(self, axis=None, out=None):
    """Return indices of minimum elements along an axis.

    Implicit zero elements are also taken into account. If there are
    several minimum values, the index of the first occurrence is returned.

    Parameters
    ----------
    axis : {-2, -1, 0, 1, None}, optional
        Axis along which the argmin is computed. If None (default), index
        of the minimum element in the flatten data is returned.
    out : None, optional
        This argument is in the signature *solely* for NumPy
        compatibility reasons. Do not pass in anything except for
        the default value, as this argument is not used.

    Returns
    -------
    ind : numpy.matrix or int
        Indices of minimum elements. If matrix, its size along `axis` is 1.
    """
    # Shared helper handles the axis/flattened cases; np.less tells it
    # when a stored value beats the implicit zeros.
    selector, comparator = np.argmin, np.less
    return self._arg_min_or_max(axis, out, selector, comparator)
|
||||
470
.CondaPkg/env/Lib/site-packages/scipy/sparse/_dia.py
vendored
Normal file
470
.CondaPkg/env/Lib/site-packages/scipy/sparse/_dia.py
vendored
Normal file
@@ -0,0 +1,470 @@
|
||||
"""Sparse DIAgonal format"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['dia_matrix', 'isspmatrix_dia']
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._base import isspmatrix, _formats, spmatrix
|
||||
from ._data import _data_matrix
|
||||
from ._sputils import (isshape, upcast_char, getdtype, get_index_dtype,
|
||||
get_sum_dtype, validateaxis, check_shape)
|
||||
from ._sparsetools import dia_matvec
|
||||
|
||||
|
||||
class dia_matrix(_data_matrix):
    """Sparse matrix with DIAgonal storage

    This can be instantiated in several ways:
        dia_matrix(D)
            with a dense matrix

        dia_matrix(S)
            with another sparse matrix S (equivalent to S.todia())

        dia_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_matrix((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of stored values, including explicit zeros
    data
        DIA format data array of the matrix
    offsets
        DIA format offset array of the matrix

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import dia_matrix
    >>> dia_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    >>> from scipy.sparse import dia_matrix
    >>> n = 10
    >>> ex = np.ones(n)
    >>> data = np.array([ex, 2 * ex, ex])
    >>> offsets = np.array([-1, 0, 1])
    >>> dia_matrix((data, offsets), shape=(n, n)).toarray()
    array([[2., 1., 0., ..., 0., 0., 0.],
           [1., 2., 1., ..., 0., 0., 0.],
           [0., 1., 2., ..., 0., 0., 0.],
           ...,
           [0., 0., 0., ..., 2., 1., 0.],
           [0., 0., 0., ..., 1., 2., 1.],
           [0., 0., 0., ..., 0., 1., 2.]])
    """
    # Short format code used by the base-class dispatch machinery.
    format = 'dia'

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a DIA matrix from another DIA/sparse matrix, a shape
        tuple, a (data, offsets) pair, or a dense array-like.
        """
        _data_matrix.__init__(self)

        if isspmatrix_dia(arg1):
            # Already DIA: share (or copy) the underlying arrays.
            if copy:
                arg1 = arg1.copy()
            self.data = arg1.data
            self.offsets = arg1.offsets
            self._shape = check_shape(arg1.shape)
        elif isspmatrix(arg1):
            # Other sparse format: convert via todia().
            # NOTE(review): the isspmatrix_dia sub-branch here is dead
            # code — the first `if` already captured DIA inputs.
            if isspmatrix_dia(arg1) and copy:
                A = arg1.copy()
            else:
                A = arg1.todia()
            self.data = A.data
            self.offsets = A.offsets
            self._shape = check_shape(A.shape)
        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # It's a tuple of matrix dimensions (M, N)
                # create empty matrix
                self._shape = check_shape(arg1)
                self.data = np.zeros((0,0), getdtype(dtype, default=float))
                idx_dtype = get_index_dtype(maxval=max(self.shape))
                self.offsets = np.zeros((0), dtype=idx_dtype)
            else:
                try:
                    # Try interpreting it as (data, offsets)
                    data, offsets = arg1
                except Exception as e:
                    raise ValueError('unrecognized form for dia_matrix constructor') from e
                else:
                    if shape is None:
                        raise ValueError('expected a shape argument')
                    self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
                    self.offsets = np.atleast_1d(np.array(arg1[1],
                                                          dtype=get_index_dtype(maxval=max(shape)),
                                                          copy=copy))
                    self._shape = check_shape(shape)
        else:
            #must be dense, convert to COO first, then to DIA
            try:
                arg1 = np.asarray(arg1)
            except Exception as e:
                raise ValueError("unrecognized form for"
                                 " %s_matrix constructor" % self.format) from e
            A = self._coo_container(arg1, dtype=dtype, shape=shape).todia()
            self.data = A.data
            self.offsets = A.offsets
            self._shape = check_shape(A.shape)

        if dtype is not None:
            self.data = self.data.astype(dtype)

        #check format
        if self.offsets.ndim != 1:
            raise ValueError('offsets array must have rank 1')

        if self.data.ndim != 2:
            raise ValueError('data array must have rank 2')

        if self.data.shape[0] != len(self.offsets):
            raise ValueError('number of diagonals (%d) '
                             'does not match the number of offsets (%d)'
                             % (self.data.shape[0], len(self.offsets)))

        if len(np.unique(self.offsets)) != len(self.offsets):
            raise ValueError('offset array contains duplicate values')

    def __repr__(self):
        # e.g. "<3x4 sparse matrix of type '...'\n\twith 5 stored
        # elements (2 diagonals) in DIAgonal format>"
        format = _formats[self.getformat()][1]
        return "<%dx%d sparse matrix of type '%s'\n" \
               "\twith %d stored elements (%d diagonals) in %s format>" % \
               (self.shape + (self.dtype.type, self.nnz, self.data.shape[0],
                              format))

    def _data_mask(self):
        """Returns a mask of the same shape as self.data, where
        mask[i,j] is True when data[i,j] corresponds to a stored element."""
        num_rows, num_cols = self.shape
        offset_inds = np.arange(self.data.shape[1])
        # Row index each data entry would land on; negative or
        # out-of-range rows/cols are padding, not stored elements.
        row = offset_inds - self.offsets[:,None]
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        return mask

    def count_nonzero(self):
        # Only in-bounds entries count; explicit zeros are excluded here.
        mask = self._data_mask()
        return np.count_nonzero(self.data[mask])

    def getnnz(self, axis=None):
        if axis is not None:
            raise NotImplementedError("getnnz over an axis is not implemented "
                                      "for DIA format")
        M,N = self.shape
        nnz = 0
        # Each diagonal at offset k holds min(M, N-k) (k>=0) or
        # min(M+k, N) (k<0) in-bounds entries.
        for k in self.offsets:
            if k > 0:
                nnz += min(M,N-k)
            else:
                nnz += min(M+k,N)
        return int(nnz)

    getnnz.__doc__ = spmatrix.getnnz.__doc__
    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__

    def sum(self, axis=None, dtype=None, out=None):
        validateaxis(axis)

        if axis is not None and axis < 0:
            axis += 2

        res_dtype = get_sum_dtype(self.dtype)
        num_rows, num_cols = self.shape
        ret = None

        if axis == 0:
            # Column sums: mask out padding, then reduce over diagonals.
            mask = self._data_mask()
            x = (self.data * mask).sum(axis=0)
            if x.shape[0] == num_cols:
                res = x
            else:
                # data may be narrower than the matrix; pad with zeros.
                res = np.zeros(num_cols, dtype=x.dtype)
                res[:x.shape[0]] = x
            ret = self._ascontainer(res, dtype=res_dtype)

        else:
            # Row sums computed as a matvec with the all-ones vector.
            row_sums = np.zeros((num_rows, 1), dtype=res_dtype)
            one = np.ones(num_cols, dtype=res_dtype)
            dia_matvec(num_rows, num_cols, len(self.offsets),
                       self.data.shape[1], self.offsets, self.data, one, row_sums)

            row_sums = self._ascontainer(row_sums)

            if axis is None:
                return row_sums.sum(dtype=dtype, out=out)

            ret = self._ascontainer(row_sums.sum(axis=axis))

        if out is not None and out.shape != ret.shape:
            raise ValueError("dimensions do not match")

        # axis=() sum is a no-op reduction used to apply dtype/out.
        return ret.sum(axis=(), dtype=dtype, out=out)

    sum.__doc__ = spmatrix.sum.__doc__

    def _add_sparse(self, other):

        # Check if other is also of type dia_matrix
        if not isinstance(other, type(self)):
            # If other is not of type dia_matrix, default to
            # converting to csr_matrix, as is done in the _add_sparse
            # method of parent class spmatrix
            return self.tocsr()._add_sparse(other)

        # The task is to compute m = self + other
        # Start by making a copy of self, of the datatype
        # that should result from adding self and other
        dtype = np.promote_types(self.dtype, other.dtype)
        m = self.astype(dtype, copy=True)

        # Then, add all the stored diagonals of other.
        for d in other.offsets:
            # Check if the diagonal has already been added.
            if d in m.offsets:
                # If the diagonal is already there, we need to take
                # the sum of the existing and the new
                m.setdiag(m.diagonal(d) + other.diagonal(d), d)
            else:
                m.setdiag(other.diagonal(d), d)
        return m

    def _mul_vector(self, other):
        # matrix @ vector via the C routine dia_matvec.
        x = other

        y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
                                                       x.dtype.char))

        L = self.data.shape[1]

        M,N = self.shape

        dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())

        return y

    def _mul_multimatrix(self, other):
        # matrix @ dense matrix: one matvec per column of `other`.
        return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])

    def _setdiag(self, values, k=0):
        """Set diagonal ``k`` to ``values`` (scalar broadcasts), growing
        the data/offsets arrays when needed."""
        M, N = self.shape

        if values.ndim == 0:
            # broadcast
            values_n = np.inf
        else:
            values_n = len(values)

        # Clamp to the in-bounds span of diagonal k.
        if k < 0:
            n = min(M + k, N, values_n)
            min_index = 0
            max_index = n
        else:
            n = min(M, N - k, values_n)
            min_index = k
            max_index = k + n

        if values.ndim != 0:
            # allow also longer sequences
            values = values[:n]

        data_rows, data_cols = self.data.shape
        if k in self.offsets:
            # Widen data if the diagonal extends past current columns.
            if max_index > data_cols:
                data = np.zeros((data_rows, max_index), dtype=self.data.dtype)
                data[:, :data_cols] = self.data
                self.data = data
            self.data[self.offsets == k, min_index:max_index] = values
        else:
            # New diagonal: append an offset and a fresh data row.
            self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
            m = max(max_index, data_cols)
            data = np.zeros((data_rows + 1, m), dtype=self.data.dtype)
            data[:-1, :data_cols] = self.data
            data[-1, min_index:max_index] = values
            self.data = data

    def todia(self, copy=False):
        if copy:
            return self.copy()
        else:
            return self

    todia.__doc__ = spmatrix.todia.__doc__

    def transpose(self, axes=None, copy=False):
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))

        num_rows, num_cols = self.shape
        max_dim = max(self.shape)

        # flip diagonal offsets
        offsets = -self.offsets

        # re-align the data matrix
        r = np.arange(len(offsets), dtype=np.intc)[:, None]
        c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
        pad_amount = max(0, max_dim-self.data.shape[1])
        data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
                                              dtype=self.data.dtype)))
        data = data[r, c]
        return self._dia_container((data, offsets), shape=(
            num_cols, num_rows), copy=copy)

    transpose.__doc__ = spmatrix.transpose.__doc__

    def diagonal(self, k=0):
        rows, cols = self.shape
        # Diagonal entirely out of bounds: empty result.
        if k <= -rows or k >= cols:
            return np.empty(0, dtype=self.data.dtype)
        idx, = np.nonzero(self.offsets == k)
        first_col = max(0, k)
        last_col = min(rows + k, cols)
        result_size = last_col - first_col
        if idx.size == 0:
            # Diagonal not stored: all implicit zeros.
            return np.zeros(result_size, dtype=self.data.dtype)
        result = self.data[idx[0], first_col:last_col]
        padding = result_size - len(result)
        if padding > 0:
            # data row may be shorter than the diagonal; zero-pad.
            result = np.pad(result, (0, padding), mode='constant')
        return result

    diagonal.__doc__ = spmatrix.diagonal.__doc__

    def tocsc(self, copy=False):
        if self.nnz == 0:
            return self._csc_container(self.shape, dtype=self.dtype)

        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        # In-bounds, nonzero entries only (same mask as _data_mask,
        # plus dropping explicit zeros).
        row = offset_inds - self.offsets[:,None]
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)

        idx_dtype = get_index_dtype(maxval=max(self.shape))
        indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
        indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)[:num_cols])
        if offset_len < num_cols:
            # Trailing all-zero columns repeat the final pointer.
            indptr[offset_len+1:] = indptr[offset_len]
        # Transposed masking yields column-major (CSC) ordering.
        indices = row.T[mask.T].astype(idx_dtype, copy=False)
        data = self.data.T[mask.T]
        return self._csc_container((data, indices, indptr), shape=self.shape,
                                   dtype=self.dtype)

    tocsc.__doc__ = spmatrix.tocsc.__doc__

    def tocoo(self, copy=False):
        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        row = offset_inds - self.offsets[:,None]
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)
        row = row[mask]
        col = np.tile(offset_inds, num_offsets)[mask.ravel()]
        data = self.data[mask]

        A = self._coo_container(
            (data, (row, col)), shape=self.shape, dtype=self.dtype
        )
        # DIA storage cannot hold duplicates, so the COO is canonical.
        A.has_canonical_format = True
        return A

    tocoo.__doc__ = spmatrix.tocoo.__doc__

    # needed by _data_matrix
    def _with_data(self, data, copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data. By default the structure arrays are copied.
        """
        if copy:
            return self._dia_container(
                (data, self.offsets.copy()), shape=self.shape
            )
        else:
            return self._dia_container(
                (data, self.offsets), shape=self.shape
            )

    def resize(self, *shape):
        shape = check_shape(shape)
        M, N = shape
        # we do not need to handle the case of expanding N
        self.data = self.data[:, :N]

        if (M > self.shape[0] and
                np.any(self.offsets + self.shape[0] < self.data.shape[1])):
            # explicitly clear values that were previously hidden
            mask = (self.offsets[:, None] + self.shape[0] <=
                    np.arange(self.data.shape[1]))
            self.data[mask] = 0

        self._shape = shape

    resize.__doc__ = spmatrix.resize.__doc__
|
||||
|
||||
|
||||
def isspmatrix_dia(x):
    """Is x of dia_matrix type?

    Parameters
    ----------
    x
        object to check for being a dia matrix

    Returns
    -------
    bool
        True if x is a dia matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import dia_matrix, isspmatrix_dia
    >>> isspmatrix_dia(dia_matrix([[5]]))
    True

    >>> from scipy.sparse import dia_matrix, csr_matrix, isspmatrix_dia
    >>> isspmatrix_dia(csr_matrix([[5]]))
    False
    """
    # Imported lazily to avoid a circular import at module load time.
    from ._arrays import dia_array
    # Both the matrix class and the array variant count as "dia".
    return isinstance(x, (dia_matrix, dia_array))
|
||||
456
.CondaPkg/env/Lib/site-packages/scipy/sparse/_dok.py
vendored
Normal file
456
.CondaPkg/env/Lib/site-packages/scipy/sparse/_dok.py
vendored
Normal file
@@ -0,0 +1,456 @@
|
||||
"""Dictionary Of Keys based matrix"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['dok_matrix', 'isspmatrix_dok']
|
||||
|
||||
import itertools
|
||||
import numpy as np
|
||||
|
||||
from ._base import spmatrix, isspmatrix
|
||||
from ._index import IndexMixin
|
||||
from ._sputils import (isdense, getdtype, isshape, isintlike, isscalarlike,
|
||||
upcast, upcast_scalar, get_index_dtype, check_shape)
|
||||
|
||||
# Python 2 compatibility shim: ``operator.isSequenceType`` was removed in
# Python 3, so define a duck-typed fallback.
# NOTE(review): on Python 3 the ImportError branch always runs; the
# try/except could likely be dropped if Python 2 support is gone — confirm.
try:
    from operator import isSequenceType as _is_sequence
except ImportError:
    def _is_sequence(x):
        """Return True if *x* looks like a sequence or an iterator
        (sized, or supporting the Py3 ``__next__`` / Py2 ``next``
        iterator protocol)."""
        return (hasattr(x, '__len__') or hasattr(x, '__next__')
                or hasattr(x, 'next'))
|
||||
|
||||
|
||||
class dok_matrix(spmatrix, IndexMixin, dict):
|
||||
"""
|
||||
Dictionary Of Keys based sparse matrix.
|
||||
|
||||
This is an efficient structure for constructing sparse
|
||||
matrices incrementally.
|
||||
|
||||
This can be instantiated in several ways:
|
||||
dok_matrix(D)
|
||||
with a dense matrix, D
|
||||
|
||||
dok_matrix(S)
|
||||
with a sparse matrix, S
|
||||
|
||||
dok_matrix((M,N), [dtype])
|
||||
create the matrix with initial shape (M,N)
|
||||
dtype is optional, defaulting to dtype='d'
|
||||
|
||||
Attributes
|
||||
----------
|
||||
dtype : dtype
|
||||
Data type of the matrix
|
||||
shape : 2-tuple
|
||||
Shape of the matrix
|
||||
ndim : int
|
||||
Number of dimensions (this is always 2)
|
||||
nnz
|
||||
Number of nonzero elements
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
Sparse matrices can be used in arithmetic operations: they support
|
||||
addition, subtraction, multiplication, division, and matrix power.
|
||||
|
||||
Allows for efficient O(1) access of individual elements.
|
||||
Duplicates are not allowed.
|
||||
Can be efficiently converted to a coo_matrix once constructed.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import dok_matrix
|
||||
>>> S = dok_matrix((5, 5), dtype=np.float32)
|
||||
>>> for i in range(5):
|
||||
... for j in range(5):
|
||||
... S[i, j] = i + j # Update element
|
||||
|
||||
"""
|
||||
format = 'dok'
|
||||
|
||||
def __init__(self, arg1, shape=None, dtype=None, copy=False):
|
||||
dict.__init__(self)
|
||||
spmatrix.__init__(self)
|
||||
|
||||
self.dtype = getdtype(dtype, default=float)
|
||||
if isinstance(arg1, tuple) and isshape(arg1): # (M,N)
|
||||
M, N = arg1
|
||||
self._shape = check_shape((M, N))
|
||||
elif isspmatrix(arg1): # Sparse ctor
|
||||
if isspmatrix_dok(arg1) and copy:
|
||||
arg1 = arg1.copy()
|
||||
else:
|
||||
arg1 = arg1.todok()
|
||||
|
||||
if dtype is not None:
|
||||
arg1 = arg1.astype(dtype, copy=False)
|
||||
|
||||
dict.update(self, arg1)
|
||||
self._shape = check_shape(arg1.shape)
|
||||
self.dtype = arg1.dtype
|
||||
else: # Dense ctor
|
||||
try:
|
||||
arg1 = np.asarray(arg1)
|
||||
except Exception as e:
|
||||
raise TypeError('Invalid input format.') from e
|
||||
|
||||
if len(arg1.shape) != 2:
|
||||
raise TypeError('Expected rank <=2 dense array or matrix.')
|
||||
|
||||
d = self._coo_container(arg1, dtype=dtype).todok()
|
||||
dict.update(self, d)
|
||||
self._shape = check_shape(arg1.shape)
|
||||
self.dtype = d.dtype
|
||||
|
||||
def update(self, val):
|
||||
# Prevent direct usage of update
|
||||
raise NotImplementedError("Direct modification to dok_matrix element "
|
||||
"is not allowed.")
|
||||
|
||||
def _update(self, data):
|
||||
"""An update method for dict data defined for direct access to
|
||||
`dok_matrix` data. Main purpose is to be used for effcient conversion
|
||||
from other spmatrix classes. Has no checking if `data` is valid."""
|
||||
return dict.update(self, data)
|
||||
|
||||
def set_shape(self, shape):
|
||||
new_matrix = self.reshape(shape, copy=False).asformat(self.format)
|
||||
self.__dict__ = new_matrix.__dict__
|
||||
dict.clear(self)
|
||||
dict.update(self, new_matrix)
|
||||
|
||||
shape = property(fget=spmatrix.get_shape, fset=set_shape)
|
||||
|
||||
def getnnz(self, axis=None):
|
||||
if axis is not None:
|
||||
raise NotImplementedError("getnnz over an axis is not implemented "
|
||||
"for DOK format.")
|
||||
return dict.__len__(self)
|
||||
|
||||
def count_nonzero(self):
|
||||
return sum(x != 0 for x in self.values())
|
||||
|
||||
getnnz.__doc__ = spmatrix.getnnz.__doc__
|
||||
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
|
||||
|
||||
def __len__(self):
|
||||
return dict.__len__(self)
|
||||
|
||||
def get(self, key, default=0.):
|
||||
"""This overrides the dict.get method, providing type checking
|
||||
but otherwise equivalent functionality.
|
||||
"""
|
||||
try:
|
||||
i, j = key
|
||||
assert isintlike(i) and isintlike(j)
|
||||
except (AssertionError, TypeError, ValueError) as e:
|
||||
raise IndexError('Index must be a pair of integers.') from e
|
||||
if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]):
|
||||
raise IndexError('Index out of bounds.')
|
||||
return dict.get(self, key, default)
|
||||
|
||||
def _get_intXint(self, row, col):
|
||||
return dict.get(self, (row, col), self.dtype.type(0))
|
||||
|
||||
def _get_intXslice(self, row, col):
|
||||
return self._get_sliceXslice(slice(row, row+1), col)
|
||||
|
||||
def _get_sliceXint(self, row, col):
|
||||
return self._get_sliceXslice(row, slice(col, col+1))
|
||||
|
||||
def _get_sliceXslice(self, row, col):
|
||||
row_start, row_stop, row_step = row.indices(self.shape[0])
|
||||
col_start, col_stop, col_step = col.indices(self.shape[1])
|
||||
row_range = range(row_start, row_stop, row_step)
|
||||
col_range = range(col_start, col_stop, col_step)
|
||||
shape = (len(row_range), len(col_range))
|
||||
# Switch paths only when advantageous
|
||||
# (count the iterations in the loops, adjust for complexity)
|
||||
if len(self) >= 2 * shape[0] * shape[1]:
|
||||
# O(nr*nc) path: loop over <row x col>
|
||||
return self._get_columnXarray(row_range, col_range)
|
||||
# O(nnz) path: loop over entries of self
|
||||
newdok = self._dok_container(shape, dtype=self.dtype)
|
||||
for key in self.keys():
|
||||
i, ri = divmod(int(key[0]) - row_start, row_step)
|
||||
if ri != 0 or i < 0 or i >= shape[0]:
|
||||
continue
|
||||
j, rj = divmod(int(key[1]) - col_start, col_step)
|
||||
if rj != 0 or j < 0 or j >= shape[1]:
|
||||
continue
|
||||
x = dict.__getitem__(self, key)
|
||||
dict.__setitem__(newdok, (i, j), x)
|
||||
return newdok
|
||||
|
||||
def _get_intXarray(self, row, col):
|
||||
col = col.squeeze()
|
||||
return self._get_columnXarray([row], col)
|
||||
|
||||
def _get_arrayXint(self, row, col):
|
||||
row = row.squeeze()
|
||||
return self._get_columnXarray(row, [col])
|
||||
|
||||
def _get_sliceXarray(self, row, col):
|
||||
row = list(range(*row.indices(self.shape[0])))
|
||||
return self._get_columnXarray(row, col)
|
||||
|
||||
def _get_arrayXslice(self, row, col):
|
||||
col = list(range(*col.indices(self.shape[1])))
|
||||
return self._get_columnXarray(row, col)
|
||||
|
||||
def _get_columnXarray(self, row, col):
|
||||
# outer indexing
|
||||
newdok = self._dok_container((len(row), len(col)), dtype=self.dtype)
|
||||
|
||||
for i, r in enumerate(row):
|
||||
for j, c in enumerate(col):
|
||||
v = dict.get(self, (r, c), 0)
|
||||
if v:
|
||||
dict.__setitem__(newdok, (i, j), v)
|
||||
return newdok
|
||||
|
||||
def _get_arrayXarray(self, row, col):
|
||||
# inner indexing
|
||||
i, j = map(np.atleast_2d, np.broadcast_arrays(row, col))
|
||||
newdok = self._dok_container(i.shape, dtype=self.dtype)
|
||||
|
||||
for key in itertools.product(range(i.shape[0]), range(i.shape[1])):
|
||||
v = dict.get(self, (i[key], j[key]), 0)
|
||||
if v:
|
||||
dict.__setitem__(newdok, key, v)
|
||||
return newdok
|
||||
|
||||
def _set_intXint(self, row, col, x):
|
||||
key = (row, col)
|
||||
if x:
|
||||
dict.__setitem__(self, key, x)
|
||||
elif dict.__contains__(self, key):
|
||||
del self[key]
|
||||
|
||||
def _set_arrayXarray(self, row, col, x):
|
||||
row = list(map(int, row.ravel()))
|
||||
col = list(map(int, col.ravel()))
|
||||
x = x.ravel()
|
||||
dict.update(self, zip(zip(row, col), x))
|
||||
|
||||
for i in np.nonzero(x == 0)[0]:
|
||||
key = (row[i], col[i])
|
||||
if dict.__getitem__(self, key) == 0:
|
||||
# may have been superseded by later update
|
||||
del self[key]
|
||||
|
||||
def __add__(self, other):
|
||||
if isscalarlike(other):
|
||||
res_dtype = upcast_scalar(self.dtype, other)
|
||||
new = self._dok_container(self.shape, dtype=res_dtype)
|
||||
# Add this scalar to every element.
|
||||
M, N = self.shape
|
||||
for key in itertools.product(range(M), range(N)):
|
||||
aij = dict.get(self, (key), 0) + other
|
||||
if aij:
|
||||
new[key] = aij
|
||||
# new.dtype.char = self.dtype.char
|
||||
elif isspmatrix_dok(other):
|
||||
if other.shape != self.shape:
|
||||
raise ValueError("Matrix dimensions are not equal.")
|
||||
# We could alternatively set the dimensions to the largest of
|
||||
# the two matrices to be summed. Would this be a good idea?
|
||||
res_dtype = upcast(self.dtype, other.dtype)
|
||||
new = self._dok_container(self.shape, dtype=res_dtype)
|
||||
dict.update(new, self)
|
||||
with np.errstate(over='ignore'):
|
||||
dict.update(new,
|
||||
((k, new[k] + other[k]) for k in other.keys()))
|
||||
elif isspmatrix(other):
|
||||
csc = self.tocsc()
|
||||
new = csc + other
|
||||
elif isdense(other):
|
||||
new = self.todense() + other
|
||||
else:
|
||||
return NotImplemented
|
||||
return new
|
||||
|
||||
def __radd__(self, other):
|
||||
if isscalarlike(other):
|
||||
new = self._dok_container(self.shape, dtype=self.dtype)
|
||||
M, N = self.shape
|
||||
for key in itertools.product(range(M), range(N)):
|
||||
aij = dict.get(self, (key), 0) + other
|
||||
if aij:
|
||||
new[key] = aij
|
||||
elif isspmatrix_dok(other):
|
||||
if other.shape != self.shape:
|
||||
raise ValueError("Matrix dimensions are not equal.")
|
||||
new = self._dok_container(self.shape, dtype=self.dtype)
|
||||
dict.update(new, self)
|
||||
dict.update(new,
|
||||
((k, self[k] + other[k]) for k in other.keys()))
|
||||
elif isspmatrix(other):
|
||||
csc = self.tocsc()
|
||||
new = csc + other
|
||||
elif isdense(other):
|
||||
new = other + self.todense()
|
||||
else:
|
||||
return NotImplemented
|
||||
return new
|
||||
|
||||
def __neg__(self):
|
||||
if self.dtype.kind == 'b':
|
||||
raise NotImplementedError('Negating a sparse boolean matrix is not'
|
||||
' supported.')
|
||||
new = self._dok_container(self.shape, dtype=self.dtype)
|
||||
dict.update(new, ((k, -self[k]) for k in self.keys()))
|
||||
return new
|
||||
|
||||
def _mul_scalar(self, other):
|
||||
res_dtype = upcast_scalar(self.dtype, other)
|
||||
# Multiply this scalar by every element.
|
||||
new = self._dok_container(self.shape, dtype=res_dtype)
|
||||
dict.update(new, ((k, v * other) for k, v in self.items()))
|
||||
return new
|
||||
|
||||
def _mul_vector(self, other):
|
||||
# matrix * vector
|
||||
result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
|
||||
for (i, j), v in self.items():
|
||||
result[i] += v * other[j]
|
||||
return result
|
||||
|
||||
def _mul_multivector(self, other):
|
||||
# matrix * multivector
|
||||
result_shape = (self.shape[0], other.shape[1])
|
||||
result_dtype = upcast(self.dtype, other.dtype)
|
||||
result = np.zeros(result_shape, dtype=result_dtype)
|
||||
for (i, j), v in self.items():
|
||||
result[i,:] += v * other[j,:]
|
||||
return result
|
||||
|
||||
def __imul__(self, other):
|
||||
if isscalarlike(other):
|
||||
dict.update(self, ((k, v * other) for k, v in self.items()))
|
||||
return self
|
||||
return NotImplemented
|
||||
|
||||
def __truediv__(self, other):
|
||||
if isscalarlike(other):
|
||||
res_dtype = upcast_scalar(self.dtype, other)
|
||||
new = self._dok_container(self.shape, dtype=res_dtype)
|
||||
dict.update(new, ((k, v / other) for k, v in self.items()))
|
||||
return new
|
||||
return self.tocsr() / other
|
||||
|
||||
def __itruediv__(self, other):
    """In-place scalar division; anything non-scalar is unsupported."""
    if not isscalarlike(other):
        return NotImplemented
    dict.update(self, ((key, value / other) for key, value in self.items()))
    return self
|
||||
|
||||
def __reduce__(self):
    # this approach is necessary because __setstate__ is called after
    # __setitem__ upon unpickling and since __init__ is not called there
    # is no shape attribute hence it is not possible to unpickle it.
    # Delegating to plain-dict pickling sidesteps that ordering problem.
    return dict.__reduce__(self)
|
||||
|
||||
# What should len(sparse) return? For consistency with dense matrices,
|
||||
# perhaps it should be the number of rows? For now it returns the number
|
||||
# of non-zeros.
|
||||
|
||||
def transpose(self, axes=None, copy=False):
    if axes is not None:
        raise ValueError("Sparse matrices do not support "
                         "an 'axes' parameter because swapping "
                         "dimensions is the only logical permutation.")

    num_rows, num_cols = self.shape
    transposed = self._dok_container((num_cols, num_rows),
                                     dtype=self.dtype, copy=copy)
    # Swap each (row, col) key; values are shared, not copied.
    dict.update(transposed,
                (((c, r), value) for (r, c), value in self.items()))
    return transposed

transpose.__doc__ = spmatrix.transpose.__doc__
|
||||
|
||||
def conjtransp(self):
    """Return the conjugate transpose."""
    num_rows, num_cols = self.shape
    result = self._dok_container((num_cols, num_rows), dtype=self.dtype)
    # Swap keys and conjugate each stored value in a single pass.
    dict.update(result,
                (((c, r), np.conj(value)) for (r, c), value in self.items()))
    return result
|
||||
|
||||
def copy(self):
    duplicate = self._dok_container(self.shape, dtype=self.dtype)
    # A DOK matrix is a dict subclass, so a raw dict update clones the entries.
    dict.update(duplicate, self)
    return duplicate

copy.__doc__ = spmatrix.copy.__doc__
|
||||
|
||||
def tocoo(self, copy=False):
    if self.nnz == 0:
        return self._coo_container(self.shape, dtype=self.dtype)

    count = self.nnz
    idx_dtype = get_index_dtype(maxval=max(self.shape))
    # fromiter with an explicit count preallocates the output arrays.
    data = np.fromiter(self.values(), dtype=self.dtype, count=count)
    row = np.fromiter((r for r, _ in self.keys()), dtype=idx_dtype, count=count)
    col = np.fromiter((c for _, c in self.keys()), dtype=idx_dtype, count=count)
    result = self._coo_container(
        (data, (row, col)), shape=self.shape, dtype=self.dtype
    )
    # dict keys are unique, so no duplicate coordinates can occur.
    result.has_canonical_format = True
    return result

tocoo.__doc__ = spmatrix.tocoo.__doc__
|
||||
|
||||
def todok(self, copy=False):
    """Already DOK: return self, or a copy of self when requested."""
    return self.copy() if copy else self
|
||||
|
||||
todok.__doc__ = spmatrix.todok.__doc__
|
||||
|
||||
def tocsc(self, copy=False):
    # Route through COO; the intermediate COO is never shared, so it is
    # built without copying and only the final CSC honors `copy`.
    return self.tocoo(copy=False).tocsc(copy=copy)

tocsc.__doc__ = spmatrix.tocsc.__doc__
|
||||
|
||||
def resize(self, *shape):
    shape = check_shape(shape)
    new_rows, new_cols = shape
    cur_rows, cur_cols = self.shape
    if new_rows < cur_rows or new_cols < cur_cols:
        # Shrinking: drop every stored entry outside the new bounds.
        # Iterate over a snapshot since we delete while scanning.
        for key in list(self.keys()):
            if key[0] >= new_rows or key[1] >= new_cols:
                del self[key]
    self._shape = shape

resize.__doc__ = spmatrix.resize.__doc__
|
||||
|
||||
|
||||
def isspmatrix_dok(x):
    """Is x of dok_matrix type?

    Parameters
    ----------
    x
        object to check for being a dok matrix

    Returns
    -------
    bool
        True if x is a dok matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import dok_matrix, isspmatrix_dok
    >>> isspmatrix_dok(dok_matrix([[5]]))
    True

    >>> from scipy.sparse import dok_matrix, csr_matrix, isspmatrix_dok
    >>> isspmatrix_dok(csr_matrix([[5]]))
    False
    """
    # Imported locally to avoid a circular import at module load time.
    from ._arrays import dok_array
    # One isinstance call with a type tuple replaces the chained `or`.
    return isinstance(x, (dok_matrix, dok_array))
|
||||
169
.CondaPkg/env/Lib/site-packages/scipy/sparse/_extract.py
vendored
Normal file
169
.CondaPkg/env/Lib/site-packages/scipy/sparse/_extract.py
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
"""Functions to extract parts of sparse matrices
|
||||
"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['find', 'tril', 'triu']
|
||||
|
||||
|
||||
from ._coo import coo_matrix
|
||||
|
||||
|
||||
def find(A):
    """Return the indices and values of the nonzero elements of a matrix

    Parameters
    ----------
    A : dense or sparse matrix
        Matrix whose nonzero elements are desired.

    Returns
    -------
    (I,J,V) : tuple of arrays
        I,J, and V contain the row indices, column indices, and values
        of the nonzero matrix entries.

    Examples
    --------
    >>> from scipy.sparse import csr_matrix, find
    >>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]])
    >>> find(A)
    (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7.,  8.,  9.]))

    """
    # Copy into COO so that duplicate coordinates can be merged without
    # mutating the caller's matrix.
    coo = coo_matrix(A, copy=True)
    coo.sum_duplicates()
    # Explicitly stored zeros are not "nonzero elements"; mask them out.
    keep = coo.data != 0
    return coo.row[keep], coo.col[keep], coo.data[keep]
|
||||
|
||||
|
||||
def tril(A, k=0, format=None):
    """Return the lower triangular portion of a matrix in sparse format

    Returns the elements on or below the k-th diagonal of the matrix A.
        - k = 0 corresponds to the main diagonal
        - k > 0 is above the main diagonal
        - k < 0 is below the main diagonal

    Parameters
    ----------
    A : dense or sparse matrix
        Matrix whose lower triangular portion is desired.
    k : integer : optional
        The top-most diagonal of the lower triangle.
    format : string
        Sparse format of the result, e.g. format="csr", etc.

    Returns
    -------
    L : sparse matrix
        Lower triangular portion of A in sparse format.

    See Also
    --------
    triu : upper triangle in sparse format

    Examples
    --------
    >>> from scipy.sparse import csr_matrix, tril
    >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
    ...                dtype='int32')
    >>> tril(A).toarray()
    array([[1, 0, 0, 0, 0],
           [4, 5, 0, 0, 0],
           [0, 0, 8, 0, 0]])
    >>> tril(A, k=1).toarray()
    array([[1, 2, 0, 0, 0],
           [4, 5, 0, 0, 0],
           [0, 0, 8, 9, 0]])
    """
    # In COO the triangular selection is one vectorized comparison over
    # the stored coordinates; no per-element Python loop is needed.
    coo = coo_matrix(A, copy=False)
    keep = coo.row + k >= coo.col
    return _masked_coo(coo, keep).asformat(format)
|
||||
|
||||
|
||||
def triu(A, k=0, format=None):
    """Return the upper triangular portion of a matrix in sparse format

    Returns the elements on or above the k-th diagonal of the matrix A.
        - k = 0 corresponds to the main diagonal
        - k > 0 is above the main diagonal
        - k < 0 is below the main diagonal

    Parameters
    ----------
    A : dense or sparse matrix
        Matrix whose upper triangular portion is desired.
    k : integer : optional
        The bottom-most diagonal of the upper triangle.
    format : string
        Sparse format of the result, e.g. format="csr", etc.

    Returns
    -------
    L : sparse matrix
        Upper triangular portion of A in sparse format.

    See Also
    --------
    tril : lower triangle in sparse format

    Examples
    --------
    >>> from scipy.sparse import csr_matrix, triu
    >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
    ...                dtype='int32')
    >>> triu(A).toarray()
    array([[1, 2, 0, 0, 3],
           [0, 5, 0, 6, 7],
           [0, 0, 8, 9, 0]])
    >>> triu(A, k=1).toarray()
    array([[0, 2, 0, 0, 3],
           [0, 0, 0, 6, 7],
           [0, 0, 0, 9, 0]])
    """
    # Same vectorized-mask strategy as tril, with the comparison flipped.
    coo = coo_matrix(A, copy=False)
    keep = coo.row + k <= coo.col
    return _masked_coo(coo, keep).asformat(format)
|
||||
|
||||
|
||||
def _masked_coo(A, mask):
    """Build a new COO matrix from the entries of *A* selected by *mask*."""
    return coo_matrix((A.data[mask], (A.row[mask], A.col[mask])),
                      shape=A.shape, dtype=A.dtype)
|
||||
389
.CondaPkg/env/Lib/site-packages/scipy/sparse/_index.py
vendored
Normal file
389
.CondaPkg/env/Lib/site-packages/scipy/sparse/_index.py
vendored
Normal file
@@ -0,0 +1,389 @@
|
||||
"""Indexing mixin for sparse matrix classes.
|
||||
"""
|
||||
import numpy as np
|
||||
from ._sputils import isintlike
|
||||
|
||||
# Integer types accepted as scalar indices. The historical try/except that
# probed for Python 2's `long` is gone: this file is Python-3-only (it uses
# `raise ... from e`), so `long` would always raise NameError and the
# fallback branch was the only one ever taken.
INT_TYPES = (int, np.integer)
|
||||
|
||||
|
||||
def _broadcast_arrays(a, b):
|
||||
"""
|
||||
Same as np.broadcast_arrays(a, b) but old writeability rules.
|
||||
|
||||
NumPy >= 1.17.0 transitions broadcast_arrays to return
|
||||
read-only arrays. Set writeability explicitly to avoid warnings.
|
||||
Retain the old writeability rules, as our Cython code assumes
|
||||
the old behavior.
|
||||
"""
|
||||
x, y = np.broadcast_arrays(a, b)
|
||||
x.flags.writeable = a.flags.writeable
|
||||
y.flags.writeable = b.flags.writeable
|
||||
return x, y
|
||||
|
||||
|
||||
class IndexMixin:
    """
    This class provides common dispatching and validation logic for indexing.

    Subclasses implement the format-specific ``_get_*`` / ``_set_*`` hooks;
    ``__getitem__``/``__setitem__`` here normalize the key and dispatch on
    the (row, col) type combination: int, slice, or integer index array.
    """
    def _raise_on_1d_array_slice(self):
        """We do not currently support 1D sparse arrays.

        This function is called each time that a 1D array would
        result, raising an error instead.

        Once 1D sparse arrays are implemented, it should be removed.
        """
        if self._is_array:
            raise NotImplementedError(
                'We have not yet implemented 1D sparse slices; '
                'please index using explicit indices, e.g. `x[:, [0]]`'
            )

    def __getitem__(self, key):
        row, col = self._validate_indices(key)

        # Dispatch to specialized methods.
        if isinstance(row, INT_TYPES):
            if isinstance(col, INT_TYPES):
                return self._get_intXint(row, col)
            elif isinstance(col, slice):
                self._raise_on_1d_array_slice()
                return self._get_intXslice(row, col)
            elif col.ndim == 1:
                self._raise_on_1d_array_slice()
                return self._get_intXarray(row, col)
            elif col.ndim == 2:
                return self._get_intXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif isinstance(row, slice):
            if isinstance(col, INT_TYPES):
                self._raise_on_1d_array_slice()
                return self._get_sliceXint(row, col)
            elif isinstance(col, slice):
                # A[:, :] is just a copy of the whole matrix.
                if row == slice(None) and row == col:
                    return self.copy()
                return self._get_sliceXslice(row, col)
            elif col.ndim == 1:
                return self._get_sliceXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif row.ndim == 1:
            if isinstance(col, INT_TYPES):
                self._raise_on_1d_array_slice()
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                return self._get_arrayXslice(row, col)
        else:  # row.ndim == 2
            if isinstance(col, INT_TYPES):
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                raise IndexError('index results in >2 dimensions')
            elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
                # special case for outer indexing
                return self._get_columnXarray(row[:,0], col.ravel())

        # The only remaining case is inner (fancy) indexing
        row, col = _broadcast_arrays(row, col)
        if row.shape != col.shape:
            raise IndexError('number of row and column indices differ')
        if row.size == 0:
            # Empty fancy index: result is an empty matrix of matching shape.
            return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype)
        return self._get_arrayXarray(row, col)

    def __setitem__(self, key, x):
        row, col = self._validate_indices(key)

        # Scalar target: x must reduce to a single value.
        if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES):
            x = np.asarray(x, dtype=self.dtype)
            if x.size != 1:
                raise ValueError('Trying to assign a sequence to an item')
            self._set_intXint(row, col, x.flat[0])
            return

        # Normalize row/col to broadcastable integer arrays:
        # slices become column/row vectors so they broadcast against each other.
        if isinstance(row, slice):
            row = np.arange(*row.indices(self.shape[0]))[:, None]
        else:
            row = np.atleast_1d(row)

        if isinstance(col, slice):
            col = np.arange(*col.indices(self.shape[1]))[None, :]
            if row.ndim == 1:
                row = row[:, None]
        else:
            col = np.atleast_1d(col)

        i, j = _broadcast_arrays(row, col)
        if i.shape != j.shape:
            raise IndexError('number of row and column indices differ')

        from ._base import isspmatrix
        if isspmatrix(x):
            if i.ndim == 1:
                # Inner indexing, so treat them like row vectors.
                i = i[None]
                j = j[None]
            # A 1-row or 1-column sparse x may broadcast across the target.
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError('shape mismatch in assignment')
            if x.shape[0] == 0 or x.shape[1] == 0:
                return
            x = x.tocoo(copy=True)
            x.sum_duplicates()
            self._set_arrayXarray_sparse(i, j, x)
        else:
            # Make x and i into the same shape
            x = np.asarray(x, dtype=self.dtype)
            if x.squeeze().shape != i.squeeze().shape:
                x = np.broadcast_to(x, i.shape)
            if x.size == 0:
                return
            x = x.reshape(i.shape)
            self._set_arrayXarray(i, j, x)

    def _validate_indices(self, key):
        # Normalize the key into (row, col), converting negative scalar
        # indices and validating bounds; array-like indices go through
        # _asindices.
        M, N = self.shape
        row, col = _unpack_index(key)

        if isintlike(row):
            row = int(row)
            if row < -M or row >= M:
                raise IndexError('row index (%d) out of range' % row)
            if row < 0:
                row += M
        elif not isinstance(row, slice):
            row = self._asindices(row, M)

        if isintlike(col):
            col = int(col)
            if col < -N or col >= N:
                raise IndexError('column index (%d) out of range' % col)
            if col < 0:
                col += N
        elif not isinstance(col, slice):
            col = self._asindices(col, N)

        return row, col

    def _asindices(self, idx, length):
        """Convert `idx` to a valid index for an axis with a given length.

        Subclasses that need special validation can override this method.
        """
        try:
            x = np.asarray(idx)
        except (ValueError, TypeError, MemoryError) as e:
            raise IndexError('invalid index') from e

        if x.ndim not in (1, 2):
            raise IndexError('Index dimension must be 1 or 2')

        if x.size == 0:
            return x

        # Check bounds
        max_indx = x.max()
        if max_indx >= length:
            raise IndexError('index (%d) out of range' % max_indx)

        min_indx = x.min()
        if min_indx < 0:
            if min_indx < -length:
                raise IndexError('index (%d) out of range' % min_indx)
            # Copy before wrapping negatives so the caller's array (or a
            # shared buffer) is never mutated.
            if x is idx or not x.flags.owndata:
                x = x.copy()
            x[x < 0] += length
        return x

    def getrow(self, i):
        """Return a copy of row i of the matrix, as a (1 x n) row vector.
        """
        M, N = self.shape
        i = int(i)
        if i < -M or i >= M:
            raise IndexError('index (%d) out of range' % i)
        if i < 0:
            i += M
        return self._get_intXslice(i, slice(None))

    def getcol(self, i):
        """Return a copy of column i of the matrix, as a (m x 1) column vector.
        """
        M, N = self.shape
        i = int(i)
        if i < -N or i >= N:
            raise IndexError('index (%d) out of range' % i)
        if i < 0:
            i += N
        return self._get_sliceXint(slice(None), i)

    # Format-specific hooks; concrete subclasses override the ones their
    # dispatch paths can reach.
    def _get_intXint(self, row, col):
        raise NotImplementedError()

    def _get_intXarray(self, row, col):
        raise NotImplementedError()

    def _get_intXslice(self, row, col):
        raise NotImplementedError()

    def _get_sliceXint(self, row, col):
        raise NotImplementedError()

    def _get_sliceXslice(self, row, col):
        raise NotImplementedError()

    def _get_sliceXarray(self, row, col):
        raise NotImplementedError()

    def _get_arrayXint(self, row, col):
        raise NotImplementedError()

    def _get_arrayXslice(self, row, col):
        raise NotImplementedError()

    def _get_columnXarray(self, row, col):
        raise NotImplementedError()

    def _get_arrayXarray(self, row, col):
        raise NotImplementedError()

    def _set_intXint(self, row, col, x):
        raise NotImplementedError()

    def _set_arrayXarray(self, row, col, x):
        raise NotImplementedError()

    def _set_arrayXarray_sparse(self, row, col, x):
        # Fall back to densifying x
        x = np.asarray(x.toarray(), dtype=self.dtype)
        x, _ = _broadcast_arrays(x, row)
        self._set_arrayXarray(row, col, x)
|
||||
|
||||
|
||||
def _unpack_index(index):
    """ Parse index. Always return a tuple of the form (row, col).
    Valid type for row/col is integer, slice, or array of integers.
    """
    # First, check if indexing with single boolean matrix.
    from ._base import spmatrix, isspmatrix
    if (isinstance(index, (spmatrix, np.ndarray)) and
            index.ndim == 2 and index.dtype.kind == 'b'):
        # A 2-D boolean mask selects individual elements by coordinates.
        return index.nonzero()

    # Parse any ellipses.
    index = _check_ellipsis(index)

    # Next, parse the tuple or object
    if isinstance(index, tuple):
        if len(index) == 2:
            row, col = index
        elif len(index) == 1:
            # A 1-tuple indexes rows only; columns default to "all".
            row, col = index[0], slice(None)
        else:
            raise IndexError('invalid number of indices')
    else:
        idx = _compatible_boolean_index(index)
        if idx is None:
            row, col = index, slice(None)
        elif idx.ndim < 2:
            # 1-D boolean mask over rows.
            return _boolean_index_to_array(idx), slice(None)
        elif idx.ndim == 2:
            return idx.nonzero()
    # Next, check for validity and transform the index as needed.
    if isspmatrix(row) or isspmatrix(col):
        # Supporting sparse boolean indexing with both row and col does
        # not work because spmatrix.ndim is always 2.
        raise IndexError(
            'Indexing with sparse matrices is not supported '
            'except boolean indexing where matrix and index '
            'are equal shapes.')
    # Boolean row/col components become integer index arrays.
    bool_row = _compatible_boolean_index(row)
    bool_col = _compatible_boolean_index(col)
    if bool_row is not None:
        row = _boolean_index_to_array(bool_row)
    if bool_col is not None:
        col = _boolean_index_to_array(bool_col)
    return row, col
|
||||
|
||||
|
||||
def _check_ellipsis(index):
|
||||
"""Process indices with Ellipsis. Returns modified index."""
|
||||
if index is Ellipsis:
|
||||
return (slice(None), slice(None))
|
||||
|
||||
if not isinstance(index, tuple):
|
||||
return index
|
||||
|
||||
# TODO: Deprecate this multiple-ellipsis handling,
|
||||
# as numpy no longer supports it.
|
||||
|
||||
# Find first ellipsis.
|
||||
for j, v in enumerate(index):
|
||||
if v is Ellipsis:
|
||||
first_ellipsis = j
|
||||
break
|
||||
else:
|
||||
return index
|
||||
|
||||
# Try to expand it using shortcuts for common cases
|
||||
if len(index) == 1:
|
||||
return (slice(None), slice(None))
|
||||
if len(index) == 2:
|
||||
if first_ellipsis == 0:
|
||||
if index[1] is Ellipsis:
|
||||
return (slice(None), slice(None))
|
||||
return (slice(None), index[1])
|
||||
return (index[0], slice(None))
|
||||
|
||||
# Expand it using a general-purpose algorithm
|
||||
tail = []
|
||||
for v in index[first_ellipsis+1:]:
|
||||
if v is not Ellipsis:
|
||||
tail.append(v)
|
||||
nd = first_ellipsis + len(tail)
|
||||
nslice = max(0, 2 - nd)
|
||||
return index[:first_ellipsis] + (slice(None),)*nslice + tuple(tail)
|
||||
|
||||
|
||||
def _maybe_bool_ndarray(idx):
|
||||
"""Returns a compatible array if elements are boolean.
|
||||
"""
|
||||
idx = np.asanyarray(idx)
|
||||
if idx.dtype.kind == 'b':
|
||||
return idx
|
||||
return None
|
||||
|
||||
|
||||
def _first_element_bool(idx, max_dim=2):
|
||||
"""Returns True if first element of the incompatible
|
||||
array type is boolean.
|
||||
"""
|
||||
if max_dim < 1:
|
||||
return None
|
||||
try:
|
||||
first = next(iter(idx), None)
|
||||
except TypeError:
|
||||
return None
|
||||
if isinstance(first, bool):
|
||||
return True
|
||||
return _first_element_bool(first, max_dim-1)
|
||||
|
||||
|
||||
def _compatible_boolean_index(idx):
    """Returns a boolean index array that can be converted to
    integer array. Returns None if no such array exists.
    """
    # Presence of attribute `ndim` indicates a compatible array type;
    # otherwise peek at the first element(s) for a bool.
    if not (hasattr(idx, 'ndim') or _first_element_bool(idx)):
        return None
    return _maybe_bool_ndarray(idx)
|
||||
|
||||
|
||||
def _boolean_index_to_array(idx):
|
||||
if idx.ndim > 1:
|
||||
raise IndexError('invalid index shape')
|
||||
return np.where(idx)[0]
|
||||
547
.CondaPkg/env/Lib/site-packages/scipy/sparse/_lil.py
vendored
Normal file
547
.CondaPkg/env/Lib/site-packages/scipy/sparse/_lil.py
vendored
Normal file
@@ -0,0 +1,547 @@
|
||||
"""List of Lists sparse matrix class
|
||||
"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['lil_matrix', 'isspmatrix_lil']
|
||||
|
||||
from bisect import bisect_left
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._base import spmatrix, isspmatrix
|
||||
from ._index import IndexMixin, INT_TYPES, _broadcast_arrays
|
||||
from ._sputils import (getdtype, isshape, isscalarlike, upcast_scalar,
|
||||
get_index_dtype, check_shape, check_reshape_kwargs)
|
||||
from . import _csparsetools
|
||||
|
||||
|
||||
class lil_matrix(spmatrix, IndexMixin):
|
||||
"""Row-based LIst of Lists sparse matrix
|
||||
|
||||
This is a structure for constructing sparse matrices incrementally.
|
||||
Note that inserting a single item can take linear time in the worst case;
|
||||
to construct a matrix efficiently, make sure the items are pre-sorted by
|
||||
index, per row.
|
||||
|
||||
This can be instantiated in several ways:
|
||||
lil_matrix(D)
|
||||
with a dense matrix or rank-2 ndarray D
|
||||
|
||||
lil_matrix(S)
|
||||
with another sparse matrix S (equivalent to S.tolil())
|
||||
|
||||
lil_matrix((M, N), [dtype])
|
||||
to construct an empty matrix with shape (M, N)
|
||||
dtype is optional, defaulting to dtype='d'.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
dtype : dtype
|
||||
Data type of the matrix
|
||||
shape : 2-tuple
|
||||
Shape of the matrix
|
||||
ndim : int
|
||||
Number of dimensions (this is always 2)
|
||||
nnz
|
||||
Number of stored values, including explicit zeros
|
||||
data
|
||||
LIL format data array of the matrix
|
||||
rows
|
||||
LIL format row index array of the matrix
|
||||
|
||||
Notes
|
||||
-----
|
||||
Sparse matrices can be used in arithmetic operations: they support
|
||||
addition, subtraction, multiplication, division, and matrix power.
|
||||
|
||||
Advantages of the LIL format
|
||||
- supports flexible slicing
|
||||
- changes to the matrix sparsity structure are efficient
|
||||
|
||||
Disadvantages of the LIL format
|
||||
- arithmetic operations LIL + LIL are slow (consider CSR or CSC)
|
||||
- slow column slicing (consider CSC)
|
||||
- slow matrix vector products (consider CSR or CSC)
|
||||
|
||||
Intended Usage
|
||||
- LIL is a convenient format for constructing sparse matrices
|
||||
- once a matrix has been constructed, convert to CSR or
|
||||
CSC format for fast arithmetic and matrix vector operations
|
||||
- consider using the COO format when constructing large matrices
|
||||
|
||||
Data Structure
|
||||
- An array (``self.rows``) of rows, each of which is a sorted
|
||||
list of column indices of non-zero elements.
|
||||
- The corresponding nonzero values are stored in similar
|
||||
fashion in ``self.data``.
|
||||
|
||||
|
||||
"""
|
||||
format = 'lil'
|
||||
|
||||
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    # arg1 may be: another sparse matrix, a (M, N) shape tuple, or a
    # dense array-like; each branch ends with self.rows / self.data set.
    spmatrix.__init__(self)
    self.dtype = getdtype(dtype, arg1, default=float)

    # First get the shape
    if isspmatrix(arg1):
        # Copy only when the source is already LIL and a copy was asked for;
        # otherwise tolil() already produces a fresh object.
        if isspmatrix_lil(arg1) and copy:
            A = arg1.copy()
        else:
            A = arg1.tolil()

        if dtype is not None:
            A = A.astype(dtype, copy=False)

        self._shape = check_shape(A.shape)
        self.dtype = A.dtype
        self.rows = A.rows
        self.data = A.data
    elif isinstance(arg1,tuple):
        if isshape(arg1):
            if shape is not None:
                raise ValueError('invalid use of shape parameter')
            M, N = arg1
            self._shape = check_shape((M, N))
            # One independent (indices, values) list pair per row.
            self.rows = np.empty((M,), dtype=object)
            self.data = np.empty((M,), dtype=object)
            for i in range(M):
                self.rows[i] = []
                self.data[i] = []
        else:
            raise TypeError('unrecognized lil_matrix constructor usage')
    else:
        # assume A is dense
        try:
            A = self._ascontainer(arg1)
        except TypeError as e:
            raise TypeError('unsupported matrix type') from e
        else:
            # Convert via CSR, which handles dense input efficiently.
            A = self._csr_container(A, dtype=dtype).tolil()

        self._shape = check_shape(A.shape)
        self.dtype = A.dtype
        self.rows = A.rows
        self.data = A.data
|
||||
|
||||
def __iadd__(self,other):
    # In-place add: compute self + other, then write it back through
    # full-matrix assignment (__setitem__ has a fast whole-matrix path).
    self[:,:] = self + other
    return self
|
||||
|
||||
def __isub__(self,other):
    # In-place subtract: same write-back strategy as __iadd__.
    self[:,:] = self - other
    return self
|
||||
|
||||
def __imul__(self,other):
    """In-place scalar multiplication; non-scalars are unsupported."""
    if not isscalarlike(other):
        return NotImplemented
    self[:,:] = self * other
    return self
|
||||
|
||||
def __itruediv__(self,other):
    """In-place scalar division; non-scalars are unsupported."""
    if not isscalarlike(other):
        return NotImplemented
    self[:,:] = self / other
    return self
|
||||
|
||||
# Whenever the dimensions change, empty lists should be created for each
|
||||
# row
|
||||
|
||||
def getnnz(self, axis=None):
    """Number of stored values: total, or per column (axis=0) / row (axis=1)."""
    if axis is None:
        return sum(len(rowvals) for rowvals in self.data)
    if axis < 0:
        axis += 2
    if axis == 0:
        # Column counts: tally each stored column index across all rows.
        counts = np.zeros(self.shape[1], dtype=np.intp)
        for col_indices in self.rows:
            counts[col_indices] += 1
        return counts
    if axis == 1:
        return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
    raise ValueError('axis out of bounds')
|
||||
|
||||
def count_nonzero(self):
    """Number of stored values that are actually nonzero."""
    total = 0
    for rowvals in self.data:
        total += np.count_nonzero(rowvals)
    return total
|
||||
|
||||
getnnz.__doc__ = spmatrix.getnnz.__doc__
|
||||
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
|
||||
|
||||
def __str__(self):
    # Triplet-style listing: one "  (i, j)\tvalue" line per stored entry,
    # in row-major order; the trailing newline is stripped.
    val = ''
    for i, row in enumerate(self.rows):
        for pos, j in enumerate(row):
            val += "  %s\t%s\n" % (str((i, j)), str(self.data[i][pos]))
    return val[:-1]
|
||||
|
||||
def getrowview(self, i):
    """Returns a view of the 'i'th row (without copying).
    """
    view = self._lil_container((1, self.shape[1]), dtype=self.dtype)
    # Share the underlying lists: mutating the view mutates self.
    view.rows[0] = self.rows[i]
    view.data[0] = self.data[i]
    return view
|
||||
|
||||
def getrow(self, i):
    """Returns a copy of the 'i'th row.
    """
    num_rows, num_cols = self.shape
    if i < 0:
        i += num_rows
    if not (0 <= i < num_rows):
        raise IndexError('row index out of bounds')
    result = self._lil_container((1, num_cols), dtype=self.dtype)
    # Shallow-copy the row's lists so the result is independent of self.
    result.rows[0] = list(self.rows[i])
    result.data[0] = list(self.data[i])
    return result
|
||||
|
||||
def __getitem__(self, key):
    # Fast path for simple (int, int) indexing.
    if (isinstance(key, tuple) and len(key) == 2 and
            isinstance(key[0], INT_TYPES) and
            isinstance(key[1], INT_TYPES)):
        # lil_get1 handles validation for us.
        return self._get_intXint(*key)
    # Everything else takes the normal path (IndexMixin dispatch).
    return IndexMixin.__getitem__(self, key)
|
||||
|
||||
def _asindices(self, idx, N):
|
||||
# LIL routines handle bounds-checking for us, so don't do it here.
|
||||
try:
|
||||
x = np.asarray(idx)
|
||||
except (ValueError, TypeError, MemoryError) as e:
|
||||
raise IndexError('invalid index') from e
|
||||
if x.ndim not in (1, 2):
|
||||
raise IndexError('Index dimension must be <= 2')
|
||||
return x
|
||||
|
||||
def _get_intXint(self, row, col):
    # Scalar lookup via the Cython helper (which also bounds-checks);
    # wrap the raw value in this matrix's scalar dtype.
    v = _csparsetools.lil_get1(self.shape[0], self.shape[1], self.rows,
                               self.data, row, col)
    return self.dtype.type(v)
|
||||
|
||||
def _get_sliceXint(self, row, col):
    # Row slice x single column: reuse the row-range fast path with a
    # one-column slice.
    row = range(*row.indices(self.shape[0]))
    return self._get_row_ranges(row, slice(col, col+1))
|
||||
|
||||
def _get_arrayXint(self, row, col):
    # Row array x single column: squeeze to 1-D and use the range path.
    row = row.squeeze()
    return self._get_row_ranges(row, slice(col, col+1))
|
||||
|
||||
def _get_intXslice(self, row, col):
    # Single row x column slice.
    return self._get_row_ranges((row,), col)
|
||||
|
||||
def _get_sliceXslice(self, row, col):
    # Row slice x column slice.
    row = range(*row.indices(self.shape[0]))
    return self._get_row_ranges(row, col)
|
||||
|
||||
def _get_arrayXslice(self, row, col):
    # Row array x column slice.
    return self._get_row_ranges(row, col)
|
||||
|
||||
def _get_intXarray(self, row, col):
    # Single row x column array: promote the row to a 1-element array
    # and fall through to outer indexing.
    row = np.array(row, dtype=col.dtype, ndmin=1)
    return self._get_columnXarray(row, col)
|
||||
|
||||
def _get_sliceXarray(self, row, col):
    # Row slice x column array: materialize the slice, then outer-index.
    row = np.arange(*row.indices(self.shape[0]))
    return self._get_columnXarray(row, col)
|
||||
|
||||
def _get_columnXarray(self, row, col):
    # outer indexing: broadcast rows (as a column vector) against cols
    # to form the full index grid, then inner-index.
    row, col = _broadcast_arrays(row[:,None], col)
    return self._get_arrayXarray(row, col)
|
||||
|
||||
def _get_arrayXarray(self, row, col):
    # inner indexing: element-wise gather via the Cython fancy-get helper,
    # after converting the indices to memoryview-compatible arrays.
    i, j = map(np.atleast_2d, _prepare_index_for_memoryview(row, col))
    new = self._lil_container(i.shape, dtype=self.dtype)
    _csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
                                self.rows, self.data,
                                new.rows, new.data,
                                i, j)
    return new
|
||||
|
||||
def _get_row_ranges(self, rows, col_slice):
    """
    Fast path for indexing in the case where column index is slice.

    This gains performance improvement over brute force by more
    efficient skipping of zeros, by accessing the elements
    column-wise in order.

    Parameters
    ----------
    rows : sequence or range
        Rows indexed. If range, must be within valid bounds.
    col_slice : slice
        Columns indexed

    """
    # Resolve the slice against the column count once, up front.
    j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
    col_range = range(j_start, j_stop, j_stride)
    nj = len(col_range)
    new = self._lil_container((len(rows), nj), dtype=self.dtype)

    # The Cython helper fills new.rows/new.data row by row.
    _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
                                     self.rows, self.data,
                                     new.rows, new.data,
                                     rows,
                                     j_start, j_stop, j_stride, nj)

    return new
|
||||
|
||||
def _set_intXint(self, row, col, x):
    # Scalar store via the Cython helper (keeps the row lists sorted).
    _csparsetools.lil_insert(self.shape[0], self.shape[1], self.rows,
                             self.data, row, col, x)
|
||||
|
||||
def _set_arrayXarray(self, row, col, x):
    # Normalize index/data arrays to writable, 2-D, memoryview-compatible
    # form, then delegate the element-wise assignment to the Cython
    # fancy-set kernel.
    i, j, x = map(np.atleast_2d, _prepare_index_for_memoryview(row, col, x))
    _csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
                                self.rows, self.data,
                                i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
    """Assign a sparse block; whole-matrix assignment is special-cased."""
    # Special case: full matrix assignment — adopt the converted
    # matrix's internals wholesale instead of densifying.
    full_assignment = (x.shape == self.shape and
                       isinstance(row, slice) and row == slice(None) and
                       isinstance(col, slice) and col == slice(None))
    if full_assignment:
        converted = self._lil_container(x, dtype=self.dtype)
        self.rows = converted.rows
        self.data = converted.data
        return
    # Fall back to densifying x and reusing the dense fancy-set path.
    dense = np.asarray(x.toarray(), dtype=self.dtype)
    dense, _ = _broadcast_arrays(dense, row)
    self._set_arrayXarray(row, col, dense)
def __setitem__(self, key, x):
    """Assign ``x`` at ``key``, with a fast path for (int, int) keys."""
    scalar_key = (isinstance(key, tuple) and len(key) == 2 and
                  isinstance(key[0], INT_TYPES) and
                  isinstance(key[1], INT_TYPES))
    if scalar_key:
        value = self.dtype.type(x)
        # dtype.type() on a sequence yields an array; reject it rather
        # than silently storing the wrong thing.
        if value.size > 1:
            raise ValueError("Trying to assign a sequence to an item")
        return self._set_intXint(key[0], key[1], value)
    # General indexing goes through the shared IndexMixin machinery.
    IndexMixin.__setitem__(self, key, x)
def _mul_scalar(self, other):
    """Multiply every stored element by a scalar, returning a new matrix."""
    if other == 0:
        # Multiplying by zero leaves no stored entries at all.
        return self._lil_container(self.shape, dtype=self.dtype)

    res_dtype = upcast_scalar(self.dtype, other)
    out = self.copy()
    out = out.astype(res_dtype)
    # Scale each row's value list in place.
    for r, row_vals in enumerate(out.data):
        out.data[r] = [v * other for v in row_vals]
    return out
def __truediv__(self, other):  # self / other
    """Divide by a scalar element-wise; defer to CSR for anything else."""
    if not isscalarlike(other):
        # Matrix/array divisors are handled by the CSR implementation.
        return self.tocsr() / other
    out = self.copy()
    # Divide each row's stored values by the scalar.
    for r, row_vals in enumerate(out.data):
        out.data[r] = [v / other for v in row_vals]
    return out
def copy(self):
    """Deep-copy rows and data via the Cython row-range kernel."""
    nrow, ncol = self.shape
    dup = self._lil_container(self.shape, dtype=self.dtype)
    # This is ~14x faster than calling deepcopy() on rows and data.
    _csparsetools.lil_get_row_ranges(nrow, ncol, self.rows, self.data,
                                     dup.rows, dup.data, range(nrow),
                                     0, ncol, 1, ncol)
    return dup

copy.__doc__ = spmatrix.copy.__doc__
def reshape(self, *args, **kwargs):
    """Reshape this matrix to a new 2-D shape, preserving element order.

    Accepts the shape as a tuple or separate integers, plus keyword
    arguments ``order`` ('C' or 'F') and ``copy`` — see
    `check_shape` / `check_reshape_kwargs` for validation details.
    """
    shape = check_shape(args, self.shape)
    order, copy = check_reshape_kwargs(kwargs)

    # Return early if reshape is not required
    if shape == self.shape:
        return self.copy() if copy else self

    new = self._lil_container(shape, dtype=self.dtype)

    if order == 'C':
        ncols = self.shape[1]
        for i, row in enumerate(self.rows):
            # pos is the position within this row's storage lists;
            # j is the actual column index stored there.
            for pos, j in enumerate(row):
                new_r, new_c = np.unravel_index(i * ncols + j, shape)
                # Read the stored value directly instead of going
                # through full fancy indexing (self[i, j]).
                new[new_r, new_c] = self.data[i][pos]
    elif order == 'F':
        nrows = self.shape[0]
        for i, row in enumerate(self.rows):
            for pos, j in enumerate(row):
                new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
                new[new_r, new_c] = self.data[i][pos]
    else:
        raise ValueError("'order' must be 'C' or 'F'")

    return new

reshape.__doc__ = spmatrix.reshape.__doc__
def resize(self, *shape):
    """Resize in place, discarding entries outside the new shape."""
    shape = check_shape(shape)
    target_rows, target_cols = shape
    cur_rows, cur_cols = self.shape

    # Grow or shrink the per-row lists.
    if target_rows < cur_rows:
        self.rows = self.rows[:target_rows]
        self.data = self.data[:target_rows]
    elif target_rows > cur_rows:
        self.rows = np.resize(self.rows, target_rows)
        self.data = np.resize(self.data, target_rows)
        # np.resize repeats existing entries; replace the new tail
        # with fresh empty rows.
        for r in range(cur_rows, target_rows):
            self.rows[r] = []
            self.data[r] = []

    # Drop entries whose column index falls outside the new width;
    # each row's column list is sorted, so bisect finds the cut point.
    if target_cols < cur_cols:
        for row, data in zip(self.rows, self.data):
            cut = bisect_left(row, target_cols)
            del row[cut:]
            del data[cut:]

    self._shape = shape

resize.__doc__ = spmatrix.resize.__doc__
def toarray(self, order=None, out=None):
    """Write all stored values into a dense ndarray."""
    dense = self._process_toarray_args(order, out)
    for r, cols in enumerate(self.rows):
        row_vals = self.data[r]
        for pos, c in enumerate(cols):
            dense[r, c] = row_vals[pos]
    return dense

toarray.__doc__ = spmatrix.toarray.__doc__
def transpose(self, axes=None, copy=False):
    """Transpose by round-tripping through CSR, which has a fast path."""
    transposed_csr = self.tocsr(copy=copy).transpose(axes=axes, copy=False)
    return transposed_csr.tolil(copy=False)

transpose.__doc__ = spmatrix.transpose.__doc__
def tolil(self, copy=False):
    """Already LIL: return self, or a copy when requested."""
    return self.copy() if copy else self

tolil.__doc__ = spmatrix.tolil.__doc__
def tocsr(self, copy=False):
    # Convert LIL to CSR using the Cython flattening kernels.
    M, N = self.shape
    if M == 0 or N == 0:
        # Degenerate shapes have no data to convert.
        return self._csr_container((M, N), dtype=self.dtype)

    # construct indptr array
    if M*N <= np.iinfo(np.int32).max:
        # fast path: it is known that 64-bit indexing will not be needed.
        idx_dtype = np.int32
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indptr[0] = 0
        # Row lengths go into indptr[1:], then cumsum turns them into offsets.
        _csparsetools.lil_get_lengths(self.rows, indptr[1:])
        np.cumsum(indptr, out=indptr)
        nnz = indptr[-1]
    else:
        # Probe the required index width: first for column indices (N),
        # then again once nnz is known (indptr values go up to nnz).
        idx_dtype = get_index_dtype(maxval=N)
        lengths = np.empty(M, dtype=idx_dtype)
        _csparsetools.lil_get_lengths(self.rows, lengths)
        nnz = lengths.sum(dtype=np.int64)
        idx_dtype = get_index_dtype(maxval=max(N, nnz))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indptr[0] = 0
        np.cumsum(lengths, dtype=idx_dtype, out=indptr[1:])

    # Flatten the per-row Python lists into contiguous arrays.
    indices = np.empty(nnz, dtype=idx_dtype)
    data = np.empty(nnz, dtype=self.dtype)
    _csparsetools.lil_flatten_to_array(self.rows, indices)
    _csparsetools.lil_flatten_to_array(self.data, data)

    # init csr matrix
    return self._csr_container((data, indices, indptr), shape=self.shape)

tocsr.__doc__ = spmatrix.tocsr.__doc__
def _prepare_index_for_memoryview(i, j, x=None):
|
||||
"""
|
||||
Convert index and data arrays to form suitable for passing to the
|
||||
Cython fancy getset routines.
|
||||
|
||||
The conversions are necessary since to (i) ensure the integer
|
||||
index arrays are in one of the accepted types, and (ii) to ensure
|
||||
the arrays are writable so that Cython memoryview support doesn't
|
||||
choke on them.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
i, j
|
||||
Index arrays
|
||||
x : optional
|
||||
Data arrays
|
||||
|
||||
Returns
|
||||
-------
|
||||
i, j, x
|
||||
Re-formatted arrays (x is omitted, if input was None)
|
||||
|
||||
"""
|
||||
if i.dtype > j.dtype:
|
||||
j = j.astype(i.dtype)
|
||||
elif i.dtype < j.dtype:
|
||||
i = i.astype(j.dtype)
|
||||
|
||||
if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
|
||||
i = i.astype(np.intp)
|
||||
if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
|
||||
j = j.astype(np.intp)
|
||||
|
||||
if x is not None:
|
||||
if not x.flags.writeable:
|
||||
x = x.copy()
|
||||
return i, j, x
|
||||
else:
|
||||
return i, j
|
||||
|
||||
|
||||
def isspmatrix_lil(x):
    """Is x of lil_matrix type?

    Parameters
    ----------
    x
        object to check for being a lil matrix

    Returns
    -------
    bool
        True if x is a lil matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import lil_matrix, isspmatrix_lil
    >>> isspmatrix_lil(lil_matrix([[5]]))
    True

    >>> from scipy.sparse import lil_matrix, csr_matrix, isspmatrix_lil
    >>> isspmatrix_lil(csr_matrix([[5]]))
    False
    """
    from ._arrays import lil_array
    return isinstance(x, (lil_matrix, lil_array))
151
.CondaPkg/env/Lib/site-packages/scipy/sparse/_matrix_io.py
vendored
Normal file
151
.CondaPkg/env/Lib/site-packages/scipy/sparse/_matrix_io.py
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
import numpy as np
|
||||
import scipy.sparse
|
||||
|
||||
__all__ = ['save_npz', 'load_npz']
|
||||
|
||||
|
||||
# Make loading safe vs. malicious input
|
||||
PICKLE_KWARGS = dict(allow_pickle=False)
|
||||
|
||||
|
||||
def save_npz(file, matrix, compressed=True):
    """ Save a sparse matrix to a file using ``.npz`` format.

    Parameters
    ----------
    file : str or file-like object
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already
        there.
    matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
        The sparse matrix to save.
    compressed : bool, optional
        Allow compressing the file. Default: True

    See Also
    --------
    scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
    numpy.savez: Save several arrays into a ``.npz`` archive.
    numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.

    Examples
    --------
    Store sparse matrix to disk, and load it again:

    >>> import numpy as np
    >>> import scipy.sparse
    >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
    >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
    >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
    >>> sparse_matrix.toarray()
    array([[0, 0, 3],
           [4, 0, 0]], dtype=int64)
    """
    fmt = matrix.format
    # Each format stores a different set of structure arrays alongside
    # the common format/shape/data entries.
    if fmt in ('csc', 'csr', 'bsr'):
        arrays_dict = dict(indices=matrix.indices, indptr=matrix.indptr)
    elif fmt == 'dia':
        arrays_dict = dict(offsets=matrix.offsets)
    elif fmt == 'coo':
        arrays_dict = dict(row=matrix.row, col=matrix.col)
    else:
        raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(fmt))
    arrays_dict.update(
        format=fmt.encode('ascii'),
        shape=matrix.shape,
        data=matrix.data
    )
    saver = np.savez_compressed if compressed else np.savez
    saver(file, **arrays_dict)
def load_npz(file):
    """ Load a sparse matrix from a file using ``.npz`` format.

    Parameters
    ----------
    file : str or file-like object
        Either the file name (string) or an open file (file-like object)
        where the data will be loaded.

    Returns
    -------
    result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
        A sparse matrix containing the loaded data.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.

    See Also
    --------
    scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format.
    numpy.load: Load several arrays from a ``.npz`` archive.

    Examples
    --------
    Store sparse matrix to disk, and load it again:

    >>> import numpy as np
    >>> import scipy.sparse
    >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
    >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
    >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
    >>> sparse_matrix.toarray()
    array([[0, 0, 3],
           [4, 0, 0]], dtype=int64)
    """
    # PICKLE_KWARGS disables pickle loading so malicious archives
    # cannot execute code.
    with np.load(file, **PICKLE_KWARGS) as loaded:
        try:
            matrix_format = loaded['format']
        except KeyError as e:
            raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) from e

        matrix_format = matrix_format.item()

        if not isinstance(matrix_format, str):
            # Play safe with Python 2 vs 3 backward compatibility;
            # files saved with SciPy < 1.0.0 may contain unicode or bytes.
            matrix_format = matrix_format.decode('ascii')

        # Resolve e.g. 'csr' -> scipy.sparse.csr_matrix.
        try:
            cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
        except AttributeError as e:
            raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) from e

        # Reassemble using the constructor signature for each format.
        if matrix_format in ('csc', 'csr', 'bsr'):
            return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
        elif matrix_format == 'dia':
            return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
        elif matrix_format == 'coo':
            return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
        else:
            raise NotImplementedError('Load is not implemented for '
                                      'sparse matrix of format {}.'.format(matrix_format))
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_sparsetools.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_sparsetools.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_sparsetools.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/_sparsetools.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
76
.CondaPkg/env/Lib/site-packages/scipy/sparse/_spfuncs.py
vendored
Normal file
76
.CondaPkg/env/Lib/site-packages/scipy/sparse/_spfuncs.py
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
""" Functions that operate on sparse matrices
|
||||
"""
|
||||
|
||||
__all__ = ['count_blocks','estimate_blocksize']
|
||||
|
||||
from ._csr import isspmatrix_csr, csr_matrix
|
||||
from ._csc import isspmatrix_csc
|
||||
from ._sparsetools import csr_count_blocks
|
||||
|
||||
|
||||
def estimate_blocksize(A,efficiency=0.7):
    """Attempt to determine the blocksize of a sparse matrix

    Returns a blocksize=(r,c) such that
        - A.nnz / A.tobsr( (r,c) ).nnz > efficiency
    """
    if not (isspmatrix_csr(A) or isspmatrix_csc(A)):
        A = csr_matrix(A)

    if A.nnz == 0:
        return (1,1)

    if not 0 < efficiency < 1.0:
        raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')

    high_efficiency = (1.0 + efficiency) / 2.0
    nnz = float(A.nnz)
    M,N = A.shape

    # e_rc below is the fill ratio nnz / (r*c*num_blocks): 1.0 means
    # every (r, c) block is completely full; only block sizes that
    # evenly divide the shape are considered.
    if M % 2 == 0 and N % 2 == 0:
        e22 = nnz / (4 * count_blocks(A,(2,2)))
    else:
        e22 = 0.0

    if M % 3 == 0 and N % 3 == 0:
        e33 = nnz / (9 * count_blocks(A,(3,3)))
    else:
        e33 = 0.0

    if e22 > high_efficiency and e33 > high_efficiency:
        # Both small sizes look very dense: try the larger 6x6 blocks.
        e66 = nnz / (36 * count_blocks(A,(6,6)))
        if e66 > efficiency:
            return (6,6)
        else:
            return (3,3)
    else:
        if M % 4 == 0 and N % 4 == 0:
            e44 = nnz / (16 * count_blocks(A,(4,4)))
        else:
            e44 = 0.0

        # Prefer the largest block size that still meets the target.
        if e44 > efficiency:
            return (4,4)
        elif e33 > efficiency:
            return (3,3)
        elif e22 > efficiency:
            return (2,2)
        else:
            return (1,1)
def count_blocks(A, blocksize):
    """For a given blocksize=(r,c) count the number of occupied
    blocks in a sparse matrix A
    """
    r, c = blocksize
    if r < 1 or c < 1:
        raise ValueError('r and c must be positive')

    if isspmatrix_csr(A):
        M, N = A.shape
        return csr_count_blocks(M, N, r, c, A.indptr, A.indices)
    if isspmatrix_csc(A):
        # A CSC matrix has the transposed sparsity pattern of a CSR one.
        return count_blocks(A.T, (c, r))
    # Any other format: convert to CSR first.
    return count_blocks(csr_matrix(A), blocksize)
413
.CondaPkg/env/Lib/site-packages/scipy/sparse/_sputils.py
vendored
Normal file
413
.CondaPkg/env/Lib/site-packages/scipy/sparse/_sputils.py
vendored
Normal file
@@ -0,0 +1,413 @@
|
||||
""" Utility functions for sparse matrix module
|
||||
"""
|
||||
|
||||
import sys
|
||||
import operator
|
||||
import numpy as np
|
||||
from scipy._lib._util import prod
|
||||
import scipy.sparse as sp
|
||||
|
||||
|
||||
__all__ = ['upcast', 'getdtype', 'getdata', 'isscalarlike', 'isintlike',
|
||||
'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
|
||||
|
||||
supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc,
|
||||
np.uintc, np.int_, np.uint, np.longlong, np.ulonglong,
|
||||
np.single, np.double,
|
||||
np.longdouble, np.csingle, np.cdouble, np.clongdouble]
|
||||
|
||||
_upcast_memo = {}
|
||||
|
||||
|
||||
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T where T is a supported dtype

    Examples
    --------

    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.bool_'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    """
    key = hash(args)
    cached = _upcast_memo.get(key)
    if cached is not None:
        return cached

    combined = np.result_type(*args)

    # Find the first (narrowest) supported dtype that can hold the
    # combined type, and memoize the answer.
    for candidate in supported_dtypes:
        if np.can_cast(combined, candidate):
            _upcast_memo[key] = candidate
            return candidate

    raise TypeError('no supported conversion for types: %r' % (args,))
def upcast_char(*args):
    """Same as `upcast` but taking dtype.char as input (faster)."""
    cached = _upcast_memo.get(args)
    if cached is not None:
        return cached
    result = upcast(*map(np.dtype, args))
    _upcast_memo[args] = result
    return result
def upcast_scalar(dtype, scalar):
    """Determine data type for binary operation between an array of
    type `dtype` and a scalar.
    """
    # Let NumPy's own promotion rules decide, by performing a tiny
    # representative multiplication.
    probe = np.array([0], dtype=dtype) * scalar
    return probe.dtype
def downcast_intp_index(arr):
    """
    Down-cast index array to np.intp dtype if it is of a larger dtype.

    Raise an error if the array contains a value that is too large for
    intp.
    """
    if arr.dtype.itemsize <= np.dtype(np.intp).itemsize:
        # Already narrow enough; return unchanged.
        return arr
    if arr.size == 0:
        return arr.astype(np.intp)
    info = np.iinfo(np.intp)
    if arr.max() > info.max or arr.min() < info.min:
        raise ValueError("Cannot deal with arrays with indices larger "
                         "than the machine maximum address size "
                         "(e.g. 64-bit indices on 32-bit machine).")
    return arr.astype(np.intp)
def to_native(A):
    """
    Ensure that the data type of the NumPy array `A` has native byte order.

    `A` must be a NumPy array. If the data type of `A` does not have native
    byte order, a copy of `A` with a native byte order is returned. Otherwise
    `A` is returned.
    """
    if A.dtype.isnative:
        # Don't call `asarray()` if A is already native, to avoid unnecessarily
        # creating a view of the input array.
        return A
    return np.asarray(A, dtype=A.dtype.newbyteorder('native'))
def getdtype(dtype, a=None, default=None):
    """Resolve a dtype argument for sparse constructors.

    If `dtype` is not None, return ``np.dtype(dtype)`` (rejecting object
    dtype). Otherwise fall back to ``a.dtype``, and failing that to
    ``np.dtype(default)``.
    """
    # TODO is this really what we want?
    if dtype is not None:
        newdtype = np.dtype(dtype)
        if newdtype == np.object_:
            raise ValueError(
                "object dtype is not supported by sparse matrices"
            )
        return newdtype
    try:
        return a.dtype
    except AttributeError as e:
        if default is None:
            raise TypeError("could not interpret data type") from e
        return np.dtype(default)
def getdata(obj, dtype=None, copy=False):
    """
    This is a wrapper of `np.array(obj, dtype=dtype, copy=copy)`
    that rejects object-dtype results.
    """
    data = np.array(obj, dtype=dtype, copy=copy)
    # getdtype raises for unsupported (object) dtypes; the return
    # value itself is not needed here.
    getdtype(data.dtype)
    return data
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
    """
    Based on input (integer) arrays `a`, determine a suitable index data
    type that can hold the data in the arrays.

    Parameters
    ----------
    arrays : tuple of array_like
        Input arrays whose types/contents to check
    maxval : float, optional
        Maximum value needed
    check_contents : bool, optional
        Whether to check the values in the arrays and not just their types.
        Default: False (check only the types)

    Returns
    -------
    dtype : dtype
        Suitable index data type (int32 or int64)

    """
    int32min = np.int32(np.iinfo(np.int32).min)
    int32max = np.int32(np.iinfo(np.int32).max)

    # not using intc directly due to misinteractions with pythran
    dtype = np.int32 if np.intc().itemsize == 4 else np.int64
    if maxval is not None and np.int64(maxval) > int32max:
        dtype = np.int64

    if isinstance(arrays, np.ndarray):
        arrays = (arrays,)

    for arr in arrays:
        arr = np.asarray(arr)
        if np.can_cast(arr.dtype, np.int32):
            continue
        if check_contents:
            if arr.size == 0:
                # a bigger type not needed for an empty array
                continue
            if np.issubdtype(arr.dtype, np.integer):
                # Values (not just the dtype) fit in int32: keep int32.
                if int32min <= arr.min() and arr.max() <= int32max:
                    continue
        dtype = np.int64
        break

    return dtype
def get_sum_dtype(dtype):
    """Mimic numpy's casting for np.sum"""
    # Unsigned inputs accumulate into the platform uint; other
    # int-castable inputs accumulate into the platform int.
    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
        return np.uint
    if np.can_cast(dtype, np.int_):
        return np.int_
    return dtype
def isscalarlike(x):
    """Is x either a scalar, an array scalar, or a 0-dim array?"""
    if np.isscalar(x):
        return True
    return isdense(x) and x.ndim == 0
def isintlike(x):
    """Is x appropriate as an index into a sparse matrix? Returns True
    if it can be cast safely to a machine int.
    """
    # Fast-path check to eliminate non-scalar values. operator.index would
    # catch this case too, but the exception catching is slow.
    if np.ndim(x) != 0:
        return False
    try:
        operator.index(x)
        return True
    except (TypeError, ValueError):
        pass
    # Not an exact integer type; check whether it merely *equals* an int
    # (e.g. 3.0), which is deliberately rejected with an error.
    try:
        loose_int = bool(int(x) == x)
    except (TypeError, ValueError):
        return False
    if loose_int:
        msg = "Inexact indices into sparse matrices are not allowed"
        raise ValueError(msg)
    return loose_int
def isshape(x, nonneg=False):
    """Is x a valid 2-tuple of dimensions?

    If nonneg, also checks that the dimensions are non-negative.
    """
    try:
        # Assume it's a tuple of matrix dimensions (M, N)
        (M, N) = x
    except Exception:
        return False
    if not (isintlike(M) and isintlike(N)):
        return False
    if np.ndim(M) != 0 or np.ndim(N) != 0:
        return False
    if nonneg and (M < 0 or N < 0):
        return False
    return True
def issequence(t):
    """Is t a flat (1-D) sequence: list/tuple of scalars, or 1-D ndarray?"""
    if isinstance(t, (list, tuple)):
        return len(t) == 0 or np.isscalar(t[0])
    return isinstance(t, np.ndarray) and t.ndim == 1
def ismatrix(t):
    """Is t matrix-like: a non-empty list/tuple of rows, or a 2-D ndarray?"""
    if isinstance(t, (list, tuple)):
        return len(t) > 0 and issequence(t[0])
    return isinstance(t, np.ndarray) and t.ndim == 2
def isdense(x):
    """Is x a dense (NumPy ndarray) object?"""
    return isinstance(x, np.ndarray)
def validateaxis(axis):
    """Validate that `axis` is one of {-2, -1, 0, 1, None}; raise otherwise."""
    if axis is None:
        return

    axis_type = type(axis)

    # In NumPy, you can pass in tuples for 'axis', but they are
    # not very useful for sparse matrices given their limited
    # dimensions, so let's make it explicit that they are not
    # allowed to be passed in
    if axis_type == tuple:
        raise TypeError(("Tuples are not accepted for the 'axis' "
                         "parameter. Please pass in one of the "
                         "following: {-2, -1, 0, 1, None}."))

    # If not a tuple, check that the provided axis is actually
    # an integer and raise a TypeError similar to NumPy's
    if not np.issubdtype(np.dtype(axis_type), np.integer):
        raise TypeError("axis must be an integer, not {name}"
                        .format(name=axis_type.__name__))

    if not (-2 <= axis <= 1):
        raise ValueError("axis out of range")
def check_shape(args, current_shape=None):
    """Imitate numpy.matrix handling of shape arguments.

    Accepts the shape as a tuple or as separate integers; when
    `current_shape` is given, also resolves a single -1 dimension and
    checks that the total size is preserved.
    """
    if len(args) == 0:
        raise TypeError("function missing 1 required positional argument: "
                        "'shape'")
    if len(args) == 1:
        try:
            shape_iter = iter(args[0])
        except TypeError:
            new_shape = (operator.index(args[0]), )
        else:
            new_shape = tuple(operator.index(arg) for arg in shape_iter)
    else:
        new_shape = tuple(operator.index(arg) for arg in args)

    if current_shape is None:
        if len(new_shape) != 2:
            raise ValueError('shape must be a 2-tuple of positive integers')
        if any(d < 0 for d in new_shape):
            raise ValueError("'shape' elements cannot be negative")
        return new_shape

    # Reshape relative to an existing shape: at most one dimension may
    # be left unspecified (negative), and sizes must agree.
    current_size = prod(current_shape)
    negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
    if not negative_indexes:
        if prod(new_shape) != current_size:
            raise ValueError('cannot reshape array of size {} into shape {}'
                             .format(current_size, new_shape))
    elif len(negative_indexes) == 1:
        skip = negative_indexes[0]
        specified = prod(new_shape[0:skip] + new_shape[skip+1:])
        unspecified, remainder = divmod(current_size, specified)
        if remainder != 0:
            err_shape = tuple('newshape' if x < 0 else x for x in new_shape)
            raise ValueError('cannot reshape array of size {} into shape {}'
                             ''.format(current_size, err_shape))
        new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:]
    else:
        raise ValueError('can only specify one unknown dimension')

    if len(new_shape) != 2:
        raise ValueError('matrix shape must be two-dimensional')

    return new_shape
def check_reshape_kwargs(kwargs):
    """Unpack 'order' and 'copy' from reshape's star-kwargs.

    Returns (order, copy) with defaults ('C', False); any other
    keyword left in `kwargs` is an error.
    """
    order = kwargs.pop('order', 'C')
    copy = kwargs.pop('copy', False)
    if kwargs:  # Some unused kwargs remain
        raise TypeError('reshape() got unexpected keywords arguments: {}'
                        .format(', '.join(kwargs.keys())))
    return order, copy
def is_pydata_spmatrix(m):
    """
    Check whether object is pydata/sparse matrix, avoiding importing the module.
    """
    # Only look at sys.modules: if 'sparse' was never imported, m
    # cannot be one of its types, and we avoid the import cost.
    base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None)
    if base_cls is None:
        return False
    return isinstance(m, base_cls)
###############################################################################
|
||||
# Wrappers for NumPy types that are deprecated
|
||||
|
||||
# Numpy versions of these functions raise deprecation warnings, the
|
||||
# ones below do not.
|
||||
|
||||
def matrix(*args, **kwargs):
    """np.matrix constructor that avoids NumPy's deprecation warning."""
    return np.array(*args, **kwargs).view(np.matrix)
def asmatrix(data, dtype=None):
    """np.asmatrix equivalent without NumPy's deprecation warning."""
    # Pass through unchanged when already a matrix of the right dtype.
    if isinstance(data, np.matrix) and (dtype is None or data.dtype == dtype):
        return data
    return np.asarray(data, dtype=dtype).view(np.matrix)
def _todata(s: 'sp.spmatrix') -> np.ndarray:
|
||||
"""Access nonzero values, possibly after summing duplicates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
s : sparse matrix
|
||||
Input sparse matrix.
|
||||
|
||||
Returns
|
||||
-------
|
||||
data: ndarray
|
||||
Nonzero values of the array, with shape (s.nnz,)
|
||||
|
||||
"""
|
||||
if isinstance(s, sp._data._data_matrix):
|
||||
return s._deduped_data()
|
||||
|
||||
if isinstance(s, sp.dok_matrix):
|
||||
return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz)
|
||||
|
||||
if isinstance(s, sp.lil_matrix):
|
||||
data = np.empty(s.nnz, dtype=s.dtype)
|
||||
sp._csparsetools.lil_flatten_to_array(s.data, data)
|
||||
return data
|
||||
|
||||
return s.tocoo()._deduped_data()
|
||||
42
.CondaPkg/env/Lib/site-packages/scipy/sparse/base.py
vendored
Normal file
42
.CondaPkg/env/Lib/site-packages/scipy/sparse/base.py
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
||||
# Use the `scipy.sparse` namespace for importing the functions
|
||||
# included below.
|
||||
|
||||
import warnings
|
||||
from . import _base
|
||||
|
||||
|
||||
__all__ = [ # noqa: F822
|
||||
'MAXPRINT',
|
||||
'SparseEfficiencyWarning',
|
||||
'SparseFormatWarning',
|
||||
'SparseWarning',
|
||||
'asmatrix',
|
||||
'check_reshape_kwargs',
|
||||
'check_shape',
|
||||
'get_sum_dtype',
|
||||
'isdense',
|
||||
'isintlike',
|
||||
'isscalarlike',
|
||||
'issparse',
|
||||
'isspmatrix',
|
||||
'spmatrix',
|
||||
'validateaxis',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
    """Expose only the deprecated re-exports listed in __all__."""
    return __all__
def __getattr__(name):
    """Deprecation shim: forward known names to scipy.sparse._base."""
    if name not in __all__:
        raise AttributeError(
            "scipy.sparse.base is deprecated and has no attribute "
            f"{name}. Try looking in scipy.sparse instead.")

    # Warn callers once per access site that this namespace is going away.
    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                  "the `scipy.sparse.base` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_base, name)
46
.CondaPkg/env/Lib/site-packages/scipy/sparse/bsr.py
vendored
Normal file
46
.CondaPkg/env/Lib/site-packages/scipy/sparse/bsr.py
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
||||
# Use the `scipy.sparse` namespace for importing the functions
|
||||
# included below.
|
||||
|
||||
import warnings
|
||||
from . import _bsr
|
||||
|
||||
|
||||
__all__ = [ # noqa: F822
|
||||
'bsr_matmat',
|
||||
'bsr_matrix',
|
||||
'bsr_matvec',
|
||||
'bsr_matvecs',
|
||||
'bsr_sort_indices',
|
||||
'bsr_tocsr',
|
||||
'bsr_transpose',
|
||||
'check_shape',
|
||||
'csr_matmat_maxnnz',
|
||||
'get_index_dtype',
|
||||
'getdata',
|
||||
'getdtype',
|
||||
'isshape',
|
||||
'isspmatrix',
|
||||
'isspmatrix_bsr',
|
||||
'spmatrix',
|
||||
'to_native',
|
||||
'upcast',
|
||||
'warn',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
|
||||
return __all__
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
if name not in __all__:
|
||||
raise AttributeError(
|
||||
"scipy.sparse.bsr is deprecated and has no attribute "
|
||||
f"{name}. Try looking in scipy.sparse instead.")
|
||||
|
||||
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
|
||||
"the `scipy.sparse.bsr` namespace is deprecated.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
|
||||
return getattr(_bsr, name)
|
||||
54
.CondaPkg/env/Lib/site-packages/scipy/sparse/compressed.py
vendored
Normal file
54
.CondaPkg/env/Lib/site-packages/scipy/sparse/compressed.py
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
||||
# Use the `scipy.sparse` namespace for importing the functions
|
||||
# included below.
|
||||
|
||||
import warnings
|
||||
from . import _compressed
|
||||
|
||||
|
||||
__all__ = [ # noqa: F822
|
||||
'IndexMixin',
|
||||
'SparseEfficiencyWarning',
|
||||
'check_shape',
|
||||
'csr_column_index1',
|
||||
'csr_column_index2',
|
||||
'csr_row_index',
|
||||
'csr_row_slice',
|
||||
'csr_sample_offsets',
|
||||
'csr_sample_values',
|
||||
'csr_todense',
|
||||
'downcast_intp_index',
|
||||
'get_csr_submatrix',
|
||||
'get_index_dtype',
|
||||
'get_sum_dtype',
|
||||
'getdtype',
|
||||
'is_pydata_spmatrix',
|
||||
'isdense',
|
||||
'isintlike',
|
||||
'isscalarlike',
|
||||
'isshape',
|
||||
'isspmatrix',
|
||||
'operator',
|
||||
'spmatrix',
|
||||
'to_native',
|
||||
'upcast',
|
||||
'upcast_char',
|
||||
'warn',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
|
||||
return __all__
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
if name not in __all__:
|
||||
raise AttributeError(
|
||||
"scipy.sparse.compressed is deprecated and has no attribute "
|
||||
f"{name}. Try looking in scipy.sparse instead.")
|
||||
|
||||
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
|
||||
"the `scipy.sparse.compressed` namespace is deprecated.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
|
||||
return getattr(_compressed, name)
|
||||
53
.CondaPkg/env/Lib/site-packages/scipy/sparse/construct.py
vendored
Normal file
53
.CondaPkg/env/Lib/site-packages/scipy/sparse/construct.py
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
||||
# Use the `scipy.sparse` namespace for importing the functions
|
||||
# included below.
|
||||
|
||||
import warnings
|
||||
from . import _construct
|
||||
|
||||
|
||||
__all__ = [ # noqa: F822
|
||||
'block_diag',
|
||||
'bmat',
|
||||
'bsr_matrix',
|
||||
'check_random_state',
|
||||
'coo_matrix',
|
||||
'csc_matrix',
|
||||
'csr_hstack',
|
||||
'csr_matrix',
|
||||
'dia_matrix',
|
||||
'diags',
|
||||
'eye',
|
||||
'get_index_dtype',
|
||||
'hstack',
|
||||
'identity',
|
||||
'isscalarlike',
|
||||
'issparse',
|
||||
'kron',
|
||||
'kronsum',
|
||||
'numbers',
|
||||
'partial',
|
||||
'rand',
|
||||
'random',
|
||||
'rng_integers',
|
||||
'spdiags',
|
||||
'upcast',
|
||||
'vstack',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
|
||||
return __all__
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
if name not in __all__:
|
||||
raise AttributeError(
|
||||
"scipy.sparse.construct is deprecated and has no attribute "
|
||||
f"{name}. Try looking in scipy.sparse instead.")
|
||||
|
||||
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
|
||||
"the `scipy.sparse.construct` namespace is deprecated.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
|
||||
return getattr(_construct, name)
|
||||
47
.CondaPkg/env/Lib/site-packages/scipy/sparse/coo.py
vendored
Normal file
47
.CondaPkg/env/Lib/site-packages/scipy/sparse/coo.py
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
||||
# Use the `scipy.sparse` namespace for importing the functions
|
||||
# included below.
|
||||
|
||||
import warnings
|
||||
from . import _coo
|
||||
|
||||
|
||||
__all__ = [ # noqa: F822
|
||||
'SparseEfficiencyWarning',
|
||||
'check_reshape_kwargs',
|
||||
'check_shape',
|
||||
'coo_matrix',
|
||||
'coo_matvec',
|
||||
'coo_tocsr',
|
||||
'coo_todense',
|
||||
'downcast_intp_index',
|
||||
'get_index_dtype',
|
||||
'getdata',
|
||||
'getdtype',
|
||||
'isshape',
|
||||
'isspmatrix',
|
||||
'isspmatrix_coo',
|
||||
'operator',
|
||||
'spmatrix',
|
||||
'to_native',
|
||||
'upcast',
|
||||
'upcast_char',
|
||||
'warn',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
|
||||
return __all__
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
if name not in __all__:
|
||||
raise AttributeError(
|
||||
"scipy.sparse.coo is deprecated and has no attribute "
|
||||
f"{name}. Try looking in scipy.sparse instead.")
|
||||
|
||||
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
|
||||
"the `scipy.sparse.coo` namespace is deprecated.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
|
||||
return getattr(_coo, name)
|
||||
34
.CondaPkg/env/Lib/site-packages/scipy/sparse/csc.py
vendored
Normal file
34
.CondaPkg/env/Lib/site-packages/scipy/sparse/csc.py
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
||||
# Use the `scipy.sparse` namespace for importing the functions
|
||||
# included below.
|
||||
|
||||
import warnings
|
||||
from . import _csc
|
||||
|
||||
|
||||
__all__ = [ # noqa: F822
|
||||
'csc_matrix',
|
||||
'csc_tocsr',
|
||||
'expandptr',
|
||||
'get_index_dtype',
|
||||
'isspmatrix_csc',
|
||||
'spmatrix',
|
||||
'upcast',
|
||||
]
|
||||
|
||||
|
||||
def __dir__():
|
||||
return __all__
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
if name not in __all__:
|
||||
raise AttributeError(
|
||||
"scipy.sparse.csc is deprecated and has no attribute "
|
||||
f"{name}. Try looking in scipy.sparse instead.")
|
||||
|
||||
warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
|
||||
"the `scipy.sparse.csc` namespace is deprecated.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
|
||||
return getattr(_csc, name)
|
||||
208
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__init__.py
vendored
Normal file
208
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__init__.py
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
r"""
|
||||
Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`)
|
||||
==============================================================
|
||||
|
||||
.. currentmodule:: scipy.sparse.csgraph
|
||||
|
||||
Fast graph algorithms based on sparse matrix representations.
|
||||
|
||||
Contents
|
||||
--------
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
connected_components -- determine connected components of a graph
|
||||
laplacian -- compute the laplacian of a graph
|
||||
shortest_path -- compute the shortest path between points on a positive graph
|
||||
dijkstra -- use Dijkstra's algorithm for shortest path
|
||||
floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
|
||||
bellman_ford -- use the Bellman-Ford algorithm for shortest path
|
||||
johnson -- use Johnson's algorithm for shortest path
|
||||
breadth_first_order -- compute a breadth-first order of nodes
|
||||
depth_first_order -- compute a depth-first order of nodes
|
||||
breadth_first_tree -- construct the breadth-first tree from a given node
|
||||
depth_first_tree -- construct a depth-first tree from a given node
|
||||
minimum_spanning_tree -- construct the minimum spanning tree of a graph
|
||||
reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
|
||||
maximum_flow -- solve the maximum flow problem for a graph
|
||||
maximum_bipartite_matching -- compute a maximum matching of a bipartite graph
|
||||
min_weight_full_bipartite_matching - compute a minimum weight full matching of a bipartite graph
|
||||
structural_rank -- compute the structural rank of a graph
|
||||
NegativeCycleError
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
construct_dist_matrix
|
||||
csgraph_from_dense
|
||||
csgraph_from_masked
|
||||
csgraph_masked_from_dense
|
||||
csgraph_to_dense
|
||||
csgraph_to_masked
|
||||
reconstruct_path
|
||||
|
||||
Graph Representations
|
||||
---------------------
|
||||
This module uses graphs which are stored in a matrix format. A
|
||||
graph with N nodes can be represented by an (N x N) adjacency matrix G.
|
||||
If there is a connection from node i to node j, then G[i, j] = w, where
|
||||
w is the weight of the connection. For nodes i and j which are
|
||||
not connected, the value depends on the representation:
|
||||
|
||||
- for dense array representations, non-edges are represented by
|
||||
G[i, j] = 0, infinity, or NaN.
|
||||
|
||||
- for dense masked representations (of type np.ma.MaskedArray), non-edges
|
||||
are represented by masked values. This can be useful when graphs with
|
||||
zero-weight edges are desired.
|
||||
|
||||
- for sparse array representations, non-edges are represented by
|
||||
non-entries in the matrix. This sort of sparse representation also
|
||||
allows for edges with zero weights.
|
||||
|
||||
As a concrete example, imagine that you would like to represent the following
|
||||
undirected graph::
|
||||
|
||||
G
|
||||
|
||||
(0)
|
||||
/ \
|
||||
1 2
|
||||
/ \
|
||||
(2) (1)
|
||||
|
||||
This graph has three nodes, where node 0 and 1 are connected by an edge of
|
||||
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
|
||||
We can construct the dense, masked, and sparse representations as follows,
|
||||
keeping in mind that an undirected graph is represented by a symmetric matrix::
|
||||
|
||||
>>> import numpy as np
|
||||
>>> G_dense = np.array([[0, 2, 1],
|
||||
... [2, 0, 0],
|
||||
... [1, 0, 0]])
|
||||
>>> G_masked = np.ma.masked_values(G_dense, 0)
|
||||
>>> from scipy.sparse import csr_matrix
|
||||
>>> G_sparse = csr_matrix(G_dense)
|
||||
|
||||
This becomes more difficult when zero edges are significant. For example,
|
||||
consider the situation when we slightly modify the above graph::
|
||||
|
||||
G2
|
||||
|
||||
(0)
|
||||
/ \
|
||||
0 2
|
||||
/ \
|
||||
(2) (1)
|
||||
|
||||
This is identical to the previous graph, except nodes 0 and 2 are connected
|
||||
by an edge of zero weight. In this case, the dense representation above
|
||||
leads to ambiguities: how can non-edges be represented if zero is a meaningful
|
||||
value? In this case, either a masked or sparse representation must be used
|
||||
to eliminate the ambiguity::
|
||||
|
||||
>>> import numpy as np
|
||||
>>> G2_data = np.array([[np.inf, 2, 0 ],
|
||||
... [2, np.inf, np.inf],
|
||||
... [0, np.inf, np.inf]])
|
||||
>>> G2_masked = np.ma.masked_invalid(G2_data)
|
||||
>>> from scipy.sparse.csgraph import csgraph_from_dense
|
||||
>>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
|
||||
>>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
|
||||
>>> G2_sparse.data
|
||||
array([ 2., 0., 2., 0.])
|
||||
|
||||
Here we have used a utility routine from the csgraph submodule in order to
|
||||
convert the dense representation to a sparse representation which can be
|
||||
understood by the algorithms in submodule. By viewing the data array, we
|
||||
can see that the zero values are explicitly encoded in the graph.
|
||||
|
||||
Directed vs. undirected
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Matrices may represent either directed or undirected graphs. This is
|
||||
specified throughout the csgraph module by a boolean keyword. Graphs are
|
||||
assumed to be directed by default. In a directed graph, traversal from node
|
||||
i to node j can be accomplished over the edge G[i, j], but not the edge
|
||||
G[j, i]. Consider the following dense graph::
|
||||
|
||||
>>> import numpy as np
|
||||
>>> G_dense = np.array([[0, 1, 0],
|
||||
... [2, 0, 3],
|
||||
... [0, 4, 0]])
|
||||
|
||||
When ``directed=True`` we get the graph::
|
||||
|
||||
---1--> ---3-->
|
||||
(0) (1) (2)
|
||||
<--2--- <--4---
|
||||
|
||||
In a non-directed graph, traversal from node i to node j can be
|
||||
accomplished over either G[i, j] or G[j, i]. If both edges are not null,
|
||||
and the two have unequal weights, then the smaller of the two is used.
|
||||
|
||||
So for the same graph, when ``directed=False`` we get the graph::
|
||||
|
||||
(0)--1--(1)--3--(2)
|
||||
|
||||
Note that a symmetric matrix will represent an undirected graph, regardless
|
||||
of whether the 'directed' keyword is set to True or False. In this case,
|
||||
using ``directed=True`` generally leads to more efficient computation.
|
||||
|
||||
The routines in this module accept as input either scipy.sparse representations
|
||||
(csr, csc, or lil format), masked representations, or dense representations
|
||||
with non-edges indicated by zeros, infinities, and NaN entries.
|
||||
"""
|
||||
|
||||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = ['connected_components',
|
||||
'laplacian',
|
||||
'shortest_path',
|
||||
'floyd_warshall',
|
||||
'dijkstra',
|
||||
'bellman_ford',
|
||||
'johnson',
|
||||
'breadth_first_order',
|
||||
'depth_first_order',
|
||||
'breadth_first_tree',
|
||||
'depth_first_tree',
|
||||
'minimum_spanning_tree',
|
||||
'reverse_cuthill_mckee',
|
||||
'maximum_flow',
|
||||
'maximum_bipartite_matching',
|
||||
'min_weight_full_bipartite_matching',
|
||||
'structural_rank',
|
||||
'construct_dist_matrix',
|
||||
'reconstruct_path',
|
||||
'csgraph_masked_from_dense',
|
||||
'csgraph_from_dense',
|
||||
'csgraph_from_masked',
|
||||
'csgraph_to_dense',
|
||||
'csgraph_to_masked',
|
||||
'NegativeCycleError']
|
||||
|
||||
from ._laplacian import laplacian
|
||||
from ._shortest_path import (
|
||||
shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson,
|
||||
NegativeCycleError
|
||||
)
|
||||
from ._traversal import (
|
||||
breadth_first_order, depth_first_order, breadth_first_tree,
|
||||
depth_first_tree, connected_components
|
||||
)
|
||||
from ._min_spanning_tree import minimum_spanning_tree
|
||||
from ._flow import maximum_flow
|
||||
from ._matching import (
|
||||
maximum_bipartite_matching, min_weight_full_bipartite_matching
|
||||
)
|
||||
from ._reordering import reverse_cuthill_mckee, structural_rank
|
||||
from ._tools import (
|
||||
construct_dist_matrix, reconstruct_path, csgraph_from_dense,
|
||||
csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked,
|
||||
csgraph_to_masked
|
||||
)
|
||||
|
||||
from scipy._lib._testutils import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/setup.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/__pycache__/setup.cpython-311.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_flow.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_flow.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_flow.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_flow.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
555
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_laplacian.py
vendored
Normal file
555
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_laplacian.py
vendored
Normal file
@@ -0,0 +1,555 @@
|
||||
"""
|
||||
Laplacian of a compressed-sparse graph
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from scipy.sparse import isspmatrix
|
||||
from scipy.sparse.linalg import LinearOperator
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Graph laplacian
|
||||
def laplacian(
|
||||
csgraph,
|
||||
normed=False,
|
||||
return_diag=False,
|
||||
use_out_degree=False,
|
||||
*,
|
||||
copy=True,
|
||||
form="array",
|
||||
dtype=None,
|
||||
symmetrized=False,
|
||||
):
|
||||
"""
|
||||
Return the Laplacian of a directed graph.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
csgraph : array_like or sparse matrix, 2 dimensions
|
||||
compressed-sparse graph, with shape (N, N).
|
||||
normed : bool, optional
|
||||
If True, then compute symmetrically normalized Laplacian.
|
||||
Default: False.
|
||||
return_diag : bool, optional
|
||||
If True, then also return an array related to vertex degrees.
|
||||
Default: False.
|
||||
use_out_degree : bool, optional
|
||||
If True, then use out-degree instead of in-degree.
|
||||
This distinction matters only if the graph is asymmetric.
|
||||
Default: False.
|
||||
copy: bool, optional
|
||||
If False, then change `csgraph` in place if possible,
|
||||
avoiding doubling the memory use.
|
||||
Default: True, for backward compatibility.
|
||||
form: 'array', or 'function', or 'lo'
|
||||
Determines the format of the output Laplacian:
|
||||
|
||||
* 'array' is a numpy array;
|
||||
* 'function' is a pointer to evaluating the Laplacian-vector
|
||||
or Laplacian-matrix product;
|
||||
* 'lo' results in the format of the `LinearOperator`.
|
||||
|
||||
Choosing 'function' or 'lo' always avoids doubling
|
||||
the memory use, ignoring `copy` value.
|
||||
Default: 'array', for backward compatibility.
|
||||
dtype: None or one of numeric numpy dtypes, optional
|
||||
The dtype of the output. If ``dtype=None``, the dtype of the
|
||||
output matches the dtype of the input csgraph, except for
|
||||
the case ``normed=True`` and integer-like csgraph, where
|
||||
the output dtype is 'float' allowing accurate normalization,
|
||||
but dramatically increasing the memory use.
|
||||
Default: None, for backward compatibility.
|
||||
symmetrized: bool, optional
|
||||
If True, then the output Laplacian is symmetric/Hermitian.
|
||||
The symmetrization is done by ``csgraph + csgraph.T.conj``
|
||||
without dividing by 2 to preserve integer dtypes if possible
|
||||
prior to the construction of the Laplacian.
|
||||
The symmetrization will increase the memory footprint of
|
||||
sparse matrices unless the sparsity pattern is symmetric or
|
||||
`form` is 'function' or 'lo'.
|
||||
Default: False, for backward compatibility.
|
||||
|
||||
Returns
|
||||
-------
|
||||
lap : ndarray, or sparse matrix, or `LinearOperator`
|
||||
The N x N Laplacian of csgraph. It will be a NumPy array (dense)
|
||||
if the input was dense, or a sparse matrix otherwise, or
|
||||
the format of a function or `LinearOperator` if
|
||||
`form` equals 'function' or 'lo', respectively.
|
||||
diag : ndarray, optional
|
||||
The length-N main diagonal of the Laplacian matrix.
|
||||
For the normalized Laplacian, this is the array of square roots
|
||||
of vertex degrees or 1 if the degree is zero.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The Laplacian matrix of a graph is sometimes referred to as the
|
||||
"Kirchhoff matrix" or just the "Laplacian", and is useful in many
|
||||
parts of spectral graph theory.
|
||||
In particular, the eigen-decomposition of the Laplacian can give
|
||||
insight into many properties of the graph, e.g.,
|
||||
is commonly used for spectral data embedding and clustering.
|
||||
|
||||
The constructed Laplacian doubles the memory use if ``copy=True`` and
|
||||
``form="array"`` which is the default.
|
||||
Choosing ``copy=False`` has no effect unless ``form="array"``
|
||||
or the matrix is sparse in the ``coo`` format, or dense array, except
|
||||
for the integer input with ``normed=True`` that forces the float output.
|
||||
|
||||
Sparse input is reformatted into ``coo`` if ``form="array"``,
|
||||
which is the default.
|
||||
|
||||
If the input adjacency matrix is not symmetic, the Laplacian is
|
||||
also non-symmetric unless ``symmetrized=True`` is used.
|
||||
|
||||
Diagonal entries of the input adjacency matrix are ignored and
|
||||
replaced with zeros for the purpose of normalization where ``normed=True``.
|
||||
The normalization uses the inverse square roots of row-sums of the input
|
||||
adjacency matrix, and thus may fail if the row-sums contain
|
||||
negative or complex with a non-zero imaginary part values.
|
||||
|
||||
The normalization is symmetric, making the normalized Laplacian also
|
||||
symmetric if the input csgraph was symmetric.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import csgraph
|
||||
|
||||
Our first illustration is the symmetric graph
|
||||
|
||||
>>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
|
||||
>>> G
|
||||
array([[0, 0, 0, 0],
|
||||
[0, 1, 2, 3],
|
||||
[0, 2, 4, 6],
|
||||
[0, 3, 6, 9]])
|
||||
|
||||
and its symmetric Laplacian matrix
|
||||
|
||||
>>> csgraph.laplacian(G)
|
||||
array([[ 0, 0, 0, 0],
|
||||
[ 0, 5, -2, -3],
|
||||
[ 0, -2, 8, -6],
|
||||
[ 0, -3, -6, 9]])
|
||||
|
||||
The non-symmetric graph
|
||||
|
||||
>>> G = np.arange(9).reshape(3, 3)
|
||||
>>> G
|
||||
array([[0, 1, 2],
|
||||
[3, 4, 5],
|
||||
[6, 7, 8]])
|
||||
|
||||
has different row- and column sums, resulting in two varieties
|
||||
of the Laplacian matrix, using an in-degree, which is the default
|
||||
|
||||
>>> L_in_degree = csgraph.laplacian(G)
|
||||
>>> L_in_degree
|
||||
array([[ 9, -1, -2],
|
||||
[-3, 8, -5],
|
||||
[-6, -7, 7]])
|
||||
|
||||
or alternatively an out-degree
|
||||
|
||||
>>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
|
||||
>>> L_out_degree
|
||||
array([[ 3, -1, -2],
|
||||
[-3, 8, -5],
|
||||
[-6, -7, 13]])
|
||||
|
||||
Constructing a symmetric Laplacian matrix, one can add the two as
|
||||
|
||||
>>> L_in_degree + L_out_degree.T
|
||||
array([[ 12, -4, -8],
|
||||
[ -4, 16, -12],
|
||||
[ -8, -12, 20]])
|
||||
|
||||
or use the ``symmetrized=True`` option
|
||||
|
||||
>>> csgraph.laplacian(G, symmetrized=True)
|
||||
array([[ 12, -4, -8],
|
||||
[ -4, 16, -12],
|
||||
[ -8, -12, 20]])
|
||||
|
||||
that is equivalent to symmetrizing the original graph
|
||||
|
||||
>>> csgraph.laplacian(G + G.T)
|
||||
array([[ 12, -4, -8],
|
||||
[ -4, 16, -12],
|
||||
[ -8, -12, 20]])
|
||||
|
||||
The goal of normalization is to make the non-zero diagonal entries
|
||||
of the Laplacian matrix to be all unit, also scaling off-diagonal
|
||||
entries correspondingly. The normalization can be done manually, e.g.,
|
||||
|
||||
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
|
||||
>>> L, d = csgraph.laplacian(G, return_diag=True)
|
||||
>>> L
|
||||
array([[ 2, -1, -1],
|
||||
[-1, 2, -1],
|
||||
[-1, -1, 2]])
|
||||
>>> d
|
||||
array([2, 2, 2])
|
||||
>>> scaling = np.sqrt(d)
|
||||
>>> scaling
|
||||
array([1.41421356, 1.41421356, 1.41421356])
|
||||
>>> (1/scaling)*L*(1/scaling)
|
||||
array([[ 1. , -0.5, -0.5],
|
||||
[-0.5, 1. , -0.5],
|
||||
[-0.5, -0.5, 1. ]])
|
||||
|
||||
Or using ``normed=True`` option
|
||||
|
||||
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
|
||||
>>> L
|
||||
array([[ 1. , -0.5, -0.5],
|
||||
[-0.5, 1. , -0.5],
|
||||
[-0.5, -0.5, 1. ]])
|
||||
|
||||
which now instead of the diagonal returns the scaling coefficients
|
||||
|
||||
>>> d
|
||||
array([1.41421356, 1.41421356, 1.41421356])
|
||||
|
||||
Zero scaling coefficients are substituted with 1s, where scaling
|
||||
has thus no effect, e.g.,
|
||||
|
||||
>>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
|
||||
>>> G
|
||||
array([[0, 0, 0],
|
||||
[0, 0, 1],
|
||||
[0, 1, 0]])
|
||||
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
|
||||
>>> L
|
||||
array([[ 0., -0., -0.],
|
||||
[-0., 1., -1.],
|
||||
[-0., -1., 1.]])
|
||||
>>> d
|
||||
array([1., 1., 1.])
|
||||
|
||||
Only the symmetric normalization is implemented, resulting
|
||||
in a symmetric Laplacian matrix if and only if its graph is symmetric
|
||||
and has all non-negative degrees, like in the examples above.
|
||||
|
||||
The output Laplacian matrix is by default a dense array or a sparse matrix
|
||||
inferring its shape, format, and dtype from the input graph matrix:
|
||||
|
||||
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
|
||||
>>> G
|
||||
array([[0., 1., 1.],
|
||||
[1., 0., 1.],
|
||||
[1., 1., 0.]], dtype=float32)
|
||||
>>> csgraph.laplacian(G)
|
||||
array([[ 2., -1., -1.],
|
||||
[-1., 2., -1.],
|
||||
[-1., -1., 2.]], dtype=float32)
|
||||
|
||||
but can alternatively be generated matrix-free as a LinearOperator:
|
||||
|
||||
>>> L = csgraph.laplacian(G, form="lo")
|
||||
>>> L
|
||||
<3x3 _CustomLinearOperator with dtype=float32>
|
||||
>>> L(np.eye(3))
|
||||
array([[ 2., -1., -1.],
|
||||
[-1., 2., -1.],
|
||||
[-1., -1., 2.]])
|
||||
|
||||
or as a lambda-function:
|
||||
|
||||
>>> L = csgraph.laplacian(G, form="function")
|
||||
>>> L
|
||||
<function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
|
||||
>>> L(np.eye(3))
|
||||
array([[ 2., -1., -1.],
|
||||
[-1., 2., -1.],
|
||||
[-1., -1., 2.]])
|
||||
|
||||
The Laplacian matrix is used for
|
||||
spectral data clustering and embedding
|
||||
as well as for spectral graph partitioning.
|
||||
Our final example illustrates the latter
|
||||
for a noisy directed linear graph.
|
||||
|
||||
>>> from scipy.sparse import diags, random
|
||||
>>> from scipy.sparse.linalg import lobpcg
|
||||
|
||||
Create a directed linear graph with ``N=35`` vertices
|
||||
using a sparse adjacency matrix ``G``:
|
||||
|
||||
>>> N = 35
|
||||
>>> G = diags(np.ones(N-1), 1, format="csr")
|
||||
|
||||
Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
|
||||
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
|
||||
|
||||
Set initial approximations for eigenvectors:
|
||||
|
||||
>>> X = rng.random((N, 2))
|
||||
|
||||
The constant vector of ones is always a trivial eigenvector
|
||||
of the non-normalized Laplacian to be filtered out:
|
||||
|
||||
>>> Y = np.ones((N, 1))
|
||||
|
||||
Alternating (1) the sign of the graph weights allows determining
|
||||
labels for spectral max- and min- cuts in a single loop.
|
||||
Since the graph is undirected, the option ``symmetrized=True``
|
||||
must be used in the construction of the Laplacian.
|
||||
The option ``normed=True`` cannot be used in (2) for the negative weights
|
||||
here as the symmetric normalization evaluates square roots.
|
||||
The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
|
||||
a fixed memory footprint and read-only access to the graph.
|
||||
Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
|
||||
that determines the labels as the signs of its components in (5).
|
||||
Since the sign in an eigenvector is not deterministic and can flip,
|
||||
we fix the sign of the first component to be always +1 in (4).
|
||||
|
||||
>>> for cut in ["max", "min"]:
|
||||
... G = -G # 1.
|
||||
... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
|
||||
... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
|
||||
... eves *= np.sign(eves[0, 0]) # 4.
|
||||
... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
|
||||
max-cut labels:
|
||||
[1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
|
||||
min-cut labels:
|
||||
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
|
||||
|
||||
As anticipated for a (slightly noisy) linear graph,
|
||||
the max-cut strips all the edges of the graph coloring all
|
||||
odd vertices into one color and all even vertices into another one,
|
||||
while the balanced min-cut partitions the graph
|
||||
in the middle by deleting a single edge.
|
||||
Both determined partitions are optimal.
|
||||
"""
|
||||
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
|
||||
raise ValueError('csgraph must be a square matrix or array')
|
||||
|
||||
if normed and (
|
||||
np.issubdtype(csgraph.dtype, np.signedinteger)
|
||||
or np.issubdtype(csgraph.dtype, np.uint)
|
||||
):
|
||||
csgraph = csgraph.astype(np.float64)
|
||||
|
||||
if form == "array":
|
||||
create_lap = (
|
||||
_laplacian_sparse if isspmatrix(csgraph) else _laplacian_dense
|
||||
)
|
||||
else:
|
||||
create_lap = (
|
||||
_laplacian_sparse_flo
|
||||
if isspmatrix(csgraph)
|
||||
else _laplacian_dense_flo
|
||||
)
|
||||
|
||||
degree_axis = 1 if use_out_degree else 0
|
||||
|
||||
lap, d = create_lap(
|
||||
csgraph,
|
||||
normed=normed,
|
||||
axis=degree_axis,
|
||||
copy=copy,
|
||||
form=form,
|
||||
dtype=dtype,
|
||||
symmetrized=symmetrized,
|
||||
)
|
||||
if return_diag:
|
||||
return lap, d
|
||||
return lap
|
||||
|
||||
|
||||
def _setdiag_dense(m, d):
|
||||
step = len(d) + 1
|
||||
m.flat[::step] = d
|
||||
|
||||
|
||||
def _laplace(m, d):
|
||||
return lambda v: v * d[:, np.newaxis] - m @ v
|
||||
|
||||
|
||||
def _laplace_normed(m, d, nd):
|
||||
laplace = _laplace(m, d)
|
||||
return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
|
||||
|
||||
|
||||
def _laplace_sym(m, d):
|
||||
return (
|
||||
lambda v: v * d[:, np.newaxis]
|
||||
- m @ v
|
||||
- np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
|
||||
)
|
||||
|
||||
|
||||
def _laplace_normed_sym(m, d, nd):
|
||||
laplace_sym = _laplace_sym(m, d)
|
||||
return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
|
||||
|
||||
|
||||
def _linearoperator(mv, shape, dtype):
|
||||
return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
|
||||
|
||||
|
||||
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build the Laplacian of a sparse graph as a callable or LinearOperator.

    Returns ``(lap, d)``: ``lap`` is a matvec callable when ``form ==
    "function"`` or a ``LinearOperator`` when ``form == "lo"``; ``d`` is the
    degree vector (its square root when ``normed`` is True).
    """
    # The keyword argument `copy` is unused and has no effect here.
    del copy

    if dtype is None:
        dtype = graph.dtype

    # Degree along the requested axis; .getA1() flattens the np.matrix
    # that sparse .sum() returns into a 1-D ndarray.
    graph_sum = graph.sum(axis=axis).getA1()
    graph_diagonal = graph.diagonal()
    # Self-loops do not contribute to the Laplacian diagonal.
    diag = graph_sum - graph_diagonal
    if symmetrized:
        # Degrees of the symmetrized graph m + m^H: add the other-axis sums;
        # the diagonal is subtracted once per direction.
        graph_sum += graph.sum(axis=1 - axis).getA1()
        diag = graph_sum - graph_diagonal - graph_diagonal

    if normed:
        # Isolated vertices get weight 1 so the 1/w scaling below never
        # divides by zero.
        isolated_node_mask = diag == 0
        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
        if symmetrized:
            md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
        else:
            md = _laplace_normed(graph, graph_sum, 1.0 / w)
        if form == "function":
            return md, w.astype(dtype, copy=False)
        elif form == "lo":
            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
            return m, w.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")
    else:
        if symmetrized:
            md = _laplace_sym(graph, graph_sum)
        else:
            md = _laplace(graph, graph_sum)
        if form == "function":
            return md, diag.astype(dtype, copy=False)
        elif form == "lo":
            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
            return m, diag.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")
|
||||
|
||||
|
||||
def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
    """Compute the Laplacian of a sparse graph as an explicit sparse matrix.

    Returns ``(lap, w)`` with ``lap`` sparse and ``w`` the degree vector
    (square-rooted when ``normed`` is True).
    """
    # The keyword argument `form` is unused and has no effect here.
    del form

    if dtype is None:
        dtype = graph.dtype

    needs_copy = False
    if graph.format in ('lil', 'dok'):
        # Formats without efficient arithmetic; tocoo() already copies,
        # so no further copy is needed.
        m = graph.tocoo()
    else:
        m = graph
        if copy:
            needs_copy = True

    if symmetrized:
        m += m.T.conj()

    # Degree vector; self-loops are excluded from the Laplacian diagonal.
    w = m.sum(axis=axis).getA1() - m.diagonal()
    if normed:
        m = m.tocoo(copy=needs_copy)
        # Weight 1 for isolated vertices avoids division by zero below.
        isolated_node_mask = (w == 0)
        w = np.where(isolated_node_mask, 1, np.sqrt(w))
        # Scale rows and columns by 1/sqrt(degree), negate the off-diagonal
        # entries, then set the unit diagonal (0 for isolated vertices).
        m.data /= w[m.row]
        m.data /= w[m.col]
        m.data *= -1
        m.setdiag(1 - isolated_node_mask)
    else:
        if m.format == 'dia':
            # DIA supports setdiag directly; copy so the caller's matrix
            # is not mutated.
            m = m.copy()
        else:
            m = m.tocoo(copy=needs_copy)
        m.data *= -1
        m.setdiag(w)

    return m.astype(dtype, copy=False), w.astype(dtype)
|
||||
|
||||
|
||||
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build the Laplacian of a dense graph as a callable or LinearOperator.

    Dense counterpart of ``_laplacian_sparse_flo``; returns ``(lap, d)``.
    """
    # np.array always copies; np.asarray reuses the input buffer when it can.
    if copy:
        m = np.array(graph)
    else:
        m = np.asarray(graph)

    if dtype is None:
        dtype = m.dtype

    graph_sum = m.sum(axis=axis)
    graph_diagonal = m.diagonal()
    # Self-loops do not contribute to the Laplacian diagonal.
    diag = graph_sum - graph_diagonal
    if symmetrized:
        # Degrees of m + m^H; the diagonal is subtracted once per direction.
        graph_sum += m.sum(axis=1 - axis)
        diag = graph_sum - graph_diagonal - graph_diagonal

    if normed:
        # Weight 1 for isolated vertices avoids division by zero below.
        isolated_node_mask = diag == 0
        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
        if symmetrized:
            md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
        else:
            md = _laplace_normed(m, graph_sum, 1.0 / w)
        if form == "function":
            return md, w.astype(dtype, copy=False)
        elif form == "lo":
            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
            return m, w.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")
    else:
        if symmetrized:
            md = _laplace_sym(m, graph_sum)
        else:
            md = _laplace(m, graph_sum)
        if form == "function":
            return md, diag.astype(dtype, copy=False)
        elif form == "lo":
            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
            return m, diag.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")
|
||||
|
||||
|
||||
def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
|
||||
|
||||
if form != "array":
|
||||
raise ValueError(f'{form!r} must be "array"')
|
||||
|
||||
if dtype is None:
|
||||
dtype = graph.dtype
|
||||
|
||||
if copy:
|
||||
m = np.array(graph)
|
||||
else:
|
||||
m = np.asarray(graph)
|
||||
|
||||
if dtype is None:
|
||||
dtype = m.dtype
|
||||
|
||||
if symmetrized:
|
||||
m += m.T.conj()
|
||||
np.fill_diagonal(m, 0)
|
||||
w = m.sum(axis=axis)
|
||||
if normed:
|
||||
isolated_node_mask = (w == 0)
|
||||
w = np.where(isolated_node_mask, 1, np.sqrt(w))
|
||||
m /= w
|
||||
m /= w[:, np.newaxis]
|
||||
m *= -1
|
||||
_setdiag_dense(m, 1 - isolated_node_mask)
|
||||
else:
|
||||
m *= -1
|
||||
_setdiag_dense(m, w)
|
||||
|
||||
return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
|
||||
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_matching.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_matching.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_matching.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_matching.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_reordering.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_reordering.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_reordering.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_reordering.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_shortest_path.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_shortest_path.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_shortest_path.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_shortest_path.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_tools.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_tools.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_tools.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_tools.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_traversal.cp311-win_amd64.dll.a
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_traversal.cp311-win_amd64.dll.a
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_traversal.cp311-win_amd64.pyd
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_traversal.cp311-win_amd64.pyd
vendored
Normal file
Binary file not shown.
56
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_validation.py
vendored
Normal file
56
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/_validation.py
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
import numpy as np
|
||||
from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc
|
||||
from ._tools import csgraph_to_dense, csgraph_from_dense,\
|
||||
csgraph_masked_from_dense, csgraph_from_masked
|
||||
|
||||
# Canonical edge-weight dtype used by the conversions in validate_graph below.
DTYPE = np.float64
|
||||
|
||||
|
||||
def validate_graph(csgraph, directed, dtype=DTYPE,
                   csr_output=True, dense_output=True,
                   copy_if_dense=False, copy_if_sparse=False,
                   null_value_in=0, null_value_out=np.inf,
                   infinity_null=True, nan_null=True):
    """Routine for validation and conversion of csgraph inputs.

    Accepts a sparse matrix, a masked array, or a dense array and converts
    it to CSR (``csr_output``) or dense (``dense_output``) form, re-encoding
    null (absent) edges uniformly, then validates that the result is a
    square 2-D matrix.

    NOTE(review): the ``dtype`` parameter is accepted but the conversions
    below always use the module-level ``DTYPE`` — confirm whether that is
    intentional.
    """
    if not (csr_output or dense_output):
        raise ValueError("Internal: dense or csr output must be true")

    # if undirected and csc storage, then transposing in-place
    # is quicker than later converting to csr.
    if (not directed) and isspmatrix_csc(csgraph):
        csgraph = csgraph.T

    if isspmatrix(csgraph):
        if csr_output:
            csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
        else:
            csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
    elif np.ma.isMaskedArray(csgraph):
        if dense_output:
            # Replace masked (null) entries with the requested null marker.
            mask = csgraph.mask
            csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
            csgraph[mask] = null_value_out
        else:
            csgraph = csgraph_from_masked(csgraph)
    else:
        if dense_output:
            # Round-trip through a masked array so null_value_in / NaN / inf
            # markers are uniformly re-encoded as null_value_out.
            csgraph = csgraph_masked_from_dense(csgraph,
                                                copy=copy_if_dense,
                                                null_value=null_value_in,
                                                nan_null=nan_null,
                                                infinity_null=infinity_null)
            mask = csgraph.mask
            csgraph = np.asarray(csgraph.data, dtype=DTYPE)
            csgraph[mask] = null_value_out
        else:
            csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
                                         infinity_null=infinity_null,
                                         nan_null=nan_null)

    if csgraph.ndim != 2:
        raise ValueError("compressed-sparse graph must be 2-D")

    if csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError("compressed-sparse graph must be shape (N, N)")

    return csgraph
|
||||
38
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/setup.py
vendored
Normal file
38
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/setup.py
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
|
||||
def configuration(parent_package='', top_path=None):
    """numpy.distutils build configuration for ``scipy.sparse.csgraph``.

    Declares one C extension per Cython-generated graph routine, each
    compiled against the NumPy headers.

    NOTE(review): numpy.distutils is deprecated upstream; kept as shipped.
    """
    import numpy
    from numpy.distutils.misc_util import Configuration

    config = Configuration('csgraph', parent_package, top_path)

    config.add_data_dir('tests')

    config.add_extension('_shortest_path',
                         sources=['_shortest_path.c'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('_traversal',
                         sources=['_traversal.c'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('_min_spanning_tree',
                         sources=['_min_spanning_tree.c'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('_matching',
                         sources=['_matching.c'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('_flow',
                         sources=['_flow.c'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('_reordering',
                         sources=['_reordering.c'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('_tools',
                         sources=['_tools.c'],
                         include_dirs=[numpy.get_include()])

    return config
|
||||
0
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__init__.py
vendored
Normal file
0
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__init__.py
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-311.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-311.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-311.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-311.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
99
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py
vendored
Normal file
99
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_equal, assert_array_almost_equal
|
||||
from scipy.sparse import csgraph
|
||||
|
||||
|
||||
def test_weak_connections():
    # Directed edge 0 -> 1 plus isolated vertex 2: weakly connected
    # components are {0, 1} and {2}, for both dense and sparse input.
    dense = np.array([[0, 1, 0],
                      [0, 0, 0],
                      [0, 0, 0]])
    sparse_rep = csgraph.csgraph_from_dense(dense, null_value=0)

    for graph in (sparse_rep, dense):
        n_components, labels = csgraph.connected_components(
            graph, directed=True, connection='weak')

        assert_equal(n_components, 2)
        assert_array_almost_equal(labels, [0, 0, 1])
|
||||
|
||||
|
||||
def test_strong_connections():
    # 0 -> 1 plus isolated 2: three strong components; adding the reverse
    # edge merges {0, 1} into one.
    one_way = np.array([[0, 1, 0],
                        [0, 0, 0],
                        [0, 0, 0]])
    two_way = one_way + one_way.T

    one_way_sp = csgraph.csgraph_from_dense(one_way, null_value=0)
    two_way_sp = csgraph.csgraph_from_dense(two_way, null_value=0)

    for graph in (one_way_sp, one_way):
        n_components, labels = csgraph.connected_components(
            graph, directed=True, connection='strong')

        assert_equal(n_components, 3)
        labels.sort()
        assert_array_almost_equal(labels, [0, 1, 2])

    for graph in (two_way_sp, two_way):
        n_components, labels = csgraph.connected_components(
            graph, directed=True, connection='strong')

        assert_equal(n_components, 2)
        labels.sort()
        assert_array_almost_equal(labels, [0, 0, 1])
|
||||
|
||||
|
||||
def test_strong_connections2():
    # Only {2, 3} form a non-trivial strong component; vertices 0, 1, 4, 5
    # are singletons, giving 5 components in total.
    adjacency = np.array([[0, 0, 0, 0, 0, 0],
                          [1, 0, 1, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0],
                          [0, 0, 1, 0, 1, 0],
                          [0, 0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(
        adjacency, directed=True, connection='strong')
    assert_equal(n_components, 5)
    labels.sort()
    assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
|
||||
|
||||
|
||||
def test_weak_connections2():
    # Ignoring edge direction, the graph splits into {0, 1} and {2, 3, 4, 5}.
    adjacency = np.array([[0, 0, 0, 0, 0, 0],
                          [1, 0, 0, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0],
                          [0, 0, 1, 0, 1, 0],
                          [0, 0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(
        adjacency, directed=True, connection='weak')
    assert_equal(n_components, 2)
    labels.sort()
    assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
|
||||
|
||||
|
||||
def test_ticket1876():
    # Regression test: the original implementation reported a single
    # strongly-connected component; there should be two ({0,1} and {2,3}).
    adjacency = np.array([[0, 1, 1, 0],
                          [1, 0, 0, 1],
                          [0, 0, 0, 1],
                          [0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(
        adjacency, connection='strong')

    assert_equal(n_components, 2)
    assert_equal(labels[0], labels[1])
    assert_equal(labels[2], labels[3])
|
||||
|
||||
|
||||
def test_fully_connected_graph():
    # Fully connected dense matrices used to raise an exception.
    # https://github.com/scipy/scipy/issues/3818
    complete = np.ones((4, 4))
    n_components, _labels = csgraph.connected_components(complete)
    assert_equal(n_components, 1)
|
||||
61
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
vendored
Normal file
61
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_array_almost_equal
|
||||
from scipy.sparse import csr_matrix
|
||||
from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
|
||||
|
||||
|
||||
def test_csgraph_from_dense():
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    some_nulls = (dense < 0.4)
    all_nulls = (dense < 0.8)

    # Null edges encoded as 0, nan or inf must all convert to the same
    # sparse structure.
    for marker in [0, np.nan, np.inf]:
        dense[all_nulls] = marker
        with np.errstate(invalid="ignore"):
            as_csr = csgraph_from_dense(dense, null_value=0)

        dense[all_nulls] = 0
        assert_array_almost_equal(dense, as_csr.toarray())

    # Mixed encodings: zeros everywhere null, plus nan/inf on a subset.
    for marker in [np.nan, np.inf]:
        dense[all_nulls] = 0
        dense[some_nulls] = marker
        with np.errstate(invalid="ignore"):
            as_csr = csgraph_from_dense(dense, null_value=0)

        dense[all_nulls] = 0
        assert_array_almost_equal(dense, as_csr.toarray())
|
||||
|
||||
|
||||
def test_csgraph_to_dense():
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    nulls = (dense < 0.8)
    dense[nulls] = np.inf

    as_csr = csgraph_from_dense(dense)

    # Round-tripping must reproduce the dense graph for any null marker.
    for marker in [0, 10, -np.inf, np.inf]:
        dense[nulls] = marker
        assert_array_almost_equal(dense, csgraph_to_dense(as_csr, marker))
|
||||
|
||||
|
||||
def test_multiple_edges():
    # Create a random square matrix with an even number of elements.
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    as_csr = csr_matrix(dense)

    # Double-up every other column so the CSR matrix has duplicate edges.
    as_csr.indices[::2] = as_csr.indices[1::2]

    # Normal sparse toarray() sums the duplicated edges.
    summed = as_csr.toarray()
    assert_array_almost_equal(summed[:, 1::2],
                              dense[:, ::2] + dense[:, 1::2])

    # csgraph_to_dense instead keeps the minimum of each duplicated edge.
    chosen = csgraph_to_dense(as_csr)
    assert_array_almost_equal(chosen[:, 1::2],
                              np.minimum(dense[:, ::2], dense[:, 1::2]))
|
||||
208
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_flow.py
vendored
Normal file
208
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_flow.py
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_array_equal
|
||||
import pytest
|
||||
|
||||
from scipy.sparse import csr_matrix, csc_matrix
|
||||
from scipy.sparse.csgraph import maximum_flow
|
||||
from scipy.sparse.csgraph._flow import (
|
||||
_add_reverse_edges, _make_edge_pointers, _make_tails
|
||||
)
|
||||
|
||||
# Max-flow algorithms exercised by the parametrized tests below.
methods = ['edmonds_karp', 'dinic']
|
||||
|
||||
def test_raises_on_dense_input():
    # maximum_flow only accepts sparse CSR input; a dense ndarray must
    # raise TypeError.
    # NOTE(review): only the first call inside the `raises` block executes;
    # the second line is unreachable once TypeError is raised.
    with pytest.raises(TypeError):
        graph = np.array([[0, 1], [0, 0]])
        maximum_flow(graph, 0, 1)
        maximum_flow(graph, 0, 1, method='edmonds_karp')
|
||||
|
||||
|
||||
def test_raises_on_csc_input():
    # CSC storage is rejected; maximum_flow requires CSR.
    # NOTE(review): the second call is unreachable once TypeError is raised.
    with pytest.raises(TypeError):
        graph = csc_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, 0, 1)
        maximum_flow(graph, 0, 1, method='edmonds_karp')
|
||||
|
||||
|
||||
def test_raises_on_floating_point_input():
    # Capacities must have an integer dtype; float64 capacities must raise.
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64)
        maximum_flow(graph, 0, 1)
        maximum_flow(graph, 0, 1, method='edmonds_karp')
|
||||
|
||||
|
||||
def test_raises_on_non_square_input():
    # The capacity matrix must be square (one row/column per vertex).
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1, 2], [2, 1, 0]])
        maximum_flow(graph, 0, 1)
|
||||
|
||||
|
||||
def test_raises_when_source_is_sink():
    # A flow problem with identical source and sink is rejected.
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, 0, 0)
        maximum_flow(graph, 0, 0, method='edmonds_karp')
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
@pytest.mark.parametrize('source', [-1, 2, 3])
def test_raises_when_source_is_out_of_bounds(source, method):
    # Source indices outside [0, n) must raise for every method.
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, source, 1, method=method)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
@pytest.mark.parametrize('sink', [-1, 2, 3])
def test_raises_when_sink_is_out_of_bounds(sink, method):
    # Sink indices outside [0, n) must raise for every method.
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, 0, sink, method=method)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
def test_simple_graph(method):
    # Single edge at capacity 5: the whole capacity flows through.
    # This graph looks as follows:
    # (0) --5--> (1)
    graph = csr_matrix([[0, 5], [0, 0]])
    res = maximum_flow(graph, 0, 1, method=method)
    assert res.flow_value == 5
    # res.flow is antisymmetric: f[v, u] == -f[u, v].
    expected_flow = np.array([[0, 5], [-5, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
def test_bottle_neck_graph(method):
    # This graph cannot use the full capacity between 0 and 1:
    # (0) --5--> (1) --3--> (2)
    # The narrower second edge caps the max flow at 3.
    graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
    res = maximum_flow(graph, 0, 2, method=method)
    assert res.flow_value == 3
    expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
def test_backwards_flow(method):
    # This example causes backwards flow between vertices 3 and 4,
    # and so this test ensures that we handle that accordingly. See
    # https://stackoverflow.com/q/38843963/5085211
    # for more information.
    graph = csr_matrix([[0, 10, 0, 0, 10, 0, 0, 0],
                        [0, 0, 10, 0, 0, 0, 0, 0],
                        [0, 0, 0, 10, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 10],
                        [0, 0, 0, 10, 0, 10, 0, 0],
                        [0, 0, 0, 0, 0, 0, 10, 0],
                        [0, 0, 0, 0, 0, 0, 0, 10],
                        [0, 0, 0, 0, 0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 7, method=method)
    assert res.flow_value == 20
    expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
                              [-10, 0, 10, 0, 0, 0, 0, 0],
                              [0, -10, 0, 10, 0, 0, 0, 0],
                              [0, 0, -10, 0, 0, 0, 0, 10],
                              [-10, 0, 0, 0, 0, 10, 0, 0],
                              [0, 0, 0, 0, -10, 0, 10, 0],
                              [0, 0, 0, 0, 0, -10, 0, 10],
                              [0, 0, 0, -10, 0, 0, -10, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
def test_example_from_clrs_chapter_26_1(method):
    # See page 659 in CLRS second edition, but note that the maximum flow
    # we find is slightly different than the one in CLRS; we push a flow of
    # 12 to v_1 instead of v_2.
    graph = csr_matrix([[0, 16, 13, 0, 0, 0],
                        [0, 0, 10, 12, 0, 0],
                        [0, 4, 0, 0, 14, 0],
                        [0, 0, 9, 0, 0, 20],
                        [0, 0, 0, 7, 0, 4],
                        [0, 0, 0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 5, method=method)
    # The maximum flow value (23) is unique even though the flow
    # decomposition is not.
    assert res.flow_value == 23
    expected_flow = np.array([[0, 12, 11, 0, 0, 0],
                              [-12, 0, 0, 12, 0, 0],
                              [-11, 0, 0, 0, 11, 0],
                              [0, -12, 0, 0, -7, 19],
                              [0, 0, -11, 7, 0, 4],
                              [0, 0, 0, -19, -4, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
def test_disconnected_graph(method):
    # This tests the following disconnected graph:
    # (0) --5--> (1)    (2) --3--> (3)
    # No path connects source 0 to sink 3, so the max flow is zero.
    graph = csr_matrix([[0, 5, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 9, 3],
                        [0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 3, method=method)
    assert res.flow_value == 0
    expected_flow = np.zeros((4, 4), dtype=np.int32)
    assert_array_equal(res.flow.toarray(), expected_flow)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('method', methods)
def test_add_reverse_edges_large_graph(method):
    # Regression test for https://github.com/scipy/scipy/issues/14385
    # A 100k-vertex path graph: max flow along the chain is 1, and the
    # flow matrix equals graph - graph^T.
    n = 100_000
    indices = np.arange(1, n)
    indptr = np.array(list(range(n)) + [n - 1])
    data = np.ones(n - 1, dtype=np.int32)
    graph = csr_matrix((data, indices, indptr), shape=(n, n))
    res = maximum_flow(graph, 0, n - 1, method=method)
    assert res.flow_value == 1
    expected_flow = graph - graph.transpose()
    # Compare the raw CSR arrays to also pin down the sparsity structure.
    assert_array_equal(res.flow.data, expected_flow.data)
    assert_array_equal(res.flow.indices, expected_flow.indices)
    assert_array_equal(res.flow.indptr, expected_flow.indptr)
|
||||
|
||||
|
||||
def test_residual_raises_deprecation_warning():
    # Accessing the deprecated `residual` attribute must emit
    # DeprecationWarning.
    graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
    res = maximum_flow(graph, 0, 2)
    with pytest.deprecated_call():
        res.residual
|
||||
|
||||
|
||||
@pytest.mark.parametrize("a,b_data_expected", [
|
||||
([[]], []),
|
||||
([[0], [0]], []),
|
||||
([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]),
|
||||
([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])])
|
||||
def test_add_reverse_edges(a, b_data_expected):
|
||||
"""Test that the reversal of the edges of the input graph works
|
||||
as expected.
|
||||
"""
|
||||
a = csr_matrix(a, dtype=np.int32, shape=(len(a), len(a)))
|
||||
b = _add_reverse_edges(a)
|
||||
assert_array_equal(b.data, b_data_expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("a,expected", [
|
||||
([[]], []),
|
||||
([[0]], []),
|
||||
([[1]], [0]),
|
||||
([[0, 1], [10, 0]], [1, 0]),
|
||||
([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])
|
||||
])
|
||||
def test_make_edge_pointers(a, expected):
|
||||
a = csr_matrix(a, dtype=np.int32)
|
||||
rev_edge_ptr = _make_edge_pointers(a)
|
||||
assert_array_equal(rev_edge_ptr, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("a,expected", [
|
||||
([[]], []),
|
||||
([[0]], []),
|
||||
([[1]], [0]),
|
||||
([[0, 1], [10, 0]], [0, 1]),
|
||||
([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2])
|
||||
])
|
||||
def test_make_tails(a, expected):
|
||||
a = csr_matrix(a, dtype=np.int32)
|
||||
tails = _make_tails(a)
|
||||
assert_array_equal(tails, expected)
|
||||
358
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
vendored
Normal file
358
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
vendored
Normal file
@@ -0,0 +1,358 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
from numpy.testing import assert_allclose
|
||||
from pytest import raises as assert_raises
|
||||
from scipy import sparse
|
||||
|
||||
from scipy.sparse import csgraph
|
||||
|
||||
|
||||
def check_int_type(mat):
    """Return True when ``mat`` has a signed or unsigned integer dtype."""
    is_signed = np.issubdtype(mat.dtype, np.signedinteger)
    is_unsigned = np.issubdtype(mat.dtype, np.uint)
    return is_signed or is_unsigned
|
||||
|
||||
|
||||
def test_laplacian_value_error():
    # Non-2-D and non-square inputs must be rejected for every dtype.
    for t in int, float, complex:
        for m in ([1, 1],
                  [[[1]]],
                  [[1, 2, 3], [4, 5, 6]],
                  [[1, 2], [3, 4], [5, 5]]):
            A = np.array(m, dtype=t)
            assert_raises(ValueError, csgraph.laplacian, A)
|
||||
|
||||
|
||||
def _explicit_laplacian(x, normed=False):
|
||||
if sparse.issparse(x):
|
||||
x = x.toarray()
|
||||
x = np.asarray(x)
|
||||
y = -1.0 * x
|
||||
for j in range(y.shape[0]):
|
||||
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
|
||||
if normed:
|
||||
d = np.diag(y).copy()
|
||||
d[d == 0] = 1.0
|
||||
y /= d[:,None]**.5
|
||||
y /= d[None,:]**.5
|
||||
return y
|
||||
|
||||
|
||||
def _check_symmetric_graph_laplacian(mat, normed, copy=True):
    """Check csgraph.laplacian against the explicit reference on a symmetric graph.

    ``mat`` may be an array/sparse matrix or a string, which is ``eval``-ed
    (so the fixture list in test_symmetric_graph_laplacian stays readable).
    """
    if not hasattr(mat, 'shape'):
        mat = eval(mat, dict(np=np, sparse=sparse))

    if sparse.issparse(mat):
        sp_mat = mat
        mat = sp_mat.toarray()
    else:
        sp_mat = sparse.csr_matrix(mat)

    # Pristine copies used below to verify the copy=True contract.
    mat_copy = np.copy(mat)
    sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)

    n_nodes = mat.shape[0]
    explicit_laplacian = _explicit_laplacian(mat, normed=normed)
    laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
    sp_laplacian = csgraph.laplacian(sp_mat, normed=normed,
                                     copy=copy)

    if copy:
        # Inputs must be untouched when a copy was requested.
        assert_allclose(mat, mat_copy)
        _assert_allclose_sparse(sp_mat, sp_mat_copy)
    else:
        # copy=False may overwrite the input in place; only checked for the
        # dtype/format combinations where in-place writing is possible.
        if not (normed and check_int_type(mat)):
            assert_allclose(laplacian, mat)
            if sp_mat.format == 'coo':
                _assert_allclose_sparse(sp_laplacian, sp_mat)

    assert_allclose(laplacian, sp_laplacian.toarray())

    for tested in [laplacian, sp_laplacian.toarray()]:
        if not normed:
            # Unnormalized Laplacian rows/columns sum to zero.
            assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
        assert_allclose(tested.T, tested)
        assert_allclose(tested, explicit_laplacian)
|
||||
|
||||
|
||||
def test_symmetric_graph_laplacian():
    # Fixtures are eval-ed strings (see _check_symmetric_graph_laplacian)
    # covering dense, sparse, and np.matrix inputs.
    symmetric_mats = (
        'np.arange(10) * np.arange(10)[:, np.newaxis]',
        'np.ones((7, 7))',
        'np.eye(19)',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
        'np.vander(np.arange(4)) + np.vander(np.arange(4)).T'
    )
    # Exercise every combination of normalization and copy semantics.
    for mat in symmetric_mats:
        for normed in True, False:
            for copy in True, False:
                _check_symmetric_graph_laplacian(mat, normed, copy)
|
||||
|
||||
|
||||
def _assert_allclose_sparse(a, b, **kwargs):
|
||||
# helper function that can deal with sparse matrices
|
||||
if sparse.issparse(a):
|
||||
a = a.toarray()
|
||||
if sparse.issparse(b):
|
||||
b = b.toarray()
|
||||
assert_allclose(a, b, **kwargs)
|
||||
|
||||
|
||||
def _check_laplacian_dtype_none(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check csgraph.laplacian with dtype=None: the output dtype must follow
    the input dtype, except that normalizing an integer graph promotes to
    float64."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=None,
    )
    if normed and check_int_type(mat):
        # Normalization divides by sqrt(degree), forcing float64 output.
        assert L.dtype == np.float64
        assert d.dtype == np.float64
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)
    else:
        # Otherwise the input dtype is preserved.
        assert L.dtype == dtype
        assert d.dtype == dtype
        desired_L = np.asarray(desired_L).astype(dtype)
        desired_d = np.asarray(desired_d).astype(dtype)
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy:
        # copy=False may write the result into the input in place (only
        # possible when no dtype promotion occurred).
        if not (normed and check_int_type(mat)):
            if type(mat) is np.ndarray:
                assert_allclose(L, mat)
            elif mat.format == "coo":
                _assert_allclose_sparse(L, mat)
|
||||
|
||||
|
||||
def _check_laplacian_dtype(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check csgraph.laplacian with an explicit dtype: the requested dtype
    must be honored for both the Laplacian and the diagonal."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
    )
    assert L.dtype == dtype
    assert d.dtype == dtype
    desired_L = np.asarray(desired_L).astype(dtype)
    desired_d = np.asarray(desired_d).astype(dtype)
    _assert_allclose_sparse(L, desired_L, atol=1e-12)
    _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy:
        # copy=False may write the result into the input in place (only
        # possible when no dtype promotion occurred).
        if not (normed and check_int_type(mat)):
            if type(mat) is np.ndarray:
                assert_allclose(L, mat)
            elif mat.format == 'coo':
                _assert_allclose_sparse(L, mat)
|
||||
|
||||
|
||||
# Dtype families exercised by the parametrized Laplacian tests below.
INT_DTYPES = {np.intc, np.int_, np.longlong}
REAL_DTYPES = {np.single, np.double, np.longdouble}
COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
# use sorted tuple to ensure fixed order of tests
# (the three sets are disjoint, so symmetric difference ^ is their union)
DTYPES = tuple(sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str))
|
||||
|
||||
|
||||
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
                                      sparse.csr_matrix,
                                      sparse.coo_matrix])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
def test_asymmetric_laplacian(use_out_degree, normed,
                              copy, dtype, arr_type):
    """Laplacian of a small directed graph, for every combination of
    normalization, degree direction, copy flag, dtype, and container type.
    """
    # Directed adjacency matrix of the test graph.
    adjacency = arr_type(np.array([[0, 1, 0],
                                   [4, 2, 0],
                                   [0, 0, 0]]), dtype=dtype)
    adjacency_copy = adjacency.copy()

    # Expected Laplacian and degree vector for each configuration.
    if use_out_degree:
        if normed:
            expected_L = [[1, -0.5, 0],
                          [-2, 1, 0],
                          [0, 0, 0]]
            expected_d = [1, 2, 1]
        else:
            expected_L = [[1, -1, 0],
                          [-4, 4, 0],
                          [0, 0, 0]]
            expected_d = [1, 4, 0]
    else:
        if normed:
            expected_L = [[1, -0.5, 0],
                          [-2, 1, 0],
                          [0, 0, 0]]
            expected_d = [2, 1, 1]
        else:
            expected_L = [[4, -1, 0],
                          [-4, 1, 0],
                          [0, 0, 0]]
            expected_d = [4, 1, 0]

    _check_laplacian_dtype_none(
        adjacency,
        expected_L,
        expected_d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )

    # The first check may have mutated its input (copy=False), so the
    # explicit-dtype check runs on an untouched copy.
    _check_laplacian_dtype(
        adjacency_copy,
        expected_L,
        expected_d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )
||||
|
||||
@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
                                 'dok', 'dia', 'bsr'])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("copy", [True, False])
def test_sparse_formats(fmt, normed, copy):
    """Symmetric graph Laplacian checks pass for every sparse format."""
    graph = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt)
    _check_symmetric_graph_laplacian(graph, normed, copy)
|
||||
@pytest.mark.parametrize(
    "arr_type", [np.asarray, sparse.csr_matrix, sparse.coo_matrix]
)
@pytest.mark.parametrize("form", ["array", "function", "lo"])
def test_laplacian_symmetrized(arr_type, form):
    """``symmetrized=True`` must agree with the Laplacian of ``mat + mat.T``.

    Checks the degree vectors and the (materialized) operators, for both the
    plain and normalized Laplacian, in array/function/LinearOperator forms.
    """
    # adjacency matrix
    n = 3
    mat = arr_type(np.arange(n * n).reshape(n, n))
    L_in, d_in = csgraph.laplacian(
        mat,
        return_diag=True,
        form=form,
    )
    L_out, d_out = csgraph.laplacian(
        mat,
        return_diag=True,
        use_out_degree=True,
        form=form,
    )
    Ls, ds = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        form=form,
    )
    Ls_normed, ds_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        normed=True,
        form=form,
    )
    # Explicitly symmetrize and recompute as the reference.
    mat += mat.T
    Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
    Lss_normed, dss_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=True,
        form=form,
    )

    # The symmetrized degree is the sum of in- and out-degrees.
    assert_allclose(ds, d_in + d_out)
    assert_allclose(ds, dss)
    assert_allclose(ds_normed, dss_normed)

    # Materialize each operator for comparison.  This replaces the original
    # eval()-based name lookup with an explicit dict — same values, no eval.
    operators = {
        "L_in": L_in,
        "L_out": L_out,
        "Ls": Ls,
        "Ls_normed": Ls_normed,
        "Lss": Lss,
        "Lss_normed": Lss_normed,
    }
    if form == "array":
        d = operators
    else:
        # "function"/"lo" forms return callables; apply them to the identity.
        eye = np.eye(n, dtype=mat.dtype)
        d = {name: op(eye) for name, op in operators.items()}

    _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T)
    _assert_allclose_sparse(d["Ls"], d["Lss"])
    _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"])
|
||||
@pytest.mark.parametrize(
    "arr_type", [np.asarray, sparse.csr_matrix, sparse.coo_matrix]
)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("symmetrized", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
@pytest.mark.parametrize("form", ["function", "lo"])
def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
    """The "function" and "lo" output forms of ``csgraph.laplacian`` must
    agree with the default and "array" forms on a small directed graph.
    """
    n = 3
    mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]]
    mat = arr_type(np.array(mat), dtype=dtype)
    # Reference result in the default output form.
    Lo, do = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
    )
    # The explicit "array" form must match the default form.
    La, da = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form="array",
    )
    assert_allclose(do, da)
    _assert_allclose_sparse(Lo, La)

    # The form under test ("function" or "lo") returns a callable/operator.
    L, d = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form=form,
    )
    assert_allclose(d, do)
    assert d.dtype == dtype
    # Materialize the operator by applying it to the identity matrix.
    Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
    _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
    # Applying the operator to a tall matrix must match dense multiplication.
    x = np.arange(6).reshape(3, 2)
    if not (normed and dtype in INT_DTYPES):
        assert_allclose(L(x), Lo @ x)
    else:
        # Normalized Lo is casted to integer, but L() is not
        pass
|
||||
def test_format_error_message():
    """An unknown ``form`` value is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError, match="Invalid form: 'toto'"):
        csgraph.laplacian(np.eye(1), form='toto')
||||
239
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_matching.py
vendored
Normal file
239
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_matching.py
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
from itertools import product
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_array_equal, assert_equal
|
||||
import pytest
|
||||
|
||||
from scipy.sparse import csr_matrix, coo_matrix, diags
|
||||
from scipy.sparse.csgraph import (
|
||||
maximum_bipartite_matching, min_weight_full_bipartite_matching
|
||||
)
|
||||
|
||||
|
||||
def test_maximum_bipartite_matching_raises_on_dense_input():
    """A dense ndarray input is rejected with TypeError."""
    dense_graph = np.array([[0, 1], [0, 0]])
    with pytest.raises(TypeError):
        maximum_bipartite_matching(dense_graph)
|
||||
def test_maximum_bipartite_matching_empty_graph():
    """A 0x0 graph yields an empty matching for both permutation types."""
    graph = csr_matrix((0, 0))
    row_matching = maximum_bipartite_matching(graph, perm_type='row')
    col_matching = maximum_bipartite_matching(graph, perm_type='column')
    empty = np.array([])
    assert_array_equal(empty, row_matching)
    assert_array_equal(empty, col_matching)
||||
def test_maximum_bipartite_matching_empty_left_partition():
    """With zero columns, the row matching is empty and rows are unmatched."""
    graph = csr_matrix((2, 0))
    row_matching = maximum_bipartite_matching(graph, perm_type='row')
    col_matching = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([]), row_matching)
    assert_array_equal(np.array([-1, -1]), col_matching)
|
||||
def test_maximum_bipartite_matching_empty_right_partition():
    """With zero rows, the column matching is empty and columns unmatched."""
    graph = csr_matrix((0, 3))
    row_matching = maximum_bipartite_matching(graph, perm_type='row')
    col_matching = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([-1, -1, -1]), row_matching)
    assert_array_equal(np.array([]), col_matching)
|
||||
def test_maximum_bipartite_matching_graph_with_no_edges():
    """An edgeless 2x2 graph leaves every vertex unmatched."""
    graph = csr_matrix((2, 2))
    unmatched = np.array([-1, -1])
    assert_array_equal(unmatched,
                       maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(unmatched,
                       maximum_bipartite_matching(graph, perm_type='column'))
||||
def test_maximum_bipartite_matching_graph_that_causes_augmentation():
    """Column 1 is first assigned to row 1 but must be reassigned (an
    augmenting path) so that row 2 can also be matched."""
    graph = csr_matrix([[1, 1], [1, 0]])
    col_matching = maximum_bipartite_matching(graph, perm_type='column')
    row_matching = maximum_bipartite_matching(graph, perm_type='row')
    expected = np.array([1, 0])
    assert_array_equal(expected, col_matching)
    assert_array_equal(expected, row_matching)
|
||||
def test_maximum_bipartite_matching_graph_with_more_rows_than_columns():
    """Tall graph: one row necessarily stays unmatched."""
    graph = csr_matrix([[1, 1], [1, 0], [0, 1]])
    col_matching = maximum_bipartite_matching(graph, perm_type='column')
    row_matching = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, -1, 1]), col_matching)
    assert_array_equal(np.array([0, 2]), row_matching)
|
||||
def test_maximum_bipartite_matching_graph_with_more_columns_than_rows():
    """Wide graph: one column necessarily stays unmatched."""
    graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
    col_matching = maximum_bipartite_matching(graph, perm_type='column')
    row_matching = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, 2]), col_matching)
    assert_array_equal(np.array([0, -1, 1]), row_matching)
|
||||
def test_maximum_bipartite_matching_explicit_zeros_count_as_edges():
    """Explicitly stored zero entries still count as edges for matching."""
    # Anti-diagonal structure stored as explicit zeros.
    data = [0, 0]
    indices = [1, 0]
    indptr = [0, 1, 2]
    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
    expected = np.array([1, 0])
    assert_array_equal(expected,
                       maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(expected,
                       maximum_bipartite_matching(graph, perm_type='column'))
|
||||
def test_maximum_bipartite_matching_feasibility_of_result():
    """Regression test for GitHub issue #11458: every matched pair must be
    an actual edge of the graph."""
    data = np.ones(50, dtype=int)
    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
              47, 47, 48, 50]
    graph = csr_matrix((data, indices, indptr), shape=(20, 25))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert (x != -1).sum() == 13
    assert (y != -1).sum() == 13
    # Ensure that each element of the matching is in fact an edge.
    for row, col in enumerate(y):
        if col != -1:
            assert graph[row, col]
    for col, row in enumerate(x):
        if row != -1:
            assert graph[row, col]
|
||||
def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex():
    """Row/column matchings of a randomly permuted identity matrix recover
    permutations that map it back to a zero-free diagonal."""
    np.random.seed(42)

    def permutation_matrix(rows, cols):
        # 25x25 0/1 matrix with exactly one entry per (row, col) pair.
        return coo_matrix((np.ones(25, dtype=int), (rows, cols))).tocsr()

    A = diags(np.ones(25), offsets=0, format='csr')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    # Randomly permute identity matrix
    B = (permutation_matrix(np.arange(25), rand_perm)
         * A
         * permutation_matrix(rand_perm2, np.arange(25)))

    # Row permute
    perm = maximum_bipartite_matching(B, perm_type='row')
    C1 = permutation_matrix(np.arange(25), perm) * B

    # Column permute
    perm2 = maximum_bipartite_matching(B, perm_type='column')
    C2 = B * permutation_matrix(perm2, np.arange(25))

    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)
|
||||
@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
def test_min_weight_full_matching_trivial_graph(num_rows, num_cols):
    """Degenerate bipartite graphs produce an empty full matching."""
    # NOTE(review): the shape is (num_cols, num_rows), which looks
    # transposed relative to the parameter names; harmless here because one
    # dimension is always zero — confirm against upstream intent.
    biadjacency_matrix = csr_matrix((num_cols, num_rows))
    matched_rows, matched_cols = min_weight_full_bipartite_matching(
        biadjacency_matrix)
    assert len(matched_rows) == 0
    assert len(matched_cols) == 0
|
||||
@pytest.mark.parametrize('biadjacency_matrix',
                         [
                            [[1, 1, 1], [1, 0, 0], [1, 0, 0]],
                            [[1, 1, 1], [0, 0, 1], [0, 0, 1]],
                            [[1, 0, 0], [2, 0, 0]],
                            [[0, 1, 0], [0, 2, 0]],
                            [[1, 0], [2, 0], [5, 0]]
                         ])
def test_min_weight_full_matching_infeasible_problems(biadjacency_matrix):
    """Graphs admitting no full matching are rejected with ValueError."""
    with pytest.raises(ValueError):
        min_weight_full_bipartite_matching(csr_matrix(biadjacency_matrix))
|
||||
def test_explicit_zero_causes_warning():
    """Explicitly stored zero weights make the solver emit a UserWarning."""
    with pytest.warns(UserWarning):
        graph = csr_matrix(((2, 0, 3), (0, 1, 1), (0, 2, 3)))
        min_weight_full_bipartite_matching(graph)
|
||||
# General test for linear sum assignment solvers to make it possible to rely
# on the same tests for scipy.optimize.linear_sum_assignment.
def linear_sum_assignment_assertions(
    solver, array_type, sign, test_case
):
    """Run one LSA case (and its transpose) through *solver* and check the
    achieved costs; ``sign == -1`` turns the case into maximization."""
    cost_matrix, expected_cost = test_case
    maximize = sign == -1
    cost_matrix = sign * array_type(cost_matrix)
    expected_cost = sign * np.array(expected_cost)

    def solve_and_extract(matrix):
        # Row indices must come back sorted.
        rows, cols = solver(matrix, maximize=maximize)
        assert_array_equal(rows, np.sort(rows))
        return np.array(matrix[rows, cols])

    assert_array_equal(expected_cost,
                       solve_and_extract(cost_matrix).flatten())

    # The transposed problem must achieve the same multiset of costs.
    transposed_costs = solve_and_extract(cost_matrix.T)
    assert_array_equal(np.sort(expected_cost),
                       np.sort(transposed_costs).flatten())
|
||||
# Shared (sign, (cost_matrix, expected_row_costs)) cases for linear sum
# assignment solvers; sign == -1 runs each case as a maximization problem.
# NOTE: `product` yields a one-shot iterator, so these cases can only be
# consumed by a single parametrized test.
linear_sum_assignment_test_cases = product(
    [-1, 1],
    [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),

        # Rectangular variant (3x4)
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),

        # Square, with infinite (infeasible) entries
        ([[10, float("inf"), float("inf")],
          [float("inf"), float("inf"), 1],
          [float("inf"), 7, float("inf")]],
         [10, 1, 7])
    ])
|
||||
@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
def test_min_weight_full_matching_small_inputs(sign, test_case):
    """Run the shared LSA assertions on the sparse full-matching solver."""
    linear_sum_assignment_assertions(min_weight_full_bipartite_matching,
                                     csr_matrix, sign, test_case)
||||
70
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_reordering.py
vendored
Normal file
70
.CondaPkg/env/Lib/site-packages/scipy/sparse/csgraph/tests/test_reordering.py
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_equal
|
||||
from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
|
||||
from scipy.sparse import csc_matrix, csr_matrix, coo_matrix
|
||||
|
||||
|
||||
def test_graph_reverse_cuthill_mckee():
    """RCM ordering of a fixed 8-node graph, with int32 and int64 indices."""
    adjacency = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
                          [0, 1, 1, 0, 0, 1, 0, 1],
                          [0, 1, 1, 0, 1, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0, 1, 0],
                          [1, 0, 1, 0, 1, 0, 0, 0],
                          [0, 1, 0, 0, 0, 1, 0, 1],
                          [0, 0, 0, 1, 0, 0, 1, 0],
                          [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)

    graph = csr_matrix(adjacency)
    expected = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    assert_equal(reverse_cuthill_mckee(graph), expected)

    # Test int64 indices input
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    assert_equal(reverse_cuthill_mckee(graph, True), expected)
||||
def test_graph_reverse_cuthill_mckee_ordering():
    """RCM ordering of a 16-node graph specified in COO coordinate form."""
    weights = np.ones(63, dtype=int)
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
                     2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                     6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
                     9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
                     12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
                     14, 15, 15, 15, 15, 15])
    cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
                     7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
                     15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
                     1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
                     4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
                     5, 7, 10, 13, 15])
    graph = coo_matrix((weights, (rows, cols))).tocsr()
    expected = np.array([12, 14, 4, 6, 10, 8, 2, 15,
                         0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(reverse_cuthill_mckee(graph), expected)
|
||||
def test_graph_structural_rank():
    """Structural rank of square, rank-deficient, and non-square graphs."""
    # Test square matrix #1
    A = csc_matrix([[1, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]])
    assert_equal(structural_rank(A), 3)

    # Test square matrix #2 (structurally rank-deficient)
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
                     3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7])
    cols = np.array([0, 1, 2, 3, 4, 2, 5, 2, 6, 0, 1, 3,
                     5, 6, 7, 4, 5, 5, 6, 2, 6, 2, 4])
    B = coo_matrix((np.ones_like(rows), (rows, cols)), shape=(8, 8))
    assert_equal(structural_rank(B), 6)

    # Test non-square (wide) matrix
    C = csc_matrix([[1, 0, 2, 0],
                    [2, 0, 4, 0]])
    assert_equal(structural_rank(C), 2)

    # Test tall matrix (transpose of the wide one)
    assert_equal(structural_rank(C.T), 2)
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user