update
This commit is contained in:
103
.CondaPkg/env/Lib/site-packages/imageio/plugins/__init__.py
vendored
Normal file
103
.CondaPkg/env/Lib/site-packages/imageio/plugins/__init__.py
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
"""
|
||||
Here you can find documentation on how to write your own plugin to allow
|
||||
ImageIO to access a new backend. Plugins are quite object oriented, and
|
||||
the relevant classes and their interaction are documented here:
|
||||
|
||||
.. currentmodule:: imageio
|
||||
|
||||
.. autosummary::
|
||||
:toctree: ../_autosummary
|
||||
:template: better_class.rst
|
||||
|
||||
imageio.core.Format
|
||||
imageio.core.Request
|
||||
|
||||
.. note::
|
||||
You can always check existing plugins if you want to see examples.
|
||||
|
||||
What methods to implement
|
||||
-------------------------
|
||||
|
||||
To implement a new plugin, create a new class that inherits from
|
||||
:class:`imageio.core.Format`. and implement the following functions:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: ../_autosummary
|
||||
|
||||
imageio.core.Format.__init__
|
||||
imageio.core.Format._can_read
|
||||
imageio.core.Format._can_write
|
||||
|
||||
Further, each format contains up to two nested classes; one for reading and
|
||||
one for writing. To support reading and/or writing, the respective classes
|
||||
need to be defined.
|
||||
|
||||
For reading, create a nested class that inherits from
|
||||
``imageio.core.Format.Reader`` and that implements the following functions:
|
||||
|
||||
* Implement ``_open(**kwargs)`` to initialize the reader. Deal with the
|
||||
user-provided keyword arguments here.
|
||||
* Implement ``_close()`` to clean up.
|
||||
* Implement ``_get_length()`` to provide a suitable length based on what
|
||||
the user expects. Can be ``inf`` for streaming data.
|
||||
* Implement ``_get_data(index)`` to return an array and a meta-data dict.
|
||||
* Implement ``_get_meta_data(index)`` to return a meta-data dict. If index
|
||||
is None, it should return the 'global' meta-data.
|
||||
|
||||
For writing, create a nested class that inherits from
|
||||
``imageio.core.Format.Writer`` and implement the following functions:
|
||||
|
||||
* Implement ``_open(**kwargs)`` to initialize the writer. Deal with the
|
||||
user-provided keyword arguments here.
|
||||
* Implement ``_close()`` to clean up.
|
||||
* Implement ``_append_data(im, meta)`` to add data (and meta-data).
|
||||
* Implement ``_set_meta_data(meta)`` to set the global meta-data.
|
||||
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import os
|
||||
import warnings
|
||||
|
||||
|
||||
# v2 imports remove in v3
|
||||
from .. import formats
|
||||
|
||||
# v2 allows formatting plugins by environment variable
|
||||
# this is done here.
|
||||
env_plugin_order = os.getenv("IMAGEIO_FORMAT_ORDER", None)
|
||||
if env_plugin_order is not None: # pragma: no cover
|
||||
warnings.warn(
|
||||
"Setting plugin priority through an environment variable is"
|
||||
" deprecated and will be removed in ImageIO v3. There is no"
|
||||
" replacement planned for this feature. If you have an"
|
||||
" active use-case for it, please reach out to us on GitHub.",
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
formats.sort(*os.getenv("IMAGEIO_FORMAT_ORDER", "").split(","))
|
||||
|
||||
|
||||
# this class replaces plugin module. For details
|
||||
# see https://stackoverflow.com/questions/2447353/getattr-on-a-module
|
||||
def __getattr__(name):
|
||||
"""Lazy-Import Plugins
|
||||
|
||||
This function dynamically loads plugins into the imageio.plugin
|
||||
namespace upon first access. For example, the following snippet will
|
||||
delay importing freeimage until the second line:
|
||||
|
||||
>>> import imageio
|
||||
>>> imageio.plugins.freeimage.download()
|
||||
|
||||
"""
|
||||
|
||||
try:
|
||||
return importlib.import_module(f"imageio.plugins.{name}")
|
||||
except ImportError:
|
||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'") from None
|
||||
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_dicom.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_dicom.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_swf.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_swf.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_tifffile.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/_tifffile.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/bsdf.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/bsdf.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/dicom.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/dicom.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/example.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/example.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/feisem.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/feisem.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/fits.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/fits.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/freeimage.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/freeimage.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/gdal.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/gdal.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/grab.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/grab.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/lytro.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/lytro.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/npz.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/npz.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/opencv.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/opencv.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillow.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillow.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pyav.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/pyav.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/rawpy.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/rawpy.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/spe.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/spe.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/swf.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/swf.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/tifffile.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/tifffile.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/tifffile_v3.cpython-312.pyc
vendored
Normal file
BIN
.CondaPkg/env/Lib/site-packages/imageio/plugins/__pycache__/tifffile_v3.cpython-312.pyc
vendored
Normal file
Binary file not shown.
915
.CondaPkg/env/Lib/site-packages/imageio/plugins/_bsdf.py
vendored
Normal file
915
.CondaPkg/env/Lib/site-packages/imageio/plugins/_bsdf.py
vendored
Normal file
@@ -0,0 +1,915 @@
|
||||
#!/usr/bin/env python
|
||||
# This file is distributed under the terms of the 2-clause BSD License.
|
||||
# Copyright (c) 2017-2018, Almar Klein
|
||||
|
||||
"""
|
||||
Python implementation of the Binary Structured Data Format (BSDF).
|
||||
|
||||
BSDF is a binary format for serializing structured (scientific) data.
|
||||
See http://bsdf.io for more information.
|
||||
|
||||
This is the reference implementation, which is relatively relatively
|
||||
sophisticated, providing e.g. lazy loading of blobs and streamed
|
||||
reading/writing. A simpler Python implementation is available as
|
||||
``bsdf_lite.py``.
|
||||
|
||||
This module has no dependencies and works on Python 2.7 and 3.4+.
|
||||
|
||||
Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes.
|
||||
"""
|
||||
|
||||
# todo: in 2020, remove six stuff, __future__ and _isidentifier
|
||||
# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import bz2
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import struct
|
||||
import sys
|
||||
import types
|
||||
import zlib
|
||||
from io import BytesIO
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Notes on versioning: the major and minor numbers correspond to the
|
||||
# BSDF format version. The major number if increased when backward
|
||||
# incompatible changes are introduced. An implementation must raise an
|
||||
# exception when the file being read has a higher major version. The
|
||||
# minor number is increased when new backward compatible features are
|
||||
# introduced. An implementation must display a warning when the file
|
||||
# being read has a higher minor version. The patch version is increased
|
||||
# for subsequent releases of the implementation.
|
||||
VERSION = 2, 1, 2
|
||||
__version__ = ".".join(str(i) for i in VERSION)
|
||||
|
||||
|
||||
# %% The encoder and decoder implementation
|
||||
|
||||
# From six.py
|
||||
PY3 = sys.version_info[0] >= 3
|
||||
if PY3:
|
||||
text_type = str
|
||||
string_types = str
|
||||
unicode_types = str
|
||||
integer_types = int
|
||||
classtypes = type
|
||||
else: # pragma: no cover
|
||||
logging.basicConfig() # avoid "no handlers found" error
|
||||
text_type = unicode # noqa
|
||||
string_types = basestring # noqa
|
||||
unicode_types = unicode # noqa
|
||||
integer_types = (int, long) # noqa
|
||||
classtypes = type, types.ClassType
|
||||
|
||||
# Shorthands
|
||||
spack = struct.pack
|
||||
strunpack = struct.unpack
|
||||
|
||||
|
||||
def lencode(x):
|
||||
"""Encode an unsigned integer into a variable sized blob of bytes."""
|
||||
# We could support 16 bit and 32 bit as well, but the gain is low, since
|
||||
# 9 bytes for collections with over 250 elements is marginal anyway.
|
||||
if x <= 250:
|
||||
return spack("<B", x)
|
||||
# elif x < 65536:
|
||||
# return spack('<BH', 251, x)
|
||||
# elif x < 4294967296:
|
||||
# return spack('<BI', 252, x)
|
||||
else:
|
||||
return spack("<BQ", 253, x)
|
||||
|
||||
|
||||
# Include len decoder for completeness; we've inlined it for performance.
|
||||
def lendecode(f):
|
||||
"""Decode an unsigned integer from a file."""
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
if n == 253:
|
||||
n = strunpack("<Q", f.read(8))[0] # noqa
|
||||
return n
|
||||
|
||||
|
||||
def encode_type_id(b, ext_id):
|
||||
"""Encode the type identifier, with or without extension id."""
|
||||
if ext_id is not None:
|
||||
bb = ext_id.encode("UTF-8")
|
||||
return b.upper() + lencode(len(bb)) + bb # noqa
|
||||
else:
|
||||
return b # noqa
|
||||
|
||||
|
||||
def _isidentifier(s): # pragma: no cover
|
||||
"""Use of str.isidentifier() for Legacy Python, but slower."""
|
||||
# http://stackoverflow.com/questions/2544972/
|
||||
return (
|
||||
isinstance(s, string_types)
|
||||
and re.match(r"^\w+$", s, re.UNICODE)
|
||||
and re.match(r"^[0-9]", s) is None
|
||||
)
|
||||
|
||||
|
||||
class BsdfSerializer(object):
|
||||
"""Instances of this class represent a BSDF encoder/decoder.
|
||||
|
||||
It acts as a placeholder for a set of extensions and encoding/decoding
|
||||
options. Use this to predefine extensions and options for high
|
||||
performance encoding/decoding. For general use, see the functions
|
||||
`save()`, `encode()`, `load()`, and `decode()`.
|
||||
|
||||
This implementation of BSDF supports streaming lists (keep adding
|
||||
to a list after writing the main file), lazy loading of blobs, and
|
||||
in-place editing of blobs (for streams opened with a+).
|
||||
|
||||
Options for encoding:
|
||||
|
||||
* compression (int or str): ``0`` or "no" for no compression (default),
|
||||
``1`` or "zlib" for Zlib compression (same as zip files and PNG), and
|
||||
``2`` or "bz2" for Bz2 compression (more compact but slower writing).
|
||||
Note that some BSDF implementations (e.g. JavaScript) may not support
|
||||
compression.
|
||||
* use_checksum (bool): whether to include a checksum with binary blobs.
|
||||
* float64 (bool): Whether to write floats as 64 bit (default) or 32 bit.
|
||||
|
||||
Options for decoding:
|
||||
|
||||
* load_streaming (bool): if True, and the final object in the structure was
|
||||
a stream, will make it available as a stream in the decoded object.
|
||||
* lazy_blob (bool): if True, bytes are represented as Blob objects that can
|
||||
be used to lazily access the data, and also overwrite the data if the
|
||||
file is open in a+ mode.
|
||||
"""
|
||||
|
||||
def __init__(self, extensions=None, **options):
|
||||
self._extensions = {} # name -> extension
|
||||
self._extensions_by_cls = {} # cls -> (name, extension.encode)
|
||||
if extensions is None:
|
||||
extensions = standard_extensions
|
||||
for extension in extensions:
|
||||
self.add_extension(extension)
|
||||
self._parse_options(**options)
|
||||
|
||||
def _parse_options(
|
||||
self,
|
||||
compression=0,
|
||||
use_checksum=False,
|
||||
float64=True,
|
||||
load_streaming=False,
|
||||
lazy_blob=False,
|
||||
):
|
||||
# Validate compression
|
||||
if isinstance(compression, string_types):
|
||||
m = {"no": 0, "zlib": 1, "bz2": 2}
|
||||
compression = m.get(compression.lower(), compression)
|
||||
if compression not in (0, 1, 2):
|
||||
raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"')
|
||||
self._compression = compression
|
||||
|
||||
# Other encoding args
|
||||
self._use_checksum = bool(use_checksum)
|
||||
self._float64 = bool(float64)
|
||||
|
||||
# Decoding args
|
||||
self._load_streaming = bool(load_streaming)
|
||||
self._lazy_blob = bool(lazy_blob)
|
||||
|
||||
def add_extension(self, extension_class):
|
||||
"""Add an extension to this serializer instance, which must be
|
||||
a subclass of Extension. Can be used as a decorator.
|
||||
"""
|
||||
# Check class
|
||||
if not (
|
||||
isinstance(extension_class, type) and issubclass(extension_class, Extension)
|
||||
):
|
||||
raise TypeError("add_extension() expects a Extension class.")
|
||||
extension = extension_class()
|
||||
|
||||
# Get name
|
||||
name = extension.name
|
||||
if not isinstance(name, str):
|
||||
raise TypeError("Extension name must be str.")
|
||||
if len(name) == 0 or len(name) > 250:
|
||||
raise NameError(
|
||||
"Extension names must be nonempty and shorter " "than 251 chars."
|
||||
)
|
||||
if name in self._extensions:
|
||||
logger.warning(
|
||||
'BSDF warning: overwriting extension "%s", '
|
||||
"consider removing first" % name
|
||||
)
|
||||
|
||||
# Get classes
|
||||
cls = extension.cls
|
||||
if not cls:
|
||||
clss = []
|
||||
elif isinstance(cls, (tuple, list)):
|
||||
clss = cls
|
||||
else:
|
||||
clss = [cls]
|
||||
for cls in clss:
|
||||
if not isinstance(cls, classtypes):
|
||||
raise TypeError("Extension classes must be types.")
|
||||
|
||||
# Store
|
||||
for cls in clss:
|
||||
self._extensions_by_cls[cls] = name, extension.encode
|
||||
self._extensions[name] = extension
|
||||
return extension_class
|
||||
|
||||
def remove_extension(self, name):
|
||||
"""Remove a converted by its unique name."""
|
||||
if not isinstance(name, str):
|
||||
raise TypeError("Extension name must be str.")
|
||||
if name in self._extensions:
|
||||
self._extensions.pop(name)
|
||||
for cls in list(self._extensions_by_cls.keys()):
|
||||
if self._extensions_by_cls[cls][0] == name:
|
||||
self._extensions_by_cls.pop(cls)
|
||||
|
||||
def _encode(self, f, value, streams, ext_id):
|
||||
"""Main encoder function."""
|
||||
x = encode_type_id
|
||||
|
||||
if value is None:
|
||||
f.write(x(b"v", ext_id)) # V for void
|
||||
elif value is True:
|
||||
f.write(x(b"y", ext_id)) # Y for yes
|
||||
elif value is False:
|
||||
f.write(x(b"n", ext_id)) # N for no
|
||||
elif isinstance(value, integer_types):
|
||||
if -32768 <= value <= 32767:
|
||||
f.write(x(b"h", ext_id) + spack("h", value)) # H for ...
|
||||
else:
|
||||
f.write(x(b"i", ext_id) + spack("<q", value)) # I for int
|
||||
elif isinstance(value, float):
|
||||
if self._float64:
|
||||
f.write(x(b"d", ext_id) + spack("<d", value)) # D for double
|
||||
else:
|
||||
f.write(x(b"f", ext_id) + spack("<f", value)) # f for float
|
||||
elif isinstance(value, unicode_types):
|
||||
bb = value.encode("UTF-8")
|
||||
f.write(x(b"s", ext_id) + lencode(len(bb))) # S for str
|
||||
f.write(bb)
|
||||
elif isinstance(value, (list, tuple)):
|
||||
f.write(x(b"l", ext_id) + lencode(len(value))) # L for list
|
||||
for v in value:
|
||||
self._encode(f, v, streams, None)
|
||||
elif isinstance(value, dict):
|
||||
f.write(x(b"m", ext_id) + lencode(len(value))) # M for mapping
|
||||
for key, v in value.items():
|
||||
if PY3:
|
||||
assert key.isidentifier() # faster
|
||||
else: # pragma: no cover
|
||||
assert _isidentifier(key)
|
||||
# yield ' ' * indent + key
|
||||
name_b = key.encode("UTF-8")
|
||||
f.write(lencode(len(name_b)))
|
||||
f.write(name_b)
|
||||
self._encode(f, v, streams, None)
|
||||
elif isinstance(value, bytes):
|
||||
f.write(x(b"b", ext_id)) # B for blob
|
||||
blob = Blob(
|
||||
value, compression=self._compression, use_checksum=self._use_checksum
|
||||
)
|
||||
blob._to_file(f) # noqa
|
||||
elif isinstance(value, Blob):
|
||||
f.write(x(b"b", ext_id)) # B for blob
|
||||
value._to_file(f) # noqa
|
||||
elif isinstance(value, BaseStream):
|
||||
# Initialize the stream
|
||||
if value.mode != "w":
|
||||
raise ValueError("Cannot serialize a read-mode stream.")
|
||||
elif isinstance(value, ListStream):
|
||||
f.write(x(b"l", ext_id) + spack("<BQ", 255, 0)) # L for list
|
||||
else:
|
||||
raise TypeError("Only ListStream is supported")
|
||||
# Mark this as *the* stream, and activate the stream.
|
||||
# The save() function verifies this is the last written object.
|
||||
if len(streams) > 0:
|
||||
raise ValueError("Can only have one stream per file.")
|
||||
streams.append(value)
|
||||
value._activate(f, self._encode, self._decode) # noqa
|
||||
else:
|
||||
if ext_id is not None:
|
||||
raise ValueError(
|
||||
"Extension %s wronfully encodes object to another "
|
||||
"extension object (though it may encode to a list/dict "
|
||||
"that contains other extension objects)." % ext_id
|
||||
)
|
||||
# Try if the value is of a type we know
|
||||
ex = self._extensions_by_cls.get(value.__class__, None)
|
||||
# Maybe its a subclass of a type we know
|
||||
if ex is None:
|
||||
for name, c in self._extensions.items():
|
||||
if c.match(self, value):
|
||||
ex = name, c.encode
|
||||
break
|
||||
else:
|
||||
ex = None
|
||||
# Success or fail
|
||||
if ex is not None:
|
||||
ext_id2, extension_encode = ex
|
||||
self._encode(f, extension_encode(self, value), streams, ext_id2)
|
||||
else:
|
||||
t = (
|
||||
"Class %r is not a valid base BSDF type, nor is it "
|
||||
"handled by an extension."
|
||||
)
|
||||
raise TypeError(t % value.__class__.__name__)
|
||||
|
||||
def _decode(self, f):
|
||||
"""Main decoder function."""
|
||||
|
||||
# Get value
|
||||
char = f.read(1)
|
||||
c = char.lower()
|
||||
|
||||
# Conversion (uppercase value identifiers signify converted values)
|
||||
if not char:
|
||||
raise EOFError()
|
||||
elif char != c:
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
# if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa - noneed
|
||||
ext_id = f.read(n).decode("UTF-8")
|
||||
else:
|
||||
ext_id = None
|
||||
|
||||
if c == b"v":
|
||||
value = None
|
||||
elif c == b"y":
|
||||
value = True
|
||||
elif c == b"n":
|
||||
value = False
|
||||
elif c == b"h":
|
||||
value = strunpack("<h", f.read(2))[0]
|
||||
elif c == b"i":
|
||||
value = strunpack("<q", f.read(8))[0]
|
||||
elif c == b"f":
|
||||
value = strunpack("<f", f.read(4))[0]
|
||||
elif c == b"d":
|
||||
value = strunpack("<d", f.read(8))[0]
|
||||
elif c == b"s":
|
||||
n_s = strunpack("<B", f.read(1))[0]
|
||||
if n_s == 253:
|
||||
n_s = strunpack("<Q", f.read(8))[0] # noqa
|
||||
value = f.read(n_s).decode("UTF-8")
|
||||
elif c == b"l":
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
if n >= 254:
|
||||
# Streaming
|
||||
closed = n == 254
|
||||
n = strunpack("<Q", f.read(8))[0]
|
||||
if self._load_streaming:
|
||||
value = ListStream(n if closed else "r")
|
||||
value._activate(f, self._encode, self._decode) # noqa
|
||||
elif closed:
|
||||
value = [self._decode(f) for i in range(n)]
|
||||
else:
|
||||
value = []
|
||||
try:
|
||||
while True:
|
||||
value.append(self._decode(f))
|
||||
except EOFError:
|
||||
pass
|
||||
else:
|
||||
# Normal
|
||||
if n == 253:
|
||||
n = strunpack("<Q", f.read(8))[0] # noqa
|
||||
value = [self._decode(f) for i in range(n)]
|
||||
elif c == b"m":
|
||||
value = dict()
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
if n == 253:
|
||||
n = strunpack("<Q", f.read(8))[0] # noqa
|
||||
for i in range(n):
|
||||
n_name = strunpack("<B", f.read(1))[0]
|
||||
if n_name == 253:
|
||||
n_name = strunpack("<Q", f.read(8))[0] # noqa
|
||||
assert n_name > 0
|
||||
name = f.read(n_name).decode("UTF-8")
|
||||
value[name] = self._decode(f)
|
||||
elif c == b"b":
|
||||
if self._lazy_blob:
|
||||
value = Blob((f, True))
|
||||
else:
|
||||
blob = Blob((f, False))
|
||||
value = blob.get_bytes()
|
||||
else:
|
||||
raise RuntimeError("Parse error %r" % char)
|
||||
|
||||
# Convert value if we have an extension for it
|
||||
if ext_id is not None:
|
||||
extension = self._extensions.get(ext_id, None)
|
||||
if extension is not None:
|
||||
value = extension.decode(self, value)
|
||||
else:
|
||||
logger.warning("BSDF warning: no extension found for %r" % ext_id)
|
||||
|
||||
return value
|
||||
|
||||
def encode(self, ob):
|
||||
"""Save the given object to bytes."""
|
||||
f = BytesIO()
|
||||
self.save(f, ob)
|
||||
return f.getvalue()
|
||||
|
||||
def save(self, f, ob):
|
||||
"""Write the given object to the given file object."""
|
||||
f.write(b"BSDF")
|
||||
f.write(struct.pack("<B", VERSION[0]))
|
||||
f.write(struct.pack("<B", VERSION[1]))
|
||||
|
||||
# Prepare streaming, this list will have 0 or 1 item at the end
|
||||
streams = []
|
||||
|
||||
self._encode(f, ob, streams, None)
|
||||
|
||||
# Verify that stream object was at the end, and add initial elements
|
||||
if len(streams) > 0:
|
||||
stream = streams[0]
|
||||
if stream._start_pos != f.tell():
|
||||
raise ValueError(
|
||||
"The stream object must be " "the last object to be encoded."
|
||||
)
|
||||
|
||||
def decode(self, bb):
|
||||
"""Load the data structure that is BSDF-encoded in the given bytes."""
|
||||
f = BytesIO(bb)
|
||||
return self.load(f)
|
||||
|
||||
def load(self, f):
|
||||
"""Load a BSDF-encoded object from the given file object."""
|
||||
# Check magic string
|
||||
f4 = f.read(4)
|
||||
if f4 != b"BSDF":
|
||||
raise RuntimeError("This does not look like a BSDF file: %r" % f4)
|
||||
# Check version
|
||||
major_version = strunpack("<B", f.read(1))[0]
|
||||
minor_version = strunpack("<B", f.read(1))[0]
|
||||
file_version = "%i.%i" % (major_version, minor_version)
|
||||
if major_version != VERSION[0]: # major version should be 2
|
||||
t = (
|
||||
"Reading file with different major version (%s) "
|
||||
"from the implementation (%s)."
|
||||
)
|
||||
raise RuntimeError(t % (__version__, file_version))
|
||||
if minor_version > VERSION[1]: # minor should be < ours
|
||||
t = (
|
||||
"BSDF warning: reading file with higher minor version (%s) "
|
||||
"than the implementation (%s)."
|
||||
)
|
||||
logger.warning(t % (__version__, file_version))
|
||||
|
||||
return self._decode(f)
|
||||
|
||||
|
||||
# %% Streaming and blob-files
|
||||
|
||||
|
||||
class BaseStream(object):
|
||||
"""Base class for streams."""
|
||||
|
||||
def __init__(self, mode="w"):
|
||||
self._i = 0
|
||||
self._count = -1
|
||||
if isinstance(mode, int):
|
||||
self._count = mode
|
||||
mode = "r"
|
||||
elif mode == "w":
|
||||
self._count = 0
|
||||
assert mode in ("r", "w")
|
||||
self._mode = mode
|
||||
self._f = None
|
||||
self._start_pos = 0
|
||||
|
||||
def _activate(self, file, encode_func, decode_func):
|
||||
if self._f is not None: # Associated with another write
|
||||
raise IOError("Stream object cannot be activated twice?")
|
||||
self._f = file
|
||||
self._start_pos = self._f.tell()
|
||||
self._encode = encode_func
|
||||
self._decode = decode_func
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
"""The mode of this stream: 'r' or 'w'."""
|
||||
return self._mode
|
||||
|
||||
|
||||
class ListStream(BaseStream):
|
||||
"""A streamable list object used for writing or reading.
|
||||
In read mode, it can also be iterated over.
|
||||
"""
|
||||
|
||||
@property
|
||||
def count(self):
|
||||
"""The number of elements in the stream (can be -1 for unclosed
|
||||
streams in read-mode).
|
||||
"""
|
||||
return self._count
|
||||
|
||||
@property
|
||||
def index(self):
|
||||
"""The current index of the element to read/write."""
|
||||
return self._i
|
||||
|
||||
def append(self, item):
|
||||
"""Append an item to the streaming list. The object is immediately
|
||||
serialized and written to the underlying file.
|
||||
"""
|
||||
# if self._mode != 'w':
|
||||
# raise IOError('This ListStream is not in write mode.')
|
||||
if self._count != self._i:
|
||||
raise IOError("Can only append items to the end of the stream.")
|
||||
if self._f is None:
|
||||
raise IOError("List stream is not associated with a file yet.")
|
||||
if self._f.closed:
|
||||
raise IOError("Cannot stream to a close file.")
|
||||
self._encode(self._f, item, [self], None)
|
||||
self._i += 1
|
||||
self._count += 1
|
||||
|
||||
def close(self, unstream=False):
|
||||
"""Close the stream, marking the number of written elements. New
|
||||
elements may still be appended, but they won't be read during decoding.
|
||||
If ``unstream`` is False, the stream is turned into a regular list
|
||||
(not streaming).
|
||||
"""
|
||||
# if self._mode != 'w':
|
||||
# raise IOError('This ListStream is not in write mode.')
|
||||
if self._count != self._i:
|
||||
raise IOError("Can only close when at the end of the stream.")
|
||||
if self._f is None:
|
||||
raise IOError("ListStream is not associated with a file yet.")
|
||||
if self._f.closed:
|
||||
raise IOError("Cannot close a stream on a close file.")
|
||||
i = self._f.tell()
|
||||
self._f.seek(self._start_pos - 8 - 1)
|
||||
self._f.write(spack("<B", 253 if unstream else 254))
|
||||
self._f.write(spack("<Q", self._count))
|
||||
self._f.seek(i)
|
||||
|
||||
def next(self):
|
||||
"""Read and return the next element in the streaming list.
|
||||
Raises StopIteration if the stream is exhausted.
|
||||
"""
|
||||
if self._mode != "r":
|
||||
raise IOError("This ListStream in not in read mode.")
|
||||
if self._f is None:
|
||||
raise IOError("ListStream is not associated with a file yet.")
|
||||
if getattr(self._f, "closed", None): # not present on 2.7 http req :/
|
||||
raise IOError("Cannot read a stream from a close file.")
|
||||
if self._count >= 0:
|
||||
if self._i >= self._count:
|
||||
raise StopIteration()
|
||||
self._i += 1
|
||||
return self._decode(self._f)
|
||||
else:
|
||||
# This raises EOFError at some point.
|
||||
try:
|
||||
res = self._decode(self._f)
|
||||
self._i += 1
|
||||
return res
|
||||
except EOFError:
|
||||
self._count = self._i
|
||||
raise StopIteration()
|
||||
|
||||
def __iter__(self):
    """Return self: a read-mode ListStream is its own iterator."""
    if self._mode != "r":
        # Fixed message grammar ("in not in" -> "is not in").
        raise IOError("Cannot iterate: ListStream is not in read mode.")
    return self
|
||||
|
||||
def __next__(self):
    # Python 3 iterator protocol: delegate to the Python-2-style next().
    return self.next()
|
||||
|
||||
|
||||
class Blob(object):
    """Object to represent a blob of bytes. When used to write a BSDF file,
    it's a wrapper for bytes plus properties such as what compression to apply.
    When used to read a BSDF file, it can be used to read the data lazily, and
    also modify the data if reading in 'r+' mode and the blob isn't compressed.
    """

    # For now, this does not allow re-sizing blobs (within the allocated size)
    # but this can be added later.

    def __init__(self, bb, compression=0, extra_size=0, use_checksum=False):
        # Two construction modes:
        # * from bytes (write path): compress and size the payload here;
        # * from a (file, allow_seek) tuple (read path): parse the on-disk
        #   blob header via _from_file().
        if isinstance(bb, bytes):
            self._f = None
            self.compressed = self._from_bytes(bb, compression)
            self.compression = compression
            # extra_size reserves slack so the blob can later grow in place.
            self.allocated_size = self.used_size + extra_size
            self.use_checksum = use_checksum
        elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"):
            self._f, allow_seek = bb
            self.compressed = None
            self._from_file(self._f, allow_seek)
            self._modified = False
        else:
            raise TypeError("Wrong argument to create Blob.")

    def _from_bytes(self, value, compression):
        """When used to wrap bytes in a blob."""
        # compression codes: 0 = none, 1 = zlib, 2 = bz2 (level 9 for both).
        if compression == 0:
            compressed = value
        elif compression == 1:
            compressed = zlib.compress(value, 9)
        elif compression == 2:
            compressed = bz2.compress(value, 9)
        else:  # pragma: no cover
            assert False, "Unknown compression identifier"

        # data_size is the uncompressed length; used_size the stored length.
        self.data_size = len(value)
        self.used_size = len(compressed)
        return compressed

    def _to_file(self, f):
        """Private friend method called by encoder to write a blob to a file."""
        # Write sizes - write at least in a size that allows resizing
        if self.allocated_size <= 250 and self.compression == 0:
            f.write(spack("<B", self.allocated_size))
            f.write(spack("<B", self.used_size))
            f.write(lencode(self.data_size))
        else:
            # 253 is the marker byte for a following 64-bit size.
            f.write(spack("<BQ", 253, self.allocated_size))
            f.write(spack("<BQ", 253, self.used_size))
            f.write(spack("<BQ", 253, self.data_size))
        # Compression and checksum
        f.write(spack("B", self.compression))
        if self.use_checksum:
            # 0xff flags "checksum present", followed by a 16-byte MD5.
            f.write(b"\xff" + hashlib.md5(self.compressed).digest())
        else:
            f.write(b"\x00")
        # Byte alignment (only necessary for uncompressed data)
        if self.compression == 0:
            alignment = 8 - (f.tell() + 1) % 8  # +1 for the byte to write
            f.write(spack("<B", alignment))  # padding for byte alignment
            f.write(b"\x00" * alignment)
        else:
            f.write(spack("<B", 0))
        # The actual data and extra space
        f.write(self.compressed)
        f.write(b"\x00" * (self.allocated_size - self.used_size))

    def _from_file(self, f, allow_seek):
        """Used when a blob is read by the decoder."""
        # Read blob header data (5 to 42 bytes)
        # Size
        allocated_size = strunpack("<B", f.read(1))[0]
        if allocated_size == 253:
            allocated_size = strunpack("<Q", f.read(8))[0]  # noqa
        used_size = strunpack("<B", f.read(1))[0]
        if used_size == 253:
            used_size = strunpack("<Q", f.read(8))[0]  # noqa
        data_size = strunpack("<B", f.read(1))[0]
        if data_size == 253:
            data_size = strunpack("<Q", f.read(8))[0]  # noqa
        # Compression and checksum
        compression = strunpack("<B", f.read(1))[0]
        has_checksum = strunpack("<B", f.read(1))[0]
        if has_checksum:
            checksum = f.read(16)
        # Skip alignment
        alignment = strunpack("<B", f.read(1))[0]
        f.read(alignment)
        # Get or skip data + extra space
        if allow_seek:
            # Seekable source: record the payload span and skip past it so
            # the data can be read lazily via seek()/read().
            self.start_pos = f.tell()
            self.end_pos = self.start_pos + used_size
            f.seek(self.start_pos + allocated_size)
        else:
            # Non-seekable source: read the payload eagerly.
            self.start_pos = None
            self.end_pos = None
            self.compressed = f.read(used_size)
            f.read(allocated_size - used_size)
        # Store info
        self.alignment = alignment
        self.compression = compression
        # NOTE: on the read path use_checksum holds the checksum bytes (or
        # None), unlike the write path where it is a bool.
        self.use_checksum = checksum if has_checksum else None
        self.used_size = used_size
        self.allocated_size = allocated_size
        self.data_size = data_size

    def seek(self, p):
        """Seek to the given position (relative to the blob start)."""
        if self._f is None:
            raise RuntimeError(
                "Cannot seek in a blob " "that is not created by the BSDF decoder."
            )
        # Negative positions count from the end of the allocated space.
        if p < 0:
            p = self.allocated_size + p
        if p < 0 or p > self.allocated_size:
            raise IOError("Seek beyond blob boundaries.")
        self._f.seek(self.start_pos + p)

    def tell(self):
        """Get the current file pointer position (relative to the blob start)."""
        if self._f is None:
            raise RuntimeError(
                "Cannot tell in a blob " "that is not created by the BSDF decoder."
            )
        return self._f.tell() - self.start_pos

    def write(self, bb):
        """Write bytes to the blob."""
        # Only valid for uncompressed, decoder-backed blobs, and only
        # within the payload span (no in-place resizing).
        if self._f is None:
            raise RuntimeError(
                "Cannot write in a blob " "that is not created by the BSDF decoder."
            )
        if self.compression:
            raise IOError("Cannot arbitrarily write in compressed blob.")
        if self._f.tell() + len(bb) > self.end_pos:
            raise IOError("Write beyond blob boundaries.")
        # Mark dirty so update_checksum() knows a rewrite is needed.
        self._modified = True
        return self._f.write(bb)

    def read(self, n):
        """Read n bytes from the blob."""
        if self._f is None:
            raise RuntimeError(
                "Cannot read in a blob " "that is not created by the BSDF decoder."
            )
        if self.compression:
            raise IOError("Cannot arbitrarily read in compressed blob.")
        if self._f.tell() + n > self.end_pos:
            raise IOError("Read beyond blob boundaries.")
        return self._f.read(n)

    def get_bytes(self):
        """Get the contents of the blob as bytes."""
        if self.compressed is not None:
            compressed = self.compressed
        else:
            # Lazy blob: read the payload, restoring the file position after.
            i = self._f.tell()
            self.seek(0)
            compressed = self._f.read(self.used_size)
            self._f.seek(i)
        if self.compression == 0:
            value = compressed
        elif self.compression == 1:
            value = zlib.decompress(compressed)
        elif self.compression == 2:
            value = bz2.decompress(compressed)
        else:  # pragma: no cover
            raise RuntimeError("Invalid compression %i" % self.compression)
        return value

    def update_checksum(self):
        """Reset the blob's checksum if present. Call this after modifying
        the data.
        """
        # or ... should the presence of a checksum mean that data is protected?
        if self.use_checksum and self._modified:
            # Re-hash the payload and overwrite the 16 MD5 bytes, which sit
            # just before the alignment byte and padding in the header.
            self.seek(0)
            compressed = self._f.read(self.used_size)
            self._f.seek(self.start_pos - self.alignment - 1 - 16)
            self._f.write(hashlib.md5(compressed).digest())
|
||||
|
||||
|
||||
# %% High-level functions
|
||||
|
||||
|
||||
def encode(ob, extensions=None, **options):
    """Save (BSDF-encode) the given object to bytes.

    See ``BsdfSerializer`` for details on extensions and options.
    """
    serializer = BsdfSerializer(extensions, **options)
    return serializer.encode(ob)
|
||||
|
||||
|
||||
def save(f, ob, extensions=None, **options):
    """Save (BSDF-encode) the given object to the given filename or
    file object. See ``BsdfSerializer`` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    if isinstance(f, string_types):
        # Expand "~" home-relative paths, for consistency with load().
        if f.startswith(("~/", "~\\")):  # pragma: no cover
            f = os.path.expanduser(f)
        with open(f, "wb") as fp:
            return s.save(fp, ob)
    else:
        return s.save(f, ob)
|
||||
|
||||
|
||||
def decode(bb, extensions=None, **options):
    """Load a (BSDF-encoded) structure from bytes.

    See ``BsdfSerializer`` for details on extensions and options.
    """
    serializer = BsdfSerializer(extensions, **options)
    return serializer.decode(bb)
|
||||
|
||||
|
||||
def load(f, extensions=None, **options):
    """Load a (BSDF-encoded) structure from the given filename or file object.

    See ``BsdfSerializer`` for details on extensions and options.
    """
    serializer = BsdfSerializer(extensions, **options)
    # File-like objects are handed to the serializer directly.
    if not isinstance(f, string_types):
        return serializer.load(f)
    # Otherwise treat it as a path, expanding "~" home-relative paths.
    if f.startswith(("~/", "~\\")):  # pragma: no cover
        f = os.path.expanduser(f)
    with open(f, "rb") as fp:
        return serializer.load(fp)
|
||||
|
||||
|
||||
# Aliases for json compat: mirror the json module's API names.
loads = decode  # bytes -> structure, like json.loads
dumps = encode  # structure -> bytes, like json.dumps
|
||||
|
||||
|
||||
# %% Standard extensions
|
||||
|
||||
# Defining extensions as a dict would be more compact and feel lighter, but
|
||||
# that would only allow lambdas, which is too limiting, e.g. for ndarray
|
||||
# extension.
|
||||
|
||||
|
||||
class Extension(object):
    """Base class to implement BSDF extensions for special data types.

    Extension classes are provided to the BSDF serializer, which
    instantiates the class. That way, the extension can be somewhat dynamic:
    e.g. the NDArrayExtension exposes the ndarray class only when numpy
    is imported.

    An extension instance must have two attributes, set on the class or on
    the instance in ``__init__()``:

    * name (str): the name by which encoded values will be identified.
    * cls (type): the type (or list of types) to match values with.
      This is optional, but it makes the encoder select extensions faster.

    Further, it needs 3 methods:

    * ``match(serializer, value) -> bool``: return whether the extension can
      convert the given value. The default is ``isinstance(value, self.cls)``.
    * ``encode(serializer, value) -> encoded_value``: the function to encode a
      value to more basic data types.
    * ``decode(serializer, encoded_value) -> value``: the function to decode an
      encoded value back to its intended representation.
    """

    # Defaults: an unnamed extension that matches nothing.
    name = ""
    cls = ()

    def __repr__(self):
        return "<BSDF extension %r at 0x%s>" % (self.name, hex(id(self)))

    def match(self, s, v):
        """Default matching: value is an instance of ``self.cls``."""
        return isinstance(v, self.cls)

    def encode(self, s, v):
        """Subclasses must implement encoding."""
        raise NotImplementedError()

    def decode(self, s, v):
        """Subclasses must implement decoding."""
        raise NotImplementedError()
|
||||
|
||||
|
||||
class ComplexExtension(Extension):
    """Extension that stores complex numbers as a (real, imag) pair."""

    name = "c"
    cls = complex

    def encode(self, s, v):
        """Split the complex value into two plain floats."""
        real_part = v.real
        imag_part = v.imag
        return (real_part, imag_part)

    def decode(self, s, v):
        """Rebuild the complex value from its first two components."""
        return complex(v[0], v[1])
|
||||
|
||||
|
||||
class NDArrayExtension(Extension):
    """Extension that stores numpy arrays as shape + dtype + raw bytes."""

    name = "ndarray"

    def __init__(self):
        # Only expose the ndarray class if numpy is already imported;
        # this keeps numpy an optional dependency.
        if "numpy" in sys.modules:
            import numpy as np

            self.cls = np.ndarray

    def match(self, s, v):  # pragma: no cover - e.g. work for nd arrays in JS
        # Duck-typed on purpose, so array-likes from other runtimes match too.
        return all(hasattr(v, attr) for attr in ("shape", "dtype", "tobytes"))

    def encode(self, s, v):
        return {"shape": v.shape, "dtype": text_type(v.dtype), "data": v.tobytes()}

    def decode(self, s, v):
        try:
            import numpy as np
        except ImportError:  # pragma: no cover
            # Without numpy, hand back the raw encoded dict.
            return v
        arr = np.frombuffer(v["data"], dtype=v["dtype"])
        arr.shape = v["shape"]
        return arr
|
||||
|
||||
|
||||
standard_extensions = [ComplexExtension, NDArrayExtension]  # extensions enabled by default
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Invoke CLI
    # NOTE(review): bsdf_cli is presumably the companion command-line module
    # shipped alongside this file — confirm it is importable when run directly.
    import bsdf_cli

    bsdf_cli.main()
|
||||
932
.CondaPkg/env/Lib/site-packages/imageio/plugins/_dicom.py
vendored
Normal file
932
.CondaPkg/env/Lib/site-packages/imageio/plugins/_dicom.py
vendored
Normal file
@@ -0,0 +1,932 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Plugin for reading DICOM files.
|
||||
"""
|
||||
|
||||
# todo: Use pydicom:
|
||||
# * Note: is not py3k ready yet
|
||||
# * Allow reading the full meta info
|
||||
# I think we can more or less replace the SimpleDicomReader with a
|
||||
# pydicom.Dataset For series, only ned to read the full info from one
|
||||
# file: speed still high
|
||||
# * Perhaps allow writing?
|
||||
|
||||
import sys
|
||||
import os
|
||||
import struct
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)

# Determine endianity of system
sys_is_little_endian = sys.byteorder == "little"

# Define a dictionary that contains the tags that we would like to know.
# Maps a DICOM (group, element) tag to (attribute name, VR code); only
# these tags are extracted by the reader below.
MINIDICT = {
    (0x7FE0, 0x0010): ("PixelData", "OB"),
    # Date and time
    (0x0008, 0x0020): ("StudyDate", "DA"),
    (0x0008, 0x0021): ("SeriesDate", "DA"),
    (0x0008, 0x0022): ("AcquisitionDate", "DA"),
    (0x0008, 0x0023): ("ContentDate", "DA"),
    (0x0008, 0x0030): ("StudyTime", "TM"),
    (0x0008, 0x0031): ("SeriesTime", "TM"),
    (0x0008, 0x0032): ("AcquisitionTime", "TM"),
    (0x0008, 0x0033): ("ContentTime", "TM"),
    # With what, where, by whom?
    (0x0008, 0x0060): ("Modality", "CS"),
    (0x0008, 0x0070): ("Manufacturer", "LO"),
    (0x0008, 0x0080): ("InstitutionName", "LO"),
    # Descriptions
    (0x0008, 0x1030): ("StudyDescription", "LO"),
    (0x0008, 0x103E): ("SeriesDescription", "LO"),
    # UID's
    (0x0008, 0x0016): ("SOPClassUID", "UI"),
    (0x0008, 0x0018): ("SOPInstanceUID", "UI"),
    (0x0020, 0x000D): ("StudyInstanceUID", "UI"),
    (0x0020, 0x000E): ("SeriesInstanceUID", "UI"),
    (0x0008, 0x0117): ("ContextUID", "UI"),
    # Numbers
    (0x0020, 0x0011): ("SeriesNumber", "IS"),
    (0x0020, 0x0012): ("AcquisitionNumber", "IS"),
    (0x0020, 0x0013): ("InstanceNumber", "IS"),
    (0x0020, 0x0014): ("IsotopeNumber", "IS"),
    (0x0020, 0x0015): ("PhaseNumber", "IS"),
    (0x0020, 0x0016): ("IntervalNumber", "IS"),
    (0x0020, 0x0017): ("TimeSlotNumber", "IS"),
    (0x0020, 0x0018): ("AngleNumber", "IS"),
    (0x0020, 0x0019): ("ItemNumber", "IS"),
    (0x0020, 0x0020): ("PatientOrientation", "CS"),
    (0x0020, 0x0030): ("ImagePosition", "CS"),
    (0x0020, 0x0032): ("ImagePositionPatient", "CS"),
    (0x0020, 0x0035): ("ImageOrientation", "CS"),
    (0x0020, 0x0037): ("ImageOrientationPatient", "CS"),
    # Patient information
    (0x0010, 0x0010): ("PatientName", "PN"),
    (0x0010, 0x0020): ("PatientID", "LO"),
    (0x0010, 0x0030): ("PatientBirthDate", "DA"),
    (0x0010, 0x0040): ("PatientSex", "CS"),
    (0x0010, 0x1010): ("PatientAge", "AS"),
    (0x0010, 0x1020): ("PatientSize", "DS"),
    (0x0010, 0x1030): ("PatientWeight", "DS"),
    # Image specific (required to construct numpy array)
    (0x0028, 0x0002): ("SamplesPerPixel", "US"),
    (0x0028, 0x0008): ("NumberOfFrames", "IS"),
    (0x0028, 0x0100): ("BitsAllocated", "US"),
    (0x0028, 0x0101): ("BitsStored", "US"),
    (0x0028, 0x0102): ("HighBit", "US"),
    (0x0028, 0x0103): ("PixelRepresentation", "US"),
    (0x0028, 0x0010): ("Rows", "US"),
    (0x0028, 0x0011): ("Columns", "US"),
    (0x0028, 0x1052): ("RescaleIntercept", "DS"),
    (0x0028, 0x1053): ("RescaleSlope", "DS"),
    # Image specific (for the user)
    (0x0028, 0x0030): ("PixelSpacing", "DS"),
    (0x0018, 0x0088): ("SliceSpacing", "DS"),
}

# Define some special tags:
# See PS 3.5-2008 section 7.5 (p.40)
ItemTag = (0xFFFE, 0xE000)  # start of Sequence Item
ItemDelimiterTag = (0xFFFE, 0xE00D)  # end of Sequence Item
SequenceDelimiterTag = (0xFFFE, 0xE0DD)  # end of Sequence of undefined length
|
||||
|
||||
# Define set of groups that we're interested in (so we can quickly skip others)
# Set comprehensions instead of set([listcomp]); iterating the dict directly
# yields its keys.
GROUPS = {key[0] for key in MINIDICT}
VRS = {val[1] for val in MINIDICT.values()}
|
||||
|
||||
|
||||
class NotADicomFile(Exception):
    """Raised when the file lacks the DICOM magic bytes."""
|
||||
|
||||
|
||||
class CompressedDicom(RuntimeError):
    """Raised for transfer syntaxes with compressed pixel data."""
|
||||
|
||||
|
||||
class SimpleDicomReader(object):
    """
    This class provides reading of pixel data from DICOM files. It is
    focussed on getting the pixel data, not the meta info.

    To use, first create an instance of this class (giving it
    a file object or filename). Next use the info attribute to
    get a dict of the meta data. The loading of pixel data is
    deferred until get_numpy_array() is called.

    Comparison with Pydicom
    -----------------------

    This code focusses on getting the pixel data out, which allows some
    shortcuts, resulting in the code being much smaller.

    Since the processing of data elements is much cheaper (it skips a lot
    of tags), this code is about 3x faster than pydicom (except for the
    deflated DICOM files).

    This class does borrow some code (and ideas) from the pydicom
    project, and (to the best of our knowledge) has the same limitations
    as pydicom with regard to the type of files that it can handle.

    Limitations
    -----------

    For more advanced DICOM processing, please check out pydicom.

    * Only a predefined subset of data elements (meta information) is read.
    * This is a reader; it can not write DICOM files.
    * (just like pydicom) it can handle none of the compressed DICOM
      formats except for "Deflated Explicit VR Little Endian"
      (1.2.840.10008.1.2.1.99).

    """

    def __init__(self, file):
        # Open file if filename given
        if isinstance(file, str):
            self._filename = file
            self._file = open(file, "rb")
        else:
            self._filename = "<unknown file>"
            self._file = file
        # Init variable to store position and size of pixel data
        self._pixel_data_loc = None
        # The meta header is always explicit and little endian
        self.is_implicit_VR = False
        self.is_little_endian = True
        self._unpackPrefix = "<"
        # Dict to store data elements of interest in
        self._info = {}
        # VR conversion: maps a DICOM value representation code to a
        # callable that turns the raw element bytes into a Python value.
        self._converters = {
            # Numbers
            "US": lambda x: self._unpack("H", x),
            "UL": lambda x: self._unpack("L", x),
            # Numbers encoded as strings
            "DS": lambda x: self._splitValues(x, float, "\\"),
            "IS": lambda x: self._splitValues(x, int, "\\"),
            # strings
            "AS": lambda x: x.decode("ascii", "ignore").strip("\x00"),
            "DA": lambda x: x.decode("ascii", "ignore").strip("\x00"),
            "TM": lambda x: x.decode("ascii", "ignore").strip("\x00"),
            "UI": lambda x: x.decode("ascii", "ignore").strip("\x00"),
            "LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
            "CS": lambda x: self._splitValues(x, float, "\\"),
            "PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
        }

        # Initiate reading
        self._read()

    @property
    def info(self):
        # Dict of meta data read from the file (attribute name -> value).
        return self._info

    def _splitValues(self, x, type, splitter):
        """Decode a (possibly multi-valued) string element: return a tuple
        of converted values if the splitter occurs, a single converted
        value otherwise, or the raw string if conversion fails.
        """
        s = x.decode("ascii").strip("\x00")
        try:
            if splitter in s:
                return tuple([type(v) for v in s.split(splitter) if v.strip()])
            else:
                return type(s)
        except ValueError:
            return s

    def _unpack(self, fmt, value):
        # Unpack a single value using the current endianness prefix.
        return struct.unpack(self._unpackPrefix + fmt, value)[0]

    # Really only so we need minimal changes to _pixel_data_numpy
    def __iter__(self):
        return iter(self._info.keys())

    def __getattr__(self, key):
        # Expose collected meta data (e.g. self.Rows) as attributes.
        info = object.__getattribute__(self, "_info")
        if key in info:
            return info[key]
        return object.__getattribute__(self, key)  # pragma: no cover

    def _read(self):
        """Parse the file: magic check, header, meta data elements, and
        shape/sampling. Pixel data itself stays deferred.
        """
        f = self._file
        # Check prefix after preamble
        f.seek(128)
        if f.read(4) != b"DICM":
            raise NotADicomFile("Not a valid DICOM file.")
        # Read
        self._read_header()
        self._read_data_elements()
        self._get_shape_and_sampling()
        # Close if done, reopen if necessary to read pixel data
        if os.path.isfile(self._filename):
            self._file.close()
            self._file = None

    def _readDataElement(self):
        """Read one data element; return (group, element, value-bytes).
        Pixel data is not loaded here, only its location is recorded.
        """
        f = self._file
        # Get group and element
        group = self._unpack("H", f.read(2))
        element = self._unpack("H", f.read(2))
        # Get value length
        if self.is_implicit_VR:
            vl = self._unpack("I", f.read(4))
        else:
            vr = f.read(2)
            if vr in (b"OB", b"OW", b"SQ", b"UN"):
                # These VRs use a 2-byte reserved field + 4-byte length.
                reserved = f.read(2)  # noqa
                vl = self._unpack("I", f.read(4))
            else:
                vl = self._unpack("H", f.read(2))
        # Get value
        if group == 0x7FE0 and element == 0x0010:
            # Pixel data: remember (position, length) and skip over it.
            here = f.tell()
            self._pixel_data_loc = here, vl
            f.seek(here + vl)
            return group, element, b"Deferred loading of pixel data"
        else:
            if vl == 0xFFFFFFFF:
                # Undefined length: scan forward for the sequence delimiter.
                value = self._read_undefined_length_value()
            else:
                value = f.read(vl)
            return group, element, value

    def _read_undefined_length_value(self, read_size=128):
        """Copied (in compacted form) from PyDicom
        Copyright Darcy Mason.
        """
        fp = self._file
        # data_start = fp.tell()
        search_rewind = 3
        bytes_to_find = struct.pack(
            self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1]
        )

        found = False
        value_chunks = []
        while not found:
            chunk_start = fp.tell()
            bytes_read = fp.read(read_size)
            if len(bytes_read) < read_size:
                # try again,
                # if still don't get required amount, this is last block
                new_bytes = fp.read(read_size - len(bytes_read))
                bytes_read += new_bytes
                if len(bytes_read) < read_size:
                    raise EOFError(
                        "End of file reached before sequence " "delimiter found."
                    )
            index = bytes_read.find(bytes_to_find)
            if index != -1:
                found = True
                value_chunks.append(bytes_read[:index])
                fp.seek(chunk_start + index + 4)  # rewind to end of delimiter
                length = fp.read(4)
                if length != b"\0\0\0\0":
                    logger.warning(
                        "Expected 4 zero bytes after undefined length " "delimiter"
                    )
            else:
                # Rewind a bit so a delimiter split across chunk borders is
                # still found on the next read.
                fp.seek(fp.tell() - search_rewind)
                # accumulate the bytes read (not including the rewind)
                value_chunks.append(bytes_read[:-search_rewind])

        # if get here then have found the byte string
        return b"".join(value_chunks)

    def _read_header(self):
        """Read the group-2 file meta elements and configure VR mode and
        endianness from the transfer syntax.
        """
        f = self._file
        TransferSyntaxUID = None

        # Read all elements, store transferSyntax when we encounter it
        try:
            while True:
                fp_save = f.tell()
                # Get element
                group, element, value = self._readDataElement()
                if group == 0x02:
                    if element == 0x10:
                        TransferSyntaxUID = value.decode("ascii").strip("\x00")
                else:
                    # No more group 2: rewind and break
                    # (don't trust group length)
                    f.seek(fp_save)
                    break
        except (EOFError, struct.error):  # pragma: no cover
            raise RuntimeError("End of file reached while still in header.")

        # Handle transfer syntax
        self._info["TransferSyntaxUID"] = TransferSyntaxUID
        #
        if TransferSyntaxUID is None:
            # Assume ExplicitVRLittleEndian
            is_implicit_VR, is_little_endian = False, True
        elif TransferSyntaxUID == "1.2.840.10008.1.2.1":
            # ExplicitVRLittleEndian
            is_implicit_VR, is_little_endian = False, True
        elif TransferSyntaxUID == "1.2.840.10008.1.2.2":
            # ExplicitVRBigEndian
            is_implicit_VR, is_little_endian = False, False
        elif TransferSyntaxUID == "1.2.840.10008.1.2":
            # implicit VR little endian
            is_implicit_VR, is_little_endian = True, True
        elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99":
            # DeflatedExplicitVRLittleEndian:
            is_implicit_VR, is_little_endian = False, True
            self._inflate()
        else:
            # http://www.dicomlibrary.com/dicom/transfer-syntax/
            t, extra_info = TransferSyntaxUID, ""
            if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99":
                extra_info = " (JPEG)"
            if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99":
                extra_info = " (JPEG 2000)"
            if t == "1.2.840.10008.1.2.5":
                extra_info = " (RLE)"
            if t == "1.2.840.10008.1.2.6.1":
                extra_info = " (RFC 2557)"
            raise CompressedDicom(
                "The dicom reader can only read files with "
                "uncompressed image data - not %r%s. You "
                "can try using dcmtk or gdcm to convert the "
                "image." % (t, extra_info)
            )

        # From hereon, use implicit/explicit big/little endian
        self.is_implicit_VR = is_implicit_VR
        self.is_little_endian = is_little_endian
        self._unpackPrefix = "<" if is_little_endian else ">"

    def _read_data_elements(self):
        """Scan the rest of the file, storing converted values for the
        tags listed in MINIDICT; stops cleanly at end of file.
        """
        info = self._info
        try:
            while True:
                # Get element
                group, element, value = self._readDataElement()
                # Is it a group we are interested in?
                if group in GROUPS:
                    key = (group, element)
                    name, vr = MINIDICT.get(key, (None, None))
                    # Is it an element we are interested in?
                    if name:
                        # Store value
                        converter = self._converters.get(vr, lambda x: x)
                        info[name] = converter(value)
        except (EOFError, struct.error):
            pass  # end of file ...

    def get_numpy_array(self):
        """Get numpy array for this DICOM file, with the correct shape,
        and pixel values scaled appropriately.
        """
        # Is there pixel data at all?
        if "PixelData" not in self:
            raise TypeError("No pixel data found in this dataset.")

        # Load it now if it was not already loaded
        if self._pixel_data_loc and len(self.PixelData) < 100:
            # Reopen file?
            close_file = False
            if self._file is None:
                close_file = True
                self._file = open(self._filename, "rb")
            # Read data
            self._file.seek(self._pixel_data_loc[0])
            if self._pixel_data_loc[1] == 0xFFFFFFFF:
                value = self._read_undefined_length_value()
            else:
                value = self._file.read(self._pixel_data_loc[1])
            # Close file
            if close_file:
                self._file.close()
                self._file = None
            # Overwrite
            self._info["PixelData"] = value

        # Get data
        data = self._pixel_data_numpy()
        data = self._apply_slope_and_offset(data)

        # Remove data again to preserve memory
        # Note that the data for the original file is loaded twice ...
        self._info["PixelData"] = (
            b"Data converted to numpy array, " + b"raw data removed to preserve memory"
        )
        return data

    def _get_shape_and_sampling(self):
        """Get shape and sampling without actually using the pixel data.
        In this way, the user can get an idea what's inside without having
        to load it.
        """
        # Get shape (in the same way that pydicom does)
        if "NumberOfFrames" in self and self.NumberOfFrames > 1:
            if self.SamplesPerPixel > 1:
                shape = (
                    self.SamplesPerPixel,
                    self.NumberOfFrames,
                    self.Rows,
                    self.Columns,
                )
            else:
                shape = self.NumberOfFrames, self.Rows, self.Columns
        elif "SamplesPerPixel" in self:
            if self.SamplesPerPixel > 1:
                if self.BitsAllocated == 8:
                    shape = self.SamplesPerPixel, self.Rows, self.Columns
                else:
                    raise NotImplementedError(
                        "DICOM plugin only handles "
                        "SamplesPerPixel > 1 if Bits "
                        "Allocated = 8"
                    )
            else:
                shape = self.Rows, self.Columns
        else:
            raise RuntimeError(
                "DICOM file has no SamplesPerPixel " "(perhaps this is a report?)"
            )

        # Try getting sampling between pixels
        if "PixelSpacing" in self:
            sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1])
        else:
            sampling = 1.0, 1.0
        if "SliceSpacing" in self:
            sampling = (abs(self.SliceSpacing),) + sampling

        # Ensure that sampling has as many elements as shape
        sampling = (1.0,) * (len(shape) - len(sampling)) + sampling[-len(shape) :]

        # Set shape and sampling
        self._info["shape"] = shape
        self._info["sampling"] = sampling

    def _pixel_data_numpy(self):
        """Return a NumPy array of the pixel data."""
        # Taken from pydicom
        # Copyright (c) 2008-2012 Darcy Mason

        if "PixelData" not in self:
            raise TypeError("No pixel data found in this dataset.")

        # determine the type used for the array
        need_byteswap = self.is_little_endian != sys_is_little_endian

        # Make NumPy format code, e.g. "uint16", "int32" etc
        # from two pieces of info:
        # self.PixelRepresentation -- 0 for unsigned, 1 for signed;
        # self.BitsAllocated -- 8, 16, or 32
        format_str = "%sint%d" % (
            ("u", "")[self.PixelRepresentation],
            self.BitsAllocated,
        )
        try:
            numpy_format = np.dtype(format_str)
        except TypeError:  # pragma: no cover
            # Report the offending format string; numpy_format is unbound
            # here (np.dtype raised before assigning it).
            raise TypeError(
                "Data type not understood by NumPy: format='%s', "
                " PixelRepresentation=%d, BitsAllocated=%d"
                % (format_str, self.PixelRepresentation, self.BitsAllocated)
            )

        # Have correct Numpy format, so create the NumPy array
        arr = np.frombuffer(self.PixelData, numpy_format).copy()

        # XXX byte swap - may later handle this in read_file!!?
        if need_byteswap:
            arr.byteswap(True)  # True means swap in-place, don't make new copy

        # Note the following reshape operations return a new *view* onto arr,
        # but don't copy the data
        arr = arr.reshape(*self._info["shape"])
        return arr

    def _apply_slope_and_offset(self, data):
        """
        If RescaleSlope and RescaleIntercept are present in the data,
        apply them. The data type of the data is changed if necessary.
        """
        # Obtain slope and offset
        slope, offset = 1, 0
        needFloats, needApplySlopeOffset = False, False
        if "RescaleSlope" in self:
            needApplySlopeOffset = True
            slope = self.RescaleSlope
        if "RescaleIntercept" in self:
            needApplySlopeOffset = True
            offset = self.RescaleIntercept
        if int(slope) != slope or int(offset) != offset:
            needFloats = True
        if not needFloats:
            slope, offset = int(slope), int(offset)

        # Apply slope and offset
        if needApplySlopeOffset:
            # Maybe we need to change the datatype?
            if data.dtype in [np.float32, np.float64]:
                pass
            elif needFloats:
                data = data.astype(np.float32)
            else:
                # Determine required range
                minReq, maxReq = data.min().item(), data.max().item()
                minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset])
                maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset])

                # Determine required datatype from that
                dtype = None
                if minReq < 0:
                    # Signed integer type
                    maxReq = max([-minReq, maxReq])
                    if maxReq < 2**7:
                        dtype = np.int8
                    elif maxReq < 2**15:
                        dtype = np.int16
                    elif maxReq < 2**31:
                        dtype = np.int32
                    else:
                        dtype = np.float32
                else:
                    # Unsigned integer type: must use unsigned dtypes here.
                    # Signed int8/int16/int32 (max 2**7-1 / 2**15-1 / 2**31-1)
                    # cannot hold values up to 2**8/2**16/2**32 and would
                    # silently wrap around.
                    if maxReq < 2**8:
                        dtype = np.uint8
                    elif maxReq < 2**16:
                        dtype = np.uint16
                    elif maxReq < 2**32:
                        dtype = np.uint32
                    else:
                        dtype = np.float32
                # Change datatype
                if dtype != data.dtype:
                    data = data.astype(dtype)

            # Apply slope and offset
            data *= slope
            data += offset

        # Done
        return data

    def _inflate(self):
        # Taken from pydicom
        # Copyright (c) 2008-2012 Darcy Mason
        import zlib
        from io import BytesIO

        # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
        # following the file metadata was prepared the normal way,
        # then "deflate" compression applied.
        # All that is needed here is to decompress and then
        # use as normal in a file-like object
        zipped = self._file.read()
        # -MAX_WBITS part is from comp.lang.python answer:
        # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
        unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
        self._file = BytesIO(unzipped)  # a file-like object
|
||||
|
||||
|
||||
class DicomSeries(object):
    """DicomSeries

    This class represents a serie of dicom files (SimpleDicomReader
    objects) that belong together. If these are multiple files, they
    represent the slices of a volume (like for CT or MRI).
    """

    def __init__(self, suid, progressIndicator):
        # Init dataset list and the callback
        self._entries = []  # the SimpleDicomReader objects, one per file/slice

        # Init props
        self._suid = suid  # the SeriesInstanceUID shared by all entries
        self._info = {}  # filled by _finish(); info of first file + volume info
        self._progressIndicator = progressIndicator  # used for user feedback

    def __len__(self):
        # Number of dicom files (slices) in this series.
        return len(self._entries)

    def __iter__(self):
        return iter(self._entries)

    def __getitem__(self, index):
        return self._entries[index]

    @property
    def suid(self):
        # The SeriesInstanceUID of this series.
        return self._suid

    @property
    def shape(self):
        """The shape of the data (nz, ny, nx)."""
        return self._info["shape"]

    @property
    def sampling(self):
        """The sampling (voxel distances) of the data (dz, dy, dx)."""
        return self._info["sampling"]

    @property
    def info(self):
        """A dictionary containing the information as present in the
        first dicomfile of this serie. None if there are no entries."""
        return self._info

    @property
    def description(self):
        """A description of the dicom series. Used fields are
        PatientName, shape of the data, SeriesDescription, and
        ImageComments.
        """
        info = self.info

        # If no info available, return simple description
        if not info:  # pragma: no cover
            return "DicomSeries containing %i images" % len(self)

        fields = []
        # Give patient name
        if "PatientName" in info:
            fields.append("" + info["PatientName"])
        # Also add dimensions
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append("x".join(tmp))
        # Try adding more fields
        if "SeriesDescription" in info:
            fields.append("'" + info["SeriesDescription"] + "'")
        if "ImageComments" in info:
            fields.append("'" + info["ImageComments"] + "'")

        # Combine
        return " ".join(fields)

    def __repr__(self):
        adr = hex(id(self)).upper()
        return "<DicomSeries with %i images at %s>" % (len(self), adr)

    def get_numpy_array(self):
        """Get (load) the data that this DicomSeries represents, and return
        it as a numpy array. If this serie contains multiple images, the
        resulting array is 3D, otherwise it's 2D.

        Raises ValueError for an empty series and RuntimeError when
        _finish() has not produced the volume info yet.
        """

        # It's easy if no file or if just a single file
        if len(self) == 0:
            raise ValueError("Serie does not contain any files.")
        elif len(self) == 1:
            return self[0].get_numpy_array()

        # Check info
        if self.info is None:
            raise RuntimeError("Cannot return volume if series not finished.")

        # Init data (using what the dicom packaged produces as a reference)
        slice = self[0].get_numpy_array()
        vol = np.zeros(self.shape, dtype=slice.dtype)
        vol[0] = slice

        # Fill volume one slice at a time, reporting progress as we go
        self._progressIndicator.start("loading data", "", len(self))
        for z in range(1, len(self)):
            vol[z] = self[z].get_numpy_array()
            self._progressIndicator.set_progress(z + 1)
        self._progressIndicator.finish()

        # Done; nudge the GC so per-slice arrays are released promptly
        import gc

        gc.collect()
        return vol

    def _append(self, dcm):
        # Register one SimpleDicomReader with this series.
        self._entries.append(dcm)

    def _sort(self):
        # Sort slices by InstanceNumber, with the z position
        # (ImagePositionPatient[2], or None if absent) as tie-breaker.
        self._entries.sort(
            key=lambda k: (
                k.InstanceNumber,
                (
                    k.ImagePositionPatient[2]
                    if hasattr(k, "ImagePositionPatient")
                    else None
                ),
            )
        )

    def _finish(self):
        """
        Evaluate the series of dicom files. Together they should make up
        a volumetric dataset. This means the files should meet certain
        conditions. Also some additional information has to be calculated,
        such as the distance between the slices. This method sets the
        attributes for "shape", "sampling" and "info".

        This method checks:
          * that there are no missing files
          * that the dimensions of all images match
          * that the pixel spacing of all images match
        """

        # The datasets list should be sorted by instance number
        L = self._entries
        if len(L) == 0:
            return
        elif len(L) == 1:
            # A single image: simply adopt its own info dict
            self._info = L[0].info
            return

        # Get previous
        ds1 = L[0]
        # Init measures to calculate average of
        distance_sum = 0.0
        # Init measures to check (these are in 2D)
        dimensions = ds1.Rows, ds1.Columns
        # sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1])
        sampling = ds1.info["sampling"][:2]  # row, column

        for index in range(len(L)):
            # The first round ds1 and ds2 will be the same, for the
            # distance calculation this does not matter
            # Get current
            ds2 = L[index]
            # Get positions
            pos1 = float(ds1.ImagePositionPatient[2])
            pos2 = float(ds2.ImagePositionPatient[2])
            # Update distance_sum to calculate distance later
            distance_sum += abs(pos1 - pos2)
            # Test measures
            dimensions2 = ds2.Rows, ds2.Columns
            # sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1])
            sampling2 = ds2.info["sampling"][:2]  # row, column
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError("Dimensions of slices does not match.")
            if sampling != sampling2:
                # We can still produce a volume, but we should notify the user
                self._progressIndicator.write("Warn: sampling does not match.")
            # Store previous
            ds1 = ds2

        # Finish calculating average distance
        # (Note that there are len(L)-1 distances)
        distance_mean = distance_sum / (len(L) - 1)

        # Set info dict
        self._info = L[0].info.copy()

        # Store information that is specific for the serie
        self._info["shape"] = (len(L),) + ds2.info["shape"]
        self._info["sampling"] = (distance_mean,) + ds2.info["sampling"]
def list_files(files, path):
    """List all files in the directory, recursively.

    Found file paths are appended to the *files* list in place.
    """
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            # Descend into subdirectories
            list_files(files, full)
        elif os.path.isfile(full):
            files.append(full)
def process_directory(request, progressIndicator, readPixelData=False):
    """
    Reads dicom files and returns a list of DicomSeries objects, which
    contain information about the data, and can be used to load the
    image or volume data.

    The directory to scan is taken from request.filename (or, for a
    file, that file's directory). Non-dicom files are skipped silently;
    other read errors are reported via the progressIndicator.

    if readPixelData is True, the pixel data of all series is read. By
    default the loading of pixeldata is deferred until it is requested
    using the DicomSeries.get_pixel_array() method. In general, both
    methods should be equally fast.
    """
    # Get directory to examine
    if os.path.isdir(request.filename):
        path = request.filename
    elif os.path.isfile(request.filename):
        path = os.path.dirname(request.filename)
    else:  # pragma: no cover - tested earlier
        raise ValueError("Dicom plugin needs a valid filename to examine the directory")

    # Check files
    files = []
    list_files(files, path)  # Find files recursively

    # Gather file data and put in DicomSeries, grouped by SeriesInstanceUID
    series = {}
    count = 0
    progressIndicator.start("examining files", "files", len(files))
    for filename in files:
        # Show progress (note that we always start with a 0.0)
        count += 1
        progressIndicator.set_progress(count)
        # Skip DICOMDIR files (directory index, not image data)
        if filename.count("DICOMDIR"):  # pragma: no cover
            continue
        # Try loading dicom ...
        try:
            dcm = SimpleDicomReader(filename)
        except NotADicomFile:
            continue  # skip non-dicom file
        except Exception as why:  # pragma: no cover
            progressIndicator.write(str(why))
            continue
        # Get SUID and register the file with an existing or new series object
        try:
            suid = dcm.SeriesInstanceUID
        except AttributeError:  # pragma: no cover
            continue  # some other kind of dicom file
        if suid not in series:
            series[suid] = DicomSeries(suid, progressIndicator)
        series[suid]._append(dcm)

    # Finish progress
    # progressIndicator.finish('Found %i series.' % len(series))

    # Make a list and sort, so that the order is deterministic
    series = list(series.values())
    series.sort(key=lambda x: x.suid)

    # Split series if necessary
    # (iterate over a copy, in reverse, because splitting mutates the list)
    for serie in reversed([serie for serie in series]):
        splitSerieIfRequired(serie, series, progressIndicator)

    # Finish all series; series that fail (e.g. report-like files
    # without pixels) are dropped with a message
    # progressIndicator.start('analyse series', '', len(series))
    series_ = []
    for i in range(len(series)):
        try:
            series[i]._finish()
            series_.append(series[i])
        except Exception as err:  # pragma: no cover
            progressIndicator.write(str(err))
            pass  # Skip serie (probably report-like file without pixels)
        # progressIndicator.set_progress(i+1)
    progressIndicator.finish("Found %i correct series." % len(series_))

    # Done
    return series_
def splitSerieIfRequired(serie, series, progressIndicator):
    """
    Split the serie in multiple series if this is required. The choice
    is based on examing the image position relative to the previous
    image. If it differs too much, it is assumed that there is a new
    dataset. This can happen for example in unspitted gated CT data.

    The *series* list is modified in place: when a split happens,
    *serie* is replaced (at its original position) by the sub-series.
    """

    # Sort the original list and get local name
    serie._sort()
    L = serie._entries
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this (we need z positions to compare)
    if "ImagePositionPatient" not in ds1:
        return
    # Initialize a list of new lists; the first sublist starts with ds1
    L2 = [[ds1]]
    # Init slice distance estimate (0 means "not known yet")
    distance = 0

    for index in range(1, len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions (z coordinate of each slice)
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distances
        newDist = abs(pos1 - pos2)
        # deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1 * distance:
            # Start a new sub-series and reset the distance estimate
            L2.append([])
            distance = 0
        else:
            # Test missing file (a gap of ~1 slice in the same series)
            if distance and newDist > 1.5 * distance:
                progressIndicator.write(
                    "Warning: missing file after %r" % ds1._filename
                )
            distance = newDist
        # Add to last list
        L2[-1].append(ds2)
        # Store previous
        ds1 = ds2

    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series (each keeps the original suid)
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, progressIndicator)
            newSerie._entries = L
            series2insert.append(newSerie)
        # Insert series and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
1312
.CondaPkg/env/Lib/site-packages/imageio/plugins/_freeimage.py
vendored
Normal file
1312
.CondaPkg/env/Lib/site-packages/imageio/plugins/_freeimage.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
897
.CondaPkg/env/Lib/site-packages/imageio/plugins/_swf.py
vendored
Normal file
897
.CondaPkg/env/Lib/site-packages/imageio/plugins/_swf.py
vendored
Normal file
@@ -0,0 +1,897 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
# This code was taken from https://github.com/almarklein/visvis/blob/master/vvmovie/images2swf.py
|
||||
|
||||
# styletest: ignore E261
|
||||
|
||||
"""
|
||||
Provides a function (write_swf) to store a series of numpy arrays in an
|
||||
SWF movie, that can be played on a wide range of OS's.
|
||||
|
||||
In desperation of wanting to share animated images, and then lacking a good
|
||||
writer for animated gif or .avi, I decided to look into SWF. This format
|
||||
is very well documented.
|
||||
|
||||
This is a pure python module to create an SWF file that shows a series
|
||||
of images. The images are stored using the DEFLATE algorithm (same as
|
||||
PNG and ZIP and which is included in the standard Python distribution).
|
||||
As this compression algorithm is much more effective than that used in
|
||||
GIF images, we obtain better quality (24 bit colors + alpha channel)
|
||||
while still producesing smaller files (a test showed ~75%). Although
|
||||
SWF also allows for JPEG compression, doing so would probably require
|
||||
a third party library for the JPEG encoding/decoding, we could
|
||||
perhaps do this via Pillow or freeimage.
|
||||
|
||||
sources and tools:
|
||||
|
||||
- SWF on wikipedia
|
||||
- Adobes "SWF File Format Specification" version 10
|
||||
(http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf)
|
||||
- swftools (swfdump in specific) for debugging
|
||||
- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really
|
||||
good quality, while file size is reduced with factors 20-100.
|
||||
A good program in my opinion. The free version has the limitation
|
||||
of a watermark in the upper left corner.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import zlib
|
||||
import time # noqa
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# todo: use Pillow to support reading JPEG images from SWF?
|
||||
|
||||
|
||||
# Base functions and classes
|
||||
|
||||
|
||||
class BitArray:
    """Dynamic array of bits that automatically resizes
    with factors of two.
    Append bits using .append() or +=
    You can reverse bits using .reverse()

    Bits are stored as the ASCII characters '0'/'1' in a uint8 numpy
    array; repr(self) yields the bit string.
    """

    def __init__(self, initvalue=None):
        # Backing store of ASCII codes; grown on demand by _checkSize()
        self.data = np.zeros((16,), dtype=np.uint8)
        self._len = 0  # number of bits actually in use
        if initvalue is not None:
            self.append(initvalue)

    def __len__(self):
        return self._len  # self.data.shape[0]

    def __repr__(self):
        # The used portion of the buffer, decoded to a '0'/'1' string
        return self.data[: self._len].tobytes().decode("ascii")

    def _checkSize(self):
        # check length... grow if necessary
        # (doubling keeps appends O(1) amortized)
        arraylen = self.data.shape[0]
        if self._len >= arraylen:
            tmp = np.zeros((arraylen * 2,), dtype=np.uint8)
            tmp[: self._len] = self.data[: self._len]
            self.data = tmp

    def __add__(self, value):
        # NOTE: += mutates this instance in place (and so does `a + b`)
        self.append(value)
        return self

    def append(self, bits):
        """Append bits, given as a str, int, or another BitArray."""
        # check input
        if isinstance(bits, BitArray):
            bits = str(bits)
        if isinstance(bits, int):  # pragma: no cover - we dont use it
            bits = str(bits)
        if not isinstance(bits, str):  # pragma: no cover
            raise ValueError("Append bits as strings or integers!")

        # add bits, one character at a time
        for bit in bits:
            self.data[self._len] = ord(bit)
            self._len += 1
            self._checkSize()

    def reverse(self):
        """In-place reverse."""
        tmp = self.data[: self._len].copy()
        self.data[: self._len] = tmp[::-1]

    def tobytes(self):
        """Convert to bytes. If necessary,
        zeros are padded to the end (right side).
        """
        bits = str(self)

        # determine number of bytes
        nbytes = 0
        while nbytes * 8 < len(bits):
            nbytes += 1
        # pad
        bits = bits.ljust(nbytes * 8, "0")

        # go from bits to bytes, 8 bits at a time
        bb = bytes()
        for i in range(nbytes):
            tmp = int(bits[i * 8 : (i + 1) * 8], 2)
            bb += int2uint8(tmp)

        # done
        return bb
def int2uint32(i):
    """Pack *i* as a 4-byte little-endian unsigned integer."""
    return int(i).to_bytes(length=4, byteorder="little")
def int2uint16(i):
    """Pack *i* as a 2-byte little-endian unsigned integer."""
    return int(i).to_bytes(length=2, byteorder="little")
def int2uint8(i):
    """Pack *i* as a single unsigned byte."""
    return int(i).to_bytes(length=1, byteorder="little")
def int2bits(i, n=None):
    """convert int to a string of bits (0's and 1's in a string),
    pad to n elements. Convert back using int(ss,2)."""
    remainder = i

    # Collect bits LSB-first, then reverse to get MSB-first order
    bb = BitArray()
    while remainder > 0:
        bb += str(remainder % 2)
        remainder = remainder >> 1
    bb.reverse()

    # Left-pad with zeros up to the requested width
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("int2bits fail: len larger than padlength.")
        bb = str(bb).rjust(n, "0")

    # done
    return BitArray(bb)
def bits2int(bb, n=8):
    """Interpret the first *n* bits of the bytes *bb* (little-endian
    byte order) as an unsigned integer."""
    bitstring = ""
    # Prepend each byte's bits, so the last byte is most significant
    for byte in bb:
        bitstring = format(byte, "08b") + bitstring

    # Make decimal
    return int(bitstring[:n], 2)
def get_type_and_len(bb):
    """bb should be 6 bytes at least
    Return (type, length, length_of_full_tag)
    """
    # The record header is a little-endian uint16: tag type in the
    # upper 10 bits, length in the lower 6 bits.
    header = int.from_bytes(bb[0:2], "little")
    tag_type = header >> 6
    tag_len = header & 0x3F
    full_len = tag_len + 2

    # Long tag header? A 6-bit length of 63 ('111111') means the real
    # length follows as a little-endian uint32.
    if tag_len == 63:
        tag_len = int.from_bytes(bb[2:6], "little")
        full_len = tag_len + 6

    # Done
    return tag_type, tag_len, full_len
def signedint2bits(i, n=None):
    """convert signed int to a string of bits (0's and 1's in a string),
    pad to n elements. Negative numbers are stored in 2's complement bit
    patterns, thus positive numbers always start with a 0.
    """

    # negative number?
    ii = i
    if i < 0:
        # A negative number, -n, is represented as the bitwise opposite of
        ii = abs(ii) - 1  # the positive-zero number n-1.

    # make bits (LSB-first, then reverse to MSB-first)
    bb = BitArray()
    while ii > 0:
        bb += str(ii % 2)
        ii = ii >> 1
    bb.reverse()

    # justify
    bb = "0" + str(bb)  # always need the sign bit in front
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("signedint2bits fail: len larger than padlength.")
        bb = bb.rjust(n, "0")

    # was it negative? (then opposite bits, completing the 2's complement)
    if i < 0:
        bb = bb.replace("0", "x").replace("1", "0").replace("x", "1")

    # done
    return BitArray(bb)
def twits2bits(arr):
    """Given a few (signed) numbers, store them
    as compactly as possible in the wat specifief by the swf format.
    The numbers are multiplied by 20, assuming they
    are twits.
    Can be used to make the RECT record.
    """

    # Field width: the widest unpadded representation, at least 1 bit
    maxlen = max([1] + [len(signedint2bits(value * 20)) for value in arr])

    # Emit the 5-bit width, then each value padded to that width
    bits = int2bits(maxlen, 5)
    for value in arr:
        bits += signedint2bits(value * 20, maxlen)

    return bits
def floats2bits(arr):
    """Given a few (signed) numbers, convert them to bits,
    stored as FB (float bit values). We always use 16.16.
    Negative numbers are not (yet) possible, because I don't
    know how the're implemented (ambiguity).
    """
    bits = int2bits(31, 5)  # 32 does not fit in 5 bits!
    for i in arr:
        if i < 0:  # pragma: no cover
            raise ValueError("Dit not implement negative floats!")
        i1 = int(i)  # integer part
        i2 = i - i1  # fractional part, in [0, 1)
        bits += int2bits(i1, 15)
        # Fix: the scaled fraction must be cast to int. Passing a float
        # into int2bits makes str(ii % 2) produce "0.0"/"1.0", which
        # appends '.' characters into the BitArray and later breaks
        # int(bits, 2) for any value with a nonzero fractional part.
        bits += int2bits(int(i2 * 2**16), 16)
    return bits
# Base Tag
|
||||
|
||||
|
||||
class Tag:
    """Base class for all SWF tags.

    Subclasses set self.tagtype and implement process_tag() to fill
    self.bytes; get_tag() then prepends the record header.
    """

    def __init__(self):
        self.bytes = bytes()  # the tag body, filled in by process_tag()
        self.tagtype = -1  # overridden by subclasses

    def process_tag(self):
        """Implement this to create the tag."""
        raise NotImplementedError()

    def get_tag(self):
        """Calls processTag and attaches the header."""
        self.process_tag()

        # tag to binary: 10 bits of tag type
        bits = int2bits(self.tagtype, 10)

        # complete header uint16 thing; always use the long-form length
        bits += "1" * 6  # = 63 = 0x3f
        # make uint16
        bb = int2uint16(int(str(bits), 2))

        # now add 32bit length descriptor
        bb += int2uint32(len(self.bytes))

        # done, attach and return
        bb += self.bytes
        return bb

    def make_rect_record(self, xmin, xmax, ymin, ymax):
        """Simply uses makeCompactArray to produce
        a RECT Record."""
        return twits2bits([xmin, xmax, ymin, ymax])

    def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None):
        """Build a MATRIX record; every part is optional.

        Returns a BitArray, or the string '0' * 8 for the empty matrix.
        """
        # empty matrix?
        if scale_xy is None and rot_xy is None and trans_xy is None:
            return "0" * 8

        # init
        bits = BitArray()

        # scale: flag bit, then two 16.16 fixed-point values
        if scale_xy:
            bits += "1"
            bits += floats2bits([scale_xy[0], scale_xy[1]])
        else:
            bits += "0"

        # rotation: flag bit, then two 16.16 fixed-point values
        if rot_xy:
            bits += "1"
            bits += floats2bits([rot_xy[0], rot_xy[1]])
        else:
            bits += "0"

        # translation (no flag here; always present, stored as twits)
        if trans_xy:
            bits += twits2bits([trans_xy[0], trans_xy[1]])
        else:
            bits += twits2bits([0, 0])

        # done
        return bits
|
||||
# Control tags
|
||||
|
||||
|
||||
class ControlTag(Tag):
    """Common base for the control tags below."""

    def __init__(self):
        super().__init__()
class FileAttributesTag(ControlTag):
    """The FileAttributes tag (tag type 69)."""

    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 69

    def process_tag(self):
        # Four zero bytes: no attribute flags set
        self.bytes = b"\x00" * 4
class ShowFrameTag(ControlTag):
    """The ShowFrame tag (tag type 1); it carries no payload."""

    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 1

    def process_tag(self):
        self.bytes = b""
class SetBackgroundTag(ControlTag):
    """Set the color in 0-255, or 0-1 (if floats given)."""

    def __init__(self, *rgb):
        # Fix: initialize the base class like the sibling tags do, so
        # self.bytes exists even before process_tag() has run.
        ControlTag.__init__(self)
        self.tagtype = 9
        if len(rgb) == 1:
            rgb = rgb[0]  # also accept a single (r, g, b) sequence
        self.rgb = rgb

    def process_tag(self):
        bb = bytes()
        for i in range(3):
            clr = self.rgb[i]
            if isinstance(clr, float):  # pragma: no cover - not used
                clr = clr * 255  # scale 0-1 floats to 0-255
            bb += int2uint8(clr)
        self.bytes = bb
class DoActionTag(Tag):
    """A DoAction tag (tag type 12) holding a list of named actions."""

    def __init__(self, action="stop"):
        Tag.__init__(self)
        self.tagtype = 12
        self.actions = [action]

    def append(self, action):  # pragma: no cover - not used
        self.actions.append(action)

    def process_tag(self):
        # Known action codes: 0x07 = stop, 0x06 = play. Unknown names
        # are skipped with a warning. A zero byte ends the action list.
        payload = bytes()
        for action in self.actions:
            action = action.lower()
            if action == "stop":
                payload += b"\x07"
            elif action == "play":  # pragma: no cover - not used
                payload += b"\x06"
            else:  # pragma: no cover
                logger.warning("unknown action: %s" % action)
        payload += int2uint8(0)
        self.bytes = payload
|
||||
# Definition tags
|
||||
class DefinitionTag(Tag):
    """Base class for tags that define a character (asset).

    Each instance receives a unique id from a class-level counter.
    """

    counter = 0  # to give automatically id's

    def __init__(self):
        Tag.__init__(self)
        DefinitionTag.counter += 1
        self.id = DefinitionTag.counter  # id in dictionary
class BitmapTag(DefinitionTag):
    """DefineBitsLossless2 tag (tag type 36): a zlib-compressed ARGB bitmap."""

    def __init__(self, im):
        DefinitionTag.__init__(self)
        self.tagtype = 36  # DefineBitsLossless2

        # convert image (note that format is ARGB)
        # even a grayscale image is stored in ARGB, nevertheless,
        # the fabilous deflate compression will make it that not much
        # more data is required for storing (25% or so, and less than 10%
        # when storing RGB as ARGB).

        if len(im.shape) == 3:
            if im.shape[2] in [3, 4]:
                # RGB(A): alpha defaults to 255; RGB shift one slot right
                tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
                for i in range(3):
                    tmp[:, :, i + 1] = im[:, :, i]
                if im.shape[2] == 4:
                    tmp[:, :, 0] = im[:, :, 3]  # swap channel where alpha is
            else:  # pragma: no cover
                raise ValueError("Invalid shape to be an image.")

        elif len(im.shape) == 2:
            # grayscale: replicate into R, G and B; alpha stays 255
            tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
            for i in range(3):
                tmp[:, :, i + 1] = im[:, :]
        else:  # pragma: no cover
            raise ValueError("Invalid shape to be an image.")

        # we changed the image to uint8 4 channels.
        # now compress!
        self._data = zlib.compress(tmp.tobytes(), zlib.DEFLATED)
        self.imshape = im.shape

    def process_tag(self):
        # build tag
        bb = bytes()
        bb += int2uint16(self.id)  # CharacterID
        bb += int2uint8(5)  # BitmapFormat
        bb += int2uint16(self.imshape[1])  # BitmapWidth
        bb += int2uint16(self.imshape[0])  # BitmapHeight
        bb += self._data  # ZlibBitmapData

        self.bytes = bb
class PlaceObjectTag(ControlTag):
    """PlaceObject2 tag (tag type 26): place or move a character on
    the display list at the given depth and translation."""

    def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False):
        ControlTag.__init__(self)
        self.tagtype = 26
        self.depth = depth  # display-list depth
        self.idToPlace = idToPlace  # character id to place
        self.xy = xy  # translation (converted to twits in the MATRIX record)
        self.move = move  # True to move the character already at this depth

    def process_tag(self):
        # retrieve stuff
        depth = self.depth
        xy = self.xy
        id = self.idToPlace

        # build PlaceObject2
        bb = bytes()
        if self.move:
            bb += "\x07".encode("ascii")
        else:
            # (8 bit flags): 4:matrix, 2:character, 1:move
            bb += "\x06".encode("ascii")
        bb += int2uint16(depth)  # Depth
        bb += int2uint16(id)  # character id
        bb += self.make_matrix_record(trans_xy=xy).tobytes()  # MATRIX record
        self.bytes = bb
class ShapeTag(DefinitionTag):
    """DefineShape tag (tag type 2): a rectangle with a bitmap fill."""

    def __init__(self, bitmapId, xy, wh):
        DefinitionTag.__init__(self)
        self.tagtype = 2
        self.bitmapId = bitmapId  # id of the bitmap used as fill
        self.xy = xy  # top-left corner
        self.wh = wh  # width and height

    def process_tag(self):
        """Returns a defineshape tag. with a bitmap fill"""

        bb = bytes()
        bb += int2uint16(self.id)
        xy, wh = self.xy, self.wh
        tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1])  # ShapeBounds
        bb += tmp.tobytes()

        # make SHAPEWITHSTYLE structure

        # first entry: FILLSTYLEARRAY with in it a single fill style
        bb += int2uint8(1)  # FillStyleCount
        bb += "\x41".encode("ascii")  # FillStyleType (0x41 or 0x43 unsmoothed)
        bb += int2uint16(self.bitmapId)  # BitmapId
        # bb += '\x00' # BitmapMatrix (empty matrix with leftover bits filled)
        bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes()

        # # first entry: FILLSTYLEARRAY with in it a single fill style
        # bb += int2uint8(1)  # FillStyleCount
        # bb += '\x00'  # solid fill
        # bb += '\x00\x00\xff'  # color

        # second entry: LINESTYLEARRAY with a single line style
        bb += int2uint8(0)  # LineStyleCount
        # bb += int2uint16(0*20) # Width
        # bb += '\x00\xff\x00' # Color

        # third and fourth entry: NumFillBits and NumLineBits (4 bits each)
        # I each give them four bits, so 16 styles possible.
        bb += "\x44".encode("ascii")

        self.bytes = bb

        # last entries: SHAPERECORDs ... (individual shape records not aligned)
        # STYLECHANGERECORD
        bits = BitArray()
        bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1]))
        # STRAIGHTEDGERECORD 4x: trace the four rectangle edges
        bits += self.make_straight_edge_record(-self.wh[0], 0)
        bits += self.make_straight_edge_record(0, -self.wh[1])
        bits += self.make_straight_edge_record(self.wh[0], 0)
        bits += self.make_straight_edge_record(0, self.wh[1])

        # ENDSHAPRECORD
        bits += self.make_end_shape_record()

        self.bytes += bits.tobytes()

        # done
        # self.bytes = bb

    def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None):
        """Build a STYLECHANGERECORD as a BitArray."""
        # first 6 flags
        # Note that we use FillStyle1. If we don't flash (at least 8) does not
        # recognize the frames properly when importing to library.

        bits = BitArray()
        bits += "0"  # TypeFlag (not an edge record)
        bits += "0"  # StateNewStyles (only for DefineShape2 and Defineshape3)
        if lineStyle:
            bits += "1"  # StateLineStyle
        else:
            bits += "0"
        if fillStyle:
            bits += "1"  # StateFillStyle1
        else:
            bits += "0"
        bits += "0"  # StateFillStyle0
        if moveTo:
            bits += "1"  # StateMoveTo
        else:
            bits += "0"

        # give information
        # todo: nbits for fillStyle and lineStyle is hard coded.

        if moveTo:
            bits += twits2bits([moveTo[0], moveTo[1]])
        if fillStyle:
            bits += int2bits(fillStyle, 4)
        if lineStyle:
            bits += int2bits(lineStyle, 4)

        return bits

    def make_straight_edge_record(self, *dxdy):
        """Build a STRAIGHTEDGERECORD for a (dx, dy) delta.

        Accepts either two arguments or a single (dx, dy) pair.
        """
        if len(dxdy) == 1:
            dxdy = dxdy[0]

        # determine required number of bits (the wider of dx and dy)
        xbits = signedint2bits(dxdy[0] * 20)
        ybits = signedint2bits(dxdy[1] * 20)
        nbits = max([len(xbits), len(ybits)])

        bits = BitArray()
        bits += "11"  # TypeFlag and StraightFlag
        bits += int2bits(nbits - 2, 4)
        bits += "1"  # GeneralLineFlag
        bits += signedint2bits(dxdy[0] * 20, nbits)
        bits += signedint2bits(dxdy[1] * 20, nbits)

        # note: I do not make use of vertical/horizontal only lines...

        return bits

    def make_end_shape_record(self):
        """Build the ENDSHAPERECORD (six zero bits)."""
        bits = BitArray()
        bits += "0"  # TypeFlag: no edge
        bits += "0" * 5  # EndOfShape
        return bits
def read_pixels(bb, i, tagType, L1):
    """Read the pixel data of a DefineBitsLossless(2) tag.

    Parameters
    ----------
    bb : bytes
        The raw SWF file data.
    i : int
        Offset just past the record header of the tag.
    tagType : int
        20 (DefineBitsLossless, RGB) or 36 (DefineBitsLossless2, ARGB).
    L1 : int
        Tag body length in bytes.

    Returns
    -------
    Numpy uint8 array of shape (height, width, 3) for tag 20 or
    (height, width, 4) RGBA for tag 36, or None when the data cannot
    be read (unsupported format or byte-alignment problems).
    """
    # Get info
    charId = bb[i : i + 2]  # noqa
    i += 2
    format = ord(bb[i : i + 1])  # 5 = zlib-compressed 24/32 bit data
    i += 1
    width = bits2int(bb[i : i + 2], 16)
    i += 2
    height = bits2int(bb[i : i + 2], 16)
    i += 2

    # Fix: 'a' was previously unbound when format != 5, so the final
    # 'return a' raised NameError instead of signalling failure.
    a = None

    # If we can, get pixeldata and make numpy array
    if format != 5:
        logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.")
    else:
        # Read byte data (tag body minus the info fields read above)
        offset = 2 + 1 + 2 + 2  # all the info bits
        bb2 = bb[i : i + (L1 - offset)]

        # Decompress and make numpy array
        data = zlib.decompress(bb2)
        a = np.frombuffer(data, dtype=np.uint8)

        # Set shape
        if tagType == 20:
            # DefineBitsLossless - RGB data
            try:
                a.shape = height, width, 3
            except Exception:
                # Byte align stuff might cause troubles
                logger.warning("Cannot read image due to byte alignment")
                a = None  # fix: do not return a flat, unusable array
        if tagType == 36:
            # DefineBitsLossless2 - ARGB data
            a.shape = height, width, 4
            # Swap alpha channel to make RGBA
            b = a
            a = np.zeros_like(a)
            a[:, :, 0] = b[:, :, 1]
            a[:, :, 1] = b[:, :, 2]
            a[:, :, 2] = b[:, :, 3]
            a[:, :, 3] = b[:, :, 0]

    return a
||||
|
||||
# Last few functions
|
||||
|
||||
|
||||
# These are the original public functions, we don't use them, but we
|
||||
# keep it so that in principle this module can be used stand-alone.
|
||||
|
||||
|
||||
def checkImages(images):  # pragma: no cover
    """checkImages(images)

    Check numpy images and correct intensity range etc.
    The same for all movie formats.
    """
    checked = []

    for im in images:
        # Only numpy arrays are accepted.
        if not isinstance(im, np.ndarray):
            raise ValueError("Invalid image type: " + str(type(im)))

        # Normalize dtype to uint8.
        if im.dtype == np.uint8:
            checked.append(im)  # already fine
        elif im.dtype in [np.float32, np.float64]:
            theMax = im.max()
            if 128 < theMax < 300:
                pass  # assume the data is already in 0:255
            else:
                # Clip to [0, 1] and scale up to the byte range.
                im = im.copy()
                im[im < 0] = 0
                im[im > 1] = 1
                im *= 255
            checked.append(im.astype(np.uint8))
        else:
            im = im.astype(np.uint8)
            checked.append(im)

        # Validate dimensions: grayscale (2D) or RGB/RGBA (3D).
        if im.ndim == 2:
            pass  # ok
        elif im.ndim == 3:
            if im.shape[2] not in [3, 4]:
                raise ValueError("This array can not represent an image.")
        else:
            raise ValueError("This array can not represent an image.")

    # Done
    return checked
|
||||
|
||||
def build_file(
    fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8
):  # pragma: no cover
    """Give the given file (as bytes) a header."""

    # Assemble the SWF header pieces and write them in one go.
    header_parts = [
        "F".encode("ascii"),  # 'F' = uncompressed
        "WS".encode("ascii"),  # signature bytes
        int2uint8(version),  # version
        "0000".encode("ascii"),  # FileLength placeholder, patched below
        Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes(),
        int2uint8(0) + int2uint8(fps),  # FrameRate
        int2uint16(nframes),
    ]
    fp.write(b"".join(header_parts))

    # Produce all tags
    for tag in taglist:
        fp.write(tag.get_tag())

    # Finish with end tag
    fp.write("\x00\x00".encode("ascii"))

    # Patch the real file length into the placeholder at offset 4.
    total_size = fp.tell()
    fp.seek(4)
    fp.write(int2uint32(total_size))
||||
|
||||
def write_swf(filename, images, duration=0.1, repeat=True):  # pragma: no cover
    """Write an swf-file from the specified images. If repeat is False,
    the movie is finished with a stop action. Duration may also
    be a list with durations for each frame (note that the duration
    for each frame is always an integer amount of the minimum duration.)

    Images should be a list consisting numpy arrays with values between
    0 and 255 for integer types, and between 0 and 1 for float types.

    Raises
    ------
    ValueError
        If images is empty, or len(duration) does not match the images.
    """

    # Check images
    images2 = checkImages(images)
    if not images2:
        # Fix: previously an empty list led to a NameError on 'wh' below.
        raise ValueError("Need at least one image to write an SWF file.")

    # Init
    taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)]

    # Check duration
    if hasattr(duration, "__len__"):
        if len(duration) != len(images2):
            raise ValueError("len(duration) doesn't match amount of images.")
        duration = list(duration)
    else:
        duration = [duration for im in images2]

    # Build delays list: each frame is shown an integer multiple of the
    # minimum duration, at least once.
    minDuration = float(min(duration))
    delays = [max(1, int(round(d / minDuration))) for d in duration]

    # Get FPS
    fps = 1.0 / minDuration

    # Produce series of tags for each image
    nframes = 0
    for im in images2:
        bm = BitmapTag(im)
        wh = (im.shape[1], im.shape[0])
        sh = ShapeTag(bm.id, (0, 0), wh)
        po = PlaceObjectTag(1, sh.id, move=nframes > 0)
        taglist.extend([bm, sh, po])
        for i in range(delays[nframes]):
            taglist.append(ShowFrameTag())
        nframes += 1

    if not repeat:
        taglist.append(DoActionTag("stop"))

    # Build file; 'with' guarantees the handle is closed on error
    # (replaces the previous try/except-raise/finally boilerplate).
    with open(filename, "wb") as fp:
        build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps)
||||
|
||||
def read_swf(filename):  # pragma: no cover
    """Read all images from an SWF (shockwave flash) file. Returns a list
    of numpy arrays.

    Limitation: only read the PNG encoded images (not the JPG encoded ones).
    """

    # Check whether it exists
    if not os.path.isfile(filename):
        raise IOError("File not found: " + str(filename))

    # Init images
    images = []

    # Read the whole file; parsing works on the in-memory bytes, so the
    # handle can be closed immediately ('with' replaces try/finally).
    with open(filename, "rb") as fp:
        bb = fp.read()

    # Check opening tag: FWS = uncompressed, CWS = zlib-compressed body
    tmp = bb[0:3].decode("ascii", "ignore")
    if tmp.upper() == "FWS":
        pass  # ok
    elif tmp.upper() == "CWS":
        # Decompress movie
        bb = bb[:8] + zlib.decompress(bb[8:])
    else:
        raise IOError("Not a valid SWF file: " + str(filename))

    # Set filepointer at first tag (skipping framesize RECT and two uint16's)
    i = 8
    nbits = bits2int(bb[i : i + 1], 5)  # skip FrameSize
    nbits = 5 + nbits * 4
    Lrect = nbits / 8.0
    if Lrect % 1:
        Lrect += 1  # round the RECT length up to whole bytes
    Lrect = int(Lrect)
    i += Lrect + 4

    # Iterate over the tags
    while True:
        # Get tag header
        head = bb[i : i + 6]
        if not head:
            break  # Done (we missed end tag)

        # Determine type and length
        T, L1, L2 = get_type_and_len(head)
        if not L2:
            logger.warning("Invalid tag length, could not proceed")
            break

        # Read image if we can (20/36 = lossless; the listed JPEG tags
        # are recognized but not supported)
        if T in [20, 36]:
            im = read_pixels(bb, i + 6, T, L1)
            if im is not None:
                images.append(im)
        elif T in [6, 21, 35, 90]:
            logger.warning("Ignoring JPEG image: cannot read JPEG.")
        else:
            pass  # Not an image tag

        # Detect end tag
        if T == 0:
            break

        # Next tag!
        i += L2

    # Done
    return images
||||
|
||||
# Backward compatibility; same public names as when this was images2swf.
writeSwf = write_swf  # legacy camelCase alias
readSwf = read_swf  # legacy camelCase alias
||||
10675
.CondaPkg/env/Lib/site-packages/imageio/plugins/_tifffile.py
vendored
Normal file
10675
.CondaPkg/env/Lib/site-packages/imageio/plugins/_tifffile.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
324
.CondaPkg/env/Lib/site-packages/imageio/plugins/bsdf.py
vendored
Normal file
324
.CondaPkg/env/Lib/site-packages/imageio/plugins/bsdf.py
vendored
Normal file
@@ -0,0 +1,324 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write BSDF files.
|
||||
|
||||
Backend Library: internal
|
||||
|
||||
The BSDF format enables reading and writing of image data in the
|
||||
BSDF serialization format. This format allows storage of images, volumes,
|
||||
and series thereof. Data can be of any numeric data type, and can
|
||||
optionally be compressed. Each image/volume can have associated
|
||||
meta data, which can consist of any data type supported by BSDF.
|
||||
|
||||
By default, image data is lazily loaded; the actual image data is
|
||||
not read until it is requested. This allows storing multiple images
|
||||
in a single file and still have fast access to individual images.
|
||||
Alternatively, a series of images can be read in streaming mode, reading
|
||||
images as they are read (e.g. from http).
|
||||
|
||||
BSDF is a simple generic binary format. It is easy to extend and there
|
||||
are standard extension definitions for 2D and 3D image data.
|
||||
Read more at http://bsdf.io.
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
random_access : bool
|
||||
Whether individual images in the file can be read in random order.
|
||||
Defaults to True for normal files, and to False when reading from HTTP.
|
||||
If False, the file is read in "streaming mode", allowing reading
|
||||
files as they are read, but without support for "rewinding".
|
||||
Note that setting this to True when reading from HTTP, the whole file
|
||||
is read upon opening it (since lazy loading is not possible over HTTP).
|
||||
|
||||
compression : int
|
||||
Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
|
||||
compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
|
||||
compression (more compact but slower). Default 1 (zlib).
|
||||
Note that some BSDF implementations may not support compression
|
||||
(e.g. JavaScript).
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
def get_bsdf_serializer(options):
    """Create a BSDF serializer equipped with imageio's image extensions.

    Parameters
    ----------
    options : dict
        Keyword arguments forwarded to ``bsdf.BsdfSerializer`` (e.g.
        ``lazy_blob``, ``load_streaming``, ``compression``).

    Returns
    -------
    tuple
        ``(bsdf_module, serializer)`` — the lazily imported ``_bsdf``
        module and a serializer instance with the ndarray/image2d/image3d
        extensions registered.
    """
    from . import _bsdf as bsdf

    class NDArrayExtension(bsdf.Extension):
        """Copy of BSDF's NDArrayExtension but deal with lazy blobs."""

        name = "ndarray"
        cls = np.ndarray

        def encode(self, s, v):
            # Serialize the array as shape + dtype + raw bytes.
            return dict(shape=v.shape, dtype=str(v.dtype), data=v.tobytes())

        def decode(self, s, v):
            return v  # return as dict, because of lazy blobs, decode in Image

    class ImageExtension(bsdf.Extension):
        """We implement two extensions that trigger on the Image classes."""

        def encode(self, s, v):
            return dict(array=v.array, meta=v.meta)

        def decode(self, s, v):
            return Image(v["array"], v["meta"])

    class Image2DExtension(ImageExtension):
        # Triggers on Image2D instances -> BSDF "image2d" extension.
        name = "image2d"
        cls = Image2D

    class Image3DExtension(ImageExtension):
        # Triggers on Image3D instances -> BSDF "image3d" extension.
        name = "image3d"
        cls = Image3D

    exts = [NDArrayExtension, Image2DExtension, Image3DExtension]
    serializer = bsdf.BsdfSerializer(exts, **options)

    return bsdf, serializer
||||
|
||||
class Image:
    """Wrapper pairing an image array with its meta data.

    By registering BSDF extensions on (subclasses of) this class, BSDF
    can trigger on these objects and encode/decode them as actual images.
    The array may initially be a dict payload (possibly with a lazy blob)
    that is decoded on first access.
    """

    def __init__(self, array, meta):
        self.array = array
        self.meta = meta

    def get_array(self):
        """Return the image as an ndarray, decoding a dict payload once."""
        if isinstance(self.array, np.ndarray):
            return self.array
        # Not yet decoded: self.array is a dict with data/dtype/shape.
        encoded = self.array
        raw = encoded["data"]
        if not isinstance(raw, bytes):  # then it's a lazy bsdf.Blob
            raw = raw.get_bytes()
        arr = np.frombuffer(raw, dtype=encoded["dtype"])
        arr.shape = encoded["shape"]
        self.array = arr  # cache the decoded array
        return self.array

    def get_meta(self):
        """Return the meta data dict associated with this image."""
        return self.meta
|
||||
class Image2D(Image):
    """Image flavor that triggers the BSDF 'image2d' extension."""

    pass
|
||||
|
||||
class Image3D(Image):
    """Image flavor (volume) that triggers the BSDF 'image3d' extension."""

    pass
|
||||
class BsdfFormat(Format):
    """The BSDF format enables reading and writing of image data in the
    BSDF serialization format. This format allows storage of images, volumes,
    and series thereof. Data can be of any numeric data type, and can
    optionally be compressed. Each image/volume can have associated
    meta data, which can consist of any data type supported by BSDF.

    By default, image data is lazily loaded; the actual image data is
    not read until it is requested. This allows storing multiple images
    in a single file and still have fast access to individual images.
    Alternatively, a series of images can be read in streaming mode, reading
    images as they are read (e.g. from http).

    BSDF is a simple generic binary format. It is easy to extend and there
    are standard extension definitions for 2D and 3D image data.
    Read more at http://bsdf.io.

    Parameters for reading
    ----------------------
    random_access : bool
        Whether individual images in the file can be read in random order.
        Defaults to True for normal files, and to False when reading from HTTP.
        If False, the file is read in "streaming mode", allowing reading
        files as they are read, but without support for "rewinding".
        Note that setting this to True when reading from HTTP, the whole file
        is read upon opening it (since lazy loading is not possible over HTTP).

    Parameters for saving
    ---------------------
    compression : {0, 1, 2}
        Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
        compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
        compression (more compact but slower). Default 1 (zlib).
        Note that some BSDF implementations may not support compression
        (e.g. JavaScript).

    """

    def _can_read(self, request):
        """Claim files whose first bytes are the BSDF magic."""
        if request.mode[1] in (self.modes + "?"):
            # if request.extension in self.extensions:
            #     return True
            if request.firstbytes.startswith(b"BSDF"):
                return True

    def _can_write(self, request):
        """Claim writes based on the file extension."""
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, random_access=None):
            """Load the BSDF stream, choosing lazy vs streaming mode."""
            # Validate - we need a BSDF file consisting of a list of images
            # The list is typically a stream, but does not have to be.
            assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file"
            # self.request.firstbytes[5:6] == major and minor version
            if not (
                self.request.firstbytes[6:15] == b"M\x07image2D"
                or self.request.firstbytes[6:15] == b"M\x07image3D"
                or self.request.firstbytes[6:7] == b"l"
            ):
                pass  # Actually, follow a more duck-type approach ...
                # raise RuntimeError('BSDF file does not look like an '
                #                    'image container.')
            # Set options. If we think that seeking is allowed, we lazily load
            # blobs, and set streaming to False (i.e. the whole file is read,
            # but we skip over binary blobs), so that we subsequently allow
            # random access to the images.
            # If seeking is not allowed (e.g. with a http request), we cannot
            # lazily load blobs, but we can still load streaming from the web.
            options = {}
            if self.request.filename.startswith(("http://", "https://")):
                ra = False if random_access is None else bool(random_access)
                options["lazy_blob"] = False  # Because we cannot seek now
                options["load_streaming"] = not ra  # Load as a stream?
            else:
                ra = True if random_access is None else bool(random_access)
                options["lazy_blob"] = ra  # Don't read data until needed
                options["load_streaming"] = not ra

            file = self.request.get_file()
            bsdf, self._serializer = get_bsdf_serializer(options)
            self._stream = self._serializer.load(file)
            # Another validation: a bare dict with array+meta is a singleton
            if (
                isinstance(self._stream, dict)
                and "meta" in self._stream
                and "array" in self._stream
            ):
                self._stream = Image(self._stream["array"], self._stream["meta"])
            if not isinstance(self._stream, (Image, list, bsdf.ListStream)):
                # Fix: message previously read "does not look seem to have"
                raise RuntimeError(
                    "BSDF file does not seem to have an image container."
                )

        def _close(self):
            # Nothing to release; the request owns the file object.
            pass

        def _get_length(self):
            """Return the number of images (np.inf for unknown stream length)."""
            if isinstance(self._stream, Image):
                return 1  # singleton
            elif isinstance(self._stream, list):
                return len(self._stream)
            elif self._stream.count < 0:
                return np.inf  # a stream of unknown length
            return self._stream.count

        def _get_data(self, index):
            """Return (array, meta) for the image at *index*."""
            # Validate
            if index < 0 or index >= self.get_length():
                raise IndexError(
                    "Image index %i not in [0 %i]." % (index, self.get_length())
                )
            # Get Image object
            if isinstance(self._stream, Image):
                image_ob = self._stream  # singleton
            elif isinstance(self._stream, list):
                # Easy when we have random access
                image_ob = self._stream[index]
            else:
                # For streaming, we need to skip over frames
                if index < self._stream.index:
                    raise IndexError(
                        "BSDF file is being read in streaming "
                        "mode, thus does not allow rewinding."
                    )
                while index > self._stream.index:
                    self._stream.next()
                image_ob = self._stream.next()  # Can raise StopIteration
            # Is this an image?
            if (
                isinstance(image_ob, dict)
                and "meta" in image_ob
                and "array" in image_ob
            ):
                image_ob = Image(image_ob["array"], image_ob["meta"])
            if isinstance(image_ob, Image):
                # Return as array (if we have lazy blobs, they are read now)
                return image_ob.get_array(), image_ob.get_meta()
            else:
                r = repr(image_ob)
                r = r if len(r) < 200 else r[:197] + "..."
                raise RuntimeError("BSDF file contains non-image " + r)

        def _get_meta_data(self, index):  # pragma: no cover
            return {}  # This format does not support global meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, compression=1):
            """Prepare for writing a singleton image or a stream of images."""
            options = {"compression": compression}
            bsdf, self._serializer = get_bsdf_serializer(options)
            if self.request.mode[1] in "iv":
                self._stream = None  # Singleton image
                self._written = False
            else:
                # Series (stream) of images
                file = self.request.get_file()
                self._stream = bsdf.ListStream()
                self._serializer.save(file, self._stream)

        def _close(self):
            # We close the stream here, which will mark the number of written
            # elements. If we would not close it, the file would be fine, it's
            # just that upon reading it would not be known how many items are
            # in there.
            if self._stream is not None:
                self._stream.close(False)  # False says "keep this a stream"

        def _append_data(self, im, meta):
            """Write one image/volume, wrapped so BSDF uses the image extensions."""
            # Determine dimension
            ndim = None
            if self.request.mode[1] in "iI":
                ndim = 2
            elif self.request.mode[1] in "vV":
                ndim = 3
            else:
                ndim = 3  # Make an educated guess
                if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4):
                    ndim = 2
            # Validate shape (a trailing dim <= 4 is treated as channels)
            assert ndim in (2, 3)
            if ndim == 2:
                assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4)
            else:
                assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4)
            # Wrap data and meta data in our special class that will trigger
            # the BSDF image2D or image3D extension.
            if ndim == 2:
                ob = Image2D(im, meta)
            else:
                ob = Image3D(im, meta)
            # Write directly or to stream
            if self._stream is None:
                assert not self._written, "Cannot write singleton image twice"
                self._written = True
                file = self.request.get_file()
                self._serializer.save(file, ob)
            else:
                self._stream.append(ob)

        def set_meta_data(self, meta):  # pragma: no cover
            raise RuntimeError("The BSDF format only supports " "per-image meta data.")
333
.CondaPkg/env/Lib/site-packages/imageio/plugins/dicom.py
vendored
Normal file
333
.CondaPkg/env/Lib/site-packages/imageio/plugins/dicom.py
vendored
Normal file
@@ -0,0 +1,333 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read DICOM files.
|
||||
|
||||
Backend Library: internal
|
||||
|
||||
A format for reading DICOM images: a common format used to store
|
||||
medical image data, such as X-ray, CT and MRI.
|
||||
|
||||
This format borrows some code (and ideas) from the pydicom project. However,
|
||||
only a predefined subset of tags are extracted from the file. This allows
|
||||
for great simplifications allowing us to make a stand-alone reader, and
|
||||
also results in a much faster read time.
|
||||
|
||||
By default, only uncompressed and deflated transfer syntaxes are supported.
|
||||
If gdcm or dcmtk is installed, these will be used to automatically convert
|
||||
the data. See https://github.com/malaterre/GDCM/releases for installing GDCM.
|
||||
|
||||
This format provides functionality to group images of the same
|
||||
series together, thus extracting volumes (and multiple volumes).
|
||||
Using volread will attempt to yield a volume. If multiple volumes
|
||||
are present, the first one is given. Using mimread will simply yield
|
||||
all images in the given directory (not taking series into account).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
progress : {True, False, BaseProgressIndicator}
|
||||
Whether to show progress when reading from multiple files.
|
||||
Default True. By passing an object that inherits from
|
||||
BaseProgressIndicator, the way in which progress is reported
|
||||
can be customized.
|
||||
|
||||
"""
|
||||
|
||||
# todo: Use pydicom:
|
||||
# * Note: is not py3k ready yet
|
||||
# * Allow reading the full meta info
|
||||
# I think we can more or less replace the SimpleDicomReader with a
|
||||
# pydicom.Dataset. For series, we only need to read the full info from one
|
||||
# file: speed still high
|
||||
# * Perhaps allow writing?
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator
|
||||
from ..core import read_n_bytes
|
||||
|
||||
_dicom = None # lazily loaded in load_lib()
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def load_lib():
    """Lazily import the internal ``_dicom`` helper module.

    Caches the module in the module-level ``_dicom`` global so the
    (relatively expensive) import happens only once, on first use.
    """
    global _dicom
    from . import _dicom

    return _dicom
||||
|
||||
# Determine endianity of system (DICOM stores data with explicit endianness)
sys_is_little_endian = sys.byteorder == "little"
|
||||
|
||||
def get_dcmdjpeg_exe():
    """Locate the dcmtk ``dcmdjpeg`` executable.

    Checks a few common Windows install locations first, then probes the
    PATH. Returns the command as a one-element list, or None if not found.
    """
    exe_name = "dcmdjpeg" + ".exe" * sys.platform.startswith("win")

    # Common Windows install locations
    candidate_dirs = (
        "c:\\dcmtk",
        "c:\\Program Files",
        "c:\\Program Files\\dcmtk",
        "c:\\Program Files (x86)\\dcmtk",
    )
    for candidate_dir in candidate_dirs:
        candidate = os.path.join(candidate_dir, exe_name)
        if os.path.isfile(candidate):
            return [candidate]

    # Fall back to probing the PATH
    try:
        subprocess.check_call([exe_name, "--version"])
    except Exception:
        return None
    return [exe_name]
def get_gdcmconv_exe():
    """Locate the GDCM ``gdcmconv`` executable.

    Probes the PATH first, then scans "c:\\Program Files" for GDCM
    installs (highest version first). Returns the command plus the
    ``--raw`` flag as a list, or None if not found.
    """
    exe_name = "gdcmconv" + ".exe" * sys.platform.startswith("win")

    # Maybe it's on the path
    try:
        subprocess.check_call([exe_name, "--version"])
    except Exception:
        pass
    else:
        return [exe_name, "--raw"]

    # Collect (version-suffix, directory) pairs for installed GDCM versions
    candidates = []
    for base_dir in [r"c:\Program Files"]:
        if not os.path.isdir(base_dir):
            continue
        for dname in os.listdir(base_dir):
            if dname.lower().startswith("gdcm"):
                suffix = dname[4:].strip()
                candidates.append((suffix, os.path.join(base_dir, dname)))
    # Sort so higher versions are tried earlier
    candidates.sort(reverse=True)

    # Pick the first directory that actually contains the executable
    for _, dirname in candidates:
        for exe_path in (
            os.path.join(dirname, "gdcmconv.exe"),
            os.path.join(dirname, "bin", "gdcmconv.exe"),
        ):
            if os.path.isfile(exe_path):
                return [exe_path, "--raw"]
    return None
class DicomFormat(Format):
    """See :mod:`imageio.plugins.dicom`"""

    def _can_read(self, request):
        """Return True if the target is a DICOM file or a directory with one."""
        # If user URI was a directory, we check whether it has a DICOM file
        if os.path.isdir(request.filename):
            files = os.listdir(request.filename)
            for fname in sorted(files):  # Sorting make it consistent
                filename = os.path.join(request.filename, fname)
                if os.path.isfile(filename) and "DICOMDIR" not in fname:
                    with open(filename, "rb") as f:
                        first_bytes = read_n_bytes(f, 140)
                    # DICOM magic "DICM" sits after a 128-byte preamble
                    return first_bytes[128:132] == b"DICM"
            else:
                return False
        # Check
        return request.firstbytes[128:132] == b"DICM"

    def _can_write(self, request):
        # We cannot save yet. May be possible if we will used pydicom as
        # a backend.
        return False

    # --

    class Reader(Format.Reader):
        # Directories for which a "compressed data" warning was already
        # shown (class-level, so shared across Reader instances).
        _compressed_warning_dirs = set()

        def _open(self, progress=True):
            """Read the DICOM file (auto-converting compressed data if possible)."""
            if not _dicom:
                load_lib()
            if os.path.isdir(self.request.filename):
                # A dir can be given if the user used the format explicitly
                self._info = {}
                self._data = None
            else:
                # Read the given dataset now ...
                try:
                    dcm = _dicom.SimpleDicomReader(self.request.get_file())
                except _dicom.CompressedDicom as err:
                    # We cannot do this on our own. Perhaps with some help ...
                    cmd = get_gdcmconv_exe()
                    if not cmd and "JPEG" in str(err):
                        cmd = get_dcmdjpeg_exe()
                    if not cmd:
                        # No converter available: re-raise with advice
                        msg = err.args[0].replace("using", "installing")
                        msg = msg.replace("convert", "auto-convert")
                        err.args = (msg,)
                        raise
                    else:
                        # Convert to a raw sibling file and read that instead
                        fname1 = self.request.get_local_filename()
                        fname2 = fname1 + ".raw"
                        try:
                            subprocess.check_call(cmd + [fname1, fname2])
                        except Exception:
                            raise err
                        d = os.path.dirname(fname1)
                        if d not in self._compressed_warning_dirs:
                            self._compressed_warning_dirs.add(d)
                            logger.warning(
                                "DICOM file contained compressed data. "
                                + "Autoconverting with "
                                + cmd[0]
                                + " (this warning is shown once for each directory)"
                            )
                        dcm = _dicom.SimpleDicomReader(fname2)

                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            # Initialize series, list of DicomSeries objects
            self._series = None  # only created if needed

            # Set progress indicator
            if isinstance(progress, BaseProgressIndicator):
                self._progressIndicator = progress
            elif progress is True:
                p = StdoutProgressIndicator("Reading DICOM")
                self._progressIndicator = p
            elif progress in (None, False):
                self._progressIndicator = BaseProgressIndicator("Dummy")
            else:
                raise ValueError("Invalid value for progress.")

        def _close(self):
            # Clean up
            self._info = None
            self._data = None
            self._series = None

        @property
        def series(self):
            """DicomSeries objects for the directory, created lazily."""
            if self._series is None:
                pi = self._progressIndicator
                self._series = _dicom.process_directory(self.request, pi)
            return self._series

        def _get_length(self):
            """Return the image count, depending on the requested read mode."""
            if self._data is None:
                # Lazily load the first file of the first series
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            if self.request.mode[1] == "i":
                # User expects one, but lets be honest about this file
                return nslices
            elif self.request.mode[1] == "I":
                # User expects multiple, if this file has multiple slices, ok.
                # Otherwise we have to check the series.
                if nslices > 1:
                    return nslices
                else:
                    return sum([len(serie) for serie in self.series])
            elif self.request.mode[1] == "v":
                # User expects a volume, if this file has one, ok.
                # Otherwise we have to check the series
                if nslices > 1:
                    return 1
                else:
                    return len(self.series)  # We assume one volume per series
            elif self.request.mode[1] == "V":
                # User expects multiple volumes. We have to check the series
                return len(self.series)  # We assume one volume per series
            else:
                raise RuntimeError("DICOM plugin should know what to expect.")

        def _get_slice_data(self, index):
            """Return (slice, info) for *index* from the already-loaded file."""
            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            # Allow index >1 only if this file contains >1
            if nslices > 1:
                return self._data[index], self._info
            elif index == 0:
                return self._data, self._info
            else:
                raise IndexError("Dicom file contains only one slice.")

        def _get_data(self, index):
            """Return (array, info) for *index*, honoring the read mode."""
            if self._data is None:
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            if self.request.mode[1] == "i":
                return self._get_slice_data(index)
            elif self.request.mode[1] == "I":
                # Return slice from volume, or return item from series
                if index == 0 and nslices > 1:
                    return self._data[index], self._info
                else:
                    L = []
                    for serie in self.series:
                        L.extend([dcm_ for dcm_ in serie])
                    return L[index].get_numpy_array(), L[index].info
            elif self.request.mode[1] in "vV":
                # Return volume or series
                if index == 0 and nslices > 1:
                    return self._data, self._info
                else:
                    return (
                        self.series[index].get_numpy_array(),
                        self.series[index].info,
                    )
            # mode is `?` (typically because we are using V3). If there is a
            # series (multiple files), index refers to the element of the
            # series and we read volumes. If there is no series, index
            # refers to the slice in the volume and we read "flat" images.
            elif len(self.series) > 1:
                # mode is `?` and there are multiple series. Each series is a ndimage.
                return (
                    self.series[index].get_numpy_array(),
                    self.series[index].info,
                )
            else:
                # mode is `?` and there is only one series. Each slice is an ndimage.
                return self._get_slice_data(index)

        def _get_meta_data(self, index):
            """Return the info dict for *index* (or the file's info if None)."""
            if self._data is None:
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            # Default is the meta data of the given file, or the "first" file.
            if index is None:
                return self._info

            if self.request.mode[1] == "i":
                return self._info
            elif self.request.mode[1] == "I":
                # Return slice from volume, or return item from series
                if index == 0 and nslices > 1:
                    return self._info
                else:
                    L = []
                    for serie in self.series:
                        L.extend([dcm_ for dcm_ in serie])
                    return L[index].info
            elif self.request.mode[1] in "vV":
                # Return volume or series
                if index == 0 and nslices > 1:
                    return self._info
                else:
                    return self.series[index].info
            else:  # pragma: no cover
                raise ValueError("DICOM plugin should know what to expect.")
145
.CondaPkg/env/Lib/site-packages/imageio/plugins/example.py
vendored
Normal file
145
.CondaPkg/env/Lib/site-packages/imageio/plugins/example.py
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Example plugin. You can use this as a template for your own plugin.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class DummyFormat(Format):
    """The dummy format is an example format that does nothing.

    It will never report that it can read or write a file. When
    explicitly asked to read, it simply returns the raw bytes. When
    explicitly asked to write, it raises an error.

    This docstring is what the user sees with ``help('thisformat')``.

    Parameters for reading
    ----------------------
    Specify arguments in numpy doc style here.

    Parameters for saving
    ---------------------
    Specify arguments in numpy doc style here.

    """

    def _can_read(self, request):
        # Called while the format manager searches for a format to read a
        # certain image; return True if this format can do it.
        #
        # The manager knows each format's extensions and modes. It first
        # asks the formats that *seem* able to read the file; if none can,
        # it asks the remaining formats (the extension might be missing).
        # That lets a format provide functionality for extra extensions
        # while other plugins keep preference for their own.
        #
        # A format that says yes should live up to it, ideally by checking
        # request.firstbytes for a recognizable header.
        #
        # Useful request attributes:
        #   request.filename    representation of the source (reporting only)
        #   request.firstbytes  the first 256 bytes of the file
        #   request.mode[0]     read or write mode
        return True if request.extension in self.extensions else None

    def _can_write(self, request):
        # Called while the format manager searches for a format to write a
        # certain image; return True if this format can do it. Matching on
        # the registered extensions suffices in most cases.
        return True if request.extension in self.extensions else None

    # -- reader

    class Reader(Format.Reader):
        def _open(self, some_option=False, length=1):
            # User-specified kwargs land here (also reachable through
            # self.request.kwargs). The request object offers two ways to
            # access the data -- use exactly one:
            #   - request.get_file()            a file object (preferred)
            #   - request.get_local_filename()  a path on the file system
            self._fp = self.request.get_file()
            self._length = length  # passed as an arg in this case for testing
            self._data = None

        def _close(self):
            # Nothing to clean up: the request object closes self._fp.
            pass

        def _get_length(self):
            # Number of images available; np.inf is allowed for streams.
            return self._length

        def _get_data(self, index):
            # Return the (array, meta-dict) pair for the given index.
            if index >= self._length:
                raise IndexError("Image index %i > %i" % (index, self._length))
            # Lazily read all bytes once, then reuse them for every index.
            if self._data is None:
                self._data = self._fp.read()
            # Expose the raw bytes as an Nx1 uint8 array, with dummy meta.
            im = np.frombuffer(self._data, dtype="uint8")
            im = im.reshape(len(im), 1)
            return im, {}

        def _get_meta_data(self, index):
            # index is None for global meta data; this format has none.
            return {}

    # -- writer

    class Writer(Format.Writer):
        def _open(self, flags=0):
            # User-specified kwargs land here (also reachable through
            # self.request.kwargs). The request object offers two ways to
            # write -- use exactly one:
            #   - request.get_file()            a file object (preferred)
            #   - request.get_local_filename()  a path on the file system
            self._fp = self.request.get_file()

        def _close(self):
            # Nothing to clean up: the request object closes self._fp.
            pass

        def _append_data(self, im, meta):
            # Would process the given image and meta data.
            raise RuntimeError("The dummy format cannot write image data.")

        def set_meta_data(self, meta):
            # Would process global meta data; supporting this is optional.
            raise RuntimeError("The dummy format cannot write meta data.")
|
||||
|
||||
|
||||
# Register. You register an *instance* of a Format class. Here specify:
format = DummyFormat(
    "dummy",  # short name
    "An example format that does nothing.",  # one line descr.
    ".foobar .nonexistentext",  # list of extensions
    "iI",  # modes, characters in iIvV
)
# NOTE(review): the module-level name `format` shadows the builtin
# `format()`; kept as-is because it is the module's public attribute.
formats.add_format(format)
|
||||
95
.CondaPkg/env/Lib/site-packages/imageio/plugins/feisem.py
vendored
Normal file
95
.CondaPkg/env/Lib/site-packages/imageio/plugins/feisem.py
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read TIFF from FEI SEM microscopes.
|
||||
|
||||
Backend Library: internal
|
||||
|
||||
This format is based on :mod:`TIFF <imageio.plugins.tifffile>`, and supports the
|
||||
same parameters. FEI microscopes append metadata as ASCII text at the end of the
|
||||
file, which this reader correctly extracts.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
discard_watermark : bool
|
||||
If True (default), discard the bottom rows of the image, which
|
||||
contain no image data, only a watermark with metadata.
|
||||
watermark_height : int
|
||||
The height in pixels of the FEI watermark. The default is 70.
|
||||
|
||||
See Also
|
||||
--------
|
||||
:mod:`imageio.plugins.tifffile`
|
||||
|
||||
"""
|
||||
|
||||
|
||||
from .tifffile import TiffFormat
|
||||
|
||||
|
||||
class FEISEMFormat(TiffFormat):
    """See :mod:`imageio.plugins.feisem`"""

    def _can_write(self, request):
        # FEI-SEM is a read-only format.
        return False

    class Reader(TiffFormat.Reader):
        def _get_data(self, index=0, discard_watermark=True, watermark_height=70):
            """Get image and metadata for the given index.

            FEI images usually (always?) carry a watermark strip at the
            bottom of the frame, 70 pixels high. It contains no
            information that is not already in the metadata, so it is
            discarded by default.
            """
            im, meta = super(FEISEMFormat.Reader, self)._get_data(index)
            if discard_watermark:
                im = im[:-watermark_height]
            return im, meta

        def _get_meta_data(self, index=None):
            """Read the metadata of an FEI SEM TIFF.

            The metadata is included as ASCII text at the end of the
            file; *index*, if provided, is ignored.

            Returns
            -------
            metadata : dict
                Dictionary of metadata.
            """
            # Parse only once per reader; cache the result.
            if hasattr(self, "_fei_meta"):
                return self._fei_meta

            metadata = {"root": {}}
            section = "root"
            in_metadata = False
            filename = self.request.get_local_filename()
            with open(filename, encoding="utf8", errors="ignore") as stream:
                for raw_line in stream:
                    # Skip the binary TIFF body until the first "Date="
                    # line, which marks the start of the ASCII metadata.
                    if not in_metadata and not raw_line.startswith("Date="):
                        continue
                    in_metadata = True
                    line = raw_line.rstrip()
                    if line.startswith("["):
                        # "[Section]" header opens a new metadata group.
                        section = line.lstrip("[").rstrip("]")
                        metadata[section] = {}
                    elif "=" in line:  # ignore empty and irrelevant lines
                        key, val = line.split("=", maxsplit=1)
                        # Prefer int, then float, else keep the string.
                        for cast in (int, float):
                            try:
                                val = cast(val)
                            except ValueError:
                                continue
                            break
                        metadata[section][key] = val

            if not metadata["root"] and len(metadata) == 1:
                raise ValueError("Input file %s contains no FEI metadata." % filename)

            self._fei_meta = metadata
            return metadata
|
||||
729
.CondaPkg/env/Lib/site-packages/imageio/plugins/ffmpeg.py
vendored
Normal file
729
.CondaPkg/env/Lib/site-packages/imageio/plugins/ffmpeg.py
vendored
Normal file
@@ -0,0 +1,729 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read/Write video using FFMPEG
|
||||
|
||||
.. note::
|
||||
We are in the process of (slowly) replacing this plugin with a new one that
|
||||
is based on `pyav <https://pyav.org/docs/stable/>`_. It is faster and more
|
||||
flexible than the plugin documented here. Check the :mod:`pyav
|
||||
plugin's documentation <imageio.plugins.pyav>` for more information about
|
||||
this plugin.
|
||||
|
||||
Backend Library: https://github.com/imageio/imageio-ffmpeg
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[ffmpeg]
|
||||
|
||||
|
||||
The ffmpeg format provides reading and writing for a wide range of movie formats
|
||||
such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from
|
||||
webcams and USB cameras. It is based on ffmpeg and is inspired by/based `moviepy
|
||||
<https://github.com/Zulko/moviepy/>`_ by Zulko.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
fps : scalar
|
||||
The number of frames per second of the input stream. Default None (i.e.
|
||||
read at the file's native fps). One can use this for files with a
|
||||
variable fps, or in cases where imageio is unable to correctly detect
|
||||
the fps. In case of trouble opening camera streams, it may help to set an
|
||||
explicit fps value matching a framerate supported by the camera.
|
||||
loop : bool
|
||||
If True, the video will rewind as soon as a frame is requested
|
||||
beyond the last frame. Otherwise, IndexError is raised. Default False.
|
||||
Setting this to True will internally call ``count_frames()``,
|
||||
and set the reader's length to that value instead of inf.
|
||||
size : str | tuple
|
||||
The frame size (i.e. resolution) to read the images, e.g.
|
||||
(100, 100) or "640x480". For camera streams, this allows setting
|
||||
the capture resolution. For normal video data, ffmpeg will
|
||||
rescale the data.
|
||||
dtype : str | type
|
||||
The dtype for the output arrays. Determines the bit-depth that
|
||||
is requested from ffmpeg. Supported dtypes: uint8, uint16.
|
||||
Default: uint8.
|
||||
pixelformat : str
|
||||
The pixel format for the camera to use (e.g. "yuyv422" or
|
||||
"gray"). The camera needs to support the format in order for
|
||||
this to take effect. Note that the images produced by this
|
||||
reader are always RGB.
|
||||
input_params : list
|
||||
List additional arguments to ffmpeg for input file options.
|
||||
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
|
||||
Example ffmpeg arguments to use aggressive error handling:
|
||||
['-err_detect', 'aggressive']
|
||||
output_params : list
|
||||
List additional arguments to ffmpeg for output file options (i.e. the
|
||||
stream being read by imageio).
|
||||
print_info : bool
|
||||
Print information about the video file as reported by ffmpeg.
|
||||
|
||||
Parameters for writing
|
||||
----------------------
|
||||
fps : scalar
|
||||
The number of frames per second. Default 10.
|
||||
codec : str
|
||||
the video codec to use. Default 'libx264', which represents the
|
||||
widely available mpeg4. Except when saving .wmv files, then the
|
||||
default is 'msmpeg4' which is more commonly supported for windows
|
||||
quality : float | None
|
||||
Video output quality. Default is 5. Uses variable bit rate. Highest
|
||||
quality is 10, lowest is 0. Set to None to prevent variable bitrate
|
||||
flags to FFMPEG so you can manually specify them using output_params
|
||||
instead. Specifying a fixed bitrate using 'bitrate' disables this
|
||||
parameter.
|
||||
bitrate : int | None
|
||||
Set a constant bitrate for the video encoding. Default is None causing
|
||||
'quality' parameter to be used instead. Better quality videos with
|
||||
smaller file sizes will result from using the 'quality' variable
|
||||
bitrate parameter rather than specifying a fixed bitrate with this
|
||||
parameter.
|
||||
pixelformat: str
|
||||
The output video pixel format. Default is 'yuv420p' which most widely
|
||||
supported by video players.
|
||||
input_params : list
|
||||
List additional arguments to ffmpeg for input file options (i.e. the
|
||||
stream that imageio provides).
|
||||
output_params : list
|
||||
List additional arguments to ffmpeg for output file options.
|
||||
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
|
||||
Example ffmpeg arguments to use only intra frames and set aspect ratio:
|
||||
['-intra', '-aspect', '16:9']
|
||||
ffmpeg_log_level: str
|
||||
Sets ffmpeg output log level. Default is "warning".
|
||||
Values can be "quiet", "panic", "fatal", "error", "warning", "info"
|
||||
"verbose", or "debug". Also prints the FFMPEG command being used by
|
||||
imageio if "info", "verbose", or "debug".
|
||||
macro_block_size: int
|
||||
Size constraint for video. Width and height, must be divisible by this
|
||||
number. If not divisible by this number imageio will tell ffmpeg to
|
||||
scale the image up to the next closest size
|
||||
divisible by this number. Most codecs are compatible with a macroblock
|
||||
size of 16 (default), some can go smaller (4, 8). To disable this
|
||||
automatic feature set it to None or 1, however be warned many players
|
||||
can't decode videos that are odd in size and some codecs will produce
|
||||
poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
|
||||
audio_path : str | None
|
||||
Audio path of any audio that needs to be written. Defaults to nothing,
|
||||
so no audio will be written. Please note, when writing shorter video
|
||||
than the original, ffmpeg will not truncate the audio track; it
|
||||
will maintain its original length and be longer than the video.
|
||||
audio_codec : str | None
|
||||
The audio codec to use. Defaults to nothing, but if an audio_path has
|
||||
been provided ffmpeg will attempt to set a default codec.
|
||||
|
||||
Notes
|
||||
-----
|
||||
If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to
|
||||
encode/decode H.264 (likely due to licensing concerns). If you need this
|
||||
format on anaconda install ``conda-forge/ffmpeg`` instead.
|
||||
|
||||
You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a
|
||||
specific ffmpeg executable.
|
||||
|
||||
To get the number of frames before having read them all, you can use the
|
||||
``reader.count_frames()`` method (the reader will then use
|
||||
``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames,
|
||||
note that this operation can take a few seconds on large files). Alternatively,
|
||||
the number of frames can be estimated from the fps and duration in the meta data
|
||||
(though these values themselves are not always present/reliable).
|
||||
|
||||
"""
|
||||
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import platform
|
||||
import threading
|
||||
import subprocess as sp
|
||||
import imageio_ffmpeg
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
|
||||
logger = logging.getLogger(__name__)

# Select the ffmpeg "input format" used for webcam capture on this OS.
if sys.platform.startswith("linux"):
    CAM_FORMAT = "video4linux2"
elif sys.platform.startswith("darwin"):
    CAM_FORMAT = "avfoundation"
elif sys.platform.startswith("win"):
    CAM_FORMAT = "dshow"  # dshow or vfwcap
else:  # pragma: no cover
    CAM_FORMAT = "unknown-cam-format"
|
||||
|
||||
|
||||
def download(directory=None, force_download=False):  # pragma: no cover
    """Deprecated: imageio no longer downloads ffmpeg itself.

    The parameters are accepted for backwards compatibility but ignored.

    Raises
    ------
    RuntimeError
        Always; install the ``imageio-ffmpeg`` wheel instead.
    """
    # Fix: the original message ended with a stray quote ("instead.'").
    raise RuntimeError(
        "imageio.ffmpeg.download() has been deprecated. "
        "Use 'pip install imageio-ffmpeg' instead."
    )
|
||||
|
||||
|
||||
# For backwards compatibility - we don't use this ourselves
def get_exe():  # pragma: no cover
    """Wrapper for imageio_ffmpeg.get_ffmpeg_exe()"""

    # Delegates entirely to the imageio-ffmpeg backend package.
    return imageio_ffmpeg.get_ffmpeg_exe()
|
||||
|
||||
|
||||
class FfmpegFormat(Format):
|
||||
"""Read/Write ImageResources using FFMPEG.
|
||||
|
||||
See :mod:`imageio.plugins.ffmpeg`
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Read from video stream?
|
||||
# Note that we could write the _video flag here, but a user might
|
||||
# select this format explicitly (and this code is not run)
|
||||
if re.match(r"<video(\d+)>", request.filename):
|
||||
return True
|
||||
|
||||
# Read from file that we know?
|
||||
if request.extension in self.extensions:
|
||||
return True
|
||||
|
||||
def _can_write(self, request):
|
||||
if request.extension in self.extensions:
|
||||
return True
|
||||
|
||||
# --

class Reader(Format.Reader):
    # Background frame-catcher for webcam streams; None for regular files.
    _frame_catcher = None
    # Generator from imageio_ffmpeg.read_frames; None until initialized.
    _read_gen = None

    def _get_cam_inputname(self, index):
        """Return the platform-specific ffmpeg input name for camera *index*."""
        if sys.platform.startswith("linux"):
            # "<videoN>" maps directly onto the v4l2 device node /dev/videoN.
            return "/dev/" + self.request._video[1:-1]

        elif sys.platform.startswith("win"):
            # Ask ffmpeg for list of dshow device names
            ffmpeg_api = imageio_ffmpeg
            cmd = [
                ffmpeg_api.get_ffmpeg_exe(),
                "-list_devices",
                "true",
                "-f",
                CAM_FORMAT,
                "-i",
                "dummy",
            ]
            # Set `shell=True` in sp.run to prevent popup of a command
            # line window in frozen applications. Note: this would be a
            # security vulnerability if user-input goes into the cmd.
            # Note that the ffmpeg process returns with exit code 1 when
            # using `-list_devices` (or `-list_options`), even if the
            # command is successful, so we set `check=False` explicitly.
            completed_process = sp.run(
                cmd,
                stdout=sp.PIPE,
                stderr=sp.PIPE,
                encoding="utf-8",
                shell=True,
                check=False,
            )

            # Return device name at index
            try:
                name = parse_device_names(completed_process.stderr)[index]
            except IndexError:
                raise IndexError("No ffdshow camera at index %i." % index)
            return "video=%s" % name

        elif sys.platform.startswith("darwin"):
            # Appears that newer ffmpeg builds don't support -list-devices
            # on OS X. But you can directly open the camera by index.
            name = str(index)
            return name

        else:  # pragma: no cover
            return "??"
|
||||
|
||||
def _open(
    self,
    loop=False,
    size=None,
    dtype=None,
    pixelformat=None,
    print_info=False,
    ffmpeg_params=None,
    input_params=None,
    output_params=None,
    fps=None,
):
    """Validate the kwargs, resolve the input name, and boot ffmpeg.

    See the module docstring for the meaning of the parameters.
    NOTE(review): ``fps`` is re-read from ``self.request.kwargs`` when
    the ffmpeg command is assembled in ``_initialize``; ``print_info``
    is not referenced inside this method.
    """
    # Get generator functions
    self._ffmpeg_api = imageio_ffmpeg
    # Process input args
    self._arg_loop = bool(loop)
    if size is None:
        self._arg_size = None
    elif isinstance(size, tuple):
        self._arg_size = "%ix%i" % size
    elif isinstance(size, str) and "x" in size:
        self._arg_size = size
    else:
        raise ValueError('FFMPEG size must be tuple of "NxM"')
    if pixelformat is None:
        pass
    elif not isinstance(pixelformat, str):
        raise ValueError("FFMPEG pixelformat must be str")
    if dtype is None:
        self._dtype = np.dtype("uint8")
    else:
        self._dtype = np.dtype(dtype)
        allowed_dtypes = ["uint8", "uint16"]
        if self._dtype.name not in allowed_dtypes:
            raise ValueError(
                "dtype must be one of: {}".format(", ".join(allowed_dtypes))
            )
    self._arg_pixelformat = pixelformat
    self._arg_input_params = input_params or []
    self._arg_output_params = output_params or []
    self._arg_input_params += ffmpeg_params or []  # backward compat
    # Write "_video"_arg - indicating webcam support
    self.request._video = None
    regex_match = re.match(r"<video(\d+)>", self.request.filename)
    if regex_match:
        self.request._video = self.request.filename
    # Get local filename
    if self.request._video:
        index = int(regex_match.group(1))
        self._filename = self._get_cam_inputname(index)
    else:
        self._filename = self.request.get_local_filename()
        # When passed to ffmpeg on command line, carets need to be escaped.
        self._filename = self._filename.replace("^", "^^")
    # Determine pixel format and depth
    self._depth = 3
    if self._dtype.name == "uint8":
        self._pix_fmt = "rgb24"
        self._bytes_per_channel = 1
    else:
        self._pix_fmt = "rgb48le"
        self._bytes_per_channel = 2
    # Initialize parameters
    self._pos = -1
    self._meta = {"plugin": "ffmpeg"}
    self._lastread = None

    # Calculating this from fps and duration is not accurate,
    # and calculating it exactly with ffmpeg_api.count_frames_and_secs
    # takes too long to do for each video. But we need it for looping.
    self._nframes = float("inf")
    if self._arg_loop and not self.request._video:
        self._nframes = self.count_frames()
    self._meta["nframes"] = self._nframes

    # Specify input framerate? (only on macOS)
    # Ideally we'd get the supported framerate from the metadata, but we get the
    # metadata when we boot ffmpeg ... maybe we could refactor this so we can
    # get the metadata beforehand, but for now we'll just give it 2 tries on MacOS,
    # one with fps 30 and one with fps 15.
    need_input_fps = need_output_fps = False
    if self.request._video and platform.system().lower() == "darwin":
        if "-framerate" not in str(self._arg_input_params):
            need_input_fps = True
        if not self.request.kwargs.get("fps", None):
            need_output_fps = True
    if need_input_fps:
        self._arg_input_params.extend(["-framerate", str(float(30))])
    if need_output_fps:
        self._arg_output_params.extend(["-r", str(float(30))])

    # Start ffmpeg subprocess and get meta information
    try:
        self._initialize()
    except IndexError:
        # Specify input framerate again, this time different.
        if need_input_fps:
            self._arg_input_params[-1] = str(float(15))
            self._initialize()
        else:
            raise

    # For cameras, create thread that keeps reading the images
    if self.request._video:
        self._frame_catcher = FrameCatcher(self._read_gen)

    # For reference - but disabled, because it is inaccurate
    # if self._meta["nframes"] == float("inf"):
    #     if self._meta.get("fps", 0) > 0:
    #         if self._meta.get("duration", 0) > 0:
    #             n = round(self._meta["duration"] * self._meta["fps"])
    #             self._meta["nframes"] = int(n)
|
||||
|
||||
def _close(self):
|
||||
# First close the frame catcher, because we cannot close the gen
|
||||
# if the frame catcher thread is using it
|
||||
if self._frame_catcher is not None:
|
||||
self._frame_catcher.stop_me()
|
||||
self._frame_catcher = None
|
||||
if self._read_gen is not None:
|
||||
self._read_gen.close()
|
||||
self._read_gen = None
|
||||
|
||||
def count_frames(self):
    """Count the number of frames. Note that this can take a few
    seconds for large files. Also note that it counts the number
    of frames in the original video and does not take a given fps
    into account.
    """
    # Exact count via imageio-ffmpeg; element 0 of the returned pair is
    # the frame count (element 1 is the duration in seconds).
    return self._ffmpeg_api.count_frames_and_secs(self._filename)[0]
|
||||
|
||||
def _get_length(self):
    """Return the number of frames (``inf`` unless ``loop=True``, in
    which case ``_open`` established the exact count)."""
    return self._nframes  # only not inf if loop is True
|
||||
|
||||
def _get_data(self, index):
    """Read the frame at *index*; return ``(frame, dict(new=bool))``.

    Note for coders: getting an arbitrary frame in the video with
    ffmpeg can be painfully slow if some decoding has to be done.
    This function tries to avoid fetching arbitrary frames whenever
    possible, by moving between adjacent frames."""
    # Modulo index (for looping)
    if self._arg_loop and self._nframes < float("inf"):
        index %= self._nframes

    if index == self._pos:
        # Same frame as last time: reuse it, flagged as not new.
        return self._lastread, dict(new=False)
    elif index < 0:
        raise IndexError("Frame index must be >= 0")
    elif index >= self._nframes:
        raise IndexError("Reached end of video")
    else:
        # Seeking backwards or far ahead restarts ffmpeg at the target
        # position; a short hop forward just discards the frames between.
        if (index < self._pos) or (index > self._pos + 100):
            self._initialize(index)
        else:
            self._skip_frames(index - self._pos - 1)
        result, is_new = self._read_frame()
        self._pos = index
        return result, dict(new=is_new)
|
||||
|
||||
def _get_meta_data(self, index):
    """Return the stream-level meta data dict; the same dict is
    returned for every *index*."""
    return self._meta
|
||||
|
||||
def _initialize(self, index=0):
    """(Re)start the ffmpeg subprocess, optionally seeking to *index*.

    Builds the input/output argument lists, creates the read generator,
    and pulls its first item to obtain the stream meta data.
    """
    # Close the current generator, and thereby terminate its subprocess
    if self._read_gen is not None:
        self._read_gen.close()

    iargs = []  # arguments placed before -i (input options)
    oargs = []  # arguments placed after -i (output/pipe options)

    # Create input args
    iargs += self._arg_input_params
    if self.request._video:
        iargs += ["-f", CAM_FORMAT]
        if self._arg_pixelformat:
            iargs += ["-pix_fmt", self._arg_pixelformat]
        if self._arg_size:
            iargs += ["-s", self._arg_size]
    elif index > 0:  # re-initialize / seek
        # Note: only works if we initialized earlier, and now have meta
        # Some info here: https://trac.ffmpeg.org/wiki/Seeking
        # There are two ways to seek, one before -i (input_params) and
        # after (output_params). The former is fast, because it uses
        # keyframes, the latter is slow but accurate. According to
        # the article above, the fast method should also be accurate
        # from ffmpeg version 2.1, however in version 4.1 our tests
        # start failing again. Not sure why, but we can solve this
        # by combining slow and fast. Seek the long stretch using
        # the fast method, and seek the last 10s the slow way.
        starttime = index / self._meta["fps"]
        seek_slow = min(10, starttime)
        seek_fast = starttime - seek_slow
        # We used to have this epsilon earlier, when we did not use
        # the slow seek. I don't think we need it anymore.
        # epsilon = -1 / self._meta["fps"] * 0.1
        iargs += ["-ss", "%.06f" % (seek_fast)]
        oargs += ["-ss", "%.06f" % (seek_slow)]

    # Output args, for writing to pipe
    if self._arg_size:
        oargs += ["-s", self._arg_size]
    if self.request.kwargs.get("fps", None):
        fps = float(self.request.kwargs["fps"])
        oargs += ["-r", "%.02f" % fps]
    oargs += self._arg_output_params

    # Get pixelformat and bytes per pixel
    pix_fmt = self._pix_fmt
    bpp = self._depth * self._bytes_per_channel

    # Create generator
    rf = self._ffmpeg_api.read_frames
    self._read_gen = rf(
        self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs
    )

    # Read meta data. This starts the generator (and ffmpeg subprocess)
    if self.request._video:
        # With cameras, catch error and turn into IndexError
        try:
            meta = self._read_gen.__next__()
        except IOError as err:
            err_text = str(err)
            if "darwin" in sys.platform:
                if "Unknown input format: 'avfoundation'" in err_text:
                    err_text += (
                        "Try installing FFMPEG using "
                        "home brew to get a version with "
                        "support for cameras."
                    )
            raise IndexError(
                "No (working) camera at {}.\n\n{}".format(
                    self.request._video, err_text
                )
            )
        else:
            self._meta.update(meta)
    elif index == 0:
        self._meta.update(self._read_gen.__next__())
    else:
        self._read_gen.__next__()  # we already have meta data
|
||||
|
||||
def _skip_frames(self, n=1):
|
||||
"""Reads and throws away n frames"""
|
||||
for i in range(n):
|
||||
self._read_gen.__next__()
|
||||
self._pos += n
|
||||
|
||||
def _read_frame(self):
|
||||
# Read and convert to numpy array
|
||||
w, h = self._meta["size"]
|
||||
framesize = w * h * self._depth * self._bytes_per_channel
|
||||
# t0 = time.time()
|
||||
|
||||
# Read frame
|
||||
if self._frame_catcher: # pragma: no cover - camera thing
|
||||
s, is_new = self._frame_catcher.get_frame()
|
||||
else:
|
||||
s = self._read_gen.__next__()
|
||||
is_new = True
|
||||
|
||||
# Check
|
||||
if len(s) != framesize:
|
||||
raise RuntimeError(
|
||||
"Frame is %i bytes, but expected %i." % (len(s), framesize)
|
||||
)
|
||||
|
||||
result = np.frombuffer(s, dtype=self._dtype).copy()
|
||||
result = result.reshape((h, w, self._depth))
|
||||
# t1 = time.time()
|
||||
# print('etime', t1-t0)
|
||||
|
||||
# Store and return
|
||||
self._lastread = result
|
||||
return result, is_new
|
||||
|
||||
# --

class Writer(Format.Writer):
    # Write generator; None until the ffmpeg process is started.
    _write_gen = None

    def _open(
        self,
        fps=10,
        codec="libx264",
        bitrate=None,
        pixelformat="yuv420p",
        ffmpeg_params=None,
        input_params=None,
        output_params=None,
        ffmpeg_log_level="quiet",
        quality=5,
        macro_block_size=16,
        audio_path=None,
        audio_codec=None,
    ):
        """Prepare for writing; the ffmpeg process itself starts later.

        NOTE(review): the keyword arguments are deliberately not stored
        here -- they are re-read from ``self.request.kwargs`` when the
        ffmpeg command is assembled (see ``_initialize``).
        """
        self._ffmpeg_api = imageio_ffmpeg
        self._filename = self.request.get_local_filename()
        # Frame properties; fixed by the first frame that is appended.
        self._pix_fmt = None
        self._depth = None
        self._size = None
|
||||
|
||||
def _close(self):
|
||||
if self._write_gen is not None:
|
||||
self._write_gen.close()
|
||||
self._write_gen = None
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
# Get props of image
|
||||
h, w = im.shape[:2]
|
||||
size = w, h
|
||||
depth = 1 if im.ndim == 2 else im.shape[2]
|
||||
|
||||
# Ensure that image is in uint8
|
||||
im = image_as_uint(im, bitdepth=8)
|
||||
# To be written efficiently, ie. without creating an immutable
|
||||
# buffer, by calling im.tobytes() the array must be contiguous.
|
||||
if not im.flags.c_contiguous:
|
||||
# checkign the flag is a micro optimization.
|
||||
# the image will be a numpy subclass. See discussion
|
||||
# https://github.com/numpy/numpy/issues/11804
|
||||
im = np.ascontiguousarray(im)
|
||||
|
||||
# Set size and initialize if not initialized yet
|
||||
if self._size is None:
|
||||
map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"}
|
||||
self._pix_fmt = map.get(depth, None)
|
||||
if self._pix_fmt is None:
|
||||
raise ValueError("Image must have 1, 2, 3 or 4 channels")
|
||||
self._size = size
|
||||
self._depth = depth
|
||||
self._initialize()
|
||||
|
||||
# Check size of image
|
||||
if size != self._size:
|
||||
raise ValueError("All images in a movie should have same size")
|
||||
if depth != self._depth:
|
||||
raise ValueError(
|
||||
"All images in a movie should have same " "number of channels"
|
||||
)
|
||||
|
||||
assert self._write_gen is not None # Check status
|
||||
|
||||
# Write. Yes, we can send the data in as a numpy array
|
||||
self._write_gen.send(im)
|
||||
|
||||
def set_meta_data(self, meta):
|
||||
raise RuntimeError(
|
||||
"The ffmpeg format does not support setting " "meta data."
|
||||
)
|
||||
|
||||
def _initialize(self):
|
||||
# Close existing generator
|
||||
if self._write_gen is not None:
|
||||
self._write_gen.close()
|
||||
|
||||
# Get parameters
|
||||
# Use None to let imageio-ffmpeg (or ffmpeg) select good results
|
||||
fps = self.request.kwargs.get("fps", 10)
|
||||
codec = self.request.kwargs.get("codec", None)
|
||||
bitrate = self.request.kwargs.get("bitrate", None)
|
||||
quality = self.request.kwargs.get("quality", None)
|
||||
input_params = self.request.kwargs.get("input_params") or []
|
||||
output_params = self.request.kwargs.get("output_params") or []
|
||||
output_params += self.request.kwargs.get("ffmpeg_params") or []
|
||||
pixelformat = self.request.kwargs.get("pixelformat", None)
|
||||
macro_block_size = self.request.kwargs.get("macro_block_size", 16)
|
||||
ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None)
|
||||
audio_path = self.request.kwargs.get("audio_path", None)
|
||||
audio_codec = self.request.kwargs.get("audio_codec", None)
|
||||
|
||||
macro_block_size = macro_block_size or 1 # None -> 1
|
||||
|
||||
# Create generator
|
||||
self._write_gen = self._ffmpeg_api.write_frames(
|
||||
self._filename,
|
||||
self._size,
|
||||
pix_fmt_in=self._pix_fmt,
|
||||
pix_fmt_out=pixelformat,
|
||||
fps=fps,
|
||||
quality=quality,
|
||||
bitrate=bitrate,
|
||||
codec=codec,
|
||||
macro_block_size=macro_block_size,
|
||||
ffmpeg_log_level=ffmpeg_log_level,
|
||||
input_params=input_params,
|
||||
output_params=output_params,
|
||||
audio_path=audio_path,
|
||||
audio_codec=audio_codec,
|
||||
)
|
||||
|
||||
# Seed the generator (this is where the ffmpeg subprocess starts)
|
||||
self._write_gen.send(None)
|
||||
|
||||
|
||||
class FrameCatcher(threading.Thread):
    """Thread to keep reading the frame data from stdout. This is
    useful when streaming from a webcam. Otherwise, if the user code
    does not grab frames fast enough, the buffer will fill up, leading
    to lag, and ffmpeg can also stall (experienced on Linux). The
    get_frame() method always returns the last available image.
    """

    def __init__(self, gen):
        # gen: generator yielding raw frame bytes (e.g. imageio-ffmpeg reader)
        self._gen = gen
        self._frame = None  # most recent frame; None until the first arrives
        self._frame_is_new = False  # True until get_frame() consumes the frame
        self._lock = threading.RLock()  # guards _frame / _frame_is_new
        threading.Thread.__init__(self)
        self.daemon = True  # do not let this thread hold up Python shutdown
        self._should_stop = False
        self.start()

    def stop_me(self):
        # Ask the worker loop to exit, then spin until the thread has died
        self._should_stop = True
        while self.is_alive():
            time.sleep(0.001)

    def get_frame(self):
        # Return (frame, is_new) for the most recent frame. Blocks until the
        # first frame has been captured; is_new is True only on the first
        # call after a fresh frame arrived.
        while self._frame is None:  # pragma: no cover - an init thing
            time.sleep(0.001)
        with self._lock:
            is_new = self._frame_is_new
            self._frame_is_new = False  # reset
            return self._frame, is_new

    def run(self):
        # This runs in the worker thread
        try:
            while not self._should_stop:
                time.sleep(0)  # give control to other threads
                frame = self._gen.__next__()
                with self._lock:
                    self._frame = frame
                    self._frame_is_new = True
        except (StopIteration, EOFError):
            # Source exhausted or the ffmpeg pipe closed: exit quietly
            pass
|
||||
|
||||
|
||||
def parse_device_names(ffmpeg_output):
    """Parse the output of the ffmpeg -list-devices command"""
    # Collect [friendly_name, alt_name] pairs for the video devices
    device_names = []
    in_video_devices = False
    for raw_line in ffmpeg_output.splitlines():
        if not raw_line.startswith("[dshow"):
            continue
        logger.debug(raw_line)
        line = raw_line.split("]", 1)[1].strip()
        if in_video_devices and line.startswith('"'):
            # A quoted line is a device's friendly name
            device_names.append([line[1:-1], ""])
        elif in_video_devices and line.lower().startswith("alternative name"):
            alt_name = line.split(" name ", 1)[1].strip()[1:-1]
            if sys.platform.startswith("win"):
                alt_name = alt_name.replace("&", "^&")  # Tested to work
            else:
                alt_name = alt_name.replace("&", "\\&")  # Does this work?
            device_names[-1][-1] = alt_name
        elif "video devices" in line:
            in_video_devices = True
        elif "devices" in line:
            # set False for subsequent "devices" sections
            in_video_devices = False
    # Post-process, see #441
    # prefer friendly names, use alt name if two cams have same friendly name
    resolved = []
    for friendly_name, alt_name in device_names:
        if friendly_name not in resolved:
            resolved.append(friendly_name)
        elif alt_name:
            resolved.append(alt_name)
        else:
            resolved.append(friendly_name)  # duplicate, but not much we can do
    return resolved
|
||||
126
.CondaPkg/env/Lib/site-packages/imageio/plugins/fits.py
vendored
Normal file
126
.CondaPkg/env/Lib/site-packages/imageio/plugins/fits.py
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read FITS files.
|
||||
|
||||
Backend Library: `Astropy <https://www.astropy.org/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[fits]
|
||||
|
||||
Flexible Image Transport System (FITS) is an open standard defining a
|
||||
digital file format useful for storage, transmission and processing of
|
||||
scientific and other images. FITS is the most commonly used digital
|
||||
file format in astronomy.
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cache : bool
|
||||
If the file name is a URL, `~astropy.utils.data.download_file` is used
|
||||
to open the file. This specifies whether or not to save the file
|
||||
locally in Astropy's download cache (default: `True`).
|
||||
uint : bool
|
||||
Interpret signed integer data where ``BZERO`` is the
|
||||
central value and ``BSCALE == 1`` as unsigned integer
|
||||
data. For example, ``int16`` data with ``BZERO = 32768``
|
||||
and ``BSCALE = 1`` would be treated as ``uint16`` data.
|
||||
|
||||
Note, for backward compatibility, the kwarg **uint16** may
|
||||
be used instead. The kwarg was renamed when support was
|
||||
added for integers of any size.
|
||||
ignore_missing_end : bool
|
||||
Do not issue an exception when opening a file that is
|
||||
missing an ``END`` card in the last header.
|
||||
checksum : bool or str
|
||||
If `True`, verifies that both ``DATASUM`` and
|
||||
``CHECKSUM`` card values (when present in the HDU header)
|
||||
match the header and data of all HDU's in the file. Updates to a
|
||||
file that already has a checksum will preserve and update the
|
||||
existing checksums unless this argument is given a value of
|
||||
'remove', in which case the CHECKSUM and DATASUM values are not
|
||||
checked, and are removed when saving changes to the file.
|
||||
disable_image_compression : bool, optional
|
||||
If `True`, treats compressed image HDU's like normal
|
||||
binary table HDU's.
|
||||
do_not_scale_image_data : bool
|
||||
If `True`, image data is not scaled using BSCALE/BZERO values
|
||||
when read.
|
||||
ignore_blank : bool
|
||||
If `True`, the BLANK keyword is ignored if present.
|
||||
scale_back : bool
|
||||
If `True`, when saving changes to a file that contained scaled
|
||||
image data, restore the data to the original type and reapply the
|
||||
original BSCALE/BZERO values. This could lead to loss of accuracy
|
||||
if scaling back to integer values after performing floating point
|
||||
operations on the data.
|
||||
|
||||
"""
|
||||
|
||||
from ..core import Format
|
||||
|
||||
_fits = None # lazily loaded
|
||||
|
||||
|
||||
def load_lib():
    """Lazily import ``astropy.io.fits`` and cache it in the module-global
    ``_fits``.

    Returns the ``astropy.io.fits`` module. Raises ImportError with
    installation instructions when astropy is not available.
    """
    global _fits
    try:
        from astropy.io import fits as _fits
    except ImportError as err:
        # Fix: the original adjacent string literals were missing a space
        # ("package.Please ..."). Also chain the original error so the
        # real cause stays visible in the traceback.
        raise ImportError(
            "The FITS format relies on the astropy package. "
            "Please refer to http://www.astropy.org/ "
            "for further instructions."
        ) from err
    return _fits
|
||||
|
||||
|
||||
class FitsFormat(Format):
    """See :mod:`imageio.plugins.fits`"""

    def _can_read(self, request):
        # We return True if ext matches, because this is the only plugin
        # that can. If astropy is not installed, a useful error follows.
        return request.extension in self.extensions

    def _can_write(self, request):
        # No write support
        return False

    # -- reader

    class Reader(Format.Reader):
        def _open(self, cache=False, **kwargs):
            # Lazily import astropy on first use; extra kwargs are passed
            # straight through to astropy's fits.open
            if not _fits:
                load_lib()
            hdulist = _fits.open(self.request.get_file(), cache=cache, **kwargs)

            # Remember which HDUs actually carry image data
            self._index = []
            allowed_hdu_types = (_fits.ImageHDU, _fits.PrimaryHDU, _fits.CompImageHDU)
            # (idiom fix: enumerate instead of zip(range(len(...)), ...))
            for n, hdu in enumerate(hdulist):
                if isinstance(hdu, allowed_hdu_types):
                    # Ignore (primary) header units with no data (use '.size'
                    # rather than '.data' to avoid actually loading the image):
                    if hdu.size > 0:
                        self._index.append(n)
            self._hdulist = hdulist

        def _close(self):
            self._hdulist.close()

        def _get_length(self):
            # Number of HDUs that contain image data
            return len(self._index)

        def _get_data(self, index):
            # Get data
            if index < 0 or index >= len(self._index):
                raise IndexError("Index out of range while reading from fits")
            im = self._hdulist[self._index[index]].data
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # FITS header meta data is not exposed through this plugin
            raise RuntimeError("The fits format does not support meta data.")
|
||||
404
.CondaPkg/env/Lib/site-packages/imageio/plugins/freeimage.py
vendored
Normal file
404
.CondaPkg/env/Lib/site-packages/imageio/plugins/freeimage.py
vendored
Normal file
@@ -0,0 +1,404 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read/Write images using FreeImage.
|
||||
|
||||
Backend Library: `FreeImage <https://freeimage.sourceforge.io/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
imageio_download_bin freeimage
|
||||
|
||||
or you can download the backend using the function::
|
||||
|
||||
imageio.plugins.freeimage.download()
|
||||
|
||||
Each Freeimage format has the ``flags`` keyword argument. See the `Freeimage
|
||||
documentation <https://freeimage.sourceforge.io/>`_ for more information.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
flags : int
|
||||
A freeimage-specific option. In most cases we provide explicit
|
||||
parameters for influencing image reading.
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
from ..core.request import RETURN_BYTES
|
||||
from ._freeimage import FNAME_PER_PLATFORM, IO_FLAGS, download, fi # noqa
|
||||
|
||||
# todo: support files with only meta data
|
||||
|
||||
|
||||
class FreeimageFormat(Format):
    """See :mod:`imageio.plugins.freeimage`"""

    # This plugin handles single images only ("i" mode)
    _modes = "i"

    def __init__(self, name, description, extensions=None, modes=None, *, fif=None):
        super().__init__(name, description, extensions=extensions, modes=modes)
        self._fif = fif

    @property
    def fif(self):
        # Freeimage format id (int); set when format is created
        return self._fif

    def _can_read(self, request):
        # Ask freeimage if it can read it, maybe ext missing
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "r", request.firstbytes)
                except Exception:  # pragma: no cover
                    request._fif = -1
            if request._fif == self.fif:
                return True
            elif request._fif == 7 and self.fif == 14:
                # PPM gets identified as PBM and PPM can read PBM
                # see: https://github.com/imageio/imageio/issues/677
                return True

    def _can_write(self, request):
        # Ask freeimage, because we are not aware of all formats
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "w")
                except ValueError:  # pragma: no cover
                    if request.raw_uri == RETURN_BYTES:
                        request._fif = self.fif
                    else:
                        request._fif = -1
            # Fix: compare by value (== instead of is). fif ids are plain
            # ints; identity comparison only happens to work via CPython's
            # small-int cache. This also matches _can_read above.
            if request._fif == self.fif:
                return True

    # --

    class Reader(Format.Reader):
        def _get_length(self):
            # Singleton image
            return 1

        def _open(self, flags=0):
            self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags)
            self._bm.load_from_filename(self.request.get_local_filename())

        def _close(self):
            self._bm.close()

        def _get_data(self, index):
            if index != 0:
                raise IndexError("This format only supports singleton images.")
            return self._bm.get_image_data(), self._bm.get_meta_data()

        def _get_meta_data(self, index):
            # index None means "global" meta data, which for a singleton
            # image is the same as the image's meta data
            if not (index is None or index == 0):
                raise IndexError()
            return self._bm.get_meta_data()

    # --

    class Writer(Format.Writer):
        def _open(self, flags=0):
            self._flags = flags  # Store flags for later use
            self._bm = None
            self._is_set = False  # To prevent appending more than one image
            self._meta = {}

        def _close(self):
            # Set global meta data
            self._bm.set_meta_data(self._meta)
            # Write and close
            self._bm.save_to_filename(self.request.get_local_filename())
            self._bm.close()

        def _append_data(self, im, meta):
            # Check if set
            if not self._is_set:
                self._is_set = True
            else:
                raise RuntimeError("Singleton image; can only append image data once.")
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            # Lazy instantiation of the bitmap, we need image data
            if self._bm is None:
                self._bm = fi.create_bitmap(
                    self.request.filename, self.format.fif, self._flags
                )
                self._bm.allocate(im)
            # Set data
            self._bm.set_image_data(im)
            # There is no distinction between global and per-image meta data
            # for singleton images
            self._meta = meta

        def _set_meta_data(self, meta):
            self._meta = meta
|
||||
|
||||
|
||||
# Special plugins
|
||||
|
||||
# todo: there is also FIF_LOAD_NOPIXELS,
|
||||
# but perhaps that should be used with get_meta_data.
|
||||
|
||||
|
||||
class FreeimageBmpFormat(FreeimageFormat):
    """A BMP format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for saving
    ---------------------
    compression : bool
        Whether to compress the bitmap using RLE when saving. Default False.
        It seems this does not always work, but who cares, you should use
        PNG anyway.

    """

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, compression=False):
            # Translate the compression kwarg into freeimage flag bits,
            # then defer to the base writer with the combined flags
            rle_bit = IO_FLAGS.BMP_SAVE_RLE if compression else IO_FLAGS.BMP_DEFAULT
            return FreeimageFormat.Writer._open(self, int(flags) | rle_bit)

        def _append_data(self, im, meta):
            # BMP stores 8 bits per channel
            return FreeimageFormat.Writer._append_data(
                self, image_as_uint(im, bitdepth=8), meta
            )
|
||||
|
||||
|
||||
class FreeimagePngFormat(FreeimageFormat):
    """A PNG format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    ignoregamma : bool
        Avoid gamma correction. Default True.

    Parameters for saving
    ---------------------
    compression : {0, 1, 6, 9}
        The compression factor. Higher factors result in more
        compression at the cost of speed. Note that PNG compression is
        always lossless. Default 9.
    quantize : int
        If specified, turn the given RGB image into a paletted image
        for more efficient storage. The value should be between 2 and 256.
        If the value is 0, the image is not quantized.
    interlaced : bool
        Save using Adam7 interlacing. Default False.
    """

    class Reader(FreeimageFormat.Reader):
        def _open(self, flags=0, ignoregamma=True):
            # Build flags from kwargs
            flags = int(flags)
            if ignoregamma:
                flags |= IO_FLAGS.PNG_IGNOREGAMMA
            # Enter as usual, with modified flags
            return FreeimageFormat.Reader._open(self, flags)

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, compression=9, quantize=0, interlaced=False):
            # Map user-facing compression levels to freeimage flag values
            compression_map = {
                0: IO_FLAGS.PNG_Z_NO_COMPRESSION,
                1: IO_FLAGS.PNG_Z_BEST_SPEED,
                6: IO_FLAGS.PNG_Z_DEFAULT_COMPRESSION,
                9: IO_FLAGS.PNG_Z_BEST_COMPRESSION,
            }
            # Build flags from kwargs
            flags = int(flags)
            if interlaced:
                flags |= IO_FLAGS.PNG_INTERLACED
            try:
                flags |= compression_map[compression]
            except KeyError:
                raise ValueError("Png compression must be 0, 1, 6, or 9.")
            # Act as usual, but with modified flags
            return FreeimageFormat.Writer._open(self, flags)

        def _append_data(self, im, meta):
            # Preserve 16-bit depth when the input is uint16; 8 bits otherwise
            if str(im.dtype) == "uint16":
                im = image_as_uint(im, bitdepth=16)
            else:
                im = image_as_uint(im, bitdepth=8)
            FreeimageFormat.Writer._append_data(self, im, meta)
            # Quantize? Operates on the already-stored bitmap, replacing it
            # with a paletted copy. Only RGB (3-channel) data is supported.
            q = int(self.request.kwargs.get("quantize", False))
            if not q:
                pass
            elif not (im.ndim == 3 and im.shape[-1] == 3):
                raise ValueError("Can only quantize RGB images")
            elif q < 2 or q > 256:
                raise ValueError("PNG quantize param must be 2..256")
            else:
                bm = self._bm.quantize(0, q)
                self._bm.close()
                self._bm = bm
|
||||
|
||||
|
||||
class FreeimageJpegFormat(FreeimageFormat):
    """A JPEG format based on the Freeimage library.

    This format supports grayscale and RGB images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    exifrotate : bool
        Automatically rotate the image according to the exif flag.
        Default True. If 2 is given, do the rotation in Python instead
        of freeimage.
    quickread : bool
        Read the image more quickly, at the expense of quality.
        Default False.

    Parameters for saving
    ---------------------
    quality : scalar
        The compression factor of the saved image (1..100), higher
        numbers result in higher quality but larger file size. Default 75.
    progressive : bool
        Save as a progressive JPEG file (e.g. for images on the web).
        Default False.
    optimize : bool
        On saving, compute optimal Huffman coding tables (can reduce a
        few percent of file size). Default False.
    baseline : bool
        Save basic JPEG, without metadata or any markers. Default False.

    """

    class Reader(FreeimageFormat.Reader):
        def _open(self, flags=0, exifrotate=True, quickread=False):
            # Build flags from kwargs
            flags = int(flags)
            # exifrotate == 2 means "rotate in Python" (see _rotate below),
            # so the freeimage-side rotation flag is then left off
            if exifrotate and exifrotate != 2:
                flags |= IO_FLAGS.JPEG_EXIFROTATE
            if not quickread:
                flags |= IO_FLAGS.JPEG_ACCURATE
            # Enter as usual, with modified flags
            return FreeimageFormat.Reader._open(self, flags)

        def _get_data(self, index):
            im, meta = FreeimageFormat.Reader._get_data(self, index)
            im = self._rotate(im, meta)
            return im, meta

        def _rotate(self, im, meta):
            """Use Orientation information from EXIF meta data to
            orient the image correctly. Freeimage is also supposed to
            support that, and I am pretty sure it once did, but now it
            does not, so let's just do it in Python.
            Edit: and now it works again, just leave in place as a fallback.
            """
            # Only active when the user explicitly asked for exifrotate=2
            if self.request.kwargs.get("exifrotate", None) == 2:
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(
            self, flags=0, quality=75, progressive=False, optimize=False, baseline=False
        ):
            # Test quality
            quality = int(quality)
            if quality < 1 or quality > 100:
                raise ValueError("JPEG quality should be between 1 and 100.")
            # Build flags from kwargs
            # (freeimage encodes the JPEG quality in the low flag bits)
            flags = int(flags)
            flags |= quality
            if progressive:
                flags |= IO_FLAGS.JPEG_PROGRESSIVE
            if optimize:
                flags |= IO_FLAGS.JPEG_OPTIMIZE
            if baseline:
                flags |= IO_FLAGS.JPEG_BASELINE
            # Act as usual, but with modified flags
            return FreeimageFormat.Writer._open(self, flags)

        def _append_data(self, im, meta):
            # JPEG has no alpha support; reject RGBA early
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError("JPEG does not support alpha channel.")
            im = image_as_uint(im, bitdepth=8)
            return FreeimageFormat.Writer._append_data(self, im, meta)
|
||||
|
||||
|
||||
class FreeimagePnmFormat(FreeimageFormat):
    """A PNM format based on the Freeimage library.

    This format supports single bit (PBM), grayscale (PGM) and RGB (PPM)
    images, even with ASCII or binary coding.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for saving
    ---------------------
    use_ascii : bool
        Save with ASCII coding. Default True.
    """

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, use_ascii=True):
            # Fold the use_ascii kwarg into the freeimage flags and defer
            # to the base writer
            ascii_bit = IO_FLAGS.PNM_SAVE_ASCII if use_ascii else 0
            return FreeimageFormat.Writer._open(self, int(flags) | ascii_bit)
|
||||
316
.CondaPkg/env/Lib/site-packages/imageio/plugins/freeimagemulti.py
vendored
Normal file
316
.CondaPkg/env/Lib/site-packages/imageio/plugins/freeimagemulti.py
vendored
Normal file
@@ -0,0 +1,316 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Plugin for multi-image freeimage formats, like animated GIF and ico.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
from ._freeimage import fi, IO_FLAGS
|
||||
from .freeimage import FreeimageFormat
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FreeimageMulti(FreeimageFormat):
    """Base class for freeimage formats that support multiple images."""

    # Supports single ("i") and multi-image ("I") modes
    _modes = "iI"
    # Freeimage format id; -1 marks this abstract base, subclasses override
    _fif = -1

    class Reader(Format.Reader):
        def _open(self, flags=0):
            flags = int(flags)
            # Create bitmap
            self._bm = fi.create_multipage_bitmap(
                self.request.filename, self.format.fif, flags
            )
            self._bm.load_from_filename(self.request.get_local_filename())

        def _close(self):
            self._bm.close()

        def _get_length(self):
            # Number of pages in the multi-page bitmap
            return len(self._bm)

        def _get_data(self, index):
            sub = self._bm.get_page(index)
            try:
                return sub.get_image_data(), sub.get_meta_data()
            finally:
                # Always release the page, even if reading raised
                sub.close()

        def _get_meta_data(self, index):
            # index None means "global" meta data; map it to the first page
            index = index or 0
            if index < 0 or index >= len(self._bm):
                raise IndexError()
            sub = self._bm.get_page(index)
            try:
                return sub.get_meta_data()
            finally:
                sub.close()

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0):
            # Set flags
            self._flags = flags = int(flags)
            # Instantiate multi-page bitmap
            self._bm = fi.create_multipage_bitmap(
                self.request.filename, self.format.fif, flags
            )
            self._bm.save_to_filename(self.request.get_local_filename())

        def _close(self):
            # Close bitmap
            self._bm.close()

        def _append_data(self, im, meta):
            # Prepare data: drop unit channel dimension, force uint8
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            im = image_as_uint(im, bitdepth=8)
            # Create sub bitmap
            sub1 = fi.create_bitmap(self._bm._filename, self.format.fif)
            # Let subclass add data to bitmap, optionally return new
            sub2 = self._append_bitmap(im, meta, sub1)
            # Add
            self._bm.append_bitmap(sub2)
            sub2.close()
            if sub1 is not sub2:
                sub1.close()

        def _append_bitmap(self, im, meta, bitmap):
            # Default implementation; subclasses may replace the bitmap
            # (e.g. with a quantized copy) and return the replacement.
            # Set data
            bitmap.allocate(im)
            bitmap.set_image_data(im)
            bitmap.set_meta_data(meta)
            # Return that same bitmap
            return bitmap

        def _set_meta_data(self, meta):
            pass  # ignore global meta data
|
||||
|
||||
|
||||
class MngFormat(FreeimageMulti):
    """An Mng format based on the Freeimage library.

    Read only. Seems broken.
    """

    # Freeimage format id for MNG
    _fif = 6

    def _can_write(self, request):  # pragma: no cover
        # Writing MNG is not supported
        return False
|
||||
|
||||
|
||||
class IcoFormat(FreeimageMulti):
    """An ICO format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    makealpha : bool
        Convert to 32-bit and create an alpha channel from the AND-
        mask when loading. Default False. Note that this returns wrong
        results if the image was already RGBA.

    """

    # Freeimage format id for ICO
    _fif = 1

    class Reader(FreeimageMulti.Reader):
        def _open(self, flags=0, makealpha=False):
            # Fold the makealpha kwarg into the freeimage flags and defer
            # to the base multi-page reader
            alpha_bit = IO_FLAGS.ICO_MAKEALPHA if makealpha else 0
            return FreeimageMulti.Reader._open(self, int(flags) | alpha_bit)
|
||||
|
||||
|
||||
class GifFormat(FreeimageMulti):
|
||||
"""A format for reading and writing static and animated GIF, based
|
||||
on the Freeimage library.
|
||||
|
||||
Images read with this format are always RGBA. Currently,
|
||||
the alpha channel is ignored when saving RGB images with this
|
||||
format.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
is not available on the system, it can be downloaded by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
playback : bool
|
||||
'Play' the GIF to generate each frame (as 32bpp) instead of
|
||||
returning raw frame data when loading. Default True.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
loop : int
|
||||
The number of iterations. Default 0 (meaning loop indefinitely)
|
||||
duration : {float, list}
|
||||
The duration (in seconds) of each frame. Either specify one value
|
||||
that is used for all frames, or one value for each frame.
|
||||
Note that in the GIF format the duration/delay is expressed in
|
||||
hundredths of a second, which limits the precision of the duration.
|
||||
fps : float
|
||||
The number of frames per second. If duration is not given, the
|
||||
duration for each frame is set to 1/fps. Default 10.
|
||||
palettesize : int
|
||||
The number of colors to quantize the image to. Is rounded to
|
||||
the nearest power of two. Default 256.
|
||||
quantizer : {'wu', 'nq'}
|
||||
The quantization algorithm:
|
||||
* wu - Wu, Xiaolin, Efficient Statistical Computations for
|
||||
Optimal Color Quantization
|
||||
* nq (neuqant) - Dekker A. H., Kohonen neural networks for
|
||||
optimal color quantization
|
||||
subrectangles : bool
|
||||
If True, will try and optimize the GIF by storing only the
|
||||
rectangular parts of each frame that change with respect to the
|
||||
previous. Unfortunately, this option seems currently broken
|
||||
because FreeImage does not handle DisposalMethod correctly.
|
||||
Default False.
|
||||
"""
|
||||
|
||||
_fif = 25
|
||||
|
||||
class Reader(FreeimageMulti.Reader):
    """Reader for animated GIF via FreeImage; frames are decoded as RGBA."""

    def _open(self, flags=0, playback=True):
        # Translate keyword arguments into FreeImage loader flags.
        load_flags = int(flags)
        if playback:
            # Ask FreeImage to compose ("play") each frame into a full
            # 32bpp image instead of returning raw partial frame data.
            load_flags = load_flags | IO_FLAGS.GIF_PLAYBACK
        FreeimageMulti.Reader._open(self, load_flags)

    def _get_data(self, index):
        # Delegate to the multi-image reader; the alpha channel is kept.
        image, metadata = FreeimageMulti.Reader._get_data(self, index)
        return image, metadata
|
||||
|
||||
# -- writer
|
||||
|
||||
class Writer(FreeimageMulti.Writer):
    """Writer for animated GIF via the FreeImage backend.

    Handles looping, per-frame durations, palette quantization and an
    (experimental) sub-rectangle optimization.
    """

    # todo: subrectangles
    # todo: global palette

    def _open(
        self,
        flags=0,
        loop=0,
        duration=None,
        fps=10,
        palettesize=256,
        quantizer="Wu",
        subrectangles=False,
    ):
        # Check palettesize: must be in [2, 256]; non-powers-of-two are
        # rounded up to the next power of two.
        if palettesize < 2 or palettesize > 256:
            raise ValueError("GIF quantize param must be 2..256")
        if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
            # Bugfix: this previously computed 2 ** int(np.log2(128) + 0.999),
            # which silently turned *every* non-power-of-two request into 128
            # instead of rounding the requested size up.
            palettesize = 2 ** int(np.log2(palettesize) + 0.999)
            logger.warning(
                "Warning: palettesize (%r) modified to a factor of "
                "two between 2-256." % palettesize
            )
        self._palettesize = palettesize
        # Check quantizer: map the name onto FreeImage's quantizer id.
        self._quantizer = {"wu": 0, "nq": 1}.get(quantizer.lower(), None)
        if self._quantizer is None:
            raise ValueError('Invalid quantizer, must be "wu" or "nq".')
        # Check frametime: stored in milliseconds here. Note that GIF
        # expresses delays in 1/100 s, which limits the precision.
        if duration is None:
            self._frametime = [int(1000 / float(fps) + 0.5)]
        elif isinstance(duration, list):
            self._frametime = [int(1000 * d) for d in duration]
        elif isinstance(duration, (float, int)):
            self._frametime = [int(1000 * duration)]
        else:
            raise ValueError("Invalid value for duration: %r" % duration)
        # Check subrectangles
        self._subrectangles = bool(subrectangles)
        self._prev_im = None
        # Init
        FreeimageMulti.Writer._open(self, flags)
        # Set global meta data; it is applied to the first appended frame.
        self._meta = {}
        self._meta["ANIMATION"] = {
            # 'GlobalPalette': np.array([0]).astype(np.uint8),
            "Loop": np.array([loop]).astype(np.uint32),
            # 'LogicalWidth': np.array([x]).astype(np.uint16),
            # 'LogicalHeight': np.array([x]).astype(np.uint16),
        }

    def _append_bitmap(self, im, meta, bitmap):
        """Prepare one frame (timing, optional cropping, quantization) and
        return the bitmap object to append."""
        # Prepare meta data
        meta = meta.copy()
        meta_a = meta["ANIMATION"] = {}
        # If this is the first frame, assign it our "global" meta data
        if len(self._bm) == 0:
            meta.update(self._meta)
            meta_a = meta["ANIMATION"]
        # Set frame time; the last given duration is reused for any
        # frames beyond the length of the duration list.
        index = len(self._bm)
        if index < len(self._frametime):
            ft = self._frametime[index]
        else:
            ft = self._frametime[-1]
        meta_a["FrameTime"] = np.array([ft]).astype(np.uint32)
        # Check array: drop the alpha channel (GIF frames are written RGB).
        if im.ndim == 3 and im.shape[-1] == 4:
            im = im[:, :, :3]
        # Process subrectangles
        im_uncropped = im
        if self._subrectangles and self._prev_im is not None:
            im, xy = self._get_sub_rectangles(self._prev_im, im)
            meta_a["DisposalMethod"] = np.array([1]).astype(np.uint8)
            meta_a["FrameLeft"] = np.array([xy[0]]).astype(np.uint16)
            meta_a["FrameTop"] = np.array([xy[1]]).astype(np.uint16)
        self._prev_im = im_uncropped
        # Set image data
        sub2 = sub1 = bitmap
        sub1.allocate(im)
        sub1.set_image_data(im)
        # Quantize it if its RGB (GIF requires a palette)
        if im.ndim == 3 and im.shape[-1] == 3:
            sub2 = sub1.quantize(self._quantizer, self._palettesize)
        # Set meta data and return
        sub2.set_meta_data(meta)
        return sub2

    def _get_sub_rectangles(self, prev, im):
        """
        Calculate the minimal rectangles that need updating each frame.
        Returns a two-element tuple containing the cropped images and a
        list of x-y positions.
        """
        # Get difference, sum over colors.
        # NOTE: for uint8 frames the subtraction wraps around, but a wrapped
        # difference is still nonzero exactly where pixels differ, so the
        # changed-region detection below is unaffected.
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Get begin and end for both dimensions
        X = np.argwhere(diff.sum(0))
        Y = np.argwhere(diff.sum(1))
        # Get rect coordinates
        if X.size and Y.size:
            x0, x1 = int(X[0]), int(X[-1]) + 1
            y0, y1 = int(Y[0]), int(Y[-1]) + 1
        else:  # No change ... make it minimal
            x0, x1 = 0, 2
            y0, y1 = 0, 2
        # Cut out and return
        return im[y0:y1, x0:x1], (x0, y0)
|
||||
71
.CondaPkg/env/Lib/site-packages/imageio/plugins/gdal.py
vendored
Normal file
71
.CondaPkg/env/Lib/site-packages/imageio/plugins/gdal.py
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read GDAL files.
|
||||
|
||||
Backend: `GDAL <https://gdal.org/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[gdal]
|
||||
|
||||
Parameters
|
||||
----------
|
||||
none
|
||||
"""
|
||||
|
||||
from ..core import Format, has_module
|
||||
|
||||
_gdal = None # lazily loaded in load_lib()
|
||||
|
||||
|
||||
def load_lib():
    """Import and cache the GDAL backend module.

    Stores ``osgeo.gdal`` in the module-level ``_gdal`` so subsequent
    calls are cheap, and returns the module.

    Raises
    ------
    ImportError
        If the GDAL package is not installed.
    """
    global _gdal
    try:
        import osgeo.gdal as _gdal
    except ImportError:
        # Bugfix: the message fragments were previously concatenated
        # without separating spaces ("package.Please", "gdal.org/for").
        raise ImportError(
            "The GDAL format relies on the GDAL package. "
            "Please refer to http://www.gdal.org/ "
            "for further instructions."
        )
    return _gdal
|
||||
|
||||
|
||||
# File extensions handled by the GDAL plugin.
# Bugfix: the ".tif" entry used to carry a stray leading space (" .tif"),
# which made it unmatchable against any real extension.
GDAL_FORMATS = (".tiff", ".tif", ".img", ".ecw", ".jpg", ".jpeg")
|
||||
|
||||
|
||||
class GdalFormat(Format):
    """See :mod:`imageio.plugins.gdal`"""

    def _can_read(self, request):
        # ECW files are always claimed; everything else only when the
        # osgeo.gdal backend is importable.
        if request.extension == ".ecw":
            return True
        if has_module("osgeo.gdal"):
            return request.extension in self.extensions

    def _can_write(self, request):
        # This plugin is read-only.
        return False

    # --

    class Reader(Format.Reader):
        def _open(self):
            # Lazily load the GDAL library on first use.
            if not _gdal:
                load_lib()
            self._ds = _gdal.Open(self.request.get_local_filename())

        def _close(self):
            del self._ds

        def _get_length(self):
            # A GDAL file exposes exactly one dataset.
            return 1

        def _get_data(self, index):
            if index != 0:
                raise IndexError("Gdal file contains only one dataset")
            meta = self._get_meta_data(index)
            return self._ds.ReadAsArray(), meta

        def _get_meta_data(self, index):
            # GDAL keeps metadata on the dataset object itself.
            return self._ds.GetMetadata()
|
||||
105
.CondaPkg/env/Lib/site-packages/imageio/plugins/grab.py
vendored
Normal file
105
.CondaPkg/env/Lib/site-packages/imageio/plugins/grab.py
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
"""
|
||||
PIL-based formats to take screenshots and grab from the clipboard.
|
||||
"""
|
||||
|
||||
import threading
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class BaseGrabFormat(Format):
    """Base format for grab formats."""

    # Class-level defaults; set once _init_pillow has run.
    _pillow_imported = False
    _ImageGrab = None

    def __init__(self, *args, **kwargs):
        super(BaseGrabFormat, self).__init__(*args, **kwargs)
        # Guards the one-time Pillow import against concurrent callers.
        self._lock = threading.RLock()

    def _can_write(self, request):
        # Grab formats are read-only.
        return False

    def _init_pillow(self):
        """Import Pillow's ImageGrab once; return it, or None if missing."""
        with self._lock:
            if self._pillow_imported:
                return self._ImageGrab
            self._pillow_imported = True  # more like: we tried to import
            import PIL

            if not hasattr(PIL, "__version__"):  # pragma: no cover
                raise ImportError("Imageio Pillow requires Pillow, not PIL!")
            try:
                from PIL import ImageGrab
            except ImportError:
                return None
            self._ImageGrab = ImageGrab
            return self._ImageGrab

    class Reader(Format.Reader):
        def _open(self):
            pass

        def _close(self):
            pass

        def _get_data(self, index):
            # The actual grabbing lives on the format object.
            return self.format._get_data(index)
|
||||
|
||||
|
||||
class ScreenGrabFormat(BaseGrabFormat):
    """The ScreenGrabFormat provided a means to grab screenshots using
    the uri of "<screen>".

    This functionality is provided via Pillow. Note that "<screen>" is
    only supported on Windows and OS X.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only the magic "<screen>" uri is handled, and only when
        # Pillow's ImageGrab is available.
        return request.filename == "<screen>" and bool(self._init_pillow())

    def _get_data(self, index):
        grabber = self._init_pillow()
        assert grabber

        screenshot = grabber.grab()
        assert screenshot is not None
        return np.asarray(screenshot), {}
|
||||
|
||||
|
||||
class ClipboardGrabFormat(BaseGrabFormat):
    """The ClipboardGrabFormat provided a means to grab image data from
    the clipboard, using the uri "<clipboard>"

    This functionality is provided via Pillow. Note that "<clipboard>" is
    only supported on Windows.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only the magic "<clipboard>" uri is handled, and only when
        # Pillow's ImageGrab is available.
        return request.filename == "<clipboard>" and bool(self._init_pillow())

    def _get_data(self, index):
        grabber = self._init_pillow()
        assert grabber

        clip_im = grabber.grabclipboard()
        if clip_im is None:
            raise RuntimeError(
                "There seems to be no image data on the clipboard now."
            )
        return np.asarray(clip_im), {}
|
||||
714
.CondaPkg/env/Lib/site-packages/imageio/plugins/lytro.py
vendored
Normal file
714
.CondaPkg/env/Lib/site-packages/imageio/plugins/lytro.py
vendored
Normal file
@@ -0,0 +1,714 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, imageio contributors
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
#
|
||||
|
||||
""" Read LFR files (Lytro Illum).
|
||||
|
||||
Backend: internal
|
||||
|
||||
Plugin to read Lytro Illum .lfr and .raw files as produced
|
||||
by the Lytro Illum light field camera. It is actually a collection
|
||||
of plugins, each supporting slightly different keyword arguments
|
||||
|
||||
Parameters
|
||||
----------
|
||||
meta_only : bool
|
||||
Whether to only read the metadata.
|
||||
include_thumbnail : bool
|
||||
(only for lytro-lfr and lytro-lfp)
|
||||
Whether to include an image thumbnail in the metadata.
|
||||
|
||||
"""
|
||||
#
|
||||
#
|
||||
# This code is based on work by
|
||||
# David Uhlig and his lfr_reader
|
||||
# (https://www.iiit.kit.edu/uhlig.php)
|
||||
# Donald Dansereau and his Matlab LF Toolbox
|
||||
# (http://dgd.vision/Tools/LFToolbox/)
|
||||
# and Behnam Esfahbod and his Python LFP-Reader
|
||||
# (https://github.com/behnam/python-lfp-reader/)
|
||||
|
||||
|
||||
import os
|
||||
import json
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
from ..v2 import imread
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Sensor size of Lytro Illum resp. Lytro F01 light field camera sensor
|
||||
LYTRO_ILLUM_IMAGE_SIZE = (5368, 7728)
|
||||
LYTRO_F01_IMAGE_SIZE = (3280, 3280)
|
||||
|
||||
# Parameter of lfr file format
|
||||
HEADER_LENGTH = 12
|
||||
SIZE_LENGTH = 4 # = 16 - header_length
|
||||
SHA1_LENGTH = 45 # = len("sha1-") + (160 / 4)
|
||||
PADDING_LENGTH = 35 # = (4*16) - header_length - size_length - sha1_length
|
||||
DATA_CHUNKS_ILLUM = 11
|
||||
DATA_CHUNKS_F01 = 3
|
||||
|
||||
|
||||
class LytroFormat(Format):
    """Base class for Lytro format.
    The subclasses LytroLfrFormat, LytroLfpFormat, LytroIllumRawFormat and
    LytroF01RawFormat implement the Lytro-LFR, Lytro-LFP and Lytro-RAW format
    for the Illum and original F01 camera respectively.
    Writing is not supported.
    """

    # Only single images are supported.
    _modes = "i"

    def _can_write(self, request):
        # Lytro files are read-only for imageio.
        return False

    # -- writer

    class Writer(Format.Writer):
        def _open(self, flags=0):
            self._fp = self.request.get_file()

        def _close(self):
            # Nothing to do; the request object closes self._fp for us.
            pass

        def _append_data(self, im, meta):
            # Writing image data is deliberately unsupported.
            raise RuntimeError("The lytro format cannot write image data.")

        def _set_meta_data(self, meta):
            # Writing meta data is deliberately unsupported.
            raise RuntimeError("The lytro format cannot write meta data.")
|
||||
|
||||
|
||||
class LytroIllumRawFormat(LytroFormat):
    """This is the Lytro Illum RAW format.
    The raw format is a 10bit image format as used by the Lytro Illum
    light field camera. The format will read the specified raw file and will
    try to load a .txt or .json file with the associated meta data.
    This format does not support writing.


    Parameters for reading
    ----------------------
    meta_only : bool
        Whether to only read the metadata.
    """

    def _can_read(self, request):
        # Claim files with the .raw extension.
        if request.extension in (".raw",):
            return True

    @staticmethod
    def rearrange_bits(array):
        """Unpack the Illum's 10-bit packing and normalize to [0, 1].

        Every 5 input bytes hold four pixels: four bytes of 8 MSBs
        followed by one byte carrying the four 2-bit LSBs.
        Returns a float64 image of shape LYTRO_ILLUM_IMAGE_SIZE.
        """
        t0 = array[0::5]
        t1 = array[1::5]
        t2 = array[2::5]
        t3 = array[3::5]
        lsb = array[4::5]

        t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3)
        t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2)
        t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4)
        t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6)

        image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16)
        image[:, 0::4] = t0.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 1::4] = t1.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 2::4] = t2.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 3::4] = t3.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )

        # Normalize data to 1.0 as 64-bit float.
        # Division is by 1023 as the Lytro Illum saves 10-bit raw data.
        return np.divide(image, 1023.0).astype(np.float64)

    # -- reader

    class Reader(Format.Reader):
        def _open(self, meta_only=False):
            # The file is read lazily in _get_data.
            self._file = self.request.get_file()
            self._data = None
            self._meta_only = meta_only

        def _close(self):
            # Note that the request object will close self._file
            del self._data

        def _get_length(self):
            # A raw file holds exactly one image.
            return 1

        def _get_data(self, index):
            # Return the data and meta data for the given index.
            # Bugfix: this previously compared against the *string* "None"
            # instead of None, so index=None raised IndexError while
            # _get_meta_data accepted it.
            if index not in [0, None]:
                raise IndexError("Lytro file contains only one dataset")

            if not self._meta_only:
                # Read all bytes (cached for repeated calls)
                if self._data is None:
                    self._data = self._file.read()

                # Read bytes from string and convert to uint16
                raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)

                # Rearrange bits
                img = LytroIllumRawFormat.rearrange_bits(raw)

            else:
                # Return empty image
                img = np.array([])

            # Return image and meta data
            return img, self._get_meta_data(index=0)

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None, it
            # should return the global meta data.

            if index not in [0, None]:
                raise IndexError("Lytro meta data file contains only one dataset")

            # Try to read meta data from meta data file corresponding
            # to the raw data file, extension in [.txt, .TXT, .json, .JSON]
            filename_base = os.path.splitext(self.request.get_local_filename())[0]

            meta_data = None

            for ext in [".txt", ".TXT", ".json", ".JSON"]:
                if os.path.isfile(filename_base + ext):
                    # Use a context manager so the handle is closed
                    # (previously a bare open() leaked the file object).
                    with open(filename_base + ext) as meta_file:
                        meta_data = json.load(meta_file)

            if meta_data is not None:
                return meta_data

            else:
                logger.warning("No metadata file found for provided raw file.")
                return {}
|
||||
|
||||
|
||||
class LytroLfrFormat(LytroFormat):
    """This is the Lytro Illum LFR format.
    The lfr is a image and meta data container format as used by the
    Lytro Illum light field camera.
    The format will read the specified lfr file.
    This format does not support writing.

    Parameters for reading
    ----------------------
    meta_only : bool
        Whether to only read the metadata.
    include_thumbnail : bool
        Whether to include an image thumbnail in the metadata.
    """

    def _can_read(self, request):
        # Check if mode and extensions are supported by the format
        if request.extension in (".lfr",):
            return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, meta_only=False, include_thumbnail=True):
            """Parse the LFR container: header, chunk table, meta chunk,
            then pull out image data, metadata and (optionally) thumbnail."""
            self._file = self.request.get_file()
            self._data = None
            # Maps sha1 string -> (file offset, size) of each data chunk.
            self._chunks = {}
            self.metadata = {}
            self._content = None
            self._meta_only = meta_only
            self._include_thumbnail = include_thumbnail

            self._find_header()
            self._find_chunks()
            self._find_meta()

            try:
                # Get sha1 dict and check if it is in dictionary of data chunks
                chunk_dict = self._content["frames"][0]["frame"]
                if (
                    chunk_dict["metadataRef"] in self._chunks
                    and chunk_dict["imageRef"] in self._chunks
                    and chunk_dict["privateMetadataRef"] in self._chunks
                ):
                    if not self._meta_only:
                        # Read raw image data byte buffer
                        data_pos, size = self._chunks[chunk_dict["imageRef"]]
                        self._file.seek(data_pos, 0)
                        self.raw_image_data = self._file.read(size)

                    # Read meta data
                    data_pos, size = self._chunks[chunk_dict["metadataRef"]]
                    self._file.seek(data_pos, 0)
                    metadata = self._file.read(size)
                    # Add metadata to meta data dict
                    self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))

                    # Read private metadata
                    data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
                    self._file.seek(data_pos, 0)
                    serial_numbers = self._file.read(size)
                    self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
                    # Add private metadata to meta data dict
                    self.metadata["privateMetadata"] = self.serial_numbers

                    # Read image preview thumbnail
                    if self._include_thumbnail:
                        chunk_dict = self._content["thumbnails"][0]
                        if chunk_dict["imageRef"] in self._chunks:
                            # Read thumbnail image from thumbnail chunk
                            data_pos, size = self._chunks[chunk_dict["imageRef"]]
                            self._file.seek(data_pos, 0)
                            # Read binary data, read image as jpeg
                            thumbnail_data = self._file.read(size)
                            thumbnail_img = imread(thumbnail_data, format="jpeg")

                            thumbnail_height = chunk_dict["height"]
                            thumbnail_width = chunk_dict["width"]

                            # Add thumbnail to metadata
                            self.metadata["thumbnail"] = {
                                "image": thumbnail_img,
                                "height": thumbnail_height,
                                "width": thumbnail_width,
                            }

            except KeyError:
                raise RuntimeError("The specified file is not a valid LFR file.")

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._file
            del self._data

        def _get_length(self):
            # Return the number of images. Can be np.inf
            return 1

        def _find_header(self):
            """
            Checks if file has correct header and skip it.
            """
            file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01"
            # Read and check header of file
            header = self._file.read(HEADER_LENGTH)
            if header != file_header:
                raise RuntimeError("The LFR file header is invalid.")

            # Read first bytes to skip header
            self._file.read(SIZE_LENGTH)

        def _find_chunks(self):
            """
            Gets start position and size of data chunks in file.
            """
            chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00"

            # An Illum LFR file carries a fixed number of data chunks.
            for i in range(0, DATA_CHUNKS_ILLUM):
                data_pos, size, sha1 = self._get_chunk(chunk_header)
                self._chunks[sha1] = (data_pos, size)

        def _find_meta(self):
            """
            Gets a data chunk that contains information over content
            of other data chunks.
            """
            meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
            data_pos, size, sha1 = self._get_chunk(meta_header)

            # Get content
            self._file.seek(data_pos, 0)
            data = self._file.read(size)
            self._content = json.loads(data.decode("ASCII"))

        def _get_chunk(self, header):
            """
            Checks if chunk has correct header and skips it.
            Finds start position and length of next chunk and reads
            sha1-string that identifies the following data chunk.

            Parameters
            ----------
            header : bytes
                Byte string that identifies start of chunk.

            Returns
            -------
            data_pos : int
                Start position of data chunk in file.
            size : int
                Size of data chunk.
            sha1 : str
                Sha1 value of chunk.
            """
            # Read and check header of chunk
            header_chunk = self._file.read(HEADER_LENGTH)
            if header_chunk != header:
                raise RuntimeError("The LFR chunk header is invalid.")

            data_pos = None
            sha1 = None

            # Read size (big-endian signed 32-bit int)
            size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
            if size > 0:
                # Read sha1
                sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
                # Skip fixed null chars
                self._file.read(PADDING_LENGTH)
                # Find start of data and skip data
                data_pos = self._file.tell()
                self._file.seek(size, 1)
                # Skip extra null chars; step back one byte after the first
                # non-null so the next chunk header is read from its start.
                ch = self._file.read(1)
                while ch == b"\0":
                    ch = self._file.read(1)
                self._file.seek(-1, 1)

            return data_pos, size, sha1

        def _get_data(self, index):
            # Return the data and meta data for the given index
            if index not in [0, None]:
                raise IndexError("Lytro lfr file contains only one dataset")

            if not self._meta_only:
                # Read bytes from string and convert to uint16
                raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(
                    np.uint16
                )
                # The Illum raw data uses the shared 10-bit unpacking.
                im = LytroIllumRawFormat.rearrange_bits(raw)
            else:
                im = np.array([])

            # Return array and dummy meta data
            return im, self.metadata

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None,
            # it returns the global meta data.
            if index not in [0, None]:
                raise IndexError("Lytro meta data file contains only one dataset")

            return self.metadata
|
||||
|
||||
|
||||
class LytroF01RawFormat(LytroFormat):
    """This is the Lytro RAW format for the original F01 Lytro camera.
    The raw format is a 12bit image format as used by the Lytro F01
    light field camera. The format will read the specified raw file and will
    try to load a .txt or .json file with the associated meta data.
    This format does not support writing.


    Parameters for reading
    ----------------------
    meta_only : bool
        Whether to only read the metadata.

    """

    def _can_read(self, request):
        # Claim files with the .raw extension.
        if request.extension in (".raw",):
            return True

    @staticmethod
    def rearrange_bits(array):
        """Unpack the F01's 12-bit packing and normalize to [0, 1].

        Every 3 input bytes hold two pixels (12 bits each).
        Returns a float64 image of shape LYTRO_F01_IMAGE_SIZE.
        """
        t0 = array[0::3]
        t1 = array[1::3]
        t2 = array[2::3]

        a0 = np.left_shift(t0, 4) + np.right_shift(np.bitwise_and(t1, 240), 4)
        a1 = np.left_shift(np.bitwise_and(t1, 15), 8) + t2

        image = np.zeros(LYTRO_F01_IMAGE_SIZE, dtype=np.uint16)
        image[:, 0::2] = a0.reshape(
            (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2)
        )
        image[:, 1::2] = a1.reshape(
            (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2)
        )

        # Normalize data to 1.0 as 64-bit float.
        # Division is by 4095 as the Lytro F01 saves 12-bit raw data.
        return np.divide(image, 4095.0).astype(np.float64)

    # -- reader

    class Reader(Format.Reader):
        def _open(self, meta_only=False):
            # The file is read lazily in _get_data.
            self._file = self.request.get_file()
            self._data = None
            self._meta_only = meta_only

        def _close(self):
            # Note that the request object will close self._file
            del self._data

        def _get_length(self):
            # A raw file holds exactly one image.
            return 1

        def _get_data(self, index):
            # Return the data and meta data for the given index.
            # Bugfix: this previously compared against the *string* "None"
            # instead of None, so index=None raised IndexError while
            # _get_meta_data accepted it.
            if index not in [0, None]:
                raise IndexError("Lytro file contains only one dataset")

            if not self._meta_only:
                # Read all bytes (cached for repeated calls)
                if self._data is None:
                    self._data = self._file.read()

                # Read bytes from string and convert to uint16
                raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)

                # Rearrange bits
                img = LytroF01RawFormat.rearrange_bits(raw)

            else:
                img = np.array([])

            # Return image and meta data
            return img, self._get_meta_data(index=0)

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None, it
            # should return the global meta data.

            if index not in [0, None]:
                raise IndexError("Lytro meta data file contains only one dataset")

            # Try to read meta data from meta data file corresponding
            # to the raw data file, extension in [.txt, .TXT, .json, .JSON]
            filename_base = os.path.splitext(self.request.get_local_filename())[0]

            meta_data = None

            for ext in [".txt", ".TXT", ".json", ".JSON"]:
                if os.path.isfile(filename_base + ext):
                    # Use a context manager so the handle is closed
                    # (previously a bare open() leaked the file object).
                    with open(filename_base + ext) as meta_file:
                        meta_data = json.load(meta_file)

            if meta_data is not None:
                return meta_data

            else:
                logger.warning("No metadata file found for provided raw file.")
                return {}
|
||||
|
||||
|
||||
class LytroLfpFormat(LytroFormat):
|
||||
"""This is the Lytro Illum LFP format.
|
||||
The lfp is a image and meta data container format as used by the
|
||||
Lytro F01 light field camera.
|
||||
The format will read the specified lfp file.
|
||||
This format does not support writing.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
meta_only : bool
|
||||
Whether to only read the metadata.
|
||||
include_thumbnail : bool
|
||||
Whether to include an image thumbnail in the metadata.
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Check if mode and extensions are supported by the format
|
||||
if request.extension in (".lfp",):
|
||||
return True
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self, meta_only=False):
|
||||
self._file = self.request.get_file()
|
||||
self._data = None
|
||||
self._chunks = {}
|
||||
self.metadata = {}
|
||||
self._content = None
|
||||
self._meta_only = meta_only
|
||||
|
||||
self._find_header()
|
||||
self._find_meta()
|
||||
self._find_chunks()
|
||||
|
||||
try:
|
||||
# Get sha1 dict and check if it is in dictionary of data chunks
|
||||
chunk_dict = self._content["picture"]["frameArray"][0]["frame"]
|
||||
if (
|
||||
chunk_dict["metadataRef"] in self._chunks
|
||||
and chunk_dict["imageRef"] in self._chunks
|
||||
and chunk_dict["privateMetadataRef"] in self._chunks
|
||||
):
|
||||
if not self._meta_only:
|
||||
# Read raw image data byte buffer
|
||||
data_pos, size = self._chunks[chunk_dict["imageRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
self.raw_image_data = self._file.read(size)
|
||||
|
||||
# Read meta data
|
||||
data_pos, size = self._chunks[chunk_dict["metadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
metadata = self._file.read(size)
|
||||
# Add metadata to meta data dict
|
||||
self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))
|
||||
|
||||
# Read private metadata
|
||||
data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
serial_numbers = self._file.read(size)
|
||||
self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
|
||||
# Add private metadata to meta data dict
|
||||
self.metadata["privateMetadata"] = self.serial_numbers
|
||||
|
||||
except KeyError:
|
||||
raise RuntimeError("The specified file is not a valid LFP file.")
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._file
|
||||
del self._data
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images. Can be np.inf
|
||||
return 1
|
||||
|
||||
def _find_header(self):
|
||||
"""
|
||||
Checks if file has correct header and skip it.
|
||||
"""
|
||||
file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01"
|
||||
|
||||
# Read and check header of file
|
||||
header = self._file.read(HEADER_LENGTH)
|
||||
if header != file_header:
|
||||
raise RuntimeError("The LFP file header is invalid.")
|
||||
|
||||
# Read first bytes to skip header
|
||||
self._file.read(SIZE_LENGTH)
|
||||
|
||||
def _find_chunks(self):
|
||||
"""
|
||||
Gets start position and size of data chunks in file.
|
||||
"""
|
||||
chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
|
||||
for i in range(0, DATA_CHUNKS_F01):
|
||||
data_pos, size, sha1 = self._get_chunk(chunk_header)
|
||||
self._chunks[sha1] = (data_pos, size)
|
||||
|
||||
def _find_meta(self):
|
||||
"""
|
||||
Gets a data chunk that contains information over content
|
||||
of other data chunks.
|
||||
"""
|
||||
meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
|
||||
data_pos, size, sha1 = self._get_chunk(meta_header)
|
||||
|
||||
# Get content
|
||||
self._file.seek(data_pos, 0)
|
||||
data = self._file.read(size)
|
||||
self._content = json.loads(data.decode("ASCII"))
|
||||
data = self._file.read(5) # Skip 5
|
||||
|
||||
def _get_chunk(self, header):
|
||||
"""
|
||||
Checks if chunk has correct header and skips it.
|
||||
Finds start position and length of next chunk and reads
|
||||
sha1-string that identifies the following data chunk.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
header : bytes
|
||||
Byte string that identifies start of chunk.
|
||||
|
||||
Returns
|
||||
-------
|
||||
data_pos : int
|
||||
Start position of data chunk in file.
|
||||
size : int
|
||||
Size of data chunk.
|
||||
sha1 : str
|
||||
Sha1 value of chunk.
|
||||
"""
|
||||
# Read and check header of chunk
|
||||
header_chunk = self._file.read(HEADER_LENGTH)
|
||||
if header_chunk != header:
|
||||
raise RuntimeError("The LFP chunk header is invalid.")
|
||||
|
||||
data_pos = None
|
||||
sha1 = None
|
||||
|
||||
# Read size
|
||||
size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
|
||||
if size > 0:
|
||||
# Read sha1
|
||||
sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
|
||||
# Skip fixed null chars
|
||||
self._file.read(PADDING_LENGTH)
|
||||
# Find start of data and skip data
|
||||
data_pos = self._file.tell()
|
||||
self._file.seek(size, 1)
|
||||
# Skip extra null chars
|
||||
ch = self._file.read(1)
|
||||
while ch == b"\0":
|
||||
ch = self._file.read(1)
|
||||
self._file.seek(-1, 1)
|
||||
|
||||
return data_pos, size, sha1
|
||||
|
||||
def _get_data(self, index):
    """Return the image array and metadata for *index*.

    The lfp file holds exactly one dataset, so only ``0`` or ``None``
    are accepted.
    """
    if index not in (0, None):
        raise IndexError("Lytro lfp file contains only one dataset")

    if self._meta_only:
        # Metadata-only mode: no pixel data is decoded.
        im = np.array([])
    else:
        # Interpret the raw byte buffer as uint8, widen to uint16, and
        # unpack the packed sensor bits.
        raw = np.frombuffer(self.raw_image_data, dtype=np.uint8)
        im = LytroF01RawFormat.rearrange_bits(raw.astype(np.uint16))

    return im, self.metadata
|
||||
|
||||
def _get_meta_data(self, index):
    """Return metadata for *index*; ``None`` yields the global metadata."""
    if index is not None and index != 0:
        raise IndexError("Lytro meta data file contains only one dataset")

    return self.metadata
|
||||
85
.CondaPkg/env/Lib/site-packages/imageio/plugins/npz.py
vendored
Normal file
85
.CondaPkg/env/Lib/site-packages/imageio/plugins/npz.py
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read/Write NPZ files.
|
||||
|
||||
Backend: `Numpy <https://numpy.org/doc/stable/reference/generated/numpy.savez.html>`_
|
||||
|
||||
NPZ is a file format by numpy that provides storage of array data using gzip
|
||||
compression. This imageio plugin supports data of any shape, and also supports
|
||||
multiple images per file. However, the npz format does not provide streaming;
|
||||
all data is read/written at once. Further, there is no support for meta data.
|
||||
|
||||
See the BSDF format for a similar (but more fully featured) format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
Notes
|
||||
-----
|
||||
This format is not available on Pypy.
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class NpzFormat(Format):
    """See :mod:`imageio.plugins.npz`"""

    def _can_read(self, request):
        # We support any kind of image data; decide by extension only.
        return request.extension in self.extensions

    def _can_write(self, request):
        # We support any kind of image data; decide by extension only.
        return request.extension in self.extensions

    # -- reader

    class Reader(Format.Reader):
        def _open(self):
            # np.load on a file object yields a lazy NpzFile archive.
            self._npz = np.load(self.request.get_file())
            assert isinstance(self._npz, np.lib.npyio.NpzFile)
            # Order member names by the part after the final underscore
            # (numpy names unnamed members "arr_0", "arr_1", ...).
            self._names = sorted(self._npz.files, key=lambda x: x.split("_")[-1])

        def _close(self):
            self._npz.close()

        def _get_length(self):
            # Number of arrays stored in the archive.
            return len(self._names)

        def _get_data(self, index):
            if index < 0 or index >= len(self._names):
                # Fixed typo: the format is "npz", not "nzp".
                raise IndexError("Index out of range while reading from npz")
            im = self._npz[self._names[index]]
            # npz has no metadata, so an empty dict accompanies the array.
            return im, {}

        def _get_meta_data(self, index):
            raise RuntimeError("The npz format does not support meta data.")

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            # Npz cannot be streamed, so buffer all images and write
            # them out in one go when the writer is closed.
            self._images = []

        def _close(self):
            # Write everything at once, compressed.
            np.savez_compressed(self.request.get_file(), *self._images)

        def _append_data(self, im, meta):
            self._images.append(im)  # discard meta data (unsupported)

        def set_meta_data(self, meta):
            raise RuntimeError("The npz format does not support meta data.")
|
||||
313
.CondaPkg/env/Lib/site-packages/imageio/plugins/opencv.py
vendored
Normal file
313
.CondaPkg/env/Lib/site-packages/imageio/plugins/opencv.py
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
"""Read/Write images using OpenCV.
|
||||
|
||||
Backend Library: `OpenCV <https://opencv.org/>`_
|
||||
|
||||
This plugin wraps OpenCV (also known as ``cv2``), a popular image processing
|
||||
library. Currently, it exposes OpenCVs image reading capability (no video or GIF
|
||||
support yet); however, this may be added in future releases.
|
||||
|
||||
Methods
|
||||
-------
|
||||
.. note::
|
||||
Check the respective function for a list of supported kwargs and their
|
||||
documentation.
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
OpenCVPlugin.read
|
||||
OpenCVPlugin.iter
|
||||
OpenCVPlugin.write
|
||||
OpenCVPlugin.properties
|
||||
OpenCVPlugin.metadata
|
||||
|
||||
Pixel Formats (Colorspaces)
|
||||
---------------------------
|
||||
|
||||
OpenCV is known to process images in BGR; however, most of the python ecosystem
|
||||
(in particular matplotlib and other pydata libraries) uses RGB. As such,
|
||||
images are converted to RGB, RGBA, or grayscale (where applicable) by default.
|
||||
|
||||
"""
|
||||
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from ..core import Request
|
||||
from ..core.request import URI_BYTES, InitializationError, IOMode
|
||||
from ..core.v3_plugin_api import ImageProperties, PluginV3
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
class OpenCVPlugin(PluginV3):
    """V3 plugin that reads and writes images via OpenCV's imgcodecs.

    See the module docstring of :mod:`imageio.plugins.opencv` for an
    overview, including the default BGR->RGB conversion policy.
    """

    def __init__(self, request: Request) -> None:
        super().__init__(request)

        # OpenCV only operates on on-disk files, so materialize the
        # resource as a local filename.
        self.file_handle = request.get_local_filename()
        # `filename` is only used for human-readable error messages.
        if request._uri_type is URI_BYTES:
            self.filename = "<bytes>"
        else:
            self.filename = request.raw_uri

        mode = request.mode.io_mode
        # Fail fast if OpenCV has no codec for this resource.
        if mode == IOMode.read and not cv2.haveImageReader(self.file_handle):
            raise InitializationError(f"OpenCV can't read `{self.filename}`.")
        elif mode == IOMode.write and not cv2.haveImageWriter(self.file_handle):
            raise InitializationError(f"OpenCV can't write to `{self.filename}`.")

    def read(
        self,
        *,
        index: int = None,
        colorspace: Union[int, str] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> np.ndarray:
        """Read an image from the ImageResource.

        Parameters
        ----------
        index : int, Ellipsis
            If int, read the index-th image from the ImageResource. If ``...``,
            read all images from the ImageResource and stack them along a new,
            prepended, batch dimension. If None (default), use ``index=0`` if
            the image contains exactly one image and ``index=...`` otherwise.
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs
            `conversion flags
            <https://docs.opencv.org/4.x/d8/d01/group__imgproc__color__conversions.html>`_
            and use it for conversion. If str, convert the image into the given
            colorspace. Possible string values are: ``"RGB"``, ``"BGR"``,
            ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56>`_
            for details.

        Returns
        -------
        ndimage : np.ndarray
            The decoded image as a numpy array.

        """

        if index is None:
            # Default policy: a single-image file reads as one image,
            # a multi-image file reads as a batch.
            n_images = cv2.imcount(self.file_handle, flags)
            index = 0 if n_images == 1 else ...

        if index is ...:
            # Read all frames at once.
            retval, img = cv2.imreadmulti(self.file_handle, flags=flags)
            is_batch = True
        else:
            # Read exactly one frame starting at `index`.
            retval, img = cv2.imreadmulti(self.file_handle, index, 1, flags=flags)
            is_batch = False

        if retval is False:
            raise ValueError(f"Could not read index `{index}` from `{self.filename}`.")

        # Infer the source colorspace from the first frame's layout and
        # pick the default output colorspace accordingly.
        if img[0].ndim == 2:
            in_colorspace = "GRAY"
            out_colorspace = colorspace or "GRAY"
        elif img[0].shape[-1] == 4:
            in_colorspace = "BGRA"
            out_colorspace = colorspace or "RGBA"
        else:
            in_colorspace = "BGR"
            out_colorspace = colorspace or "RGB"

        if isinstance(colorspace, int):
            # An int colorspace is used verbatim as a cv2 conversion flag.
            cvt_space = colorspace
        elif in_colorspace == out_colorspace.upper():
            # No conversion needed.
            cvt_space = None
        else:
            # Look up the matching cv2.COLOR_<IN>2<OUT> constant.
            out_colorspace = out_colorspace.upper()
            cvt_space = getattr(cv2, f"COLOR_{in_colorspace}2{out_colorspace}")

        if cvt_space is not None:
            img = np.stack([cv2.cvtColor(x, cvt_space) for x in img])
        else:
            img = np.stack(img)

        return img if is_batch else img[0]

    def iter(
        self,
        colorspace: Union[int, str] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> np.ndarray:
        """Yield images from the ImageResource.

        Parameters
        ----------
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs
            `conversion flags
            <https://docs.opencv.org/4.x/d8/d01/group__imgproc__color__conversions.html>`_
            and use it for conversion. If str, convert the image into the given
            colorspace. Possible string values are: ``"RGB"``, ``"BGR"``,
            ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56>`_
            for details.

        Yields
        ------
        ndimage : np.ndarray
            The decoded image as a numpy array.

        """
        # NOTE(review): imcount is called without `flags` here, unlike in
        # read()/properties() — confirm this is intended.
        for idx in range(cv2.imcount(self.file_handle)):
            yield self.read(index=idx, flags=flags, colorspace=colorspace)

    def write(
        self,
        ndimage: Union[ArrayLike, List[ArrayLike]],
        is_batch: bool = False,
        params: List[int] = None,
    ) -> Optional[bytes]:
        """Save an ndimage in the ImageResource.

        Parameters
        ----------
        ndimage : ArrayLike, List[ArrayLike]
            The image data that will be written to the file. It is either a
            single image, a batch of images, or a list of images.
        is_batch : bool
            If True, the provided ndimage is a batch of images. If False (default), the
            provided ndimage is a single image. If the provided ndimage is a list of images,
            this parameter has no effect.
        params : List[int]
            A list of parameters that will be passed to OpenCVs imwrite or
            imwritemulti functions. Possible values are documented in the
            `OpenCV documentation
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce>`_.

        Returns
        -------
        encoded_image : bytes, None
            If the ImageResource is ``"<bytes>"`` the call to write returns the
            encoded image as a bytes string. Otherwise it returns None.

        """

        # Normalize the input to a batch (first axis = frames).
        if isinstance(ndimage, list):
            ndimage = np.stack(ndimage, axis=0)
        elif not is_batch:
            ndimage = ndimage[None, ...]

        # Determine channel count from the first frame's layout.
        if ndimage[0].ndim == 2:
            n_channels = 1
        else:
            n_channels = ndimage[0].shape[-1]

        # Convert from the RGB-family order used by the python ecosystem
        # back to OpenCV's BGR-family order; grayscale passes through.
        if n_channels == 1:
            ndimage_cv2 = [x for x in ndimage]
        elif n_channels == 4:
            ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGBA2BGRA) for x in ndimage]
        else:
            ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in ndimage]

        retval = cv2.imwritemulti(self.file_handle, ndimage_cv2, params)

        if retval is False:
            # not sure what scenario would trigger this, but
            # it can occur theoretically.
            raise IOError("OpenCV failed to write.")  # pragma: no cover

        # When the target is an in-memory resource, return the encoded
        # bytes that were written to the temporary local file.
        if self.request._uri_type == URI_BYTES:
            return Path(self.file_handle).read_bytes()

    def properties(
        self,
        index: int = None,
        colorspace: Union[int, str] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> ImageProperties:
        """Standardized image metadata.

        Parameters
        ----------
        index : int, Ellipsis
            If int, get the properties of the index-th image in the
            ImageResource. If ``...``, get the properties of the image stack
            that contains all images. If None (default), use ``index=0`` if the
            image contains exactly one image and ``index=...`` otherwise.
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs
            `conversion flags
            <https://docs.opencv.org/4.x/d8/d01/group__imgproc__color__conversions.html>`_
            and use it for conversion. If str, convert the image into the given
            colorspace. Possible string values are: ``"RGB"``, ``"BGR"``,
            ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56>`_
            for details.

        Returns
        -------
        props : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        Reading properties with OpenCV involves decoding pixel data, because
        OpenCV doesn't provide a direct way to access metadata.

        """

        if index is None:
            # Same default policy as read(): batch iff multiple images.
            n_images = cv2.imcount(self.file_handle, flags)
            is_batch = n_images > 1
        elif index is Ellipsis:
            n_images = cv2.imcount(self.file_handle, flags)
            is_batch = True
        else:
            is_batch = False

        # unfortunately, OpenCV doesn't allow reading shape without reading pixel data
        if is_batch:
            # assumes all frames share the shape/dtype of frame 0
            img = self.read(index=0, flags=flags, colorspace=colorspace)
            return ImageProperties(
                shape=(n_images, *img.shape),
                dtype=img.dtype,
                n_images=n_images,
                is_batch=True,
            )

        img = self.read(index=index, flags=flags, colorspace=colorspace)
        return ImageProperties(shape=img.shape, dtype=img.dtype, is_batch=False)

    def metadata(
        self, index: int = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Format-specific metadata.

        .. warning::
            OpenCV does not support reading metadata. When called, this
            function emits a ``UserWarning`` and returns an empty dict.

        Parameters
        ----------
        index : int
            This parameter has no effect.
        exclude_applied : bool
            This parameter has no effect.

        """

        warnings.warn("OpenCV does not support reading metadata.", UserWarning)
        return dict()
|
||||
613
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillow.py
vendored
Normal file
613
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillow.py
vendored
Normal file
@@ -0,0 +1,613 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write images using Pillow/PIL.
|
||||
|
||||
Backend Library: `Pillow <https://pillow.readthedocs.io/en/stable/>`_
|
||||
|
||||
Plugin that wraps the Pillow library. Pillow is a friendly fork of PIL
|
||||
(Python Image Library) and supports reading and writing of common formats (jpg,
|
||||
png, gif, tiff, ...). For, the complete list of features and supported formats
|
||||
please refer to pillows official docs (see the Backend Library link).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : Request
|
||||
A request object representing the resource to be operated on.
|
||||
|
||||
Methods
|
||||
-------
|
||||
|
||||
.. autosummary::
|
||||
:toctree: _plugins/pillow
|
||||
|
||||
PillowPlugin.read
|
||||
PillowPlugin.write
|
||||
PillowPlugin.iter
|
||||
PillowPlugin.get_meta
|
||||
|
||||
"""
|
||||
|
||||
import sys
|
||||
import warnings
|
||||
from io import BytesIO
|
||||
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union, cast
|
||||
|
||||
import numpy as np
|
||||
from PIL import ExifTags, GifImagePlugin, Image, ImageSequence, UnidentifiedImageError
|
||||
from PIL import __version__ as pil_version # type: ignore
|
||||
|
||||
from ..core.request import URI_BYTES, InitializationError, IOMode, Request
|
||||
from ..core.v3_plugin_api import ImageProperties, PluginV3
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
def pillow_version() -> Tuple[int, ...]:
    """Return the installed Pillow version as a tuple of ints.

    E.g. ``"10.1.0"`` becomes ``(10, 1, 0)``. The annotation is
    ``Tuple[int, ...]`` (variable length), not ``Tuple[int]`` (a
    1-tuple), since the version string has several components.
    """
    return tuple(int(x) for x in pil_version.split("."))
|
||||
|
||||
|
||||
def _exif_orientation_transform(orientation: int, mode: str) -> Callable:
    """Return a callable that maps an array from the given EXIF
    orientation into the standard (upright) orientation.

    Raises ``KeyError`` for orientation values outside 1-8.
    """
    # Flip along -2 when a trailing color-channel axis exists,
    # along -1 for single-band (grayscale) modes.
    axis = -2 if Image.getmodebands(mode) > 1 else -1

    transforms = {
        1: lambda x: x,  # already upright
        2: lambda x: np.flip(x, axis=axis),  # mirrored horizontally
        3: lambda x: np.rot90(x, k=2),  # rotated 180
        4: lambda x: np.flip(x, axis=axis - 1),  # mirrored vertically
        5: lambda x: np.flip(np.rot90(x, k=3), axis=axis),  # mirrored + 90 CW
        6: lambda x: np.rot90(x, k=3),  # rotated 90 CW
        7: lambda x: np.flip(np.rot90(x, k=1), axis=axis),  # mirrored + 90 CCW
        8: lambda x: np.rot90(x, k=1),  # rotated 90 CCW
    }

    return transforms[orientation]
|
||||
|
||||
|
||||
class PillowPlugin(PluginV3):
|
||||
def __init__(self, request: Request) -> None:
    """Instantiate a new Pillow Plugin Object

    Parameters
    ----------
    request : {Request}
        A request object representing the resource to be operated on.

    """

    super().__init__(request)

    # Register HEIF opener for Pillow (optional dependency)
    try:
        from pillow_heif import register_heif_opener
    except ImportError:
        pass
    else:
        register_heif_opener()

    # Register AVIF opener for Pillow (optional dependency)
    try:
        from pillow_heif import register_avif_opener
    except ImportError:
        pass
    else:
        register_avif_opener()

    self._image: Image = None
    self.images_to_write = []

    if request.mode.io_mode == IOMode.read:
        try:
            with Image.open(request.get_file()):
                # Check if it is generally possible to read the image.
                # This will not read any data and merely try to find a
                # compatible pillow plugin (ref: the pillow docs).
                pass
        except UnidentifiedImageError:
            if request._uri_type == URI_BYTES:
                raise InitializationError(
                    "Pillow can not read the provided bytes."
                ) from None
            else:
                raise InitializationError(
                    f"Pillow can not read {request.raw_uri}."
                ) from None

        self._image = Image.open(self._request.get_file())
    else:
        self.save_args = {}

        extension = self.request.extension or self.request.format_hint
        if extension is None:
            warnings.warn(
                # fixed typo: "supress" -> "suppress"
                "Can't determine file format to write as. You _must_"
                " set `format` during write or the call will fail. Use "
                "`extension` to suppress this warning. ",
                UserWarning,
            )
            return

        # Try the cheap preinit first; fall back to the full init before
        # concluding the extension is unsupported. (Renamed from the
        # misspelled local `tirage`.)
        loaders = [Image.preinit, Image.init]
        for format_loader in loaders:
            format_loader()
            if extension in Image.registered_extensions().keys():
                return

        raise InitializationError(
            f"Pillow can not write `{extension}` files."
        ) from None
|
||||
|
||||
def close(self) -> None:
    """Flush any buffered frames, release the pillow image, and
    finalize the underlying request."""
    # Writes are buffered in images_to_write; write them out now.
    self._flush_writer()

    # Only set in read mode; may also be None if init warned and bailed.
    if self._image:
        self._image.close()

    self._request.finish()
|
||||
|
||||
def read(
    self,
    *,
    index: int = None,
    mode: str = None,
    rotate: bool = False,
    apply_gamma: bool = False,
    writeable_output: bool = True,
    pilmode: str = None,
    exifrotate: bool = None,
    as_gray: bool = None,
) -> np.ndarray:
    """
    Parses the given URI and creates a ndarray from it.

    Parameters
    ----------
    index : int
        If the ImageResource contains multiple ndimages, and index is an
        integer, select the index-th ndimage from among them and return it.
        If index is an ellipsis (...), read all ndimages in the file and
        stack them along a new batch dimension and return them. If index is
        None, this plugin reads the first image of the file (index=0) unless
        the image is a GIF or APNG, in which case all images are read
        (index=...).
    mode : str
        Convert the image to the given mode before returning it. If None,
        the mode will be left unchanged. Possible modes can be found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    rotate : bool
        If True and the image contains an EXIF orientation tag,
        apply the orientation before returning the ndimage.
    apply_gamma : bool
        If True and the image contains metadata about gamma, apply gamma
        correction to the image.
    writable_output : bool
        If True, ensure that the image is writable before returning it to
        the user. This incurs a full copy of the pixel data if the data
        served by pillow is read-only. Consequentially, setting this flag to
        False improves performance for some images.
    pilmode : str
        Deprecated, use `mode` instead.
    exifrotate : bool
        Deprecated, use `rotate` instead.
    as_gray : bool
        Deprecated. Exists to raise a constructive error message.

    Returns
    -------
    ndimage : ndarray
        A numpy array containing the loaded image data

    Notes
    -----
    If you read a paletted image (e.g. GIF) then the plugin will apply the
    palette by default. Should you wish to read the palette indices of each
    pixel use ``mode="P"``. The corresponding color palette can be found in
    the image's metadata using the ``palette`` key when metadata is
    extracted using the ``exclude_applied=False`` kwarg. The latter is
    needed, as palettes are applied by default and hence excluded by default
    to keep metadata and pixel data consistent.

    """

    # Deprecated aliases override their replacements when given.
    if pilmode is not None:
        warnings.warn(
            "`pilmode` is deprecated. Use `mode` instead.", DeprecationWarning
        )
        mode = pilmode

    if exifrotate is not None:
        warnings.warn(
            "`exifrotate` is deprecated. Use `rotate` instead.", DeprecationWarning
        )
        rotate = exifrotate

    if as_gray is not None:
        raise TypeError(
            "The keyword `as_gray` is no longer supported."
            "Use `mode='F'` for a backward-compatible result, or "
            " `mode='L'` for an integer-valued result."
        )

    if self._image.format == "GIF":
        # Converting GIF P frames to RGB
        # https://github.com/python-pillow/Pillow/pull/6150
        GifImagePlugin.LOADING_STRATEGY = (
            GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
        )

    # Default index policy: GIF/APNG read all frames, otherwise frame 0.
    if index is None:
        if self._image.format == "GIF":
            index = Ellipsis
        elif self._image.custom_mimetype == "image/apng":
            index = Ellipsis
        else:
            index = 0

    if isinstance(index, int):
        # will raise IO error if index >= number of frames in image
        self._image.seek(index)
        image = self._apply_transforms(
            self._image, mode, rotate, apply_gamma, writeable_output
        )
    else:
        # index is ...: read every frame and stack along a new axis 0.
        iterator = self.iter(
            mode=mode,
            rotate=rotate,
            apply_gamma=apply_gamma,
            writeable_output=writeable_output,
        )
        image = np.stack([im for im in iterator], axis=0)

    return image
|
||||
|
||||
def iter(
    self,
    *,
    mode: str = None,
    rotate: bool = False,
    apply_gamma: bool = False,
    writeable_output: bool = True,
) -> Iterator[np.ndarray]:
    """
    Iterate over all ndimages/frames in the URI

    Parameters
    ----------
    mode : {str, None}
        Convert the image to the given mode before returning it. If None,
        the mode will be left unchanged. Possible modes can be found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    rotate : {bool}
        If set to ``True`` and the image contains an EXIF orientation tag,
        apply the orientation before returning the ndimage.
    apply_gamma : {bool}
        If ``True`` and the image contains metadata about gamma, apply gamma
        correction to the image.
    writable_output : bool
        If True, ensure that the image is writable before returning it to
        the user. This incurs a full copy of the pixel data if the data
        served by pillow is read-only. Consequentially, setting this flag to
        False improves performance for some images.
    """

    # ImageSequence.Iterator walks every frame of the pillow image;
    # each frame goes through the same post-processing as read().
    for frame in ImageSequence.Iterator(self._image):
        yield self._apply_transforms(
            frame, mode, rotate, apply_gamma, writeable_output
        )
|
||||
|
||||
def _apply_transforms(
    self, image, mode, rotate, apply_gamma, writeable_output
) -> np.ndarray:
    """Convert a pillow frame to ndarray and apply mode conversion,
    EXIF rotation, gamma correction, and writability as requested."""
    if mode is not None:
        image = image.convert(mode)
    elif image.mode == "P":
        # adjust for pillow9 changes
        # see: https://github.com/python-pillow/Pillow/issues/5929
        image = image.convert(image.palette.mode)
    elif image.format == "PNG" and image.mode == "I":
        # 16-bit grayscale PNG handling differs across pillow versions.
        major, minor, patch = pillow_version()

        if sys.byteorder == "little":
            desired_mode = "I;16"
        else:  # pragma: no cover
            # can't test big-endian in GH-Actions
            desired_mode = "I;16B"

        if major < 10:  # pragma: no cover
            warnings.warn(
                "Loading 16-bit (uint16) PNG as int32 due to limitations "
                "in pillow's PNG decoder. This will be fixed in a future "
                "version of pillow which will make this warning dissapear.",
                UserWarning,
            )
        elif minor < 1:  # pragma: no cover
            # pillow<10.1.0 can directly decode into 16-bit grayscale
            image.mode = desired_mode
        else:
            # pillow >= 10.1.0
            image = image.convert(desired_mode)

    image = np.asarray(image)

    # Metadata of the current frame drives rotation/gamma below.
    meta = self.metadata(index=self._image.tell(), exclude_applied=False)
    if rotate and "Orientation" in meta:
        transformation = _exif_orientation_transform(
            meta["Orientation"], self._image.mode
        )
        image = transformation(image)

    if apply_gamma and "gamma" in meta:
        gamma = float(meta["gamma"])
        scale = float(65536 if image.dtype == np.uint16 else 255)
        gain = 1.0
        # +0.4999 implements round-half-down before the np.round below.
        image = ((image / scale) ** gamma) * scale * gain + 0.4999
        # NOTE(review): the result is cast to uint8 even when the input
        # was uint16 — matches the code as written, but looks lossy for
        # 16-bit images; confirm intended.
        image = np.round(image).astype(np.uint8)

    if writeable_output and not image.flags["WRITEABLE"]:
        # Copy to obtain a writable array (pillow may serve read-only).
        image = np.array(image)

    return image
|
||||
|
||||
def write(
    self,
    ndimage: Union[ArrayLike, List[ArrayLike]],
    *,
    mode: str = None,
    format: str = None,
    is_batch: bool = None,
    **kwargs,
) -> Optional[bytes]:
    """
    Write an ndimage to the URI specified in path.

    If the URI points to a file on the current host and the file does not
    yet exist it will be created. If the file exists already, it will be
    appended if possible; otherwise, it will be replaced.

    If necessary, the image is broken down along the leading dimension to
    fit into individual frames of the chosen format. If the format doesn't
    support multiple frames, and IOError is raised.

    Parameters
    ----------
    image : ndarray or list
        The ndimage to write. If a list is given each element is expected to
        be an ndimage.
    mode : str
        Specify the image's color format. If None (default), the mode is
        inferred from the array's shape and dtype. Possible modes can be
        found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    format : str
        Optional format override. If omitted, the format to use is
        determined from the filename extension. If a file object was used
        instead of a filename, this parameter must always be used.
    is_batch : bool
        Explicitly tell the writer that ``image`` is a batch of images
        (True) or not (False). If None, the writer will guess this from the
        provided ``mode`` or ``image.shape``. While the latter often works,
        it may cause problems for small images due to aliasing of spatial
        and color-channel axes.
    kwargs : ...
        Extra arguments to pass to pillow. If a writer doesn't recognise an
        option, it is silently ignored. The available options are described
        in pillow's `image format documentation
        <https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html>`_
        for each writer.

    Notes
    -----
    When writing batches of very narrow (2-4 pixels wide) gray images set
    the ``mode`` explicitly to avoid the batch being identified as a colored
    image.

    """
    if "fps" in kwargs:
        warnings.warn(
            "The keyword `fps` is no longer supported. Use `duration`"
            "(in ms) instead, e.g. `fps=50` == `duration=20` (1000 * 1/50).",
            DeprecationWarning,
        )
        # NOTE(review): `fps` stays in kwargs and is later merged into
        # save_args; pillow appears to ignore unknown options — confirm.
        kwargs["duration"] = 1000 * 1 / kwargs.get("fps")

    if isinstance(ndimage, list):
        # A list of frames is always a batch.
        ndimage = np.stack(ndimage, axis=0)
        is_batch = True
    else:
        ndimage = np.asarray(ndimage)

    # check if ndimage is a batch of frames/pages (e.g. for writing GIF)
    # if mode is given, use it; otherwise fall back to image.ndim only
    if is_batch is not None:
        pass
    elif mode is not None:
        is_batch = (
            ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2
        )
    elif ndimage.ndim == 2:
        is_batch = False
    elif ndimage.ndim == 3 and ndimage.shape[-1] == 1:
        raise ValueError("Can't write images with one color channel.")
    elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]:
        # Note: this makes a channel-last assumption
        is_batch = False
    else:
        is_batch = True

    if not is_batch:
        # Normalize to a batch of one frame.
        ndimage = ndimage[None, ...]

    # Convert each frame to a pillow image and buffer it; actual file
    # output happens in _flush_writer.
    for frame in ndimage:
        pil_frame = Image.fromarray(frame, mode=mode)
        if "bits" in kwargs:
            pil_frame = pil_frame.quantize(colors=2 ** kwargs["bits"])
        self.images_to_write.append(pil_frame)

    if (
        format is not None
        and "format" in self.save_args
        and self.save_args["format"] != format
    ):
        old_format = self.save_args["format"]
        warnings.warn(
            "Changing the output format during incremental"
            " writes is strongly discouraged."
            f" Was `{old_format}`, is now `{format}`.",
            UserWarning,
        )

    extension = self.request.extension or self.request.format_hint
    self.save_args["format"] = format or Image.registered_extensions()[extension]
    self.save_args.update(kwargs)

    # when writing to `bytes` we flush instantly
    result = None
    if self._request._uri_type == URI_BYTES:
        self._flush_writer()
        file = cast(BytesIO, self._request.get_file())
        result = file.getvalue()

    return result
|
||||
|
||||
def _flush_writer(self):
    """Write all buffered PIL frames to the request's file and reset state.

    No-op when nothing has been buffered. When more than one frame is
    pending, the extra frames are attached via Pillow's ``save_all`` /
    ``append_images`` mechanism (e.g. animated GIF/APNG).
    """
    if not self.images_to_write:
        return

    first_frame = self.images_to_write.pop(0)

    # Remaining frames (if any) ride along behind the primary image.
    if self.images_to_write:
        self.save_args["save_all"] = True
        self.save_args["append_images"] = self.images_to_write

    first_frame.save(self._request.get_file(), **self.save_args)

    # Reset buffers so a subsequent write starts from a clean slate.
    self.images_to_write.clear()
    self.save_args.clear()
|
||||
def get_meta(self, *, index=0) -> Dict[str, Any]:
    """Legacy (v2-style) metadata accessor.

    Equivalent to :meth:`metadata` with ``exclude_applied=False``, i.e.
    fields that were applied while reading are included.
    """
    meta = self.metadata(index=index, exclude_applied=False)
    return meta
|
||||
def metadata(
    self, index: Optional[int] = None, exclude_applied: bool = True
) -> Dict[str, Any]:
    """Read ndimage metadata.

    Parameters
    ----------
    index : {integer, None}
        If the ImageResource contains multiple ndimages, and index is an
        integer, select the index-th ndimage from among them and return its
        metadata. If index is an ellipsis (...), read and return global
        metadata. If index is None, this plugin reads metadata from the
        first image of the file (index=0) unless the image is a GIF or APNG,
        in which case global metadata is read (index=...).
    exclude_applied : bool
        If True, exclude metadata fields that are applied to the image while
        reading. For example, if the binary data contains a rotation flag,
        the image is rotated by default and the rotation flag is excluded
        from the metadata to avoid confusion.

    Returns
    -------
    metadata : dict
        A dictionary of format-specific metadata.

    """

    # Resolve the default: GIF and APNG report file-global metadata,
    # every other format defaults to the first frame.
    if index is None:
        if self._image.format == "GIF":
            index = Ellipsis
        elif self._image.custom_mimetype == "image/apng":
            index = Ellipsis
        else:
            index = 0

    # Seek only when needed; for index=Ellipsis the current frame is kept.
    if isinstance(index, int) and self._image.tell() != index:
        self._image.seek(index)

    # Start from Pillow's per-image info dict and add standard fields.
    metadata = self._image.info.copy()
    metadata["mode"] = self._image.mode
    metadata["shape"] = self._image.size

    # The palette is normally applied to pixel data during reading, so it
    # only appears here when applied fields are requested.
    if self._image.mode == "P" and not exclude_applied:
        metadata["palette"] = np.asarray(tuple(self._image.palette.colors.keys()))

    # Translate numeric EXIF tag ids to readable names; tags without a
    # known name all collapse onto "unknown" and are dropped.
    if self._image.getexif():
        exif_data = {
            ExifTags.TAGS.get(key, "unknown"): value
            for key, value in dict(self._image.getexif()).items()
        }
        exif_data.pop("unknown", None)
        metadata.update(exif_data)

    # Orientation is one of the "applied while reading" fields.
    if exclude_applied:
        metadata.pop("Orientation", None)

    return metadata
||||
|
||||
def properties(self, index: Optional[int] = None) -> ImageProperties:
    """Standardized ndimage metadata

    Parameters
    ----------
    index : int
        If the ImageResource contains multiple ndimages, and index is an
        integer, select the index-th ndimage from among them and return its
        properties. If index is an ellipsis (...), read and return the
        properties of all ndimages in the file stacked along a new batch
        dimension. If index is None, this plugin reads and returns the
        properties of the first image (index=0) unless the image is a GIF or
        APNG, in which case it reads and returns the properties all images
        (index=...).

    Returns
    -------
    properties : ImageProperties
        A dataclass filled with standardized image metadata.

    Notes
    -----
    This does not decode pixel data and is fast for large images.

    """

    # Resolve the default: GIF/APNG are treated as a batch of frames.
    if index is None:
        if self._image.format == "GIF":
            index = Ellipsis
        elif self._image.custom_mimetype == "image/apng":
            index = Ellipsis
        else:
            index = 0

    # For batch mode the first frame is representative of all frames.
    if index is Ellipsis:
        self._image.seek(0)
    else:
        self._image.seek(index)

    if self._image.mode == "P":
        # mode of palette images is determined by their palette
        mode = self._image.palette.mode
    else:
        mode = self._image.mode

    width: int = self._image.width
    height: int = self._image.height
    shape: Tuple[int, ...] = (height, width)

    n_frames: Optional[int] = None
    if index is ...:
        # Prepend a batch axis covering every frame in the file.
        n_frames = getattr(self._image, "n_frames", 1)
        shape = (n_frames, *shape)

    # Decode a 1x1 dummy image in the same mode to learn the dtype and
    # channel count Pillow will produce -- avoids decoding the real data.
    dummy = np.asarray(Image.new(mode, (1, 1)))
    pil_shape: Tuple[int, ...] = dummy.shape
    if len(pil_shape) > 2:
        shape = (*shape, *pil_shape[2:])

    return ImageProperties(
        shape=shape,
        dtype=dummy.dtype,
        n_images=n_frames,
        is_batch=index is Ellipsis,
    )
|
||||
1053
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillow_info.py
vendored
Normal file
1053
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillow_info.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
823
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillow_legacy.py
vendored
Normal file
823
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillow_legacy.py
vendored
Normal file
@@ -0,0 +1,823 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write images using pillow/PIL (legacy).
|
||||
|
||||
Backend Library: `Pillow <https://pillow.readthedocs.io/en/stable/>`_
|
||||
|
||||
Pillow is a friendly fork of PIL (Python Image Library) and supports
|
||||
reading and writing of common formats (jpg, png, gif, tiff, ...). While
|
||||
these docs provide an overview of some of its features, pillow is
|
||||
constantly improving. Hence, the complete list of features can be found
|
||||
in pillows official docs (see the Backend Library link).
|
||||
|
||||
Parameters for Reading
|
||||
----------------------
|
||||
pilmode : str
|
||||
(Available for all formats except GIF-PIL)
|
||||
From the Pillow documentation:
|
||||
|
||||
* 'L' (8-bit pixels, grayscale)
|
||||
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
|
||||
* 'RGB' (3x8-bit pixels, true color)
|
||||
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
|
||||
* 'CMYK' (4x8-bit pixels, color separation)
|
||||
* 'YCbCr' (3x8-bit pixels, color video format)
|
||||
* 'I' (32-bit signed integer pixels)
|
||||
* 'F' (32-bit floating point pixels)
|
||||
|
||||
PIL also provides limited support for a few special modes, including
|
||||
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
|
||||
(true color with premultiplied alpha).
|
||||
|
||||
When translating a color image to grayscale (mode 'L', 'I' or 'F'),
|
||||
the library uses the ITU-R 601-2 luma transform::
|
||||
|
||||
L = R * 299/1000 + G * 587/1000 + B * 114/1000
|
||||
as_gray : bool
|
||||
(Available for all formats except GIF-PIL)
|
||||
If True, the image is converted using mode 'F'. When `mode` is
|
||||
not None and `as_gray` is True, the image is first converted
|
||||
according to `mode`, and the result is then "flattened" using
|
||||
mode 'F'.
|
||||
ignoregamma : bool
|
||||
(Only available in PNG-PIL)
|
||||
Avoid gamma correction. Default True.
|
||||
exifrotate : bool
|
||||
(Only available in JPEG-PIL)
|
||||
Automatically rotate the image according to exif flag. Default True.
|
||||
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
optimize : bool
|
||||
(Only available in PNG-PIL)
|
||||
If present and true, instructs the PNG writer to make the output file
|
||||
as small as possible. This includes extra processing in order to find
|
||||
optimal encoder settings.
|
||||
transparency:
|
||||
(Only available in PNG-PIL)
|
||||
This option controls what color image to mark as transparent.
|
||||
dpi: tuple of two scalars
|
||||
(Only available in PNG-PIL)
|
||||
The desired dpi in each direction.
|
||||
pnginfo: PIL.PngImagePlugin.PngInfo
|
||||
(Only available in PNG-PIL)
|
||||
Object containing text tags.
|
||||
compress_level: int
|
||||
(Only available in PNG-PIL)
|
||||
ZLIB compression level, a number between 0 and 9: 1 gives best speed,
|
||||
9 gives best compression, 0 gives no compression at all. Default is 9.
|
||||
When ``optimize`` option is True ``compress_level`` has no effect
|
||||
(it is set to 9 regardless of a value passed).
|
||||
compression: int
|
||||
(Only available in PNG-PIL)
|
||||
Compatibility with the freeimage PNG format. If given, it overrides
|
||||
compress_level.
|
||||
icc_profile:
|
||||
(Only available in PNG-PIL)
|
||||
The ICC Profile to include in the saved file.
|
||||
bits (experimental): int
|
||||
(Only available in PNG-PIL)
|
||||
This option controls how many bits to store. If omitted,
|
||||
the PNG writer uses 8 bits (256 colors).
|
||||
quantize:
|
||||
(Only available in PNG-PIL)
|
||||
Compatibility with the freeimage PNG format. If given, it overrides
|
||||
bits. In this case, given as a number between 1-256.
|
||||
dictionary (experimental): dict
|
||||
(Only available in PNG-PIL)
|
||||
Set the ZLIB encoder dictionary.
|
||||
prefer_uint8: bool
|
||||
(Only available in PNG-PIL)
|
||||
Let the PNG writer truncate uint16 image arrays to uint8 if their values fall
|
||||
within the range [0, 255]. Defaults to true for legacy compatibility, however
|
||||
it is recommended to set this to false to avoid unexpected behavior when
|
||||
saving e.g. weakly saturated images.
|
||||
|
||||
quality : scalar
|
||||
(Only available in JPEG-PIL)
|
||||
The compression factor of the saved image (1..100), higher
|
||||
numbers result in higher quality but larger file size. Default 75.
|
||||
progressive : bool
|
||||
(Only available in JPEG-PIL)
|
||||
Save as a progressive JPEG file (e.g. for images on the web).
|
||||
Default False.
|
||||
optimize : bool
|
||||
(Only available in JPEG-PIL)
|
||||
On saving, compute optimal Huffman coding tables (can reduce a few
|
||||
percent of file size). Default False.
|
||||
dpi : tuple of int
|
||||
(Only available in JPEG-PIL)
|
||||
The pixel density, ``(x,y)``.
|
||||
icc_profile : object
|
||||
(Only available in JPEG-PIL)
|
||||
If present and true, the image is stored with the provided ICC profile.
|
||||
If this parameter is not provided, the image will be saved with no
|
||||
profile attached.
|
||||
exif : dict
|
||||
(Only available in JPEG-PIL)
|
||||
If present, the image will be stored with the provided raw EXIF data.
|
||||
subsampling : str
|
||||
(Only available in JPEG-PIL)
|
||||
Sets the subsampling for the encoder. See Pillow docs for details.
|
||||
qtables : object
|
||||
(Only available in JPEG-PIL)
|
||||
Set the qtables for the encoder. See Pillow docs for details.
|
||||
quality_mode : str
|
||||
(Only available in JPEG2000-PIL)
|
||||
Either `"rates"` or `"dB"` depending on the units you want to use to
|
||||
specify image quality.
|
||||
quality : float
|
||||
(Only available in JPEG2000-PIL)
|
||||
Approximate size reduction (if quality mode is `rates`) or a signal to noise ratio
|
||||
in decibels (if quality mode is `dB`).
|
||||
loop : int
|
||||
(Only available in GIF-PIL)
|
||||
The number of iterations. Default 0 (meaning loop indefinitely).
|
||||
duration : {float, list}
|
||||
(Only available in GIF-PIL)
|
||||
The duration (in milliseconds) of each frame. Either specify one value
|
||||
that is used for all frames, or one value for each frame.
|
||||
fps : float
|
||||
(Only available in GIF-PIL)
|
||||
The number of frames per second. If duration is not given, the
|
||||
duration for each frame is set to 1/fps. Default 10.
|
||||
palettesize : int
|
||||
(Only available in GIF-PIL)
|
||||
The number of colors to quantize the image to. Is rounded to
|
||||
the nearest power of two. Default 256.
|
||||
subrectangles : bool
|
||||
(Only available in GIF-PIL)
|
||||
If True, will try and optimize the GIF by storing only the
|
||||
rectangular parts of each frame that change with respect to the
|
||||
previous. Default False.
|
||||
|
||||
Notes
|
||||
-----
|
||||
To enable JPEG 2000 support, you need to build and install the OpenJPEG library,
|
||||
version 2.0.0 or higher, before building the Python Imaging Library. Windows
|
||||
users can install the OpenJPEG binaries available on the OpenJPEG website, but
|
||||
must add them to their PATH in order to use PIL (if you fail to do this, you
|
||||
will get errors about not being able to load the ``_imaging`` DLL).
|
||||
|
||||
GIF images read with this plugin are always RGBA. The alpha channel is ignored
|
||||
when saving RGB images.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
from ..core.request import URI_FILE, URI_BYTES
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX.
|
||||
|
||||
|
||||
GENERIC_DOCS = """
|
||||
Parameters for reading
|
||||
----------------------
|
||||
|
||||
pilmode : str
|
||||
From the Pillow documentation:
|
||||
|
||||
* 'L' (8-bit pixels, grayscale)
|
||||
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
|
||||
* 'RGB' (3x8-bit pixels, true color)
|
||||
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
|
||||
* 'CMYK' (4x8-bit pixels, color separation)
|
||||
* 'YCbCr' (3x8-bit pixels, color video format)
|
||||
* 'I' (32-bit signed integer pixels)
|
||||
* 'F' (32-bit floating point pixels)
|
||||
|
||||
PIL also provides limited support for a few special modes, including
|
||||
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
|
||||
(true color with premultiplied alpha).
|
||||
|
||||
When translating a color image to grayscale (mode 'L', 'I' or 'F'),
|
||||
the library uses the ITU-R 601-2 luma transform::
|
||||
|
||||
L = R * 299/1000 + G * 587/1000 + B * 114/1000
|
||||
as_gray : bool
|
||||
If True, the image is converted using mode 'F'. When `mode` is
|
||||
not None and `as_gray` is True, the image is first converted
|
||||
according to `mode`, and the result is then "flattened" using
|
||||
mode 'F'.
|
||||
"""
|
||||
|
||||
|
||||
class PillowFormat(Format):
    """
    Base format class for Pillow formats.

    Subclasses pass their PIL plugin identifier via ``plugin_id`` and may
    override the nested ``Reader``/``Writer`` classes.
    """

    # Class-level state: Pillow is imported lazily on first use, guarded by
    # a per-instance lock (see _init_pillow).
    _pillow_imported = False
    _Image = None
    _modes = "i"
    _description = ""

    def __init__(self, *args, plugin_id: str = None, **kwargs):
        super(PillowFormat, self).__init__(*args, **kwargs)
        # Used to synchronize _init_pillow(), see #244
        self._lock = threading.RLock()

        self._plugin_id = plugin_id

    @property
    def plugin_id(self):
        """The PIL plugin id."""
        return self._plugin_id  # Set when format is created

    def _init_pillow(self):
        # Lazily import Pillow (thread-safe) and return its Image module.
        with self._lock:
            if not self._pillow_imported:
                self._pillow_imported = True  # more like tried to import
                import PIL

                if not hasattr(PIL, "__version__"):  # pragma: no cover
                    raise ImportError(
                        "Imageio Pillow plugin requires " "Pillow, not PIL!"
                    )
                from PIL import Image

                self._Image = Image
            elif self._Image is None:  # pragma: no cover
                raise RuntimeError("Imageio Pillow plugin requires " "Pillow lib.")
            Image = self._Image

        # preinit() registers only the common built-in codecs and is cheap;
        # all other formats need the full plugin scan done by init().
        if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"):
            Image.preinit()
        else:
            Image.init()
        return Image

    def _can_read(self, request):
        # Readable when the PIL plugin's accept() recognizes the file's
        # magic bytes; returns None (falsy) otherwise.
        Image = self._init_pillow()
        if self.plugin_id in Image.OPEN:
            factory, accept = Image.OPEN[self.plugin_id]
            if accept:
                if request.firstbytes and accept(request.firstbytes):
                    return True

    def _can_write(self, request):
        # Writable when the extension matches (or the target is a raw
        # file/bytes object) and PIL registered a save handler.
        Image = self._init_pillow()
        if request.extension in self.extensions or request._uri_type in [
            URI_FILE,
            URI_BYTES,
        ]:
            if self.plugin_id in Image.SAVE:
                return True

    class Reader(Format.Reader):
        def _open(self, pilmode=None, as_gray=False):
            Image = self.format._init_pillow()
            try:
                factory, accept = Image.OPEN[self.format.plugin_id]
            except KeyError:
                raise RuntimeError("Format %s cannot read images." % self.format.name)
            self._fp = self._get_file()
            self._im = factory(self._fp, "")
            if hasattr(Image, "_decompression_bomb_check"):
                Image._decompression_bomb_check(self._im.size)
            # Save the raw mode used by the palette for a BMP because it may not be the number of channels
            # When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument
            # However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame.
            # This issue is resolved by using the raw palette data but the rawmode information is now lost. So we
            # store the raw mode for later use
            if self._im.palette and self._im.palette.dirty:
                self._im.palette.rawmode_saved = self._im.palette.rawmode
            pil_try_read(self._im)
            # Store args
            self._kwargs = dict(
                as_gray=as_gray, is_gray=_palette_is_grayscale(self._im)
            )
            # setting mode=None is not the same as just not providing it
            if pilmode is not None:
                self._kwargs["mode"] = pilmode
            # Set length
            self._length = 1
            if hasattr(self._im, "n_frames"):
                self._length = self._im.n_frames

        def _get_file(self):
            # Default: the request owns (and later closes) the file object.
            self._we_own_fp = False
            return self.request.get_file()

        def _close(self):
            save_pillow_close(self._im)
            if self._we_own_fp:
                self._fp.close()
            # else: request object handles closing the _fp

        def _get_length(self):
            return self._length

        def _seek(self, index):
            try:
                self._im.seek(index)
            except EOFError:
                raise IndexError("Could not seek to index %i" % index)

        def _get_data(self, index):
            if index >= self._length:
                raise IndexError("Image index %i > %i" % (index, self._length))
            i = self._im.tell()
            if i > index:
                self._seek(index)  # just try
            else:
                while i < index:  # some formats need to be read in sequence
                    i += 1
                    self._seek(i)
            # Re-save the palette's raw mode in case this frame replaced it
            # (see the note in _open about per-frame GIF palettes).
            if self._im.palette and self._im.palette.dirty:
                self._im.palette.rawmode_saved = self._im.palette.rawmode
            self._im.getdata()[0]
            im = pil_get_frame(self._im, **self._kwargs)
            return im, self._im.info

        def _get_meta_data(self, index):
            if not (index is None or index == 0):
                raise IndexError()
            return self._im.info

    class Writer(Format.Writer):
        def _open(self):
            Image = self.format._init_pillow()
            try:
                self._save_func = Image.SAVE[self.format.plugin_id]
            except KeyError:
                raise RuntimeError("Format %s cannot write images." % self.format.name)
            self._fp = self.request.get_file()
            self._meta = {}
            self._written = False

        def _close(self):
            pass  # request object handled closing _fp

        def _append_data(self, im, meta):
            # The base Pillow writer supports exactly one image per file.
            if self._written:
                raise RuntimeError(
                    "Format %s only supports single images." % self.format.name
                )
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            self._written = True
            self._meta.update(meta)
            img = ndarray_to_pil(
                im, self.format.plugin_id, self._meta.pop("prefer_uint8", True)
            )
            if "bits" in self._meta:
                img = img.quantize()  # Make it a P image, so bits arg is used
            img.save(self._fp, format=self.format.plugin_id, **self._meta)
            save_pillow_close(img)

        def set_meta_data(self, meta):
            self._meta.update(meta)
||||
|
||||
|
||||
class PNGFormat(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    class Reader(PillowFormat.Reader):
        # "ignoregamma" is accepted here so that passing it is not an error;
        # it is consumed via self.request.kwargs in _get_data below.
        def _open(self, pilmode=None, as_gray=False, ignoregamma=True):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_data(self, index):
            im, info = PillowFormat.Reader._get_data(self, index)
            if not self.request.kwargs.get("ignoregamma", True):
                # The gamma value in the file represents the gamma factor for the
                # hardware on the system where the file was created, and is meant
                # to be able to match the colors with the system on which the
                # image is shown. See also issue #366
                try:
                    gamma = float(info["gamma"])
                except (KeyError, ValueError):
                    pass
                else:
                    scale = float(65536 if im.dtype == np.uint16 else 255)
                    gain = 1.0
                    # In-place gamma correction; + 0.4999 rounds to nearest.
                    im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999
            return im, info

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, compression=None, quantize=None, interlaced=False, **kwargs):
            # Better default for compression
            kwargs["compress_level"] = kwargs.get("compress_level", 9)

            # "compression" and "quantize" exist for compatibility with the
            # freeimage PNG plugin; they override compress_level and bits.
            if compression is not None:
                if compression < 0 or compression > 9:
                    raise ValueError("Invalid PNG compression level: %r" % compression)
                kwargs["compress_level"] = compression
            if quantize is not None:
                # quantize must be an exact power of two in [2, 256].
                for bits in range(1, 9):
                    if 2**bits == quantize:
                        break
                else:
                    raise ValueError(
                        "PNG quantize must be power of two, " "not %r" % quantize
                    )
                kwargs["bits"] = bits
            if interlaced:
                logger.warning("PIL PNG writer cannot produce interlaced images.")

            # Reject unknown keyword args early, before Pillow sees them.
            ok_keys = (
                "optimize",
                "transparency",
                "dpi",
                "pnginfo",
                "bits",
                "compress_level",
                "icc_profile",
                "dictionary",
                "prefer_uint8",
            )
            for key in kwargs:
                if key not in ok_keys:
                    raise TypeError("Invalid arg for PNG writer: %r" % key)

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            # Keep 16-bit precision for single-channel data; everything else
            # is converted to 8-bit before saving.
            if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1):
                im = image_as_uint(im, bitdepth=16)
            else:
                im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
|
||||
|
||||
class JPEGFormat(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    class Reader(PillowFormat.Reader):
        # "exifrotate" is accepted here so passing it is not an error; it is
        # consumed via self.request.kwargs in _rotate below.
        def _open(self, pilmode=None, as_gray=False, exifrotate=True):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_file(self):
            # Pillow uses seek for JPG, so we cannot directly stream from web
            if self.request.filename.startswith(
                ("http://", "https://")
            ) or ".zip/" in self.request.filename.replace("\\", "/"):
                self._we_own_fp = True
                return open(self.request.get_local_filename(), "rb")
            else:
                self._we_own_fp = False
                return self.request.get_file()

        def _get_data(self, index):
            im, info = PillowFormat.Reader._get_data(self, index)

            # Handle exif: expose decoded EXIF tags under info["EXIF_MAIN"]
            # with human-readable names.
            if "exif" in info:
                from PIL.ExifTags import TAGS

                info["EXIF_MAIN"] = {}
                for tag, value in self._im._getexif().items():
                    decoded = TAGS.get(tag, tag)
                    info["EXIF_MAIN"][decoded] = value

            im = self._rotate(im, info)
            return im, info

        def _rotate(self, im, meta):
            """Use Orientation information from EXIF meta data to
            orient the image correctly. Similar code as in FreeImage plugin.
            """
            if self.request.kwargs.get("exifrotate", True):
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, quality=75, progressive=False, optimize=False, **kwargs):
            """Configure the JPEG writer.

            Parameters
            ----------
            quality : int
                Compression quality, 0 (worst) to 100 (best). Default 75.
            progressive : bool
                Save as a progressive JPEG. Default False.
            optimize : bool
                Compute optimal Huffman coding tables. Default False.
            """
            # The JPEG quality can be between 0 (worst) and 100 (best)
            quality = int(quality)
            if quality < 0 or quality > 100:
                raise ValueError("JPEG quality should be between 0 and 100.")

            kwargs["quality"] = quality
            kwargs["progressive"] = bool(progressive)
            # BUGFIX: this previously read `bool(progressive)`, which made
            # the writer silently ignore the user's `optimize` argument.
            kwargs["optimize"] = bool(optimize)

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            # JPEG has no alpha; fail loudly instead of dropping a channel.
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError("JPEG does not support alpha channel.")
            im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
            return
|
||||
|
||||
class JPEG2000Format(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_file(self):
            # Pillow uses seek for JPG, so we cannot directly stream from web
            if self.request.filename.startswith(
                ("http://", "https://")
            ) or ".zip/" in self.request.filename.replace("\\", "/"):
                self._we_own_fp = True
                return open(self.request.get_local_filename(), "rb")
            else:
                self._we_own_fp = False
                return self.request.get_file()

        def _get_data(self, index):
            im, info = PillowFormat.Reader._get_data(self, index)

            # Handle exif: expose decoded EXIF tags under info["EXIF_MAIN"]
            # with human-readable names.
            if "exif" in info:
                from PIL.ExifTags import TAGS

                info["EXIF_MAIN"] = {}
                for tag, value in self._im._getexif().items():
                    decoded = TAGS.get(tag, tag)
                    info["EXIF_MAIN"][decoded] = value

            im = self._rotate(im, info)
            return im, info

        def _rotate(self, im, meta):
            """Use Orientation information from EXIF meta data to
            orient the image correctly. Similar code as in FreeImage plugin.
            """
            # NOTE(review): "exifrotate" is not a declared _open parameter
            # for JPEG 2000 (unlike JPEG); this block appears copied from
            # the JPEG reader -- confirm whether the kwarg is reachable.
            if self.request.kwargs.get("exifrotate", True):
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, quality_mode="rates", quality=5, **kwargs):
            # Check quality - in Pillow it should be no higher than 95
            if quality_mode not in {"rates", "dB"}:
                raise ValueError("Quality mode should be either 'rates' or 'dB'")

            quality = float(quality)

            # Validate quality against the chosen unit: a compression rate
            # (1..1000) or a PSNR in decibels (15..100).
            if quality_mode == "rates" and (quality < 1 or quality > 1000):
                raise ValueError(
                    "The quality value {} seems to be an invalid rate!".format(quality)
                )
            elif quality_mode == "dB" and (quality < 15 or quality > 100):
                raise ValueError(
                    "The quality value {} seems to be an invalid PSNR!".format(quality)
                )

            kwargs["quality_mode"] = quality_mode
            kwargs["quality_layers"] = [quality]

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            # Alpha is not supported by this implementation; fail loudly.
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError(
                    "The current implementation of JPEG 2000 does not support alpha channel."
                )
            im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
            return
||||
|
||||
|
||||
def save_pillow_close(im):
    """Close ``im`` only when it still owns an open file pointer.

    Guards against double-closing / closing images that never opened a
    file; see issue #216 and #300.
    """
    fp = getattr(im, "fp", None)
    if hasattr(im, "close") and hasattr(fp, "close"):
        im.close()
||||
|
||||
|
||||
# Func from skimage
|
||||
|
||||
# This cells contains code from scikit-image, in particular from
|
||||
# http://github.com/scikit-image/scikit-image/blob/master/
|
||||
# skimage/io/_plugins/pil_plugin.py
|
||||
# The scikit-image license applies.
|
||||
|
||||
|
||||
def pil_try_read(im):
    """Probe ``im`` for readability.

    Touching the first pixel forces Pillow to decode; an IOError here
    (missing codec, truncated file) is re-raised as a ValueError that
    points the user at Pillow's external-library documentation.
    """
    try:
        # this will raise an IOError if the file is not readable
        im.getdata()[0]
    except IOError as e:
        site = (
            "http://pillow.readthedocs.io/en/latest/installation.html"
            "#external-libraries"
        )
        pillow_error_message = str(e)
        raise ValueError(
            'Could not load "%s" \n'
            'Reason: "%s"\n'
            "Please see documentation at: %s"
            % (im.filename, pillow_error_message, site)
        )
||||
|
||||
|
||||
def _palette_is_grayscale(pil_image):
|
||||
if pil_image.mode != "P":
|
||||
return False
|
||||
elif pil_image.info.get("transparency", None): # see issue #475
|
||||
return False
|
||||
# get palette as an array with R, G, B columns
|
||||
# Note: starting in pillow 9.1 palettes may have less than 256 entries
|
||||
palette = np.asarray(pil_image.getpalette()).reshape((-1, 3))
|
||||
# Not all palette colors are used; unused colors have junk values.
|
||||
start, stop = pil_image.getextrema()
|
||||
valid_palette = palette[start : stop + 1]
|
||||
# Image is grayscale if channel differences (R - G and G - B)
|
||||
# are all zero.
|
||||
return np.allclose(np.diff(valid_palette), 0)
|
||||
|
||||
|
||||
def pil_get_frame(im, is_gray=None, as_gray=None, mode=None, dtype=None):
    """Convert (the current frame of) a Pillow image to a numpy array.

    is_gray: Whether the image *is* gray (by inspecting its palette).
        Determined automatically (via _palette_is_grayscale) when None.
    as_gray: Whether the resulting image must be converted to gray
        (float32, mode "F").
    mode: The mode to convert to. When given, it overrides all automatic
        mode conversions below.
    dtype: Optional numpy dtype for the resulting array.
    """

    if is_gray is None:
        is_gray = _palette_is_grayscale(im)

    frame = im

    # Convert ...
    if mode is not None:
        # Mode is explicitly given ...
        if mode != im.mode:
            frame = im.convert(mode)
    elif as_gray:
        pass  # don't do any auto-conversions (but do the explicit one above)
    elif im.mode == "P" and is_gray:
        # Paletted images that are already gray by their palette
        # are converted so that the resulting numpy array is 2D.
        frame = im.convert("L")
    elif im.mode == "P":
        # Paletted images are converted to RGB/RGBA. We jump some loops to make
        # this work well.
        if im.info.get("transparency", None) is not None:
            # Let Pillow apply the transparency, see issue #210 and #246
            frame = im.convert("RGBA")
        elif im.palette.mode in ("RGB", "RGBA"):
            # We can do this ourselves. Pillow seems to sometimes screw
            # this up if a multi-gif has a palette for each frame ...
            # Create palette array
            p = np.frombuffer(im.palette.getdata()[1], np.uint8)
            # Restore the raw mode that was saved to be used to parse the palette
            if hasattr(im.palette, "rawmode_saved"):
                im.palette.rawmode = im.palette.rawmode_saved
            mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode
            nchannels = len(mode)
            # Shape it.
            p.shape = -1, nchannels
            if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == "X"):
                # 3-channel palette (or 4-channel with unused "X" slot):
                # append a fully-opaque alpha column.
                p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype)))
            # Swap the axes if the mode is in BGR and not RGB
            if mode.startswith("BGR"):
                p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]]
            # Apply palette
            frame_paletted = np.array(im, np.uint8)
            try:
                frame = p[frame_paletted]
            except Exception:
                # Ok, let PIL do it. The introduction of the branch that
                # tests `im.info['transparency']` should make this happen
                # much less often, but let's keep it, to be safe.
                frame = im.convert("RGBA")
        else:
            # Let Pillow do it. Unlike skimage, we always convert
            # to RGBA; palettes can be RGBA.
            if True:  # im.format == 'PNG' and 'transparency' in im.info:
                frame = im.convert("RGBA")
            else:
                frame = im.convert("RGB")
    elif "A" in im.mode:
        frame = im.convert("RGBA")
    elif im.mode == "CMYK":
        frame = im.convert("RGB")
    elif im.format == "GIF" and im.mode == "RGB":
        # pillow9 returns RGBA images for subsequent frames so that it can deal
        # with multi-frame GIF that use frame-level palettes and don't dispose
        # all areas.

        # For backwards compatibility, we promote everything to RGBA.
        frame = im.convert("RGBA")

    # Apply a post-convert if necessary
    if as_gray:
        frame = frame.convert("F")  # Scipy compat
    elif not isinstance(frame, np.ndarray) and frame.mode == "1":
        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
        # can cause a segfault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        #
        # This converts im from a 1-bit image to an 8-bit image.
        frame = frame.convert("L")

    # Convert to numpy array
    if im.mode.startswith("I;16"):
        # 16-bit images, e.g. 16-bit PNG's; decode the raw bytes directly.
        shape = im.size
        dtype = ">u2" if im.mode.endswith("B") else "<u2"
        if "S" in im.mode:
            # Signed 16-bit variant.
            dtype = dtype.replace("u", "i")
        frame = np.frombuffer(frame.tobytes(), dtype).copy()
        frame.shape = shape[::-1]
    else:
        # Use uint16 for PNG's in mode I
        if im.format == "PNG" and im.mode == "I" and dtype is None:
            dtype = "uint16"
        frame = np.array(frame, dtype=dtype)

    return frame
|
||||
|
||||
|
||||
def ndarray_to_pil(arr, format_str=None, prefer_uint8=True):
    """Convert a numpy array to a PIL image with a suitable mode.

    Color (3D) arrays become 8-bit RGB/RGBA. 2D arrays targeted at PNG get
    16-bit output ("I;16") unless the data fits in uint8 and *prefer_uint8*
    is set; every other 2D array becomes 8-bit grayscale ("L").
    """
    from PIL import Image

    if arr.ndim == 3:
        # Color image: force 8 bit and pick the mode from the channel count.
        arr = image_as_uint(arr, bitdepth=8)
        channel_modes = {3: "RGB", 4: "RGBA"}
        mode = channel_modes[arr.shape[2]]
    elif format_str in ["png", "PNG"]:
        if arr.dtype.kind == "f":
            arr = image_as_uint(arr)
            mode, mode_base = "I;16", "I"
        elif prefer_uint8 and arr.max() < 256 and arr.min() >= 0:
            # Data fits in a byte; use plain 8-bit grayscale.
            arr = arr.astype(np.uint8)
            mode = mode_base = "L"
        else:
            arr = image_as_uint(arr, bitdepth=16)
            mode, mode_base = "I;16", "I"
    else:
        arr = image_as_uint(arr, bitdepth=8)
        mode = mode_base = "L"

    pillow_major = int(getattr(Image, "__version__", "0").split(".")[0])
    if mode != "I;16" or pillow_major >= 6:
        return Image.fromarray(arr, mode)

    # Pillow < v6.0.0 has limited support for the "I;16" mode, requiring
    # us to fall back to this expensive workaround.
    # tobytes actually creates a copy of the image, which is costly.
    raw = arr.tobytes()
    if arr.ndim == 2:
        pil_im = Image.new(mode_base, arr.T.shape)
        pil_im.frombytes(raw, "raw", mode)
        return pil_im
    return Image.frombytes(mode, (arr.shape[1], arr.shape[0]), raw)
|
||||
|
||||
|
||||
# imported for backwards compatibility
|
||||
from .pillowmulti import GIFFormat, TIFFFormat # noqa: E402, F401
|
||||
338
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillowmulti.py
vendored
Normal file
338
.CondaPkg/env/Lib/site-packages/imageio/plugins/pillowmulti.py
vendored
Normal file
@@ -0,0 +1,338 @@
|
||||
"""
|
||||
PIL formats for multiple images.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .pillow_legacy import PillowFormat, image_as_uint, ndarray_to_pil
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
NeuQuant = None # we can implement this when we need it
|
||||
|
||||
|
||||
class TIFFFormat(PillowFormat):
    """TIFF support via Pillow (single image only).

    Prefer the tifffile-based plugin for full-featured TIFF support.
    """

    _modes = "i"  # arg, why bother; people should use the tiffile version
    _description = "TIFF format (Pillow)"
|
||||
|
||||
|
||||
class GIFFormat(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    _modes = "iI"
    _description = "Static and animated gif (Pillow)"

    # GIF reader needs no modifications compared to base pillow reader

    class Writer(PillowFormat.Writer):  # pragma: no cover
        def _open(
            self,
            loop=0,
            duration=None,
            fps=10,
            palettesize=256,
            quantizer=0,
            subrectangles=False,
        ):
            """Initialize the GIF writer.

            Parameters
            ----------
            loop : int
                Number of times the animation loops; 0 (or inf, or any
                non-positive value) means loop forever.
            duration : float or list of float, optional
                Per-frame duration in seconds; overrides ``fps`` when given.
            fps : float
                Frames per second, used when ``duration`` is not given.
            palettesize : int
                Number of palette colors; rounded up to a power of two in
                the range 2..256.
            quantizer : int or str
                Quantization method (0, 1, 2, or "nq"/"neuquant").
            subrectangles : bool
                If True, only write the changing rectangle of each frame.
            """
            from PIL import __version__ as pillow_version

            # NOTE(review): this assumes a plain "major.minor.patch" version
            # string; pre/post-release suffixes would raise ValueError here.
            major, minor, patch = tuple(int(x) for x in pillow_version.split("."))
            if major == 10 and minor >= 1:
                raise ImportError(
                    f"Pillow v{pillow_version} is not supported by ImageIO's legacy "
                    "pillow plugin when writing GIFs. Consider switching to the new "
                    "plugin or downgrading to `pillow<10.1.0`."
                )

            # Check palettesize
            palettesize = int(palettesize)
            if palettesize < 2 or palettesize > 256:
                raise ValueError("GIF quantize param must be 2..256")
            if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
                # Round the *requested* size up to the next power of two.
                # (Bugfix: this previously computed np.log2(128), which
                # silently forced every non-power-of-two request to 128.)
                palettesize = 2 ** int(np.log2(palettesize) + 0.999)
                logger.warning(
                    "Warning: palettesize (%r) modified to a factor of "
                    "two between 2-256." % palettesize
                )
            # Duration / fps
            if duration is None:
                self._duration = 1.0 / float(fps)
            elif isinstance(duration, (list, tuple)):
                self._duration = [float(d) for d in duration]
            else:
                self._duration = float(duration)
            # loop: non-positive or infinite means "loop forever" (0)
            loop = float(loop)
            if loop <= 0 or loop == float("inf"):
                loop = 0
            loop = int(loop)
            # Subrectangles / dispose
            subrectangles = bool(subrectangles)
            self._dispose = 1 if subrectangles else 2
            # The "0" (median cut) quantizer is by far the best

            fp = self.request.get_file()
            self._writer = GifWriter(
                fp, subrectangles, loop, quantizer, int(palettesize)
            )

        def _close(self):
            # Finalize the stream (writes the GIF trailer byte).
            self._writer.close()

        def _append_data(self, im, meta):
            """Append one frame (converted to uint8) to the GIF stream."""
            im = image_as_uint(im, bitdepth=8)
            if im.ndim == 3 and im.shape[-1] == 1:
                # Squeeze single-channel color axis to a 2D grayscale frame.
                im = im[:, :, 0]
            duration = self._duration
            if isinstance(duration, list):
                # Per-frame durations; repeat the last entry when exhausted.
                duration = duration[min(len(duration) - 1, self._writer._count)]
            dispose = self._dispose
            self._writer.add_image(im, duration, dispose)

            return
|
||||
|
||||
|
||||
def intToBin(i):
    """Pack *i* as a little-endian unsigned 16-bit value (two bytes)."""
    return i.to_bytes(length=2, byteorder="little")
|
||||
|
||||
|
||||
class GifWriter:  # pragma: no cover
    """Helper class for writing the animated GIF file. This is based on
    code from images2gif.py (part of visvis). The version here is modified
    to allow streamed writing.
    """

    def __init__(
        self,
        file,
        opt_subrectangle=True,
        opt_loop=0,
        opt_quantizer=0,
        opt_palette_size=256,
    ):
        # Output file object; must support write(bytes).
        self.fp = file

        self.opt_subrectangle = opt_subrectangle
        self.opt_loop = opt_loop
        self.opt_quantizer = opt_quantizer
        self.opt_palette_size = opt_palette_size

        self._previous_image = None  # as np array
        self._global_palette = None  # as bytes
        self._count = 0  # number of frames written so far

        from PIL.GifImagePlugin import getdata

        self.getdata = getdata

    def add_image(self, im, duration, dispose):
        """Quantize one frame and append it to the stream.

        The first frame also triggers writing of the GIF header, and its
        palette becomes the global palette.
        """
        # Prepare image
        im_rect, rect = im, (0, 0)
        if self.opt_subrectangle:
            im_rect, rect = self.getSubRectangle(im)
        im_pil = self.converToPIL(im_rect, self.opt_quantizer, self.opt_palette_size)

        # Get palette - apparently, this is the 3rd element of the header
        # (but it has not always been). Best we've got. It's not the same
        # as im_pil.palette.tobytes().
        from PIL.GifImagePlugin import getheader

        palette = getheader(im_pil)[0][3]

        # Write image
        if self._count == 0:
            self.write_header(im_pil, palette, self.opt_loop)
            self._global_palette = palette
        self.write_image(im_pil, palette, rect, duration, dispose)
        # assert len(palette) == len(self._global_palette)

        # Bookkeeping
        self._previous_image = im
        self._count += 1

    def write_header(self, im, globalPalette, loop):
        """Write the GIF89a header, the global palette, and the loop
        (application) extension."""
        # Gather info
        header = self.getheaderAnim(im)
        appext = self.getAppExt(loop)
        # Write
        self.fp.write(header)
        self.fp.write(globalPalette)
        self.fp.write(appext)

    def close(self):
        """Write the trailer byte that terminates the GIF stream."""
        self.fp.write(";".encode("utf-8"))  # end gif

    def write_image(self, im, palette, rect, duration, dispose):
        """Write one frame: graphics control extension, image descriptor,
        optional local color table, and the LZW-compressed pixel data."""
        fp = self.fp

        # Gather local image header and data, using PIL's getdata. That
        # function returns a list of bytes objects, but which parts are
        # what has changed multiple times, so we put together the first
        # parts until we have enough to form the image header.
        data = self.getdata(im)
        imdes = b""
        while data and len(imdes) < 11:
            imdes += data.pop(0)
        assert len(imdes) == 11

        # Make image descriptor suitable for using 256 local color palette
        lid = self.getImageDescriptor(im, rect)
        graphext = self.getGraphicsControlExt(duration, dispose)

        # Write local header
        if (palette != self._global_palette) or (dispose != 2):
            # Use local color palette
            fp.write(graphext)
            fp.write(lid)  # write suitable image descriptor
            fp.write(palette)  # write local color table
            fp.write(b"\x08")  # LZW minimum size code
        else:
            # Use global color palette
            fp.write(graphext)
            fp.write(imdes)  # write suitable image descriptor

        # Write image data
        for d in data:
            fp.write(d)

    def getheaderAnim(self, im):
        """Get animation header. To replace PILs getheader()[0]"""
        bb = b"GIF89a"
        bb += intToBin(im.size[0])
        bb += intToBin(im.size[1])
        # Packed fields, background color index, pixel aspect ratio.
        bb += b"\x87\x00\x00"
        return bb

    def getImageDescriptor(self, im, xy=None):
        """Used for the local color table properties per image.
        Otherwise global color table applies to all frames irrespective of
        whether additional colors comes in play that require a redefined
        palette. Still a maximum of 256 color per frame, obviously.

        Written by Ant1 on 2010-08-22
        Modified by Alex Robinson in January 2011 to implement subrectangles.
        """

        # Default: use full image and place at upper left
        if xy is None:
            xy = (0, 0)

        # Image separator,
        bb = b"\x2C"

        # Image position and size
        bb += intToBin(xy[0])  # Left position
        bb += intToBin(xy[1])  # Top position
        bb += intToBin(im.size[0])  # image width
        bb += intToBin(im.size[1])  # image height

        # packed field: local color table flag1, interlace0, sorted table0,
        # reserved00, lct size111=7=2^(7 + 1)=256.
        bb += b"\x87"

        # LZW minimum size code now comes later, beginning of [imagedata] blocks
        return bb

    def getAppExt(self, loop):
        """Application extension. This part specifies the amount of loops.
        If loop is 0 or inf, it goes on infinitely.
        """
        if loop == 1:
            # Play once: simply omit the extension.
            return b""
        if loop == 0:
            # "Forever" is encoded as the maximum 16-bit loop count.
            loop = 2**16 - 1
        bb = b""
        if loop != 0:  # omit the extension if we would like a nonlooping gif
            bb = b"\x21\xFF\x0B"  # application extension
            bb += b"NETSCAPE2.0"
            bb += b"\x03\x01"
            bb += intToBin(loop)
            bb += b"\x00"  # end
        return bb

    def getGraphicsControlExt(self, duration=0.1, dispose=2):
        """Graphics Control Extension. A sort of header at the start of
        each image. Specifies duration and transparency.

        Dispose
        -------
        * 0 - No disposal specified.
        * 1 - Do not dispose. The graphic is to be left in place.
        * 2 - Restore to background color. The area used by the graphic
          must be restored to the background color.
        * 3 - Restore to previous. The decoder is required to restore the
          area overwritten by the graphic with what was there prior to
          rendering the graphic.
        * 4-7 -To be defined.
        """

        bb = b"\x21\xF9\x04"
        bb += chr((dispose & 3) << 2).encode("utf-8")
        # low bit 1 == transparency,
        # 2nd bit 1 == user input , next 3 bits, the low two of which are used,
        # are dispose.
        bb += intToBin(int(duration * 100 + 0.5))  # in 100th of seconds
        bb += b"\x00"  # no transparent color
        bb += b"\x00"  # end
        return bb

    def getSubRectangle(self, im):
        """Calculate the minimal rectangle that need updating. Returns
        a two-element tuple containing the cropped image and an x-y tuple.

        Calculating the subrectangles takes extra time, obviously. However,
        if the image sizes were reduced, the actual writing of the GIF
        goes faster. In some cases applying this method produces a GIF faster.
        """

        # Cannot do subrectangle for first image
        if self._count == 0:
            return im, (0, 0)

        prev = self._previous_image

        # Get difference, sum over colors
        # NOTE(review): for uint8 frames the subtraction wraps around, but
        # the result is still nonzero exactly where pixels differ, which is
        # all that matters here.
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Get begin and end for both dimensions
        X = np.argwhere(diff.sum(0))
        Y = np.argwhere(diff.sum(1))
        # Get rect coordinates
        if X.size and Y.size:
            x0, x1 = int(X[0]), int(X[-1] + 1)
            y0, y1 = int(Y[0]), int(Y[-1] + 1)
        else:  # No change ... make it minimal
            x0, x1 = 0, 2
            y0, y1 = 0, 2

        return im[y0:y1, x0:x1], (x0, y0)

    def converToPIL(self, im, quantizer, palette_size=256):
        """Convert image to Paletted PIL image.

        PIL used to not do a very good job at quantization, but I guess
        this has improved a lot (at least in Pillow). I don't think we need
        neuqant (and we can add it later if we really want).
        """

        im_pil = ndarray_to_pil(im, "gif")

        if quantizer in ("nq", "neuquant"):
            # NeuQuant algorithm
            nq_samplefac = 10  # 10 seems good in general
            im_pil = im_pil.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im_pil, nq_samplefac)  # Learn colors
            im_pil = nqInstance.quantize(im_pil, colors=palette_size)
        elif quantizer in (0, 1, 2):
            # Adaptive PIL algorithm
            if quantizer == 2:
                im_pil = im_pil.convert("RGBA")
            else:
                im_pil = im_pil.convert("RGB")
            im_pil = im_pil.quantize(colors=palette_size, method=quantizer)
        else:
            raise ValueError("Invalid value for quantizer: %r" % quantizer)
        return im_pil
|
||||
1198
.CondaPkg/env/Lib/site-packages/imageio/plugins/pyav.py
vendored
Normal file
1198
.CondaPkg/env/Lib/site-packages/imageio/plugins/pyav.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
191
.CondaPkg/env/Lib/site-packages/imageio/plugins/rawpy.py
vendored
Normal file
191
.CondaPkg/env/Lib/site-packages/imageio/plugins/rawpy.py
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
""" Read/Write images using rawpy.
|
||||
|
||||
rawpy is an easy-to-use Python wrapper for the LibRaw library.
|
||||
It also contains some extra functionality for finding and repairing hot/dead pixels.
|
||||
"""
|
||||
|
||||
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
|
||||
import rawpy
|
||||
import numpy as np
|
||||
|
||||
from ..core.request import URI_BYTES, InitializationError, IOMode, Request
|
||||
from ..core.v3_plugin_api import ImageProperties, PluginV3
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
class RawPyPlugin(PluginV3):
    """A class representing the rawpy plugin.

    Methods
    -------

    .. autosummary::
        :toctree: _plugins/rawpy

        RawPyPlugin.read
    """

    def __init__(self, request: Request) -> None:
        """Instantiate a new rawpy plugin object.

        Parameters
        ----------
        request: Request
            A request object representing the resource to be operated on.

        Raises
        ------
        InitializationError
            If rawpy cannot read the resource, or if writing is requested
            (rawpy is read-only).
        """

        super().__init__(request)

        self._image_file = None

        if request.mode.io_mode == IOMode.read:
            try:
                self._image_file = rawpy.imread(request.get_file())
            except (
                rawpy.NotSupportedError,
                rawpy.LibRawFileUnsupportedError,
                rawpy.LibRawIOError,
            ):
                if request._uri_type == URI_BYTES:
                    raise InitializationError(
                        "RawPy can not read the provided bytes."
                    ) from None
                else:
                    raise InitializationError(
                        f"RawPy can not read {request.raw_uri}."
                    ) from None
        elif request.mode.io_mode == IOMode.write:
            raise InitializationError("RawPy does not support writing.") from None

    def close(self) -> None:
        """Release the underlying rawpy handle and finish the request."""
        if self._image_file:
            self._image_file.close()

        self._request.finish()

    def read(self, *, index: int = 0, **kwargs) -> np.ndarray:
        """Read and postprocess the raw image.

        Parameters
        ----------
        index : int
            Ignored, except for the special value ``Ellipsis`` which adds a
            leading batch axis to the result.
        kwargs : Any
            Forwarded to ``rawpy``'s ``postprocess`` (demosaicing options).

        Returns
        -------
        nd_image: ndarray
            The image data
        """

        # Bugfix: errors from postprocess used to be swallowed, after which
        # the unbound ``nd_image`` crashed below with a confusing NameError.
        # Let the real error propagate instead.
        nd_image: np.ndarray = self._image_file.postprocess(**kwargs)

        if index is Ellipsis:
            nd_image = nd_image[None, ...]

        return nd_image

    def write(self, ndimage: Union[ArrayLike, List[ArrayLike]]) -> Optional[bytes]:
        """RawPy does not support writing."""
        raise NotImplementedError()

    def iter(self) -> Iterator[np.ndarray]:
        """Load the image.

        Raw files contain a single image, so this yields at most once.

        Yields
        ------
        nd_image: ndarray
            The image data
        """

        try:
            yield self.read()
        except Exception:
            # Best-effort iteration: a failing read simply yields nothing.
            pass

    def metadata(
        self, index: int = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Read ndimage metadata.

        Parameters
        ----------
        index : int
            Ignored; raw files contain a single image.
        exclude_applied : bool
            If True, exclude metadata fields that are applied to the image while
            reading. For example, if the binary data contains a rotation flag,
            the image is rotated by default and the rotation flag is excluded
            from the metadata to avoid confusion.

        Returns
        -------
        metadata : dict
            A dictionary of format-specific metadata.

        """

        metadata = {}

        image_size = self._image_file.sizes

        metadata["black_level_per_channel"] = self._image_file.black_level_per_channel
        metadata["camera_white_level_per_channel"] = (
            self._image_file.camera_white_level_per_channel
        )
        metadata["color_desc"] = self._image_file.color_desc
        metadata["color_matrix"] = self._image_file.color_matrix
        metadata["daylight_whitebalance"] = self._image_file.daylight_whitebalance
        metadata["dtype"] = self._image_file.raw_image.dtype
        metadata["flip"] = image_size.flip
        metadata["num_colors"] = self._image_file.num_colors
        metadata["tone_curve"] = self._image_file.tone_curve
        metadata["width"] = image_size.width
        metadata["height"] = image_size.height
        metadata["raw_width"] = image_size.raw_width
        metadata["raw_height"] = image_size.raw_height
        metadata["raw_shape"] = self._image_file.raw_image.shape
        metadata["iwidth"] = image_size.iwidth
        metadata["iheight"] = image_size.iheight
        metadata["pixel_aspect"] = image_size.pixel_aspect
        metadata["white_level"] = self._image_file.white_level

        if exclude_applied:
            # Strip everything postprocess() already consumed; only the
            # output geometry (width/height/pixel_aspect) remains.
            metadata.pop("black_level_per_channel", None)
            metadata.pop("camera_white_level_per_channel", None)
            metadata.pop("color_desc", None)
            metadata.pop("color_matrix", None)
            metadata.pop("daylight_whitebalance", None)
            metadata.pop("dtype", None)
            metadata.pop("flip", None)
            metadata.pop("num_colors", None)
            metadata.pop("tone_curve", None)
            metadata.pop("raw_width", None)
            metadata.pop("raw_height", None)
            metadata.pop("raw_shape", None)
            metadata.pop("iwidth", None)
            metadata.pop("iheight", None)
            metadata.pop("white_level", None)

        return metadata

    def properties(self, index: int = None) -> ImageProperties:
        """Standardized ndimage metadata

        Returns
        -------
        properties : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        This does not decode pixel data and is fast for large images.

        """

        sizes = self._image_file.sizes

        width: int = sizes.width
        height: int = sizes.height
        shape: Tuple[int, ...] = (height, width)

        # dtype of the undecoded sensor data (typically uint16).
        dtype = self._image_file.raw_image.dtype

        return ImageProperties(shape=shape, dtype=dtype)
|
||||
156
.CondaPkg/env/Lib/site-packages/imageio/plugins/simpleitk.py
vendored
Normal file
156
.CondaPkg/env/Lib/site-packages/imageio/plugins/simpleitk.py
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write images using SimpleITK.
|
||||
|
||||
Backend: `Insight Toolkit <https://itk.org/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[itk]
|
||||
|
||||
The ItkFormat uses the ITK or SimpleITK library to support a range of
|
||||
ITK-related formats. It also supports a few common formats (e.g. PNG and JPEG).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
"""
|
||||
|
||||
from ..core import Format, has_module
|
||||
|
||||
_itk = None # Defer loading to load_lib() function.
|
||||
|
||||
|
||||
def load_lib():
    """Import the ITK backend and bind the module-level I/O callables.

    Prefers ``itk`` and falls back to ``SimpleITK``; raises ImportError
    when neither backend is installed.
    """
    global _itk, _read_function, _write_function

    try:
        import itk as _itk
    except ImportError:
        try:
            import SimpleITK as _itk
        except ImportError:
            raise ImportError(
                "itk could not be found. "
                "Please try "
                " python -m pip install itk "
                "or "
                " python -m pip install simpleitk "
                "or refer to "
                " https://itkpythonpackage.readthedocs.io/ "
                "for further instructions."
            )
        else:
            _read_function = _itk.ReadImage
            _write_function = _itk.WriteImage
    else:
        _read_function = _itk.imread
        _write_function = _itk.imwrite
    return _itk
|
||||
|
||||
|
||||
# Split up in real ITK and all supported formats.
# NOTE(review): several entries ("nia", "hdr", "hdf5", "lsm", "mnc", "mnc2",
# "mgh", "pic") lack the leading dot the other extensions carry, and "mnc"
# appears twice. If request.extension includes the dot (as the dotted
# entries suggest), the dot-less entries can never match — confirm against
# imageio.core.Request before relying on them.
ITK_FORMATS = (
    ".gipl",
    ".ipl",
    ".mha",
    ".mhd",
    ".nhdr",
    "nia",
    "hdr",
    ".nrrd",
    ".nii",
    ".nii.gz",
    ".img",
    ".img.gz",
    ".vtk",
    "hdf5",
    "lsm",
    "mnc",
    "mnc2",
    "mgh",
    "mnc",
    "pic",
)
# Common formats that ITK/SimpleITK can also handle, claimed only when one
# of the backends is actually importable (see ItkFormat._can_read).
ALL_FORMATS = ITK_FORMATS + (
    ".bmp",
    ".jpeg",
    ".jpg",
    ".png",
    ".tiff",
    ".tif",
    ".dicom",
    ".dcm",
    ".gdcm",
)
|
||||
|
||||
|
||||
class ItkFormat(Format):
    """See :mod:`imageio.plugins.simpleitk`"""

    def _can_read(self, request):
        # If the request is a format that only this plugin can handle,
        # we report that we can do it; a useful error will be raised
        # when simpleitk is not installed. For the more common formats
        # we only report that we can read if the library is installed.
        if request.extension in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return request.extension in ALL_FORMATS
        # Explicit False instead of the previous implicit None (both falsy).
        return False

    def _can_write(self, request):
        # Mirrors _can_read: ITK-only formats are always claimed, common
        # formats only when a backend is importable.
        if request.extension in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return request.extension in ALL_FORMATS
        return False

    # -- reader

    class Reader(Format.Reader):
        def _open(self, pixel_type=None, fallback_only=None, **kwargs):
            """Load the backend (if needed) and read the image.

            pixel_type and fallback_only are forwarded positionally to the
            backend's read function when given.
            """
            if not _itk:
                load_lib()
            args = ()
            if pixel_type is not None:
                args += (pixel_type,)
            if fallback_only is not None:
                args += (fallback_only,)
            self._img = _read_function(self.request.get_local_filename(), *args)

        def _get_length(self):
            # An ITK file contains exactly one image.
            return 1

        def _close(self):
            pass

        def _get_data(self, index):
            # Get data
            if index != 0:
                error_msg = "Index out of range while reading from itk file"
                raise IndexError(error_msg)

            # Return array and empty meta data
            return _itk.GetArrayFromImage(self._img), {}

        def _get_meta_data(self, index):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)

    # -- writer
    class Writer(Format.Writer):
        def _open(self):
            if not _itk:
                load_lib()

        def _close(self):
            pass

        def _append_data(self, im, meta):
            # Round-trip through the backend's image type to write.
            _itk_img = _itk.GetImageFromArray(im)
            _write_function(_itk_img, self.request.get_local_filename())

        def set_meta_data(self, meta):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)
|
||||
955
.CondaPkg/env/Lib/site-packages/imageio/plugins/spe.py
vendored
Normal file
955
.CondaPkg/env/Lib/site-packages/imageio/plugins/spe.py
vendored
Normal file
@@ -0,0 +1,955 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read SPE files.
|
||||
|
||||
This plugin supports reading files saved in the Princeton Instruments
|
||||
SPE file format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
check_filesize : bool
|
||||
The number of frames in the file is stored in the file header. However,
|
||||
this number may be wrong for certain software. If this is `True`
|
||||
(default), derive the number of frames also from the file size and
|
||||
raise a warning if the two values do not match.
|
||||
char_encoding : str
|
||||
Deprecated. Exists for backwards compatibility; use ``char_encoding`` of
|
||||
``metadata`` instead.
|
||||
sdt_meta : bool
|
||||
Deprecated. Exists for backwards compatibility; use ``sdt_control`` of
|
||||
``metadata`` instead.
|
||||
|
||||
Methods
|
||||
-------
|
||||
.. note::
|
||||
Check the respective function for a list of supported kwargs and detailed
|
||||
documentation.
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
SpePlugin.read
|
||||
SpePlugin.iter
|
||||
SpePlugin.properties
|
||||
SpePlugin.metadata
|
||||
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import os
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
Iterator,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core.request import Request, IOMode, InitializationError
|
||||
from ..core.v3_plugin_api import PluginV3, ImageProperties
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Spec:
|
||||
"""SPE file specification data
|
||||
|
||||
Tuples of (offset, datatype, count), where offset is the offset in the SPE
|
||||
file and datatype is the datatype as used in `numpy.fromfile`()
|
||||
|
||||
`data_start` is the offset of actual image data.
|
||||
|
||||
`dtypes` translates SPE datatypes (0...4) to numpy ones, e. g. dtypes[0]
|
||||
is dtype("<f") (which is np.float32).
|
||||
|
||||
`controllers` maps the `type` metadata to a human readable name
|
||||
|
||||
`readout_modes` maps the `readoutMode` metadata to something human readable
|
||||
although this may not be accurate since there is next to no documentation
|
||||
to be found.
|
||||
"""
|
||||
|
||||
basic = {
|
||||
"datatype": (108, "<h"), # dtypes
|
||||
"xdim": (42, "<H"),
|
||||
"ydim": (656, "<H"),
|
||||
"xml_footer_offset": (678, "<Q"),
|
||||
"NumFrames": (1446, "<i"),
|
||||
"file_header_ver": (1992, "<f"),
|
||||
}
|
||||
|
||||
metadata = {
|
||||
# ROI information
|
||||
"NumROI": (1510, "<h"),
|
||||
"ROIs": (
|
||||
1512,
|
||||
np.dtype(
|
||||
[
|
||||
("startx", "<H"),
|
||||
("endx", "<H"),
|
||||
("groupx", "<H"),
|
||||
("starty", "<H"),
|
||||
("endy", "<H"),
|
||||
("groupy", "<H"),
|
||||
]
|
||||
),
|
||||
10,
|
||||
),
|
||||
# chip-related sizes
|
||||
"xDimDet": (6, "<H"),
|
||||
"yDimDet": (18, "<H"),
|
||||
"VChipXdim": (14, "<h"),
|
||||
"VChipYdim": (16, "<h"),
|
||||
# other stuff
|
||||
"controller_version": (0, "<h"),
|
||||
"logic_output": (2, "<h"),
|
||||
"amp_high_cap_low_noise": (4, "<H"), # enum?
|
||||
"mode": (8, "<h"), # enum?
|
||||
"exposure_sec": (10, "<f"),
|
||||
"date": (20, "<10S"),
|
||||
"detector_temp": (36, "<f"),
|
||||
"detector_type": (40, "<h"),
|
||||
"st_diode": (44, "<h"),
|
||||
"delay_time": (46, "<f"),
|
||||
# shutter_control: normal, disabled open, disabled closed
|
||||
# But which one is which?
|
||||
"shutter_control": (50, "<H"),
|
||||
"absorb_live": (52, "<h"),
|
||||
"absorb_mode": (54, "<H"),
|
||||
"can_do_virtual_chip": (56, "<h"),
|
||||
"threshold_min_live": (58, "<h"),
|
||||
"threshold_min_val": (60, "<f"),
|
||||
"threshold_max_live": (64, "<h"),
|
||||
"threshold_max_val": (66, "<f"),
|
||||
"time_local": (172, "<7S"),
|
||||
"time_utc": (179, "<7S"),
|
||||
"adc_offset": (188, "<H"),
|
||||
"adc_rate": (190, "<H"),
|
||||
"adc_type": (192, "<H"),
|
||||
"adc_resolution": (194, "<H"),
|
||||
"adc_bit_adjust": (196, "<H"),
|
||||
"gain": (198, "<H"),
|
||||
"comments": (200, "<80S", 5),
|
||||
"geometric": (600, "<H"), # flags
|
||||
"sw_version": (688, "<16S"),
|
||||
"spare_4": (742, "<436S"),
|
||||
"XPrePixels": (98, "<h"),
|
||||
"XPostPixels": (100, "<h"),
|
||||
"YPrePixels": (102, "<h"),
|
||||
"YPostPixels": (104, "<h"),
|
||||
"readout_time": (672, "<f"),
|
||||
"xml_footer_offset": (678, "<Q"),
|
||||
"type": (704, "<h"), # controllers
|
||||
"clockspeed_us": (1428, "<f"),
|
||||
"readout_mode": (1480, "<H"), # readout_modes
|
||||
"window_size": (1482, "<H"),
|
||||
"file_header_ver": (1992, "<f"),
|
||||
}
|
||||
|
||||
data_start = 4100
|
||||
|
||||
dtypes = {
|
||||
0: np.dtype(np.float32),
|
||||
1: np.dtype(np.int32),
|
||||
2: np.dtype(np.int16),
|
||||
3: np.dtype(np.uint16),
|
||||
8: np.dtype(np.uint32),
|
||||
}
|
||||
|
||||
controllers = [
|
||||
"new120 (Type II)",
|
||||
"old120 (Type I)",
|
||||
"ST130",
|
||||
"ST121",
|
||||
"ST138",
|
||||
"DC131 (PentaMax)",
|
||||
"ST133 (MicroMax/Roper)",
|
||||
"ST135 (GPIB)",
|
||||
"VTCCD",
|
||||
"ST116 (GPIB)",
|
||||
"OMA3 (GPIB)",
|
||||
"OMA4",
|
||||
]
|
||||
|
||||
# This was gathered from random places on the internet and own experiments
|
||||
# with the camera. May not be accurate.
|
||||
readout_modes = ["full frame", "frame transfer", "kinetics"]
|
||||
|
||||
# Do not decode the following metadata keys into strings, but leave them
|
||||
# as byte arrays
|
||||
no_decode = ["spare_4"]
|
||||
|
||||
|
||||
class SDTControlSpec:
|
||||
"""Extract metadata written by the SDT-control software
|
||||
|
||||
Some of it is encoded in the comment strings
|
||||
(see :py:meth:`parse_comments`). Also, date and time are encoded in a
|
||||
peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata`
|
||||
to update the metadata dict.
|
||||
"""
|
||||
|
||||
months = {
|
||||
# Convert SDT-control month strings to month numbers
|
||||
"Jän": 1,
|
||||
"Jan": 1,
|
||||
"Feb": 2,
|
||||
"Mär": 3,
|
||||
"Mar": 3,
|
||||
"Apr": 4,
|
||||
"Mai": 5,
|
||||
"May": 5,
|
||||
"Jun": 6,
|
||||
"Jul": 7,
|
||||
"Aug": 8,
|
||||
"Sep": 9,
|
||||
"Okt": 10,
|
||||
"Oct": 10,
|
||||
"Nov": 11,
|
||||
"Dez": 12,
|
||||
"Dec": 12,
|
||||
}
|
||||
|
||||
sequence_types = {
|
||||
# TODO: complete
|
||||
"SEQU": "standard",
|
||||
"SETO": "TOCCSL",
|
||||
"KINE": "kinetics",
|
||||
"SEAR": "arbitrary",
|
||||
}
|
||||
|
||||
class CommentDesc:
|
||||
"""Describe how to extract a metadata entry from a comment string"""
|
||||
|
||||
n: int
|
||||
"""Which of the 5 SPE comment fields to use."""
|
||||
slice: slice
|
||||
"""Which characters from the `n`-th comment to use."""
|
||||
cvt: Callable[[str], Any]
|
||||
"""How to convert characters to something useful."""
|
||||
scale: Union[None, float]
|
||||
"""Optional scaling factor for numbers"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
n: int,
|
||||
slice: slice,
|
||||
cvt: Callable[[str], Any] = str,
|
||||
scale: Optional[float] = None,
|
||||
):
|
||||
self.n = n
|
||||
self.slice = slice
|
||||
self.cvt = cvt
|
||||
self.scale = scale
|
||||
|
||||
comment_fields = {
|
||||
(5, 0): {
|
||||
"sdt_major_version": CommentDesc(4, slice(66, 68), int),
|
||||
"sdt_minor_version": CommentDesc(4, slice(68, 70), int),
|
||||
"sdt_controller_name": CommentDesc(4, slice(0, 6), str),
|
||||
"exposure_time": CommentDesc(1, slice(64, 73), float, 10**-6),
|
||||
"color_code": CommentDesc(4, slice(10, 14), str),
|
||||
"detection_channels": CommentDesc(4, slice(15, 16), int),
|
||||
"background_subtraction": CommentDesc(4, 14, lambda x: x == "B"),
|
||||
"em_active": CommentDesc(4, 32, lambda x: x == "E"),
|
||||
"em_gain": CommentDesc(4, slice(28, 32), int),
|
||||
"modulation_active": CommentDesc(4, 33, lambda x: x == "A"),
|
||||
"pixel_size": CommentDesc(4, slice(25, 28), float, 0.1),
|
||||
"sequence_type": CommentDesc(
|
||||
4, slice(6, 10), lambda x: __class__.sequence_types[x]
|
||||
),
|
||||
"grid": CommentDesc(4, slice(16, 25), float, 10**-6),
|
||||
"n_macro": CommentDesc(1, slice(0, 4), int),
|
||||
"delay_macro": CommentDesc(1, slice(10, 19), float, 10**-3),
|
||||
"n_mini": CommentDesc(1, slice(4, 7), int),
|
||||
"delay_mini": CommentDesc(1, slice(19, 28), float, 10**-6),
|
||||
"n_micro": CommentDesc(1, slice(7, 10), int),
|
||||
"delay_micro": CommentDesc(1, slice(28, 37), float, 10**-6),
|
||||
"n_subpics": CommentDesc(1, slice(7, 10), int),
|
||||
"delay_shutter": CommentDesc(1, slice(73, 79), float, 10**-6),
|
||||
"delay_prebleach": CommentDesc(1, slice(37, 46), float, 10**-6),
|
||||
"bleach_time": CommentDesc(1, slice(46, 55), float, 10**-6),
|
||||
"recovery_time": CommentDesc(1, slice(55, 64), float, 10**-6),
|
||||
},
|
||||
(5, 1): {
|
||||
"bleach_piezo_active": CommentDesc(4, slice(34, 35), lambda x: x == "z")
|
||||
},
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]:
|
||||
"""Get the version of SDT-control metadata encoded in the comments
|
||||
|
||||
Parameters
|
||||
----------
|
||||
comments
|
||||
List of SPE file comments, typically ``metadata["comments"]``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
Major and minor version. ``-1, -1`` if detection failed.
|
||||
"""
|
||||
if comments[4][70:76] != "COMVER":
|
||||
return -1, -1
|
||||
try:
|
||||
return int(comments[4][76:78]), int(comments[4][78:80])
|
||||
except ValueError:
|
||||
return -1, -1
|
||||
|
||||
@staticmethod
|
||||
def parse_comments(
|
||||
comments: Sequence[str], version: Tuple[int, int]
|
||||
) -> Dict[str, Any]:
|
||||
"""Extract SDT-control metadata from comments
|
||||
|
||||
Parameters
|
||||
----------
|
||||
comments
|
||||
List of SPE file comments, typically ``metadata["comments"]``.
|
||||
version
|
||||
Major and minor version of SDT-control metadata format
|
||||
|
||||
Returns
|
||||
-------
|
||||
Dict of metadata
|
||||
"""
|
||||
sdt_md = {}
|
||||
for minor in range(version[1] + 1):
|
||||
# Metadata with same major version is backwards compatible.
|
||||
# Fields are specified incrementally in `comment_fields`.
|
||||
# E.g. if the file has version 5.01, `comment_fields[5, 0]` and
|
||||
# `comment_fields[5, 1]` need to be decoded.
|
||||
try:
|
||||
cmt = __class__.comment_fields[version[0], minor]
|
||||
except KeyError:
|
||||
continue
|
||||
for name, spec in cmt.items():
|
||||
try:
|
||||
v = spec.cvt(comments[spec.n][spec.slice])
|
||||
if spec.scale is not None:
|
||||
v *= spec.scale
|
||||
sdt_md[name] = v
|
||||
except Exception as e:
|
||||
warnings.warn(
|
||||
f"Failed to decode SDT-control metadata field `{name}`: {e}"
|
||||
)
|
||||
sdt_md[name] = None
|
||||
if version not in __class__.comment_fields:
|
||||
supported_ver = ", ".join(
|
||||
map(lambda x: f"{x[0]}.{x[1]:02}", __class__.comment_fields)
|
||||
)
|
||||
warnings.warn(
|
||||
f"Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. "
|
||||
f"Only versions {supported_ver} are supported. "
|
||||
"Some or all SDT-control metadata may be missing."
|
||||
)
|
||||
comment = comments[0] + comments[2]
|
||||
sdt_md["comment"] = comment.strip()
|
||||
return sdt_md
|
||||
|
||||
@staticmethod
|
||||
def get_datetime(date: str, time: str) -> Union[datetime, None]:
|
||||
"""Turn date and time saved by SDT-control into proper datetime object
|
||||
|
||||
Parameters
|
||||
----------
|
||||
date
|
||||
SPE file date, typically ``metadata["date"]``.
|
||||
time
|
||||
SPE file date, typically ``metadata["time_local"]``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
File's datetime if parsing was succsessful, else None.
|
||||
"""
|
||||
try:
|
||||
month = __class__.months[date[2:5]]
|
||||
return datetime(
|
||||
int(date[5:9]),
|
||||
month,
|
||||
int(date[0:2]),
|
||||
int(time[0:2]),
|
||||
int(time[2:4]),
|
||||
int(time[4:6]),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(f"Failed to decode date from SDT-control metadata: {e}.")
|
||||
|
||||
@staticmethod
|
||||
def extract_metadata(meta: Mapping, char_encoding: str = "latin1"):
|
||||
"""Extract SDT-control metadata from SPE metadata
|
||||
|
||||
SDT-control stores some metadata in comments and other fields.
|
||||
Extract them and remove unused entries.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
meta
|
||||
SPE file metadata. Modified in place.
|
||||
char_encoding
|
||||
Character encoding used to decode strings in the metadata.
|
||||
"""
|
||||
comver = __class__.get_comment_version(meta["comments"])
|
||||
if any(c < 0 for c in comver):
|
||||
# This file most likely was not created by SDT-control
|
||||
logger.debug("SDT-control comments not found.")
|
||||
return
|
||||
|
||||
sdt_meta = __class__.parse_comments(meta["comments"], comver)
|
||||
meta.pop("comments")
|
||||
meta.update(sdt_meta)
|
||||
|
||||
# Get date and time in a usable format
|
||||
dt = __class__.get_datetime(meta["date"], meta["time_local"])
|
||||
if dt:
|
||||
meta["datetime"] = dt
|
||||
meta.pop("date")
|
||||
meta.pop("time_local")
|
||||
|
||||
sp4 = meta["spare_4"]
|
||||
try:
|
||||
meta["modulation_script"] = sp4.decode(char_encoding)
|
||||
meta.pop("spare_4")
|
||||
except UnicodeDecodeError:
|
||||
warnings.warn(
|
||||
"Failed to decode SDT-control laser "
|
||||
"modulation script. Bad char_encoding?"
|
||||
)
|
||||
|
||||
# Get rid of unused data
|
||||
meta.pop("time_utc")
|
||||
meta.pop("exposure_sec")
|
||||
|
||||
|
||||
class SpePlugin(PluginV3):
|
||||
def __init__(
|
||||
self,
|
||||
request: Request,
|
||||
check_filesize: bool = True,
|
||||
char_encoding: Optional[str] = None,
|
||||
sdt_meta: Optional[bool] = None,
|
||||
) -> None:
|
||||
"""Instantiate a new SPE file plugin object
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : Request
|
||||
A request object representing the resource to be operated on.
|
||||
check_filesize : bool
|
||||
If True, compute the number of frames from the filesize, compare it
|
||||
to the frame count in the file header, and raise a warning if the
|
||||
counts don't match. (Certain software may create files with
|
||||
char_encoding : str
|
||||
Deprecated. Exists for backwards compatibility; use ``char_encoding`` of
|
||||
``metadata`` instead.
|
||||
sdt_meta : bool
|
||||
Deprecated. Exists for backwards compatibility; use ``sdt_control`` of
|
||||
``metadata`` instead.
|
||||
|
||||
"""
|
||||
|
||||
super().__init__(request)
|
||||
if request.mode.io_mode == IOMode.write:
|
||||
raise InitializationError("cannot write SPE files")
|
||||
|
||||
if char_encoding is not None:
|
||||
warnings.warn(
|
||||
"Passing `char_encoding` to the constructor is deprecated. "
|
||||
"Use `char_encoding` parameter of the `metadata()` method "
|
||||
"instead.",
|
||||
DeprecationWarning,
|
||||
)
|
||||
self._char_encoding = char_encoding
|
||||
if sdt_meta is not None:
|
||||
warnings.warn(
|
||||
"Passing `sdt_meta` to the constructor is deprecated. "
|
||||
"Use `sdt_control` parameter of the `metadata()` method "
|
||||
"instead.",
|
||||
DeprecationWarning,
|
||||
)
|
||||
self._sdt_meta = sdt_meta
|
||||
|
||||
self._file = self.request.get_file()
|
||||
|
||||
try:
|
||||
# Spec.basic contains no string, no need to worry about character
|
||||
# encoding.
|
||||
info = self._parse_header(Spec.basic, "latin1")
|
||||
self._file_header_ver = info["file_header_ver"]
|
||||
self._dtype = Spec.dtypes[info["datatype"]]
|
||||
self._shape = (info["ydim"], info["xdim"])
|
||||
self._len = info["NumFrames"]
|
||||
|
||||
if check_filesize:
|
||||
# Some software writes incorrect `NumFrames` metadata.
|
||||
# To determine the number of frames, check the size of the data
|
||||
# segment -- until the end of the file for SPE<3, until the
|
||||
# xml footer for SPE>=3.
|
||||
if info["file_header_ver"] >= 3:
|
||||
data_end = info["xml_footer_offset"]
|
||||
else:
|
||||
self._file.seek(0, os.SEEK_END)
|
||||
data_end = self._file.tell()
|
||||
line = data_end - Spec.data_start
|
||||
line //= self._shape[0] * self._shape[1] * self._dtype.itemsize
|
||||
if line != self._len:
|
||||
warnings.warn(
|
||||
f"The file header of {self.request.filename} claims there are "
|
||||
f"{self._len} frames, but there are actually {line} frames."
|
||||
)
|
||||
self._len = min(line, self._len)
|
||||
self._file.seek(Spec.data_start)
|
||||
except Exception:
|
||||
raise InitializationError("SPE plugin cannot read the provided file.")
|
||||
|
||||
def read(self, *, index: int = ...) -> np.ndarray:
|
||||
"""Read a frame or all frames from the file
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
Select the index-th frame from the file. If index is `...`,
|
||||
select all frames and stack them along a new axis.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A Numpy array of pixel values.
|
||||
|
||||
"""
|
||||
|
||||
if index is Ellipsis:
|
||||
read_offset = Spec.data_start
|
||||
count = self._shape[0] * self._shape[1] * self._len
|
||||
out_shape = (self._len, *self._shape)
|
||||
elif index < 0:
|
||||
raise IndexError(f"Index `{index}` is smaller than 0.")
|
||||
elif index >= self._len:
|
||||
raise IndexError(
|
||||
f"Index `{index}` exceeds the number of frames stored in this file (`{self._len}`)."
|
||||
)
|
||||
else:
|
||||
read_offset = (
|
||||
Spec.data_start
|
||||
+ index * self._shape[0] * self._shape[1] * self._dtype.itemsize
|
||||
)
|
||||
count = self._shape[0] * self._shape[1]
|
||||
out_shape = self._shape
|
||||
|
||||
self._file.seek(read_offset)
|
||||
data = np.fromfile(self._file, dtype=self._dtype, count=count)
|
||||
return data.reshape(out_shape)
|
||||
|
||||
def iter(self) -> Iterator[np.ndarray]:
|
||||
"""Iterate over the frames in the file
|
||||
|
||||
Yields
|
||||
------
|
||||
A Numpy array of pixel values.
|
||||
"""
|
||||
|
||||
return (self.read(index=i) for i in range(self._len))
|
||||
|
||||
def metadata(
|
||||
self,
|
||||
index: int = ...,
|
||||
exclude_applied: bool = True,
|
||||
char_encoding: str = "latin1",
|
||||
sdt_control: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""SPE specific metadata.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
Ignored as SPE files only store global metadata.
|
||||
exclude_applied : bool
|
||||
Ignored. Exists for API compatibility.
|
||||
char_encoding : str
|
||||
The encoding to use when parsing strings.
|
||||
sdt_control : bool
|
||||
If `True`, decode special metadata written by the
|
||||
SDT-control software if present.
|
||||
|
||||
Returns
|
||||
-------
|
||||
metadata : dict
|
||||
Key-value pairs of metadata.
|
||||
|
||||
Notes
|
||||
-----
|
||||
SPE v3 stores metadata as XML, whereas SPE v2 uses a binary format.
|
||||
|
||||
.. rubric:: Supported SPE v2 Metadata fields
|
||||
|
||||
ROIs : list of dict
|
||||
Regions of interest used for recording images. Each dict has the
|
||||
"top_left" key containing x and y coordinates of the top left corner,
|
||||
the "bottom_right" key with x and y coordinates of the bottom right
|
||||
corner, and the "bin" key with number of binned pixels in x and y
|
||||
directions.
|
||||
comments : list of str
|
||||
The SPE format allows for 5 comment strings of 80 characters each.
|
||||
controller_version : int
|
||||
Hardware version
|
||||
logic_output : int
|
||||
Definition of output BNC
|
||||
amp_hi_cap_low_noise : int
|
||||
Amp switching mode
|
||||
mode : int
|
||||
Timing mode
|
||||
exp_sec : float
|
||||
Alternative exposure in seconds
|
||||
date : str
|
||||
Date string
|
||||
detector_temp : float
|
||||
Detector temperature
|
||||
detector_type : int
|
||||
CCD / diode array type
|
||||
st_diode : int
|
||||
Trigger diode
|
||||
delay_time : float
|
||||
Used with async mode
|
||||
shutter_control : int
|
||||
Normal, disabled open, or disabled closed
|
||||
absorb_live : bool
|
||||
on / off
|
||||
absorb_mode : int
|
||||
Reference strip or file
|
||||
can_do_virtual_chip : bool
|
||||
True or False whether chip can do virtual chip
|
||||
threshold_min_live : bool
|
||||
on / off
|
||||
threshold_min_val : float
|
||||
Threshold minimum value
|
||||
threshold_max_live : bool
|
||||
on / off
|
||||
threshold_max_val : float
|
||||
Threshold maximum value
|
||||
time_local : str
|
||||
Experiment local time
|
||||
time_utc : str
|
||||
Experiment UTC time
|
||||
adc_offset : int
|
||||
ADC offset
|
||||
adc_rate : int
|
||||
ADC rate
|
||||
adc_type : int
|
||||
ADC type
|
||||
adc_resolution : int
|
||||
ADC resolution
|
||||
adc_bit_adjust : int
|
||||
ADC bit adjust
|
||||
gain : int
|
||||
gain
|
||||
sw_version : str
|
||||
Version of software which created this file
|
||||
spare_4 : bytes
|
||||
Reserved space
|
||||
readout_time : float
|
||||
Experiment readout time
|
||||
type : str
|
||||
Controller type
|
||||
clockspeed_us : float
|
||||
Vertical clock speed in microseconds
|
||||
readout_mode : ["full frame", "frame transfer", "kinetics", ""]
|
||||
Readout mode. Empty string means that this was not set by the
|
||||
Software.
|
||||
window_size : int
|
||||
Window size for Kinetics mode
|
||||
file_header_ver : float
|
||||
File header version
|
||||
chip_size : [int, int]
|
||||
x and y dimensions of the camera chip
|
||||
virt_chip_size : [int, int]
|
||||
Virtual chip x and y dimensions
|
||||
pre_pixels : [int, int]
|
||||
Pre pixels in x and y dimensions
|
||||
post_pixels : [int, int],
|
||||
Post pixels in x and y dimensions
|
||||
geometric : list of {"rotate", "reverse", "flip"}
|
||||
Geometric operations
|
||||
sdt_major_version : int
|
||||
(only for files created by SDT-control)
|
||||
Major version of SDT-control software
|
||||
sdt_minor_version : int
|
||||
(only for files created by SDT-control)
|
||||
Minor version of SDT-control software
|
||||
sdt_controller_name : str
|
||||
(only for files created by SDT-control)
|
||||
Controller name
|
||||
exposure_time : float
|
||||
(only for files created by SDT-control)
|
||||
Exposure time in seconds
|
||||
color_code : str
|
||||
(only for files created by SDT-control)
|
||||
Color channels used
|
||||
detection_channels : int
|
||||
(only for files created by SDT-control)
|
||||
Number of channels
|
||||
background_subtraction : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether background subtraction war turned on
|
||||
em_active : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether EM was turned on
|
||||
em_gain : int
|
||||
(only for files created by SDT-control)
|
||||
EM gain
|
||||
modulation_active : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether laser modulation (“attenuate”) was turned on
|
||||
pixel_size : float
|
||||
(only for files created by SDT-control)
|
||||
Camera pixel size
|
||||
sequence_type : str
|
||||
(only for files created by SDT-control)
|
||||
Type of sequnce (standard, TOCCSL, arbitrary, …)
|
||||
grid : float
|
||||
(only for files created by SDT-control)
|
||||
Sequence time unit (“grid size”) in seconds
|
||||
n_macro : int
|
||||
(only for files created by SDT-control)
|
||||
Number of macro loops
|
||||
delay_macro : float
|
||||
(only for files created by SDT-control)
|
||||
Time between macro loops in seconds
|
||||
n_mini : int
|
||||
(only for files created by SDT-control)
|
||||
Number of mini loops
|
||||
delay_mini : float
|
||||
(only for files created by SDT-control)
|
||||
Time between mini loops in seconds
|
||||
n_micro : int (only for files created by SDT-control)
|
||||
Number of micro loops
|
||||
delay_micro : float (only for files created by SDT-control)
|
||||
Time between micro loops in seconds
|
||||
n_subpics : int
|
||||
(only for files created by SDT-control)
|
||||
Number of sub-pictures
|
||||
delay_shutter : float
|
||||
(only for files created by SDT-control)
|
||||
Camera shutter delay in seconds
|
||||
delay_prebleach : float
|
||||
(only for files created by SDT-control)
|
||||
Pre-bleach delay in seconds
|
||||
bleach_time : float
|
||||
(only for files created by SDT-control)
|
||||
Bleaching time in seconds
|
||||
recovery_time : float
|
||||
(only for files created by SDT-control)
|
||||
Recovery time in seconds
|
||||
comment : str
|
||||
(only for files created by SDT-control)
|
||||
User-entered comment. This replaces the "comments" field.
|
||||
datetime : datetime.datetime
|
||||
(only for files created by SDT-control)
|
||||
Combines the "date" and "time_local" keys. The latter two plus
|
||||
"time_utc" are removed.
|
||||
modulation_script : str
|
||||
(only for files created by SDT-control)
|
||||
Laser modulation script. Replaces the "spare_4" key.
|
||||
bleach_piezo_active : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether piezo for bleaching was enabled
|
||||
"""
|
||||
|
||||
if self._file_header_ver < 3:
|
||||
if self._char_encoding is not None:
|
||||
char_encoding = self._char_encoding
|
||||
if self._sdt_meta is not None:
|
||||
sdt_control = self._sdt_meta
|
||||
return self._metadata_pre_v3(char_encoding, sdt_control)
|
||||
return self._metadata_post_v3()
|
||||
|
||||
def _metadata_pre_v3(self, char_encoding: str, sdt_control: bool) -> Dict[str, Any]:
|
||||
"""Extract metadata from SPE v2 files
|
||||
|
||||
Parameters
|
||||
----------
|
||||
char_encoding
|
||||
String character encoding
|
||||
sdt_control
|
||||
If `True`, try to decode special metadata written by the
|
||||
SDT-control software.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict mapping metadata names to values.
|
||||
|
||||
"""
|
||||
|
||||
m = self._parse_header(Spec.metadata, char_encoding)
|
||||
|
||||
nr = m.pop("NumROI", None)
|
||||
nr = 1 if nr < 1 else nr
|
||||
m["ROIs"] = roi_array_to_dict(m["ROIs"][:nr])
|
||||
|
||||
# chip sizes
|
||||
m["chip_size"] = [m.pop(k, None) for k in ("xDimDet", "yDimDet")]
|
||||
m["virt_chip_size"] = [m.pop(k, None) for k in ("VChipXdim", "VChipYdim")]
|
||||
m["pre_pixels"] = [m.pop(k, None) for k in ("XPrePixels", "YPrePixels")]
|
||||
m["post_pixels"] = [m.pop(k, None) for k in ("XPostPixels", "YPostPixels")]
|
||||
|
||||
# convert comments from numpy.str_ to str
|
||||
m["comments"] = [str(c) for c in m["comments"]]
|
||||
|
||||
# geometric operations
|
||||
g = []
|
||||
f = m.pop("geometric", 0)
|
||||
if f & 1:
|
||||
g.append("rotate")
|
||||
if f & 2:
|
||||
g.append("reverse")
|
||||
if f & 4:
|
||||
g.append("flip")
|
||||
m["geometric"] = g
|
||||
|
||||
# Make some additional information more human-readable
|
||||
t = m["type"]
|
||||
if 1 <= t <= len(Spec.controllers):
|
||||
m["type"] = Spec.controllers[t - 1]
|
||||
else:
|
||||
m["type"] = None
|
||||
r = m["readout_mode"]
|
||||
if 1 <= r <= len(Spec.readout_modes):
|
||||
m["readout_mode"] = Spec.readout_modes[r - 1]
|
||||
else:
|
||||
m["readout_mode"] = None
|
||||
|
||||
# bools
|
||||
for k in (
|
||||
"absorb_live",
|
||||
"can_do_virtual_chip",
|
||||
"threshold_min_live",
|
||||
"threshold_max_live",
|
||||
):
|
||||
m[k] = bool(m[k])
|
||||
|
||||
# Extract SDT-control metadata if desired
|
||||
if sdt_control:
|
||||
SDTControlSpec.extract_metadata(m, char_encoding)
|
||||
|
||||
return m
|
||||
|
||||
def _metadata_post_v3(self) -> Dict[str, Any]:
|
||||
"""Extract XML metadata from SPE v3 files
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict with key `"__xml"`, whose value is the XML metadata
|
||||
"""
|
||||
|
||||
info = self._parse_header(Spec.basic, "latin1")
|
||||
self._file.seek(info["xml_footer_offset"])
|
||||
xml = self._file.read()
|
||||
return {"__xml": xml}
|
||||
|
||||
def properties(self, index: int = ...) -> ImageProperties:
|
||||
"""Standardized ndimage metadata.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
If the index is an integer, select the index-th frame and return
|
||||
its properties. If index is an Ellipsis (...), return the
|
||||
properties of all frames in the file stacked along a new batch
|
||||
dimension.
|
||||
|
||||
Returns
|
||||
-------
|
||||
properties : ImageProperties
|
||||
A dataclass filled with standardized image metadata.
|
||||
"""
|
||||
|
||||
if index is Ellipsis:
|
||||
return ImageProperties(
|
||||
shape=(self._len, *self._shape),
|
||||
dtype=self._dtype,
|
||||
n_images=self._len,
|
||||
is_batch=True,
|
||||
)
|
||||
return ImageProperties(shape=self._shape, dtype=self._dtype, is_batch=False)
|
||||
|
||||
def _parse_header(
|
||||
self, spec: Mapping[str, Tuple], char_encoding: str
|
||||
) -> Dict[str, Any]:
|
||||
"""Get information from SPE file header
|
||||
|
||||
Parameters
|
||||
----------
|
||||
spec
|
||||
Maps header entry name to its location, data type description and
|
||||
optionally number of entries. See :py:attr:`Spec.basic` and
|
||||
:py:attr:`Spec.metadata`.
|
||||
char_encoding
|
||||
String character encoding
|
||||
|
||||
Returns
|
||||
-------
|
||||
Dict mapping header entry name to its value
|
||||
"""
|
||||
|
||||
ret = {}
|
||||
# Decode each string from the numpy array read by np.fromfile
|
||||
decode = np.vectorize(lambda x: x.decode(char_encoding))
|
||||
|
||||
for name, sp in spec.items():
|
||||
self._file.seek(sp[0])
|
||||
cnt = 1 if len(sp) < 3 else sp[2]
|
||||
v = np.fromfile(self._file, dtype=sp[1], count=cnt)
|
||||
if v.dtype.kind == "S" and name not in Spec.no_decode:
|
||||
# Silently ignore string decoding failures
|
||||
try:
|
||||
v = decode(v)
|
||||
except Exception:
|
||||
warnings.warn(
|
||||
f'Failed to decode "{name}" metadata '
|
||||
"string. Check `char_encoding` parameter."
|
||||
)
|
||||
|
||||
try:
|
||||
# For convenience, if the array contains only one single
|
||||
# entry, return this entry itself.
|
||||
v = v.item()
|
||||
except ValueError:
|
||||
v = np.squeeze(v)
|
||||
ret[name] = v
|
||||
return ret
|
||||
|
||||
|
||||
def roi_array_to_dict(a: np.ndarray) -> List[Dict[str, List[int]]]:
|
||||
"""Convert the `ROIs` structured arrays to :py:class:`dict`
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a
|
||||
Structured array containing ROI data
|
||||
|
||||
Returns
|
||||
-------
|
||||
One dict per ROI. Keys are "top_left", "bottom_right", and "bin",
|
||||
values are tuples whose first element is the x axis value and the
|
||||
second element is the y axis value.
|
||||
"""
|
||||
|
||||
dict_list = []
|
||||
a = a[["startx", "starty", "endx", "endy", "groupx", "groupy"]]
|
||||
for sx, sy, ex, ey, gx, gy in a:
|
||||
roi_dict = {
|
||||
"top_left": [int(sx), int(sy)],
|
||||
"bottom_right": [int(ex), int(ey)],
|
||||
"bin": [int(gx), int(gy)],
|
||||
}
|
||||
dict_list.append(roi_dict)
|
||||
return dict_list
|
||||
336
.CondaPkg/env/Lib/site-packages/imageio/plugins/swf.py
vendored
Normal file
336
.CondaPkg/env/Lib/site-packages/imageio/plugins/swf.py
vendored
Normal file
@@ -0,0 +1,336 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write SWF files.
|
||||
|
||||
Backend: internal
|
||||
|
||||
Shockwave flash (SWF) is a media format designed for rich and
|
||||
interactive animations. This plugin makes use of this format to
|
||||
store a series of images in a lossless format with good compression
|
||||
(zlib). The resulting images can be shown as an animation using
|
||||
a flash player (such as the browser).
|
||||
|
||||
SWF stores images in RGBA format. RGB or grayscale images are
|
||||
automatically converted. SWF does not support meta data.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
loop : bool
|
||||
If True, the video will rewind as soon as a frame is requested
|
||||
beyond the last frame. Otherwise, IndexError is raised. Default False.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
fps : int
|
||||
The speed to play the animation. Default 12.
|
||||
loop : bool
|
||||
If True, add a tag to the end of the file to play again from
|
||||
the first frame. Most flash players will then play the movie
|
||||
in a loop. Note that the imageio SWF Reader does not check this
|
||||
tag. Default True.
|
||||
html : bool
|
||||
If the output is a file on the file system, write an html file
|
||||
(in HTML5) that shows the animation. Default False.
|
||||
compress : bool
|
||||
Whether to compress the swf file. Default False. You probably don't
|
||||
want to use this. This does not decrease the file size since
|
||||
the images are already compressed. It will result in slower
|
||||
read and write time. The only purpose of this feature is to
|
||||
create compressed SWF files, so that we can test the
|
||||
functionality to read them.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import zlib
|
||||
import logging
|
||||
from io import BytesIO
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, read_n_bytes, image_as_uint
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_swf = None # lazily loaded in lib()
|
||||
|
||||
|
||||
def load_lib():
|
||||
global _swf
|
||||
from . import _swf
|
||||
|
||||
return _swf
|
||||
|
||||
|
||||
class SWFFormat(Format):
    """See :mod:`imageio.plugins.swf`"""

    def _can_read(self, request):
        # SWF files start with "FWS" (uncompressed) or "CWS" (zlib-compressed
        # body). Implicitly returns None (falsy) for anything else.
        tmp = request.firstbytes[0:3].decode("ascii", "ignore")
        if tmp in ("FWS", "CWS"):
            return True

    def _can_write(self, request):
        # Claim the request purely based on the target file extension.
        if request.extension in self.extensions:
            return True

    # -- reader

    class Reader(Format.Reader):
        # Reads RGB(A) lossless bitmap tags from an SWF movie. Seekable files
        # are indexed up-front; non-seekable files are consumed as a stream
        # and report a length of ``inf``.

        def _open(self, loop=False):
            """Open the SWF file, decompress it if needed, and index images.

            Parameters
            ----------
            loop : bool
                If True, frame indices wrap around instead of raising
                IndexError (only effective for seekable/indexed files).
            """
            if not _swf:
                load_lib()

            self._arg_loop = bool(loop)

            self._fp = self.request.get_file()

            # Check file ...
            tmp = self.request.firstbytes[0:3].decode("ascii", "ignore")
            if tmp == "FWS":
                pass  # OK
            elif tmp == "CWS":
                # Compressed, we need to decompress. Everything after the
                # 8-byte header is one zlib stream.
                bb = self._fp.read()
                bb = bb[:8] + zlib.decompress(bb[8:])
                # Wrap up in a file object
                self._fp = BytesIO(bb)
            else:
                raise IOError("This does not look like a valid SWF file")

            # Skip first bytes. This also tests support for seeking ...
            try:
                self._fp.seek(8)
                self._streaming_mode = False
            except Exception:
                # Not seekable: consume the 8 header bytes by reading instead.
                self._streaming_mode = True
                self._fp_read(8)

            # Skip header
            # Note that the number of frames is there, which we could
            # potentially use, but the number of frames does not necessarily
            # correspond to the number of images.
            nbits = _swf.bits2int(self._fp_read(1), 5)
            nbits = 5 + nbits * 4
            Lrect = nbits / 8.0
            if Lrect % 1:
                # Round the RECT byte-size up to a whole byte.
                Lrect += 1
            Lrect = int(Lrect)
            # Skip the rest of the header; the RECT's first byte was already
            # consumed above, hence +3 rather than +4.
            self._fp_read(Lrect + 3)

            # Now the rest is basically tags ...
            self._imlocs = []  # tuple (loc, sze, T, L1)
            if not self._streaming_mode:
                # Collect locations of frame, while skipping through the data
                # This does not read any of the tag *data*.
                try:
                    while True:
                        isimage, sze, T, L1 = self._read_one_tag()
                        loc = self._fp.tell()
                        if isimage:
                            # Still need to check if the format is right
                            format = ord(self._fp_read(3)[2:])
                            if format == 5:  # RGB or RGBA lossless
                                self._imlocs.append((loc, sze, T, L1))
                        self._fp.seek(loc + sze)  # Skip over tag
                except IndexError:
                    pass  # done reading

        def _fp_read(self, n):
            """Read exactly ``n`` bytes from the current file object."""
            return read_n_bytes(self._fp, n)

        def _close(self):
            # Nothing to release here; the Request owns the underlying file,
            # and a BytesIO needs no explicit close.
            pass

        def _get_length(self):
            """Return the number of indexed images, or inf when streaming."""
            if self._streaming_mode:
                return np.inf
            else:
                return len(self._imlocs)

        def _get_data(self, index):
            """Return ``(image, meta)`` for the image at ``index``.

            In streaming mode ``index`` is ignored and the next readable
            image in the stream is returned; reaching the end of the movie
            raises IndexError (from ``_read_one_tag``).
            """
            # Check index
            if index < 0:
                # NOTE(review): message should read ">= 0" — negative indices
                # are rejected, zero is valid.
                raise IndexError("Index in swf file must be > 0")
            if not self._streaming_mode:
                if self._arg_loop and self._imlocs:
                    index = index % len(self._imlocs)
                if index >= len(self._imlocs):
                    raise IndexError("Index out of bounds")

            if self._streaming_mode:
                # Walk over tags until we find an image
                while True:
                    isimage, sze, T, L1 = self._read_one_tag()
                    bb = self._fp_read(sze)  # always read data
                    if isimage:
                        im = _swf.read_pixels(bb, 0, T, L1)  # can be None
                        if im is not None:
                            return im, {}

            else:
                # Go to corresponding location, read data, and convert to image
                loc, sze, T, L1 = self._imlocs[index]
                self._fp.seek(loc)
                bb = self._fp_read(sze)
                # Read_pixels should return ndarry, since we checked format
                im = _swf.read_pixels(bb, 0, T, L1)
                return im, {}

        def _read_one_tag(self):
            """
            Read a single tag header and classify it.

            Returns a tuple ``(isimage, sze, T, L1)``: ``isimage`` is True
            for a lossless-bitmap tag (type 20 or 36), ``sze`` is the number
            of payload bytes following the 6-byte header, ``T`` is the tag
            type and ``L1`` the short length field. The payload itself is
            *not* read here. Raises IndexError at the end of the movie and
            RuntimeError on an invalid length field.
            """

            # Get head
            head = self._fp_read(6)
            if not head:  # pragma: no cover
                raise IndexError("Reached end of swf movie")

            # Determine type and length
            T, L1, L2 = _swf.get_type_and_len(head)
            if not L2:  # pragma: no cover
                raise RuntimeError("Invalid tag length, could not proceed")

            # Read data
            isimage = False
            sze = L2 - 6
            # bb = self._fp_read(L2 - 6)

            # Parse tag
            if T == 0:
                # Tag type 0 marks the end of the movie.
                raise IndexError("Reached end of swf movie")
            elif T in [20, 36]:
                isimage = True
                # im = _swf.read_pixels(bb, 0, T, L1)  # can be None
            elif T in [6, 21, 35, 90]:  # pragma: no cover
                logger.warning("Ignoring JPEG image: cannot read JPEG.")
            else:
                pass  # Not an image tag

            # Done. Return image. Can be None
            # return im
            return isimage, sze, T, L1

        def _get_meta_data(self, index):
            return {}  # This format does not support meta data

    # -- writer

    class Writer(Format.Writer):
        # Writes each frame as a lossless bitmap tag plus the shape/place/
        # show tags needed to display it.

        def _open(self, fps=12, loop=True, html=False, compress=False):
            """Prepare the writer.

            Parameters
            ----------
            fps : int
                Playback speed of the animation. Default 12.
            loop : bool
                If False, a "stop" action tag is appended so players do not
                loop. Default True.
            html : bool
                If True (and writing to a real file), also emit an HTML page
                that embeds the animation. Default False.
            compress : bool
                If True, zlib-compress the movie body on close. Default
                False.
            """
            if not _swf:
                load_lib()

            self._arg_fps = int(fps)
            self._arg_loop = bool(loop)
            self._arg_html = bool(html)
            self._arg_compress = bool(compress)

            self._fp = self.request.get_file()
            self._framecounter = 0
            self._framesize = (100, 100)

            # For compress, we use an in-memory file object
            if self._arg_compress:
                self._fp_real = self._fp
                self._fp = BytesIO()

        def _close(self):
            """Finalize the movie: patch frame count and file size, compress
            the body if requested, and optionally write an HTML wrapper."""
            self._complete()
            # Get size of (uncompressed) file
            sze = self._fp.tell()
            # set nframes, this is in the potentially compressed region
            self._fp.seek(self._location_to_save_nframes)
            self._fp.write(_swf.int2uint16(self._framecounter))
            # Compress body?
            if self._arg_compress:
                bb = self._fp.getvalue()
                self._fp = self._fp_real
                # The 8-byte header stays uncompressed; only the body is
                # deflated.
                self._fp.write(bb[:8])
                self._fp.write(zlib.compress(bb[8:]))
                sze = self._fp.tell()  # renew sze value
            # set size
            self._fp.seek(4)
            self._fp.write(_swf.int2uint32(sze))
            self._fp = None  # Disable

            # Write html?
            if self._arg_html and os.path.isfile(self.request.filename):
                dirname, fname = os.path.split(self.request.filename)
                filename = os.path.join(dirname, fname[:-4] + ".html")
                w, h = self._framesize
                html = HTML % (fname, w, h, fname)
                with open(filename, "wb") as f:
                    f.write(html.encode("utf-8"))

        def _write_header(self, framesize, fps):
            """Write the SWF file header plus initial attribute/background
            tags; leaves the FileLength and nframes fields as placeholders
            to be patched in ``_close``."""
            self._framesize = framesize
            # Called as soon as we know framesize; when we get first frame
            bb = b""
            bb += "FC"[self._arg_compress].encode("ascii")
            bb += "WS".encode("ascii")  # signature bytes
            bb += _swf.int2uint8(8)  # version
            bb += "0000".encode("ascii")  # FileLength (leave open for now)
            bb += (
                _swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
            )
            bb += _swf.int2uint8(0) + _swf.int2uint8(fps)  # FrameRate
            self._location_to_save_nframes = len(bb)
            bb += "00".encode("ascii")  # nframes (leave open for now)
            self._fp.write(bb)

            # Write some initial tags
            taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0)
            for tag in taglist:
                self._fp.write(tag.get_tag())

        def _complete(self):
            """Emit the trailing tags (optional stop action, end tag)."""
            # What if no images were saved?
            if not self._framecounter:
                self._write_header((10, 10), self._arg_fps)
            # Write stop tag if we do not loop
            if not self._arg_loop:
                self._fp.write(_swf.DoActionTag("stop").get_tag())
            # finish with end tag
            self._fp.write("\x00\x00".encode("ascii"))

        def _append_data(self, im, meta):
            """Append one frame as bitmap + shape + place + show-frame tags.

            ``meta`` is accepted for interface compatibility but unused.
            """
            # Correct shape and type
            if im.ndim == 3 and im.shape[-1] == 1:
                # Drop a trailing singleton channel axis (grayscale).
                im = im[:, :, 0]
            im = image_as_uint(im, bitdepth=8)
            # Get frame size
            wh = im.shape[1], im.shape[0]
            # Write header on first frame
            isfirstframe = False
            if self._framecounter == 0:
                isfirstframe = True
                self._write_header(wh, self._arg_fps)
            # Create tags
            bm = _swf.BitmapTag(im)
            sh = _swf.ShapeTag(bm.id, (0, 0), wh)
            po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe))
            sf = _swf.ShowFrameTag()
            # Write tags
            for tag in [bm, sh, po, sf]:
                self._fp.write(tag.get_tag())
            self._framecounter += 1

        def set_meta_data(self, meta):
            # SWF carries no global metadata; accept and ignore.
            pass
|
||||
|
||||
|
||||
# Template for a minimal HTML5 page that embeds a written SWF animation.
# Filled with old-style %-formatting as (fname, width, height, fname);
# keep the placeholder order in sync with Writer._close() above.
# Fix: the original template never closed <body> before </html>.
HTML = """
<!DOCTYPE html>
<html>
<head>
    <title>Show Flash animation %s</title>
</head>
<body>
    <embed width="%i" height="%i" src="%s">
</body>
</html>
"""
|
||||
561
.CondaPkg/env/Lib/site-packages/imageio/plugins/tifffile.py
vendored
Normal file
561
.CondaPkg/env/Lib/site-packages/imageio/plugins/tifffile.py
vendored
Normal file
@@ -0,0 +1,561 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write TIFF files.
|
||||
|
||||
Backend: internal
|
||||
|
||||
Provides support for a wide range of Tiff images using the tifffile
|
||||
backend.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
offset : int
|
||||
Optional start position of embedded file. By default this is
|
||||
the current file position.
|
||||
size : int
|
||||
Optional size of embedded file. By default this is the number
|
||||
of bytes from the 'offset' to the end of the file.
|
||||
multifile : bool
|
||||
If True (default), series may include pages from multiple files.
|
||||
Currently applies to OME-TIFF only.
|
||||
multifile_close : bool
|
||||
If True (default), keep the handles of other files in multifile
|
||||
series closed. This is inefficient when few files refer to
|
||||
many pages. If False, the C runtime may run out of resources.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
bigtiff : bool
|
||||
If True, the BigTIFF format is used.
|
||||
byteorder : {'<', '>'}
|
||||
The endianness of the data in the file.
|
||||
By default this is the system's native byte order.
|
||||
software : str
|
||||
Name of the software used to create the image.
|
||||
Saved with the first page only.
|
||||
|
||||
Metadata for reading
|
||||
--------------------
|
||||
planar_configuration : {'contig', 'planar'}
|
||||
Specifies if samples are stored contiguous or in separate planes.
|
||||
By default this setting is inferred from the data shape.
|
||||
'contig': last dimension contains samples.
|
||||
'planar': third last dimension contains samples.
|
||||
resolution_unit : int
|
||||
The resolution unit stored in the TIFF tag. Usually 1 means no/unknown unit,
|
||||
2 means dpi (inch), 3 means dpc (centimeter).
|
||||
resolution : (float, float, str)
|
||||
A tuple formatted as (X_resolution, Y_resolution, unit). The unit is a
|
||||
string representing one of the following units::
|
||||
|
||||
NONE # No unit or unit unknown
|
||||
INCH # dpi
|
||||
CENTIMETER # cpi
|
||||
MILLIMETER
|
||||
MICROMETER
|
||||
|
||||
compression : int
|
||||
Value indicating the compression algorithm used, e.g. 5 is LZW,
|
||||
7 is JPEG, 8 is deflate.
|
||||
If 1, data are uncompressed.
|
||||
predictor : int
|
||||
Value 2 indicates horizontal differencing was used before compression,
|
||||
while 3 indicates floating point horizontal differencing.
|
||||
If 1, no prediction scheme was used before compression.
|
||||
orientation : {'top_left', 'bottom_right', ...}
|
||||
Oriented of image array.
|
||||
is_rgb : bool
|
||||
True if page contains a RGB image.
|
||||
is_contig : bool
|
||||
True if page contains a contiguous image.
|
||||
is_tiled : bool
|
||||
True if page contains tiled image.
|
||||
is_palette : bool
|
||||
True if page contains a palette-colored image and not OME or STK.
|
||||
is_reduced : bool
|
||||
True if page is a reduced image of another image.
|
||||
is_shaped : bool
|
||||
True if page contains shape in image_description tag.
|
||||
is_fluoview : bool
|
||||
True if page contains FluoView MM_STAMP tag.
|
||||
is_nih : bool
|
||||
True if page contains NIH image header.
|
||||
is_micromanager : bool
|
||||
True if page contains Micro-Manager metadata.
|
||||
is_ome : bool
|
||||
True if page contains OME-XML in image_description tag.
|
||||
is_sgi : bool
|
||||
True if page contains SGI image and tile depth tags.
|
||||
is_mdgel : bool
|
||||
True if page contains md_file_tag tag.
|
||||
is_mediacy : bool
|
||||
True if page contains Media Cybernetics Id tag.
|
||||
is_stk : bool
|
||||
True if page contains UIC2Tag tag.
|
||||
is_lsm : bool
|
||||
True if page contains LSM CZ_LSM_INFO tag.
|
||||
description : str
|
||||
Image description
|
||||
description1 : str
|
||||
Additional description
|
||||
is_imagej : None or str
|
||||
ImageJ metadata
|
||||
software : str
|
||||
Software used to create the TIFF file
|
||||
datetime : datetime.datetime
|
||||
Creation date and time
|
||||
|
||||
Metadata for writing
|
||||
--------------------
|
||||
photometric : {'minisblack', 'miniswhite', 'rgb'}
|
||||
The color space of the image data.
|
||||
By default this setting is inferred from the data shape.
|
||||
planarconfig : {'contig', 'planar'}
|
||||
Specifies if samples are stored contiguous or in separate planes.
|
||||
By default this setting is inferred from the data shape.
|
||||
'contig': last dimension contains samples.
|
||||
'planar': third last dimension contains samples.
|
||||
resolution : (float, float) or ((int, int), (int, int))
|
||||
X and Y resolution in dots per inch as float or rational numbers.
|
||||
description : str
|
||||
The subject of the image. Saved with the first page only.
|
||||
compress : int
|
||||
Values from 0 to 9 controlling the level of zlib (deflate) compression.
|
||||
If 0, data are written uncompressed (default).
|
||||
compression : str, (int, int)
|
||||
Compression scheme used while writing the image. If omitted (default) the
|
||||
image is not uncompressed. Compression cannot be used to write contiguous
|
||||
series. Compressors may require certain data shapes, types or value ranges.
|
||||
For example, JPEG compression requires grayscale or RGB(A), uint8 or 12-bit
|
||||
uint16. JPEG compression is experimental. JPEG markers and TIFF tags may not
|
||||
match. Only a limited set of compression schemes are implemented. 'ZLIB' is
|
||||
short for ADOBE_DEFLATE. The value is written to the Compression tag.
|
||||
compressionargs:
|
||||
Extra arguments passed to compression codec, e.g., compression level. Refer
|
||||
to the Imagecodecs implementation for supported arguments.
|
||||
predictor : bool
|
||||
If True, horizontal differencing is applied before compression.
|
||||
Note that using an int literal 1 actually means no prediction scheme
|
||||
will be used.
|
||||
volume : bool
|
||||
If True, volume data are stored in one tile (if applicable) using
|
||||
the SGI image_depth and tile_depth tags.
|
||||
Image width and depth must be multiple of 16.
|
||||
Few software can read this format, e.g. MeVisLab.
|
||||
writeshape : bool
|
||||
If True, write the data shape to the image_description tag
|
||||
if necessary and no other description is given.
|
||||
extratags: sequence of tuples
|
||||
Additional tags as [(code, dtype, count, value, writeonce)].
|
||||
|
||||
code : int
|
||||
The TIFF tag Id.
|
||||
dtype : str
|
||||
Data type of items in 'value' in Python struct format.
|
||||
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
|
||||
count : int
|
||||
Number of data values. Not used for string values.
|
||||
value : sequence
|
||||
'Count' values compatible with 'dtype'.
|
||||
writeonce : bool
|
||||
If True, the tag is written to the first page only.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Global metadata is stored with the first frame in a TIFF file.
|
||||
Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
|
||||
was written has no effect. Also, global metadata is ignored if metadata is
|
||||
provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.
|
||||
|
||||
If you have installed tifffile as a Python package, imageio will attempt
|
||||
to use that as backend instead of the bundled backend. Doing so can
|
||||
provide access to new performance improvements and bug fixes.
|
||||
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
from ..core import Format
|
||||
from ..core.request import URI_BYTES, URI_FILE
|
||||
|
||||
import numpy as np
|
||||
import warnings
|
||||
|
||||
|
||||
try:
|
||||
import tifffile as _tifffile
|
||||
except ImportError:
|
||||
warnings.warn(
|
||||
"ImageIO's vendored tifffile backend is deprecated and will be"
|
||||
" removed in ImageIO v3. Install the tifffile directly:"
|
||||
" `pip install imageio[tifffile]`",
|
||||
DeprecationWarning,
|
||||
)
|
||||
from . import _tifffile
|
||||
|
||||
|
||||
# TIFF-based file extensions (plain TIFF plus the MetaMorph STK and
# Zeiss LSM variants).
TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm")
# Metadata keys that Writer._sanitize_meta forwards to TiffWriter's
# save/write call; any other key in user-supplied metadata is dropped.
WRITE_METADATA_KEYS = (
    "photometric",
    "planarconfig",
    "resolution",
    "description",
    "compress",
    "compression",
    "compressionargs",
    "predictor",
    "volume",
    "writeshape",
    "extratags",
    "datetime",
)
# TiffPage attributes copied verbatim into the metadata dict by
# Reader._get_meta_data. Tuple order determines the insertion order of
# keys in the resulting dict, so keep it stable.
READ_METADATA_KEYS = (
    "planar_configuration",
    "is_fluoview",
    "is_nih",
    "is_contig",
    "is_micromanager",
    "is_ome",
    "is_lsm",
    "is_palette",
    "is_reduced",
    "is_rgb",
    "is_sgi",
    "is_shaped",
    "is_stk",
    "is_tiled",
    "is_mdgel",
    "resolution_unit",
    "compression",
    "predictor",
    "is_mediacy",
    "orientation",
    "description",
    "description1",
    "is_imagej",
    "software",
)
|
||||
|
||||
|
||||
class TiffFormat(Format):
    """Provides support for a wide range of Tiff images using the tifffile
    backend.

    Images that contain multiple pages can be read using ``imageio.mimread()``
    to read the individual pages, or ``imageio.volread()`` to obtain a
    single (higher dimensional) array.

    Note that global metadata is stored with the first frame in a TIFF file.
    Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
    was written has no effect. Also, global metadata is ignored if metadata is
    provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.

    If you have installed tifffile as a Python package, imageio will attempt
    to use that as backend instead of the bundled backend. Doing so can
    provide access to new performance improvements and bug fixes.

    Parameters for reading
    ----------------------
    offset : int
        Optional start position of embedded file. By default this is
        the current file position.
    size : int
        Optional size of embedded file. By default this is the number
        of bytes from the 'offset' to the end of the file.
    multifile : bool
        If True (default), series may include pages from multiple files.
        Currently applies to OME-TIFF only.
    multifile_close : bool
        If True (default), keep the handles of other files in multifile
        series closed. This is inefficient when few files refer to
        many pages. If False, the C runtime may run out of resources.

    Parameters for saving
    ---------------------
    bigtiff : bool
        If True, the BigTIFF format is used.
    byteorder : {'<', '>'}
        The endianness of the data in the file.
        By default this is the system's native byte order.
    software : str
        Name of the software used to create the image.
        Saved with the first page only.

    Metadata for reading
    --------------------
    planar_configuration : {'contig', 'planar'}
        Specifies if samples are stored contiguous or in separate planes.
        By default this setting is inferred from the data shape.
        'contig': last dimension contains samples.
        'planar': third last dimension contains samples.
    resolution_unit : int
        The resolution unit stored in the TIFF tag. Usually 1 means no/unknown
        unit, 2 means dpi (inch), 3 means dpc (centimeter).
    compression : int
        Value indicating the compression algorithm used, e.g. 5 is LZW,
        7 is JPEG, 8 is deflate.
        If 1, data are uncompressed.
    predictor : int
        Value 2 indicates horizontal differencing was used before compression,
        while 3 indicates floating point horizontal differencing.
        If 1, no prediction scheme was used before compression.
    orientation : {'top_left', 'bottom_right', ...}
        Orientation of image array.
    is_rgb : bool
        True if page contains a RGB image.
    is_contig : bool
        True if page contains a contiguous image.
    is_tiled : bool
        True if page contains tiled image.
    is_palette : bool
        True if page contains a palette-colored image and not OME or STK.
    is_reduced : bool
        True if page is a reduced image of another image.
    is_shaped : bool
        True if page contains shape in image_description tag.
    is_fluoview : bool
        True if page contains FluoView MM_STAMP tag.
    is_nih : bool
        True if page contains NIH image header.
    is_micromanager : bool
        True if page contains Micro-Manager metadata.
    is_ome : bool
        True if page contains OME-XML in image_description tag.
    is_sgi : bool
        True if page contains SGI image and tile depth tags.
    is_stk : bool
        True if page contains UIC2Tag tag.
    is_mdgel : bool
        True if page contains md_file_tag tag.
    is_mediacy : bool
        True if page contains Media Cybernetics Id tag.
    is_lsm : bool
        True if page contains LSM CZ_LSM_INFO tag.
    description : str
        Image description
    description1 : str
        Additional description
    is_imagej : None or str
        ImageJ metadata
    software : str
        Software used to create the TIFF file
    datetime : datetime.datetime
        Creation date and time

    Metadata for writing
    --------------------
    photometric : {'minisblack', 'miniswhite', 'rgb'}
        The color space of the image data.
        By default this setting is inferred from the data shape.
    planarconfig : {'contig', 'planar'}
        Specifies if samples are stored contiguous or in separate planes.
        By default this setting is inferred from the data shape.
        'contig': last dimension contains samples.
        'planar': third last dimension contains samples.
    resolution : (float, float) or ((int, int), (int, int))
        X and Y resolution in dots per inch as float or rational numbers.
    description : str
        The subject of the image. Saved with the first page only.
    compress : int
        Values from 0 to 9 controlling the level of zlib (deflate) compression.
        If 0, data are written uncompressed (default).
    predictor : bool
        If True, horizontal differencing is applied before compression.
        Note that using an int literal 1 actually means no prediction scheme
        will be used.
    volume : bool
        If True, volume data are stored in one tile (if applicable) using
        the SGI image_depth and tile_depth tags.
        Image width and depth must be multiple of 16.
        Few software can read this format, e.g. MeVisLab.
    writeshape : bool
        If True, write the data shape to the image_description tag
        if necessary and no other description is given.
    extratags: sequence of tuples
        Additional tags as [(code, dtype, count, value, writeonce)].

        code : int
            The TIFF tag Id.
        dtype : str
            Data type of items in 'value' in Python struct format.
            One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
        count : int
            Number of data values. Not used for string values.
        value : sequence
            'Count' values compatible with 'dtype'.
        writeonce : bool
            If True, the tag is written to the first page only.
    """

    def _can_read(self, request):
        # Probe by letting the backend try to open the file; restore the
        # file position afterwards so a later real open starts fresh.
        try:
            _tifffile.TiffFile(request.get_file(), **request.kwargs)
        except ValueError:
            # vendored backend raises value exception
            return False
        except _tifffile.TiffFileError:  # pragma: no-cover
            # current version raises custom exception
            return False
        finally:
            request.get_file().seek(0)

        return True

    def _can_write(self, request):
        # Byte/file-object targets have no extension to check; for named
        # files the extension must be one of ours.
        if request._uri_type in [URI_FILE, URI_BYTES]:
            pass  # special URI
        elif request.extension not in self.extensions:
            return False

        # Probe by constructing a writer, then rewind (same idea as
        # _can_read above).
        try:
            _tifffile.TiffWriter(request.get_file(), **request.kwargs)
        except ValueError:
            # vendored backend raises value exception
            return False
        except _tifffile.TiffFileError:  # pragma: no-cover
            # current version raises custom exception
            return False
        finally:
            request.get_file().seek(0)
        return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, **kwargs):
            """Open the TIFF; extra kwargs are passed to ``TiffFile``."""
            # Allow loading from http; tifffile uses seek, so download first
            if self.request.filename.startswith(("http://", "https://")):
                self._f = f = open(self.request.get_local_filename(), "rb")
            else:
                self._f = None
                f = self.request.get_file()
            self._tf = _tifffile.TiffFile(f, **kwargs)

        def _close(self):
            self._tf.close()
            # Only close the file handle we opened ourselves (http case);
            # otherwise the Request owns it.
            if self._f is not None:
                self._f.close()

        def _get_length(self):
            """Number of series (ndimages) in the file."""
            return len(self._tf.series)

        def _get_data(self, index):
            """Return series ``index`` as an array plus its metadata dict."""
            if index < 0 or index >= self._get_length():
                raise IndexError("Index out of range while reading from tiff file")

            im = self._tf.asarray(series=index)
            meta = self._get_meta_data(index)

            return im, meta

        def _get_meta_data(self, index):
            """Collect metadata from page ``index`` (None is treated as 0)."""
            meta = {}
            # ``index or 0`` maps a None index (global metadata request) to
            # the first page.
            page = self._tf.pages[index or 0]
            for key in READ_METADATA_KEYS:
                try:
                    meta[key] = getattr(page, key)
                except Exception:
                    # Attribute not provided by this tifffile version/page.
                    pass

            # tifffile <= 0.12.1 use datetime, newer use DateTime
            for key in ("datetime", "DateTime"):
                try:
                    meta["datetime"] = datetime.datetime.strptime(
                        page.tags[key].value, "%Y:%m:%d %H:%M:%S"
                    )
                    break
                except Exception:
                    pass

            # Tag 296 = ResolutionUnit; 282/283 = X/Y resolution rationals.
            if 296 in page.tags:
                meta["resolution_unit"] = page.tags[296].value.value

            if 282 in page.tags and 283 in page.tags and 296 in page.tags:
                resolution_x = page.tags[282].value
                resolution_y = page.tags[283].value
                if resolution_x[1] == 0 or resolution_y[1] == 0:
                    warnings.warn(
                        "Ignoring resolution metadata, "
                        "because at least one direction has a 0 denominator.",
                        RuntimeWarning,
                    )
                else:
                    meta["resolution"] = (
                        resolution_x[0] / resolution_x[1],
                        resolution_y[0] / resolution_y[1],
                        page.tags[296].value.name,
                    )

            return meta

    # -- writer
    class Writer(Format.Writer):
        def _open(self, bigtiff=None, byteorder=None, software=None):
            """Create the TiffWriter, handling the tifffile API change where
            ``software`` moved from the constructor to the save call."""
            try:
                self._tf = _tifffile.TiffWriter(
                    self.request.get_file(),
                    bigtiff=bigtiff,
                    byteorder=byteorder,
                    software=software,
                )
                self._software = None
            except TypeError:
                # In tifffile >= 0.15, the `software` arg is passed to
                # TiffWriter.save
                self._tf = _tifffile.TiffWriter(
                    self.request.get_file(), bigtiff=bigtiff, byteorder=byteorder
                )
                self._software = software

            self._meta = {}
            self._frames_written = 0

        def _close(self):
            self._tf.close()

        def _append_data(self, im, meta):
            """Write one frame; per-call ``meta`` overrides global metadata."""
            if meta is not None:
                meta = self._sanitize_meta(meta)
            else:
                # Use global metadata for first frame
                meta = self._meta if self._frames_written == 0 else {}
            # `meta` is always a fresh dict here (_sanitize_meta builds a new
            # one), so adding "software" does not mutate caller data.
            if self._software is not None and self._frames_written == 0:
                meta["software"] = self._software
            # No need to check self.request.mode; tifffile figures out whether
            # this is a single page, or all page data at once.
            try:
                # TiffWriter.save has been deprecated in version 2020.9.30
                write_meth = self._tf.write
            except AttributeError:
                write_meth = self._tf.save
            write_meth(np.asanyarray(im), contiguous=False, **meta)
            self._frames_written += 1

        @staticmethod
        def _sanitize_meta(meta):
            """Filter ``meta`` down to keys tifffile accepts, translating
            deprecated/ambiguous values along the way. Returns a new dict."""
            ret = {}
            for key, value in meta.items():
                if key in WRITE_METADATA_KEYS:
                    # Special case of previously read `predictor` int value
                    # 1(=NONE) translation to False expected by TiffWriter.save
                    if key == "predictor" and not isinstance(value, bool):
                        ret[key] = value > 1
                    elif key == "compress" and value != 0:
                        warnings.warn(
                            "The use of `compress` is deprecated. Use `compression` and `compressionargs` instead.",
                            DeprecationWarning,
                        )

                        # Lexicographic compare works here because tifffile
                        # uses date-based version strings ("2022.x.y").
                        if _tifffile.__version__ < "2022":
                            ret["compression"] = (8, value)
                        else:
                            ret["compression"] = "zlib"
                            ret["compressionargs"] = {"level": value}
                    else:
                        # NOTE(review): `compress` with value 0 falls through
                        # to here and is forwarded as-is — verify newer
                        # tifffile versions still accept that keyword.
                        ret[key] = value
            return ret

        def set_meta_data(self, meta):
            # Stored as global metadata; applied to the first frame only
            # (see _append_data).
            self._meta = self._sanitize_meta(meta)
|
||||
413
.CondaPkg/env/Lib/site-packages/imageio/plugins/tifffile_v3.py
vendored
Normal file
413
.CondaPkg/env/Lib/site-packages/imageio/plugins/tifffile_v3.py
vendored
Normal file
@@ -0,0 +1,413 @@
|
||||
"""Read/Write TIFF files using tifffile.
|
||||
|
||||
.. note::
|
||||
To use this plugin you need to have `tifffile
|
||||
<https://github.com/cgohlke/tifffile>`_ installed::
|
||||
|
||||
pip install tifffile
|
||||
|
||||
This plugin wraps tifffile, a powerful library to manipulate TIFF files. It
|
||||
superseeds our previous tifffile plugin and aims to expose all the features of
|
||||
tifffile.
|
||||
|
||||
The plugin treats individual TIFF series as ndimages. A series is a sequence of
|
||||
TIFF pages that, when combined describe a meaningful unit, e.g., a volumetric
|
||||
image (where each slice is stored on an individual page) or a multi-color
|
||||
staining picture (where each stain is stored on an individual page). Different
|
||||
TIFF flavors/variants use series in different ways and, as such, the resulting
|
||||
reading behavior may vary depending on the program used while creating a
|
||||
particular TIFF file.
|
||||
|
||||
Methods
|
||||
-------
|
||||
.. note::
|
||||
Check the respective function for a list of supported kwargs and detailed
|
||||
documentation.
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
TifffilePlugin.read
|
||||
TifffilePlugin.iter
|
||||
TifffilePlugin.write
|
||||
TifffilePlugin.properties
|
||||
TifffilePlugin.metadata
|
||||
|
||||
Additional methods available inside the :func:`imopen <imageio.v3.imopen>`
|
||||
context:
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
TifffilePlugin.iter_pages
|
||||
|
||||
"""
|
||||
|
||||
from io import BytesIO
|
||||
from typing import Any, Dict, Optional, cast
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
import tifffile
|
||||
|
||||
from ..core.request import URI_BYTES, InitializationError, Request
|
||||
from ..core.v3_plugin_api import ImageProperties, PluginV3
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
def _get_resolution(page: tifffile.TiffPage) -> Dict[str, Any]:
    """Collect resolution metadata (unit plus X/Y density) from *page*.

    Returns a dict that contains ``"resolution_unit"`` when TIFF tag 296 is
    present, and additionally ``"resolution"`` (an ``(x, y)`` float pair in
    dots per unit) when tags 282/283 carry valid rationals. An empty dict is
    returned when the unit tag is absent.
    """
    meta: Dict[str, Any] = {}

    # Without tag 296 (ResolutionUnit) we report nothing at all.
    try:
        meta["resolution_unit"] = page.tags[296].value.value
    except KeyError:
        return meta

    # Tags 282/283 store X/Y resolution as (numerator, denominator) pairs.
    try:
        res_x = page.tags[282].value
        res_y = page.tags[283].value
        meta["resolution"] = (res_x[0] / res_x[1], res_y[0] / res_y[1])
    except KeyError:
        pass  # X or Y resolution tag absent; keep just the unit
    except ZeroDivisionError:
        warnings.warn(
            "Ignoring resolution metadata because at least one direction has a 0 "
            "denominator.",
            RuntimeWarning,
        )

    return meta
|
||||
|
||||
|
||||
class TifffilePlugin(PluginV3):
    """Support for tifffile as backend.

    Parameters
    ----------
    request : iio.Request
        A request object that represents the users intent. It provides a
        standard interface for a plugin to access the various ImageResources.
        Check the docs for details.
    kwargs : Any
        Additional kwargs are forwarded to tifffile's constructor, i.e.
        to ``TiffFile`` for reading or ``TiffWriter`` for writing.

    """

    def __init__(self, request: Request, **kwargs) -> None:
        super().__init__(request)
        # Holds a TiffFile (read mode) or TiffWriter (write mode); stays None
        # if construction fails so that ``close`` remains safe to call.
        self._fh = None

        if request.mode.io_mode == "r":
            try:
                self._fh = tifffile.TiffFile(request.get_file(), **kwargs)
            except tifffile.tifffile.TiffFileError:
                # Translate tifffile's rejection into ImageIO's plugin-selection
                # error so ``imopen`` can fall back to other backends.
                raise InitializationError("Tifffile can not read this file.")
        else:
            self._fh = tifffile.TiffWriter(request.get_file(), **kwargs)

    # ---------------------
    # Standard V3 Interface
    # ---------------------

    def read(self, *, index: int = None, page: int = None, **kwargs) -> np.ndarray:
        """Read a ndimage or page.

        The ndimage returned depends on the value of both ``index`` and
        ``page``. ``index`` selects the series to read and ``page`` allows
        selecting a single page from the selected series. If
        ``index=Ellipsis``, ``page`` is understood as a flat index, i.e., the
        selection ignores individual series inside the file. If
        ``index=Ellipsis`` and ``page=None``, then all the series are read and
        returned as a batch. If ``index=None`` (the default), the first
        series (index 0) is read.

        Parameters
        ----------
        index : int
            If ``int``, select the ndimage (series) located at that index
            inside the file and return ``page`` from it. If ``Ellipsis`` and
            ``page`` is ``int``, read the page located at that (flat) index
            inside the file. If ``Ellipsis`` and ``page=None``, read all
            ndimages from the file and return them as a batch. If ``None``
            (default), read series 0. Mutually exclusive with tifffile's
            ``series`` kwarg.
        page : int
            If ``None`` return the full selected ndimage. If ``int``, read the
            page at the selected index and return it. Mutually exclusive with
            tifffile's ``key`` kwarg.
        kwargs : Any
            Additional kwargs are forwarded to TiffFile's ``as_array`` method.

        Returns
        -------
        ndarray : np.ndarray
            The decoded ndimage or page.
        """

        # ``page`` is an alias for tifffile's ``key``; forward it unless the
        # caller already used ``key`` directly.
        if "key" not in kwargs:
            kwargs["key"] = page
        elif page is not None:
            raise ValueError("Can't use `page` and `key` at the same time.")

        # set plugin default for ``index`` (``series`` is tifffile's spelling)
        if index is not None and "series" in kwargs:
            raise ValueError("Can't use `series` and `index` at the same time.")
        elif "series" in kwargs:
            index = kwargs.pop("series")
        elif index is not None:
            pass
        else:
            index = 0

        if index is Ellipsis and page is None:
            # read all series in the file and return them as a batch
            ndimage = np.stack([x for x in self.iter(**kwargs)])
        else:
            # ``series=None`` makes tifffile treat ``key`` as a flat page index
            index = None if index is Ellipsis else index
            ndimage = self._fh.asarray(series=index, **kwargs)

        return ndimage

    def iter(self, **kwargs) -> np.ndarray:
        """Yield ndimages from the TIFF.

        Each yielded ndimage corresponds to one series in the file, in file
        order.

        Parameters
        ----------
        kwargs : Any
            Additional kwargs are forwarded to the TiffPageSeries' ``as_array``
            method.

        Yields
        ------
        ndimage : np.ndarray
            A decoded ndimage.
        """

        for sequence in self._fh.series:
            yield sequence.asarray(**kwargs)

    def write(
        self, ndimage: ArrayLike, *, is_batch: bool = False, **kwargs
    ) -> Optional[bytes]:
        """Save a ndimage as TIFF.

        Parameters
        ----------
        ndimage : ArrayLike
            The ndimage to encode and write to the ImageResource.
        is_batch : bool
            If True, the first dimension of the given ndimage is treated as a
            batch dimension and each element will create a new series.
        kwargs : Any
            Additional kwargs are forwarded to TiffWriter's ``write`` method.

        Returns
        -------
        encoded_image : bytes
            If the ImageResource is ``"<bytes>"``, return the encoded bytes.
            Otherwise write returns None.

        Notes
        -----
        Incremental writing is supported. Subsequent calls to ``write`` will
        create new series unless ``contiguous=True`` is used, in which case the
        call to write will append to the current series.

        """

        if not is_batch:
            # Promote a single image to a one-element batch so the loop below
            # handles both cases uniformly.
            ndimage = np.asarray(ndimage)[None, :]

        for image in ndimage:
            self._fh.write(image, **kwargs)

        if self._request._uri_type == URI_BYTES:
            # Close first so tifffile flushes everything into the buffer.
            self._fh.close()
            file = cast(BytesIO, self._request.get_file())
            return file.getvalue()

    def metadata(
        self, *, index: int = Ellipsis, page: int = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Format-Specific TIFF metadata.

        The metadata returned depends on the value of both ``index`` and
        ``page``. ``index`` selects a series and ``page`` allows selecting a
        single page from the selected series. If ``index=Ellipsis``, ``page`` is
        understood as a flat index, i.e., the selection ignores individual
        series inside the file. If ``index=Ellipsis`` and ``page=None`` then
        global (file-level) metadata is returned.

        Parameters
        ----------
        index : int
            Select the series of which to extract metadata from. If Ellipsis, treat
            page as a flat index into the file's pages.
        page : int
            If not None, select the page of which to extract metadata from. If
            None, read series-level metadata or, if ``index=...`` global,
            file-level metadata.
        exclude_applied : bool
            For API compatibility. Currently ignored.

        Returns
        -------
        metadata : dict
            A dictionary with information regarding the tiff flavor (file-level)
            or tiff tags (page-level).
        """

        # Resolve the selection to a single page, or to None for the
        # file-level case (index=... and page=None).
        if index is not Ellipsis and page is not None:
            target = self._fh.series[index].pages[page]
        elif index is not Ellipsis and page is None:
            # This is based on my understanding that series-level metadata is
            # stored in the first TIFF page.
            target = self._fh.series[index].pages[0]
        elif index is Ellipsis and page is not None:
            target = self._fh.pages[page]
        else:
            target = None

        metadata = {}
        if target is None:
            # return file-level metadata
            metadata["byteorder"] = self._fh.byteorder

            # Report every flavor flag tifffile knows about and, for flavors
            # present in this file, merge in their flavor-specific metadata.
            for flag in tifffile.TIFF.FILE_FLAGS:
                flag_value = getattr(self._fh, "is_" + flag)
                metadata["is_" + flag] = flag_value

                if flag_value and hasattr(self._fh, flag + "_metadata"):
                    flavor_metadata = getattr(self._fh, flag + "_metadata")
                    # Some flavors expose a tuple of dicts; the first entry
                    # appears to carry the metadata — TODO confirm for all flavors.
                    if isinstance(flavor_metadata, tuple):
                        metadata.update(flavor_metadata[0])
                    else:
                        metadata.update(flavor_metadata)
        else:
            # tifffile may return a TiffFrame instead of a page
            target = target.keyframe

            metadata.update({tag.name: tag.value for tag in target.tags})
            metadata.update(
                {
                    "planar_configuration": target.planarconfig,
                    "compression": target.compression,
                    "predictor": target.predictor,
                    "orientation": None,  # TODO
                    "description1": target.description1,
                    "description": target.description,
                    "software": target.software,
                    **_get_resolution(target),
                    "datetime": target.datetime,
                }
            )

        return metadata

    def properties(self, *, index: int = None, page: int = None) -> ImageProperties:
        """Standardized metadata.

        The properties returned depend on the value of both ``index`` and
        ``page``. ``index`` selects a series and ``page`` allows selecting a
        single page from the selected series. If ``index=Ellipsis``, ``page`` is
        understood as a flat index, i.e., the selection ignores individual
        series inside the file. If ``index=Ellipsis`` and ``page=None`` then
        global (file-level) properties are returned. If ``index=Ellipsis``
        and ``page=...``, file-level properties for the flattened index are
        returned.

        Parameters
        ----------
        index : int
            If ``int``, select the ndimage (series) located at that index inside
            the file. If ``Ellipsis`` and ``page`` is ``int`` extract the
            properties of the page located at that (flat) index inside the file.
            If ``Ellipsis`` and ``page=None``, return the properties for the
            batch of all ndimages in the file. If ``None`` (default), series 0
            is used.
        page : int
            If ``None`` return the properties of the full ndimage. If ``...``
            return the properties of the flattened index. If ``int``,
            return the properties of the page at the selected index only.

        Returns
        -------
        image_properties : ImageProperties
            The standardized metadata (properties) of the selected ndimage or series.

        """
        # NOTE: ``index or 0`` maps both None and 0 to series 0 (Ellipsis is
        # truthy and passes through unchanged).
        index = index or 0
        # The representative page used for shape/dtype/spacing below.
        page_idx = 0 if page in (None, Ellipsis) else page

        if index is Ellipsis:
            target_page = self._fh.pages[page_idx]
        else:
            target_page = self._fh.series[index].pages[page_idx]

        if index is Ellipsis and page is None:
            # Batch of all series; assumes every series shares the first
            # page's shape/dtype — TODO confirm for heterogeneous files.
            n_series = len(self._fh.series)
            props = ImageProperties(
                shape=(n_series, *target_page.shape),
                dtype=target_page.dtype,
                n_images=n_series,
                is_batch=True,
                spacing=_get_resolution(target_page).get("resolution"),
            )
        elif index is Ellipsis and page is Ellipsis:
            # Batch over the flat page index (series boundaries ignored).
            n_pages = len(self._fh.pages)
            props = ImageProperties(
                shape=(n_pages, *target_page.shape),
                dtype=target_page.dtype,
                n_images=n_pages,
                is_batch=True,
                spacing=_get_resolution(target_page).get("resolution"),
            )
        else:
            # Single series or single page.
            props = ImageProperties(
                shape=target_page.shape,
                dtype=target_page.dtype,
                is_batch=False,
                spacing=_get_resolution(target_page).get("resolution"),
            )

        return props

    def close(self) -> None:
        # Release the tifffile handle (if one was ever opened), then let the
        # base class finalize the request.
        if self._fh is not None:
            self._fh.close()

        super().close()

    # ------------------------------
    # Add-on Interface inside imopen
    # ------------------------------

    def iter_pages(self, index=..., **kwargs):
        """Yield pages from a TIFF file.

        This generator walks over the flat index of the pages inside an
        ImageResource and yields them in order.

        Parameters
        ----------
        index : int
            The index of the series to yield pages from. If Ellipsis, walk over
            the file's flat index (and ignore individual series).
        kwargs : Any
            Additional kwargs are passed to TiffPage's ``as_array`` method.

        Yields
        ------
        page : np.ndarray
            A page stored inside the TIFF file.

        """

        if index is Ellipsis:
            pages = self._fh.pages
        else:
            pages = self._fh.series[index]

        for page in pages:
            yield page.asarray(**kwargs)
Reference in New Issue
Block a user