update
This commit is contained in:
131
.CondaPkg/env/Lib/site-packages/imageio/__init__.py
vendored
131
.CondaPkg/env/Lib/site-packages/imageio/__init__.py
vendored
@@ -1,131 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2014-2020, imageio contributors
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
# This docstring is used at the index of the documentation pages, and
|
||||
# gets inserted into a slightly larger description (in setup.py) for
|
||||
# the page on Pypi:
|
||||
"""
|
||||
Imageio is a Python library that provides an easy interface to read and
|
||||
write a wide range of image data, including animated images, volumetric
|
||||
data, and scientific formats. It is cross-platform, runs on Python 3.5+,
|
||||
and is easy to install.
|
||||
|
||||
Main website: https://imageio.readthedocs.io/
|
||||
"""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
__version__ = "2.27.0"
|
||||
|
||||
import warnings
|
||||
|
||||
# Load some bits from core
|
||||
from .core import FormatManager, RETURN_BYTES
|
||||
|
||||
# Instantiate the old format manager
|
||||
formats = FormatManager()
|
||||
show_formats = formats.show
|
||||
|
||||
from . import v2
|
||||
from . import v3
|
||||
from . import plugins
|
||||
|
||||
# import config after core to avoid circular import
|
||||
from . import config
|
||||
|
||||
# import all APIs into the top level (meta API)
|
||||
from .v2 import (
|
||||
imread as imread_v2,
|
||||
mimread,
|
||||
volread,
|
||||
mvolread,
|
||||
imwrite,
|
||||
mimwrite,
|
||||
volwrite,
|
||||
mvolwrite,
|
||||
# aliases
|
||||
get_reader as read,
|
||||
get_writer as save,
|
||||
imwrite as imsave,
|
||||
mimwrite as mimsave,
|
||||
volwrite as volsave,
|
||||
mvolwrite as mvolsave,
|
||||
# misc
|
||||
help,
|
||||
get_reader,
|
||||
get_writer,
|
||||
)
|
||||
from .v3 import (
|
||||
imopen,
|
||||
# imread, # Will take over once v3 is released
|
||||
# imwrite, # Will take over once v3 is released
|
||||
imiter,
|
||||
)
|
||||
|
||||
|
||||
def imread(uri, format=None, **kwargs):
|
||||
"""imread(uri, format=None, **kwargs)
|
||||
|
||||
Reads an image from the specified file. Returns a numpy array, which
|
||||
comes with a dict of meta data at its 'meta' attribute.
|
||||
|
||||
Note that the image data is returned as-is, and may not always have
|
||||
a dtype of uint8 (and thus may differ from what e.g. PIL returns).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uri : {str, pathlib.Path, bytes, file}
|
||||
The resource to load the image from, e.g. a filename, pathlib.Path,
|
||||
http address or file object, see the docs for more info.
|
||||
format : str
|
||||
The format to use to read the file. By default imageio selects
|
||||
the appropriate for you based on the filename and its contents.
|
||||
kwargs : ...
|
||||
Further keyword arguments are passed to the reader. See :func:`.help`
|
||||
to see what arguments are available for a particular format.
|
||||
"""
|
||||
|
||||
warnings.warn(
|
||||
"Starting with ImageIO v3 the behavior of this function will switch to that of"
|
||||
" iio.v3.imread. To keep the current behavior (and make this warning disappear)"
|
||||
" use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
return imread_v2(uri, format=format, **kwargs)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"v2",
|
||||
"v3",
|
||||
"config",
|
||||
"plugins",
|
||||
# v3 API
|
||||
"imopen",
|
||||
"imread",
|
||||
"imwrite",
|
||||
"imiter",
|
||||
# v2 API
|
||||
"mimread",
|
||||
"volread",
|
||||
"mvolread",
|
||||
"imwrite",
|
||||
"mimwrite",
|
||||
"volwrite",
|
||||
"mvolwrite",
|
||||
# v2 aliases
|
||||
"read",
|
||||
"save",
|
||||
"imsave",
|
||||
"mimsave",
|
||||
"volsave",
|
||||
"mvolsave",
|
||||
# functions to deprecate
|
||||
"help",
|
||||
"get_reader",
|
||||
"get_writer",
|
||||
"formats",
|
||||
"show_formats",
|
||||
]
|
||||
169
.CondaPkg/env/Lib/site-packages/imageio/__main__.py
vendored
169
.CondaPkg/env/Lib/site-packages/imageio/__main__.py
vendored
@@ -1,169 +0,0 @@
|
||||
"""
|
||||
Console scripts and associated helper methods for imageio.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
from os import path as op
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
|
||||
from . import plugins
|
||||
from .core import util
|
||||
|
||||
# A list of plugins that require binaries from the imageio-binaries
|
||||
# repository. These plugins must implement the `download` method.
|
||||
PLUGINS_WITH_BINARIES = ["freeimage"]
|
||||
|
||||
|
||||
def download_bin(plugin_names=["all"], package_dir=False):
|
||||
"""Download binary dependencies of plugins
|
||||
|
||||
This is a convenience method for downloading the binaries
|
||||
(e.g. for freeimage) from the imageio-binaries
|
||||
repository.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
plugin_names: list
|
||||
A list of imageio plugin names. If it contains "all", all
|
||||
binary dependencies are downloaded.
|
||||
package_dir: bool
|
||||
If set to `True`, the binaries will be downloaded to the
|
||||
`resources` directory of the imageio package instead of
|
||||
to the users application data directory. Note that this
|
||||
might require administrative rights if imageio is installed
|
||||
in a system directory.
|
||||
"""
|
||||
if plugin_names.count("all"):
|
||||
# Use all plugins
|
||||
plugin_names = PLUGINS_WITH_BINARIES
|
||||
|
||||
plugin_names.sort()
|
||||
print("Ascertaining binaries for: {}.".format(", ".join(plugin_names)))
|
||||
|
||||
if package_dir:
|
||||
# Download the binaries to the `resources` directory
|
||||
# of imageio. If imageio comes as an .egg, then a cache
|
||||
# directory will be created by pkg_resources (requires setuptools).
|
||||
# see `imageio.core.util.resource_dirs`
|
||||
# and `imageio.core.utilresource_package_dir`
|
||||
directory = util.resource_package_dir()
|
||||
else:
|
||||
directory = None
|
||||
|
||||
for plg in plugin_names:
|
||||
if plg not in PLUGINS_WITH_BINARIES:
|
||||
msg = "Plugin {} not registered for binary download!".format(plg)
|
||||
raise Exception(msg)
|
||||
mod = getattr(plugins, plg)
|
||||
mod.download(directory=directory)
|
||||
|
||||
|
||||
def download_bin_main():
|
||||
"""Argument-parsing wrapper for `download_bin`"""
|
||||
description = "Download plugin binary dependencies"
|
||||
phelp = (
|
||||
"Plugin name for which to download the binary. "
|
||||
+ "If no argument is given, all binaries are downloaded."
|
||||
)
|
||||
dhelp = (
|
||||
"Download the binaries to the package directory "
|
||||
+ "(default is the users application data directory). "
|
||||
+ "This might require administrative rights."
|
||||
)
|
||||
example_text = (
|
||||
"examples:\n"
|
||||
+ " imageio_download_bin all\n"
|
||||
+ " imageio_download_bin freeimage\n"
|
||||
)
|
||||
parser = argparse.ArgumentParser(
|
||||
description=description,
|
||||
epilog=example_text,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
)
|
||||
parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp)
|
||||
parser.add_argument(
|
||||
"--package-dir",
|
||||
dest="package_dir",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=dhelp,
|
||||
)
|
||||
args = parser.parse_args()
|
||||
download_bin(plugin_names=args.plugin, package_dir=args.package_dir)
|
||||
|
||||
|
||||
def remove_bin(plugin_names=["all"]):
|
||||
"""Remove binary dependencies of plugins
|
||||
|
||||
This is a convenience method that removes all binaries
|
||||
dependencies for plugins downloaded by imageio.
|
||||
|
||||
Notes
|
||||
-----
|
||||
It only makes sense to use this method if the binaries
|
||||
are corrupt.
|
||||
"""
|
||||
if plugin_names.count("all"):
|
||||
# Use all plugins
|
||||
plugin_names = PLUGINS_WITH_BINARIES
|
||||
|
||||
print("Removing binaries for: {}.".format(", ".join(plugin_names)))
|
||||
|
||||
rdirs = util.resource_dirs()
|
||||
|
||||
for plg in plugin_names:
|
||||
if plg not in PLUGINS_WITH_BINARIES:
|
||||
msg = "Plugin {} not registered for binary download!".format(plg)
|
||||
raise Exception(msg)
|
||||
|
||||
not_removed = []
|
||||
for rd in rdirs:
|
||||
# plugin name is in subdirectories
|
||||
for rsub in os.listdir(rd):
|
||||
if rsub in plugin_names:
|
||||
plgdir = op.join(rd, rsub)
|
||||
try:
|
||||
shutil.rmtree(plgdir)
|
||||
except Exception:
|
||||
not_removed.append(plgdir)
|
||||
if not_removed:
|
||||
nrs = ",".join(not_removed)
|
||||
msg2 = (
|
||||
"These plugins files could not be removed: {}\n".format(nrs)
|
||||
+ "Make sure they are not used by any program and try again."
|
||||
)
|
||||
raise Exception(msg2)
|
||||
|
||||
|
||||
def remove_bin_main():
|
||||
"""Argument-parsing wrapper for `remove_bin`"""
|
||||
description = "Remove plugin binary dependencies"
|
||||
phelp = (
|
||||
"Plugin name for which to remove the binary. "
|
||||
+ "If no argument is given, all binaries are removed."
|
||||
)
|
||||
example_text = (
|
||||
"examples:\n"
|
||||
+ " imageio_remove_bin all\n"
|
||||
+ " imageio_remove_bin freeimage\n"
|
||||
)
|
||||
parser = argparse.ArgumentParser(
|
||||
description=description,
|
||||
epilog=example_text,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
)
|
||||
parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp)
|
||||
args = parser.parse_args()
|
||||
remove_bin(plugin_names=args.plugin)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "download_bin":
|
||||
download_bin_main()
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == "remove_bin":
|
||||
remove_bin_main()
|
||||
else:
|
||||
raise RuntimeError("Invalid use of the imageio CLI")
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,16 +0,0 @@
|
||||
from .extensions import (
|
||||
extension_list,
|
||||
known_extensions,
|
||||
FileExtension,
|
||||
video_extensions,
|
||||
)
|
||||
from .plugins import known_plugins, PluginConfig
|
||||
|
||||
__all__ = [
|
||||
"known_plugins",
|
||||
"PluginConfig",
|
||||
"extension_list",
|
||||
"known_extensions",
|
||||
"FileExtension",
|
||||
"video_extensions",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
@@ -1,24 +0,0 @@
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
class FileExtension:
|
||||
extension: str
|
||||
priority: List[str]
|
||||
name: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
external_link: Optional[str] = None
|
||||
volume_support: bool
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
extension: str,
|
||||
priority: List[str],
|
||||
name: str = None,
|
||||
description: str = None,
|
||||
external_link: str = None
|
||||
) -> None: ...
|
||||
def reset(self) -> None: ...
|
||||
|
||||
extension_list: List[FileExtension]
|
||||
known_extensions: Dict[str, List[FileExtension]]
|
||||
video_extensions: List[FileExtension]
|
||||
@@ -1,789 +0,0 @@
|
||||
import importlib
|
||||
|
||||
from ..core.legacy_plugin_wrapper import LegacyPlugin
|
||||
|
||||
|
||||
class PluginConfig:
|
||||
"""Plugin Configuration Metadata
|
||||
|
||||
This class holds the information needed to lazy-import plugins.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name : str
|
||||
The name of the plugin.
|
||||
class_name : str
|
||||
The name of the plugin class inside the plugin module.
|
||||
module_name : str
|
||||
The name of the module/package from which to import the plugin.
|
||||
is_legacy : bool
|
||||
If True, this plugin is a v2 plugin and will be wrapped in a
|
||||
LegacyPlugin. Default: False.
|
||||
package_name : str
|
||||
If the given module name points to a relative module, then the package
|
||||
name determines the package it is relative to.
|
||||
install_name : str
|
||||
The name of the optional dependency that can be used to install this
|
||||
plugin if it is missing.
|
||||
legacy_args : Dict
|
||||
A dictionary of kwargs to pass to the v2 plugin (Format) upon construction.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> PluginConfig(
|
||||
name="TIFF",
|
||||
class_name="TiffFormat",
|
||||
module_name="imageio.plugins.tifffile",
|
||||
is_legacy=True,
|
||||
install_name="tifffile",
|
||||
legacy_args={
|
||||
"description": "TIFF format",
|
||||
"extensions": ".tif .tiff .stk .lsm",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
>>> PluginConfig(
|
||||
name="pillow",
|
||||
class_name="PillowPlugin",
|
||||
module_name="imageio.plugins.pillow"
|
||||
)
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
class_name,
|
||||
module_name,
|
||||
*,
|
||||
is_legacy=False,
|
||||
package_name=None,
|
||||
install_name=None,
|
||||
legacy_args=None,
|
||||
):
|
||||
legacy_args = legacy_args or dict()
|
||||
|
||||
self.name = name
|
||||
self.class_name = class_name
|
||||
self.module_name = module_name
|
||||
self.package_name = package_name
|
||||
|
||||
self.is_legacy = is_legacy
|
||||
self.install_name = install_name or self.name
|
||||
self.legacy_args = {"name": name, "description": "A legacy plugin"}
|
||||
self.legacy_args.update(legacy_args)
|
||||
|
||||
@property
|
||||
def format(self):
|
||||
"""For backwards compatibility with FormatManager
|
||||
|
||||
Delete when migrating to v3
|
||||
"""
|
||||
if not self.is_legacy:
|
||||
raise RuntimeError("Can only get format for legacy plugins.")
|
||||
|
||||
module = importlib.import_module(self.module_name, self.package_name)
|
||||
clazz = getattr(module, self.class_name)
|
||||
return clazz(**self.legacy_args)
|
||||
|
||||
@property
|
||||
def plugin_class(self):
|
||||
"""Get the plugin class (import if needed)
|
||||
|
||||
Returns
|
||||
-------
|
||||
plugin_class : Any
|
||||
The class that can be used to instantiate plugins.
|
||||
|
||||
"""
|
||||
|
||||
module = importlib.import_module(self.module_name, self.package_name)
|
||||
clazz = getattr(module, self.class_name)
|
||||
|
||||
if self.is_legacy:
|
||||
legacy_plugin = clazz(**self.legacy_args)
|
||||
|
||||
def partial_legacy_plugin(request):
|
||||
return LegacyPlugin(request, legacy_plugin)
|
||||
|
||||
clazz = partial_legacy_plugin
|
||||
|
||||
return clazz
|
||||
|
||||
|
||||
known_plugins = dict()
|
||||
known_plugins["pillow"] = PluginConfig(
|
||||
name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow"
|
||||
)
|
||||
known_plugins["pyav"] = PluginConfig(
|
||||
name="pyav", class_name="PyAVPlugin", module_name="imageio.plugins.pyav"
|
||||
)
|
||||
known_plugins["opencv"] = PluginConfig(
|
||||
name="opencv", class_name="OpenCVPlugin", module_name="imageio.plugins.opencv"
|
||||
)
|
||||
known_plugins["tifffile"] = PluginConfig(
|
||||
name="tifffile",
|
||||
class_name="TifffilePlugin",
|
||||
module_name="imageio.plugins.tifffile_v3",
|
||||
)
|
||||
|
||||
# Legacy plugins
|
||||
# ==============
|
||||
#
|
||||
# Which are partly registered by format, partly by plugin, and partly by a mix
|
||||
# of both. We keep the naming here for backwards compatibility.
|
||||
# In v3 this should become a single entry per plugin named after the plugin
|
||||
# We can choose extension-specific priority in ``config.extensions``.
|
||||
#
|
||||
# Note: Since python 3.7 order of insertion determines the order of dict().keys()
|
||||
# This means that the order here determines the order by which plugins are
|
||||
# checked during the full fallback search. We don't advertise this downstream,
|
||||
# but it could be a useful thing to keep in mind to choose a sensible default
|
||||
# search order.
|
||||
|
||||
known_plugins["TIFF"] = PluginConfig(
|
||||
name="TIFF",
|
||||
class_name="TiffFormat",
|
||||
module_name="imageio.plugins.tifffile",
|
||||
is_legacy=True,
|
||||
install_name="tifffile",
|
||||
legacy_args={
|
||||
"description": "TIFF format",
|
||||
"extensions": ".tif .tiff .stk .lsm",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
# PILLOW plugin formats (legacy)
|
||||
PILLOW_FORMATS = [
|
||||
("BMP", "Windows Bitmap", ".bmp", "PillowFormat"),
|
||||
("BUFR", "BUFR", ".bufr", "PillowFormat"),
|
||||
("CUR", "Windows Cursor", ".cur", "PillowFormat"),
|
||||
("DCX", "Intel DCX", ".dcx", "PillowFormat"),
|
||||
("DDS", "DirectDraw Surface", ".dds", "PillowFormat"),
|
||||
("DIB", "Windows Bitmap", "", "PillowFormat"),
|
||||
("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"),
|
||||
("FITS", "FITS", ".fit .fits", "PillowFormat"),
|
||||
("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"),
|
||||
("FPX", "FlashPix", ".fpx", "PillowFormat"),
|
||||
("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"),
|
||||
("GBR", "GIMP brush file", ".gbr", "PillowFormat"),
|
||||
("GIF", "Compuserve GIF", ".gif", "GIFFormat"),
|
||||
("GRIB", "GRIB", ".grib", "PillowFormat"),
|
||||
("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"),
|
||||
("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"),
|
||||
("ICO", "Windows Icon", ".ico", "PillowFormat"),
|
||||
("IM", "IFUNC Image Memory", ".im", "PillowFormat"),
|
||||
("IMT", "IM Tools", "", "PillowFormat"),
|
||||
("IPTC", "IPTC/NAA", ".iim", "PillowFormat"),
|
||||
("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"),
|
||||
(
|
||||
"JPEG2000",
|
||||
"JPEG 2000 (ISO 15444)",
|
||||
".jp2 .j2k .jpc .jpf .jpx .j2c",
|
||||
"JPEG2000Format",
|
||||
),
|
||||
("MCIDAS", "McIdas area file", "", "PillowFormat"),
|
||||
("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"),
|
||||
# skipped in legacy pillow
|
||||
# ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"),
|
||||
("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"),
|
||||
("MSP", "Windows Paint", ".msp", "PillowFormat"),
|
||||
("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"),
|
||||
("PCX", "Paintbrush", ".pcx", "PillowFormat"),
|
||||
("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"),
|
||||
("PNG", "Portable network graphics", ".png", "PNGFormat"),
|
||||
("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"),
|
||||
("PSD", "Adobe Photoshop", ".psd", "PillowFormat"),
|
||||
("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"),
|
||||
("SPIDER", "Spider 2D image", "", "PillowFormat"),
|
||||
("SUN", "Sun Raster File", ".ras", "PillowFormat"),
|
||||
("TGA", "Targa", ".tga", "PillowFormat"),
|
||||
("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"),
|
||||
("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"),
|
||||
("XBM", "X11 Bitmap", ".xbm", "PillowFormat"),
|
||||
("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"),
|
||||
("XVTHUMB", "XV thumbnail image", "", "PillowFormat"),
|
||||
]
|
||||
for id, summary, ext, class_name in PILLOW_FORMATS:
|
||||
config = PluginConfig(
|
||||
name=id.upper() + "-PIL",
|
||||
class_name=class_name,
|
||||
module_name="imageio.plugins.pillow_legacy",
|
||||
is_legacy=True,
|
||||
install_name="pillow",
|
||||
legacy_args={
|
||||
"description": summary + " via Pillow",
|
||||
"extensions": ext,
|
||||
"modes": "iI" if class_name == "GIFFormat" else "i",
|
||||
"plugin_id": id,
|
||||
},
|
||||
)
|
||||
known_plugins[config.name] = config
|
||||
|
||||
known_plugins["FFMPEG"] = PluginConfig(
|
||||
name="FFMPEG",
|
||||
class_name="FfmpegFormat",
|
||||
module_name="imageio.plugins.ffmpeg",
|
||||
is_legacy=True,
|
||||
install_name="ffmpeg",
|
||||
legacy_args={
|
||||
"description": "Many video formats and cameras (via ffmpeg)",
|
||||
"extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv .h264",
|
||||
"modes": "I",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["BSDF"] = PluginConfig(
|
||||
name="BSDF",
|
||||
class_name="BsdfFormat",
|
||||
module_name="imageio.plugins.bsdf",
|
||||
is_legacy=True,
|
||||
install_name="bsdf",
|
||||
legacy_args={
|
||||
"description": "Format based on the Binary Structured Data Format",
|
||||
"extensions": ".bsdf",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["DICOM"] = PluginConfig(
|
||||
name="DICOM",
|
||||
class_name="DicomFormat",
|
||||
module_name="imageio.plugins.dicom",
|
||||
is_legacy=True,
|
||||
install_name="dicom",
|
||||
legacy_args={
|
||||
"description": "Digital Imaging and Communications in Medicine",
|
||||
"extensions": ".dcm .ct .mri",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["FEI"] = PluginConfig(
|
||||
name="FEI",
|
||||
class_name="FEISEMFormat",
|
||||
module_name="imageio.plugins.feisem",
|
||||
is_legacy=True,
|
||||
install_name="feisem",
|
||||
legacy_args={
|
||||
"description": "FEI-SEM TIFF format",
|
||||
"extensions": [".tif", ".tiff"],
|
||||
"modes": "iv",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["FITS"] = PluginConfig(
|
||||
name="FITS",
|
||||
class_name="FitsFormat",
|
||||
module_name="imageio.plugins.fits",
|
||||
is_legacy=True,
|
||||
install_name="fits",
|
||||
legacy_args={
|
||||
"description": "Flexible Image Transport System (FITS) format",
|
||||
"extensions": ".fits .fit .fts .fz",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["GDAL"] = PluginConfig(
|
||||
name="GDAL",
|
||||
class_name="GdalFormat",
|
||||
module_name="imageio.plugins.gdal",
|
||||
is_legacy=True,
|
||||
install_name="gdal",
|
||||
legacy_args={
|
||||
"description": "Geospatial Data Abstraction Library",
|
||||
"extensions": ".tiff .tif .img .ecw .jpg .jpeg",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["ITK"] = PluginConfig(
|
||||
name="ITK",
|
||||
class_name="ItkFormat",
|
||||
module_name="imageio.plugins.simpleitk",
|
||||
is_legacy=True,
|
||||
install_name="simpleitk",
|
||||
legacy_args={
|
||||
"description": "Insight Segmentation and Registration Toolkit (ITK) format",
|
||||
"extensions": " ".join(
|
||||
(
|
||||
".gipl",
|
||||
".ipl",
|
||||
".mha",
|
||||
".mhd",
|
||||
".nhdr",
|
||||
".nia",
|
||||
".hdr",
|
||||
".nrrd",
|
||||
".nii",
|
||||
".nii.gz",
|
||||
".img",
|
||||
".img.gz",
|
||||
".vtk",
|
||||
".hdf5",
|
||||
".lsm",
|
||||
".mnc",
|
||||
".mnc2",
|
||||
".mgh",
|
||||
".mnc",
|
||||
".pic",
|
||||
".bmp",
|
||||
".jpeg",
|
||||
".jpg",
|
||||
".png",
|
||||
".tiff",
|
||||
".tif",
|
||||
".dicom",
|
||||
".dcm",
|
||||
".gdcm",
|
||||
)
|
||||
),
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["NPZ"] = PluginConfig(
|
||||
name="NPZ",
|
||||
class_name="NpzFormat",
|
||||
module_name="imageio.plugins.npz",
|
||||
is_legacy=True,
|
||||
install_name="numpy",
|
||||
legacy_args={
|
||||
"description": "Numpy's compressed array format",
|
||||
"extensions": ".npz",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["SPE"] = PluginConfig(
|
||||
name="SPE",
|
||||
class_name="SpeFormat",
|
||||
module_name="imageio.plugins.spe",
|
||||
is_legacy=True,
|
||||
install_name="spe",
|
||||
legacy_args={
|
||||
"description": "SPE file format",
|
||||
"extensions": ".spe",
|
||||
"modes": "iIvV",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["SWF"] = PluginConfig(
|
||||
name="SWF",
|
||||
class_name="SWFFormat",
|
||||
module_name="imageio.plugins.swf",
|
||||
is_legacy=True,
|
||||
install_name="swf",
|
||||
legacy_args={
|
||||
"description": "Shockwave flash",
|
||||
"extensions": ".swf",
|
||||
"modes": "I",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["SCREENGRAB"] = PluginConfig(
|
||||
name="SCREENGRAB",
|
||||
class_name="ScreenGrabFormat",
|
||||
module_name="imageio.plugins.grab",
|
||||
is_legacy=True,
|
||||
install_name="pillow",
|
||||
legacy_args={
|
||||
"description": "Grab screenshots (Windows and OS X only)",
|
||||
"extensions": [],
|
||||
"modes": "i",
|
||||
},
|
||||
)
|
||||
|
||||
known_plugins["CLIPBOARDGRAB"] = PluginConfig(
|
||||
name="CLIPBOARDGRAB",
|
||||
class_name="ClipboardGrabFormat",
|
||||
module_name="imageio.plugins.grab",
|
||||
is_legacy=True,
|
||||
install_name="pillow",
|
||||
legacy_args={
|
||||
"description": "Grab from clipboard (Windows only)",
|
||||
"extensions": [],
|
||||
"modes": "i",
|
||||
},
|
||||
)
|
||||
|
||||
# LYTRO plugin (legacy)
|
||||
lytro_formats = [
|
||||
("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"),
|
||||
(
|
||||
"lytro-illum-raw",
|
||||
"Lytro Illum raw image file",
|
||||
".raw",
|
||||
"i",
|
||||
"LytroIllumRawFormat",
|
||||
),
|
||||
("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"),
|
||||
("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"),
|
||||
]
|
||||
for name, des, ext, mode, class_name in lytro_formats:
|
||||
config = PluginConfig(
|
||||
name=name.upper(),
|
||||
class_name=class_name,
|
||||
module_name="imageio.plugins.lytro",
|
||||
is_legacy=True,
|
||||
install_name="lytro",
|
||||
legacy_args={
|
||||
"description": des,
|
||||
"extensions": ext,
|
||||
"modes": mode,
|
||||
},
|
||||
)
|
||||
known_plugins[config.name] = config
|
||||
|
||||
# FreeImage plugin (legacy)
|
||||
FREEIMAGE_FORMATS = [
|
||||
(
|
||||
"BMP",
|
||||
0,
|
||||
"Windows or OS/2 Bitmap",
|
||||
".bmp",
|
||||
"i",
|
||||
"FreeimageBmpFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"CUT",
|
||||
21,
|
||||
"Dr. Halo",
|
||||
".cut",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"DDS",
|
||||
24,
|
||||
"DirectX Surface",
|
||||
".dds",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"EXR",
|
||||
29,
|
||||
"ILM OpenEXR",
|
||||
".exr",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"G3",
|
||||
27,
|
||||
"Raw fax format CCITT G.3",
|
||||
".g3",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"GIF",
|
||||
25,
|
||||
"Static and animated gif (FreeImage)",
|
||||
".gif",
|
||||
"iI",
|
||||
"GifFormat",
|
||||
"imageio.plugins.freeimagemulti",
|
||||
),
|
||||
(
|
||||
"HDR",
|
||||
26,
|
||||
"High Dynamic Range Image",
|
||||
".hdr",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"ICO",
|
||||
1,
|
||||
"Windows Icon",
|
||||
".ico",
|
||||
"iI",
|
||||
"IcoFormat",
|
||||
"imageio.plugins.freeimagemulti",
|
||||
),
|
||||
(
|
||||
"IFF",
|
||||
5,
|
||||
"IFF Interleaved Bitmap",
|
||||
".iff .lbm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"J2K",
|
||||
30,
|
||||
"JPEG-2000 codestream",
|
||||
".j2k .j2c",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"JNG",
|
||||
3,
|
||||
"JPEG Network Graphics",
|
||||
".jng",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"JP2",
|
||||
31,
|
||||
"JPEG-2000 File Format",
|
||||
".jp2",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"JPEG",
|
||||
2,
|
||||
"JPEG - JFIF Compliant",
|
||||
".jpg .jif .jpeg .jpe",
|
||||
"i",
|
||||
"FreeimageJpegFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"JPEG-XR",
|
||||
36,
|
||||
"JPEG XR image format",
|
||||
".jxr .wdp .hdp",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"KOALA",
|
||||
4,
|
||||
"C64 Koala Graphics",
|
||||
".koa",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
# not registered in legacy pillow
|
||||
# ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
|
||||
(
|
||||
"PBM",
|
||||
7,
|
||||
"Portable Bitmap (ASCII)",
|
||||
".pbm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PBMRAW",
|
||||
8,
|
||||
"Portable Bitmap (RAW)",
|
||||
".pbm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PCD",
|
||||
9,
|
||||
"Kodak PhotoCD",
|
||||
".pcd",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PCX",
|
||||
10,
|
||||
"Zsoft Paintbrush",
|
||||
".pcx",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PFM",
|
||||
32,
|
||||
"Portable floatmap",
|
||||
".pfm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PGM",
|
||||
11,
|
||||
"Portable Greymap (ASCII)",
|
||||
".pgm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PGMRAW",
|
||||
12,
|
||||
"Portable Greymap (RAW)",
|
||||
".pgm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PICT",
|
||||
33,
|
||||
"Macintosh PICT",
|
||||
".pct .pict .pic",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PNG",
|
||||
13,
|
||||
"Portable Network Graphics",
|
||||
".png",
|
||||
"i",
|
||||
"FreeimagePngFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PPM",
|
||||
14,
|
||||
"Portable Pixelmap (ASCII)",
|
||||
".ppm",
|
||||
"i",
|
||||
"FreeimagePnmFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PPMRAW",
|
||||
15,
|
||||
"Portable Pixelmap (RAW)",
|
||||
".ppm",
|
||||
"i",
|
||||
"FreeimagePnmFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"PSD",
|
||||
20,
|
||||
"Adobe Photoshop",
|
||||
".psd",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"RAS",
|
||||
16,
|
||||
"Sun Raster Image",
|
||||
".ras",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"RAW",
|
||||
34,
|
||||
"RAW camera image",
|
||||
".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 "
|
||||
".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf "
|
||||
".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"SGI",
|
||||
28,
|
||||
"SGI Image Format",
|
||||
".sgi .rgb .rgba .bw",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"TARGA",
|
||||
17,
|
||||
"Truevision Targa",
|
||||
".tga .targa",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"TIFF",
|
||||
18,
|
||||
"Tagged Image File Format",
|
||||
".tif .tiff",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"WBMP",
|
||||
19,
|
||||
"Wireless Bitmap",
|
||||
".wap .wbmp .wbm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"WebP",
|
||||
35,
|
||||
"Google WebP image format",
|
||||
".webp",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"XBM",
|
||||
22,
|
||||
"X11 Bitmap Format",
|
||||
".xbm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
(
|
||||
"XPM",
|
||||
23,
|
||||
"X11 Pixmap Format",
|
||||
".xpm",
|
||||
"i",
|
||||
"FreeimageFormat",
|
||||
"imageio.plugins.freeimage",
|
||||
),
|
||||
]
|
||||
for name, i, des, ext, mode, class_name, module_name in FREEIMAGE_FORMATS:
|
||||
config = PluginConfig(
|
||||
name=name.upper() + "-FI",
|
||||
class_name=class_name,
|
||||
module_name=module_name,
|
||||
is_legacy=True,
|
||||
install_name="freeimage",
|
||||
legacy_args={
|
||||
"description": des,
|
||||
"extensions": ext,
|
||||
"modes": mode,
|
||||
"fif": i,
|
||||
},
|
||||
)
|
||||
known_plugins[config.name] = config
|
||||
|
||||
# exists for backwards compatibility with FormatManager
|
||||
# delete in V3
|
||||
_original_order = [x for x, config in known_plugins.items() if config.is_legacy]
|
||||
@@ -1,28 +0,0 @@
|
||||
from typing import Any, Dict, Optional
|
||||
from ..core.v3_plugin_api import PluginV3
|
||||
|
||||
class PluginConfig:
|
||||
name: str
|
||||
class_name: str
|
||||
module_name: str
|
||||
is_legacy: bool
|
||||
package_name: Optional[str] = None
|
||||
install_name: Optional[str] = None
|
||||
legacy_args: Optional[dict] = None
|
||||
@property
|
||||
def format(self) -> Any: ...
|
||||
@property
|
||||
def plugin_class(self) -> PluginV3: ...
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
class_name: str,
|
||||
module_name: str,
|
||||
*,
|
||||
is_legacy: bool = False,
|
||||
package_name: str = None,
|
||||
install_name: str = None,
|
||||
legacy_args: dict = None,
|
||||
) -> None: ...
|
||||
|
||||
known_plugins: Dict[str, PluginConfig]
|
||||
@@ -1,16 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
|
||||
|
||||
""" This subpackage provides the core functionality of imageio
|
||||
(everything but the plugins).
|
||||
"""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
from .util import Image, Array, Dict, asarray, image_as_uint, urlopen
|
||||
from .util import BaseProgressIndicator, StdoutProgressIndicator, IS_PYPY
|
||||
from .util import get_platform, appdata_dir, resource_dirs, has_module
|
||||
from .findlib import load_lib
|
||||
from .fetching import get_remote_file, InternetNotAllowedError, NeedDownloadError
|
||||
from .request import Request, read_n_bytes, RETURN_BYTES
|
||||
from .format import Format, FormatManager
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,247 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Based on code from the vispy project
|
||||
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
|
||||
|
||||
"""Data downloading and reading functions
|
||||
"""
|
||||
|
||||
from math import log
|
||||
import os
|
||||
from os import path as op
|
||||
import sys
|
||||
import shutil
|
||||
import time
|
||||
|
||||
from . import appdata_dir, resource_dirs
|
||||
from . import StdoutProgressIndicator, urlopen
|
||||
|
||||
|
||||
class InternetNotAllowedError(IOError):
|
||||
"""Plugins that need resources can just use get_remote_file(), but
|
||||
should catch this error and silently ignore it.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class NeedDownloadError(IOError):
|
||||
"""Is raised when a remote file is requested that is not locally
|
||||
available, but which needs to be explicitly downloaded by the user.
|
||||
"""
|
||||
|
||||
|
||||
def get_remote_file(fname, directory=None, force_download=False, auto=True):
|
||||
"""Get a the filename for the local version of a file from the web
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : str
|
||||
The relative filename on the remote data repository to download.
|
||||
These correspond to paths on
|
||||
``https://github.com/imageio/imageio-binaries/``.
|
||||
directory : str | None
|
||||
The directory where the file will be cached if a download was
|
||||
required to obtain the file. By default, the appdata directory
|
||||
is used. This is also the first directory that is checked for
|
||||
a local version of the file. If the directory does not exist,
|
||||
it will be created.
|
||||
force_download : bool | str
|
||||
If True, the file will be downloaded even if a local copy exists
|
||||
(and this copy will be overwritten). Can also be a YYYY-MM-DD date
|
||||
to ensure a file is up-to-date (modified date of a file on disk,
|
||||
if present, is checked).
|
||||
auto : bool
|
||||
Whether to auto-download the file if its not present locally. Default
|
||||
True. If False and a download is needed, raises NeedDownloadError.
|
||||
|
||||
Returns
|
||||
-------
|
||||
fname : str
|
||||
The path to the file on the local system.
|
||||
"""
|
||||
_url_root = "https://github.com/imageio/imageio-binaries/raw/master/"
|
||||
url = _url_root + fname
|
||||
nfname = op.normcase(fname) # convert to native
|
||||
# Get dirs to look for the resource
|
||||
given_directory = directory
|
||||
directory = given_directory or appdata_dir("imageio")
|
||||
dirs = resource_dirs()
|
||||
dirs.insert(0, directory) # Given dir has preference
|
||||
# Try to find the resource locally
|
||||
for dir in dirs:
|
||||
filename = op.join(dir, nfname)
|
||||
if op.isfile(filename):
|
||||
if not force_download: # we're done
|
||||
if given_directory and given_directory != dir:
|
||||
filename2 = os.path.join(given_directory, nfname)
|
||||
# Make sure the output directory exists
|
||||
if not op.isdir(op.dirname(filename2)):
|
||||
os.makedirs(op.abspath(op.dirname(filename2)))
|
||||
shutil.copy(filename, filename2)
|
||||
return filename2
|
||||
return filename
|
||||
if isinstance(force_download, str):
|
||||
ntime = time.strptime(force_download, "%Y-%m-%d")
|
||||
ftime = time.gmtime(op.getctime(filename))
|
||||
if ftime >= ntime:
|
||||
if given_directory and given_directory != dir:
|
||||
filename2 = os.path.join(given_directory, nfname)
|
||||
# Make sure the output directory exists
|
||||
if not op.isdir(op.dirname(filename2)):
|
||||
os.makedirs(op.abspath(op.dirname(filename2)))
|
||||
shutil.copy(filename, filename2)
|
||||
return filename2
|
||||
return filename
|
||||
else:
|
||||
print("File older than %s, updating..." % force_download)
|
||||
break
|
||||
|
||||
# If we get here, we're going to try to download the file
|
||||
if os.getenv("IMAGEIO_NO_INTERNET", "").lower() in ("1", "true", "yes"):
|
||||
raise InternetNotAllowedError(
|
||||
"Will not download resource from the "
|
||||
"internet because environment variable "
|
||||
"IMAGEIO_NO_INTERNET is set."
|
||||
)
|
||||
|
||||
# Can we proceed with auto-download?
|
||||
if not auto:
|
||||
raise NeedDownloadError()
|
||||
|
||||
# Get filename to store to and make sure the dir exists
|
||||
filename = op.join(directory, nfname)
|
||||
if not op.isdir(op.dirname(filename)):
|
||||
os.makedirs(op.abspath(op.dirname(filename)))
|
||||
# let's go get the file
|
||||
if os.getenv("CONTINUOUS_INTEGRATION", False): # pragma: no cover
|
||||
# On CI, we retry a few times ...
|
||||
for i in range(2):
|
||||
try:
|
||||
_fetch_file(url, filename)
|
||||
return filename
|
||||
except IOError:
|
||||
time.sleep(0.5)
|
||||
else:
|
||||
_fetch_file(url, filename)
|
||||
return filename
|
||||
else: # pragma: no cover
|
||||
_fetch_file(url, filename)
|
||||
return filename
|
||||
|
||||
|
||||
def _fetch_file(url, file_name, print_destination=True):
|
||||
"""Load requested file, downloading it if needed or requested
|
||||
|
||||
Parameters
|
||||
----------
|
||||
url: string
|
||||
The url of file to be downloaded.
|
||||
file_name: string
|
||||
Name, along with the path, of where downloaded file will be saved.
|
||||
print_destination: bool, optional
|
||||
If true, destination of where file was saved will be printed after
|
||||
download finishes.
|
||||
resume: bool, optional
|
||||
If true, try to resume partially downloaded files.
|
||||
"""
|
||||
# Adapted from NISL:
|
||||
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
|
||||
|
||||
print(
|
||||
"Imageio: %r was not found on your computer; "
|
||||
"downloading it now." % os.path.basename(file_name)
|
||||
)
|
||||
|
||||
temp_file_name = file_name + ".part"
|
||||
local_file = None
|
||||
initial_size = 0
|
||||
errors = []
|
||||
for tries in range(4):
|
||||
try:
|
||||
# Checking file size and displaying it alongside the download url
|
||||
remote_file = urlopen(url, timeout=5.0)
|
||||
file_size = int(remote_file.headers["Content-Length"].strip())
|
||||
size_str = _sizeof_fmt(file_size)
|
||||
print("Try %i. Download from %s (%s)" % (tries + 1, url, size_str))
|
||||
# Downloading data (can be extended to resume if need be)
|
||||
local_file = open(temp_file_name, "wb")
|
||||
_chunk_read(remote_file, local_file, initial_size=initial_size)
|
||||
# temp file must be closed prior to the move
|
||||
if not local_file.closed:
|
||||
local_file.close()
|
||||
shutil.move(temp_file_name, file_name)
|
||||
if print_destination is True:
|
||||
sys.stdout.write("File saved as %s.\n" % file_name)
|
||||
break
|
||||
except Exception as e:
|
||||
errors.append(e)
|
||||
print("Error while fetching file: %s." % str(e))
|
||||
finally:
|
||||
if local_file is not None:
|
||||
if not local_file.closed:
|
||||
local_file.close()
|
||||
else:
|
||||
raise IOError(
|
||||
"Unable to download %r. Perhaps there is no internet "
|
||||
"connection? If there is, please report this problem."
|
||||
% os.path.basename(file_name)
|
||||
)
|
||||
|
||||
|
||||
def _chunk_read(response, local_file, chunk_size=8192, initial_size=0):
|
||||
"""Download a file chunk by chunk and show advancement
|
||||
|
||||
Can also be used when resuming downloads over http.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
response: urllib.response.addinfourl
|
||||
Response to the download request in order to get file size.
|
||||
local_file: file
|
||||
Hard disk file where data should be written.
|
||||
chunk_size: integer, optional
|
||||
Size of downloaded chunks. Default: 8192
|
||||
initial_size: int, optional
|
||||
If resuming, indicate the initial size of the file.
|
||||
"""
|
||||
# Adapted from NISL:
|
||||
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
|
||||
|
||||
bytes_so_far = initial_size
|
||||
# Returns only amount left to download when resuming, not the size of the
|
||||
# entire file
|
||||
total_size = int(response.headers["Content-Length"].strip())
|
||||
total_size += initial_size
|
||||
|
||||
progress = StdoutProgressIndicator("Downloading")
|
||||
progress.start("", "bytes", total_size)
|
||||
|
||||
while True:
|
||||
chunk = response.read(chunk_size)
|
||||
bytes_so_far += len(chunk)
|
||||
if not chunk:
|
||||
break
|
||||
_chunk_write(chunk, local_file, progress)
|
||||
progress.finish("Done")
|
||||
|
||||
|
||||
def _chunk_write(chunk, local_file, progress):
|
||||
"""Write a chunk to file and update the progress bar"""
|
||||
local_file.write(chunk)
|
||||
progress.increase_progress(len(chunk))
|
||||
time.sleep(0) # Give other threads a chance, e.g. those that handle stdout pipes
|
||||
|
||||
|
||||
def _sizeof_fmt(num):
|
||||
"""Turn number of bytes into human-readable str"""
|
||||
units = ["bytes", "kB", "MB", "GB", "TB", "PB"]
|
||||
decimals = [0, 0, 1, 2, 2, 2]
|
||||
"""Human friendly file size"""
|
||||
if num > 1:
|
||||
exponent = min(int(log(num, 1024)), len(units) - 1)
|
||||
quotient = float(num) / 1024**exponent
|
||||
unit = units[exponent]
|
||||
num_decimals = decimals[exponent]
|
||||
format_string = "{0:.%sf} {1}" % num_decimals
|
||||
return format_string.format(quotient, unit)
|
||||
return "0 bytes" if num == 0 else "1 byte"
|
||||
@@ -1,161 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2015-1018, imageio contributors
|
||||
# Copyright (C) 2013, Zach Pincus, Almar Klein and others
|
||||
|
||||
""" This module contains generic code to find and load a dynamic library.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import ctypes
|
||||
|
||||
|
||||
LOCALDIR = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
# Flag that can be patched / set to True to disable loading non-system libs
|
||||
SYSTEM_LIBS_ONLY = False
|
||||
|
||||
|
||||
def looks_lib(fname):
|
||||
"""Returns True if the given filename looks like a dynamic library.
|
||||
Based on extension, but cross-platform and more flexible.
|
||||
"""
|
||||
fname = fname.lower()
|
||||
if sys.platform.startswith("win"):
|
||||
return fname.endswith(".dll")
|
||||
elif sys.platform.startswith("darwin"):
|
||||
return fname.endswith(".dylib")
|
||||
else:
|
||||
return fname.endswith(".so") or ".so." in fname
|
||||
|
||||
|
||||
def generate_candidate_libs(lib_names, lib_dirs=None):
|
||||
"""Generate a list of candidate filenames of what might be the dynamic
|
||||
library corresponding with the given list of names.
|
||||
Returns (lib_dirs, lib_paths)
|
||||
"""
|
||||
lib_dirs = lib_dirs or []
|
||||
|
||||
# Get system dirs to search
|
||||
sys_lib_dirs = [
|
||||
"/lib",
|
||||
"/usr/lib",
|
||||
"/usr/lib/x86_64-linux-gnu",
|
||||
"/usr/lib/aarch64-linux-gnu",
|
||||
"/usr/local/lib",
|
||||
"/opt/local/lib",
|
||||
]
|
||||
|
||||
# Get Python dirs to search (shared if for Pyzo)
|
||||
py_sub_dirs = ["bin", "lib", "DLLs", "Library/bin", "shared"]
|
||||
py_lib_dirs = [os.path.join(sys.prefix, d) for d in py_sub_dirs]
|
||||
if hasattr(sys, "base_prefix"):
|
||||
py_lib_dirs += [os.path.join(sys.base_prefix, d) for d in py_sub_dirs]
|
||||
|
||||
# Get user dirs to search (i.e. HOME)
|
||||
home_dir = os.path.expanduser("~")
|
||||
user_lib_dirs = [os.path.join(home_dir, d) for d in ["lib"]]
|
||||
|
||||
# Select only the dirs for which a directory exists, and remove duplicates
|
||||
potential_lib_dirs = lib_dirs + sys_lib_dirs + py_lib_dirs + user_lib_dirs
|
||||
lib_dirs = []
|
||||
for ld in potential_lib_dirs:
|
||||
if os.path.isdir(ld) and ld not in lib_dirs:
|
||||
lib_dirs.append(ld)
|
||||
|
||||
# Now attempt to find libraries of that name in the given directory
|
||||
# (case-insensitive)
|
||||
lib_paths = []
|
||||
for lib_dir in lib_dirs:
|
||||
# Get files, prefer short names, last version
|
||||
files = os.listdir(lib_dir)
|
||||
files = reversed(sorted(files))
|
||||
files = sorted(files, key=len)
|
||||
for lib_name in lib_names:
|
||||
# Test all filenames for name and ext
|
||||
for fname in files:
|
||||
if fname.lower().startswith(lib_name) and looks_lib(fname):
|
||||
lib_paths.append(os.path.join(lib_dir, fname))
|
||||
|
||||
# Return (only the items which are files)
|
||||
lib_paths = [lp for lp in lib_paths if os.path.isfile(lp)]
|
||||
return lib_dirs, lib_paths
|
||||
|
||||
|
||||
def load_lib(exact_lib_names, lib_names, lib_dirs=None):
|
||||
"""load_lib(exact_lib_names, lib_names, lib_dirs=None)
|
||||
|
||||
Load a dynamic library.
|
||||
|
||||
This function first tries to load the library from the given exact
|
||||
names. When that fails, it tries to find the library in common
|
||||
locations. It searches for files that start with one of the names
|
||||
given in lib_names (case insensitive). The search is performed in
|
||||
the given lib_dirs and a set of common library dirs.
|
||||
|
||||
Returns ``(ctypes_library, library_path)``
|
||||
"""
|
||||
|
||||
# Checks
|
||||
assert isinstance(exact_lib_names, list)
|
||||
assert isinstance(lib_names, list)
|
||||
if lib_dirs is not None:
|
||||
assert isinstance(lib_dirs, list)
|
||||
exact_lib_names = [n for n in exact_lib_names if n]
|
||||
lib_names = [n for n in lib_names if n]
|
||||
|
||||
# Get reference name (for better messages)
|
||||
if lib_names:
|
||||
the_lib_name = lib_names[0]
|
||||
elif exact_lib_names:
|
||||
the_lib_name = exact_lib_names[0]
|
||||
else:
|
||||
raise ValueError("No library name given.")
|
||||
|
||||
# Collect filenames of potential libraries
|
||||
# First try a few bare library names that ctypes might be able to find
|
||||
# in the default locations for each platform.
|
||||
if SYSTEM_LIBS_ONLY:
|
||||
lib_dirs, lib_paths = [], []
|
||||
else:
|
||||
lib_dirs, lib_paths = generate_candidate_libs(lib_names, lib_dirs)
|
||||
lib_paths = exact_lib_names + lib_paths
|
||||
|
||||
# Select loader
|
||||
if sys.platform.startswith("win"):
|
||||
loader = ctypes.windll
|
||||
else:
|
||||
loader = ctypes.cdll
|
||||
|
||||
# Try to load until success
|
||||
the_lib = None
|
||||
errors = []
|
||||
for fname in lib_paths:
|
||||
try:
|
||||
the_lib = loader.LoadLibrary(fname)
|
||||
break
|
||||
except Exception as err:
|
||||
# Don't record errors when it couldn't load the library from an
|
||||
# exact name -- this fails often, and doesn't provide any useful
|
||||
# debugging information anyway, beyond "couldn't find library..."
|
||||
if fname not in exact_lib_names:
|
||||
errors.append((fname, err))
|
||||
|
||||
# No success ...
|
||||
if the_lib is None:
|
||||
if errors:
|
||||
# No library loaded, and load-errors reported for some
|
||||
# candidate libs
|
||||
err_txt = ["%s:\n%s" % (lib, str(e)) for lib, e in errors]
|
||||
msg = (
|
||||
"One or more %s libraries were found, but "
|
||||
+ "could not be loaded due to the following errors:\n%s"
|
||||
)
|
||||
raise OSError(msg % (the_lib_name, "\n\n".join(err_txt)))
|
||||
else:
|
||||
# No errors, because no potential libraries found at all!
|
||||
msg = "Could not find a %s library in any of:\n%s"
|
||||
raise OSError(msg % (the_lib_name, "\n".join(lib_dirs)))
|
||||
|
||||
# Done
|
||||
return the_lib, fname
|
||||
@@ -1,865 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""
|
||||
|
||||
.. note::
|
||||
imageio is under construction, some details with regard to the
|
||||
Reader and Writer classes may change.
|
||||
|
||||
These are the main classes of imageio. They expose an interface for
|
||||
advanced users and plugin developers. A brief overview:
|
||||
|
||||
* imageio.FormatManager - for keeping track of registered formats.
|
||||
* imageio.Format - representation of a file format reader/writer
|
||||
* imageio.Format.Reader - object used during the reading of a file.
|
||||
* imageio.Format.Writer - object used during saving a file.
|
||||
* imageio.Request - used to store the filename and other info.
|
||||
|
||||
Plugins need to implement a Format class and register
|
||||
a format object using ``imageio.formats.add_format()``.
|
||||
|
||||
"""
|
||||
|
||||
# todo: do we even use the known extensions?
|
||||
|
||||
# Some notes:
|
||||
#
|
||||
# The classes in this module use the Request object to pass filename and
|
||||
# related info around. This request object is instantiated in
|
||||
# imageio.get_reader and imageio.get_writer.
|
||||
|
||||
import sys
|
||||
import warnings
|
||||
import contextlib
|
||||
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
from . import Array, asarray
|
||||
from .request import ImageMode
|
||||
from ..config import known_plugins, known_extensions, PluginConfig, FileExtension
|
||||
from ..config.plugins import _original_order
|
||||
from .imopen import imopen
|
||||
|
||||
|
||||
# survived for backwards compatibility
|
||||
# I don't know if external plugin code depends on it existing
|
||||
# We no longer do
|
||||
MODENAMES = ImageMode
|
||||
|
||||
|
||||
def _get_config(plugin):
|
||||
"""Old Plugin resolution logic.
|
||||
|
||||
Remove once we remove the old format manager.
|
||||
"""
|
||||
|
||||
extension_name = None
|
||||
|
||||
if Path(plugin).suffix.lower() in known_extensions:
|
||||
extension_name = Path(plugin).suffix.lower()
|
||||
elif plugin in known_plugins:
|
||||
pass
|
||||
elif plugin.lower() in known_extensions:
|
||||
extension_name = plugin.lower()
|
||||
elif "." + plugin.lower() in known_extensions:
|
||||
extension_name = "." + plugin.lower()
|
||||
else:
|
||||
raise IndexError(f"No format known by name `{plugin}`.")
|
||||
|
||||
if extension_name is not None:
|
||||
for plugin_name in [
|
||||
x
|
||||
for file_extension in known_extensions[extension_name]
|
||||
for x in file_extension.priority
|
||||
]:
|
||||
if known_plugins[plugin_name].is_legacy:
|
||||
plugin = plugin_name
|
||||
break
|
||||
|
||||
return known_plugins[plugin]
|
||||
|
||||
|
||||
class Format(object):
|
||||
"""Represents an implementation to read/write a particular file format
|
||||
|
||||
A format instance is responsible for 1) providing information about
|
||||
a format; 2) determining whether a certain file can be read/written
|
||||
with this format; 3) providing a reader/writer class.
|
||||
|
||||
Generally, imageio will select the right format and use that to
|
||||
read/write an image. A format can also be explicitly chosen in all
|
||||
read/write functions. Use ``print(format)``, or ``help(format_name)``
|
||||
to see its documentation.
|
||||
|
||||
To implement a specific format, one should create a subclass of
|
||||
Format and the Format.Reader and Format.Writer classes. See
|
||||
:class:`imageio.plugins` for details.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name : str
|
||||
A short name of this format. Users can select a format using its name.
|
||||
description : str
|
||||
A one-line description of the format.
|
||||
extensions : str | list | None
|
||||
List of filename extensions that this format supports. If a
|
||||
string is passed it should be space or comma separated. The
|
||||
extensions are used in the documentation and to allow users to
|
||||
select a format by file extension. It is not used to determine
|
||||
what format to use for reading/saving a file.
|
||||
modes : str
|
||||
A string containing the modes that this format can handle ('iIvV'),
|
||||
“i” for an image, “I” for multiple images, “v” for a volume,
|
||||
“V” for multiple volumes.
|
||||
This attribute is used in the documentation and to select the
|
||||
formats when reading/saving a file.
|
||||
"""
|
||||
|
||||
def __init__(self, name, description, extensions=None, modes=None):
|
||||
"""Initialize the Plugin.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name : str
|
||||
A short name of this format. Users can select a format using its name.
|
||||
description : str
|
||||
A one-line description of the format.
|
||||
extensions : str | list | None
|
||||
List of filename extensions that this format supports. If a
|
||||
string is passed it should be space or comma separated. The
|
||||
extensions are used in the documentation and to allow users to
|
||||
select a format by file extension. It is not used to determine
|
||||
what format to use for reading/saving a file.
|
||||
modes : str
|
||||
A string containing the modes that this format can handle ('iIvV'),
|
||||
“i” for an image, “I” for multiple images, “v” for a volume,
|
||||
“V” for multiple volumes.
|
||||
This attribute is used in the documentation and to select the
|
||||
formats when reading/saving a file.
|
||||
"""
|
||||
|
||||
# Store name and description
|
||||
self._name = name.upper()
|
||||
self._description = description
|
||||
|
||||
# Store extensions, do some effort to normalize them.
|
||||
# They are stored as a list of lowercase strings without leading dots.
|
||||
if extensions is None:
|
||||
extensions = []
|
||||
elif isinstance(extensions, str):
|
||||
extensions = extensions.replace(",", " ").split(" ")
|
||||
#
|
||||
if isinstance(extensions, (tuple, list)):
|
||||
self._extensions = tuple(
|
||||
["." + e.strip(".").lower() for e in extensions if e]
|
||||
)
|
||||
else:
|
||||
raise ValueError("Invalid value for extensions given.")
|
||||
|
||||
# Store mode
|
||||
self._modes = modes or ""
|
||||
if not isinstance(self._modes, str):
|
||||
raise ValueError("Invalid value for modes given.")
|
||||
for m in self._modes:
|
||||
if m not in "iIvV?":
|
||||
raise ValueError("Invalid value for mode given.")
|
||||
|
||||
def __repr__(self):
|
||||
# Short description
|
||||
return "<Format %s - %s>" % (self.name, self.description)
|
||||
|
||||
def __str__(self):
|
||||
return self.doc
|
||||
|
||||
@property
|
||||
def doc(self):
|
||||
"""The documentation for this format (name + description + docstring)."""
|
||||
# Our docsring is assumed to be indented by four spaces. The
|
||||
# first line needs special attention.
|
||||
return "%s - %s\n\n %s\n" % (
|
||||
self.name,
|
||||
self.description,
|
||||
self.__doc__.strip(),
|
||||
)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
"""The name of this format."""
|
||||
return self._name
|
||||
|
||||
@property
|
||||
def description(self):
|
||||
"""A short description of this format."""
|
||||
return self._description
|
||||
|
||||
@property
|
||||
def extensions(self):
|
||||
"""A list of file extensions supported by this plugin.
|
||||
These are all lowercase with a leading dot.
|
||||
"""
|
||||
return self._extensions
|
||||
|
||||
@property
|
||||
def modes(self):
|
||||
"""A string specifying the modes that this format can handle."""
|
||||
return self._modes
|
||||
|
||||
def get_reader(self, request):
|
||||
"""get_reader(request)
|
||||
|
||||
Return a reader object that can be used to read data and info
|
||||
from the given file. Users are encouraged to use
|
||||
imageio.get_reader() instead.
|
||||
"""
|
||||
select_mode = request.mode[1] if request.mode[1] in "iIvV" else ""
|
||||
if select_mode not in self.modes:
|
||||
raise RuntimeError(
|
||||
f"Format {self.name} cannot read in {request.mode.image_mode} mode"
|
||||
)
|
||||
return self.Reader(self, request)
|
||||
|
||||
def get_writer(self, request):
|
||||
"""get_writer(request)
|
||||
|
||||
Return a writer object that can be used to write data and info
|
||||
to the given file. Users are encouraged to use
|
||||
imageio.get_writer() instead.
|
||||
"""
|
||||
select_mode = request.mode[1] if request.mode[1] in "iIvV" else ""
|
||||
if select_mode not in self.modes:
|
||||
raise RuntimeError(
|
||||
f"Format {self.name} cannot write in {request.mode.image_mode} mode"
|
||||
)
|
||||
return self.Writer(self, request)
|
||||
|
||||
def can_read(self, request):
|
||||
"""can_read(request)
|
||||
|
||||
Get whether this format can read data from the specified uri.
|
||||
"""
|
||||
return self._can_read(request)
|
||||
|
||||
def can_write(self, request):
|
||||
"""can_write(request)
|
||||
|
||||
Get whether this format can write data to the speciefed uri.
|
||||
"""
|
||||
return self._can_write(request)
|
||||
|
||||
def _can_read(self, request): # pragma: no cover
|
||||
"""Check if Plugin can read from ImageResource.
|
||||
|
||||
This method is called when the format manager is searching for a format
|
||||
to read a certain image. Return True if this format can do it.
|
||||
|
||||
The format manager is aware of the extensions and the modes that each
|
||||
format can handle. It will first ask all formats that *seem* to be able
|
||||
to read it whether they can. If none can, it will ask the remaining
|
||||
formats if they can: the extension might be missing, and this allows
|
||||
formats to provide functionality for certain extensions, while giving
|
||||
preference to other plugins.
|
||||
|
||||
If a format says it can, it should live up to it. The format would
|
||||
ideally check the request.firstbytes and look for a header of some kind.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : Request
|
||||
A request that can be used to access the ImageResource and obtain
|
||||
metadata about it.
|
||||
|
||||
Returns
|
||||
-------
|
||||
can_read : bool
|
||||
True if the plugin can read from the ImageResource, False otherwise.
|
||||
|
||||
"""
|
||||
return None # Plugins must implement this
|
||||
|
||||
def _can_write(self, request): # pragma: no cover
|
||||
"""Check if Plugin can write to ImageResource.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : Request
|
||||
A request that can be used to access the ImageResource and obtain
|
||||
metadata about it.
|
||||
|
||||
Returns
|
||||
-------
|
||||
can_read : bool
|
||||
True if the plugin can write to the ImageResource, False otherwise.
|
||||
|
||||
"""
|
||||
return None # Plugins must implement this
|
||||
|
||||
# -----
|
||||
|
||||
class _BaseReaderWriter(object):
|
||||
"""Base class for the Reader and Writer class to implement common
|
||||
functionality. It implements a similar approach for opening/closing
|
||||
and context management as Python's file objects.
|
||||
"""
|
||||
|
||||
def __init__(self, format, request):
|
||||
self.__closed = False
|
||||
self._BaseReaderWriter_last_index = -1
|
||||
self._format = format
|
||||
self._request = request
|
||||
# Open the reader/writer
|
||||
self._open(**self.request.kwargs.copy())
|
||||
|
||||
@property
|
||||
def format(self):
|
||||
"""The :class:`.Format` object corresponding to the current
|
||||
read/write operation.
|
||||
"""
|
||||
return self._format
|
||||
|
||||
@property
|
||||
def request(self):
|
||||
"""The :class:`.Request` object corresponding to the
|
||||
current read/write operation.
|
||||
"""
|
||||
return self._request
|
||||
|
||||
def __enter__(self):
|
||||
self._checkClosed()
|
||||
return self
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
if value is None:
|
||||
# Otherwise error in close hide the real error.
|
||||
self.close()
|
||||
|
||||
def __del__(self):
|
||||
try:
|
||||
self.close()
|
||||
except Exception: # pragma: no cover
|
||||
pass # Supress noise when called during interpreter shutdown
|
||||
|
||||
def close(self):
|
||||
"""Flush and close the reader/writer.
|
||||
This method has no effect if it is already closed.
|
||||
"""
|
||||
if self.__closed:
|
||||
return
|
||||
self.__closed = True
|
||||
self._close()
|
||||
# Process results and clean request object
|
||||
self.request.finish()
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
"""Whether the reader/writer is closed."""
|
||||
return self.__closed
|
||||
|
||||
def _checkClosed(self, msg=None):
|
||||
"""Internal: raise an ValueError if reader/writer is closed"""
|
||||
if self.closed:
|
||||
what = self.__class__.__name__
|
||||
msg = msg or ("I/O operation on closed %s." % what)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
# To implement
|
||||
|
||||
def _open(self, **kwargs):
|
||||
"""_open(**kwargs)
|
||||
|
||||
Plugins should probably implement this.
|
||||
|
||||
It is called when reader/writer is created. Here the
|
||||
plugin can do its initialization. The given keyword arguments
|
||||
are those that were given by the user at imageio.read() or
|
||||
imageio.write().
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _close(self):
|
||||
"""_close()
|
||||
|
||||
Plugins should probably implement this.
|
||||
|
||||
It is called when the reader/writer is closed. Here the plugin
|
||||
can do a cleanup, flush, etc.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
# -----
|
||||
|
||||
class Reader(_BaseReaderWriter):
    """
    The purpose of a reader object is to read data from an image
    resource, and should be obtained by calling :func:`.get_reader`.

    A reader can be used as an iterator to read multiple images,
    and (if the format permits) only reads data from the file when
    new data is requested (i.e. streaming). A reader can also be
    used as a context manager so that it is automatically closed.

    Plugins implement Reader's for different formats. Though rare,
    plugins may provide additional functionality (beyond what is
    provided by the base reader class).
    """

    def get_length(self):
        """get_length()

        Get the number of images in the file. (Note: you can also
        use ``len(reader_object)``.)

        The result can be:
            * 0 for files that only have meta data
            * 1 for singleton images (e.g. in PNG, JPEG, etc.)
            * N for image series
            * inf for streams (series of unknown length)
        """
        return self._get_length()

    def get_data(self, index, **kwargs):
        """get_data(index, **kwargs)

        Read image data from the file, using the image index. The
        returned image has a 'meta' attribute with the meta data.
        Raises IndexError if the index is out of range.

        Some formats may support additional keyword arguments. These are
        listed in the documentation of those formats.
        """
        self._checkClosed()
        # Remember the index so that get_next_data() can continue from here.
        self._BaseReaderWriter_last_index = index
        try:
            im, meta = self._get_data(index, **kwargs)
        except StopIteration:
            # Normalize a plugin-raised StopIteration into the documented
            # IndexError contract.
            raise IndexError(index)
        return Array(im, meta)  # Array tests im and meta

    def get_next_data(self, **kwargs):
        """get_next_data(**kwargs)

        Read the next image from the series.

        Some formats may support additional keyword arguments. These are
        listed in the documentation of those formats.
        """
        # Continue one past the last index read (or set via set_image_index).
        return self.get_data(self._BaseReaderWriter_last_index + 1, **kwargs)

    def set_image_index(self, index, **kwargs):
        """set_image_index(index)

        Set the internal pointer such that the next call to
        get_next_data() returns the image specified by the index
        """
        self._checkClosed()
        n = self.get_length()
        # Store index - 1 (clamped to [-1, n]) so that get_next_data()'s
        # "+ 1" lands exactly on `index`.
        self._BaseReaderWriter_last_index = min(max(index - 1, -1), n)

    def get_meta_data(self, index=None):
        """get_meta_data(index=None)

        Read meta data from the file. using the image index. If the
        index is omitted or None, return the file's (global) meta data.

        Note that ``get_data`` also provides the meta data for the returned
        image as an attribute of that image.

        The meta data is a dict, which shape depends on the format.
        E.g. for JPEG, the dict maps group names to subdicts and each
        group is a dict with name-value pairs. The groups represent
        the different metadata formats (EXIF, XMP, etc.).
        """
        self._checkClosed()
        meta = self._get_meta_data(index)
        # Enforce the documented contract on the plugin's return value.
        if not isinstance(meta, dict):
            raise ValueError(
                "Meta data must be a dict, not %r" % meta.__class__.__name__
            )
        return meta

    def iter_data(self):
        """iter_data()

        Iterate over all images in the series. (Note: you can also
        iterate over the reader object.)

        """
        self._checkClosed()
        n = self.get_length()
        i = 0
        while i < n:
            try:
                im, meta = self._get_data(i)
            except StopIteration:
                # The stream ended earlier than get_length() suggested.
                return
            except IndexError:
                if n == float("inf"):
                    # For unknown-length streams an IndexError marks the end.
                    return
                raise
            yield Array(im, meta)
            i += 1

    # Compatibility

    def __iter__(self):
        return self.iter_data()

    def __len__(self):
        n = self.get_length()
        if n == float("inf"):
            # len() must return an int; cap unknown-length streams.
            n = sys.maxsize
        return n

    # To implement

    def _get_length(self):
        """_get_length()

        Plugins must implement this.

        The returned scalar specifies the number of images in the series.
        See Reader.get_length for more information.
        """
        raise NotImplementedError()

    def _get_data(self, index):
        """_get_data()

        Plugins must implement this, but may raise an IndexError in
        case the plugin does not support random access.

        It should return the image and meta data: (ndarray, dict).
        """
        raise NotImplementedError()

    def _get_meta_data(self, index):
        """_get_meta_data(index)

        Plugins must implement this.

        It should return the meta data as a dict, corresponding to the
        given index, or to the file's (global) meta data if index is
        None.
        """
        raise NotImplementedError()
|
||||
|
||||
# -----
|
||||
|
||||
class Writer(_BaseReaderWriter):
    """
    A writer object writes data to an image resource, and should be
    obtained by calling :func:`.get_writer`.

    Where the format allows it, data is written to the file as soon as
    it is provided (i.e. streaming). A writer also works as a context
    manager, closing the resource automatically on exit.

    Plugins implement Writer's for the formats they support and may -
    though rarely - provide functionality beyond this base class.
    """

    def append_data(self, im, meta=None):
        """append_data(im, meta={})

        Append an image (plus meta data) to the file. The meta data
        that ends up in the file is the meta data attached to the image
        itself (if any), updated with the explicitly given ``meta``.
        """
        self._checkClosed()

        # The image must be a numpy array (possibly an imageio Array).
        if not isinstance(im, np.ndarray):
            raise ValueError("append_data requires ndarray as first arg")

        # Merge image-attached meta data with the explicit meta data;
        # explicitly passed keys win on collision.
        merged_meta = {}
        im_meta = getattr(im, "meta", None)
        if isinstance(im_meta, dict):
            merged_meta.update(im_meta)
        if meta is not None:
            if not isinstance(meta, dict):
                raise ValueError("Meta must be a dict.")
            merged_meta.update(meta)

        # Hand the plugin a plain ndarray, decoupled from its meta attribute.
        return self._append_data(asarray(im), merged_meta)

    def set_meta_data(self, meta):
        """set_meta_data(meta)

        Set the file's (global) meta data. The dict's shape depends on
        the format; e.g. for JPEG it maps group names (EXIF, XMP, etc.)
        to subdicts of name-value pairs.

        Note that some meta formats may not be supported for writing,
        and individual fields may be ignored without warning if they
        are invalid.
        """
        self._checkClosed()
        if not isinstance(meta, dict):
            raise ValueError("Meta must be a dict.")
        return self._set_meta_data(meta)

    # To implement

    def _append_data(self, im, meta):
        # Plugins must implement this
        raise NotImplementedError()

    def _set_meta_data(self, meta):
        # Plugins must implement this
        raise NotImplementedError()
|
||||
|
||||
|
||||
class FormatManager(object):
    """
    The FormatManager is a singleton plugin factory.

    The format manager supports getting a format object using indexing (by
    format name or extension). When used as an iterator, this object
    yields all registered format objects.

    See also :func:`.help`.
    """

    @property
    def _formats(self):
        # Collect the configs of all installed legacy plugins. Accessing
        # ``config.format`` imports the plugin's module, so a missing
        # optional dependency surfaces as ImportError and is skipped.
        available_formats = list()

        for config in known_plugins.values():
            with contextlib.suppress(ImportError):
                # if an exception is raised, then format not installed
                if config.is_legacy and config.format is not None:
                    available_formats.append(config)

        return available_formats

    def __repr__(self):
        return f"<imageio.FormatManager with {len(self._formats)} registered formats>"

    def __iter__(self):
        # Yields Format objects (not PluginConfig).
        return iter(x.format for x in self._formats)

    def __len__(self):
        return len(self._formats)

    def __str__(self):
        # One line per installed legacy format: "NAME - description [exts]".
        ss = []
        for config in self._formats:
            ext = config.legacy_args["extensions"]
            desc = config.legacy_args["description"]
            s = f"{config.name} - {desc} [{ext}]"
            ss.append(s)
        return "\n".join(ss)

    def __getitem__(self, name):
        # Deprecated lookup by format name, extension, or even an existing
        # file path (for which the matching reader's format is returned).
        warnings.warn(
            "The usage of `FormatManager` is deprecated and it will be "
            "removed in Imageio v3. Use `iio.imopen` instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        if not isinstance(name, str):
            raise ValueError(
                "Looking up a format should be done by name or by extension."
            )

        if name == "":
            raise ValueError("No format matches the empty string.")

        # Test if name is existing file
        if Path(name).is_file():
            # legacy compatibility - why test reading here??
            try:
                return imopen(name, "r", legacy_mode=True)._format
            except ValueError:
                # no plugin can read the file
                pass

        config = _get_config(name.upper())

        try:
            return config.format
        except ImportError:
            raise ImportError(
                f"The `{config.name}` format is not installed. "
                f"Use `pip install imageio[{config.install_name}]` to install it."
            )

    def sort(self, *names):
        """sort(name1, name2, name3, ...)

        Sort the formats based on zero or more given names; a format with
        a name that matches one of the given names will take precedence
        over other formats. A match means an equal name, or ending with
        that name (though the former counts higher). Case insensitive.

        Format preference will match the order of the given names: using
        ``sort('TIFF', '-FI', '-PIL')`` would prefer the FreeImage formats
        over the Pillow formats, but prefer TIFF even more. Each time
        this is called, the starting point is the default format order,
        and calling ``sort()`` with no arguments will reset the order.

        Be aware that using the function can affect the behavior of
        other code that makes use of imageio.

        Also see the ``IMAGEIO_FORMAT_ORDER`` environment variable.
        """

        warnings.warn(
            "`FormatManager` is deprecated and it will be removed in ImageIO v3."
            " Migrating `FormatManager.sort` depends on your use-case:\n"
            "\t- modify `iio.config.known_plugins` to specify the search order for "
            "unrecognized formats.\n"
            "\t- modify `iio.config.known_extensions[<extension>].priority`"
            " to control a specific extension.",
            DeprecationWarning,
            stacklevel=2,
        )

        # Check and sanitize input
        for name in names:
            if not isinstance(name, str):
                raise TypeError("formats.sort() accepts only string names.")
            if any(c in name for c in ".,"):
                raise ValueError(
                    "Names given to formats.sort() should not "
                    "contain dots `.` or commas `,`."
                )

        # Calling sort() with no names resets to the default order.
        should_reset = len(names) == 0
        if should_reset:
            names = _original_order

        sane_names = [name.strip().upper() for name in names if name != ""]

        # enforce order for every extension that uses it
        flat_extensions = [
            ext for ext_list in known_extensions.values() for ext in ext_list
        ]
        for extension in flat_extensions:
            if should_reset:
                extension.reset()
                continue

            # Walk the names in reverse so the first given name ends up
            # at the front of the priority list.
            for name in reversed(sane_names):
                for plugin in [x for x in extension.default_priority]:
                    if plugin.endswith(name):
                        extension.priority.remove(plugin)
                        extension.priority.insert(0, plugin)

        # Rebuild known_plugins so matched names come first, preserving
        # the relative order of everything else.
        old_order = known_plugins.copy()
        known_plugins.clear()

        for name in sane_names:
            plugin = old_order.pop(name, None)
            if plugin is not None:
                known_plugins[name] = plugin

        known_plugins.update(old_order)

    def add_format(self, iio_format, overwrite=False):
        """add_format(format, overwrite=False)

        Register a format, so that imageio can use it. If a format with the
        same name already exists, an error is raised, unless overwrite is True,
        in which case the current format is replaced.
        """

        warnings.warn(
            "`FormatManager` is deprecated and it will be removed in ImageIO v3."
            "To migrate `FormatManager.add_format` add the plugin directly to "
            "`iio.config.known_plugins`.",
            DeprecationWarning,
            stacklevel=2,
        )

        if not isinstance(iio_format, Format):
            raise ValueError("add_format needs argument to be a Format object")
        elif not overwrite and iio_format.name in self.get_format_names():
            raise ValueError(
                f"A Format named {iio_format.name} is already registered, use"
                " `overwrite=True` to replace."
            )

        # Wrap the legacy Format in the modern PluginConfig structure.
        config = PluginConfig(
            name=iio_format.name.upper(),
            class_name=iio_format.__class__.__name__,
            module_name=iio_format.__class__.__module__,
            is_legacy=True,
            install_name="unknown",
            legacy_args={
                "name": iio_format.name,
                "description": iio_format.description,
                "extensions": " ".join(iio_format.extensions),
                "modes": iio_format.modes,
            },
        )

        known_plugins[config.name] = config

        for extension in iio_format.extensions:
            # be conservative and always treat it as a unique file format
            ext = FileExtension(
                extension=extension,
                priority=[config.name],
                name="Unique Format",
                description="A format inserted at runtime."
                f" It is being read by the `{config.name}` plugin.",
            )
            known_extensions.setdefault(extension, list()).append(ext)

    def search_read_format(self, request):
        """search_read_format(request)

        Search a format that can read a file according to the given request.
        Returns None if no appropriate format was found. (used internally)
        """

        try:
            # in legacy_mode imopen returns a LegacyPlugin
            return imopen(request, request.mode.io_mode, legacy_mode=True)._format
        except ValueError:
            # no plugin can read this request
            # but the legacy API doesn't raise
            return None

    def search_write_format(self, request):
        """search_write_format(request)

        Search a format that can write a file according to the given request.
        Returns None if no appropriate format was found. (used internally)
        """

        try:
            # in legacy_mode imopen returns a LegacyPlugin
            return imopen(request, request.mode.io_mode, legacy_mode=True)._format
        except ValueError:
            # no plugin can write this request
            # but the legacy API doesn't raise
            return None

    def get_format_names(self):
        """Get the names of all registered formats."""

        warnings.warn(
            "`FormatManager` is deprecated and it will be removed in ImageIO v3."
            "To migrate `FormatManager.get_format_names` use `iio.config.known_plugins.keys()` instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        return [f.name for f in self._formats]

    def show(self):
        """Show a nicely formatted list of available formats"""
        print(self)
|
||||
@@ -1,87 +0,0 @@
|
||||
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..typing import ArrayLike
|
||||
from . import Array
|
||||
from .request import Request
|
||||
from ..config import PluginConfig
|
||||
|
||||
def _get_config(plugin: str) -> PluginConfig: ...
|
||||
|
||||
class Format(object):
    # Type stub for the legacy (v2) plugin base class.
    @property
    def doc(self) -> str: ...
    @property
    def name(self) -> str: ...
    @property
    def description(self) -> str: ...
    @property
    def extensions(self) -> List[str]: ...
    @property
    def modes(self) -> str: ...
    def __init__(
        self,
        name: str,
        description: str,
        extensions: Union[str, list, tuple, None] = None,
        # `= None` with a bare `str` annotation is implicit-Optional;
        # spell the Optional out explicitly.
        modes: Optional[str] = None,
    ) -> None: ...
    def __repr__(self) -> str: ...
    def __str__(self) -> str: ...
    def get_reader(self, request: Request) -> Reader: ...
    def get_writer(self, request: Request) -> Writer: ...
    def can_read(self, request: Request) -> bool: ...
    def can_write(self, request: Request) -> bool: ...
    def _can_read(self, request: Request) -> bool: ...
    def _can_write(self, request: Request) -> bool: ...
|
||||
|
||||
class _BaseReaderWriter(object):
    # Shared base of the legacy Reader/Writer pair; pairs a Format with a
    # Request and manages the open/closed lifecycle.
    @property
    def format(self) -> Format: ...
    @property
    def request(self) -> Request: ...
    @property
    def closed(self) -> bool: ...
    def __init__(self, format: Format, request: Request) -> None: ...
    # The original annotation named `Format._BaseReaderWriter`, which does
    # not exist; the context manager returns the instance itself.
    def __enter__(self) -> "_BaseReaderWriter": ...
    def __exit__(self, type, value, traceback) -> None: ...
    def __del__(self) -> None: ...
    def close(self) -> None: ...
    def _checkClosed(self, msg=None) -> None: ...
    def _open(self, **kwargs) -> None: ...
    def _close(self) -> None: ...
|
||||
|
||||
class Reader(_BaseReaderWriter):
    # NOTE(review): get_length may also return float("inf") for streams of
    # unknown length; the annotation keeps the common int case.
    def get_length(self) -> int: ...
    def get_data(self, index: int, **kwargs) -> Array: ...
    # get_next_data delegates to get_data and therefore returns an Array,
    # not a plain dict.
    def get_next_data(self, **kwargs) -> Array: ...
    def set_image_index(self, index: int, **kwargs) -> None: ...
    # index defaults to None (file-global meta data).
    def get_meta_data(self, index: Optional[int] = None) -> Dict[str, Any]: ...
    # iter_data is a generator yielding Array instances; likewise __iter__.
    def iter_data(self) -> Iterator[Array]: ...
    def __iter__(self) -> Iterator[Array]: ...
    def __len__(self) -> int: ...
    def _get_length(self) -> int: ...
    # Plugins return an (image, meta) pair: the implementation unpacks
    # `im, meta = self._get_data(index)`.
    def _get_data(self, index: int) -> Tuple[np.ndarray, Dict[str, Any]]: ...
    def _get_meta_data(self, index: Optional[int]) -> Dict[str, Any]: ...
|
||||
|
||||
class Writer(_BaseReaderWriter):
    # meta defaults to None and is merged with the image's own meta data;
    # spell the implicit Optional out explicitly.
    def append_data(
        self, im: ArrayLike, meta: Optional[Dict[str, Any]] = None
    ) -> None: ...
    def set_meta_data(self, meta: Dict[str, Any]) -> None: ...
    def _append_data(self, im: ArrayLike, meta: Dict[str, Any]) -> None: ...
    def _set_meta_data(self, meta: Dict[str, Any]) -> None: ...
|
||||
|
||||
class FormatManager(object):
    # Type stub for the deprecated v2 singleton plugin factory.
    @property
    def _formats(self) -> List[Format]: ...
    def __repr__(self) -> str: ...
    # __iter__ must return an iterator, not a single Format; the
    # implementation returns `iter(x.format for x in self._formats)`.
    def __iter__(self) -> Iterator[Format]: ...
    def __len__(self) -> int: ...
    def __str__(self) -> str: ...
    def __getitem__(self, name: str) -> Format: ...
    def sort(self, *names: str) -> None: ...
    def add_format(self, iio_format: Format, overwrite: bool = False) -> None: ...
    def search_read_format(self, request: Request) -> Optional[Format]: ...
    def search_write_format(self, request: Request) -> Optional[Format]: ...
    def get_format_names(self) -> List[str]: ...
    def show(self) -> None: ...
|
||||
@@ -1,298 +0,0 @@
|
||||
from pathlib import Path
|
||||
import warnings
|
||||
|
||||
from ..config import known_plugins
|
||||
from ..config.extensions import known_extensions
|
||||
from .request import (
|
||||
SPECIAL_READ_URIS,
|
||||
URI_FILENAME,
|
||||
InitializationError,
|
||||
IOMode,
|
||||
Request,
|
||||
)
|
||||
|
||||
|
||||
def imopen(
    uri,
    io_mode,
    *,
    plugin=None,
    extension=None,
    format_hint=None,
    legacy_mode=False,
    **kwargs,
):
    """Open an ImageResource.

    .. warning::
        This warning is for pypy users. If you are not using a context manager,
        remember to deconstruct the returned plugin to avoid leaking the file
        handle to an unclosed file.

    Parameters
    ----------
    uri : str or pathlib.Path or bytes or file or Request
        The :doc:`ImageResource <../../user_guide/requests>` to load the
        image from.
    io_mode : str
        The mode in which the file is opened. Possible values are::

            ``r`` - open the file for reading
            ``w`` - open the file for writing

        Deprecated since v2.9:
        A second character can be added to give the reader a hint on what
        the user expects. This will be ignored by new plugins and will
        only have an effect on legacy plugins. Possible values are::

            ``i`` for a single image,
            ``I`` for multiple images,
            ``v`` for a single volume,
            ``V`` for multiple volumes,
            ``?`` for don't care (default)

    plugin : str, Plugin, or None
        The plugin to use. If set to None (default) imopen will perform a
        search for a matching plugin. If not None, this takes priority over
        the provided format hint.
    extension : str
        If not None, treat the provided ImageResource as if it had the given
        extension. This affects the order in which backends are considered, and
        when writing this may also influence the format used when encoding.
    format_hint : str
        Deprecated. Use `extension` instead.
    legacy_mode : bool
        If true (default) use the v2 behavior when searching for a suitable
        plugin. This will ignore v3 plugins and will check ``plugin``
        against known extensions if no plugin with the given name can be found.
    **kwargs : Any
        Additional keyword arguments will be passed to the plugin upon
        construction.

    Notes
    -----
    Registered plugins are controlled via the ``known_plugins`` dict in
    ``imageio.config``.

    Passing a ``Request`` as the uri is only supported if ``legacy_mode``
    is ``True``. In this case ``io_mode`` is ignored.

    Using the kwarg ``format_hint`` does not enforce the given format. It merely
    provides a `hint` to the selection process and plugin. The selection
    processes uses this hint for optimization; however, a plugin's decision how
    to read a ImageResource will - typically - still be based on the content of
    the resource.


    Examples
    --------

    >>> import imageio.v3 as iio
    >>> with iio.imopen("/path/to/image.png", "r") as file:
    >>>     im = file.read()

    >>> with iio.imopen("/path/to/output.jpg", "w") as file:
    >>>     file.write(im)

    """

    if isinstance(uri, Request) and legacy_mode:
        warnings.warn(
            "`iio.core.Request` is a low-level object and using it"
            " directly as input to `imopen` is discouraged. This will raise"
            " an exception in ImageIO v3.",
            DeprecationWarning,
            stacklevel=2,
        )

        request = uri
        uri = request.raw_uri
        io_mode = request.mode.io_mode
        request.format_hint = format_hint
    else:
        request = Request(uri, io_mode, format_hint=format_hint, extension=extension)

    # Human-readable resource name for error messages.
    source = "<bytes>" if isinstance(uri, bytes) else uri

    # fast-path based on plugin
    # (except in legacy mode)
    if plugin is not None:
        if isinstance(plugin, str):
            try:
                config = known_plugins[plugin]
            except KeyError:
                request.finish()
                raise ValueError(
                    f"`{plugin}` is not a registered plugin name."
                ) from None

            def loader(request, **kwargs):
                return config.plugin_class(request, **kwargs)

        elif not legacy_mode:
            # `plugin` is a plugin class; instantiate it directly.
            def loader(request, **kwargs):
                return plugin(request, **kwargs)

        else:
            request.finish()
            raise ValueError("The `plugin` argument must be a string.")

        try:
            return loader(request, **kwargs)
        except InitializationError as class_specific:
            err_from = class_specific
            err_type = RuntimeError if legacy_mode else IOError
            err_msg = f"`{plugin}` can not handle the given uri."
        except ImportError:
            # NOTE(review): `config` is only bound when `plugin` was given as
            # a string; a plugin *class* raising ImportError here would
            # surface as a NameError instead — confirm intended behavior.
            err_from = None
            err_type = ImportError
            err_msg = (
                f"The `{config.name}` plugin is not installed. "
                f"Use `pip install imageio[{config.install_name}]` to install it."
            )
        except Exception as generic_error:
            err_from = generic_error
            err_type = IOError
            err_msg = f"An unknown error occurred while initializing plugin `{plugin}`."

        request.finish()
        raise err_type(err_msg) from err_from

    # fast-path based on format_hint
    if request.format_hint is not None:
        # NOTE(review): an unknown format_hint raises KeyError here — confirm
        # whether callers guarantee the hint is a registered extension.
        for candidate_format in known_extensions[format_hint]:
            for plugin_name in candidate_format.priority:
                config = known_plugins[plugin_name]

                # v2 compatibility; delete in v3
                if legacy_mode and not config.is_legacy:
                    continue

                try:
                    candidate_plugin = config.plugin_class
                except ImportError:
                    # not installed
                    continue

                try:
                    plugin_instance = candidate_plugin(request, **kwargs)
                except InitializationError:
                    # file extension doesn't match file type
                    continue

                return plugin_instance
        else:
            # No candidate matched; warn but fall through to the other
            # resolution strategies below.
            resource = (
                "<bytes>" if isinstance(request.raw_uri, bytes) else request.raw_uri
            )
            warnings.warn(f"`{resource}` can not be opened as a `{format_hint}` file.")

    # fast-path based on file extension
    if request.extension in known_extensions:
        for candidate_format in known_extensions[request.extension]:
            for plugin_name in candidate_format.priority:
                config = known_plugins[plugin_name]

                # v2 compatibility; delete in v3
                if legacy_mode and not config.is_legacy:
                    continue

                try:
                    candidate_plugin = config.plugin_class
                except ImportError:
                    # not installed
                    continue

                try:
                    plugin_instance = candidate_plugin(request, **kwargs)
                except InitializationError:
                    # file extension doesn't match file type
                    continue

                return plugin_instance

    # error out for read-only special targets
    # this is hacky; can we come up with a better solution for this?
    if request.mode.io_mode == IOMode.write:
        if isinstance(uri, str) and uri.startswith(SPECIAL_READ_URIS):
            request.finish()
            err_type = ValueError if legacy_mode else IOError
            err_msg = f"`{source}` is read-only."
            raise err_type(err_msg)

    # error out for directories
    # this is a bit hacky and should be cleaned once we decide
    # how to gracefully handle DICOM
    if request._uri_type == URI_FILENAME and Path(request.raw_uri).is_dir():
        request.finish()
        err_type = ValueError if legacy_mode else IOError
        err_msg = (
            "ImageIO does not generally support reading folders. "
            "Limited support may be available via specific plugins. "
            "Specify the plugin explicitly using the `plugin` kwarg, e.g. `plugin='DICOM'`"
        )
        raise err_type(err_msg)

    # close the current request here and use fresh/new ones while trying each
    # plugin This is slow (means potentially reopening a resource several
    # times), but should only happen rarely because this is the fallback if all
    # else fails.
    request.finish()

    # fallback option: try all plugins
    for config in known_plugins.values():
        # Note: for v2 compatibility
        # this branch can be removed in ImageIO v3.0
        if legacy_mode and not config.is_legacy:
            continue

        # each plugin gets its own request
        # (bugfix: propagate the explicit `extension` override, matching the
        # Request constructed at the top of this function)
        request = Request(uri, io_mode, format_hint=format_hint, extension=extension)

        try:
            plugin_instance = config.plugin_class(request, **kwargs)
        except InitializationError:
            continue
        except ImportError:
            continue
        else:
            return plugin_instance

    err_type = ValueError if legacy_mode else IOError
    err_msg = f"Could not find a backend to open `{source}` with iomode `{io_mode}`."

    # check if a missing plugin could help
    if request.extension in known_extensions:
        missing_plugins = list()

        formats = known_extensions[request.extension]
        plugin_names = [
            plugin for file_format in formats for plugin in file_format.priority
        ]
        for name in plugin_names:
            config = known_plugins[name]

            try:
                # Accessing plugin_class imports the backend; success means
                # the plugin is installed and simply rejected the resource.
                config.plugin_class
                continue
            except ImportError:
                missing_plugins.append(config)

        if len(missing_plugins) > 0:
            install_candidates = "\n".join(
                [
                    (
                        f"  {config.name}: "
                        f"pip install imageio[{config.install_name}]"
                    )
                    for config in missing_plugins
                ]
            )
            err_msg += (
                "\nBased on the extension, the following plugins might add capable backends:\n"
                f"{install_candidates}"
            )

    request.finish()
    raise err_type(err_msg)
|
||||
@@ -1,87 +0,0 @@
|
||||
from typing import Literal, Type, TypeVar, overload
|
||||
|
||||
from ..plugins.opencv import OpenCVPlugin
|
||||
from ..plugins.pillow import PillowPlugin
|
||||
from ..plugins.pyav import PyAVPlugin
|
||||
from ..plugins.tifffile_v3 import TifffilePlugin
|
||||
from ..typing import ImageResource
|
||||
from .legacy_plugin_wrapper import LegacyPlugin
|
||||
from .v3_plugin_api import PluginV3
|
||||
|
||||
# Overload stubs for imopen: specific `plugin=` literals resolve to the
# matching plugin type; `legacy_mode=True` yields the LegacyPlugin wrapper.
CustomPlugin = TypeVar("CustomPlugin", bound=PluginV3)

@overload
def imopen(
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    extension: str = None,
    format_hint: str = None,
) -> PluginV3: ...
@overload
def imopen(
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    plugin: str = None,
    format_hint: str = None,
    extension: str = None,
    legacy_mode: Literal[True],
    **kwargs,
) -> LegacyPlugin: ...
@overload
def imopen(
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    format_hint: str = None,
    extension: str = None,
    legacy_mode: Literal[False] = False,
) -> PluginV3: ...
@overload
def imopen(
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    plugin: Literal["pillow"],
    extension: str = None,
    format_hint: str = None,
) -> PillowPlugin: ...
@overload
def imopen(
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    plugin: Literal["pyav"],
    extension: str = None,
    format_hint: str = None,
    container: str = None,
) -> PyAVPlugin: ...
@overload
def imopen(
    # annotation added for consistency with the sibling overloads
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    plugin: Literal["opencv"],
    extension: str = None,
    format_hint: str = None,
) -> OpenCVPlugin: ...
@overload
def imopen(
    # annotation added for consistency with the sibling overloads
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    plugin: Literal["tifffile"],
    extension: str = None,
    format_hint: str = None,
) -> TifffilePlugin: ...
@overload
def imopen(
    uri: ImageResource,
    io_mode: Literal["r", "w"],
    *,
    plugin: Type[CustomPlugin],
    extension: str = None,
    format_hint: str = None,
    **kwargs,
) -> CustomPlugin: ...
|
||||
@@ -1,363 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..config import known_extensions
|
||||
from .request import InitializationError, IOMode
|
||||
from .v3_plugin_api import ImageProperties, PluginV3
|
||||
|
||||
|
||||
def _legacy_default_index(format):
|
||||
if format._name == "FFMPEG":
|
||||
index = Ellipsis
|
||||
elif format._name == "GIF-PIL":
|
||||
index = Ellipsis
|
||||
else:
|
||||
index = 0
|
||||
|
||||
return index
|
||||
|
||||
|
||||
class LegacyPlugin(PluginV3):
|
||||
"""A plugin to make old (v2.9) plugins compatible with v3.0
|
||||
|
||||
.. depreciated:: 2.9
|
||||
`legacy_get_reader` will be removed in a future version of imageio.
|
||||
`legacy_get_writer` will be removed in a future version of imageio.
|
||||
|
||||
This plugin is a wrapper around the old FormatManager class and exposes
|
||||
all the old plugins via the new API. On top of this it has
|
||||
``legacy_get_reader`` and ``legacy_get_writer`` methods to allow using
|
||||
it with the v2.9 API.
|
||||
|
||||
Methods
|
||||
-------
|
||||
read(index=None, **kwargs)
|
||||
Read the image at position ``index``.
|
||||
write(image, **kwargs)
|
||||
Write image to the URI.
|
||||
iter(**kwargs)
|
||||
Iteratively yield images from the given URI.
|
||||
get_meta(index=None)
|
||||
Return the metadata for the image at position ``index``.
|
||||
legacy_get_reader(**kwargs)
|
||||
Returns the v2.9 image reader. (depreciated)
|
||||
legacy_get_writer(**kwargs)
|
||||
Returns the v2.9 image writer. (depreciated)
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
>>> import imageio.v3 as iio
|
||||
>>> with iio.imopen("/path/to/image.tiff", "r", legacy_mode=True) as file:
|
||||
>>> reader = file.legacy_get_reader() # depreciated
|
||||
>>> for im in file.iter():
|
||||
>>> print(im.shape)
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, request, legacy_plugin):
|
||||
"""Instantiate a new Legacy Plugin
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uri : {str, pathlib.Path, bytes, file}
|
||||
The resource to load the image from, e.g. a filename, pathlib.Path,
|
||||
http address or file object, see the docs for more info.
|
||||
legacy_plugin : Format
|
||||
The (legacy) format to use to interface with the URI.
|
||||
|
||||
"""
|
||||
self._request = request
|
||||
self._format = legacy_plugin
|
||||
|
||||
source = (
|
||||
"<bytes>"
|
||||
if isinstance(self._request.raw_uri, bytes)
|
||||
else self._request.raw_uri
|
||||
)
|
||||
if self._request.mode.io_mode == IOMode.read:
|
||||
if not self._format.can_read(request):
|
||||
raise InitializationError(
|
||||
f"`{self._format.name}`" f" can not read `{source}`."
|
||||
)
|
||||
else:
|
||||
if not self._format.can_write(request):
|
||||
raise InitializationError(
|
||||
f"`{self._format.name}`" f" can not write to `{source}`."
|
||||
)
|
||||
|
||||
    def legacy_get_reader(self, **kwargs):
        """legacy_get_reader(**kwargs)

        a utility method to provide support for the V2.9 API

        Parameters
        ----------
        kwargs : ...
            Further keyword arguments are passed to the reader. See :func:`.help`
            to see what arguments are available for a particular format.
        """

        # Note: this will break thread-safety
        self._request._kwargs = kwargs

        # safeguard for DICOM plugin reading from folders
        try:
            assert Path(self._request.filename).is_dir()
        except OSError:
            pass  # not a valid path on this OS
        except AssertionError:
            pass  # not a folder
        else:
            # directory input (e.g. a DICOM series): hand the request to the
            # reader directly, without opening a file object
            return self._format.get_reader(self._request)

        # rewind in case firstbytes (or a previous reader) advanced the file
        self._request.get_file().seek(0)
        return self._format.get_reader(self._request)
|
||||
|
||||
def read(self, *, index=None, **kwargs):
|
||||
"""
|
||||
Parses the given URI and creates a ndarray from it.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : {integer, None}
|
||||
If the URI contains a list of ndimages return the index-th
|
||||
image. If None, stack all images into an ndimage along the
|
||||
0-th dimension (equivalent to np.stack(imgs, axis=0)).
|
||||
kwargs : ...
|
||||
Further keyword arguments are passed to the reader. See
|
||||
:func:`.help` to see what arguments are available for a particular
|
||||
format.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ndimage : np.ndarray
|
||||
A numpy array containing the decoded image data.
|
||||
|
||||
"""
|
||||
|
||||
if index is None:
|
||||
index = _legacy_default_index(self._format)
|
||||
|
||||
if index is Ellipsis:
|
||||
img = np.stack([im for im in self.iter(**kwargs)])
|
||||
return img
|
||||
|
||||
reader = self.legacy_get_reader(**kwargs)
|
||||
return reader.get_data(index)
|
||||
|
||||
def legacy_get_writer(self, **kwargs):
|
||||
"""legacy_get_writer(**kwargs)
|
||||
|
||||
Returns a :class:`.Writer` object which can be used to write data
|
||||
and meta data to the specified file.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
kwargs : ...
|
||||
Further keyword arguments are passed to the writer. See :func:`.help`
|
||||
to see what arguments are available for a particular format.
|
||||
"""
|
||||
|
||||
# Note: this will break thread-safety
|
||||
self._request._kwargs = kwargs
|
||||
return self._format.get_writer(self._request)
|
||||
|
||||
    def write(self, ndimage, *, is_batch=None, metadata=None, **kwargs):
        """
        Write an ndimage to the URI specified in path.

        If the URI points to a file on the current host and the file does not
        yet exist it will be created. If the file exists already, it will be
        appended if possible; otherwise, it will be replaced.

        Parameters
        ----------
        ndimage : numpy.ndarray
            The ndimage or list of ndimages to write.
        is_batch : bool
            If True, treat the supplied ndimage as a batch of images. If False,
            treat the supplied ndimage as a single image. If None, try to
            determine ``is_batch`` from the ndimage's shape and ndim.
        metadata : dict
            The metadata passed to write alongside the image.
        kwargs : ...
            Further keyword arguments are passed to the writer. See
            :func:`.help` to see what arguments are available for a
            particular format.


        Returns
        -------
        buffer : bytes
            When writing to the special target "<bytes>", this function will
            return the encoded image data as a bytes string. Otherwise it
            returns None.

        Notes
        -----
        Automatically determining ``is_batch`` may fail for some images due to
        shape aliasing. For example, it may classify a channel-first color image
        as a batch of gray images. In most cases this automatic deduction works
        fine (it has for almost a decade), but if you do have one of those edge
        cases (or are worried that you might) consider explicitly setting
        ``is_batch``.

        """

        if is_batch or isinstance(ndimage, (list, tuple)):
            pass  # ndimage is list of images
        elif is_batch is False:
            ndimage = [ndimage]
        else:
            # Write the largest possible block by guessing the meaning of each
            # dimension from the shape/ndim and then checking if any batch
            # dimensions are left.
            ndimage = np.asanyarray(ndimage)
            batch_dims = ndimage.ndim

            # two spatial dimensions
            batch_dims = max(batch_dims - 2, 0)

            # packed (channel-last) image
            if ndimage.ndim >= 3 and ndimage.shape[-1] < 5:
                batch_dims = max(batch_dims - 1, 0)

            # format supports volumetric images
            ext_infos = known_extensions.get(self._request.extension, list())
            for ext_info in ext_infos:
                if self._format.name in ext_info.priority and ext_info.volume_support:
                    batch_dims = max(batch_dims - 1, 0)
                    break

            if batch_dims == 0:
                # every dimension is accounted for: treat as a single
                # (possibly volumetric) image, not a batch
                ndimage = [ndimage]

        with self.legacy_get_writer(**kwargs) as writer:
            for image in ndimage:
                image = np.asanyarray(image)

                if image.ndim < 2:
                    raise ValueError(
                        "The image must have at least two spatial dimensions."
                    )

                if not np.issubdtype(image.dtype, np.number) and not np.issubdtype(
                    image.dtype, bool
                ):
                    raise ValueError(
                        f"All images have to be numeric, and not `{image.dtype}`."
                    )

                writer.append_data(image, metadata)

        # only the "<bytes>" target produces a result; otherwise this is None
        return writer.request.get_result()
|
||||
|
||||
def iter(self, **kwargs):
|
||||
"""Iterate over a list of ndimages given by the URI
|
||||
|
||||
Parameters
|
||||
----------
|
||||
kwargs : ...
|
||||
Further keyword arguments are passed to the reader. See
|
||||
:func:`.help` to see what arguments are available for a particular
|
||||
format.
|
||||
"""
|
||||
|
||||
reader = self.legacy_get_reader(**kwargs)
|
||||
for image in reader:
|
||||
yield image
|
||||
|
||||
    def properties(self, index=None):
        """Standardized ndimage metadata.

        Parameters
        ----------
        index : int
            The index of the ndimage for which to return properties. If the
            index is out of bounds a ``ValueError`` is raised. If ``None``,
            return the properties for the ndimage stack. If this is impossible,
            e.g., due to shape mismatch, an exception will be raised.

        Returns
        -------
        properties : ImageProperties
            A dataclass filled with standardized image metadata.

        """

        if index is None:
            index = _legacy_default_index(self._format)

        # for backwards compatibility ... actually reads pixel data :(
        if index is Ellipsis:
            # stack shape/dtype are derived from the first image plus the
            # reader-reported sequence length
            image = self.read(index=0)
            n_images = self.legacy_get_reader().get_length()
            return ImageProperties(
                shape=(n_images, *image.shape),
                dtype=image.dtype,
                n_images=n_images,
                is_batch=True,
            )

        image = self.read(index=index)
        return ImageProperties(
            shape=image.shape,
            dtype=image.dtype,
            is_batch=False,
        )
|
||||
|
||||
    def get_meta(self, *, index=None):
        """Read ndimage metadata from the URI

        Parameters
        ----------
        index : {integer, None}
            If the URI contains a list of ndimages return the metadata
            corresponding to the index-th image. If None, behavior depends on
            the used api

            Legacy-style API: return metadata of the first element (index=0)
            New-style API: Behavior depends on the used Plugin.

        Returns
        -------
        metadata : dict
            A dictionary of metadata.

        """

        # thin alias: legacy plugins always report everything they find
        return self.metadata(index=index, exclude_applied=False)
|
||||
|
||||
def metadata(self, index=None, exclude_applied: bool = True):
|
||||
"""Format-Specific ndimage metadata.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
The index of the ndimage to read. If the index is out of bounds a
|
||||
``ValueError`` is raised. If ``None``, global metadata is returned.
|
||||
exclude_applied : bool
|
||||
This parameter exists for compatibility and has no effect. Legacy
|
||||
plugins always report all metadata they find.
|
||||
|
||||
Returns
|
||||
-------
|
||||
metadata : dict
|
||||
A dictionary filled with format-specific metadata fields and their
|
||||
values.
|
||||
|
||||
"""
|
||||
|
||||
if index is None:
|
||||
index = _legacy_default_index(self._format)
|
||||
|
||||
return self.legacy_get_reader().get_meta_data(index=index)
|
||||
|
||||
    def __del__(self) -> None:
        # Intentionally a no-op:
        # turns out we can't close the file here for LegacyPlugin
        # because it would break backwards compatibility
        # with legacy_get_writer and legacy_get_reader
        # self._request.finish()
        pass
|
||||
@@ -1,27 +0,0 @@
|
||||
import numpy as np
|
||||
from typing import Optional, Dict, Any, Union, List, Iterator
|
||||
|
||||
from .request import Request
|
||||
from .v3_plugin_api import PluginV3, ImageProperties
|
||||
from .format import Format
|
||||
from ..typing import ArrayLike
|
||||
|
||||
class LegacyPlugin(PluginV3):
    """Typing stub for the v2-to-v3 compatibility wrapper (see implementation)."""

    def __init__(self, request: Request, legacy_plugin: Format) -> None: ...
    def legacy_get_reader(self, **kwargs) -> Format.Reader: ...
    def read(self, *, index: Optional[int] = 0, **kwargs) -> np.ndarray: ...
    def legacy_get_writer(self, **kwargs) -> Format.Writer: ...
    def write(
        self,
        ndimage: Union[ArrayLike, List[ArrayLike]],
        *,
        is_batch: Optional[bool] = None,
        **kwargs
    ) -> Optional[bytes]: ...
    def iter(self, **kwargs) -> Iterator[np.ndarray]: ...
    def properties(self, index: Optional[int] = 0) -> ImageProperties: ...
    def get_meta(self, *, index: Optional[int] = 0) -> Dict[str, Any]: ...
    def metadata(
        self, index: Optional[int] = 0, exclude_applied: bool = True
    ) -> Dict[str, Any]: ...
    def __del__(self) -> None: ...
|
||||
@@ -1,750 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""
|
||||
Definition of the Request object, which acts as a kind of bridge between
|
||||
what the user wants and what the plugins can.
|
||||
"""
|
||||
|
||||
import os
|
||||
from io import BytesIO
|
||||
import zipfile
|
||||
import tempfile
|
||||
import shutil
|
||||
import enum
|
||||
import warnings
|
||||
|
||||
from ..core import urlopen, get_remote_file
|
||||
|
||||
from pathlib import Path
|
||||
from urllib.parse import urlparse
|
||||
from typing import Optional
|
||||
|
||||
# URI types (assigned by Request._parse_uri from the kind of uri given)
URI_BYTES = 1  # raw ``bytes``/``memoryview`` input, or the "<bytes>" target
URI_FILE = 2  # a user-supplied file-like object (has read/write and close)
URI_FILENAME = 3  # a path on the local filesystem
URI_ZIPPED = 4  # a file inside a zip archive ("archive.zip/inner/file")
URI_HTTP = 5  # an http(s):// URL (read-only)
URI_FTP = 6  # an ftp(s):// URL (read-only)
|
||||
|
||||
|
||||
class IOMode(str, enum.Enum):
    """Available IO modes

    This is a helper enum for ``Request.Mode`` which is a composite of a
    ``Request.ImageMode`` and ``Request.IOMode``. The IOMode tells the
    plugin if the resource should be read from or written to. Available values are

    - read ("r"): Read from the specified resource
    - write ("w"): Write to the specified resource

    """

    read = "r"
    write = "w"
|
||||
|
||||
|
||||
class ImageMode(str, enum.Enum):
    """Available Image modes

    This is a helper enum for ``Request.Mode`` which is a composite of a
    ``Request.ImageMode`` and ``Request.IOMode``. The image mode that tells the
    plugin the desired (and expected) image shape. Available values are

    - single_image ("i"): Return a single image extending in two spatial
      dimensions
    - multi_image ("I"): Return a list of images extending in two spatial
      dimensions
    - single_volume ("v"): Return an image extending into multiple dimensions.
      E.g. three spatial dimensions for image stacks, or two spatial and one
      time dimension for videos
    - multi_volume ("V"): Return a list of images extending into multiple
      dimensions.
    - any_mode ("?"): Return an image in any format (the plugin decides the
      appropriate action).

    """

    single_image = "i"
    multi_image = "I"
    single_volume = "v"
    multi_volume = "V"
    any_mode = "?"
|
||||
|
||||
|
||||
@enum.unique
class Mode(str, enum.Enum):
    """The mode to use when interacting with the resource

    ``Request.Mode`` is a composite of ``Request.ImageMode`` and
    ``Request.IOMode``. The image mode that tells the plugin the desired (and
    expected) image shape and the ``Request.IOMode`` tells the plugin the way
    the resource should be interacted with. For a detailed description of the
    available modes, see the documentation for ``Request.ImageMode`` and
    ``Request.IOMode`` respectively.

    Available modes are all combinations of ``Request.IOMode`` and ``Request.ImageMode``:

    - read_single_image ("ri")
    - read_multi_image ("rI")
    - read_single_volume ("rv")
    - read_multi_volume ("rV")
    - read_any ("r?")
    - write_single_image ("wi")
    - write_multi_image ("wI")
    - write_single_volume ("wv")
    - write_multi_volume ("wV")
    - write_any ("w?")

    Examples
    --------
    >>> Request.Mode("rI")  # a list of simple images should be read from the resource
    >>> Request.Mode("wv")  # a single volume should be written to the resource

    """

    read_single_image = "ri"
    read_multi_image = "rI"
    read_single_volume = "rv"
    read_multi_volume = "rV"
    read_any = "r?"
    write_single_image = "wi"
    write_multi_image = "wI"
    write_single_volume = "wv"
    write_multi_volume = "wV"
    write_any = "w?"

    @classmethod
    def _missing_(cls, value):
        """Enable Mode("r") and Mode("w")

        The sunder method ``_missing_`` is called whenever the constructor fails
        to directly look up the corresponding enum value from the given input.
        In our case, we use it to convert the modes "r" and "w" (from the v3
        API) into their legacy versions "r?" and "w?".

        More info on _missing_:
        https://docs.python.org/3/library/enum.html#supported-sunder-names
        """

        if value == "r":
            return cls("r?")
        elif value == "w":
            return cls("w?")
        else:
            raise ValueError(f"{value} is no valid Mode.")

    @property
    def io_mode(self) -> IOMode:
        # first character of the value encodes read/write
        return IOMode(self.value[0])

    @property
    def image_mode(self) -> ImageMode:
        # second character of the value encodes the expected image shape
        return ImageMode(self.value[1])

    def __getitem__(self, key):
        """For backwards compatibility with the old non-enum modes"""
        if key == 0:
            return self.io_mode
        elif key == 1:
            return self.image_mode
        else:
            raise IndexError(f"Mode has no item {key}")
|
||||
|
||||
|
||||
# Pseudo-URIs that are read from a device rather than a file.
SPECIAL_READ_URIS = "<video", "<screen>", "<clipboard>"

# The user can use this string in a write call to get the data back as bytes.
RETURN_BYTES = "<bytes>"

# Example images that will be auto-downloaded (via the "imageio:" URI scheme);
# values are human-readable descriptions only.
# Fixed typos in two descriptions: "chekerboard" -> "checkerboard" and a
# missing space in "BSDF file(for testing)".
EXAMPLE_IMAGES = {
    "astronaut.png": "Image of the astronaut Eileen Collins",
    "camera.png": "A grayscale image of a photographer",
    "checkerboard.png": "Black and white image of a checkerboard",
    "wood.jpg": "A (repeatable) texture of wooden planks",
    "bricks.jpg": "A (repeatable) texture of stone bricks",
    "clock.png": "Photo of a clock with motion blur (Stefan van der Walt)",
    "coffee.png": "Image of a cup of coffee (Rachel Michetti)",
    "chelsea.png": "Image of Stefan's cat",
    "wikkie.png": "Image of Almar's cat",
    "coins.png": "Image showing greek coins from Pompeii",
    "horse.png": "Image showing the silhouette of a horse (Andreas Preuss)",
    "hubble_deep_field.png": "Photograph taken by Hubble telescope (NASA)",
    "immunohistochemistry.png": "Immunohistochemical (IHC) staining",
    "moon.png": "Image showing a portion of the surface of the moon",
    "page.png": "A scanned page of text",
    "text.png": "A photograph of handdrawn text",
    "chelsea.zip": "The chelsea.png in a zipfile (for testing)",
    "chelsea.bsdf": "The chelsea.png in a BSDF file (for testing)",
    "newtonscradle.gif": "Animated GIF of a newton's cradle",
    "cockatoo.mp4": "Video file of a cockatoo",
    "stent.npz": "Volumetric image showing a stented abdominal aorta",
    "meadow_cube.jpg": "A cubemap image of a meadow, e.g. to render a skybox.",
}
|
||||
|
||||
|
||||
class Request(object):
    """ImageResource handling utility.

    Represents a request for reading or saving an image resource. This
    object wraps information to that request and acts as an interface
    for the plugins to several resources; it allows the user to read
    from filenames, files, http, zipfiles, raw bytes, etc., but offers
    a simple interface to the plugins via ``get_file()`` and
    ``get_local_filename()``.

    For each read/write operation a single Request instance is used and passed
    to the can_read/can_write method of a format, and subsequently to
    the Reader/Writer class. This allows rudimentary passing of
    information between different formats and between a format and
    associated reader/writer.

    Parameters
    ----------
    uri : {str, bytes, file}
        The resource to load the image from.
    mode : str
        The first character is "r" or "w", indicating a read or write
        request. The second character is used to indicate the kind of data:
        "i" for an image, "I" for multiple images, "v" for a volume,
        "V" for multiple volumes, "?" for don't care.

    """
|
||||
|
||||
    def __init__(self, uri, mode, *, extension=None, format_hint: str = None, **kwargs):
        """Initialize the request: validate ``mode``, parse ``uri``, and
        determine the file extension used later for plugin selection.

        Parameters
        ----------
        uri : {str, bytes, Path, file}
            The resource this request refers to.
        mode : str
            IO mode string understood by ``Mode`` (e.g. "r", "w", "ri").
        extension : str
            Optional extension override; must start with a ".".
        format_hint : str
            Deprecated alias for ``extension``.
        kwargs : ...
            Additional keyword arguments stored for the plugin.
        """
        # General
        self.raw_uri = uri
        self._uri_type = None
        self._filename = None
        self._extension = None
        self._format_hint = None
        self._kwargs = kwargs
        self._result = None  # Some write actions may have a result

        # To handle the user-side
        self._filename_zip = None  # not None if a zipfile is used
        self._bytes = None  # Incoming bytes
        self._zipfile = None  # To store a zipfile instance (if used)

        # To handle the plugin side
        self._file = None  # To store the file instance
        self._file_is_local = False  # whether the data needs to be copied at end
        self._filename_local = None  # not None if using tempfile on this FS
        self._firstbytes = None  # For easy header parsing

        # To store formats that may be able to fulfil this request
        # self._potential_formats = []

        # Check mode
        try:
            self._mode = Mode(mode)
        except ValueError:
            raise ValueError(f"Invalid Request.Mode: {mode}")

        # Parse what was given
        self._parse_uri(uri)

        # Set extension
        if extension is not None:
            if extension[0] != ".":
                raise ValueError(
                    "`extension` should be a file extension starting with a `.`,"
                    f" but is `{extension}`."
                )
            self._extension = extension
        elif self._filename is not None:
            # derive the extension from the (local or remote) filename;
            # for URLs, query strings etc. are stripped via urlparse
            if self._uri_type in (URI_FILENAME, URI_ZIPPED):
                path = self._filename
            else:
                path = urlparse(self._filename).path
            ext = Path(path).suffix.lower()
            self._extension = ext if ext != "" else None

        if format_hint is not None:
            warnings.warn(
                "The usage of `format_hint` is deprecated and will be removed "
                "in ImageIO v3. Use `extension` instead.",
                DeprecationWarning,
            )

        if format_hint is not None and format_hint[0] != ".":
            raise ValueError(
                "`format_hint` should be a file extension starting with a `.`,"
                f" but is `{format_hint}`."
            )

        # the setter also fills in self._extension when it is still None
        self.format_hint = format_hint
|
||||
|
||||
    def _parse_uri(self, uri):
        """Try to figure out what we were given.

        Classifies ``uri`` into one of the ``URI_*`` kinds, stores the
        resolved filename/bytes/file object, and validates that the
        resource can be used for this request's IO mode.
        """
        is_read_request = self.mode.io_mode is IOMode.read
        is_write_request = self.mode.io_mode is IOMode.write

        if isinstance(uri, str):
            # Explicit
            if uri.startswith("imageio:"):
                # special scheme for the bundled (auto-downloaded) example images
                if is_write_request:
                    raise RuntimeError("Cannot write to the standard images.")
                fn = uri.split(":", 1)[-1].lower()
                fn, _, zip_part = fn.partition(".zip/")
                if zip_part:
                    fn += ".zip"
                if fn not in EXAMPLE_IMAGES:
                    raise ValueError("Unknown standard image %r." % fn)
                self._uri_type = URI_FILENAME
                self._filename = get_remote_file("images/" + fn, auto=True)
                if zip_part:
                    self._filename += "/" + zip_part
            elif uri.startswith("http://") or uri.startswith("https://"):
                self._uri_type = URI_HTTP
                self._filename = uri
            elif uri.startswith("ftp://") or uri.startswith("ftps://"):
                self._uri_type = URI_FTP
                self._filename = uri
            elif uri.startswith("file://"):
                self._uri_type = URI_FILENAME
                self._filename = uri[7:]
            elif uri.startswith(SPECIAL_READ_URIS) and is_read_request:
                self._uri_type = URI_BYTES
                self._filename = uri
            elif uri.startswith(RETURN_BYTES) and is_write_request:
                self._uri_type = URI_BYTES
                self._filename = uri
            else:
                self._uri_type = URI_FILENAME
                self._filename = uri

        elif isinstance(uri, memoryview) and is_read_request:
            self._uri_type = URI_BYTES
            self._filename = "<bytes>"
            self._bytes = uri.tobytes()
        elif isinstance(uri, bytes) and is_read_request:
            self._uri_type = URI_BYTES
            self._filename = "<bytes>"
            self._bytes = uri
        elif isinstance(uri, Path):
            self._uri_type = URI_FILENAME
            self._filename = str(uri)
        # Files
        elif is_read_request:
            if hasattr(uri, "read") and hasattr(uri, "close"):
                self._uri_type = URI_FILE
                self._filename = "<file>"
                self._file = uri  # Data must be read from here
        elif is_write_request:
            if hasattr(uri, "write") and hasattr(uri, "close"):
                self._uri_type = URI_FILE
                self._filename = "<file>"
                self._file = uri  # Data must be written here

        # Expand user dir
        if self._uri_type == URI_FILENAME and self._filename.startswith("~"):
            self._filename = os.path.expanduser(self._filename)

        # Check if a zipfile
        if self._uri_type == URI_FILENAME:
            # Search for zip extension followed by a path separator
            for needle in [".zip/", ".zip\\"]:
                zip_i = self._filename.lower().find(needle)
                if zip_i > 0:
                    zip_i += 4
                    zip_path = self._filename[:zip_i]
                    if os.path.isdir(zip_path):
                        pass  # is an existing dir (see #548)
                    elif is_write_request or os.path.isfile(zip_path):
                        self._uri_type = URI_ZIPPED
                        self._filename_zip = (
                            zip_path,
                            self._filename[zip_i:].lstrip("/\\"),
                        )
                        break

        # Check if we could read it
        if self._uri_type is None:
            uri_r = repr(uri)
            if len(uri_r) > 60:
                uri_r = uri_r[:57] + "..."
            raise IOError("Cannot understand given URI: %s." % uri_r)

        # Check if this is supported
        noWriting = [URI_HTTP, URI_FTP]
        if is_write_request and self._uri_type in noWriting:
            raise IOError("imageio does not support writing to http/ftp.")

        # Deprecated way to load standard images, give a sensible error message
        if is_read_request and self._uri_type in [URI_FILENAME, URI_ZIPPED]:
            fn = self._filename
            if self._filename_zip:
                fn = self._filename_zip[0]
            if (not os.path.exists(fn)) and (fn in EXAMPLE_IMAGES):
                raise IOError(
                    "No such file: %r. This file looks like one of "
                    "the standard images, but from imageio 2.1, "
                    "standard images have to be specified using "
                    '"imageio:%s".' % (fn, fn)
                )

        # Make filename absolute
        if self._uri_type in [URI_FILENAME, URI_ZIPPED]:
            if self._filename_zip:
                self._filename_zip = (
                    os.path.abspath(self._filename_zip[0]),
                    self._filename_zip[1],
                )
            else:
                self._filename = os.path.abspath(self._filename)

        # Check whether file name is valid
        if self._uri_type in [URI_FILENAME, URI_ZIPPED]:
            fn = self._filename
            if self._filename_zip:
                fn = self._filename_zip[0]
            if is_read_request:
                # Reading: check that the file exists (but is allowed a dir)
                if not os.path.exists(fn):
                    raise FileNotFoundError("No such file: '%s'" % fn)
            else:
                # Writing: check that the directory to write to does exist
                dn = os.path.dirname(fn)
                if not os.path.exists(dn):
                    raise FileNotFoundError("The directory %r does not exist" % dn)
|
||||
|
||||
    @property
    def filename(self):
        """Name of the ImageResource.


        The uri for which reading/saving was requested. This
        can be a filename, an http address, or other resource
        identifier. Do not rely on the filename to obtain the data,
        but use ``get_file()`` or ``get_local_filename()`` instead.
        """
        return self._filename

    @property
    def extension(self) -> str:
        """The (lowercase) extension of the requested filename.
        Suffixes in url's are stripped. Can be None if the request is
        not based on a filename.
        """
        return self._extension

    @property
    def format_hint(self) -> Optional[str]:
        """Deprecated extension hint (kept for backwards compatibility)."""
        return self._format_hint

    @format_hint.setter
    def format_hint(self, format: str) -> None:
        self._format_hint = format
        # a hint also serves as a fallback extension when none was derived
        if self._extension is None:
            self._extension = format

    @property
    def mode(self):
        """The mode of the request. The first character is "r" or "w",
        indicating a read or write request. The second character is
        used to indicate the kind of data:
        "i" for an image, "I" for multiple images, "v" for a volume,
        "V" for multiple volumes, "?" for don't care.
        """
        return self._mode

    @property
    def kwargs(self):
        """The dict of keyword arguments supplied by the user."""
        return self._kwargs
|
||||
|
||||
# For obtaining data
|
||||
|
||||
def get_file(self):
|
||||
"""get_file()
|
||||
Get a file object for the resource associated with this request.
|
||||
If this is a reading request, the file is in read mode,
|
||||
otherwise in write mode. This method is not thread safe. Plugins
|
||||
should not close the file when done.
|
||||
|
||||
This is the preferred way to read/write the data. But if a
|
||||
format cannot handle file-like objects, they should use
|
||||
``get_local_filename()``.
|
||||
"""
|
||||
want_to_write = self.mode.io_mode is IOMode.write
|
||||
|
||||
# Is there already a file?
|
||||
# Either _uri_type == URI_FILE, or we already opened the file,
|
||||
# e.g. by using firstbytes
|
||||
if self._file is not None:
|
||||
return self._file
|
||||
|
||||
if self._uri_type == URI_BYTES:
|
||||
if want_to_write:
|
||||
# Create new file object, we catch the bytes in finish()
|
||||
self._file = BytesIO()
|
||||
self._file_is_local = True
|
||||
else:
|
||||
self._file = BytesIO(self._bytes)
|
||||
|
||||
elif self._uri_type == URI_FILENAME:
|
||||
if want_to_write:
|
||||
self._file = open(self.filename, "wb")
|
||||
else:
|
||||
self._file = open(self.filename, "rb")
|
||||
|
||||
elif self._uri_type == URI_ZIPPED:
|
||||
# Get the correct filename
|
||||
filename, name = self._filename_zip
|
||||
if want_to_write:
|
||||
# Create new file object, we catch the bytes in finish()
|
||||
self._file = BytesIO()
|
||||
self._file_is_local = True
|
||||
else:
|
||||
# Open zipfile and open new file object for specific file
|
||||
self._zipfile = zipfile.ZipFile(filename, "r")
|
||||
self._file = self._zipfile.open(name, "r")
|
||||
self._file = SeekableFileObject(self._file)
|
||||
|
||||
elif self._uri_type in [URI_HTTP or URI_FTP]:
|
||||
assert not want_to_write # This should have been tested in init
|
||||
timeout = os.getenv("IMAGEIO_REQUEST_TIMEOUT")
|
||||
if timeout is None or not timeout.isdigit():
|
||||
timeout = 5
|
||||
self._file = urlopen(self.filename, timeout=float(timeout))
|
||||
self._file = SeekableFileObject(self._file)
|
||||
|
||||
return self._file
|
||||
|
||||
    def get_local_filename(self):
        """get_local_filename()
        If the filename is an existing file on this filesystem, return
        that. Otherwise a temporary file is created on the local file
        system which can be used by the format to read from or write to.
        """

        if self._uri_type == URI_FILENAME:
            return self._filename
        else:
            # Get filename
            if self.extension is not None:
                ext = self.extension
            else:
                ext = os.path.splitext(self._filename)[1]
            # NOTE(review): tempfile.mktemp only reserves a name (the file is
            # created later), which is race-prone; kept for compatibility —
            # consider tempfile.mkstemp if this is ever revisited.
            self._filename_local = tempfile.mktemp(ext, "imageio_")
            # Write stuff to it?
            # For read requests, materialize the resource into the temp file;
            # the temp file is removed again in finish().
            if self.mode.io_mode == IOMode.read:
                with open(self._filename_local, "wb") as file:
                    shutil.copyfileobj(self.get_file(), file)
            return self._filename_local
|
||||
|
||||
    def finish(self) -> None:
        """Wrap up this request.

        Finishes any pending reads or writes, closes any open files and frees
        any resources allocated by this request.
        """

        if self.mode.io_mode == IOMode.write:
            # See if we "own" the data and must put it somewhere
            bytes = None
            if self._filename_local:
                bytes = Path(self._filename_local).read_bytes()
            elif self._file_is_local:
                self._file_is_local = False
                bytes = self._file.getvalue()

            # Put the data in the right place
            if bytes is not None:
                if self._uri_type == URI_BYTES:
                    self._result = bytes  # Picked up by imread function
                elif self._uri_type == URI_FILE:
                    self._file.write(bytes)
                elif self._uri_type == URI_ZIPPED:
                    zf = zipfile.ZipFile(self._filename_zip[0], "a")
                    zf.writestr(self._filename_zip[1], bytes)
                    zf.close()
                # elif self._uri_type == URI_FILENAME: -> is always direct
                # elif self._uri_type == URI_FTP/HTTP: -> write not supported

        # Close open files that we know of (and are responsible for)
        if self._file and self._uri_type != URI_FILE:
            self._file.close()
        self._file = None
        if self._zipfile:
            self._zipfile.close()
            self._zipfile = None

        # Remove temp file
        if self._filename_local:
            try:
                os.remove(self._filename_local)
            except Exception:  # pragma: no cover
                warnings.warn(
                    "Failed to delete the temporary file at "
                    f"`{self._filename_local}`. Please report this issue."
                )
            self._filename_local = None

        # Detach so gc can clean even if a reference of self lingers
        self._bytes = None
|
||||
|
||||
def get_result(self):
    """For internal use. In some situations a write action can have
    a result (bytes data). That is obtained with this function.
    """
    # Hand out the stored result exactly once; subsequent calls get None.
    res = self._result
    self._result = None
    return res
|
||||
|
||||
@property
def firstbytes(self):
    """The first 256 bytes of the file. These can be used to
    parse the header to determine the file-format.
    """
    # Lazily populated on first access, then cached.
    cached = self._firstbytes
    if cached is None:
        self._read_first_bytes()
        cached = self._firstbytes
    return cached
|
||||
|
||||
def _read_first_bytes(self, N=256):
    # Populate self._firstbytes with (up to) the first N bytes of the
    # resource, restoring the stream position afterwards when possible.
    if self._bytes is not None:
        # Data is already fully in memory; slicing is enough.
        self._firstbytes = self._bytes[:N]
    else:
        # Prepare
        try:
            f = self.get_file()
        except IOError:
            if os.path.isdir(self.filename):  # A directory, e.g. for DICOM
                self._firstbytes = bytes()
                return
            raise
        try:
            i = f.tell()
        except Exception:
            i = None  # stream does not support tell(); remember that
        # Read
        self._firstbytes = read_n_bytes(f, N)
        # Set back
        try:
            if i is None:
                raise Exception("cannot seek with None")
            f.seek(i)
        except Exception:
            # Prevent get_file() from reusing the file
            self._file = None
            # If the given URI was a file object, we have a problem,
            if self._uri_type == URI_FILE:
                raise IOError("Cannot seek back after getting firstbytes!")
|
||||
|
||||
|
||||
def read_n_bytes(f, N):
    """read_n_bytes(file, n)

    Read n bytes from the given file, or less if the file has less
    bytes. Returns zero bytes if the file is closed.
    """
    # Accumulate chunks and join once, instead of repeated += on bytes.
    chunks = []
    remaining = N
    while remaining > 0:
        part = f.read(remaining)
        if not part:
            break  # EOF reached before N bytes were available
        chunks.append(part)
        remaining -= len(part)
    return b"".join(chunks)
|
||||
|
||||
|
||||
class SeekableFileObject:
    """A readonly wrapper file object that adds support for seeking, even
    if the wrapped file object does not. This allows us to stream from
    http and still use Pillow.
    """

    def __init__(self, f):
        """Wrap the (possibly non-seekable) readable file object *f*."""
        self.f = f
        self._i = 0  # >=0 but can exceed buffer
        self._buffer = b""
        self._have_all = False
        self.closed = False

    def read(self, n=None):
        """Read and return up to *n* bytes; all remaining if n is None or negative."""
        # Fix up n: None and negative values both mean "read everything".
        if n is None:
            pass
        else:
            n = int(n)
            if n < 0:
                n = None

        # Can and must we read more from the underlying stream?
        if not self._have_all:
            more = b""
            if n is None:
                more = self.f.read()
                self._have_all = True
            else:
                want_i = self._i + n
                want_more = want_i - len(self._buffer)
                if want_more > 0:
                    more = self.f.read(want_more)
                    if len(more) < want_more:
                        self._have_all = True  # short read -> EOF reached
            self._buffer += more

        # Read data from buffer and update pointer
        if n is None:
            res = self._buffer[self._i :]
        else:
            res = self._buffer[self._i : self._i + n]
        self._i += len(res)

        return res

    def tell(self):
        """Return the current stream position."""
        return self._i

    def seek(self, i, mode=0):
        """Move to a new position. Mimics BytesIO/whence semantics."""
        # Get the absolute new position
        i = int(i)
        if mode == 0:  # absolute
            if i < 0:
                raise ValueError("negative seek value " + str(i))
            real_i = i
        elif mode == 1:  # relative to current position
            real_i = max(0, self._i + i)  # negative ok here
        elif mode == 2:  # relative to end; requires knowing the full length
            if not self._have_all:
                self.read()
            real_i = max(0, len(self._buffer) + i)
        else:
            # BUGFIX: the message used to interpolate the position ``i``
            # instead of the invalid whence value ``mode``.
            raise ValueError("invalid whence (%s, should be 0, 1 or 2)" % mode)

        # Read some? Ensure the buffer covers the target position if possible.
        if real_i <= len(self._buffer):
            pass  # no need to read
        elif not self._have_all:
            assert real_i > self._i  # if we don't have all, _i cannot be > _buffer
            self.read(real_i - self._i)  # sets self._i

        self._i = real_i
        return self._i

    def close(self):
        """Close this wrapper and the underlying file object."""
        self.closed = True
        self.f.close()

    def isatty(self):
        return False

    def seekable(self):
        return True
|
||||
|
||||
|
||||
class InitializationError(Exception):
    """The plugin could not initialize from the given request.

    This is an _internal_ error raised by plugins that fail to handle a
    given request. It lets the core distinguish incompatibility between
    a plugin and a request from an actual error/bug inside a plugin.
    """
|
||||
@@ -1,90 +0,0 @@
|
||||
from typing import BinaryIO, Optional, Dict, Any, Sequence, overload, Literal
|
||||
from ..typing import ImageResource
|
||||
import enum
|
||||
|
||||
# NOTE(review): presumably maps example-image names to their locations —
# confirm against the implementation module.
EXAMPLE_IMAGES: Dict[str, str]
RETURN_BYTES = "<bytes>"  # special URI value; see Request/URI_BYTES handling
# Internal tags describing what kind of resource a Request wraps.
URI_BYTES = 1
URI_FILE = 2
URI_FILENAME = 3
URI_ZIPPED = 4
URI_HTTP = 5
URI_FTP = 6
|
||||
|
||||
class IOMode(str, enum.Enum):
    """First character of a mode string: reading ("r") or writing ("w")."""

    read = "r"
    write = "w"
|
||||
|
||||
class ImageMode(str, enum.Enum):
    """Second character of a mode string: what kind of ndimage to handle."""

    single_image = "i"
    multi_image = "I"
    single_volume = "v"
    multi_volume = "V"
    any_mode = "?"
|
||||
|
||||
@enum.unique
class Mode(str, enum.Enum):
    """Combined IO + image mode, e.g. "ri" = read a single image."""

    read_single_image = "ri"
    read_multi_image = "rI"
    read_single_volume = "rv"
    read_multi_volume = "rV"
    read_any = "r?"
    write_single_image = "wi"
    write_multi_image = "wI"
    write_single_volume = "wv"
    write_multi_volume = "wV"
    write_any = "w?"

    @classmethod
    def _missing_(cls, value: Any) -> Mode: ...
    @property
    def io_mode(self) -> IOMode: ...  # first character as IOMode
    @property
    def image_mode(self) -> ImageMode: ...  # second character as ImageMode
|
||||
|
||||
class InitializationError(Exception): ...  # documented in the implementation module
|
||||
|
||||
class Request(object):
    """Type stub for imageio's Request; see the implementation for docs."""

    _uri_type: int
    raw_uri: ImageResource

    @property
    def filename(self) -> str: ...
    @property
    def extension(self) -> str: ...
    @property
    def format_hint(self) -> Optional[str]: ...
    @format_hint.setter
    def format_hint(self, format: str) -> None: ...
    @property
    def mode(self) -> Mode: ...
    @property
    def kwargs(self) -> Dict[str, Any]: ...
    @property
    def firstbytes(self) -> bytes: ...
    def __init__(
        self,
        uri: ImageResource,
        mode: str,
        *,
        extension: str = None,
        format_hint: str = None,
        **kwargs
    ) -> None: ...
    def _parse_uri(self, uri: ImageResource) -> None: ...
    def get_file(self) -> BinaryIO: ...
    def get_local_filename(self) -> str: ...
    def finish(self) -> None: ...
    def get_result(self) -> Optional[bytes]: ...
    def _read_first_bytes(self, N: int = 256) -> bytes: ...
|
||||
|
||||
def read_n_bytes(f: BinaryIO, N: int) -> bytes: ...  # read up to N bytes from f
|
||||
|
||||
class SeekableFileObject:
    """Type stub for imageio's SeekableFileObject wrapper."""

    def __init__(self, f: BinaryIO) -> None: ...
    def read(self, n: int = None) -> bytes: ...
    def tell(self) -> int: ...
    def seek(self, i: int, mode: int = 0) -> int: ...
    def close(self) -> None: ...
    def isatty(self) -> bool: ...
    def seekable(self) -> bool: ...
|
||||
559
.CondaPkg/env/Lib/site-packages/imageio/core/util.py
vendored
559
.CondaPkg/env/Lib/site-packages/imageio/core/util.py
vendored
@@ -1,559 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""
|
||||
Various utilities for imageio
|
||||
"""
|
||||
|
||||
|
||||
from collections import OrderedDict
|
||||
import numpy as np
|
||||
import os
|
||||
import re
|
||||
import struct
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
|
||||
|
||||
logger = logging.getLogger("imageio")  # shared logger for imageio internals

IS_PYPY = "__pypy__" in sys.builtin_module_names  # PyPy needs special-casing below
THIS_DIR = os.path.abspath(os.path.dirname(__file__))  # directory of this module
|
||||
|
||||
|
||||
def urlopen(*args, **kwargs):
    """Compatibility wrapper around ``urllib.request.urlopen``.

    Raises a RuntimeError if urlopen cannot be imported, which can
    happen in frozen applications.
    """
    try:
        from urllib.request import urlopen as _urlopen
    except ImportError:
        raise RuntimeError("Could not import urlopen.")
    return _urlopen(*args, **kwargs)
|
||||
|
||||
|
||||
def _precision_warn(p1, p2, extra=""):
    # Emit a standardized "lossy conversion" warning via the module logger.
    message = (
        "Lossy conversion from {} to {}. {} Convert image to {} prior to "
        "saving to suppress this warning."
    ).format(p1, p2, extra, p2)
    logger.warning(message)
|
||||
|
||||
|
||||
def image_as_uint(im, bitdepth=None):
    """Convert the given image to uint (default: uint8)

    If the dtype already matches the desired format, it is returned
    as-is. If the image is float, and all values are between 0 and 1,
    the values are multiplied by np.power(2.0, bitdepth). In all other
    situations, the values are scaled such that the minimum value
    becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1
    (255 for 8-bit and 65535 for 16-bit).
    """
    if not bitdepth:
        bitdepth = 8
    if not isinstance(im, np.ndarray):
        raise ValueError("Image must be a numpy array")

    if bitdepth == 8:
        out_type = np.uint8
    elif bitdepth == 16:
        out_type = np.uint16
    else:
        raise ValueError("Bitdepth must be either 8 or 16")

    src_name = str(im.dtype)
    dst_name = out_type.__name__
    max_val = np.power(2.0, bitdepth) - 1

    # Already the correct format? Return as-is.
    if (im.dtype == np.uint8 and bitdepth == 8) or (
        im.dtype == np.uint16 and bitdepth == 16
    ):
        return im

    if src_name.startswith("float") and np.nanmin(im) >= 0 and np.nanmax(im) <= 1:
        # Floats in [0, 1]: stretch to the full uint range.
        _precision_warn(src_name, dst_name, "Range [0, 1].")
        im = im.astype(np.float64) * max_val + 0.499999999
    elif im.dtype == np.uint16 and bitdepth == 8:
        _precision_warn(src_name, dst_name, "Losing 8 bits of resolution.")
        im = np.right_shift(im, 8)
    elif im.dtype == np.uint32:
        _precision_warn(
            src_name, dst_name, "Losing {} bits of resolution.".format(32 - bitdepth)
        )
        im = np.right_shift(im, 32 - bitdepth)
    elif im.dtype == np.uint64:
        _precision_warn(
            src_name, dst_name, "Losing {} bits of resolution.".format(64 - bitdepth)
        )
        im = np.right_shift(im, 64 - bitdepth)
    else:
        lo = np.nanmin(im)
        hi = np.nanmax(im)
        if not np.isfinite(lo):
            raise ValueError("Minimum image value is not finite")
        if not np.isfinite(hi):
            raise ValueError("Maximum image value is not finite")
        if hi == lo:
            # Flat image: scaling is undefined, just cast.
            return im.astype(out_type)
        _precision_warn(src_name, dst_name, "Range [{}, {}].".format(lo, hi))
        # Scale into [0, max_val] on a float copy; +0.4999... rounds on cast.
        im = (im.astype("float64") - lo) / (hi - lo) * max_val + 0.499999999
        assert np.nanmin(im) >= 0
        assert np.nanmax(im) < np.power(2.0, bitdepth)
    return im.astype(out_type)
|
||||
|
||||
|
||||
class Array(np.ndarray):
    """Array(array, meta=None)

    A subclass of np.ndarray that has a meta attribute. Get the dictionary
    that contains the meta data using ``im.meta``. Convert to a plain numpy
    array using ``np.asarray(im)``.
    """

    def __new__(cls, array, meta=None):
        # Validate inputs before viewing the data as an Array.
        if not isinstance(array, np.ndarray):
            raise ValueError("Array expects a numpy array.")
        if not (meta is None or isinstance(meta, dict)):
            raise ValueError("Array expects meta data to be a dict.")
        # Convert and return; inherit meta from the source array if present.
        meta = meta if meta is not None else getattr(array, "meta", {})
        try:
            ob = array.view(cls)
        except AttributeError:  # pragma: no cover
            # Just return the original; no metadata on the array in Pypy!
            return array
        ob._copy_meta(meta)
        return ob

    def _copy_meta(self, meta):
        """Make a 2-level deep copy of the meta dictionary."""
        self._meta = Dict()
        for key, val in meta.items():
            if isinstance(val, dict):
                val = Dict(val)  # Copy this level
            self._meta[key] = val

    @property
    def meta(self):
        """The dict with the meta data of this image."""
        return self._meta

    def __array_finalize__(self, ob):
        """So the meta info is maintained when doing calculations with
        the array.
        """
        # Propagate meta from the source Array; otherwise start empty.
        if isinstance(ob, Array):
            self._copy_meta(ob.meta)
        else:
            self._copy_meta({})

    def __array_wrap__(self, out, context=None):
        """So that we return a native numpy array (or scalar) when a
        reducting ufunc is applied (such as sum(), std(), etc.)
        """
        # NOTE(review): NumPy 2 passes an extra ``return_scalar`` argument to
        # __array_wrap__; this signature may need updating there — confirm.
        if not out.shape:
            return out.dtype.type(out)  # Scalar
        elif out.shape != self.shape:
            return out.view(type=np.ndarray)
        else:
            return out  # Type Array
|
||||
|
||||
|
||||
Image = Array  # Alias for backwards compatibility with older imageio code
|
||||
|
||||
|
||||
def asarray(a):
    """Pypy-safe version of np.asarray. Pypy's np.asarray consumes a
    *lot* of memory if the given array is an ndarray subclass. This
    function does not.
    """
    if not isinstance(a, np.ndarray):
        return np.asarray(a)
    if IS_PYPY:  # pragma: no cover
        a = a.copy()  # pypy has issues with base views
    return a.view(type=np.ndarray)
|
||||
|
||||
|
||||
class Dict(OrderedDict):
    """A dict in which the keys can be get and set as if they were
    attributes. Very convenient in combination with autocompletion.

    This Dict still behaves as much as possible as a normal dict, and
    keys can be anything that are otherwise valid keys. However,
    keys that are not valid identifiers or that are names of the dict
    class (such as 'items' and 'copy') cannot be get/set as attributes.
    """

    # Names that must keep their normal dict/OrderedDict meaning.
    __reserved_names__ = dir(OrderedDict())  # Also from OrderedDict
    __pure_names__ = dir(dict())

    def __getattribute__(self, key):
        # Normal attribute lookup first; fall back to item access so that
        # ``d.foo`` resolves to ``d["foo"]`` for stored keys.
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            if key in self:
                return self[key]
            else:
                raise

    def __setattr__(self, key, val):
        if key in Dict.__reserved_names__:
            # Either let OrderedDict do its work, or disallow
            if key not in Dict.__pure_names__:
                return OrderedDict.__setattr__(self, key, val)
            else:
                raise AttributeError(
                    "Reserved name, this key can only "
                    + "be set via ``d[%r] = X``" % key
                )
        else:
            # if isinstance(val, dict): val = Dict(val) -> no, makes a copy!
            self[key] = val

    def __dir__(self):
        # Expose only valid-identifier string keys for autocompletion.
        def isidentifier(x):
            return bool(re.match(r"[a-z_]\w*$", x, re.I))

        names = [k for k in self.keys() if (isinstance(k, str) and isidentifier(k))]
        return Dict.__reserved_names__ + names
|
||||
|
||||
|
||||
class BaseProgressIndicator(object):
    """BaseProgressIndicator(name)

    A progress indicator helps display the progress of a task to the
    user. Progress can be pending, running, finished or failed.

    Each task has:
      * a name - a short description of what needs to be done.
      * an action - the current action in performing the task (e.g. a subtask)
      * progress - how far the task is completed
      * max - max number of progress units. If 0, the progress is indefinite
      * unit - the units in which the progress is counted
      * status - 0: pending, 1: in progress, 2: finished, 3: failed

    This class defines an abstract interface. Subclasses should implement
    _start, _stop, _update_progress(progressText), _write(message).
    """

    def __init__(self, name):
        self._name = name
        self._action = ""
        self._unit = ""
        self._max = 0
        self._status = 0  # 0: pending, 1: running, 2: finished, 3: failed
        self._last_progress_update = 0  # timestamp of the last visual update

    def start(self, action="", unit="", max=0):
        """start(action='', unit='', max=0)

        Start the progress. Optionally specify an action, a unit,
        and a maximum progress value.
        """
        if self._status == 1:
            self.finish()  # wrap up a still-running task first
        self._action = action
        self._unit = unit
        self._max = max
        #
        self._progress = 0
        self._status = 1
        self._start()

    def status(self):
        """status()

        Get the status of the progress - 0: pending, 1: in progress,
        2: finished, 3: failed
        """
        return self._status

    def set_progress(self, progress=0, force=False):
        """set_progress(progress=0, force=False)

        Set the current progress. To avoid unnecessary progress updates
        this will only have a visual effect if the time since the last
        update is > 0.1 seconds, or if force is True.
        """
        self._progress = progress
        # Update or not? (throttle to at most ~10 visual updates per second)
        if not (force or (time.time() - self._last_progress_update > 0.1)):
            return
        self._last_progress_update = time.time()
        # Compose new string
        unit = self._unit or ""
        progressText = ""
        if unit == "%":
            progressText = "%2.1f%%" % progress
        elif self._max > 0:
            percent = 100 * float(progress) / self._max
            progressText = "%i/%i %s (%2.1f%%)" % (progress, self._max, unit, percent)
        elif progress > 0:
            if isinstance(progress, float):
                progressText = "%0.4g %s" % (progress, unit)
            else:
                progressText = "%i %s" % (progress, unit)
        # Update
        self._update_progress(progressText)

    def increase_progress(self, extra_progress):
        """increase_progress(extra_progress)

        Increase the progress by a certain amount.
        """
        self.set_progress(self._progress + extra_progress)

    def finish(self, message=None):
        """finish(message=None)

        Finish the progress, optionally specifying a message. This will
        not set the progress to the maximum.
        """
        self.set_progress(self._progress, True)  # force update
        self._status = 2
        self._stop()
        if message is not None:
            self._write(message)

    def fail(self, message=None):
        """fail(message=None)

        Stop the progress with a failure, optionally specifying a message.
        """
        self.set_progress(self._progress, True)  # force update
        self._status = 3
        self._stop()
        message = "FAIL " + (message or "")
        self._write(message)

    def write(self, message):
        """write(message)

        Write a message during progress (such as a warning).
        """
        if self.__class__ == BaseProgressIndicator:
            # When this class is used as a dummy, print explicit message
            print(message)
        else:
            return self._write(message)

    # Implementing classes should implement these

    def _start(self):
        pass

    def _stop(self):
        pass

    def _update_progress(self, progressText):
        pass

    def _write(self, message):
        pass
|
||||
|
||||
|
||||
class StdoutProgressIndicator(BaseProgressIndicator):
    """StdoutProgressIndicator(name)

    A progress indicator that shows the progress in stdout. It
    assumes that the tty can appropriately deal with backspace
    characters.
    """

    def _start(self):
        self._chars_prefix, self._chars = "", ""
        # Write message
        if self._action:
            self._chars_prefix = "%s (%s): " % (self._name, self._action)
        else:
            self._chars_prefix = "%s: " % self._name
        sys.stdout.write(self._chars_prefix)
        sys.stdout.flush()

    def _update_progress(self, progressText):
        # If progress is unknown, at least make something move
        if not progressText:
            i1, i2, i3, i4 = "-\\|/"
            M = {i1: i2, i2: i3, i3: i4, i4: i1}  # spinner: cycle the glyphs
            progressText = M.get(self._chars, i1)
        # Store new string and write; backspaces erase the previous text.
        delChars = "\b" * len(self._chars)
        self._chars = progressText
        sys.stdout.write(delChars + self._chars)
        sys.stdout.flush()

    def _stop(self):
        self._chars = self._chars_prefix = ""
        sys.stdout.write("\n")
        sys.stdout.flush()

    def _write(self, message):
        # Write message, erasing the current progress line first.
        delChars = "\b" * len(self._chars_prefix + self._chars)
        sys.stdout.write(delChars + " " + message + "\n")
        # Reprint progress text
        sys.stdout.write(self._chars_prefix + self._chars)
        sys.stdout.flush()
|
||||
|
||||
|
||||
# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py)
|
||||
def appdata_dir(appname=None, roaming=False):
    """appdata_dir(appname=None, roaming=False)

    Get the path to the application directory, where applications are allowed
    to write user specific files (e.g. configurations). For non-user specific
    data, consider using common_appdata_dir().
    If appname is given, a subdir is appended (and created if necessary).
    If roaming is True, will prefer a roaming directory (Windows Vista/7).
    """

    # Define default user directory; IMAGEIO_USERDIR overrides the home dir.
    userDir = os.getenv("IMAGEIO_USERDIR", None)
    if userDir is None:
        userDir = os.path.expanduser("~")
        if not os.path.isdir(userDir):  # pragma: no cover
            userDir = "/var/tmp"  # issue #54

    # Get system app data dir
    path = None
    if sys.platform.startswith("win"):
        path1, path2 = os.getenv("LOCALAPPDATA"), os.getenv("APPDATA")
        # Roaming profiles prefer APPDATA; otherwise prefer LOCALAPPDATA.
        path = (path2 or path1) if roaming else (path1 or path2)
    elif sys.platform.startswith("darwin"):
        path = os.path.join(userDir, "Library", "Application Support")
    # On Linux and as fallback
    if not (path and os.path.isdir(path)):
        path = userDir

    # Maybe we should store things local to the executable (in case of a
    # portable distro or a frozen application that wants to be portable)
    prefix = sys.prefix
    if getattr(sys, "frozen", None):
        prefix = os.path.abspath(os.path.dirname(sys.executable))
    for reldir in ("settings", "../settings"):
        localpath = os.path.abspath(os.path.join(prefix, reldir))
        if os.path.isdir(localpath):  # pragma: no cover
            try:
                # Probe writability by creating and removing a scratch file.
                open(os.path.join(localpath, "test.write"), "wb").close()
                os.remove(os.path.join(localpath, "test.write"))
            except IOError:
                pass  # We cannot write in this directory
            else:
                path = localpath
                break

    # Get path specific for this app
    if appname:
        if path == userDir:
            appname = "." + appname.lstrip(".")  # Make it a hidden directory
        path = os.path.join(path, appname)
        if not os.path.isdir(path):  # pragma: no cover
            os.makedirs(path, exist_ok=True)

    # Done
    return path
|
||||
|
||||
|
||||
def resource_dirs():
    """resource_dirs()

    Get a list of directories where imageio resources may be located.
    The first directory in this list is the "resources" directory in
    the package itself. The second directory is the appdata directory
    (~/.imageio on Linux). The list further contains the application
    directory (for frozen apps), and may include additional directories
    in the future.
    """
    # Resource dir baked into the package always comes first.
    candidates = [resource_package_dir()]

    # Appdata directory; the home dir may not be writable.
    try:
        candidates.append(appdata_dir("imageio"))
    except Exception:  # pragma: no cover
        pass

    # Directory where the app is located (mainly for frozen apps).
    if getattr(sys, "frozen", None):
        candidates.append(os.path.abspath(os.path.dirname(sys.executable)))
    elif sys.path and sys.path[0]:
        candidates.append(os.path.abspath(sys.path[0]))

    return candidates
|
||||
|
||||
|
||||
def resource_package_dir():
    """package_dir

    Get the resources directory in the imageio package installation
    directory.

    Notes
    -----
    This is a convenience method that is used by `resource_dirs` and
    imageio entry point scripts.
    """
    # Make pkg_resources optional if setuptools is not available.
    try:
        # Importing pkg_resources at module level is slow, so do it here:
        # https://github.com/pypa/setuptools/issues/510
        import pkg_resources
    except ImportError:
        pkg_resources = None

    if pkg_resources is None:
        # Fallback when setuptools is unavailable.
        return os.path.abspath(os.path.join(THIS_DIR, "..", "resources"))
    # `pkg_resources.resource_filename` also works with eggs.
    return pkg_resources.resource_filename("imageio", "resources")
|
||||
|
||||
|
||||
def get_platform():
    """get_platform()

    Get a string that specifies the platform more specific than
    sys.platform does. The result can be: linux32, linux64, win32,
    win64, osx32, osx64. Other platforms may be added in the future.
    """
    templates = {
        "linux": "linux%i",
        "win": "win%i",
        "darwin": "osx%i",
        "freebsd": "freebsd%i",
    }
    for prefix, template in templates.items():
        if sys.platform.startswith(prefix):
            # Pointer size distinguishes 32- from 64-bit interpreters.
            return template % (struct.calcsize("P") * 8)
    return None  # pragma: no cover
|
||||
|
||||
|
||||
def has_module(module_name):
    """Check to see if a python module is available.

    Parameters
    ----------
    module_name : str
        Fully qualified module name, e.g. ``"os"`` or ``"email.mime"``.

    Returns
    -------
    bool
        True when every component of the dotted name can be found.
    """
    # The legacy ``imp``-based fallback for Python < 3.5 was removed: this
    # library requires Python 3.5+ (see the package docstring) and ``imp``
    # no longer exists on modern interpreters (removed in Python 3.12).
    # Import importlib.util explicitly; ``import importlib`` alone does not
    # guarantee the ``util`` submodule is loaded.
    import importlib.util

    # Check every parent package as well, since find_spec on "a.b" requires
    # "a" to be importable first.
    name_parts = module_name.split(".")
    for i in range(len(name_parts)):
        if importlib.util.find_spec(".".join(name_parts[: i + 1])) is None:
            return False
    return True
|
||||
@@ -1,370 +0,0 @@
|
||||
from . import Request
|
||||
from ..typing import ArrayLike
|
||||
import numpy as np
|
||||
from typing import Optional, Dict, Any, Tuple, Union, List, Iterator
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class ImageProperties:
    """Standardized Metadata

    ImageProperties represent a set of standardized metadata that is available
    under the same name for every supported format. If the ImageResource (or
    format) does not specify the value, a sensible default value is chosen
    instead.

    Attributes
    ----------
    shape : Tuple[int, ...]
        The shape of the loaded ndimage.
    dtype : np.dtype
        The dtype of the loaded ndimage.
    n_images : int
        Number of images in the file if ``index=...``, `None` for single images.
    is_batch : bool
        If True, the first dimension of the ndimage represents a batch dimension
        along which several images are stacked.
    spacing : Tuple
        A tuple describing the spacing between pixels along each axis of the
        ndimage. If the spacing is uniform along an axis the value corresponding
        to that axis is a single float. If the spacing is non-uniform, the value
        corresponding to that axis is a tuple in which the i-th element
        indicates the spacing between the i-th and (i+1)-th pixel along that
        axis.

    """

    shape: Tuple[int, ...]
    dtype: np.dtype
    n_images: Optional[int] = None  # only set for multi-image reads (index=...)
    is_batch: bool = False
    spacing: Optional[tuple] = None
|
||||
|
||||
|
||||
class PluginV3:
|
||||
"""A ImageIO Plugin.
|
||||
|
||||
This is an abstract plugin that documents the v3 plugin API interface. A
|
||||
plugin is an adapter/wrapper around a backend that converts a request from
|
||||
iio.core (e.g., read an image from file) into a sequence of instructions for
|
||||
the backend that fullfill the request.
|
||||
|
||||
Plugin authors may choose to subclass this class when implementing a new
|
||||
plugin, but aren't obliged to do so. As long as the plugin class implements
|
||||
the interface (methods) described below the ImageIO core will treat it just
|
||||
like any other plugin.
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : iio.Request
|
||||
A request object that represents the users intent. It provides a
|
||||
standard interface to access the various ImageResources and serves them
|
||||
to the plugin as a file object (or file). Check the docs for details.
|
||||
**kwargs : Any
|
||||
Additional configuration arguments for the plugin or backend. Usually
|
||||
these match the configuration arguments available on the backend and
|
||||
are forwarded to it.
|
||||
|
||||
|
||||
Raises
|
||||
------
|
||||
InitializationError
|
||||
During ``__init__`` the plugin tests if it can fulfill the request. If
|
||||
it can't, e.g., because the request points to a file in the wrong
|
||||
format, then it should raise an ``InitializationError`` and provide a
|
||||
reason for failure. This reason may be reported to the user.
|
||||
ImportError
|
||||
Plugins will be imported dynamically when listed in
|
||||
``iio.config.known_plugins`` to fullfill requests. This way, users only
|
||||
have to load plugins/backends they actually use. If this plugin's backend
|
||||
is not installed, it should raise an ``ImportError`` either during
|
||||
module import or during class construction.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Upon successful construction the plugin takes ownership of the provided
|
||||
request. This means that it is the plugin's responsibility to call
|
||||
request.finish() to close the resource when it is no longer needed.
|
||||
|
||||
Plugins _must_ implement a context manager that closes and cleans any
|
||||
resources held by the plugin upon exit.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, request: Request) -> None:
|
||||
"""Initialize a new Plugin Instance.
|
||||
|
||||
See Plugin's docstring for detailed documentation.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The implementation here stores the request as a local variable that is
|
||||
exposed using a @property below. If you inherit from PluginV3, remember
|
||||
to call ``super().__init__(request)``.
|
||||
|
||||
"""
|
||||
|
||||
self._request = request
|
||||
|
||||
def read(self, *, index: int = 0) -> np.ndarray:
|
||||
"""Read a ndimage.
|
||||
|
||||
The ``read`` method loads a (single) ndimage, located at ``index`` from
|
||||
the requested ImageResource.
|
||||
|
||||
It is at the plugin's descretion to decide (and document) what
|
||||
constitutes a single ndimage. A sensible way to make this decision is to
|
||||
choose based on the ImageResource's format and on what users will expect
|
||||
from such a format. For example, a sensible choice for a TIFF file
|
||||
produced by an ImageJ hyperstack is to read it as a volumetric ndimage
|
||||
(1 color dimension followed by 3 spatial dimensions). On the other hand,
|
||||
a sensible choice for a MP4 file produced by Davinci Resolve is to treat
|
||||
each frame as a ndimage (2 spatial dimensions followed by 1 color
|
||||
dimension).
|
||||
|
||||
The value ``index=None`` is special. It requests the plugin to load all
|
||||
ndimages in the file and stack them along a new first axis. For example,
|
||||
if a MP4 file is read with ``index=None`` and the plugin identifies
|
||||
single frames as ndimages, then the plugin should read all frames and
|
||||
stack them into a new ndimage which now contains a time axis as its
|
||||
first axis. If a PNG file (single image format) is read with
|
||||
``index=None`` the plugin does a very similar thing: It loads all
|
||||
ndimages in the file (here it's just one) and stacks them along a new
|
||||
first axis, effectively prepending an axis with size 1 to the image. If
|
||||
a plugin does not wish to support ``index=None`` it should set a more
|
||||
sensible default and raise a ``ValueError`` when requested to read using
|
||||
``index=None``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
If the ImageResource contains multiple ndimages, and index is an
|
||||
integer, select the index-th ndimage from among them and return it.
|
||||
If index is an ellipsis (...), read all ndimages in the file and
|
||||
stack them along a new batch dimension. If index is None, let the
|
||||
plugin decide. If the index is out of bounds a ``ValueError`` is
|
||||
raised.
|
||||
**kwargs : Any
|
||||
The read method may accept any number of plugin-specific keyword
|
||||
arguments to further customize the read behavior. Usually these
|
||||
match the arguments available on the backend and are forwarded to
|
||||
it.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ndimage : np.ndarray
|
||||
A ndimage containing decoded pixel data (sometimes called bitmap).
|
||||
|
||||
Notes
|
||||
-----
|
||||
The ImageResource from which the plugin should read is managed by the
|
||||
provided request object. Directly accessing the managed ImageResource is
|
||||
_not_ permitted. Instead, you can get FileLike access to the
|
||||
ImageResource via request.get_file().
|
||||
|
||||
If the backend doesn't support reading from FileLike objects, you can
|
||||
request a temporary file to pass to the backend via
|
||||
``request.get_local_filename()``. This is, however, not very performant
|
||||
(involves copying the Request's content into a temporary file), so you
|
||||
should avoid doing this whenever possible. Consider it a fallback method
|
||||
in case all else fails.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def write(self, ndimage: Union[ArrayLike, List[ArrayLike]]) -> Optional[bytes]:
|
||||
"""Write a ndimage to a ImageResource.
|
||||
|
||||
The ``write`` method encodes the given ndimage into the format handled
|
||||
by the backend and writes it to the ImageResource. It overwrites
|
||||
any content that may have been previously stored in the file.
|
||||
|
||||
If the backend supports only a single format then it must check if
|
||||
the ImageResource matches that format and raise an exception if not.
|
||||
Typically, this should be done during initialization in the form of a
|
||||
``InitializationError``.
|
||||
|
||||
If the backend supports more than one format it must determine the
|
||||
requested/desired format. Usually this can be done by inspecting the
|
||||
ImageResource (e.g., by checking ``request.extension``), or by providing
|
||||
a mechanism to explicitly set the format (perhaps with a - sensible -
|
||||
default value). If the plugin can not determine the desired format, it
|
||||
**must not** write to the ImageResource, but raise an exception instead.
|
||||
|
||||
If the backend supports at least one format that can hold multiple
|
||||
ndimages it should be capable of handling ndimage batches and lists of
|
||||
ndimages. If the ``ndimage`` input is a list of ndimages, the plugin
|
||||
should not assume that the ndimages are not stackable, i.e., ndimages
|
||||
may have different shapes. Otherwise, the ``ndimage`` may be a batch of
|
||||
multiple ndimages stacked along the first axis of the array. The plugin
|
||||
must be able to discover this, either automatically or via additional
|
||||
`kwargs`. If there is ambiguity in the process, the plugin must clearly
|
||||
document what happens in such cases and, if possible, describe how to
|
||||
resolve this ambiguity.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ndimage : ArrayLike
|
||||
The ndimage to encode and write to the current ImageResource.
|
||||
**kwargs : Any
|
||||
The write method may accept any number of plugin-specific keyword
|
||||
arguments to customize the writing behavior. Usually these match the
|
||||
arguments available on the backend and are forwarded to it.
|
||||
|
||||
Returns
|
||||
-------
|
||||
encoded_image : bytes or None
|
||||
If the chosen ImageResource is the special target ``"<bytes>"`` then
|
||||
write should return a byte string containing the encoded image data.
|
||||
Otherwise, it returns None.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The ImageResource to which the plugin should write to is managed by the
|
||||
provided request object. Directly accessing the managed ImageResource is
|
||||
_not_ permitted. Instead, you can get FileLike access to the
|
||||
ImageResource via request.get_file().
|
||||
|
||||
If the backend doesn't support writing to FileLike objects, you can
|
||||
request a temporary file to pass to the backend via
|
||||
``request.get_local_filename()``. This is, however, not very performant
|
||||
(involves copying the Request's content from a temporary file), so you
|
||||
should avoid doing this whenever possible. Consider it a fallback method
|
||||
in case all else fails.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def iter(self) -> Iterator[np.ndarray]:
|
||||
"""Iterate the ImageResource.
|
||||
|
||||
This method returns a generator that yields ndimages in the order in which
|
||||
they appear in the file. This is roughly equivalent to::
|
||||
|
||||
idx = 0
|
||||
while True:
|
||||
try:
|
||||
yield self.read(index=idx)
|
||||
except ValueError:
|
||||
break
|
||||
|
||||
It works very similar to ``read``, and you can consult the documentation
|
||||
of that method for additional information on desired behavior.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
**kwargs : Any
|
||||
The iter method may accept any number of plugin-specific keyword
|
||||
arguments to further customize the reading/iteration behavior.
|
||||
Usually these match the arguments available on the backend and are
|
||||
forwarded to it.
|
||||
|
||||
Yields
|
||||
------
|
||||
ndimage : np.ndarray
|
||||
A ndimage containing decoded pixel data (sometimes called bitmap).
|
||||
|
||||
See Also
|
||||
--------
|
||||
PluginV3.read
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def properties(self, index: int = 0) -> ImageProperties:
|
||||
"""Standardized ndimage metadata.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
If the ImageResource contains multiple ndimages, and index is an
|
||||
integer, select the index-th ndimage from among them and return its
|
||||
properties. If index is an ellipsis (...), read all ndimages in the file
|
||||
and stack them along a new batch dimension and return their properties.
|
||||
If index is None, the plugin decides the default.
|
||||
|
||||
Returns
|
||||
-------
|
||||
properties : ImageProperties
|
||||
A dataclass filled with standardized image metadata.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def metadata(self, index: int = 0, exclude_applied: bool = True) -> Dict[str, Any]:
|
||||
"""Format-Specific ndimage metadata.
|
||||
|
||||
The method reads metadata stored in the ImageResource and returns it as
|
||||
a python dict. The plugin is free to choose which name to give a piece
|
||||
of metadata; however, if possible, it should match the name given by the
|
||||
format. There is no requirement regarding the fields a plugin must
|
||||
expose; however, if a plugin does expose any,``exclude_applied`` applies
|
||||
to these fields.
|
||||
|
||||
If the plugin does return metadata items, it must check the value of
|
||||
``exclude_applied`` before returning them. If ``exclude applied`` is
|
||||
True, then any metadata item that would be applied to an ndimage
|
||||
returned by ``read`` (or ``iter``) must not be returned. This is done to
|
||||
avoid confusion; for example, if an ImageResource defines the ExIF
|
||||
rotation tag, and the plugin applies the rotation to the data before
|
||||
returning it, then ``exclude_applied`` prevents confusion on whether the
|
||||
tag was already applied or not.
|
||||
|
||||
The `kwarg` ``index`` behaves similar to its counterpart in ``read``
|
||||
with one exception: If the ``index`` is None, then global metadata is
|
||||
returned instead of returning a combination of all metadata items. If
|
||||
there is no global metadata, the Plugin should return an empty dict or
|
||||
raise an exception.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
If the ImageResource contains multiple ndimages, and index is an
|
||||
integer, select the index-th ndimage from among them and return its
|
||||
metadata. If index is an ellipsis (...), return global metadata. If
|
||||
index is None, the plugin decides the default.
|
||||
exclude_applied : bool
|
||||
If True (default), do not report metadata fields that the plugin
|
||||
would apply/consume while reading the image.
|
||||
|
||||
Returns
|
||||
-------
|
||||
metadata : dict
|
||||
A dictionary filled with format-specific metadata fields and their
|
||||
values.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def close(self) -> None:
|
||||
"""Close the ImageResource.
|
||||
|
||||
This method allows a plugin to behave similar to the python build-in ``open``::
|
||||
|
||||
image_file = my_plugin(Request, "r")
|
||||
...
|
||||
image_file.close()
|
||||
|
||||
It is used by the context manager and deconstructor below to avoid leaking
|
||||
ImageResources. If the plugin has no other cleanup to do it doesn't have
|
||||
to overwrite this method itself and can rely on the implementation
|
||||
below.
|
||||
|
||||
"""
|
||||
|
||||
self.request.finish()
|
||||
|
||||
@property
|
||||
def request(self) -> Request:
|
||||
return self._request
|
||||
|
||||
def __enter__(self) -> "PluginV3":
|
||||
return self
|
||||
|
||||
def __exit__(self, type, value, traceback) -> None:
|
||||
self.close()
|
||||
|
||||
def __del__(self) -> None:
|
||||
self.close()
|
||||
@@ -1,11 +0,0 @@
|
||||
"""
|
||||
Helper functions for freezing imageio.
|
||||
"""
|
||||
|
||||
|
||||
def get_includes():
    """Return the module names a frozen imageio build must bundle."""
    required = ("email", "urllib.request", "numpy", "zipfile", "io")
    return list(required)
def get_excludes():
    """Return the module names that may be excluded when freezing (none)."""
    return list()
@@ -1,103 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
"""
|
||||
Here you can find documentation on how to write your own plugin to allow
|
||||
ImageIO to access a new backend. Plugins are quite object oriented, and
|
||||
the relevant classes and their interaction are documented here:
|
||||
|
||||
.. currentmodule:: imageio
|
||||
|
||||
.. autosummary::
|
||||
:toctree: ../_autosummary
|
||||
:template: better_class.rst
|
||||
|
||||
imageio.core.Format
|
||||
imageio.core.Request
|
||||
|
||||
.. note::
|
||||
You can always check existing plugins if you want to see examples.
|
||||
|
||||
What methods to implement
|
||||
-------------------------
|
||||
|
||||
To implement a new plugin, create a new class that inherits from
|
||||
:class:`imageio.core.Format`. and implement the following functions:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: ../_autosummary
|
||||
|
||||
imageio.core.Format.__init__
|
||||
imageio.core.Format._can_read
|
||||
imageio.core.Format._can_write
|
||||
|
||||
Further, each format contains up to two nested classes; one for reading and
|
||||
one for writing. To support reading and/or writing, the respective classes
|
||||
need to be defined.
|
||||
|
||||
For reading, create a nested class that inherits from
|
||||
``imageio.core.Format.Reader`` and that implements the following functions:
|
||||
|
||||
* Implement ``_open(**kwargs)`` to initialize the reader. Deal with the
|
||||
user-provided keyword arguments here.
|
||||
* Implement ``_close()`` to clean up.
|
||||
* Implement ``_get_length()`` to provide a suitable length based on what
|
||||
the user expects. Can be ``inf`` for streaming data.
|
||||
* Implement ``_get_data(index)`` to return an array and a meta-data dict.
|
||||
* Implement ``_get_meta_data(index)`` to return a meta-data dict. If index
|
||||
is None, it should return the 'global' meta-data.
|
||||
|
||||
For writing, create a nested class that inherits from
|
||||
``imageio.core.Format.Writer`` and implement the following functions:
|
||||
|
||||
* Implement ``_open(**kwargs)`` to initialize the writer. Deal with the
|
||||
user-provided keyword arguments here.
|
||||
* Implement ``_close()`` to clean up.
|
||||
* Implement ``_append_data(im, meta)`` to add data (and meta-data).
|
||||
* Implement ``_set_meta_data(meta)`` to set the global meta-data.
|
||||
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import os
|
||||
import warnings
|
||||
|
||||
|
||||
# v2 imports remove in v3
|
||||
from .. import formats
|
||||
|
||||
# v2 allowed overriding the plugin/format precedence through an environment
# variable; honor it here, but warn that the mechanism is deprecated.
env_plugin_order = os.getenv("IMAGEIO_FORMAT_ORDER", None)
if env_plugin_order is not None:  # pragma: no cover
    warnings.warn(
        "Setting plugin priority through an environment variable is"
        " deprecated and will be removed in ImageIO v3. There is no"
        " replacement planned for this feature. If you have an"
        " active use-case for it, please reach out to us on GitHub.",
        DeprecationWarning,
    )

    # Reuse the value read above rather than querying the environment a
    # second time (the former code called os.getenv twice; both reads are
    # only reached when the variable is set, so behavior is unchanged).
    formats.sort(*env_plugin_order.split(","))
# this class replaces plugin module. For details
|
||||
# see https://stackoverflow.com/questions/2447353/getattr-on-a-module
|
||||
def __getattr__(name):
    """Lazy-import plugins into the ``imageio.plugins`` namespace.

    PEP 562 module-level ``__getattr__``: a backend module is imported the
    first time it is accessed as an attribute, e.g.::

        >>> import imageio
        >>> imageio.plugins.freeimage.download()

    so freeimage is only imported when the second line runs.
    """

    module_path = f"imageio.plugins.{name}"
    try:
        return importlib.import_module(module_path)
    except ImportError:
        message = f"module '{__name__}' has no attribute '{name}'"
        raise AttributeError(message) from None
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,915 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# This file is distributed under the terms of the 2-clause BSD License.
|
||||
# Copyright (c) 2017-2018, Almar Klein
|
||||
|
||||
"""
|
||||
Python implementation of the Binary Structured Data Format (BSDF).
|
||||
|
||||
BSDF is a binary format for serializing structured (scientific) data.
|
||||
See http://bsdf.io for more information.
|
||||
|
||||
This is the reference implementation, which is relatively relatively
|
||||
sophisticated, providing e.g. lazy loading of blobs and streamed
|
||||
reading/writing. A simpler Python implementation is available as
|
||||
``bsdf_lite.py``.
|
||||
|
||||
This module has no dependencies and works on Python 2.7 and 3.4+.
|
||||
|
||||
Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes.
|
||||
"""
|
||||
|
||||
# todo: in 2020, remove six stuff, __future__ and _isidentifier
|
||||
# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import bz2
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import struct
|
||||
import sys
|
||||
import types
|
||||
import zlib
|
||||
from io import BytesIO
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Notes on versioning: the major and minor numbers correspond to the
|
||||
# BSDF format version. The major number if increased when backward
|
||||
# incompatible changes are introduced. An implementation must raise an
|
||||
# exception when the file being read has a higher major version. The
|
||||
# minor number is increased when new backward compatible features are
|
||||
# introduced. An implementation must display a warning when the file
|
||||
# being read has a higher minor version. The patch version is increased
|
||||
# for subsequent releases of the implementation.
|
||||
# Version of the BSDF *format* implemented here (major, minor), plus an
# implementation patch number; see the versioning notes above.
VERSION = 2, 1, 2
__version__ = ".".join(str(i) for i in VERSION)


# %% The encoder and decoder implementation

# From six.py: minimal Python 2/3 compatibility aliases used throughout
# this module (the file states support for Python 2.7 and 3.4+).
PY3 = sys.version_info[0] >= 3
if PY3:
    text_type = str
    string_types = str
    unicode_types = str
    integer_types = int
    classtypes = type
else:  # pragma: no cover
    logging.basicConfig()  # avoid "no handlers found" error
    text_type = unicode  # noqa
    string_types = basestring  # noqa
    unicode_types = unicode  # noqa
    integer_types = (int, long)  # noqa
    classtypes = type, types.ClassType

# Shorthands for the heavily used struct functions.
spack = struct.pack
strunpack = struct.unpack
|
||||
def lencode(x):
    """Encode an unsigned integer into a variable sized blob of bytes."""
    # Small counts (the common case) fit in a single byte; anything above
    # 250 is written as marker byte 253 followed by a little-endian uint64.
    # Markers 251/252 are reserved for 16/32 bit variants, but the gain of
    # emitting them is marginal, so this implementation does not.
    if x > 250:
        return spack("<BQ", 253, x)
    return spack("<B", x)
# Include len decoder for completeness; we've inlined it for performance.
|
||||
def lendecode(f):
    """Decode an unsigned integer from a file."""
    first = strunpack("<B", f.read(1))[0]
    if first != 253:
        return first
    # Marker 253: the real count follows as a little-endian uint64.
    return strunpack("<Q", f.read(8))[0]  # noqa
||||
|
||||
def encode_type_id(b, ext_id):
    """Encode the type identifier, with or without extension id."""
    if ext_id is None:
        # Plain value: the lowercase type character as-is.
        return b  # noqa
    # Extension-wrapped value: uppercase type character followed by the
    # length-prefixed extension name.
    name_bytes = ext_id.encode("UTF-8")
    return b.upper() + lencode(len(name_bytes)) + name_bytes  # noqa
||||
def _isidentifier(s):  # pragma: no cover
    """Stand-in for str.isidentifier() on Legacy Python, but slower."""
    # http://stackoverflow.com/questions/2544972/
    if not isinstance(s, string_types):
        return False
    # Word characters only, and the first character must not be a digit.
    return re.match(r"^\w+$", s, re.UNICODE) and re.match(r"^[0-9]", s) is None
|
||||
class BsdfSerializer(object):
|
||||
"""Instances of this class represent a BSDF encoder/decoder.
|
||||
|
||||
It acts as a placeholder for a set of extensions and encoding/decoding
|
||||
options. Use this to predefine extensions and options for high
|
||||
performance encoding/decoding. For general use, see the functions
|
||||
`save()`, `encode()`, `load()`, and `decode()`.
|
||||
|
||||
This implementation of BSDF supports streaming lists (keep adding
|
||||
to a list after writing the main file), lazy loading of blobs, and
|
||||
in-place editing of blobs (for streams opened with a+).
|
||||
|
||||
Options for encoding:
|
||||
|
||||
* compression (int or str): ``0`` or "no" for no compression (default),
|
||||
``1`` or "zlib" for Zlib compression (same as zip files and PNG), and
|
||||
``2`` or "bz2" for Bz2 compression (more compact but slower writing).
|
||||
Note that some BSDF implementations (e.g. JavaScript) may not support
|
||||
compression.
|
||||
* use_checksum (bool): whether to include a checksum with binary blobs.
|
||||
* float64 (bool): Whether to write floats as 64 bit (default) or 32 bit.
|
||||
|
||||
Options for decoding:
|
||||
|
||||
* load_streaming (bool): if True, and the final object in the structure was
|
||||
a stream, will make it available as a stream in the decoded object.
|
||||
* lazy_blob (bool): if True, bytes are represented as Blob objects that can
|
||||
be used to lazily access the data, and also overwrite the data if the
|
||||
file is open in a+ mode.
|
||||
"""
|
||||
|
||||
def __init__(self, extensions=None, **options):
|
||||
self._extensions = {} # name -> extension
|
||||
self._extensions_by_cls = {} # cls -> (name, extension.encode)
|
||||
if extensions is None:
|
||||
extensions = standard_extensions
|
||||
for extension in extensions:
|
||||
self.add_extension(extension)
|
||||
self._parse_options(**options)
|
||||
|
||||
def _parse_options(
|
||||
self,
|
||||
compression=0,
|
||||
use_checksum=False,
|
||||
float64=True,
|
||||
load_streaming=False,
|
||||
lazy_blob=False,
|
||||
):
|
||||
# Validate compression
|
||||
if isinstance(compression, string_types):
|
||||
m = {"no": 0, "zlib": 1, "bz2": 2}
|
||||
compression = m.get(compression.lower(), compression)
|
||||
if compression not in (0, 1, 2):
|
||||
raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"')
|
||||
self._compression = compression
|
||||
|
||||
# Other encoding args
|
||||
self._use_checksum = bool(use_checksum)
|
||||
self._float64 = bool(float64)
|
||||
|
||||
# Decoding args
|
||||
self._load_streaming = bool(load_streaming)
|
||||
self._lazy_blob = bool(lazy_blob)
|
||||
|
||||
def add_extension(self, extension_class):
|
||||
"""Add an extension to this serializer instance, which must be
|
||||
a subclass of Extension. Can be used as a decorator.
|
||||
"""
|
||||
# Check class
|
||||
if not (
|
||||
isinstance(extension_class, type) and issubclass(extension_class, Extension)
|
||||
):
|
||||
raise TypeError("add_extension() expects a Extension class.")
|
||||
extension = extension_class()
|
||||
|
||||
# Get name
|
||||
name = extension.name
|
||||
if not isinstance(name, str):
|
||||
raise TypeError("Extension name must be str.")
|
||||
if len(name) == 0 or len(name) > 250:
|
||||
raise NameError(
|
||||
"Extension names must be nonempty and shorter " "than 251 chars."
|
||||
)
|
||||
if name in self._extensions:
|
||||
logger.warning(
|
||||
'BSDF warning: overwriting extension "%s", '
|
||||
"consider removing first" % name
|
||||
)
|
||||
|
||||
# Get classes
|
||||
cls = extension.cls
|
||||
if not cls:
|
||||
clss = []
|
||||
elif isinstance(cls, (tuple, list)):
|
||||
clss = cls
|
||||
else:
|
||||
clss = [cls]
|
||||
for cls in clss:
|
||||
if not isinstance(cls, classtypes):
|
||||
raise TypeError("Extension classes must be types.")
|
||||
|
||||
# Store
|
||||
for cls in clss:
|
||||
self._extensions_by_cls[cls] = name, extension.encode
|
||||
self._extensions[name] = extension
|
||||
return extension_class
|
||||
|
||||
def remove_extension(self, name):
|
||||
"""Remove a converted by its unique name."""
|
||||
if not isinstance(name, str):
|
||||
raise TypeError("Extension name must be str.")
|
||||
if name in self._extensions:
|
||||
self._extensions.pop(name)
|
||||
for cls in list(self._extensions_by_cls.keys()):
|
||||
if self._extensions_by_cls[cls][0] == name:
|
||||
self._extensions_by_cls.pop(cls)
|
||||
|
||||
def _encode(self, f, value, streams, ext_id):
|
||||
"""Main encoder function."""
|
||||
x = encode_type_id
|
||||
|
||||
if value is None:
|
||||
f.write(x(b"v", ext_id)) # V for void
|
||||
elif value is True:
|
||||
f.write(x(b"y", ext_id)) # Y for yes
|
||||
elif value is False:
|
||||
f.write(x(b"n", ext_id)) # N for no
|
||||
elif isinstance(value, integer_types):
|
||||
if -32768 <= value <= 32767:
|
||||
f.write(x(b"h", ext_id) + spack("h", value)) # H for ...
|
||||
else:
|
||||
f.write(x(b"i", ext_id) + spack("<q", value)) # I for int
|
||||
elif isinstance(value, float):
|
||||
if self._float64:
|
||||
f.write(x(b"d", ext_id) + spack("<d", value)) # D for double
|
||||
else:
|
||||
f.write(x(b"f", ext_id) + spack("<f", value)) # f for float
|
||||
elif isinstance(value, unicode_types):
|
||||
bb = value.encode("UTF-8")
|
||||
f.write(x(b"s", ext_id) + lencode(len(bb))) # S for str
|
||||
f.write(bb)
|
||||
elif isinstance(value, (list, tuple)):
|
||||
f.write(x(b"l", ext_id) + lencode(len(value))) # L for list
|
||||
for v in value:
|
||||
self._encode(f, v, streams, None)
|
||||
elif isinstance(value, dict):
|
||||
f.write(x(b"m", ext_id) + lencode(len(value))) # M for mapping
|
||||
for key, v in value.items():
|
||||
if PY3:
|
||||
assert key.isidentifier() # faster
|
||||
else: # pragma: no cover
|
||||
assert _isidentifier(key)
|
||||
# yield ' ' * indent + key
|
||||
name_b = key.encode("UTF-8")
|
||||
f.write(lencode(len(name_b)))
|
||||
f.write(name_b)
|
||||
self._encode(f, v, streams, None)
|
||||
elif isinstance(value, bytes):
|
||||
f.write(x(b"b", ext_id)) # B for blob
|
||||
blob = Blob(
|
||||
value, compression=self._compression, use_checksum=self._use_checksum
|
||||
)
|
||||
blob._to_file(f) # noqa
|
||||
elif isinstance(value, Blob):
|
||||
f.write(x(b"b", ext_id)) # B for blob
|
||||
value._to_file(f) # noqa
|
||||
elif isinstance(value, BaseStream):
|
||||
# Initialize the stream
|
||||
if value.mode != "w":
|
||||
raise ValueError("Cannot serialize a read-mode stream.")
|
||||
elif isinstance(value, ListStream):
|
||||
f.write(x(b"l", ext_id) + spack("<BQ", 255, 0)) # L for list
|
||||
else:
|
||||
raise TypeError("Only ListStream is supported")
|
||||
# Mark this as *the* stream, and activate the stream.
|
||||
# The save() function verifies this is the last written object.
|
||||
if len(streams) > 0:
|
||||
raise ValueError("Can only have one stream per file.")
|
||||
streams.append(value)
|
||||
value._activate(f, self._encode, self._decode) # noqa
|
||||
else:
|
||||
if ext_id is not None:
|
||||
raise ValueError(
|
||||
"Extension %s wronfully encodes object to another "
|
||||
"extension object (though it may encode to a list/dict "
|
||||
"that contains other extension objects)." % ext_id
|
||||
)
|
||||
# Try if the value is of a type we know
|
||||
ex = self._extensions_by_cls.get(value.__class__, None)
|
||||
# Maybe its a subclass of a type we know
|
||||
if ex is None:
|
||||
for name, c in self._extensions.items():
|
||||
if c.match(self, value):
|
||||
ex = name, c.encode
|
||||
break
|
||||
else:
|
||||
ex = None
|
||||
# Success or fail
|
||||
if ex is not None:
|
||||
ext_id2, extension_encode = ex
|
||||
self._encode(f, extension_encode(self, value), streams, ext_id2)
|
||||
else:
|
||||
t = (
|
||||
"Class %r is not a valid base BSDF type, nor is it "
|
||||
"handled by an extension."
|
||||
)
|
||||
raise TypeError(t % value.__class__.__name__)
|
||||
|
||||
def _decode(self, f):
|
||||
"""Main decoder function."""
|
||||
|
||||
# Get value
|
||||
char = f.read(1)
|
||||
c = char.lower()
|
||||
|
||||
# Conversion (uppercase value identifiers signify converted values)
|
||||
if not char:
|
||||
raise EOFError()
|
||||
elif char != c:
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
# if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa - noneed
|
||||
ext_id = f.read(n).decode("UTF-8")
|
||||
else:
|
||||
ext_id = None
|
||||
|
||||
if c == b"v":
|
||||
value = None
|
||||
elif c == b"y":
|
||||
value = True
|
||||
elif c == b"n":
|
||||
value = False
|
||||
elif c == b"h":
|
||||
value = strunpack("<h", f.read(2))[0]
|
||||
elif c == b"i":
|
||||
value = strunpack("<q", f.read(8))[0]
|
||||
elif c == b"f":
|
||||
value = strunpack("<f", f.read(4))[0]
|
||||
elif c == b"d":
|
||||
value = strunpack("<d", f.read(8))[0]
|
||||
elif c == b"s":
|
||||
n_s = strunpack("<B", f.read(1))[0]
|
||||
if n_s == 253:
|
||||
n_s = strunpack("<Q", f.read(8))[0] # noqa
|
||||
value = f.read(n_s).decode("UTF-8")
|
||||
elif c == b"l":
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
if n >= 254:
|
||||
# Streaming
|
||||
closed = n == 254
|
||||
n = strunpack("<Q", f.read(8))[0]
|
||||
if self._load_streaming:
|
||||
value = ListStream(n if closed else "r")
|
||||
value._activate(f, self._encode, self._decode) # noqa
|
||||
elif closed:
|
||||
value = [self._decode(f) for i in range(n)]
|
||||
else:
|
||||
value = []
|
||||
try:
|
||||
while True:
|
||||
value.append(self._decode(f))
|
||||
except EOFError:
|
||||
pass
|
||||
else:
|
||||
# Normal
|
||||
if n == 253:
|
||||
n = strunpack("<Q", f.read(8))[0] # noqa
|
||||
value = [self._decode(f) for i in range(n)]
|
||||
elif c == b"m":
|
||||
value = dict()
|
||||
n = strunpack("<B", f.read(1))[0]
|
||||
if n == 253:
|
||||
n = strunpack("<Q", f.read(8))[0] # noqa
|
||||
for i in range(n):
|
||||
n_name = strunpack("<B", f.read(1))[0]
|
||||
if n_name == 253:
|
||||
n_name = strunpack("<Q", f.read(8))[0] # noqa
|
||||
assert n_name > 0
|
||||
name = f.read(n_name).decode("UTF-8")
|
||||
value[name] = self._decode(f)
|
||||
elif c == b"b":
|
||||
if self._lazy_blob:
|
||||
value = Blob((f, True))
|
||||
else:
|
||||
blob = Blob((f, False))
|
||||
value = blob.get_bytes()
|
||||
else:
|
||||
raise RuntimeError("Parse error %r" % char)
|
||||
|
||||
# Convert value if we have an extension for it
|
||||
if ext_id is not None:
|
||||
extension = self._extensions.get(ext_id, None)
|
||||
if extension is not None:
|
||||
value = extension.decode(self, value)
|
||||
else:
|
||||
logger.warning("BSDF warning: no extension found for %r" % ext_id)
|
||||
|
||||
return value
|
||||
|
||||
def encode(self, ob):
|
||||
"""Save the given object to bytes."""
|
||||
f = BytesIO()
|
||||
self.save(f, ob)
|
||||
return f.getvalue()
|
||||
|
||||
def save(self, f, ob):
|
||||
"""Write the given object to the given file object."""
|
||||
f.write(b"BSDF")
|
||||
f.write(struct.pack("<B", VERSION[0]))
|
||||
f.write(struct.pack("<B", VERSION[1]))
|
||||
|
||||
# Prepare streaming, this list will have 0 or 1 item at the end
|
||||
streams = []
|
||||
|
||||
self._encode(f, ob, streams, None)
|
||||
|
||||
# Verify that stream object was at the end, and add initial elements
|
||||
if len(streams) > 0:
|
||||
stream = streams[0]
|
||||
if stream._start_pos != f.tell():
|
||||
raise ValueError(
|
||||
"The stream object must be " "the last object to be encoded."
|
||||
)
|
||||
|
||||
def decode(self, bb):
|
||||
"""Load the data structure that is BSDF-encoded in the given bytes."""
|
||||
f = BytesIO(bb)
|
||||
return self.load(f)
|
||||
|
||||
def load(self, f):
    """Load a BSDF-encoded object from the given file object.

    Raises RuntimeError if the magic string is missing or the file's
    major format version differs from this implementation's; warns when
    the file's minor version is newer than the implementation's.
    """
    # Check magic string
    f4 = f.read(4)
    if f4 != b"BSDF":
        raise RuntimeError("This does not look like a BSDF file: %r" % f4)
    # Check version
    major_version = strunpack("<B", f.read(1))[0]
    minor_version = strunpack("<B", f.read(1))[0]
    file_version = "%i.%i" % (major_version, minor_version)
    if major_version != VERSION[0]:  # major version should be 2
        t = (
            "Reading file with different major version (%s) "
            "from the implementation (%s)."
        )
        # Bugfix: the file's version belongs in the first slot and the
        # implementation's version in the second; they were swapped.
        raise RuntimeError(t % (file_version, __version__))
    if minor_version > VERSION[1]:  # minor should be < ours
        t = (
            "BSDF warning: reading file with higher minor version (%s) "
            "than the implementation (%s)."
        )
        # Bugfix: same argument swap as above.
        logger.warning(t % (file_version, __version__))

    return self._decode(f)
|
||||
|
||||
|
||||
# %% Streaming and blob-files
|
||||
|
||||
|
||||
class BaseStream(object):
    """Common machinery shared by read- and write-streams.

    ``mode`` may be 'w' (write), 'r' (read), or an int, which means
    read mode with a known element count.
    """

    def __init__(self, mode="w"):
        self._i = 0
        if isinstance(mode, int):
            # An integer count implies read mode.
            self._count = mode
            mode = "r"
        else:
            self._count = 0 if mode == "w" else -1
        assert mode in ("r", "w")
        self._mode = mode
        self._f = None
        self._start_pos = 0

    def _activate(self, file, encode_func, decode_func):
        """Bind this stream to a file plus the (de)serialization callbacks."""
        if self._f is not None:  # Associated with another write
            raise IOError("Stream object cannot be activated twice?")
        self._f = file
        self._start_pos = file.tell()
        self._encode = encode_func
        self._decode = decode_func

    @property
    def mode(self):
        """The mode of this stream: 'r' or 'w'."""
        return self._mode
|
||||
|
||||
|
||||
class ListStream(BaseStream):
    """A streamable list object used for writing or reading.
    In read mode, it can also be iterated over.
    """

    @property
    def count(self):
        """The number of elements in the stream (can be -1 for unclosed
        streams in read-mode).
        """
        return self._count

    @property
    def index(self):
        """The current index of the element to read/write."""
        return self._i

    def append(self, item):
        """Append an item to the streaming list. The object is immediately
        serialized and written to the underlying file.
        """
        # if self._mode != 'w':
        #     raise IOError('This ListStream is not in write mode.')
        if self._count != self._i:
            raise IOError("Can only append items to the end of the stream.")
        if self._f is None:
            raise IOError("List stream is not associated with a file yet.")
        if self._f.closed:
            raise IOError("Cannot stream to a close file.")
        # Pass ourselves along so the encoder can verify that we remain
        # the last object in the file.
        self._encode(self._f, item, [self], None)
        self._i += 1
        self._count += 1

    def close(self, unstream=False):
        """Close the stream, marking the number of written elements. New
        elements may still be appended, but they won't be read during decoding.
        If ``unstream`` is True, the stream is turned into a regular list
        (not streaming).
        """
        # if self._mode != 'w':
        #     raise IOError('This ListStream is not in write mode.')
        if self._count != self._i:
            raise IOError("Can only close when at the end of the stream.")
        if self._f is None:
            raise IOError("ListStream is not associated with a file yet.")
        if self._f.closed:
            raise IOError("Cannot close a stream on a close file.")
        i = self._f.tell()
        # Rewrite the stream header, which sits 9 bytes (1 marker byte +
        # 8 count bytes) before the first element; 253 is the plain
        # length-prefixed encoding, 254 marks a stream.
        self._f.seek(self._start_pos - 8 - 1)
        self._f.write(spack("<B", 253 if unstream else 254))
        self._f.write(spack("<Q", self._count))
        self._f.seek(i)

    def next(self):
        """Read and return the next element in the streaming list.
        Raises StopIteration if the stream is exhausted.
        """
        if self._mode != "r":
            raise IOError("This ListStream in not in read mode.")
        if self._f is None:
            raise IOError("ListStream is not associated with a file yet.")
        if getattr(self._f, "closed", None):  # not present on 2.7 http req :/
            raise IOError("Cannot read a stream from a close file.")
        if self._count >= 0:
            # Closed stream: the element count is known up front.
            if self._i >= self._count:
                raise StopIteration()
            self._i += 1
            return self._decode(self._f)
        else:
            # Unclosed stream: read elements until the decoder hits EOF.
            # This raises EOFError at some point.
            try:
                res = self._decode(self._f)
                self._i += 1
                return res
            except EOFError:
                self._count = self._i
                raise StopIteration()

    def __iter__(self):
        if self._mode != "r":
            raise IOError("Cannot iterate: ListStream in not in read mode.")
        return self

    def __next__(self):
        return self.next()
|
||||
|
||||
|
||||
class Blob(object):
    """Object to represent a blob of bytes. When used to write a BSDF file,
    it's a wrapper for bytes plus properties such as what compression to apply.
    When used to read a BSDF file, it can be used to read the data lazily, and
    also modify the data if reading in 'r+' mode and the blob isn't compressed.
    """

    # For now, this does not allow re-sizing blobs (within the allocated size)
    # but this can be added later.

    def __init__(self, bb, compression=0, extra_size=0, use_checksum=False):
        # Two construction modes: raw bytes (encoding path) or a
        # (file, allow_seek) tuple (decoding path).
        if isinstance(bb, bytes):
            self._f = None
            self.compressed = self._from_bytes(bb, compression)
            self.compression = compression
            # Reserve extra room so the blob can be edited in place later.
            self.allocated_size = self.used_size + extra_size
            self.use_checksum = use_checksum
        elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"):
            self._f, allow_seek = bb
            self.compressed = None
            self._from_file(self._f, allow_seek)
            self._modified = False
        else:
            raise TypeError("Wrong argument to create Blob.")

    def _from_bytes(self, value, compression):
        """When used to wrap bytes in a blob.

        Returns the (possibly compressed) payload and records both the
        uncompressed (data_size) and on-disk (used_size) sizes.
        """
        if compression == 0:
            compressed = value
        elif compression == 1:
            compressed = zlib.compress(value, 9)
        elif compression == 2:
            compressed = bz2.compress(value, 9)
        else:  # pragma: no cover
            assert False, "Unknown compression identifier"

        self.data_size = len(value)
        self.used_size = len(compressed)
        return compressed

    def _to_file(self, f):
        """Private friend method called by encoder to write a blob to a file."""
        # Write sizes - write at least in a size that allows resizing
        if self.allocated_size <= 250 and self.compression == 0:
            f.write(spack("<B", self.allocated_size))
            f.write(spack("<B", self.used_size))
            f.write(lencode(self.data_size))
        else:
            # A 253 prefix marks an 8-byte (uint64) size field.
            f.write(spack("<BQ", 253, self.allocated_size))
            f.write(spack("<BQ", 253, self.used_size))
            f.write(spack("<BQ", 253, self.data_size))
        # Compression and checksum (0xff flags a 16-byte MD5 follows)
        f.write(spack("B", self.compression))
        if self.use_checksum:
            f.write(b"\xff" + hashlib.md5(self.compressed).digest())
        else:
            f.write(b"\x00")
        # Byte alignment (only necessary for uncompressed data)
        if self.compression == 0:
            alignment = 8 - (f.tell() + 1) % 8  # +1 for the byte to write
            f.write(spack("<B", alignment))  # padding for byte alignment
            f.write(b"\x00" * alignment)
        else:
            f.write(spack("<B", 0))
        # The actual data and extra space
        f.write(self.compressed)
        f.write(b"\x00" * (self.allocated_size - self.used_size))

    def _from_file(self, f, allow_seek):
        """Used when a blob is read by the decoder."""
        # Read blob header data (5 to 42 bytes)
        # Size
        allocated_size = strunpack("<B", f.read(1))[0]
        if allocated_size == 253:
            allocated_size = strunpack("<Q", f.read(8))[0]  # noqa
        used_size = strunpack("<B", f.read(1))[0]
        if used_size == 253:
            used_size = strunpack("<Q", f.read(8))[0]  # noqa
        data_size = strunpack("<B", f.read(1))[0]
        if data_size == 253:
            data_size = strunpack("<Q", f.read(8))[0]  # noqa
        # Compression and checksum
        compression = strunpack("<B", f.read(1))[0]
        has_checksum = strunpack("<B", f.read(1))[0]
        if has_checksum:
            checksum = f.read(16)
        # Skip alignment
        alignment = strunpack("<B", f.read(1))[0]
        f.read(alignment)
        # Get or skip data + extra space
        if allow_seek:
            # Lazy mode: remember where the payload lives and skip past it.
            self.start_pos = f.tell()
            self.end_pos = self.start_pos + used_size
            f.seek(self.start_pos + allocated_size)
        else:
            # Non-seekable source: read the payload eagerly.
            self.start_pos = None
            self.end_pos = None
            self.compressed = f.read(used_size)
            f.read(allocated_size - used_size)
        # Store info
        self.alignment = alignment
        self.compression = compression
        # NOTE: on the read path use_checksum holds the 16-byte digest
        # (or None), not a bool as on the write path.
        self.use_checksum = checksum if has_checksum else None
        self.used_size = used_size
        self.allocated_size = allocated_size
        self.data_size = data_size

    def seek(self, p):
        """Seek to the given position (relative to the blob start)."""
        if self._f is None:
            raise RuntimeError(
                "Cannot seek in a blob " "that is not created by the BSDF decoder."
            )
        if p < 0:
            # Negative positions are relative to the end of the blob.
            p = self.allocated_size + p
        if p < 0 or p > self.allocated_size:
            raise IOError("Seek beyond blob boundaries.")
        self._f.seek(self.start_pos + p)

    def tell(self):
        """Get the current file pointer position (relative to the blob start)."""
        if self._f is None:
            raise RuntimeError(
                "Cannot tell in a blob " "that is not created by the BSDF decoder."
            )
        return self._f.tell() - self.start_pos

    def write(self, bb):
        """Write bytes to the blob (uncompressed, decoder-created blobs only)."""
        if self._f is None:
            raise RuntimeError(
                "Cannot write in a blob " "that is not created by the BSDF decoder."
            )
        if self.compression:
            raise IOError("Cannot arbitrarily write in compressed blob.")
        if self._f.tell() + len(bb) > self.end_pos:
            raise IOError("Write beyond blob boundaries.")
        self._modified = True
        return self._f.write(bb)

    def read(self, n):
        """Read n bytes from the blob."""
        if self._f is None:
            raise RuntimeError(
                "Cannot read in a blob " "that is not created by the BSDF decoder."
            )
        if self.compression:
            raise IOError("Cannot arbitrarily read in compressed blob.")
        if self._f.tell() + n > self.end_pos:
            raise IOError("Read beyond blob boundaries.")
        return self._f.read(n)

    def get_bytes(self):
        """Get the contents of the blob as (decompressed) bytes."""
        if self.compressed is not None:
            compressed = self.compressed
        else:
            # Lazy blob: read payload from file, restoring the position.
            i = self._f.tell()
            self.seek(0)
            compressed = self._f.read(self.used_size)
            self._f.seek(i)
        if self.compression == 0:
            value = compressed
        elif self.compression == 1:
            value = zlib.decompress(compressed)
        elif self.compression == 2:
            value = bz2.decompress(compressed)
        else:  # pragma: no cover
            raise RuntimeError("Invalid compression %i" % self.compression)
        return value

    def update_checksum(self):
        """Reset the blob's checksum if present. Call this after modifying
        the data.
        """
        # or ... should the presence of a checksum mean that data is proteced?
        if self.use_checksum and self._modified:
            self.seek(0)
            compressed = self._f.read(self.used_size)
            # The digest sits 16 bytes before the alignment byte + padding.
            self._f.seek(self.start_pos - self.alignment - 1 - 16)
            self._f.write(hashlib.md5(compressed).digest())
|
||||
|
||||
|
||||
# %% High-level functions
|
||||
|
||||
|
||||
def encode(ob, extensions=None, **options):
    """Save (BSDF-encode) the given object to bytes.
    See `BsdfSerializer` for details on extensions and options.
    """
    return BsdfSerializer(extensions, **options).encode(ob)
|
||||
|
||||
|
||||
def save(f, ob, extensions=None, **options):
    """Save (BSDF-encode) the given object to the given filename or
    file object. See `BsdfSerializer` for details on extensions and options.
    """
    serializer = BsdfSerializer(extensions, **options)
    if not isinstance(f, string_types):
        return serializer.save(f, ob)
    with open(f, "wb") as fp:
        return serializer.save(fp, ob)
|
||||
|
||||
|
||||
def decode(bb, extensions=None, **options):
    """Load a (BSDF-encoded) structure from bytes.
    See `BsdfSerializer` for details on extensions and options.
    """
    return BsdfSerializer(extensions, **options).decode(bb)
|
||||
|
||||
|
||||
def load(f, extensions=None, **options):
    """Load a (BSDF-encoded) structure from the given filename or file object.
    See `BsdfSerializer` for details on extensions and options.
    """
    serializer = BsdfSerializer(extensions, **options)
    if not isinstance(f, string_types):
        return serializer.load(f)
    if f.startswith(("~/", "~\\")):  # pragma: no cover
        f = os.path.expanduser(f)
    with open(f, "rb") as fp:
        return serializer.load(fp)
|
||||
|
||||
|
||||
# Aliases for json compat: mirror the json-module API (loads/dumps).
loads = decode
dumps = encode
|
||||
|
||||
|
||||
# %% Standard extensions
|
||||
|
||||
# Defining extensions as a dict would be more compact and feel lighter, but
|
||||
# that would only allow lambdas, which is too limiting, e.g. for ndarray
|
||||
# extension.
|
||||
|
||||
|
||||
class Extension(object):
    """Base class to implement BSDF extensions for special data types.

    Extension classes are provided to the BSDF serializer, which
    instantiates the class. That way, the extension can be somewhat dynamic:
    e.g. the NDArrayExtension exposes the ndarray class only when numpy
    is imported.

    An extension instance must have two attributes. These can be attributes
    of the class, or of the instance set in ``__init__()``:

    * name (str): the name by which encoded values will be identified.
    * cls (type): the type (or list of types) to match values with.
      This is optional, but it makes the encoder select extensions faster.

    Further, it needs 3 methods:

    * `match(serializer, value) -> bool`: return whether the extension can
      convert the given value. The default is ``isinstance(value, self.cls)``.
    * `encode(serializer, value) -> encoded_value`: the function to encode a
      value to more basic data types.
    * `decode(serializer, encoded_value) -> value`: the function to decode an
      encoded value back to its intended representation.

    """

    name = ""
    cls = ()

    def __repr__(self):
        # Bugfix: hex() already includes the "0x" prefix; the old
        # "at 0x%s" format produced a doubled "0x0x..." in the repr.
        return "<BSDF extension %r at %s>" % (self.name, hex(id(self)))

    def match(self, s, v):
        return isinstance(v, self.cls)

    def encode(self, s, v):
        raise NotImplementedError()

    def decode(self, s, v):
        raise NotImplementedError()
|
||||
|
||||
|
||||
class ComplexExtension(Extension):
    """Extension that stores complex numbers as a (real, imag) pair."""

    name = "c"
    cls = complex

    def encode(self, s, v):
        return (v.real, v.imag)

    def decode(self, s, v):
        real, imag = v[0], v[1]
        return complex(real, imag)
|
||||
|
||||
|
||||
class NDArrayExtension(Extension):
    """Extension that stores numpy arrays as shape + dtype string + raw bytes."""

    name = "ndarray"

    def __init__(self):
        # Only expose the ndarray class when numpy is already imported,
        # so that importing this module never pulls in numpy itself.
        if "numpy" in sys.modules:
            import numpy as np

            self.cls = np.ndarray

    def match(self, s, v):  # pragma: no cover - e.g. work for nd arrays in JS
        # Duck-typed so array-likes from other runtimes also match.
        return all(hasattr(v, name) for name in ("shape", "dtype", "tobytes"))

    def encode(self, s, v):
        return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes())

    def decode(self, s, v):
        try:
            import numpy as np
        except ImportError:  # pragma: no cover
            # No numpy available: hand back the raw encoded dict.
            return v
        arr = np.frombuffer(v["data"], dtype=v["dtype"])
        arr.shape = v["shape"]
        return arr
|
||||
|
||||
|
||||
# The extensions that the serializer registers by default.
standard_extensions = [ComplexExtension, NDArrayExtension]


if __name__ == "__main__":
    # Invoke CLI
    import bsdf_cli

    bsdf_cli.main()
|
||||
@@ -1,923 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Plugin for reading DICOM files.
|
||||
"""
|
||||
|
||||
# todo: Use pydicom:
|
||||
# * Note: is not py3k ready yet
|
||||
# * Allow reading the full meta info
|
||||
# I think we can more or less replace the SimpleDicomReader with a
|
||||
# pydicom.Dataset For series, only ned to read the full info from one
|
||||
# file: speed still high
|
||||
# * Perhaps allow writing?
|
||||
|
||||
import sys
|
||||
import os
|
||||
import struct
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Determine endianity of system
|
||||
sys_is_little_endian = sys.byteorder == "little"
|
||||
|
||||
# Define a dictionary that contains the tags that we would like to know.
# Maps (group, element) -> (attribute name, DICOM Value Representation).
MINIDICT = {
    (0x7FE0, 0x0010): ("PixelData", "OB"),
    # Date and time
    (0x0008, 0x0020): ("StudyDate", "DA"),
    (0x0008, 0x0021): ("SeriesDate", "DA"),
    (0x0008, 0x0022): ("AcquisitionDate", "DA"),
    (0x0008, 0x0023): ("ContentDate", "DA"),
    (0x0008, 0x0030): ("StudyTime", "TM"),
    (0x0008, 0x0031): ("SeriesTime", "TM"),
    (0x0008, 0x0032): ("AcquisitionTime", "TM"),
    (0x0008, 0x0033): ("ContentTime", "TM"),
    # With what, where, by whom?
    (0x0008, 0x0060): ("Modality", "CS"),
    (0x0008, 0x0070): ("Manufacturer", "LO"),
    (0x0008, 0x0080): ("InstitutionName", "LO"),
    # Descriptions
    (0x0008, 0x1030): ("StudyDescription", "LO"),
    (0x0008, 0x103E): ("SeriesDescription", "LO"),
    # UID's
    (0x0008, 0x0016): ("SOPClassUID", "UI"),
    (0x0008, 0x0018): ("SOPInstanceUID", "UI"),
    (0x0020, 0x000D): ("StudyInstanceUID", "UI"),
    (0x0020, 0x000E): ("SeriesInstanceUID", "UI"),
    (0x0008, 0x0117): ("ContextUID", "UI"),
    # Numbers
    (0x0020, 0x0011): ("SeriesNumber", "IS"),
    (0x0020, 0x0012): ("AcquisitionNumber", "IS"),
    (0x0020, 0x0013): ("InstanceNumber", "IS"),
    (0x0020, 0x0014): ("IsotopeNumber", "IS"),
    (0x0020, 0x0015): ("PhaseNumber", "IS"),
    (0x0020, 0x0016): ("IntervalNumber", "IS"),
    (0x0020, 0x0017): ("TimeSlotNumber", "IS"),
    (0x0020, 0x0018): ("AngleNumber", "IS"),
    (0x0020, 0x0019): ("ItemNumber", "IS"),
    (0x0020, 0x0020): ("PatientOrientation", "CS"),
    (0x0020, 0x0030): ("ImagePosition", "CS"),
    (0x0020, 0x0032): ("ImagePositionPatient", "CS"),
    (0x0020, 0x0035): ("ImageOrientation", "CS"),
    (0x0020, 0x0037): ("ImageOrientationPatient", "CS"),
    # Patient information
    (0x0010, 0x0010): ("PatientName", "PN"),
    (0x0010, 0x0020): ("PatientID", "LO"),
    (0x0010, 0x0030): ("PatientBirthDate", "DA"),
    (0x0010, 0x0040): ("PatientSex", "CS"),
    (0x0010, 0x1010): ("PatientAge", "AS"),
    (0x0010, 0x1020): ("PatientSize", "DS"),
    (0x0010, 0x1030): ("PatientWeight", "DS"),
    # Image specific (required to construct numpy array)
    (0x0028, 0x0002): ("SamplesPerPixel", "US"),
    (0x0028, 0x0008): ("NumberOfFrames", "IS"),
    (0x0028, 0x0100): ("BitsAllocated", "US"),
    (0x0028, 0x0101): ("BitsStored", "US"),
    (0x0028, 0x0102): ("HighBit", "US"),
    (0x0028, 0x0103): ("PixelRepresentation", "US"),
    (0x0028, 0x0010): ("Rows", "US"),
    (0x0028, 0x0011): ("Columns", "US"),
    (0x0028, 0x1052): ("RescaleIntercept", "DS"),
    (0x0028, 0x1053): ("RescaleSlope", "DS"),
    # Image specific (for the user)
    (0x0028, 0x0030): ("PixelSpacing", "DS"),
    (0x0018, 0x0088): ("SliceSpacing", "DS"),
}

# Define some special tags:
# See PS 3.5-2008 section 7.5 (p.40)
ItemTag = (0xFFFE, 0xE000)  # start of Sequence Item
ItemDelimiterTag = (0xFFFE, 0xE00D)  # end of Sequence Item
SequenceDelimiterTag = (0xFFFE, 0xE0DD)  # end of Sequence of undefined length

# Define set of groups that we're interested in (so we can quickly skip others)
GROUPS = set([key[0] for key in MINIDICT.keys()])
# Set of all Value Representations that appear in MINIDICT.
VRS = set([val[1] for val in MINIDICT.values()])
|
||||
|
||||
|
||||
class NotADicomFile(Exception):
    """Raised when a file lacks the DICM magic and cannot be parsed."""
|
||||
|
||||
|
||||
class CompressedDicom(RuntimeError):
    """Raised for DICOM files whose pixel data uses an unsupported compression."""
|
||||
|
||||
|
||||
class SimpleDicomReader(object):
|
||||
"""
|
||||
This class provides reading of pixel data from DICOM files. It is
|
||||
focussed on getting the pixel data, not the meta info.
|
||||
|
||||
To use, first create an instance of this class (giving it
|
||||
a file object or filename). Next use the info attribute to
|
||||
get a dict of the meta data. The loading of pixel data is
|
||||
deferred until get_numpy_array() is called.
|
||||
|
||||
Comparison with Pydicom
|
||||
-----------------------
|
||||
|
||||
This code focusses on getting the pixel data out, which allows some
|
||||
shortcuts, resulting in the code being much smaller.
|
||||
|
||||
Since the processing of data elements is much cheaper (it skips a lot
|
||||
of tags), this code is about 3x faster than pydicom (except for the
|
||||
deflated DICOM files).
|
||||
|
||||
This class does borrow some code (and ideas) from the pydicom
|
||||
project, and (to the best of our knowledge) has the same limitations
|
||||
as pydicom with regard to the type of files that it can handle.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
For more advanced DICOM processing, please check out pydicom.
|
||||
|
||||
* Only a predefined subset of data elements (meta information) is read.
|
||||
* This is a reader; it can not write DICOM files.
|
||||
* (just like pydicom) it can handle none of the compressed DICOM
|
||||
formats except for "Deflated Explicit VR Little Endian"
|
||||
(1.2.840.10008.1.2.1.99).
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, file):
    """Create a reader for *file* (filename or open binary file object)
    and immediately parse its header and meta data elements.
    """
    # Open file if filename given
    if isinstance(file, str):
        self._filename = file
        self._file = open(file, "rb")
    else:
        self._filename = "<unknown file>"
        self._file = file
    # Init variable to store position and size of pixel data
    self._pixel_data_loc = None
    # The meta header is always explicit and little endian
    self.is_implicit_VR = False
    self.is_little_endian = True
    self._unpackPrefix = "<"
    # Dict to store data elements of interest in
    self._info = {}
    # VR Conversion: map DICOM Value Representations to decoders.
    self._converters = {
        # Numbers
        "US": lambda x: self._unpack("H", x),
        "UL": lambda x: self._unpack("L", x),
        # Numbers encoded as strings
        "DS": lambda x: self._splitValues(x, float, "\\"),
        "IS": lambda x: self._splitValues(x, int, "\\"),
        # strings
        "AS": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "DA": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "TM": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "UI": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
        # NOTE(review): CS (Code String) is parsed numerically like DS --
        # presumably for orientation/position tags; confirm intended.
        "CS": lambda x: self._splitValues(x, float, "\\"),
        "PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
    }

    # Initiate reading
    self._read()
|
||||
|
||||
@property
|
||||
def info(self):
|
||||
return self._info
|
||||
|
||||
def _splitValues(self, x, type, splitter):
|
||||
s = x.decode("ascii").strip("\x00")
|
||||
try:
|
||||
if splitter in s:
|
||||
return tuple([type(v) for v in s.split(splitter) if v.strip()])
|
||||
else:
|
||||
return type(s)
|
||||
except ValueError:
|
||||
return s
|
||||
|
||||
def _unpack(self, fmt, value):
|
||||
return struct.unpack(self._unpackPrefix + fmt, value)[0]
|
||||
|
||||
# Really only so we need minimal changes to _pixel_data_numpy
|
||||
def __iter__(self):
|
||||
return iter(self._info.keys())
|
||||
|
||||
def __getattr__(self, key):
|
||||
info = object.__getattribute__(self, "_info")
|
||||
if key in info:
|
||||
return info[key]
|
||||
return object.__getattribute__(self, key) # pragma: no cover
|
||||
|
||||
def _read(self):
    """Parse the DICOM file: magic check, header, elements, shape info."""
    f = self._file
    # Check prefix after preamble: a DICOM file starts with a 128-byte
    # preamble followed by the magic bytes b"DICM".
    f.seek(128)
    if f.read(4) != b"DICM":
        raise NotADicomFile("Not a valid DICOM file.")
    # Read
    self._read_header()
    self._read_data_elements()
    self._get_shape_and_sampling()
    # Close if done, reopen if necessary to read pixel data
    if os.path.isfile(self._filename):
        self._file.close()
        self._file = None
|
||||
|
||||
def _readDataElement(self):
    """Read one data element and return (group, element, value).

    Pixel data (7FE0,0010) is not read here: its position and length are
    recorded in self._pixel_data_loc so it can be loaded lazily later.
    """
    f = self._file
    # Get group and element
    group = self._unpack("H", f.read(2))
    element = self._unpack("H", f.read(2))
    # Get value length
    if self.is_implicit_VR:
        vl = self._unpack("I", f.read(4))
    else:
        vr = f.read(2)
        if vr in (b"OB", b"OW", b"SQ", b"UN"):
            # These VRs use a 2-byte reserved field plus a 4-byte length.
            reserved = f.read(2)  # noqa
            vl = self._unpack("I", f.read(4))
        else:
            vl = self._unpack("H", f.read(2))
    # Get value
    if group == 0x7FE0 and element == 0x0010:
        # Defer loading of the (potentially large) pixel data.
        here = f.tell()
        self._pixel_data_loc = here, vl
        f.seek(here + vl)
        return group, element, b"Deferred loading of pixel data"
    else:
        if vl == 0xFFFFFFFF:
            # Undefined length: scan forward for the sequence delimiter.
            value = self._read_undefined_length_value()
        else:
            value = f.read(vl)
        return group, element, value
|
||||
|
||||
def _read_undefined_length_value(self, read_size=128):
    """Read a value of undefined (0xFFFFFFFF) length by scanning chunks
    for the sequence delimiter tag.

    Copied (in compacted form) from PyDicom
    Copyright Darcy Mason.
    """
    fp = self._file
    # data_start = fp.tell()
    # Rewind by 3 bytes after each miss so a 4-byte delimiter straddling
    # a chunk boundary is still found.
    search_rewind = 3
    bytes_to_find = struct.pack(
        self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1]
    )

    found = False
    value_chunks = []
    while not found:
        chunk_start = fp.tell()
        bytes_read = fp.read(read_size)
        if len(bytes_read) < read_size:
            # try again,
            # if still don't get required amount, this is last block
            new_bytes = fp.read(read_size - len(bytes_read))
            bytes_read += new_bytes
            if len(bytes_read) < read_size:
                raise EOFError(
                    "End of file reached before sequence " "delimiter found."
                )
        index = bytes_read.find(bytes_to_find)
        if index != -1:
            found = True
            value_chunks.append(bytes_read[:index])
            fp.seek(chunk_start + index + 4)  # rewind to end of delimiter
            # The delimiter should be followed by a zero (4-byte) length.
            length = fp.read(4)
            if length != b"\0\0\0\0":
                logger.warning(
                    "Expected 4 zero bytes after undefined length " "delimiter"
                )
        else:
            fp.seek(fp.tell() - search_rewind)  # rewind a bit
            # accumulate the bytes read (not including the rewind)
            value_chunks.append(bytes_read[:-search_rewind])

    # if get here then have found the byte string
    return b"".join(value_chunks)
|
||||
|
||||
def _read_header(self):
    """Read the group-0002 meta header and configure the transfer syntax.

    Sets is_implicit_VR, is_little_endian and _unpackPrefix for the rest
    of the dataset; raises CompressedDicom for unsupported syntaxes.
    """
    f = self._file
    TransferSyntaxUID = None

    # Read all elements, store transferSyntax when we encounter it
    try:
        while True:
            fp_save = f.tell()
            # Get element
            group, element, value = self._readDataElement()
            if group == 0x02:
                # NOTE(review): the inner 'group == 0x02' test is
                # redundant inside this branch.
                if group == 0x02 and element == 0x10:
                    TransferSyntaxUID = value.decode("ascii").strip("\x00")
            else:
                # No more group 2: rewind and break
                # (don't trust group length)
                f.seek(fp_save)
                break
    except (EOFError, struct.error):  # pragma: no cover
        raise RuntimeError("End of file reached while still in header.")

    # Handle transfer syntax
    self._info["TransferSyntaxUID"] = TransferSyntaxUID
    #
    if TransferSyntaxUID is None:
        # Assume ExplicitVRLittleEndian
        is_implicit_VR, is_little_endian = False, True
    elif TransferSyntaxUID == "1.2.840.10008.1.2.1":
        # ExplicitVRLittleEndian
        is_implicit_VR, is_little_endian = False, True
    elif TransferSyntaxUID == "1.2.840.10008.1.2.2":
        # ExplicitVRBigEndian
        is_implicit_VR, is_little_endian = False, False
    elif TransferSyntaxUID == "1.2.840.10008.1.2":
        # implicit VR little endian
        is_implicit_VR, is_little_endian = True, True
    elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99":
        # DeflatedExplicitVRLittleEndian:
        is_implicit_VR, is_little_endian = False, True
        self._inflate()
    else:
        # Unsupported (compressed) syntax: raise with a descriptive hint.
        # http://www.dicomlibrary.com/dicom/transfer-syntax/
        t, extra_info = TransferSyntaxUID, ""
        if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99":
            extra_info = " (JPEG)"
        if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99":
            extra_info = " (JPEG 2000)"
        if t == "1.2.840.10008.1.2.5":
            extra_info = " (RLE)"
        if t == "1.2.840.10008.1.2.6.1":
            extra_info = " (RFC 2557)"
        raise CompressedDicom(
            "The dicom reader can only read files with "
            "uncompressed image data - not %r%s. You "
            "can try using dcmtk or gdcm to convert the "
            "image." % (t, extra_info)
        )

    # From hereon, use implicit/explicit big/little endian
    self.is_implicit_VR = is_implicit_VR
    self.is_little_endian = is_little_endian
    # "><"[True] == "<" (little endian); "><"[False] == ">" (big endian)
    self._unpackPrefix = "><"[is_little_endian]
|
||||
|
||||
def _read_data_elements(self):
    """Read all dataset elements, storing only those listed in MINIDICT."""
    info = self._info
    try:
        while True:
            # Get element
            group, element, value = self._readDataElement()
            # Is it a group we are interested in?
            if group in GROUPS:
                key = (group, element)
                name, vr = MINIDICT.get(key, (None, None))
                # Is it an element we are interested in?
                if name:
                    # Store value, converted according to its VR
                    # (unknown VRs are stored as raw bytes).
                    converter = self._converters.get(vr, lambda x: x)
                    info[name] = converter(value)
    except (EOFError, struct.error):
        pass  # end of file ...
|
||||
|
||||
def get_numpy_array(self):
    """Get numpy array for this DICOM file, with the correct shape,
    and pixel values scaled appropriately.

    Pixel data is loaded lazily on first call and then dropped again to
    preserve memory.
    """
    # Is there pixel data at all?
    if "PixelData" not in self:
        raise TypeError("No pixel data found in this dataset.")

    # Load it now if it was not already loaded
    # (the deferred placeholder is a short bytes message, < 100 bytes).
    if self._pixel_data_loc and len(self.PixelData) < 100:
        # Reopen file?
        close_file = False
        if self._file is None:
            close_file = True
            self._file = open(self._filename, "rb")
        # Read data
        self._file.seek(self._pixel_data_loc[0])
        if self._pixel_data_loc[1] == 0xFFFFFFFF:
            value = self._read_undefined_length_value()
        else:
            value = self._file.read(self._pixel_data_loc[1])
        # Close file
        if close_file:
            self._file.close()
            self._file = None
        # Overwrite
        self._info["PixelData"] = value

    # Get data
    data = self._pixel_data_numpy()
    data = self._apply_slope_and_offset(data)

    # Remove data again to preserve memory
    # Note that the data for the original file is loaded twice ...
    self._info["PixelData"] = (
        b"Data converted to numpy array, " + b"raw data removed to preserve memory"
    )
    return data
|
||||
|
||||
def _get_shape_and_sampling(self):
|
||||
"""Get shape and sampling without actuall using the pixel data.
|
||||
In this way, the user can get an idea what's inside without having
|
||||
to load it.
|
||||
"""
|
||||
# Get shape (in the same way that pydicom does)
|
||||
if "NumberOfFrames" in self and self.NumberOfFrames > 1:
|
||||
if self.SamplesPerPixel > 1:
|
||||
shape = (
|
||||
self.SamplesPerPixel,
|
||||
self.NumberOfFrames,
|
||||
self.Rows,
|
||||
self.Columns,
|
||||
)
|
||||
else:
|
||||
shape = self.NumberOfFrames, self.Rows, self.Columns
|
||||
elif "SamplesPerPixel" in self:
|
||||
if self.SamplesPerPixel > 1:
|
||||
if self.BitsAllocated == 8:
|
||||
shape = self.SamplesPerPixel, self.Rows, self.Columns
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
"DICOM plugin only handles "
|
||||
"SamplesPerPixel > 1 if Bits "
|
||||
"Allocated = 8"
|
||||
)
|
||||
else:
|
||||
shape = self.Rows, self.Columns
|
||||
else:
|
||||
raise RuntimeError(
|
||||
"DICOM file has no SamplesPerPixel " "(perhaps this is a report?)"
|
||||
)
|
||||
|
||||
# Try getting sampling between pixels
|
||||
if "PixelSpacing" in self:
|
||||
sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1])
|
||||
else:
|
||||
sampling = 1.0, 1.0
|
||||
if "SliceSpacing" in self:
|
||||
sampling = (abs(self.SliceSpacing),) + sampling
|
||||
|
||||
# Ensure that sampling has as many elements as shape
|
||||
sampling = (1.0,) * (len(shape) - len(sampling)) + sampling[-len(shape) :]
|
||||
|
||||
# Set shape and sampling
|
||||
self._info["shape"] = shape
|
||||
self._info["sampling"] = sampling
|
||||
|
||||
def _pixel_data_numpy(self):
    """Return a NumPy array of the (raw, unscaled) pixel data."""
    # Taken from pydicom
    # Copyright (c) 2008-2012 Darcy Mason

    if "PixelData" not in self:
        raise TypeError("No pixel data found in this dataset.")

    # Bytes need swapping when file endianness != machine endianness
    need_byteswap = self.is_little_endian != sys_is_little_endian

    # Make NumPy format code, e.g. "uint16", "int32" etc
    # from two pieces of info:
    #     self.PixelRepresentation -- 0 for unsigned, 1 for signed;
    #     self.BitsAllocated -- 8, 16, or 32
    format_str = "%sint%d" % (
        ("u", "")[self.PixelRepresentation],
        self.BitsAllocated,
    )
    try:
        numpy_format = np.dtype(format_str)
    except TypeError:  # pragma: no cover
        # Fix: report format_str here. numpy_format is not bound when
        # np.dtype() raises, so using it turned this error path into a
        # NameError instead of the intended TypeError.
        raise TypeError(
            "Data type not understood by NumPy: format='%s', "
            " PixelRepresentation=%d, BitsAllocated=%d"
            % (format_str, self.PixelRepresentation, self.BitsAllocated)
        )

    # Have correct Numpy format, so create the NumPy array.
    # Copy so the array owns (writable) memory, independent of PixelData.
    arr = np.frombuffer(self.PixelData, numpy_format).copy()

    # XXX byte swap - may later handle this in read_file!!?
    if need_byteswap:
        arr.byteswap(True)  # True means swap in-place, don't make new copy

    # Note the following reshape operations return a new *view* onto arr,
    # but don't copy the data
    arr = arr.reshape(*self._info["shape"])
    return arr
def _apply_slope_and_offset(self, data):
|
||||
"""
|
||||
If RescaleSlope and RescaleIntercept are present in the data,
|
||||
apply them. The data type of the data is changed if necessary.
|
||||
"""
|
||||
# Obtain slope and offset
|
||||
slope, offset = 1, 0
|
||||
needFloats, needApplySlopeOffset = False, False
|
||||
if "RescaleSlope" in self:
|
||||
needApplySlopeOffset = True
|
||||
slope = self.RescaleSlope
|
||||
if "RescaleIntercept" in self:
|
||||
needApplySlopeOffset = True
|
||||
offset = self.RescaleIntercept
|
||||
if int(slope) != slope or int(offset) != offset:
|
||||
needFloats = True
|
||||
if not needFloats:
|
||||
slope, offset = int(slope), int(offset)
|
||||
|
||||
# Apply slope and offset
|
||||
if needApplySlopeOffset:
|
||||
# Maybe we need to change the datatype?
|
||||
if data.dtype in [np.float32, np.float64]:
|
||||
pass
|
||||
elif needFloats:
|
||||
data = data.astype(np.float32)
|
||||
else:
|
||||
# Determine required range
|
||||
minReq, maxReq = data.min(), data.max()
|
||||
minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset])
|
||||
maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset])
|
||||
|
||||
# Determine required datatype from that
|
||||
dtype = None
|
||||
if minReq < 0:
|
||||
# Signed integer type
|
||||
maxReq = max([-minReq, maxReq])
|
||||
if maxReq < 2**7:
|
||||
dtype = np.int8
|
||||
elif maxReq < 2**15:
|
||||
dtype = np.int16
|
||||
elif maxReq < 2**31:
|
||||
dtype = np.int32
|
||||
else:
|
||||
dtype = np.float32
|
||||
else:
|
||||
# Unsigned integer type
|
||||
if maxReq < 2**8:
|
||||
dtype = np.int8
|
||||
elif maxReq < 2**16:
|
||||
dtype = np.int16
|
||||
elif maxReq < 2**32:
|
||||
dtype = np.int32
|
||||
else:
|
||||
dtype = np.float32
|
||||
# Change datatype
|
||||
if dtype != data.dtype:
|
||||
data = data.astype(dtype)
|
||||
|
||||
# Apply slope and offset
|
||||
data *= slope
|
||||
data += offset
|
||||
|
||||
# Done
|
||||
return data
|
||||
|
||||
def _inflate(self):
|
||||
# Taken from pydicom
|
||||
# Copyright (c) 2008-2012 Darcy Mason
|
||||
import zlib
|
||||
from io import BytesIO
|
||||
|
||||
# See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
|
||||
# following the file metadata was prepared the normal way,
|
||||
# then "deflate" compression applied.
|
||||
# All that is needed here is to decompress and then
|
||||
# use as normal in a file-like object
|
||||
zipped = self._file.read()
|
||||
# -MAX_WBITS part is from comp.lang.python answer:
|
||||
# groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
|
||||
unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
|
||||
self._file = BytesIO(unzipped) # a file-like object
|
||||
|
||||
|
||||
class DicomSeries(object):
    """DicomSeries

    This class represents a serie of dicom files (SimpleDicomReader
    objects) that belong together. If these are multiple files, they
    represent the slices of a volume (like for CT or MRI).
    """

    def __init__(self, suid, progressIndicator):
        # Init dataset list and the callback
        self._entries = []

        # Init props
        self._suid = suid  # the SeriesInstanceUID shared by all entries
        self._info = {}  # meta data; populated by _finish()
        self._progressIndicator = progressIndicator

    def __len__(self):
        # Number of dicom files (slices) in this series
        return len(self._entries)

    def __iter__(self):
        return iter(self._entries)

    def __getitem__(self, index):
        return self._entries[index]

    @property
    def suid(self):
        # The SeriesInstanceUID of this series
        return self._suid

    @property
    def shape(self):
        """The shape of the data (nz, ny, nx)."""
        return self._info["shape"]

    @property
    def sampling(self):
        """The sampling (voxel distances) of the data (dz, dy, dx)."""
        return self._info["sampling"]

    @property
    def info(self):
        """A dictionary containing the information as present in the
        first dicomfile of this serie. None if there are no entries."""
        return self._info

    @property
    def description(self):
        """A description of the dicom series. Used fields are
        PatientName, shape of the data, SeriesDescription, and
        ImageComments.
        """
        info = self.info

        # If no info available, return simple description
        if not info:  # pragma: no cover
            return "DicomSeries containing %i images" % len(self)

        fields = []
        # Give patient name
        if "PatientName" in info:
            fields.append("" + info["PatientName"])
        # Also add dimensions, e.g. "50x512x512"
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append("x".join(tmp))
        # Try adding more fields
        if "SeriesDescription" in info:
            fields.append("'" + info["SeriesDescription"] + "'")
        if "ImageComments" in info:
            fields.append("'" + info["ImageComments"] + "'")

        # Combine
        return " ".join(fields)

    def __repr__(self):
        adr = hex(id(self)).upper()
        return "<DicomSeries with %i images at %s>" % (len(self), adr)

    def get_numpy_array(self):
        """Get (load) the data that this DicomSeries represents, and return
        it as a numpy array. If this serie contains multiple images, the
        resulting array is 3D, otherwise it's 2D.
        """

        # It's easy if no file or if just a single file
        if len(self) == 0:
            raise ValueError("Serie does not contain any files.")
        elif len(self) == 1:
            return self[0].get_numpy_array()

        # Check info
        if self.info is None:
            raise RuntimeError("Cannot return volume if series not finished.")

        # Init data (using what the dicom packaged produces as a reference)
        slice = self[0].get_numpy_array()
        vol = np.zeros(self.shape, dtype=slice.dtype)
        vol[0] = slice

        # Fill volume, one slice per entry, reporting progress as we go
        self._progressIndicator.start("loading data", "", len(self))
        for z in range(1, len(self)):
            vol[z] = self[z].get_numpy_array()
            self._progressIndicator.set_progress(z + 1)
        self._progressIndicator.finish()

        # Done; collect garbage to free the per-slice arrays just copied
        import gc

        gc.collect()
        return vol

    def _append(self, dcm):
        # Register one SimpleDicomReader with this series
        self._entries.append(dcm)

    def _sort(self):
        # Order the slices by their InstanceNumber
        self._entries.sort(key=lambda k: k.InstanceNumber)

    def _finish(self):
        """
        Evaluate the series of dicom files. Together they should make up
        a volumetric dataset. This means the files should meet certain
        conditions. Also some additional information has to be calculated,
        such as the distance between the slices. This method sets the
        attributes for "shape", "sampling" and "info".

        This method checks:
        * that there are no missing files
        * that the dimensions of all images match
        * that the pixel spacing of all images match
        """

        # The datasets list should be sorted by instance number
        L = self._entries
        if len(L) == 0:
            return
        elif len(L) == 1:
            self._info = L[0].info
            return

        # Get previous
        ds1 = L[0]
        # Init measures to calculate average of
        distance_sum = 0.0
        # Init measures to check (these are in 2D)
        dimensions = ds1.Rows, ds1.Columns
        # sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1])
        sampling = ds1.info["sampling"][:2]  # row, column

        for index in range(len(L)):
            # The first round ds1 and ds2 will be the same, for the
            # distance calculation this does not matter
            # Get current
            ds2 = L[index]
            # Get positions (z-coordinate of the slice)
            pos1 = float(ds1.ImagePositionPatient[2])
            pos2 = float(ds2.ImagePositionPatient[2])
            # Update distance_sum to calculate distance later
            distance_sum += abs(pos1 - pos2)
            # Test measures
            dimensions2 = ds2.Rows, ds2.Columns
            # sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1])
            sampling2 = ds2.info["sampling"][:2]  # row, column
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError("Dimensions of slices does not match.")
            if sampling != sampling2:
                # We can still produce a volume, but we should notify the user
                self._progressIndicator.write("Warn: sampling does not match.")
            # Store previous
            ds1 = ds2

        # Finish calculating average distance
        # (Note that there are len(L)-1 distances)
        distance_mean = distance_sum / (len(L) - 1)

        # Set info dict
        self._info = L[0].info.copy()

        # Store information that is specific for the serie
        self._info["shape"] = (len(L),) + ds2.info["shape"]
        self._info["sampling"] = (distance_mean,) + ds2.info["sampling"]
def list_files(files, path):
    """Recursively append the path of every file under *path* to *files*."""
    with os.scandir(path) as entries:
        for entry in entries:
            if entry.is_dir():
                list_files(files, entry.path)
            elif entry.is_file():
                files.append(entry.path)
def process_directory(request, progressIndicator, readPixelData=False):
    """
    Reads dicom files and returns a list of DicomSeries objects, which
    contain information about the data, and can be used to load the
    image or volume data.

    if readPixelData is True, the pixel data of all series is read. By
    default the loading of pixeldata is deferred until it is requested
    using the DicomSeries.get_pixel_array() method. In general, both
    methods should be equally fast.
    """
    # Get directory to examine
    if os.path.isdir(request.filename):
        path = request.filename
    elif os.path.isfile(request.filename):
        path = os.path.dirname(request.filename)
    else:  # pragma: no cover - tested earlier
        raise ValueError("Dicom plugin needs a valid filename to examine the directory")

    # Check files
    files = []
    list_files(files, path)  # Find files recursively

    # Gather file data and put in DicomSeries
    series = {}  # maps SeriesInstanceUID -> DicomSeries
    count = 0
    progressIndicator.start("examining files", "files", len(files))
    for filename in files:
        # Show progress (note that we always start with a 0.0)
        count += 1
        progressIndicator.set_progress(count)
        # Skip DICOMDIR files
        if filename.count("DICOMDIR"):  # pragma: no cover
            continue
        # Try loading dicom ...
        try:
            dcm = SimpleDicomReader(filename)
        except NotADicomFile:
            continue  # skip non-dicom file
        except Exception as why:  # pragma: no cover
            progressIndicator.write(str(why))
            continue
        # Get SUID and register the file with an existing or new series object
        try:
            suid = dcm.SeriesInstanceUID
        except AttributeError:  # pragma: no cover
            continue  # some other kind of dicom file
        if suid not in series:
            series[suid] = DicomSeries(suid, progressIndicator)
        series[suid]._append(dcm)

    # Finish progress
    # progressIndicator.finish('Found %i series.' % len(series))

    # Make a list and sort, so that the order is deterministic
    series = list(series.values())
    series.sort(key=lambda x: x.suid)

    # Split series if necessary (iterate a copy in reverse, since the
    # split may insert into / remove from the list while we walk it)
    for serie in reversed([serie for serie in series]):
        splitSerieIfRequired(serie, series, progressIndicator)

    # Finish all series; series whose _finish() fails are dropped
    # progressIndicator.start('analyse series', '', len(series))
    series_ = []
    for i in range(len(series)):
        try:
            series[i]._finish()
            series_.append(series[i])
        except Exception as err:  # pragma: no cover
            progressIndicator.write(str(err))
            pass  # Skip serie (probably report-like file without pixels)
        # progressIndicator.set_progress(i+1)
    progressIndicator.finish("Found %i correct series." % len(series_))

    # Done
    return series_
def splitSerieIfRequired(serie, series, progressIndicator):
    """
    Split the serie in multiple series if this is required. The choice
    is based on examining the image position relative to the previous
    image. If it differs too much, it is assumed that there is a new
    dataset. This can happen for example in unsplit gated CT data.
    """

    # Sort the original list and get local name
    serie._sort()
    L = serie._entries
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this
    if "ImagePositionPatient" not in ds1:
        return
    # Initialize a list of new lists (each sub-list is one sub-series)
    L2 = [[ds1]]
    # Init slice distance estimate (0 means "no estimate yet")
    distance = 0

    for index in range(1, len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions (z-coordinate)
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distances
        newDist = abs(pos1 - pos2)
        # deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1 * distance:
            # Start a new sub-series; reset the distance estimate
            L2.append([])
            distance = 0
        else:
            # Test missing file (distance jumped, but not enough to split)
            if distance and newDist > 1.5 * distance:
                progressIndicator.write(
                    "Warning: missing file after %r" % ds1._filename
                )
            distance = newDist
        # Add to last list
        L2[-1].append(ds2)
        # Store previous
        ds1 = ds2

    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, progressIndicator)
            newSerie._entries = L
            series2insert.append(newSerie)
        # Insert series (reversed keeps their relative order) and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
File diff suppressed because it is too large
Load Diff
@@ -1,897 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
# This code was taken from https://github.com/almarklein/visvis/blob/master/vvmovie/images2swf.py
|
||||
|
||||
# styletest: ignore E261
|
||||
|
||||
"""
|
||||
Provides a function (write_swf) to store a series of numpy arrays in an
|
||||
SWF movie, that can be played on a wide range of OS's.
|
||||
|
||||
In desperation of wanting to share animated images, and then lacking a good
|
||||
writer for animated gif or .avi, I decided to look into SWF. This format
|
||||
is very well documented.
|
||||
|
||||
This is a pure python module to create an SWF file that shows a series
|
||||
of images. The images are stored using the DEFLATE algorithm (same as
|
||||
PNG and ZIP and which is included in the standard Python distribution).
|
||||
As this compression algorithm is much more effective than that used in
|
||||
GIF images, we obtain better quality (24 bit colors + alpha channel)
|
||||
while still producing smaller files (a test showed ~75%). Although
|
||||
SWF also allows for JPEG compression, doing so would probably require
|
||||
a third party library for the JPEG encoding/decoding, we could
|
||||
perhaps do this via Pillow or freeimage.
|
||||
|
||||
sources and tools:
|
||||
|
||||
- SWF on wikipedia
|
||||
- Adobes "SWF File Format Specification" version 10
|
||||
(http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf)
|
||||
- swftools (swfdump in specific) for debugging
|
||||
- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really
|
||||
good quality, while file size is reduced with factors 20-100.
|
||||
A good program in my opinion. The free version has the limitation
|
||||
of a watermark in the upper left corner.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import zlib
|
||||
import time # noqa
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# todo: use Pillow to support reading JPEG images from SWF?
|
||||
|
||||
|
||||
# Base functions and classes
|
||||
|
||||
|
||||
class BitArray:
    """Dynamic array of bits that automatically resizes
    with factors of two.
    Append bits using .append() or +=
    You can reverse bits using .reverse()
    """

    def __init__(self, initvalue=None):
        # Bits are stored as the ASCII codes of "0"/"1" in a uint8 buffer
        self.data = np.zeros((16,), dtype=np.uint8)
        self._len = 0
        if initvalue is not None:
            self.append(initvalue)

    def __len__(self):
        # Number of bits actually stored (the buffer may be larger)
        return self._len

    def __repr__(self):
        return self.data[: self._len].tobytes().decode("ascii")

    def _checkSize(self):
        # Double the buffer whenever it has filled up
        capacity = self.data.shape[0]
        if self._len >= capacity:
            grown = np.zeros((capacity * 2,), dtype=np.uint8)
            grown[: self._len] = self.data[: self._len]
            self.data = grown

    def __add__(self, value):
        # Note: "+" mutates and returns self, so += appends in place
        self.append(value)
        return self

    def append(self, bits):
        # Normalize input to a string of "0"/"1" characters
        if isinstance(bits, (BitArray, int)):
            bits = str(bits)
        if not isinstance(bits, str):  # pragma: no cover
            raise ValueError("Append bits as strings or integers!")

        # Add one character at a time, growing the buffer as needed
        for ch in bits:
            self.data[self._len] = ord(ch)
            self._len += 1
            self._checkSize()

    def reverse(self):
        """In-place reverse."""
        flipped = self.data[: self._len][::-1].copy()
        self.data[: self._len] = flipped

    def tobytes(self):
        """Convert to bytes. If necessary,
        zeros are padded to the end (right side).
        """
        bits = str(self)

        # Round the bit count up to whole bytes and right-pad with "0"
        nbytes = (len(bits) + 7) // 8
        bits = bits.ljust(nbytes * 8, "0")

        # Pack each group of 8 bit-characters into one byte
        return b"".join(
            int2uint8(int(bits[i * 8 : (i + 1) * 8], 2)) for i in range(nbytes)
        )
def int2uint32(i):
    """Pack *i* as a 4-byte little-endian unsigned integer."""
    value = int(i)  # accept numpy ints etc.
    return value.to_bytes(4, "little")
def int2uint16(i):
    """Pack *i* as a 2-byte little-endian unsigned integer."""
    value = int(i)  # accept numpy ints etc.
    return value.to_bytes(2, "little")
def int2uint8(i):
    """Pack *i* as a single unsigned byte."""
    value = int(i)  # accept numpy ints etc.
    return value.to_bytes(1, "little")
def int2bits(i, n=None):
    """convert int to a string of bits (0's and 1's in a string),
    pad to n elements. Convert back using int(ss,2)."""
    # Collect bits least-significant first, then flip
    remaining = i
    chars = []
    while remaining > 0:
        chars.append(str(remaining % 2))
        remaining >>= 1
    bitstr = "".join(reversed(chars))

    # Left-pad with zeros to the requested width
    if n is not None:
        if len(bitstr) > n:  # pragma: no cover
            raise ValueError("int2bits fail: len larger than padlength.")
        bitstr = bitstr.rjust(n, "0")

    return BitArray(bitstr)
def bits2int(bb, n=8):
    """Interpret the bytes *bb* as one bit string (bytes assembled in
    little-endian order) and return the integer value of its first
    *n* bits.
    """
    # Each later byte is prepended, i.e. little-endian byte order
    value = "".join(format(byte, "08b") for byte in reversed(bb))

    # Make decimal from the first n bit-characters
    return int(value[:n], 2)
def get_type_and_len(bb):
    """bb should be 6 bytes at least
    Return (type, length, length_of_full_tag)
    """
    # First 16 bits (little-endian): 10 bits tag type + 6 bits length
    value = "".join(format(byte, "08b") for byte in reversed(bb[:2]))
    tagtype = int(value[:10], 2)
    L = int(value[10:], 2)
    L2 = L + 2

    # A 6-bit length of 63 ('111111') signals a long tag header: the
    # real length follows as a 32 bit little-endian integer.
    if L == 63:
        value = "".join(format(byte, "08b") for byte in reversed(bb[2:6]))
        L = int(value, 2)
        L2 = L + 6

    return tagtype, L, L2
def signedint2bits(i, n=None):
    """convert signed int to a string of bits (0's and 1's in a string),
    pad to n elements. Negative numbers are stored in 2's complement bit
    patterns, thus positive numbers always start with a 0.
    """

    # negative number?
    ii = i
    if i < 0:
        # A negative number, -n, is represented as the bitwise opposite of
        ii = abs(ii) - 1  # the positive-zero number n-1.

    # make bits (least significant first, then reverse)
    bb = BitArray()
    while ii > 0:
        bb += str(ii % 2)
        ii = ii >> 1
    bb.reverse()

    # justify
    bb = "0" + str(bb)  # always need the sign bit in front
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("signedint2bits fail: len larger than padlength.")
        bb = bb.rjust(n, "0")

    # was it negative? (then opposite bits)
    # Inverting the bits of (|i| - 1) yields the 2's complement pattern.
    if i < 0:
        bb = bb.replace("0", "x").replace("1", "0").replace("x", "1")

    # done
    return BitArray(bb)
def twits2bits(arr):
    """Given a few (signed) numbers, store them
    as compactly as possible in the way specified by the swf format.
    The numbers are multiplied by 20, assuming they
    are twits.
    Can be used to make the RECT record.
    """
    # Field width: the widest value determines it (minimum 1 bit)
    maxlen = max([1] + [len(signedint2bits(v * 20)) for v in arr])

    # 5-bit width header, then each value right-justified to that width
    bits = int2bits(maxlen, 5)
    for v in arr:
        bits += signedint2bits(v * 20, maxlen)

    return bits
def floats2bits(arr):
    """Given a few (signed) numbers, convert them to bits,
    stored as FB (float bit values). We always use 16.16.
    Negative numbers are not (yet) possible, because I don't
    know how they're implemented (ambiguity).
    """
    bits = int2bits(31, 5)  # 32 does not fit in 5 bits!
    for i in arr:
        if i < 0:  # pragma: no cover
            raise ValueError("Did not implement negative floats!")
        i1 = int(i)  # integer part
        i2 = i - i1  # fractional part
        bits += int2bits(i1, 15)
        # Fix: convert the scaled fraction to int before int2bits.
        # For a fractional input, i2 * 2**16 is a float, and int2bits
        # would append str(float % 2) -- "1.0"/"0.0" -- injecting '.'
        # characters into the bit string.
        bits += int2bits(int(i2 * 2**16), 16)
    return bits
||||
|
||||
# Base Tag
|
||||
|
||||
|
||||
class Tag:
    """Base class for SWF tags. Subclasses set self.tagtype and fill
    self.bytes in process_tag(); get_tag() then prepends the header.
    """

    def __init__(self):
        self.bytes = bytes()
        self.tagtype = -1

    def process_tag(self):
        """Implement this to create the tag."""
        raise NotImplementedError()

    def get_tag(self):
        """Calls processTag and attaches the header."""
        self.process_tag()

        # Header: 10 bits tag type followed by 6 one-bits (= 63 = 0x3f),
        # which signals that a 32 bit length descriptor follows.
        bits = int2bits(self.tagtype, 10)
        bits += "1" * 6  # = 63 = 0x3f
        header = int2uint16(int(str(bits), 2))

        # 32 bit length descriptor, then the payload itself
        header += int2uint32(len(self.bytes))
        return header + self.bytes

    def make_rect_record(self, xmin, xmax, ymin, ymax):
        """Simply uses makeCompactArray to produce
        a RECT Record."""
        return twits2bits([xmin, xmax, ymin, ymax])

    def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None):
        # An empty matrix is one zero byte worth of bits
        if scale_xy is None and rot_xy is None and trans_xy is None:
            return "0" * 8

        bits = BitArray()

        # Scale: flag bit, then two 16.16 fixed-point values
        if scale_xy:
            bits += "1"
            bits += floats2bits([scale_xy[0], scale_xy[1]])
        else:
            bits += "0"

        # Rotation/skew: flag bit, then two 16.16 fixed-point values
        if rot_xy:
            bits += "1"
            bits += floats2bits([rot_xy[0], rot_xy[1]])
        else:
            bits += "0"

        # Translation is always present (no flag bit)
        if trans_xy:
            bits += twits2bits([trans_xy[0], trans_xy[1]])
        else:
            bits += twits2bits([0, 0])

        return bits
||||
# Control tags
|
||||
|
||||
|
||||
class ControlTag(Tag):
    """Base class for SWF control tags."""

    def __init__(self):
        Tag.__init__(self)
class FileAttributesTag(ControlTag):
    """Tag 69 (FileAttributes): body is four zero bytes (all flags off)."""

    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 69

    def process_tag(self):
        # One flag byte plus three reserved bytes, all zero
        self.bytes = b"\x00" * (1 + 3)
class ShowFrameTag(ControlTag):
    """Tag 1 (ShowFrame): has an empty body."""

    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 1

    def process_tag(self):
        self.bytes = bytes()
class SetBackgroundTag(ControlTag):
    """Set the color in 0-255, or 0-1 (if floats given)."""

    def __init__(self, *rgb):
        # Fix: initialize the base class (sets self.bytes), consistent
        # with the other control tags; it was skipped here.
        ControlTag.__init__(self)
        self.tagtype = 9
        if len(rgb) == 1:
            rgb = rgb[0]  # allow passing a single (r, g, b) sequence
        self.rgb = rgb

    def process_tag(self):
        bb = bytes()
        for i in range(3):
            clr = self.rgb[i]
            if isinstance(clr, float):  # pragma: no cover - not used
                clr = clr * 255  # scale 0-1 floats to 0-255
            bb += int2uint8(clr)
        self.bytes = bb
class DoActionTag(Tag):
    """Tag 12 (DoAction): one-byte action codes ('stop'/'play'),
    terminated by a zero byte.
    """

    def __init__(self, action="stop"):
        Tag.__init__(self)
        self.tagtype = 12
        self.actions = [action]

    def append(self, action):  # pragma: no cover - not used
        self.actions.append(action)

    def process_tag(self):
        bb = bytes()
        for action in self.actions:
            action = action.lower()
            # Map action names to their one-byte codes
            if action == "stop":
                bb += b"\x07"
            elif action == "play":  # pragma: no cover - not used
                bb += b"\x06"
            else:  # pragma: no cover
                logger.warning("unkown action: %s" % action)
        # Terminating zero byte ends the action list
        bb += int2uint8(0)
        self.bytes = bb
||||
# Definition tags
|
||||
class DefinitionTag(Tag):
    """Base class for tags that define a character; each instance is
    assigned a unique id from a class-level counter.
    """

    counter = 0  # to give automatically id's

    def __init__(self):
        Tag.__init__(self)
        DefinitionTag.counter += 1
        self.id = DefinitionTag.counter  # id in dictionary
class BitmapTag(DefinitionTag):
    """Tag 36 (DefineBitsLossless2): a zlib-compressed ARGB bitmap."""

    def __init__(self, im):
        DefinitionTag.__init__(self)
        self.tagtype = 36  # DefineBitsLossless2

        # convert image (note that format is ARGB)
        # even a grayscale image is stored in ARGB, nevertheless,
        # the fabilous deflate compression will make it that not much
        # more data is required for storing (25% or so, and less than 10%
        # when storing RGB as ARGB).

        if len(im.shape) == 3:
            if im.shape[2] in [3, 4]:
                # RGB(A): copy color channels into positions 1..3
                # (position 0 is alpha, pre-filled with 255)
                tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
                for i in range(3):
                    tmp[:, :, i + 1] = im[:, :, i]
                if im.shape[2] == 4:
                    tmp[:, :, 0] = im[:, :, 3]  # swap channel where alpha is
            else:  # pragma: no cover
                raise ValueError("Invalid shape to be an image.")

        elif len(im.shape) == 2:
            # Grayscale: replicate into the three color channels; alpha 255
            tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
            for i in range(3):
                tmp[:, :, i + 1] = im[:, :]
        else:  # pragma: no cover
            raise ValueError("Invalid shape to be an image.")

        # we changed the image to uint8 4 channels.
        # now compress!
        self._data = zlib.compress(tmp.tobytes(), zlib.DEFLATED)
        self.imshape = im.shape

    def process_tag(self):
        # build tag
        bb = bytes()
        bb += int2uint16(self.id)  # CharacterID
        bb += int2uint8(5)  # BitmapFormat
        bb += int2uint16(self.imshape[1])  # BitmapWidth
        bb += int2uint16(self.imshape[0])  # BitmapHeight
        bb += self._data  # ZlibBitmapData

        self.bytes = bb
class PlaceObjectTag(ControlTag):
    """Tag 26 (PlaceObject2): place (or move) a character on the
    display list at the given depth and position.
    """

    def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False):
        ControlTag.__init__(self)
        self.tagtype = 26
        self.depth = depth
        self.idToPlace = idToPlace
        self.xy = xy
        self.move = move

    def process_tag(self):
        # Flag byte (8 bits): 4 = has matrix, 2 = has character, 1 = move
        flags = b"\x07" if self.move else b"\x06"

        bb = flags
        bb += int2uint16(self.depth)  # Depth
        bb += int2uint16(self.idToPlace)  # character id
        bb += self.make_matrix_record(trans_xy=self.xy).tobytes()  # MATRIX record
        self.bytes = bb
class ShapeTag(DefinitionTag):
    # DefineShape tag: a rectangle of size `wh` at `xy`, filled with the
    # bitmap identified by `bitmapId`.

    def __init__(self, bitmapId, xy, wh):
        DefinitionTag.__init__(self)
        self.tagtype = 2  # DefineShape
        self.bitmapId = bitmapId  # character id of the bitmap used as fill
        self.xy = xy  # top-left corner (used for the ShapeBounds rect)
        self.wh = wh  # width/height of the rectangle

    def process_tag(self):
        """Build a DefineShape tag (rectangle with a bitmap fill) into self.bytes."""

        bb = bytes()
        bb += int2uint16(self.id)
        xy, wh = self.xy, self.wh
        tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1])  # ShapeBounds
        bb += tmp.tobytes()

        # make SHAPEWITHSTYLE structure

        # first entry: FILLSTYLEARRAY with in it a single fill style
        bb += int2uint8(1)  # FillStyleCount
        bb += "\x41".encode("ascii")  # FillStyleType (0x41 or 0x43 unsmoothed)
        bb += int2uint16(self.bitmapId)  # BitmapId
        # bb += '\x00' # BitmapMatrix (empty matrix with leftover bits filled)
        # Scale by 20 because SWF coordinates are in twips (1/20 px).
        bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes()

        # # first entry: FILLSTYLEARRAY with in it a single fill style
        # bb += int2uint8(1)  # FillStyleCount
        # bb += '\x00' # solid fill
        # bb += '\x00\x00\xff' # color

        # second entry: LINESTYLEARRAY with a single line style
        bb += int2uint8(0)  # LineStyleCount
        # bb += int2uint16(0*20) # Width
        # bb += '\x00\xff\x00' # Color

        # third and fourth entry: NumFillBits and NumLineBits (4 bits each)
        # I each give them four bits, so 16 styles possible.
        bb += "\x44".encode("ascii")

        self.bytes = bb

        # last entries: SHAPERECORDs ... (individual shape records not aligned)
        # STYLECHANGERECORD
        # NOTE(review): moveTo uses self.wh, not self.xy — presumably the shape
        # is drawn relative to its own origin; verify against SWF spec.
        bits = BitArray()
        bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1]))
        # STRAIGHTEDGERECORD 4x — trace the rectangle counter-clockwise back
        # to the starting point.
        bits += self.make_straight_edge_record(-self.wh[0], 0)
        bits += self.make_straight_edge_record(0, -self.wh[1])
        bits += self.make_straight_edge_record(self.wh[0], 0)
        bits += self.make_straight_edge_record(0, self.wh[1])

        # ENDSHAPRECORD
        bits += self.make_end_shape_record()

        self.bytes += bits.tobytes()

        # done
        # self.bytes = bb

    def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None):
        """Build a STYLECHANGERECORD as a BitArray.

        A falsy lineStyle/fillStyle (None or 0) means "do not change it".
        """
        # first 6 flags
        # Note that we use FillStyle1. If we don't flash (at least 8) does not
        # recognize the frames properly when importing to library.

        bits = BitArray()
        bits += "0"  # TypeFlag (not an edge record)
        bits += "0"  # StateNewStyles (only for DefineShape2 and Defineshape3)
        if lineStyle:
            bits += "1"  # StateLineStyle
        else:
            bits += "0"
        if fillStyle:
            bits += "1"  # StateFillStyle1
        else:
            bits += "0"
        bits += "0"  # StateFillStyle0
        if moveTo:
            bits += "1"  # StateMoveTo
        else:
            bits += "0"

        # give information
        # todo: nbits for fillStyle and lineStyle is hard coded.

        if moveTo:
            bits += twits2bits([moveTo[0], moveTo[1]])
        if fillStyle:
            bits += int2bits(fillStyle, 4)
        if lineStyle:
            bits += int2bits(lineStyle, 4)

        return bits

    def make_straight_edge_record(self, *dxdy):
        """Build a STRAIGHTEDGERECORD for a line delta (dx, dy) in pixels.

        Accepts either two arguments or a single (dx, dy) pair.
        """
        if len(dxdy) == 1:
            dxdy = dxdy[0]

        # determine required number of bits (deltas are stored in twips: *20)
        xbits = signedint2bits(dxdy[0] * 20)
        ybits = signedint2bits(dxdy[1] * 20)
        nbits = max([len(xbits), len(ybits)])

        bits = BitArray()
        bits += "11"  # TypeFlag and StraightFlag
        bits += int2bits(nbits - 2, 4)  # NumBits is stored minus 2 per the format
        bits += "1"  # GeneralLineFlag
        bits += signedint2bits(dxdy[0] * 20, nbits)
        bits += signedint2bits(dxdy[1] * 20, nbits)

        # note: I do not make use of vertical/horizontal only lines...

        return bits

    def make_end_shape_record(self):
        """Build the 6-zero-bit ENDSHAPERECORD terminating the shape records."""
        bits = BitArray()
        bits += "0"  # TypeFlag: no edge
        bits += "0" * 5  # EndOfShape
        return bits
def read_pixels(bb, i, tagType, L1):
    """With the pointer seeded just after the recordheader, read the pixeldata.

    Parameters
    ----------
    bb : bytes
        The full SWF file contents.
    i : int
        Offset of the tag body (just past the record header).
    tagType : int
        20 (DefineBitsLossless, RGB) or 36 (DefineBitsLossless2, ARGB).
    L1 : int
        Tag body length, used to delimit the compressed pixel data.

    Returns
    -------
    ndarray or None
        The decoded image, or None when the data cannot be read.
    """
    # Get info
    charId = bb[i : i + 2]  # noqa
    i += 2
    fmt = ord(bb[i : i + 1])  # renamed: `format` shadowed the builtin
    i += 1
    width = bits2int(bb[i : i + 2], 16)
    i += 2
    height = bits2int(bb[i : i + 2], 16)
    i += 2

    # Only BitmapFormat 5 (zlib RGB/ARGB) is supported.
    if fmt != 5:
        logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.")
        # Fix: the original fell through to `return a` with `a` unbound,
        # raising NameError. Callers already check for None.
        return None

    # Read byte data
    offset = 2 + 1 + 2 + 2  # all the info bits
    bb2 = bb[i : i + (L1 - offset)]

    # Decompress and make numpy array
    data = zlib.decompress(bb2)
    a = np.frombuffer(data, dtype=np.uint8)

    # Set shape
    if tagType == 20:
        # DefineBitsLossless - RGB data
        try:
            a.shape = height, width, 3
        except Exception:
            # Byte align stuff might cause troubles
            logger.warning("Cannot read image due to byte alignment")
            # Fix: do not hand a malformed flat array back to the caller.
            return None
    if tagType == 36:
        # DefineBitsLossless2 - ARGB data
        a.shape = height, width, 4
        # Swap alpha channel to make RGBA
        b = a
        a = np.zeros_like(a)
        a[:, :, 0] = b[:, :, 1]
        a[:, :, 1] = b[:, :, 2]
        a[:, :, 2] = b[:, :, 3]
        a[:, :, 3] = b[:, :, 0]

    return a
# Last few functions
|
||||
|
||||
|
||||
# These are the original public functions, we don't use them, but we
|
||||
# keep it so that in principle this module can be used stand-alone.
|
||||
|
||||
|
||||
def checkImages(images):  # pragma: no cover
    """checkImages(images)

    Validate a list of numpy images and normalize each to uint8,
    scaling/clipping float data where needed. Shared by all movie formats.
    """
    checked = []

    for im in images:
        # Guard clause: anything that is not a numpy array is rejected.
        if not isinstance(im, np.ndarray):
            raise ValueError("Invalid image type: " + str(type(im)))

        # Normalize dtype to uint8.
        if im.dtype == np.uint8:
            checked.append(im)  # already fine, keep as-is
        elif im.dtype in [np.float32, np.float64]:
            peak = im.max()
            if not (128 < peak < 300):
                # Looks like 0..1 data: clip and scale to 0..255.
                im = im.copy()
                im[im < 0] = 0
                im[im > 1] = 1
                im *= 255
            # else: values already appear to span 0..255
            checked.append(im.astype(np.uint8))
        else:
            im = im.astype(np.uint8)
            checked.append(im)

        # Validate dimensions: 2D grayscale, or 3D with 3/4 channels.
        if im.ndim == 3 and im.shape[2] not in [3, 4]:
            raise ValueError("This array can not represent an image.")
        if im.ndim not in (2, 3):
            raise ValueError("This array can not represent an image.")

    return checked
def build_file(
    fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8
):  # pragma: no cover
    """Write an SWF header, the given tags, and an end tag to *fp*,
    then patch the FileLength field with the final size."""

    rect = Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
    header = b"".join(
        [
            "F".encode("ascii"),  # uncompressed
            "WS".encode("ascii"),  # signature bytes
            int2uint8(version),  # version
            "0000".encode("ascii"),  # FileLength placeholder, patched below
            rect,  # frame size RECT
            int2uint8(0) + int2uint8(fps),  # FrameRate
            int2uint16(nframes),
        ]
    )
    fp.write(header)

    # produce all tags
    for tag in taglist:
        fp.write(tag.get_tag())

    # finish with end tag
    fp.write("\x00\x00".encode("ascii"))

    # go back and write the real file size at offset 4
    sze = fp.tell()
    fp.seek(4)
    fp.write(int2uint32(sze))
def write_swf(filename, images, duration=0.1, repeat=True):  # pragma: no cover
    """Write an swf-file from the specified images. If repeat is False,
    the movie is finished with a stop action. Duration may also
    be a list with durations for each frame (note that the duration
    for each frame is always an integer amount of the minimum duration.)

    Images should be a list consisting numpy arrays with values between
    0 and 255 for integer types, and between 0 and 1 for float types.

    Raises
    ------
    ValueError
        If `images` is empty, an image is invalid, or len(duration)
        does not match the number of images.
    """

    # Check images
    images2 = checkImages(images)
    if not images2:
        # Guard: without this, the code below fails with a confusing
        # "min() arg is an empty sequence" and `wh` would be unbound.
        raise ValueError("Need at least one image to write an SWF file.")

    # Init
    taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)]

    # Check duration
    if hasattr(duration, "__len__"):
        if len(duration) == len(images2):
            duration = [d for d in duration]
        else:
            raise ValueError("len(duration) doesn't match amount of images.")
    else:
        duration = [duration for im in images2]

    # Build delays list: each frame is shown for an integer multiple of
    # the minimum duration.
    minDuration = float(min(duration))
    delays = [round(d / minDuration) for d in duration]
    delays = [max(1, int(d)) for d in delays]

    # Get FPS
    fps = 1.0 / minDuration

    # Produce series of tags for each image: bitmap, a shape that uses it,
    # a placement, and one ShowFrame per delay unit.
    nframes = 0
    for im in images2:
        bm = BitmapTag(im)
        wh = (im.shape[1], im.shape[0])
        sh = ShapeTag(bm.id, (0, 0), wh)
        po = PlaceObjectTag(1, sh.id, move=nframes > 0)
        taglist.extend([bm, sh, po])
        for i in range(delays[nframes]):
            taglist.append(ShowFrameTag())
        nframes += 1

    if not repeat:
        taglist.append(DoActionTag("stop"))

    # Build file. `with` guarantees the file is closed; the original
    # try/except-raise/finally was equivalent but noisier.
    with open(filename, "wb") as fp:
        build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps)
def read_swf(filename):  # pragma: no cover
    """Read all images from an SWF (shockwave flash) file. Returns a list
    of numpy arrays.

    Limitation: only read the PNG encoded images (not the JPG encoded ones).
    """

    # Check whether it exists
    if not os.path.isfile(filename):
        raise IOError("File not found: " + str(filename))

    # Init images
    images = []

    # Read the whole file up-front. A context manager replaces the manual
    # open/try/finally-close of the original; parsing below only needs `bb`.
    with open(filename, "rb") as fp:
        bb = fp.read()

    # Check opening tag
    tmp = bb[0:3].decode("ascii", "ignore")
    if tmp.upper() == "FWS":
        pass  # ok, uncompressed
    elif tmp.upper() == "CWS":
        # Decompress movie (body after the 8-byte header is zlib data)
        bb = bb[:8] + zlib.decompress(bb[8:])
    else:
        raise IOError("Not a valid SWF file: " + str(filename))

    # Set filepointer at first tag (skipping framesize RECT and two uint16's)
    i = 8
    nbits = bits2int(bb[i : i + 1], 5)  # skip FrameSize
    nbits = 5 + nbits * 4
    Lrect = (nbits + 7) // 8  # byte length of the RECT, rounded up
    i += Lrect + 4

    # Iterate over the tags
    while True:
        # Get tag header
        head = bb[i : i + 6]
        if not head:
            break  # Done (we missed end tag)

        # Determine type and length
        T, L1, L2 = get_type_and_len(head)
        if not L2:
            logger.warning("Invalid tag length, could not proceed")
            break

        # Read image if we can
        if T in [20, 36]:
            # DefineBitsLossless(2)
            im = read_pixels(bb, i + 6, T, L1)
            if im is not None:
                images.append(im)
        elif T in [6, 21, 35, 90]:
            logger.warning("Ignoring JPEG image: cannot read JPEG.")
        else:
            pass  # Not an image tag

        # Detect end tag
        if T == 0:
            break

        # Next tag!
        i += L2

    # Done
    return images
# Backward compatibility: expose the camelCase names that were public when
# this module was the stand-alone images2swf.
writeSwf = write_swf
readSwf = read_swf
|
||||
10680
.CondaPkg/env/Lib/site-packages/imageio/plugins/_tifffile.py
vendored
10680
.CondaPkg/env/Lib/site-packages/imageio/plugins/_tifffile.py
vendored
File diff suppressed because it is too large
Load Diff
@@ -1,324 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write BSDF files.
|
||||
|
||||
Backend Library: internal
|
||||
|
||||
The BSDF format enables reading and writing of image data in the
|
||||
BSDF serialization format. This format allows storage of images, volumes,
|
||||
and series thereof. Data can be of any numeric data type, and can
|
||||
optionally be compressed. Each image/volume can have associated
|
||||
meta data, which can consist of any data type supported by BSDF.
|
||||
|
||||
By default, image data is lazily loaded; the actual image data is
|
||||
not read until it is requested. This allows storing multiple images
|
||||
in a single file and still have fast access to individual images.
|
||||
Alternatively, a series of images can be read in streaming mode, reading
|
||||
images as they are read (e.g. from http).
|
||||
|
||||
BSDF is a simple generic binary format. It is easy to extend and there
|
||||
are standard extension definitions for 2D and 3D image data.
|
||||
Read more at http://bsdf.io.
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
random_access : bool
|
||||
Whether individual images in the file can be read in random order.
|
||||
Defaults to True for normal files, and to False when reading from HTTP.
|
||||
If False, the file is read in "streaming mode", allowing reading
|
||||
files as they are read, but without support for "rewinding".
|
||||
Note that setting this to True when reading from HTTP, the whole file
|
||||
is read upon opening it (since lazy loading is not possible over HTTP).
|
||||
|
||||
compression : int
|
||||
Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
|
||||
compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
|
||||
compression (more compact but slower). Default 1 (zlib).
|
||||
Note that some BSDF implementations may not support compression
|
||||
(e.g. JavaScript).
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
def get_bsdf_serializer(options):
    """Create a BsdfSerializer equipped with the ndarray/image extensions.

    Returns the (lazily imported) bsdf module and the serializer.
    """
    from . import _bsdf as bsdf

    class NDArrayExtension(bsdf.Extension):
        """Copy of BSDF's NDArrayExtension but deal with lazy blobs."""

        name = "ndarray"
        cls = np.ndarray

        def encode(self, s, v):
            return {"shape": v.shape, "dtype": str(v.dtype), "data": v.tobytes()}

        def decode(self, s, v):
            # Keep the dict form: blobs may be lazy; Image decodes on demand.
            return v

    class ImageExtension(bsdf.Extension):
        """Shared encode/decode for the Image-based extensions below."""

        def encode(self, s, v):
            return {"array": v.array, "meta": v.meta}

        def decode(self, s, v):
            return Image(v["array"], v["meta"])

    class Image2DExtension(ImageExtension):
        name = "image2d"
        cls = Image2D

    class Image3DExtension(ImageExtension):
        name = "image3d"
        cls = Image3D

    extensions = [NDArrayExtension, Image2DExtension, Image3DExtension]
    return bsdf, bsdf.BsdfSerializer(extensions, **options)
class Image:
    """Pair an array (or an encoded/lazy representation of one) with meta data.

    The BSDF extensions trigger on this class so images are serialized as
    proper image structures rather than plain arrays.
    """

    def __init__(self, array, meta):
        self.array = array
        self.meta = meta

    def get_array(self):
        """Return the data as an ndarray, decoding a (possibly lazy) blob
        on first access and caching the result."""
        if not isinstance(self.array, np.ndarray):
            encoded = self.array
            raw = encoded["data"]
            if not isinstance(raw, bytes):  # then it's a lazy bsdf.Blob
                raw = raw.get_bytes()
            arr = np.frombuffer(raw, dtype=encoded["dtype"])
            arr.shape = encoded["shape"]
            self.array = arr
        return self.array

    def get_meta(self):
        """Return the meta data."""
        return self.meta
class Image2D(Image):
    # Marker subclass: triggers the BSDF "image2d" extension.
    pass
class Image3D(Image):
    # Marker subclass: triggers the BSDF "image3d" extension.
    pass
class BsdfFormat(Format):
    """The BSDF format enables reading and writing of image data in the
    BSDF serialization format. This format allows storage of images, volumes,
    and series thereof. Data can be of any numeric data type, and can
    optionally be compressed. Each image/volume can have associated
    meta data, which can consist of any data type supported by BSDF.

    By default, image data is lazily loaded; the actual image data is
    not read until it is requested. This allows storing multiple images
    in a single file and still have fast access to individual images.
    Alternatively, a series of images can be read in streaming mode, reading
    images as they are read (e.g. from http).

    BSDF is a simple generic binary format. It is easy to extend and there
    are standard extension definitions for 2D and 3D image data.
    Read more at http://bsdf.io.

    Parameters for reading
    ----------------------
    random_access : bool
        Whether individual images in the file can be read in random order.
        Defaults to True for normal files, and to False when reading from HTTP.
        If False, the file is read in "streaming mode", allowing reading
        files as they are read, but without support for "rewinding".
        Note that setting this to True when reading from HTTP, the whole file
        is read upon opening it (since lazy loading is not possible over HTTP).

    Parameters for saving
    ---------------------
    compression : {0, 1, 2}
        Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
        compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
        compression (more compact but slower). Default 1 (zlib).
        Note that some BSDF implementations may not support compression
        (e.g. JavaScript).

    """

    def _can_read(self, request):
        # Detect BSDF by the magic bytes; returns None (falsy) otherwise.
        if request.mode[1] in (self.modes + "?"):
            # if request.extension in self.extensions:
            #     return True
            if request.firstbytes.startswith(b"BSDF"):
                return True

    def _can_write(self, request):
        # Writing is offered purely by file extension.
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, random_access=None):
            """Open the BSDF stream, choosing lazy vs streaming mode."""
            # Validate - we need a BSDF file consisting of a list of images
            # The list is typically a stream, but does not have to be.
            assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file"
            # self.request.firstbytes[5:6] == major and minor version
            if not (
                self.request.firstbytes[6:15] == b"M\x07image2D"
                or self.request.firstbytes[6:15] == b"M\x07image3D"
                or self.request.firstbytes[6:7] == b"l"
            ):
                pass  # Actually, follow a more duck-type approach ...
                # raise RuntimeError('BSDF file does not look like an '
                #                    'image container.')
            # Set options. If we think that seeking is allowed, we lazily load
            # blobs, and set streaming to False (i.e. the whole file is read,
            # but we skip over binary blobs), so that we subsequently allow
            # random access to the images.
            # If seeking is not allowed (e.g. with a http request), we cannot
            # lazily load blobs, but we can still load streaming from the web.
            options = {}
            if self.request.filename.startswith(("http://", "https://")):
                ra = False if random_access is None else bool(random_access)
                options["lazy_blob"] = False  # Because we cannot seek now
                options["load_streaming"] = not ra  # Load as a stream?
            else:
                ra = True if random_access is None else bool(random_access)
                options["lazy_blob"] = ra  # Don't read data until needed
                options["load_streaming"] = not ra

            file = self.request.get_file()
            bsdf, self._serializer = get_bsdf_serializer(options)
            self._stream = self._serializer.load(file)
            # Another validation: a bare {array, meta} dict is accepted and
            # promoted to a singleton Image.
            if (
                isinstance(self._stream, dict)
                and "meta" in self._stream
                and "array" in self._stream
            ):
                self._stream = Image(self._stream["array"], self._stream["meta"])
            if not isinstance(self._stream, (Image, list, bsdf.ListStream)):
                # NOTE(review): message reads "does not look seem" — typo in
                # the user-facing string; left as-is here (doc-only change).
                raise RuntimeError(
                    "BSDF file does not look seem to have an " "image container."
                )

        def _close(self):
            # Nothing to release; the request owns the file object.
            pass

        def _get_length(self):
            """Number of images: 1 for a singleton, len() for a list, and
            the stream count for a ListStream (np.inf when unknown)."""
            if isinstance(self._stream, Image):
                return 1
            elif isinstance(self._stream, list):
                return len(self._stream)
            elif self._stream.count < 0:
                return np.inf
            return self._stream.count

        def _get_data(self, index):
            """Return (array, meta) for image *index*.

            Raises IndexError for out-of-range indices and, in streaming
            mode, for attempts to rewind.
            """
            # Validate
            if index < 0 or index >= self.get_length():
                raise IndexError(
                    "Image index %i not in [0 %i]." % (index, self.get_length())
                )
            # Get Image object
            if isinstance(self._stream, Image):
                image_ob = self._stream  # singleton
            elif isinstance(self._stream, list):
                # Easy when we have random access
                image_ob = self._stream[index]
            else:
                # For streaming, we need to skip over frames
                if index < self._stream.index:
                    raise IndexError(
                        "BSDF file is being read in streaming "
                        "mode, thus does not allow rewinding."
                    )
                while index > self._stream.index:
                    self._stream.next()
                image_ob = self._stream.next()  # Can raise StopIteration
            # Is this an image? Promote a bare {array, meta} dict.
            if (
                isinstance(image_ob, dict)
                and "meta" in image_ob
                and "array" in image_ob
            ):
                image_ob = Image(image_ob["array"], image_ob["meta"])
            if isinstance(image_ob, Image):
                # Return as array (if we have lazy blobs, they are read now)
                return image_ob.get_array(), image_ob.get_meta()
            else:
                # Truncate the repr so the error stays readable.
                r = repr(image_ob)
                r = r if len(r) < 200 else r[:197] + "..."
                raise RuntimeError("BSDF file contains non-image " + r)

        def _get_meta_data(self, index):  # pragma: no cover
            return {}  # This format does not support global meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, compression=1):
            """Prepare for writing: a singleton image for 'i'/'v' modes,
            otherwise an open ListStream for a series of images."""
            options = {"compression": compression}
            bsdf, self._serializer = get_bsdf_serializer(options)
            if self.request.mode[1] in "iv":
                self._stream = None  # Singleton image
                self._written = False
            else:
                # Series (stream) of images
                file = self.request.get_file()
                self._stream = bsdf.ListStream()
                self._serializer.save(file, self._stream)

        def _close(self):
            # We close the stream here, which will mark the number of written
            # elements. If we would not close it, the file would be fine, it's
            # just that upon reading it would not be known how many items are
            # in there.
            if self._stream is not None:
                self._stream.close(False)  # False says "keep this a stream"

        def _append_data(self, im, meta):
            """Append one image/volume, wrapped so the BSDF image2d/image3d
            extension picks it up."""
            # Determine dimension
            ndim = None
            if self.request.mode[1] in "iI":
                ndim = 2
            elif self.request.mode[1] in "vV":
                ndim = 3
            else:
                ndim = 3  # Make an educated guess
                if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4):
                    ndim = 2
            # Validate shape (a trailing dim of <= 4 is treated as channels)
            assert ndim in (2, 3)
            if ndim == 2:
                assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4)
            else:
                assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4)
            # Wrap data and meta data in our special class that will trigger
            # the BSDF image2D or image3D extension.
            if ndim == 2:
                ob = Image2D(im, meta)
            else:
                ob = Image3D(im, meta)
            # Write directly or to stream
            if self._stream is None:
                assert not self._written, "Cannot write singleton image twice"
                self._written = True
                file = self.request.get_file()
                self._serializer.save(file, ob)
            else:
                self._stream.append(ob)

        def set_meta_data(self, meta):  # pragma: no cover
            raise RuntimeError("The BSDF format only supports " "per-image meta data.")
@@ -1,333 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read DICOM files.
|
||||
|
||||
Backend Library: internal
|
||||
|
||||
A format for reading DICOM images: a common format used to store
|
||||
medical image data, such as X-ray, CT and MRI.
|
||||
|
||||
This format borrows some code (and ideas) from the pydicom project. However,
|
||||
only a predefined subset of tags are extracted from the file. This allows
|
||||
for great simplifications allowing us to make a stand-alone reader, and
|
||||
also results in a much faster read time.
|
||||
|
||||
By default, only uncompressed and deflated transfer syntaxes are supported.
|
||||
If gdcm or dcmtk is installed, these will be used to automatically convert
|
||||
the data. See https://github.com/malaterre/GDCM/releases for installing GDCM.
|
||||
|
||||
This format provides functionality to group images of the same
|
||||
series together, thus extracting volumes (and multiple volumes).
|
||||
Using volread will attempt to yield a volume. If multiple volumes
|
||||
are present, the first one is given. Using mimread will simply yield
|
||||
all images in the given directory (not taking series into account).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
progress : {True, False, BaseProgressIndicator}
|
||||
Whether to show progress when reading from multiple files.
|
||||
Default True. By passing an object that inherits from
|
||||
BaseProgressIndicator, the way in which progress is reported
|
||||
can be costumized.
|
||||
|
||||
"""
|
||||
|
||||
# todo: Use pydicom:
|
||||
# * Note: is not py3k ready yet
|
||||
# * Allow reading the full meta info
|
||||
# I think we can more or less replace the SimpleDicomReader with a
|
||||
# pydicom.Dataset For series, only ned to read the full info from one
|
||||
# file: speed still high
|
||||
# * Perhaps allow writing?
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator
|
||||
from ..core import read_n_bytes
|
||||
|
||||
_dicom = None # lazily loaded in load_lib()
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def load_lib():
    """Import the heavy ``_dicom`` helper module on first use and cache it
    in the module-level ``_dicom`` global."""
    global _dicom
    from . import _dicom

    return _dicom
# Determine endianness of the host system
sys_is_little_endian = sys.byteorder == "little"
def get_dcmdjpeg_exe():
    """Locate the dcmtk ``dcmdjpeg`` tool (used to decompress JPEG DICOMs).

    Returns
    -------
    list of str or None
        The command as a one-element list suitable for subprocess, or
        None when the tool cannot be found.
    """
    fname = "dcmdjpeg" + ".exe" * sys.platform.startswith("win")
    # Try a few well-known Windows install locations first.
    # (renamed loop variable: `dir` shadowed the builtin)
    for directory in (
        "c:\\dcmtk",
        "c:\\Program Files",
        "c:\\Program Files\\dcmtk",
        "c:\\Program Files (x86)\\dcmtk",
    ):
        filename = os.path.join(directory, fname)
        if os.path.isfile(filename):
            return [filename]

    # Fall back to the PATH: if invoking it succeeds, it is available.
    try:
        subprocess.check_call([fname, "--version"])
        return [fname]
    except Exception:
        return None
def get_gdcmconv_exe():
    """Locate the GDCM ``gdcmconv`` tool; return the command list or None."""
    fname = "gdcmconv" + ".exe" * sys.platform.startswith("win")
    # Maybe it's on the path
    try:
        subprocess.check_call([fname, "--version"])
    except Exception:
        pass
    else:
        return [fname, "--raw"]

    # Collect (version-suffix, path) candidates from likely install roots
    candidates = []
    for base_dir in [r"c:\Program Files"]:
        if not os.path.isdir(base_dir):
            continue
        for dname in os.listdir(base_dir):
            if dname.lower().startswith("gdcm"):
                suffix = dname[4:].strip()
                candidates.append((suffix, os.path.join(base_dir, dname)))

    # Try higher versions first; the executable may live in the root or bin/
    for _, dirname in sorted(candidates, reverse=True):
        for exe in (
            os.path.join(dirname, "gdcmconv.exe"),
            os.path.join(dirname, "bin", "gdcmconv.exe"),
        ):
            if os.path.isfile(exe):
                return [exe, "--raw"]
    return None
class DicomFormat(Format):
|
||||
"""See :mod:`imageio.plugins.dicom`"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# If user URI was a directory, we check whether it has a DICOM file
|
||||
if os.path.isdir(request.filename):
|
||||
files = os.listdir(request.filename)
|
||||
for fname in sorted(files): # Sorting make it consistent
|
||||
filename = os.path.join(request.filename, fname)
|
||||
if os.path.isfile(filename) and "DICOMDIR" not in fname:
|
||||
with open(filename, "rb") as f:
|
||||
first_bytes = read_n_bytes(f, 140)
|
||||
return first_bytes[128:132] == b"DICM"
|
||||
else:
|
||||
return False
|
||||
# Check
|
||||
return request.firstbytes[128:132] == b"DICM"
|
||||
|
||||
    def _can_write(self, request):
        # We cannot save yet. May become possible if pydicom is used as
        # a backend.
        return False
# --
|
||||
|
||||
class Reader(Format.Reader):
|
||||
_compressed_warning_dirs = set()
|
||||
|
||||
        def _open(self, progress=True):
            """Open a DICOM file or directory.

            For a single file the dataset is read immediately; compressed
            data is auto-converted via gdcmconv/dcmdjpeg when available.
            For a directory, loading is deferred to the `series` property.
            """
            if not _dicom:
                load_lib()
            if os.path.isdir(self.request.filename):
                # A dir can be given if the user used the format explicitly
                self._info = {}
                self._data = None
            else:
                # Read the given dataset now ...
                try:
                    dcm = _dicom.SimpleDicomReader(self.request.get_file())
                except _dicom.CompressedDicom as err:
                    # We cannot do this on our own. Perhaps with some help ...
                    cmd = get_gdcmconv_exe()
                    if not cmd and "JPEG" in str(err):
                        cmd = get_dcmdjpeg_exe()
                    if not cmd:
                        # No converter found: rewrite the message to suggest
                        # installing one, then re-raise the original error.
                        msg = err.args[0].replace("using", "installing")
                        msg = msg.replace("convert", "auto-convert")
                        err.args = (msg,)
                        raise
                    else:
                        # Convert to an uncompressed copy next to the original
                        # and read that instead.
                        fname1 = self.request.get_local_filename()
                        fname2 = fname1 + ".raw"
                        try:
                            subprocess.check_call(cmd + [fname1, fname2])
                        except Exception:
                            raise err
                        d = os.path.dirname(fname1)
                        if d not in self._compressed_warning_dirs:
                            # Warn only once per directory (class-level set)
                            self._compressed_warning_dirs.add(d)
                            logger.warning(
                                "DICOM file contained compressed data. "
                                + "Autoconverting with "
                                + cmd[0]
                                + " (this warning is shown once for each directory)"
                            )
                        dcm = _dicom.SimpleDicomReader(fname2)

                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            # Initialize series, list of DicomSeries objects
            self._series = None  # only created if needed

            # Set progress indicator
            if isinstance(progress, BaseProgressIndicator):
                self._progressIndicator = progress
            elif progress is True:
                p = StdoutProgressIndicator("Reading DICOM")
                self._progressIndicator = p
            elif progress in (None, False):
                self._progressIndicator = BaseProgressIndicator("Dummy")
            else:
                raise ValueError("Invalid value for progress.")
        def _close(self):
            # Clean up: drop references so the (potentially large) pixel
            # data and series lists can be garbage collected.
            self._info = None
            self._data = None
            self._series = None
        @property
        def series(self):
            # Lazily scan the directory into DicomSeries objects; cached
            # after the first access.
            if self._series is None:
                pi = self._progressIndicator
                self._series = _dicom.process_directory(self.request, pi)
            return self._series
def _get_length(self):
|
||||
if self._data is None:
|
||||
dcm = self.series[0][0]
|
||||
self._info = dcm._info
|
||||
self._data = dcm.get_numpy_array()
|
||||
|
||||
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
|
||||
|
||||
if self.request.mode[1] == "i":
|
||||
# User expects one, but lets be honest about this file
|
||||
return nslices
|
||||
elif self.request.mode[1] == "I":
|
||||
# User expects multiple, if this file has multiple slices, ok.
|
||||
# Otherwise we have to check the series.
|
||||
if nslices > 1:
|
||||
return nslices
|
||||
else:
|
||||
return sum([len(serie) for serie in self.series])
|
||||
elif self.request.mode[1] == "v":
|
||||
# User expects a volume, if this file has one, ok.
|
||||
# Otherwise we have to check the series
|
||||
if nslices > 1:
|
||||
return 1
|
||||
else:
|
||||
return len(self.series) # We assume one volume per series
|
||||
elif self.request.mode[1] == "V":
|
||||
# User expects multiple volumes. We have to check the series
|
||||
return len(self.series) # We assume one volume per series
|
||||
else:
|
||||
raise RuntimeError("DICOM plugin should know what to expect.")
|
||||
|
||||
def _get_slice_data(self, index):
|
||||
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
|
||||
|
||||
# Allow index >1 only if this file contains >1
|
||||
if nslices > 1:
|
||||
return self._data[index], self._info
|
||||
elif index == 0:
|
||||
return self._data, self._info
|
||||
else:
|
||||
raise IndexError("Dicom file contains only one slice.")
|
||||
|
||||
def _get_data(self, index):
|
||||
if self._data is None:
|
||||
dcm = self.series[0][0]
|
||||
self._info = dcm._info
|
||||
self._data = dcm.get_numpy_array()
|
||||
|
||||
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
|
||||
|
||||
if self.request.mode[1] == "i":
|
||||
return self._get_slice_data(index)
|
||||
elif self.request.mode[1] == "I":
|
||||
# Return slice from volume, or return item from series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._data[index], self._info
|
||||
else:
|
||||
L = []
|
||||
for serie in self.series:
|
||||
L.extend([dcm_ for dcm_ in serie])
|
||||
return L[index].get_numpy_array(), L[index].info
|
||||
elif self.request.mode[1] in "vV":
|
||||
# Return volume or series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._data, self._info
|
||||
else:
|
||||
return (
|
||||
self.series[index].get_numpy_array(),
|
||||
self.series[index].info,
|
||||
)
|
||||
# mode is `?` (typically because we are using V3). If there is a
|
||||
# series (multiple files), index referrs to the element of the
|
||||
# series and we read volumes. If there is no series, index
|
||||
# referrs to the slice in the volume we read "flat" images.
|
||||
elif len(self.series) > 1:
|
||||
# mode is `?` and there are multiple series. Each series is a ndimage.
|
||||
return (
|
||||
self.series[index].get_numpy_array(),
|
||||
self.series[index].info,
|
||||
)
|
||||
else:
|
||||
# mode is `?` and there is only one series. Each slice is an ndimage.
|
||||
return self._get_slice_data(index)
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
if self._data is None:
|
||||
dcm = self.series[0][0]
|
||||
self._info = dcm._info
|
||||
self._data = dcm.get_numpy_array()
|
||||
|
||||
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
|
||||
|
||||
# Default is the meta data of the given file, or the "first" file.
|
||||
if index is None:
|
||||
return self._info
|
||||
|
||||
if self.request.mode[1] == "i":
|
||||
return self._info
|
||||
elif self.request.mode[1] == "I":
|
||||
# Return slice from volume, or return item from series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._info
|
||||
else:
|
||||
L = []
|
||||
for serie in self.series:
|
||||
L.extend([dcm_ for dcm_ in serie])
|
||||
return L[index].info
|
||||
elif self.request.mode[1] in "vV":
|
||||
# Return volume or series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._info
|
||||
else:
|
||||
return self.series[index].info
|
||||
else: # pragma: no cover
|
||||
raise ValueError("DICOM plugin should know what to expect.")
|
||||
@@ -1,145 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Example plugin. You can use this as a template for your own plugin.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class DummyFormat(Format):
|
||||
"""The dummy format is an example format that does nothing.
|
||||
It will never indicate that it can read or write a file. When
|
||||
explicitly asked to read, it will simply read the bytes. When
|
||||
explicitly asked to write, it will raise an error.
|
||||
|
||||
This documentation is shown when the user does ``help('thisformat')``.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
Specify arguments in numpy doc style here.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
Specify arguments in numpy doc style here.
|
||||
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# This method is called when the format manager is searching
|
||||
# for a format to read a certain image. Return True if this format
|
||||
# can do it.
|
||||
#
|
||||
# The format manager is aware of the extensions and the modes
|
||||
# that each format can handle. It will first ask all formats
|
||||
# that *seem* to be able to read it whether they can. If none
|
||||
# can, it will ask the remaining formats if they can: the
|
||||
# extension might be missing, and this allows formats to provide
|
||||
# functionality for certain extensions, while giving preference
|
||||
# to other plugins.
|
||||
#
|
||||
# If a format says it can, it should live up to it. The format
|
||||
# would ideally check the request.firstbytes and look for a
|
||||
# header of some kind.
|
||||
#
|
||||
# The request object has:
|
||||
# request.filename: a representation of the source (only for reporting)
|
||||
# request.firstbytes: the first 256 bytes of the file.
|
||||
# request.mode[0]: read or write mode
|
||||
|
||||
if request.extension in self.extensions:
|
||||
return True
|
||||
|
||||
def _can_write(self, request):
|
||||
# This method is called when the format manager is searching
|
||||
# for a format to write a certain image. It will first ask all
|
||||
# formats that *seem* to be able to write it whether they can.
|
||||
# If none can, it will ask the remaining formats if they can.
|
||||
#
|
||||
# Return True if the format can do it.
|
||||
|
||||
# In most cases, this code does suffice:
|
||||
if request.extension in self.extensions:
|
||||
return True
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self, some_option=False, length=1):
|
||||
# Specify kwargs here. Optionally, the user-specified kwargs
|
||||
# can also be accessed via the request.kwargs object.
|
||||
#
|
||||
# The request object provides two ways to get access to the
|
||||
# data. Use just one:
|
||||
# - Use request.get_file() for a file object (preferred)
|
||||
# - Use request.get_local_filename() for a file on the system
|
||||
self._fp = self.request.get_file()
|
||||
self._length = length # passed as an arg in this case for testing
|
||||
self._data = None
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._fp
|
||||
pass
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images. Can be np.inf
|
||||
return self._length
|
||||
|
||||
def _get_data(self, index):
|
||||
# Return the data and meta data for the given index
|
||||
if index >= self._length:
|
||||
raise IndexError("Image index %i > %i" % (index, self._length))
|
||||
# Read all bytes
|
||||
if self._data is None:
|
||||
self._data = self._fp.read()
|
||||
# Put in a numpy array
|
||||
im = np.frombuffer(self._data, "uint8")
|
||||
im.shape = len(im), 1
|
||||
# Return array and dummy meta data
|
||||
return im, {}
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index. If index is None, it
|
||||
# should return the global meta data.
|
||||
return {} # This format does not support meta data
|
||||
|
||||
# -- writer
|
||||
|
||||
class Writer(Format.Writer):
|
||||
def _open(self, flags=0):
|
||||
# Specify kwargs here. Optionally, the user-specified kwargs
|
||||
# can also be accessed via the request.kwargs object.
|
||||
#
|
||||
# The request object provides two ways to write the data.
|
||||
# Use just one:
|
||||
# - Use request.get_file() for a file object (preferred)
|
||||
# - Use request.get_local_filename() for a file on the system
|
||||
self._fp = self.request.get_file()
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._fp
|
||||
pass
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
# Process the given data and meta data.
|
||||
raise RuntimeError("The dummy format cannot write image data.")
|
||||
|
||||
def set_meta_data(self, meta):
|
||||
# Process the given meta data (global for all images)
|
||||
# It is not mandatory to support this.
|
||||
raise RuntimeError("The dummy format cannot write meta data.")
|
||||
|
||||
|
||||
# Register. You register an *instance* of a Format class. Here specify:
|
||||
format = DummyFormat(
|
||||
"dummy", # short name
|
||||
"An example format that does nothing.", # one line descr.
|
||||
".foobar .nonexistentext", # list of extensions
|
||||
"iI", # modes, characters in iIvV
|
||||
)
|
||||
formats.add_format(format)
|
||||
@@ -1,95 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read TIFF from FEI SEM microscopes.
|
||||
|
||||
Backend Library: internal
|
||||
|
||||
This format is based on :mod:`TIFF <imageio.plugins.tifffile>`, and supports the
|
||||
same parameters. FEI microscopes append metadata as ASCII text at the end of the
|
||||
file, which this reader correctly extracts.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
discard_watermark : bool
|
||||
If True (default), discard the bottom rows of the image, which
|
||||
contain no image data, only a watermark with metadata.
|
||||
watermark_height : int
|
||||
The height in pixels of the FEI watermark. The default is 70.
|
||||
|
||||
See Also
|
||||
--------
|
||||
:mod:`imageio.plugins.tifffile`
|
||||
|
||||
"""
|
||||
|
||||
|
||||
from .tifffile import TiffFormat
|
||||
|
||||
|
||||
class FEISEMFormat(TiffFormat):
|
||||
"""See :mod:`imageio.plugins.feisem`"""
|
||||
|
||||
def _can_write(self, request):
|
||||
return False # FEI-SEM only supports reading
|
||||
|
||||
class Reader(TiffFormat.Reader):
|
||||
def _get_data(self, index=0, discard_watermark=True, watermark_height=70):
|
||||
"""Get image and metadata from given index.
|
||||
|
||||
FEI images usually (always?) contain a watermark at the
|
||||
bottom of the image, 70 pixels high. We discard this by
|
||||
default as it does not contain any information not present
|
||||
in the metadata.
|
||||
"""
|
||||
im, meta = super(FEISEMFormat.Reader, self)._get_data(index)
|
||||
if discard_watermark:
|
||||
im = im[:-watermark_height]
|
||||
return im, meta
|
||||
|
||||
def _get_meta_data(self, index=None):
|
||||
"""Read the metadata from an FEI SEM TIFF.
|
||||
|
||||
This metadata is included as ASCII text at the end of the file.
|
||||
|
||||
The index, if provided, is ignored.
|
||||
|
||||
Returns
|
||||
-------
|
||||
metadata : dict
|
||||
Dictionary of metadata.
|
||||
"""
|
||||
if hasattr(self, "_fei_meta"):
|
||||
return self._fei_meta
|
||||
|
||||
md = {"root": {}}
|
||||
current_tag = "root"
|
||||
reading_metadata = False
|
||||
filename = self.request.get_local_filename()
|
||||
with open(filename, encoding="utf8", errors="ignore") as fin:
|
||||
for line in fin:
|
||||
if not reading_metadata:
|
||||
if not line.startswith("Date="):
|
||||
continue
|
||||
else:
|
||||
reading_metadata = True
|
||||
line = line.rstrip()
|
||||
if line.startswith("["):
|
||||
current_tag = line.lstrip("[").rstrip("]")
|
||||
md[current_tag] = {}
|
||||
else:
|
||||
if "=" in line: # ignore empty and irrelevant lines
|
||||
key, val = line.split("=", maxsplit=1)
|
||||
for tag_type in (int, float):
|
||||
try:
|
||||
val = tag_type(val)
|
||||
except ValueError:
|
||||
continue
|
||||
else:
|
||||
break
|
||||
md[current_tag][key] = val
|
||||
if not md["root"] and len(md) == 1:
|
||||
raise ValueError("Input file %s contains no FEI metadata." % filename)
|
||||
|
||||
self._fei_meta = md
|
||||
return md
|
||||
@@ -1,729 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read/Write video using FFMPEG
|
||||
|
||||
.. note::
|
||||
We are in the process of (slowly) replacing this plugin with a new one that
|
||||
is based on `pyav <https://pyav.org/docs/stable/>`_. It is faster and more
|
||||
flexible than the plugin documented here. Check the :mod:`pyav
|
||||
plugin's documentation <imageio.plugins.pyav>` for more information about
|
||||
this plugin.
|
||||
|
||||
Backend Library: https://github.com/imageio/imageio-ffmpeg
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[ffmpeg]
|
||||
|
||||
|
||||
The ffmpeg format provides reading and writing for a wide range of movie formats
|
||||
such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from
|
||||
webcams and USB cameras. It is based on ffmpeg and is inspired by/based `moviepy
|
||||
<https://github.com/Zulko/moviepy/>`_ by Zulko.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
fps : scalar
|
||||
The number of frames per second to read the data at. Default None (i.e.
|
||||
read at the file's own fps). One can use this for files with a
|
||||
variable fps, or in cases where imageio is unable to correctly detect
|
||||
the fps. In case of trouble opening camera streams, it may help to set an
|
||||
explicit fps value matching a framerate supported by the camera.
|
||||
loop : bool
|
||||
If True, the video will rewind as soon as a frame is requested
|
||||
beyond the last frame. Otherwise, IndexError is raised. Default False.
|
||||
Setting this to True will internally call ``count_frames()``,
|
||||
and set the reader's length to that value instead of inf.
|
||||
size : str | tuple
|
||||
The frame size (i.e. resolution) to read the images, e.g.
|
||||
(100, 100) or "640x480". For camera streams, this allows setting
|
||||
the capture resolution. For normal video data, ffmpeg will
|
||||
rescale the data.
|
||||
dtype : str | type
|
||||
The dtype for the output arrays. Determines the bit-depth that
|
||||
is requested from ffmpeg. Supported dtypes: uint8, uint16.
|
||||
Default: uint8.
|
||||
pixelformat : str
|
||||
The pixel format for the camera to use (e.g. "yuyv422" or
|
||||
"gray"). The camera needs to support the format in order for
|
||||
this to take effect. Note that the images produced by this
|
||||
reader are always RGB.
|
||||
input_params : list
|
||||
List additional arguments to ffmpeg for input file options.
|
||||
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
|
||||
Example ffmpeg arguments to use aggressive error handling:
|
||||
['-err_detect', 'aggressive']
|
||||
output_params : list
|
||||
List additional arguments to ffmpeg for output file options (i.e. the
|
||||
stream being read by imageio).
|
||||
print_info : bool
|
||||
Print information about the video file as reported by ffmpeg.
|
||||
|
||||
Parameters for writing
|
||||
----------------------
|
||||
fps : scalar
|
||||
The number of frames per second. Default 10.
|
||||
codec : str
|
||||
the video codec to use. Default 'libx264', which represents the
|
||||
widely available mpeg4. Except when saving .wmv files, then the
|
||||
defaults is 'msmpeg4' which is more commonly supported for windows
|
||||
quality : float | None
|
||||
Video output quality. Default is 5. Uses variable bit rate. Highest
|
||||
quality is 10, lowest is 0. Set to None to prevent variable bitrate
|
||||
flags to FFMPEG so you can manually specify them using output_params
|
||||
instead. Specifying a fixed bitrate using 'bitrate' disables this
|
||||
parameter.
|
||||
bitrate : int | None
|
||||
Set a constant bitrate for the video encoding. Default is None causing
|
||||
'quality' parameter to be used instead. Better quality videos with
|
||||
smaller file sizes will result from using the 'quality' variable
|
||||
bitrate parameter rather than specifiying a fixed bitrate with this
|
||||
parameter.
|
||||
pixelformat: str
|
||||
The output video pixel format. Default is 'yuv420p' which most widely
|
||||
supported by video players.
|
||||
input_params : list
|
||||
List additional arguments to ffmpeg for input file options (i.e. the
|
||||
stream that imageio provides).
|
||||
output_params : list
|
||||
List additional arguments to ffmpeg for output file options.
|
||||
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
|
||||
Example ffmpeg arguments to use only intra frames and set aspect ratio:
|
||||
['-intra', '-aspect', '16:9']
|
||||
ffmpeg_log_level: str
|
||||
Sets ffmpeg output log level. Default is "warning".
|
||||
Values can be "quiet", "panic", "fatal", "error", "warning", "info"
|
||||
"verbose", or "debug". Also prints the FFMPEG command being used by
|
||||
imageio if "info", "verbose", or "debug".
|
||||
macro_block_size: int
|
||||
Size constraint for video. Width and height, must be divisible by this
|
||||
number. If not divisible by this number imageio will tell ffmpeg to
|
||||
scale the image up to the next closest size
|
||||
divisible by this number. Most codecs are compatible with a macroblock
|
||||
size of 16 (default), some can go smaller (4, 8). To disable this
|
||||
automatic feature set it to None or 1, however be warned many players
|
||||
can't decode videos that are odd in size and some codecs will produce
|
||||
poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
|
||||
audio_path : str | None
|
||||
Audio path of any audio that needs to be written. Defaults to nothing,
|
||||
so no audio will be written. Please note, when writing shorter video
|
||||
than the original, ffmpeg will not truncate the audio track; it
|
||||
will maintain its original length and be longer than the video.
|
||||
audio_codec : str | None
|
||||
The audio codec to use. Defaults to nothing, but if an audio_path has
|
||||
been provided ffmpeg will attempt to set a default codec.
|
||||
|
||||
Notes
|
||||
-----
|
||||
If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to
|
||||
encode/decode H.264 (likely due to licensing concerns). If you need this
|
||||
format on anaconda install ``conda-forge/ffmpeg`` instead.
|
||||
|
||||
You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a
|
||||
specific ffmpeg executable.
|
||||
|
||||
To get the number of frames before having read them all, you can use the
|
||||
``reader.count_frames()`` method (the reader will then use
|
||||
``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames,
|
||||
note that this operation can take a few seconds on large files). Alternatively,
|
||||
the number of frames can be estimated from the fps and duration in the meta data
|
||||
(though these values themselves are not always present/reliable).
|
||||
|
||||
"""
|
||||
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import platform
|
||||
import threading
|
||||
import subprocess as sp
|
||||
import imageio_ffmpeg
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Get camera format
|
||||
if sys.platform.startswith("win"):
|
||||
CAM_FORMAT = "dshow" # dshow or vfwcap
|
||||
elif sys.platform.startswith("linux"):
|
||||
CAM_FORMAT = "video4linux2"
|
||||
elif sys.platform.startswith("darwin"):
|
||||
CAM_FORMAT = "avfoundation"
|
||||
else: # pragma: no cover
|
||||
CAM_FORMAT = "unknown-cam-format"
|
||||
|
||||
|
||||
def download(directory=None, force_download=False): # pragma: no cover
|
||||
raise RuntimeError(
|
||||
"imageio.ffmpeg.download() has been deprecated. "
|
||||
"Use 'pip install imageio-ffmpeg' instead.'"
|
||||
)
|
||||
|
||||
|
||||
# For backwards compatibility - we dont use this ourselves
|
||||
def get_exe(): # pragma: no cover
|
||||
"""Wrapper for imageio_ffmpeg.get_ffmpeg_exe()"""
|
||||
|
||||
return imageio_ffmpeg.get_ffmpeg_exe()
|
||||
|
||||
|
||||
class FfmpegFormat(Format):
|
||||
"""Read/Write ImageResources using FFMPEG.
|
||||
|
||||
See :mod:`imageio.plugins.ffmpeg`
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Read from video stream?
|
||||
# Note that we could write the _video flag here, but a user might
|
||||
# select this format explicitly (and this code is not run)
|
||||
if re.match(r"<video(\d+)>", request.filename):
|
||||
return True
|
||||
|
||||
# Read from file that we know?
|
||||
if request.extension in self.extensions:
|
||||
return True
|
||||
|
||||
def _can_write(self, request):
|
||||
if request.extension in self.extensions:
|
||||
return True
|
||||
|
||||
# --
|
||||
|
||||
class Reader(Format.Reader):
|
||||
_frame_catcher = None
|
||||
_read_gen = None
|
||||
|
||||
def _get_cam_inputname(self, index):
|
||||
if sys.platform.startswith("linux"):
|
||||
return "/dev/" + self.request._video[1:-1]
|
||||
|
||||
elif sys.platform.startswith("win"):
|
||||
# Ask ffmpeg for list of dshow device names
|
||||
ffmpeg_api = imageio_ffmpeg
|
||||
cmd = [
|
||||
ffmpeg_api.get_ffmpeg_exe(),
|
||||
"-list_devices",
|
||||
"true",
|
||||
"-f",
|
||||
CAM_FORMAT,
|
||||
"-i",
|
||||
"dummy",
|
||||
]
|
||||
# Set `shell=True` in sp.run to prevent popup of a command
|
||||
# line window in frozen applications. Note: this would be a
|
||||
# security vulnerability if user-input goes into the cmd.
|
||||
# Note that the ffmpeg process returns with exit code 1 when
|
||||
# using `-list_devices` (or `-list_options`), even if the
|
||||
# command is successful, so we set `check=False` explicitly.
|
||||
completed_process = sp.run(
|
||||
cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE,
|
||||
encoding="utf-8",
|
||||
shell=True,
|
||||
check=False,
|
||||
)
|
||||
|
||||
# Return device name at index
|
||||
try:
|
||||
name = parse_device_names(completed_process.stderr)[index]
|
||||
except IndexError:
|
||||
raise IndexError("No ffdshow camera at index %i." % index)
|
||||
return "video=%s" % name
|
||||
|
||||
elif sys.platform.startswith("darwin"):
|
||||
# Appears that newer ffmpeg builds don't support -list-devices
|
||||
# on OS X. But you can directly open the camera by index.
|
||||
name = str(index)
|
||||
return name
|
||||
|
||||
else: # pragma: no cover
|
||||
return "??"
|
||||
|
||||
def _open(
|
||||
self,
|
||||
loop=False,
|
||||
size=None,
|
||||
dtype=None,
|
||||
pixelformat=None,
|
||||
print_info=False,
|
||||
ffmpeg_params=None,
|
||||
input_params=None,
|
||||
output_params=None,
|
||||
fps=None,
|
||||
):
|
||||
# Get generator functions
|
||||
self._ffmpeg_api = imageio_ffmpeg
|
||||
# Process input args
|
||||
self._arg_loop = bool(loop)
|
||||
if size is None:
|
||||
self._arg_size = None
|
||||
elif isinstance(size, tuple):
|
||||
self._arg_size = "%ix%i" % size
|
||||
elif isinstance(size, str) and "x" in size:
|
||||
self._arg_size = size
|
||||
else:
|
||||
raise ValueError('FFMPEG size must be tuple of "NxM"')
|
||||
if pixelformat is None:
|
||||
pass
|
||||
elif not isinstance(pixelformat, str):
|
||||
raise ValueError("FFMPEG pixelformat must be str")
|
||||
if dtype is None:
|
||||
self._dtype = np.dtype("uint8")
|
||||
else:
|
||||
self._dtype = np.dtype(dtype)
|
||||
allowed_dtypes = ["uint8", "uint16"]
|
||||
if self._dtype.name not in allowed_dtypes:
|
||||
raise ValueError(
|
||||
"dtype must be one of: {}".format(", ".join(allowed_dtypes))
|
||||
)
|
||||
self._arg_pixelformat = pixelformat
|
||||
self._arg_input_params = input_params or []
|
||||
self._arg_output_params = output_params or []
|
||||
self._arg_input_params += ffmpeg_params or [] # backward compat
|
||||
# Write "_video"_arg - indicating webcam support
|
||||
self.request._video = None
|
||||
regex_match = re.match(r"<video(\d+)>", self.request.filename)
|
||||
if regex_match:
|
||||
self.request._video = self.request.filename
|
||||
# Get local filename
|
||||
if self.request._video:
|
||||
index = int(regex_match.group(1))
|
||||
self._filename = self._get_cam_inputname(index)
|
||||
else:
|
||||
self._filename = self.request.get_local_filename()
|
||||
# When passed to ffmpeg on command line, carets need to be escaped.
|
||||
self._filename = self._filename.replace("^", "^^")
|
||||
# Determine pixel format and depth
|
||||
self._depth = 3
|
||||
if self._dtype.name == "uint8":
|
||||
self._pix_fmt = "rgb24"
|
||||
self._bytes_per_channel = 1
|
||||
else:
|
||||
self._pix_fmt = "rgb48le"
|
||||
self._bytes_per_channel = 2
|
||||
# Initialize parameters
|
||||
self._pos = -1
|
||||
self._meta = {"plugin": "ffmpeg"}
|
||||
self._lastread = None
|
||||
|
||||
# Calculating this from fps and duration is not accurate,
|
||||
# and calculating it exactly with ffmpeg_api.count_frames_and_secs
|
||||
# takes too long to do for each video. But we need it for looping.
|
||||
self._nframes = float("inf")
|
||||
if self._arg_loop and not self.request._video:
|
||||
self._nframes = self.count_frames()
|
||||
self._meta["nframes"] = self._nframes
|
||||
|
||||
# Specify input framerate? (only on macOS)
|
||||
# Ideally we'd get the supported framerate from the metadata, but we get the
|
||||
# metadata when we boot ffmpeg ... maybe we could refactor this so we can
|
||||
# get the metadata beforehand, but for now we'll just give it 2 tries on MacOS,
|
||||
# one with fps 30 and one with fps 15.
|
||||
need_input_fps = need_output_fps = False
|
||||
if self.request._video and platform.system().lower() == "darwin":
|
||||
if "-framerate" not in str(self._arg_input_params):
|
||||
need_input_fps = True
|
||||
if not self.request.kwargs.get("fps", None):
|
||||
need_output_fps = True
|
||||
if need_input_fps:
|
||||
self._arg_input_params.extend(["-framerate", str(float(30))])
|
||||
if need_output_fps:
|
||||
self._arg_output_params.extend(["-r", str(float(30))])
|
||||
|
||||
# Start ffmpeg subprocess and get meta information
|
||||
try:
|
||||
self._initialize()
|
||||
except IndexError:
|
||||
# Specify input framerate again, this time different.
|
||||
if need_input_fps:
|
||||
self._arg_input_params[-1] = str(float(15))
|
||||
self._initialize()
|
||||
else:
|
||||
raise
|
||||
|
||||
# For cameras, create thread that keeps reading the images
|
||||
if self.request._video:
|
||||
self._frame_catcher = FrameCatcher(self._read_gen)
|
||||
|
||||
# For reference - but disabled, because it is inaccurate
|
||||
# if self._meta["nframes"] == float("inf"):
|
||||
# if self._meta.get("fps", 0) > 0:
|
||||
# if self._meta.get("duration", 0) > 0:
|
||||
# n = round(self._meta["duration"] * self._meta["fps"])
|
||||
# self._meta["nframes"] = int(n)
|
||||
|
||||
def _close(self):
|
||||
# First close the frame catcher, because we cannot close the gen
|
||||
# if the frame catcher thread is using it
|
||||
if self._frame_catcher is not None:
|
||||
self._frame_catcher.stop_me()
|
||||
self._frame_catcher = None
|
||||
if self._read_gen is not None:
|
||||
self._read_gen.close()
|
||||
self._read_gen = None
|
||||
|
||||
def count_frames(self):
|
||||
"""Count the number of frames. Note that this can take a few
|
||||
seconds for large files. Also note that it counts the number
|
||||
of frames in the original video and does not take a given fps
|
||||
into account.
|
||||
"""
|
||||
# This would have been nice, but this does not work :(
|
||||
# oargs = []
|
||||
# if self.request.kwargs.get("fps", None):
|
||||
# fps = float(self.request.kwargs["fps"])
|
||||
# oargs += ["-r", "%.02f" % fps]
|
||||
cf = self._ffmpeg_api.count_frames_and_secs
|
||||
return cf(self._filename)[0]
|
||||
|
||||
def _get_length(self):
|
||||
return self._nframes # only not inf if loop is True
|
||||
|
||||
def _get_data(self, index):
|
||||
"""Reads a frame at index. Note for coders: getting an
|
||||
arbitrary frame in the video with ffmpeg can be painfully
|
||||
slow if some decoding has to be done. This function tries
|
||||
to avoid fectching arbitrary frames whenever possible, by
|
||||
moving between adjacent frames."""
|
||||
# Modulo index (for looping)
|
||||
if self._arg_loop and self._nframes < float("inf"):
|
||||
index %= self._nframes
|
||||
|
||||
if index == self._pos:
|
||||
return self._lastread, dict(new=False)
|
||||
elif index < 0:
|
||||
raise IndexError("Frame index must be >= 0")
|
||||
elif index >= self._nframes:
|
||||
raise IndexError("Reached end of video")
|
||||
else:
|
||||
if (index < self._pos) or (index > self._pos + 100):
|
||||
self._initialize(index)
|
||||
else:
|
||||
self._skip_frames(index - self._pos - 1)
|
||||
result, is_new = self._read_frame()
|
||||
self._pos = index
|
||||
return result, dict(new=is_new)
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
return self._meta
|
||||
|
||||
def _initialize(self, index=0):
|
||||
# Close the current generator, and thereby terminate its subprocess
|
||||
if self._read_gen is not None:
|
||||
self._read_gen.close()
|
||||
|
||||
iargs = []
|
||||
oargs = []
|
||||
|
||||
# Create input args
|
||||
iargs += self._arg_input_params
|
||||
if self.request._video:
|
||||
iargs += ["-f", CAM_FORMAT]
|
||||
if self._arg_pixelformat:
|
||||
iargs += ["-pix_fmt", self._arg_pixelformat]
|
||||
if self._arg_size:
|
||||
iargs += ["-s", self._arg_size]
|
||||
elif index > 0: # re-initialize / seek
|
||||
# Note: only works if we initialized earlier, and now have meta
|
||||
# Some info here: https://trac.ffmpeg.org/wiki/Seeking
|
||||
# There are two ways to seek, one before -i (input_params) and
|
||||
# after (output_params). The former is fast, because it uses
|
||||
# keyframes, the latter is slow but accurate. According to
|
||||
# the article above, the fast method should also be accurate
|
||||
# from ffmpeg version 2.1, however in version 4.1 our tests
|
||||
# start failing again. Not sure why, but we can solve this
|
||||
# by combining slow and fast. Seek the long stretch using
|
||||
# the fast method, and seek the last 10s the slow way.
|
||||
starttime = index / self._meta["fps"]
|
||||
seek_slow = min(10, starttime)
|
||||
seek_fast = starttime - seek_slow
|
||||
# We used to have this epsilon earlier, when we did not use
|
||||
# the slow seek. I don't think we need it anymore.
|
||||
# epsilon = -1 / self._meta["fps"] * 0.1
|
||||
iargs += ["-ss", "%.06f" % (seek_fast)]
|
||||
oargs += ["-ss", "%.06f" % (seek_slow)]
|
||||
|
||||
# Output args, for writing to pipe
|
||||
if self._arg_size:
|
||||
oargs += ["-s", self._arg_size]
|
||||
if self.request.kwargs.get("fps", None):
|
||||
fps = float(self.request.kwargs["fps"])
|
||||
oargs += ["-r", "%.02f" % fps]
|
||||
oargs += self._arg_output_params
|
||||
|
||||
# Get pixelformat and bytes per pixel
|
||||
pix_fmt = self._pix_fmt
|
||||
bpp = self._depth * self._bytes_per_channel
|
||||
|
||||
# Create generator
|
||||
rf = self._ffmpeg_api.read_frames
|
||||
self._read_gen = rf(
|
||||
self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs
|
||||
)
|
||||
|
||||
# Read meta data. This start the generator (and ffmpeg subprocess)
|
||||
if self.request._video:
|
||||
# With cameras, catch error and turn into IndexError
|
||||
try:
|
||||
meta = self._read_gen.__next__()
|
||||
except IOError as err:
|
||||
err_text = str(err)
|
||||
if "darwin" in sys.platform:
|
||||
if "Unknown input format: 'avfoundation'" in err_text:
|
||||
err_text += (
|
||||
"Try installing FFMPEG using "
|
||||
"home brew to get a version with "
|
||||
"support for cameras."
|
||||
)
|
||||
raise IndexError(
|
||||
"No (working) camera at {}.\n\n{}".format(
|
||||
self.request._video, err_text
|
||||
)
|
||||
)
|
||||
else:
|
||||
self._meta.update(meta)
|
||||
elif index == 0:
|
||||
self._meta.update(self._read_gen.__next__())
|
||||
else:
|
||||
self._read_gen.__next__() # we already have meta data
|
||||
|
||||
def _skip_frames(self, n=1):
|
||||
"""Reads and throws away n frames"""
|
||||
for i in range(n):
|
||||
self._read_gen.__next__()
|
||||
self._pos += n
|
||||
|
||||
def _read_frame(self):
|
||||
# Read and convert to numpy array
|
||||
w, h = self._meta["size"]
|
||||
framesize = w * h * self._depth * self._bytes_per_channel
|
||||
# t0 = time.time()
|
||||
|
||||
# Read frame
|
||||
if self._frame_catcher: # pragma: no cover - camera thing
|
||||
s, is_new = self._frame_catcher.get_frame()
|
||||
else:
|
||||
s = self._read_gen.__next__()
|
||||
is_new = True
|
||||
|
||||
# Check
|
||||
if len(s) != framesize:
|
||||
raise RuntimeError(
|
||||
"Frame is %i bytes, but expected %i." % (len(s), framesize)
|
||||
)
|
||||
|
||||
result = np.frombuffer(s, dtype=self._dtype).copy()
|
||||
result = result.reshape((h, w, self._depth))
|
||||
# t1 = time.time()
|
||||
# print('etime', t1-t0)
|
||||
|
||||
# Store and return
|
||||
self._lastread = result
|
||||
return result, is_new
|
||||
|
||||
# --
|
||||
|
||||
class Writer(Format.Writer):
    """Writer for the ffmpeg format: streams frames to an ffmpeg subprocess.

    Frames are appended one by one; the ffmpeg pipe is started lazily on
    the first appended frame, because pixel format and frame size are only
    known once image data arrives.
    """

    # Generator (from imageio-ffmpeg) that accepts frames via send()
    _write_gen = None

    def _open(
        self,
        fps=10,
        codec="libx264",
        bitrate=None,
        pixelformat="yuv420p",
        ffmpeg_params=None,
        input_params=None,
        output_params=None,
        ffmpeg_log_level="quiet",
        quality=5,
        macro_block_size=16,
        audio_path=None,
        audio_codec=None,
    ):
        # The keyword values are not used here directly: they are read back
        # from self.request.kwargs in _initialize(). The signature documents
        # the accepted options and their defaults.
        self._ffmpeg_api = imageio_ffmpeg
        self._filename = self.request.get_local_filename()
        # Determined from the first appended frame
        self._pix_fmt = None
        self._depth = None
        self._size = None

    def _close(self):
        # Closing the write generator flushes and ends the ffmpeg subprocess
        if self._write_gen is not None:
            self._write_gen.close()
            self._write_gen = None

    def _append_data(self, im, meta):
        """Append one frame; starts the ffmpeg pipe on first use."""
        # Get props of image
        h, w = im.shape[:2]
        size = w, h
        depth = 1 if im.ndim == 2 else im.shape[2]

        # Ensure that image is in uint8
        im = image_as_uint(im, bitdepth=8)
        # To be written efficiently, ie. without creating an immutable
        # buffer, by calling im.tobytes() the array must be contiguous.
        if not im.flags.c_contiguous:
            # checking the flag is a micro optimization.
            # the image will be a numpy subclass. See discussion
            # https://github.com/numpy/numpy/issues/11804
            im = np.ascontiguousarray(im)

        # Set size and initialize if not initialized yet
        if self._size is None:
            # Renamed from `map` to avoid shadowing the builtin
            pix_fmt_map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"}
            self._pix_fmt = pix_fmt_map.get(depth, None)
            if self._pix_fmt is None:
                raise ValueError("Image must have 1, 2, 3 or 4 channels")
            self._size = size
            self._depth = depth
            self._initialize()

        # Check size of image
        if size != self._size:
            raise ValueError("All images in a movie should have same size")
        if depth != self._depth:
            raise ValueError(
                "All images in a movie should have same " "number of channels"
            )

        assert self._write_gen is not None  # Check status

        # Write. Yes, we can send the data in as a numpy array
        self._write_gen.send(im)

    def set_meta_data(self, meta):
        # Meta data cannot be embedded when piping frames through ffmpeg
        raise RuntimeError(
            "The ffmpeg format does not support setting " "meta data."
        )

    def _initialize(self):
        """(Re)start the ffmpeg subprocess with the current parameters."""
        # Close existing generator
        if self._write_gen is not None:
            self._write_gen.close()

        # Get parameters
        # Use None to let imageio-ffmpeg (or ffmpeg) select good results
        fps = self.request.kwargs.get("fps", 10)
        codec = self.request.kwargs.get("codec", None)
        bitrate = self.request.kwargs.get("bitrate", None)
        quality = self.request.kwargs.get("quality", None)
        input_params = self.request.kwargs.get("input_params") or []
        output_params = self.request.kwargs.get("output_params") or []
        output_params += self.request.kwargs.get("ffmpeg_params") or []
        pixelformat = self.request.kwargs.get("pixelformat", None)
        macro_block_size = self.request.kwargs.get("macro_block_size", 16)
        ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None)
        audio_path = self.request.kwargs.get("audio_path", None)
        audio_codec = self.request.kwargs.get("audio_codec", None)

        macro_block_size = macro_block_size or 1  # None -> 1

        # Create generator
        self._write_gen = self._ffmpeg_api.write_frames(
            self._filename,
            self._size,
            pix_fmt_in=self._pix_fmt,
            pix_fmt_out=pixelformat,
            fps=fps,
            quality=quality,
            bitrate=bitrate,
            codec=codec,
            macro_block_size=macro_block_size,
            ffmpeg_log_level=ffmpeg_log_level,
            input_params=input_params,
            output_params=output_params,
            audio_path=audio_path,
            audio_codec=audio_codec,
        )

        # Seed the generator (this is where the ffmpeg subprocess starts)
        self._write_gen.send(None)
|
||||
|
||||
|
||||
class FrameCatcher(threading.Thread):
    """Daemon thread that continuously drains frames from a generator.

    Useful when streaming from a webcam: if user code does not grab
    frames fast enough, the buffer fills up, leading to lag, and ffmpeg
    can also stall (experienced on Linux). get_frame() always hands out
    the most recently captured image.
    """

    def __init__(self, gen):
        self._gen = gen
        self._frame = None
        self._frame_is_new = False
        self._lock = threading.RLock()
        threading.Thread.__init__(self)
        self.daemon = True  # do not let this thread hold up Python shutdown
        self._should_stop = False
        self.start()

    def stop_me(self):
        """Ask the worker loop to exit and block until it has."""
        self._should_stop = True
        while self.is_alive():
            time.sleep(0.001)

    def get_frame(self):
        """Return ``(frame, is_new)``; is_new is True only once per capture."""
        # Busy-wait until the very first frame has been captured
        while self._frame is None:  # pragma: no cover - an init thing
            time.sleep(0.001)
        with self._lock:
            fresh, self._frame_is_new = self._frame_is_new, False
            return self._frame, fresh

    def run(self):
        # Worker loop: keep pulling frames until stopped or the stream ends
        try:
            while not self._should_stop:
                time.sleep(0)  # yield control to other threads
                latest = next(self._gen)
                with self._lock:
                    self._frame = latest
                    self._frame_is_new = True
        except (StopIteration, EOFError):
            pass
|
||||
|
||||
|
||||
def parse_device_names(ffmpeg_output):
    """Parse the output of the ffmpeg -list-devices command.

    Returns a list of device names, preferring friendly names and falling
    back to an alternative name when two cameras share the same friendly
    name.
    """
    # Collect [friendly_name, alt_name] pairs for each video device
    collected = []
    inside_video_section = False
    for line in ffmpeg_output.splitlines():
        if not line.startswith("[dshow"):
            continue
        logger.debug(line)
        line = line.split("]", 1)[1].strip()
        if inside_video_section and line.startswith('"'):
            collected.append([line[1:-1], ""])
        elif inside_video_section and line.lower().startswith("alternative name"):
            alt = line.split(" name ", 1)[1].strip()[1:-1]
            if sys.platform.startswith("win"):
                alt = alt.replace("&", "^&")  # Tested to work
            else:
                alt = alt.replace("&", "\\&")  # Does this work?
            collected[-1][-1] = alt
        elif "video devices" in line:
            inside_video_section = True
        elif "devices" in line:
            # set False for subsequent "devices" sections
            inside_video_section = False

    # Post-process, see #441
    # prefer friendly names, use alt name if two cams have same friendly name
    names = []
    for friendly, alt in collected:
        if friendly not in names:
            names.append(friendly)
        elif alt:
            names.append(alt)
        else:
            names.append(friendly)  # duplicate, but not much we can do
    return names
|
||||
@@ -1,126 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read FITS files.
|
||||
|
||||
Backend Library: `Astropy <https://www.astropy.org/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[fits]
|
||||
|
||||
Flexible Image Transport System (FITS) is an open standard defining a
|
||||
digital file format useful for storage, transmission and processing of
|
||||
scientific and other images. FITS is the most commonly used digital
|
||||
file format in astronomy.
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cache : bool
|
||||
If the file name is a URL, `~astropy.utils.data.download_file` is used
|
||||
to open the file. This specifies whether or not to save the file
|
||||
locally in Astropy's download cache (default: `True`).
|
||||
uint : bool
|
||||
Interpret signed integer data where ``BZERO`` is the
|
||||
central value and ``BSCALE == 1`` as unsigned integer
|
||||
data. For example, ``int16`` data with ``BZERO = 32768``
|
||||
and ``BSCALE = 1`` would be treated as ``uint16`` data.
|
||||
|
||||
Note, for backward compatibility, the kwarg **uint16** may
|
||||
be used instead. The kwarg was renamed when support was
|
||||
added for integers of any size.
|
||||
ignore_missing_end : bool
|
||||
Do not issue an exception when opening a file that is
|
||||
missing an ``END`` card in the last header.
|
||||
checksum : bool or str
|
||||
If `True`, verifies that both ``DATASUM`` and
|
||||
``CHECKSUM`` card values (when present in the HDU header)
|
||||
match the header and data of all HDU's in the file. Updates to a
|
||||
file that already has a checksum will preserve and update the
|
||||
existing checksums unless this argument is given a value of
|
||||
'remove', in which case the CHECKSUM and DATASUM values are not
|
||||
checked, and are removed when saving changes to the file.
|
||||
disable_image_compression : bool, optional
|
||||
If `True`, treats compressed image HDU's like normal
|
||||
binary table HDU's.
|
||||
do_not_scale_image_data : bool
|
||||
If `True`, image data is not scaled using BSCALE/BZERO values
|
||||
when read.
|
||||
ignore_blank : bool
|
||||
If `True`, the BLANK keyword is ignored if present.
|
||||
scale_back : bool
|
||||
If `True`, when saving changes to a file that contained scaled
|
||||
image data, restore the data to the original type and reapply the
|
||||
original BSCALE/BZERO values. This could lead to loss of accuracy
|
||||
if scaling back to integer values after performing floating point
|
||||
operations on the data.
|
||||
|
||||
"""
|
||||
|
||||
from ..core import Format
|
||||
|
||||
_fits = None # lazily loaded
|
||||
|
||||
|
||||
def load_lib():
    """Import ``astropy.io.fits``, cache it in the module-global ``_fits``,
    and return it.

    Raises
    ------
    ImportError
        If astropy is not installed, with a pointer to installation
        instructions.
    """
    global _fits
    try:
        from astropy.io import fits as _fits
    except ImportError:
        # Note the trailing spaces: these implicitly-concatenated string
        # literals previously ran together ("package.Please").
        raise ImportError(
            "The FITS format relies on the astropy package. "
            "Please refer to http://www.astropy.org/ "
            "for further instructions."
        )
    return _fits
|
||||
|
||||
|
||||
class FitsFormat(Format):
    """See :mod:`imageio.plugins.fits`"""

    def _can_read(self, request):
        # We return True if ext matches, because this is the only plugin
        # that can. If astropy is not installed, a useful error follows.
        return request.extension in self.extensions

    def _can_write(self, request):
        # No write support
        return False

    # -- reader

    class Reader(Format.Reader):
        def _open(self, cache=False, **kwargs):
            """Open the FITS file and index the HDUs that contain image data."""
            if not _fits:
                load_lib()
            hdulist = _fits.open(self.request.get_file(), cache=cache, **kwargs)

            self._index = []
            allowed_hdu_types = (_fits.ImageHDU, _fits.PrimaryHDU, _fits.CompImageHDU)
            # enumerate() replaces the former zip(range(len(...)), ...) idiom
            for n, hdu in enumerate(hdulist):
                if isinstance(hdu, allowed_hdu_types):
                    # Ignore (primary) header units with no data (use '.size'
                    # rather than '.data' to avoid actually loading the image):
                    if hdu.size > 0:
                        self._index.append(n)
            self._hdulist = hdulist

        def _close(self):
            self._hdulist.close()

        def _get_length(self):
            # Number of HDUs that contain image data
            return len(self._index)

        def _get_data(self, index):
            # Get data
            if index < 0 or index >= len(self._index):
                raise IndexError("Index out of range while reading from fits")
            im = self._hdulist[self._index[index]].data
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # Get the meta data for the given index
            raise RuntimeError("The fits format does not support meta data.")
|
||||
@@ -1,404 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read/Write images using FreeImage.
|
||||
|
||||
Backend Library: `FreeImage <https://freeimage.sourceforge.io/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
imageio_download_bin freeimage
|
||||
|
||||
or you can download the backend using the function::
|
||||
|
||||
imageio.plugins.freeimage.download()
|
||||
|
||||
Each Freeimage format has the ``flags`` keyword argument. See the `Freeimage
|
||||
documentation <https://freeimage.sourceforge.io/>`_ for more information.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
flags : int
|
||||
A freeimage-specific option. In most cases we provide explicit
|
||||
parameters for influencing image reading.
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
from ..core.request import RETURN_BYTES
|
||||
from ._freeimage import FNAME_PER_PLATFORM, IO_FLAGS, download, fi # noqa
|
||||
|
||||
# todo: support files with only meta data
|
||||
|
||||
|
||||
class FreeimageFormat(Format):
    """See :mod:`imageio.plugins.freeimage`"""

    _modes = "i"

    def __init__(self, name, description, extensions=None, modes=None, *, fif=None):
        super().__init__(name, description, extensions=extensions, modes=modes)
        # Freeimage format identifier (FIF), set when the format is created
        self._fif = fif

    @property
    def fif(self):
        return self._fif  # Set when format is created

    def _can_read(self, request):
        # Ask freeimage if it can read it, maybe ext missing
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "r", request.firstbytes)
                except Exception:  # pragma: no cover
                    request._fif = -1
            if request._fif == self.fif:
                return True
            elif request._fif == 7 and self.fif == 14:
                # PPM gets identified as PBM and PPM can read PBM
                # see: https://github.com/imageio/imageio/issues/677
                return True

    def _can_write(self, request):
        # Ask freeimage, because we are not aware of all formats
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "w")
                except ValueError:  # pragma: no cover
                    if request.raw_uri == RETURN_BYTES:
                        request._fif = self.fif
                    else:
                        request._fif = -1
            # Compare by value, not identity: 'is' between ints only happens
            # to work for small interned integers (was `request._fif is self.fif`)
            if request._fif == self.fif:
                return True

    # --

    class Reader(Format.Reader):
        def _get_length(self):
            return 1

        def _open(self, flags=0):
            self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags)
            self._bm.load_from_filename(self.request.get_local_filename())

        def _close(self):
            self._bm.close()

        def _get_data(self, index):
            if index != 0:
                raise IndexError("This format only supports singleton images.")
            return self._bm.get_image_data(), self._bm.get_meta_data()

        def _get_meta_data(self, index):
            if not (index is None or index == 0):
                raise IndexError()
            return self._bm.get_meta_data()

    # --

    class Writer(Format.Writer):
        def _open(self, flags=0):
            self._flags = flags  # Store flags for later use
            self._bm = None
            self._is_set = False  # To prevent appending more than one image
            self._meta = {}

        def _close(self):
            # Set global meta data
            self._bm.set_meta_data(self._meta)
            # Write and close
            self._bm.save_to_filename(self.request.get_local_filename())
            self._bm.close()

        def _append_data(self, im, meta):
            # Check if set
            if not self._is_set:
                self._is_set = True
            else:
                raise RuntimeError(
                    "Singleton image; " "can only append image data once."
                )
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            # Lazy instantiation of the bitmap, we need image data
            if self._bm is None:
                self._bm = fi.create_bitmap(
                    self.request.filename, self.format.fif, self._flags
                )
                self._bm.allocate(im)
            # Set data
            self._bm.set_image_data(im)
            # There is no distinction between global and per-image meta data
            # for singleton images
            self._meta = meta

        def _set_meta_data(self, meta):
            self._meta = meta
|
||||
|
||||
|
||||
# Special plugins
|
||||
|
||||
# todo: there is also FIF_LOAD_NOPIXELS,
|
||||
# but perhaps that should be used with get_meta_data.
|
||||
|
||||
|
||||
class FreeimageBmpFormat(FreeimageFormat):
    """A BMP format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for saving
    ---------------------
    compression : bool
        Whether to compress the bitmap using RLE when saving. Default False.
        It seems this does not always work, but who cares, you should use
        PNG anyway.

    """

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, compression=False):
            # Fold the compression kwarg into the freeimage flag word,
            # then defer to the base writer with the combined value.
            bmp_flags = int(flags)
            bmp_flags |= IO_FLAGS.BMP_SAVE_RLE if compression else IO_FLAGS.BMP_DEFAULT
            return FreeimageFormat.Writer._open(self, bmp_flags)

        def _append_data(self, im, meta):
            # BMP stores 8-bit samples; convert before delegating
            return FreeimageFormat.Writer._append_data(
                self, image_as_uint(im, bitdepth=8), meta
            )
|
||||
|
||||
|
||||
class FreeimagePngFormat(FreeimageFormat):
    """A PNG format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    ignoregamma : bool
        Avoid gamma correction. Default True.

    Parameters for saving
    ---------------------
    compression : {0, 1, 6, 9}
        The compression factor. Higher factors result in more
        compression at the cost of speed. Note that PNG compression is
        always lossless. Default 9.
    quantize : int
        If specified, turn the given RGB or RGBA image in a paletted image
        for more efficient storage. The value should be between 2 and 256.
        If the value of 0 the image is not quantized.
    interlaced : bool
        Save using Adam7 interlacing. Default False.
    """

    class Reader(FreeimageFormat.Reader):
        def _open(self, flags=0, ignoregamma=True):
            # Build flags from kwargs
            flags = int(flags)
            if ignoregamma:
                flags |= IO_FLAGS.PNG_IGNOREGAMMA
            # Enter as usual, with modified flags
            return FreeimageFormat.Reader._open(self, flags)

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, compression=9, quantize=0, interlaced=False):
            # Map the user-facing compression levels onto freeimage flag values
            compression_map = {
                0: IO_FLAGS.PNG_Z_NO_COMPRESSION,
                1: IO_FLAGS.PNG_Z_BEST_SPEED,
                6: IO_FLAGS.PNG_Z_DEFAULT_COMPRESSION,
                9: IO_FLAGS.PNG_Z_BEST_COMPRESSION,
            }
            # Build flags from kwargs
            flags = int(flags)
            if interlaced:
                flags |= IO_FLAGS.PNG_INTERLACED
            try:
                flags |= compression_map[compression]
            except KeyError:
                raise ValueError("Png compression must be 0, 1, 6, or 9.")
            # Act as usual, but with modified flags
            return FreeimageFormat.Writer._open(self, flags)

        def _append_data(self, im, meta):
            # Preserve 16-bit depth for uint16 input; everything else is 8-bit
            if str(im.dtype) == "uint16":
                im = image_as_uint(im, bitdepth=16)
            else:
                im = image_as_uint(im, bitdepth=8)
            FreeimageFormat.Writer._append_data(self, im, meta)
            # Quantize?
            # Note: the quantize value is read from the request kwargs rather
            # than from this method's arguments.
            q = int(self.request.kwargs.get("quantize", False))
            if not q:
                pass
            elif not (im.ndim == 3 and im.shape[-1] == 3):
                raise ValueError("Can only quantize RGB images")
            elif q < 2 or q > 256:
                raise ValueError("PNG quantize param must be 2..256")
            else:
                # Replace the bitmap with its quantized (paletted) copy and
                # close the original; order matters here.
                bm = self._bm.quantize(0, q)
                self._bm.close()
                self._bm = bm
|
||||
|
||||
|
||||
class FreeimageJpegFormat(FreeimageFormat):
    """A JPEG format based on the Freeimage library.

    This format supports grayscale and RGB images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    exifrotate : bool
        Automatically rotate the image according to the exif flag.
        Default True. If 2 is given, do the rotation in Python instead
        of freeimage.
    quickread : bool
        Read the image more quickly, at the expense of quality.
        Default False.

    Parameters for saving
    ---------------------
    quality : scalar
        The compression factor of the saved image (1..100), higher
        numbers result in higher quality but larger file size. Default 75.
    progressive : bool
        Save as a progressive JPEG file (e.g. for images on the web).
        Default False.
    optimize : bool
        On saving, compute optimal Huffman coding tables (can reduce a
        few percent of file size). Default False.
    baseline : bool
        Save basic JPEG, without metadata or any markers. Default False.

    """

    class Reader(FreeimageFormat.Reader):
        def _open(self, flags=0, exifrotate=True, quickread=False):
            # Build flags from kwargs
            flags = int(flags)
            # exifrotate == 2 means "rotate in Python" (see _rotate), so the
            # freeimage-side rotation flag is only set for other truthy values
            if exifrotate and exifrotate != 2:
                flags |= IO_FLAGS.JPEG_EXIFROTATE
            if not quickread:
                flags |= IO_FLAGS.JPEG_ACCURATE
            # Enter as usual, with modified flags
            return FreeimageFormat.Reader._open(self, flags)

        def _get_data(self, index):
            # Delegate to the base reader, then apply the optional
            # Python-side EXIF rotation fallback
            im, meta = FreeimageFormat.Reader._get_data(self, index)
            im = self._rotate(im, meta)
            return im, meta

        def _rotate(self, im, meta):
            """Use Orientation information from EXIF meta data to
            orient the image correctly. Freeimage is also supposed to
            support that, and I am pretty sure it once did, but now it
            does not, so let's just do it in Python.
            Edit: and now it works again, just leave in place as a fallback.
            """
            # Only active when the user explicitly asked for exifrotate=2
            if self.request.kwargs.get("exifrotate", None) == 2:
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    # Rotate first, then mirror the flipped orientations
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(
            self, flags=0, quality=75, progressive=False, optimize=False, baseline=False
        ):
            # Test quality
            quality = int(quality)
            if quality < 1 or quality > 100:
                raise ValueError("JPEG quality should be between 1 and 100.")
            # Build flags from kwargs
            flags = int(flags)
            # The quality value is OR-ed directly into the flag word
            flags |= quality
            if progressive:
                flags |= IO_FLAGS.JPEG_PROGRESSIVE
            if optimize:
                flags |= IO_FLAGS.JPEG_OPTIMIZE
            if baseline:
                flags |= IO_FLAGS.JPEG_BASELINE
            # Act as usual, but with modified flags
            return FreeimageFormat.Writer._open(self, flags)

        def _append_data(self, im, meta):
            # JPEG has no alpha channel; reject RGBA input early
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError("JPEG does not support alpha channel.")
            im = image_as_uint(im, bitdepth=8)
            return FreeimageFormat.Writer._append_data(self, im, meta)
|
||||
|
||||
|
||||
class FreeimagePnmFormat(FreeimageFormat):
    """A PNM format based on the Freeimage library.

    This format supports single bit (PBM), grayscale (PGM) and RGB (PPM)
    images, even with ASCII or binary coding.

    The freeimage plugin requires a `freeimage` binary. If this binary
    not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for saving
    ---------------------
    use_ascii : bool
        Save with ASCII coding. Default True.
    """

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, use_ascii=True):
            # Fold the use_ascii kwarg into the freeimage flag word,
            # then defer to the base writer.
            pnm_flags = int(flags)
            if use_ascii:
                pnm_flags |= IO_FLAGS.PNM_SAVE_ASCII
            return FreeimageFormat.Writer._open(self, pnm_flags)
|
||||
@@ -1,316 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Plugin for multi-image freeimafe formats, like animated GIF and ico.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
from ._freeimage import fi, IO_FLAGS
|
||||
from .freeimage import FreeimageFormat
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FreeimageMulti(FreeimageFormat):
    """Base class for freeimage formats that support multiple images."""

    # Support both single-image and multi-image modes
    _modes = "iI"
    # Freeimage format identifier; -1 means "not set", subclasses override
    _fif = -1

    class Reader(Format.Reader):
        def _open(self, flags=0):
            # Load the multi-page bitmap from the request's file
            flags = int(flags)
            # Create bitmap
            self._bm = fi.create_multipage_bitmap(
                self.request.filename, self.format.fif, flags
            )
            self._bm.load_from_filename(self.request.get_local_filename())

        def _close(self):
            self._bm.close()

        def _get_length(self):
            # One image per page in the bitmap
            return len(self._bm)

        def _get_data(self, index):
            # Fetch the requested page; always close the page handle again
            sub = self._bm.get_page(index)
            try:
                return sub.get_image_data(), sub.get_meta_data()
            finally:
                sub.close()

        def _get_meta_data(self, index):
            # A missing index (None, and also 0) maps to the first page
            index = index or 0
            if index < 0 or index >= len(self._bm):
                raise IndexError()
            sub = self._bm.get_page(index)
            try:
                return sub.get_meta_data()
            finally:
                sub.close()

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0):
            # Set flags
            self._flags = flags = int(flags)
            # Instantiate multi-page bitmap
            self._bm = fi.create_multipage_bitmap(
                self.request.filename, self.format.fif, flags
            )
            self._bm.save_to_filename(self.request.get_local_filename())

        def _close(self):
            # Close bitmap
            self._bm.close()

        def _append_data(self, im, meta):
            # Append one image as a new page of the multi-page bitmap.
            # Prepare data
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            im = image_as_uint(im, bitdepth=8)
            # Create sub bitmap
            sub1 = fi.create_bitmap(self._bm._filename, self.format.fif)
            # Let subclass add data to bitmap, optionally return new
            sub2 = self._append_bitmap(im, meta, sub1)
            # Add
            self._bm.append_bitmap(sub2)
            sub2.close()
            # If the subclass returned a different bitmap, the original
            # still needs to be released
            if sub1 is not sub2:
                sub1.close()

        def _append_bitmap(self, im, meta, bitmap):
            # Subclass hook: fill the bitmap; may return a replacement bitmap
            # Set data
            bitmap.allocate(im)
            bitmap.set_image_data(im)
            bitmap.set_meta_data(meta)
            # Return that same bitmap
            return bitmap

        def _set_meta_data(self, meta):
            pass  # ignore global meta data
|
||||
|
||||
|
||||
class MngFormat(FreeimageMulti):
    """An Mng format based on the Freeimage library.

    Read only. Seems broken.
    """

    _fif = 6  # freeimage format identifier for MNG

    def _can_write(self, request):  # pragma: no cover
        # Writing MNG is not supported by this plugin
        return False
|
||||
|
||||
|
||||
class IcoFormat(FreeimageMulti):
    """An ICO format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    makealpha : bool
        Convert to 32-bit and create an alpha channel from the AND-
        mask when loading. Default False. Note that this returns wrong
        results if the image was already RGBA.

    """

    _fif = 1  # freeimage format identifier for ICO

    class Reader(FreeimageMulti.Reader):
        def _open(self, flags=0, makealpha=False):
            # Fold the makealpha kwarg into the freeimage flag word
            ico_flags = int(flags)
            if makealpha:
                ico_flags |= IO_FLAGS.ICO_MAKEALPHA
            return FreeimageMulti.Reader._open(self, ico_flags)
|
||||
|
||||
|
||||
class GifFormat(FreeimageMulti):
    """A format for reading and writing static and animated GIF, based
    on the Freeimage library.

    Images read with this format are always RGBA. Currently,
    the alpha channel is ignored when saving RGB images with this
    format.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    playback : bool
        'Play' the GIF to generate each frame (as 32bpp) instead of
        returning raw frame data when loading. Default True.

    Parameters for saving
    ---------------------
    loop : int
        The number of iterations. Default 0 (meaning loop indefinitely)
    duration : {float, list}
        The duration (in seconds) of each frame. Either specify one value
        that is used for all frames, or one value for each frame.
        Note that in the GIF format the duration/delay is expressed in
        hundredths of a second, which limits the precision of the duration.
    fps : float
        The number of frames per second. If duration is not given, the
        duration for each frame is set to 1/fps. Default 10.
    palettesize : int
        The number of colors to quantize the image to. Is rounded to
        the nearest power of two. Default 256.
    quantizer : {'wu', 'nq'}
        The quantization algorithm:
            * wu - Wu, Xiaolin, Efficient Statistical Computations for
              Optimal Color Quantization
            * nq (neuqant) - Dekker A. H., Kohonen neural networks for
              optimal color quantization
    subrectangles : bool
        If True, will try and optimize the GIF by storing only the
        rectangular parts of each frame that change with respect to the
        previous. Unfortunately, this option seems currently broken
        because FreeImage does not handle DisposalMethod correctly.
        Default False.
    """

    # FreeImage format identifier (FIF) for GIF
    _fif = 25

    class Reader(FreeimageMulti.Reader):
        def _open(self, flags=0, playback=True):
            # Build flags from kwargs
            flags = int(flags)
            if playback:
                flags |= IO_FLAGS.GIF_PLAYBACK
            # FIX: propagate the base class' return value, consistent
            # with IcoFormat.Reader._open (it was previously dropped).
            return FreeimageMulti.Reader._open(self, flags)

        def _get_data(self, index):
            im, meta = FreeimageMulti.Reader._get_data(self, index)
            # im = im[:, :, :3]  # Drop alpha channel
            return im, meta

    # -- writer

    class Writer(FreeimageMulti.Writer):
        # todo: subrectangles
        # todo: global palette

        def _open(
            self,
            flags=0,
            loop=0,
            duration=None,
            fps=10,
            palettesize=256,
            quantizer="Wu",
            subrectangles=False,
        ):
            # Check palettesize
            if palettesize < 2 or palettesize > 256:
                raise ValueError("GIF quantize param must be 2..256")
            if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
                # BUGFIX: round the *requested* palettesize up to the next
                # power of two. Previously np.log2(128) was hard-coded, so
                # any non-power-of-two request silently became 128.
                palettesize = 2 ** int(np.log2(palettesize) + 0.999)
                logger.warning(
                    "Warning: palettesize (%r) modified to a factor of "
                    "two between 2-256." % palettesize
                )
            self._palettesize = palettesize
            # Check quantizer
            self._quantizer = {"wu": 0, "nq": 1}.get(quantizer.lower(), None)
            if self._quantizer is None:
                raise ValueError('Invalid quantizer, must be "wu" or "nq".')
            # Check frametime; stored in milliseconds (GIF stores
            # hundredths of a second, so precision is limited).
            if duration is None:
                self._frametime = [int(1000 / float(fps) + 0.5)]
            elif isinstance(duration, list):
                self._frametime = [int(1000 * d) for d in duration]
            elif isinstance(duration, (float, int)):
                self._frametime = [int(1000 * duration)]
            else:
                raise ValueError("Invalid value for duration: %r" % duration)
            # Check subrectangles
            self._subrectangles = bool(subrectangles)
            self._prev_im = None
            # Init
            FreeimageMulti.Writer._open(self, flags)
            # Set global meta data (applied to the first frame in
            # _append_bitmap)
            self._meta = {}
            self._meta["ANIMATION"] = {
                # 'GlobalPalette': np.array([0]).astype(np.uint8),
                "Loop": np.array([loop]).astype(np.uint32),
                # 'LogicalWidth': np.array([x]).astype(np.uint16),
                # 'LogicalHeight': np.array([x]).astype(np.uint16),
            }

        def _append_bitmap(self, im, meta, bitmap):
            # Prepare meta data
            meta = meta.copy()
            meta_a = meta["ANIMATION"] = {}
            # If this is the first frame, assign it our "global" meta data
            if len(self._bm) == 0:
                meta.update(self._meta)
                meta_a = meta["ANIMATION"]
            # Set frame time (last list entry is reused when there are
            # more frames than durations)
            index = len(self._bm)
            if index < len(self._frametime):
                ft = self._frametime[index]
            else:
                ft = self._frametime[-1]
            meta_a["FrameTime"] = np.array([ft]).astype(np.uint32)
            # Check array: drop alpha channel, it is not saved
            if im.ndim == 3 and im.shape[-1] == 4:
                im = im[:, :, :3]
            # Process subrectangles
            im_uncropped = im
            if self._subrectangles and self._prev_im is not None:
                im, xy = self._get_sub_rectangles(self._prev_im, im)
                meta_a["DisposalMethod"] = np.array([1]).astype(np.uint8)
                meta_a["FrameLeft"] = np.array([xy[0]]).astype(np.uint16)
                meta_a["FrameTop"] = np.array([xy[1]]).astype(np.uint16)
            self._prev_im = im_uncropped
            # Set image data
            sub2 = sub1 = bitmap
            sub1.allocate(im)
            sub1.set_image_data(im)
            # Quantize it if its RGB
            if im.ndim == 3 and im.shape[-1] == 3:
                sub2 = sub1.quantize(self._quantizer, self._palettesize)
            # Set meta data and return
            sub2.set_meta_data(meta)
            return sub2

        def _get_sub_rectangles(self, prev, im):
            """
            Calculate the minimal rectangles that need updating each frame.
            Returns a two-element tuple containing the cropped images and a
            list of x-y positions.
            """
            # Get difference, sum over colors
            diff = np.abs(im - prev)
            if diff.ndim == 3:
                diff = diff.sum(2)
            # Get begin and end for both dimensions
            X = np.argwhere(diff.sum(0))
            Y = np.argwhere(diff.sum(1))
            # Get rect coordinates
            if X.size and Y.size:
                x0, x1 = int(X[0]), int(X[-1]) + 1
                y0, y1 = int(Y[0]), int(Y[-1]) + 1
            else:  # No change ... make it minimal
                x0, x1 = 0, 2
                y0, y1 = 0, 2
            # Cut out and return
            return im[y0:y1, x0:x1], (x0, y0)
|
||||
@@ -1,71 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read GDAL files.
|
||||
|
||||
Backend: `GDAL <https://gdal.org/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[gdal]
|
||||
|
||||
Parameters
|
||||
----------
|
||||
none
|
||||
"""
|
||||
|
||||
from ..core import Format, has_module
|
||||
|
||||
_gdal = None # lazily loaded in load_lib()
|
||||
|
||||
|
||||
def load_lib():
    """Import ``osgeo.gdal``, cache it in the module-global ``_gdal``,
    and return it.

    Raises
    ------
    ImportError
        If the GDAL package is not installed.
    """
    global _gdal
    try:
        import osgeo.gdal as _gdal
    except ImportError:
        # BUGFIX: the concatenated message parts were missing separating
        # spaces, yielding "package.Please ... gdal.org/for further ...".
        raise ImportError(
            "The GDAL format relies on the GDAL package. "
            "Please refer to http://www.gdal.org/ "
            "for further instructions."
        )
    return _gdal
|
||||
|
||||
|
||||
# Extensions handled by the GDAL plugin.
# BUGFIX: ".tif" previously had a stray leading space (" .tif") and could
# never match a real file extension.
GDAL_FORMATS = (".tiff", ".tif", ".img", ".ecw", ".jpg", ".jpeg")
|
||||
|
||||
|
||||
class GdalFormat(Format):
    """See :mod:`imageio.plugins.gdal`"""

    def _can_read(self, request):
        # ECW files are always claimed; other extensions only when the
        # GDAL backend is importable.  (Implicitly returns None — falsy —
        # otherwise.)
        if request.extension in (".ecw",):
            return True
        if has_module("osgeo.gdal"):
            return request.extension in self.extensions

    def _can_write(self, request):
        # GDAL writing is not implemented by this plugin.
        return False

    # --

    class Reader(Format.Reader):
        def _open(self):
            # Lazily import the GDAL backend on first use.
            if not _gdal:
                load_lib()
            self._ds = _gdal.Open(self.request.get_local_filename())

        def _close(self):
            # Drop our reference; GDAL closes the dataset when the
            # object is garbage-collected.
            del self._ds

        def _get_length(self):
            # A GDAL file exposes exactly one dataset.
            return 1

        def _get_data(self, index):
            if index != 0:
                raise IndexError("Gdal file contains only one dataset")
            return self._ds.ReadAsArray(), self._get_meta_data(index)

        def _get_meta_data(self, index):
            # Same meta data regardless of index (single dataset).
            return self._ds.GetMetadata()
|
||||
@@ -1,105 +0,0 @@
|
||||
"""
|
||||
PIL-based formats to take screenshots and grab from the clipboard.
|
||||
"""
|
||||
|
||||
import threading
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class BaseGrabFormat(Format):
    """Base format for grab formats."""

    # Lazy-import state: flag says "import was attempted", not
    # "import succeeded" — _ImageGrab stays None on failure.
    _pillow_imported = False
    _ImageGrab = None

    def __init__(self, *args, **kwargs):
        super(BaseGrabFormat, self).__init__(*args, **kwargs)
        # Serializes the one-time Pillow import across threads.
        self._lock = threading.RLock()

    def _can_write(self, request):
        # Grab formats are read-only.
        return False

    def _init_pillow(self):
        # Import PIL.ImageGrab exactly once; return it, or None when
        # Pillow (or its ImageGrab module) is unavailable.
        with self._lock:
            if not self._pillow_imported:
                self._pillow_imported = True  # more like tried to import
                import PIL

                if not hasattr(PIL, "__version__"):  # pragma: no cover
                    raise ImportError("Imageio Pillow requires " "Pillow, not PIL!")
                try:
                    from PIL import ImageGrab
                except ImportError:
                    return None
                self._ImageGrab = ImageGrab
        return self._ImageGrab

    class Reader(Format.Reader):
        def _open(self):
            pass

        def _close(self):
            pass

        def _get_data(self, index):
            # Actual grabbing is implemented on the format subclass.
            return self.format._get_data(index)
|
||||
|
||||
|
||||
class ScreenGrabFormat(BaseGrabFormat):
    """The ScreenGrabFormat provided a means to grab screenshots using
    the uri of "<screen>".

    This functionality is provided via Pillow. Note that "<screen>" is
    only supported on Windows and OS X.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only the special "<screen>" uri is handled, and only when
        # Pillow's ImageGrab module can be imported.
        return request.filename == "<screen>" and bool(self._init_pillow())

    def _get_data(self, index):
        grabber = self._init_pillow()
        assert grabber

        screenshot = grabber.grab()
        assert screenshot is not None
        return np.asarray(screenshot), {}
|
||||
|
||||
|
||||
class ClipboardGrabFormat(BaseGrabFormat):
    """The ClipboardGrabFormat provided a means to grab image data from
    the clipboard, using the uri "<clipboard>"

    This functionality is provided via Pillow. Note that "<clipboard>" is
    only supported on Windows.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only the special "<clipboard>" uri is handled, and only when
        # Pillow's ImageGrab module can be imported.
        return request.filename == "<clipboard>" and bool(self._init_pillow())

    def _get_data(self, index):
        grabber = self._init_pillow()
        assert grabber

        clip = grabber.grabclipboard()
        if clip is None:
            raise RuntimeError(
                "There seems to be no image data on the clipboard now."
            )
        return np.asarray(clip), {}
|
||||
@@ -1,714 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, imageio contributors
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
#
|
||||
|
||||
""" Read LFR files (Lytro Illum).
|
||||
|
||||
Backend: internal
|
||||
|
||||
Plugin to read Lytro Illum .lfr and .raw files as produced
|
||||
by the Lytro Illum light field camera. It is actually a collection
|
||||
of plugins, each supporting slightly different keyword arguments
|
||||
|
||||
Parameters
|
||||
----------
|
||||
meta_only : bool
|
||||
Whether to only read the metadata.
|
||||
include_thumbnail : bool
|
||||
(only for lytro-lfr and lytro-lfp)
|
||||
Whether to include an image thumbnail in the metadata.
|
||||
|
||||
"""
|
||||
#
|
||||
#
|
||||
# This code is based on work by
|
||||
# David Uhlig and his lfr_reader
|
||||
# (https://www.iiit.kit.edu/uhlig.php)
|
||||
# Donald Dansereau and his Matlab LF Toolbox
|
||||
# (http://dgd.vision/Tools/LFToolbox/)
|
||||
# and Behnam Esfahbod and his Python LFP-Reader
|
||||
# (https://github.com/behnam/python-lfp-reader/)
|
||||
|
||||
|
||||
import os
|
||||
import json
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
from ..v2 import imread
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)


# Sensor size of Lytro Illum resp. Lytro F01 light field camera sensor
LYTRO_ILLUM_IMAGE_SIZE = (5368, 7728)
LYTRO_F01_IMAGE_SIZE = (3280, 3280)

# Parameter of lfr file format (chunk layout of the LFR/LFP container)
HEADER_LENGTH = 12
SIZE_LENGTH = 4  # = 16 - header_length
SHA1_LENGTH = 45  # = len("sha1-") + (160 / 4)
PADDING_LENGTH = 35  # = (4*16) - header_length - size_length - sha1_length
# Number of data chunks per file for the Illum and F01 cameras
DATA_CHUNKS_ILLUM = 11
DATA_CHUNKS_F01 = 3
|
||||
|
||||
|
||||
class LytroFormat(Format):
    """Base class for Lytro format.
    The subclasses LytroLfrFormat, LytroLfpFormat, LytroIllumRawFormat and
    LytroF01RawFormat implement the Lytro-LFR, Lytro-LFP and Lytro-RAW format
    for the Illum and original F01 camera respectively.
    Writing is not supported.
    """

    # Only single images are supported.
    _modes = "i"

    def _can_write(self, request):
        # Writing of Lytro files is not supported
        return False

    # -- writer

    class Writer(Format.Writer):
        # Stub writer: opens cleanly but rejects any attempt to write.

        def _open(self, flags=0):
            self._fp = self.request.get_file()

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._fp
            pass

        def _append_data(self, im, meta):
            # Process the given data and meta data.
            raise RuntimeError("The lytro format cannot write image data.")

        def _set_meta_data(self, meta):
            # Process the given meta data (global for all images)
            # It is not mandatory to support this.
            raise RuntimeError("The lytro format cannot write meta data.")
|
||||
|
||||
|
||||
class LytroIllumRawFormat(LytroFormat):
    """This is the Lytro Illum RAW format.
    The raw format is a 10bit image format as used by the Lytro Illum
    light field camera. The format will read the specified raw file and will
    try to load a .txt or .json file with the associated meta data.
    This format does not support writing.


    Parameters for reading
    ----------------------
    meta_only : bool
        Whether to only read the metadata.
    """

    def _can_read(self, request):
        # Check if mode and extensions are supported by the format
        if request.extension in (".raw",):
            return True

    @staticmethod
    def rearrange_bits(array):
        """Unpack 10-bit pixel data and normalize to [0, 1] float64.

        Every group of five input bytes holds four 8-bit MSB values
        (t0..t3) followed by one byte carrying the four 2-bit LSB pairs.
        Returns an image of shape LYTRO_ILLUM_IMAGE_SIZE.
        """
        t0 = array[0::5]
        t1 = array[1::5]
        t2 = array[2::5]
        t3 = array[3::5]
        lsb = array[4::5]

        t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3)
        t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2)
        t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4)
        t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6)

        image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16)
        image[:, 0::4] = t0.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 1::4] = t1.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 2::4] = t2.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 3::4] = t3.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )

        # Normalize data to 1.0 as 64-bit float.
        # Division is by 1023 as the Lytro Illum saves 10-bit raw data.
        return np.divide(image, 1023.0).astype(np.float64)

    # -- reader

    class Reader(Format.Reader):
        def _open(self, meta_only=False):
            self._file = self.request.get_file()
            self._data = None
            self._meta_only = meta_only

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._file
            del self._data

        def _get_length(self):
            # Return the number of images.
            return 1

        def _get_data(self, index):
            # Return the data and meta data for the given index

            # BUGFIX: the "no index" sentinel is None, not the string
            # "None" (now consistent with _get_meta_data below).
            if index not in [0, None]:
                raise IndexError("Lytro file contains only one dataset")

            if not self._meta_only:
                # Read all bytes (lazily, once)
                if self._data is None:
                    self._data = self._file.read()

                # Read bytes from string and convert to uint16
                raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)

                # Rearrange bits
                img = LytroIllumRawFormat.rearrange_bits(raw)

            else:
                # Return empty image
                img = np.array([])

            # Return image and meta data
            return img, self._get_meta_data(index=0)

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None, it
            # should return the global meta data.

            if index not in [0, None]:
                raise IndexError("Lytro meta data file contains only one dataset")

            # Try to read meta data from meta data file corresponding
            # to the raw data file, extension in [.txt, .TXT, .json, .JSON]
            filename_base = os.path.splitext(self.request.get_local_filename())[0]

            meta_data = None

            for ext in [".txt", ".TXT", ".json", ".JSON"]:
                candidate = filename_base + ext
                if os.path.isfile(candidate):
                    # BUGFIX: close the meta data file again (the handle
                    # used to be leaked via json.load(open(...))).
                    with open(candidate) as meta_file:
                        meta_data = json.load(meta_file)

            if meta_data is not None:
                return meta_data

            else:
                logger.warning("No metadata file found for provided raw file.")
                return {}
|
||||
|
||||
|
||||
class LytroLfrFormat(LytroFormat):
    """This is the Lytro Illum LFR format.
    The lfr is a image and meta data container format as used by the
    Lytro Illum light field camera.
    The format will read the specified lfr file.
    This format does not support writing.

    Parameters for reading
    ----------------------
    meta_only : bool
        Whether to only read the metadata.
    include_thumbnail : bool
        Whether to include an image thumbnail in the metadata.
    """

    def _can_read(self, request):
        # Check if mode and extensions are supported by the format
        if request.extension in (".lfr",):
            return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, meta_only=False, include_thumbnail=True):
            # Parse the whole container up front: header, chunk table,
            # table-of-contents chunk, then the referenced data chunks.
            self._file = self.request.get_file()
            self._data = None
            self._chunks = {}  # sha1 -> (start position, size)
            self.metadata = {}
            self._content = None  # decoded table-of-contents JSON
            self._meta_only = meta_only
            self._include_thumbnail = include_thumbnail

            self._find_header()
            self._find_chunks()
            self._find_meta()

            try:
                # Get sha1 dict and check if it is in dictionary of data chunks
                chunk_dict = self._content["frames"][0]["frame"]
                if (
                    chunk_dict["metadataRef"] in self._chunks
                    and chunk_dict["imageRef"] in self._chunks
                    and chunk_dict["privateMetadataRef"] in self._chunks
                ):
                    if not self._meta_only:
                        # Read raw image data byte buffer
                        data_pos, size = self._chunks[chunk_dict["imageRef"]]
                        self._file.seek(data_pos, 0)
                        self.raw_image_data = self._file.read(size)

                    # Read meta data
                    data_pos, size = self._chunks[chunk_dict["metadataRef"]]
                    self._file.seek(data_pos, 0)
                    metadata = self._file.read(size)
                    # Add metadata to meta data dict
                    self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))

                    # Read private metadata
                    data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
                    self._file.seek(data_pos, 0)
                    serial_numbers = self._file.read(size)
                    self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
                    # Add private metadata to meta data dict
                    self.metadata["privateMetadata"] = self.serial_numbers

                    # Read image preview thumbnail
                    if self._include_thumbnail:
                        chunk_dict = self._content["thumbnails"][0]
                        if chunk_dict["imageRef"] in self._chunks:
                            # Read thumbnail image from thumbnail chunk
                            data_pos, size = self._chunks[chunk_dict["imageRef"]]
                            self._file.seek(data_pos, 0)
                            # Read binary data, read image as jpeg
                            thumbnail_data = self._file.read(size)
                            thumbnail_img = imread(thumbnail_data, format="jpeg")

                            thumbnail_height = chunk_dict["height"]
                            thumbnail_width = chunk_dict["width"]

                            # Add thumbnail to metadata
                            self.metadata["thumbnail"] = {
                                "image": thumbnail_img,
                                "height": thumbnail_height,
                                "width": thumbnail_width,
                            }

            except KeyError:
                raise RuntimeError("The specified file is not a valid LFR file.")

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._file
            del self._data

        def _get_length(self):
            # Return the number of images. Can be np.inf
            return 1

        def _find_header(self):
            """
            Checks if file has correct header and skip it.
            """
            file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01"
            # Read and check header of file
            header = self._file.read(HEADER_LENGTH)
            if header != file_header:
                raise RuntimeError("The LFR file header is invalid.")

            # Read first bytes to skip header
            self._file.read(SIZE_LENGTH)

        def _find_chunks(self):
            """
            Gets start position and size of data chunks in file.
            """
            chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00"

            for i in range(0, DATA_CHUNKS_ILLUM):
                data_pos, size, sha1 = self._get_chunk(chunk_header)
                self._chunks[sha1] = (data_pos, size)

        def _find_meta(self):
            """
            Gets a data chunk that contains information over content
            of other data chunks.
            """
            meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
            data_pos, size, sha1 = self._get_chunk(meta_header)

            # Get content
            self._file.seek(data_pos, 0)
            data = self._file.read(size)
            self._content = json.loads(data.decode("ASCII"))

        def _get_chunk(self, header):
            """
            Checks if chunk has correct header and skips it.
            Finds start position and length of next chunk and reads
            sha1-string that identifies the following data chunk.

            Parameters
            ----------
            header : bytes
                Byte string that identifies start of chunk.

            Returns
            -------
            data_pos : int
                Start position of data chunk in file.
                None when the chunk is empty (size == 0).
            size : int
                Size of data chunk.
            sha1 : str
                Sha1 value of chunk.  None when the chunk is empty.
            """
            # Read and check header of chunk
            header_chunk = self._file.read(HEADER_LENGTH)
            if header_chunk != header:
                raise RuntimeError("The LFR chunk header is invalid.")

            data_pos = None
            sha1 = None

            # Read size (big-endian signed 32-bit int)
            size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
            if size > 0:
                # Read sha1
                sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
                # Skip fixed null chars
                self._file.read(PADDING_LENGTH)
                # Find start of data and skip data
                data_pos = self._file.tell()
                self._file.seek(size, 1)
                # Skip extra null chars, then rewind one byte so the file
                # position sits on the first byte of the next chunk.
                ch = self._file.read(1)
                while ch == b"\0":
                    ch = self._file.read(1)
                self._file.seek(-1, 1)

            return data_pos, size, sha1

        def _get_data(self, index):
            # Return the data and meta data for the given index
            if index not in [0, None]:
                raise IndexError("Lytro lfr file contains only one dataset")

            if not self._meta_only:
                # Read bytes from string and convert to uint16
                raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(
                    np.uint16
                )
                im = LytroIllumRawFormat.rearrange_bits(raw)
            else:
                im = np.array([])

            # Return array and dummy meta data
            return im, self.metadata

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None,
            # it returns the global meta data.
            if index not in [0, None]:
                raise IndexError("Lytro meta data file contains only one dataset")

            return self.metadata
|
||||
|
||||
|
||||
class LytroF01RawFormat(LytroFormat):
    """This is the Lytro RAW format for the original F01 Lytro camera.
    The raw format is a 12bit image format as used by the Lytro F01
    light field camera. The format will read the specified raw file and will
    try to load a .txt or .json file with the associated meta data.
    This format does not support writing.


    Parameters for reading
    ----------------------
    meta_only : bool
        Whether to only read the metadata.

    """

    def _can_read(self, request):
        # Check if mode and extensions are supported by the format
        if request.extension in (".raw",):
            return True

    @staticmethod
    def rearrange_bits(array):
        """Unpack 12-bit pixel data and normalize to [0, 1] float64.

        Every group of three input bytes holds two 12-bit pixel values.
        Returns an image of shape LYTRO_F01_IMAGE_SIZE.
        """
        t0 = array[0::3]
        t1 = array[1::3]
        t2 = array[2::3]

        a0 = np.left_shift(t0, 4) + np.right_shift(np.bitwise_and(t1, 240), 4)
        a1 = np.left_shift(np.bitwise_and(t1, 15), 8) + t2

        image = np.zeros(LYTRO_F01_IMAGE_SIZE, dtype=np.uint16)
        image[:, 0::2] = a0.reshape(
            (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2)
        )
        image[:, 1::2] = a1.reshape(
            (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2)
        )

        # Normalize data to 1.0 as 64-bit float.
        # Division is by 4095 as the Lytro F01 saves 12-bit raw data.
        return np.divide(image, 4095.0).astype(np.float64)

    # -- reader

    class Reader(Format.Reader):
        def _open(self, meta_only=False):
            self._file = self.request.get_file()
            self._data = None
            self._meta_only = meta_only

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._file
            del self._data

        def _get_length(self):
            # Return the number of images.
            return 1

        def _get_data(self, index):
            # Return the data and meta data for the given index

            # BUGFIX: the "no index" sentinel is None, not the string
            # "None" (now consistent with _get_meta_data below).
            if index not in [0, None]:
                raise IndexError("Lytro file contains only one dataset")

            if not self._meta_only:
                # Read all bytes (lazily, once)
                if self._data is None:
                    self._data = self._file.read()

                # Read bytes from string and convert to uint16
                raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)

                # Rearrange bits
                img = LytroF01RawFormat.rearrange_bits(raw)

            else:
                img = np.array([])

            # Return image and meta data
            return img, self._get_meta_data(index=0)

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None, it
            # should return the global meta data.

            if index not in [0, None]:
                raise IndexError("Lytro meta data file contains only one dataset")

            # Try to read meta data from meta data file corresponding
            # to the raw data file, extension in [.txt, .TXT, .json, .JSON]
            filename_base = os.path.splitext(self.request.get_local_filename())[0]

            meta_data = None

            for ext in [".txt", ".TXT", ".json", ".JSON"]:
                candidate = filename_base + ext
                if os.path.isfile(candidate):
                    # BUGFIX: close the meta data file again (the handle
                    # used to be leaked via json.load(open(...))).
                    with open(candidate) as meta_file:
                        meta_data = json.load(meta_file)

            if meta_data is not None:
                return meta_data

            else:
                logger.warning("No metadata file found for provided raw file.")
                return {}
|
||||
|
||||
|
||||
class LytroLfpFormat(LytroFormat):
|
||||
"""This is the Lytro Illum LFP format.
|
||||
The lfp is a image and meta data container format as used by the
|
||||
Lytro F01 light field camera.
|
||||
The format will read the specified lfp file.
|
||||
This format does not support writing.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
meta_only : bool
|
||||
Whether to only read the metadata.
|
||||
include_thumbnail : bool
|
||||
Whether to include an image thumbnail in the metadata.
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Check if mode and extensions are supported by the format
|
||||
if request.extension in (".lfp",):
|
||||
return True
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self, meta_only=False):
|
||||
self._file = self.request.get_file()
|
||||
self._data = None
|
||||
self._chunks = {}
|
||||
self.metadata = {}
|
||||
self._content = None
|
||||
self._meta_only = meta_only
|
||||
|
||||
self._find_header()
|
||||
self._find_meta()
|
||||
self._find_chunks()
|
||||
|
||||
try:
|
||||
# Get sha1 dict and check if it is in dictionary of data chunks
|
||||
chunk_dict = self._content["picture"]["frameArray"][0]["frame"]
|
||||
if (
|
||||
chunk_dict["metadataRef"] in self._chunks
|
||||
and chunk_dict["imageRef"] in self._chunks
|
||||
and chunk_dict["privateMetadataRef"] in self._chunks
|
||||
):
|
||||
if not self._meta_only:
|
||||
# Read raw image data byte buffer
|
||||
data_pos, size = self._chunks[chunk_dict["imageRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
self.raw_image_data = self._file.read(size)
|
||||
|
||||
# Read meta data
|
||||
data_pos, size = self._chunks[chunk_dict["metadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
metadata = self._file.read(size)
|
||||
# Add metadata to meta data dict
|
||||
self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))
|
||||
|
||||
# Read private metadata
|
||||
data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
serial_numbers = self._file.read(size)
|
||||
self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
|
||||
# Add private metadata to meta data dict
|
||||
self.metadata["privateMetadata"] = self.serial_numbers
|
||||
|
||||
except KeyError:
|
||||
raise RuntimeError("The specified file is not a valid LFP file.")
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._file
|
||||
del self._data
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images. Can be np.inf
|
||||
return 1
|
||||
|
||||
def _find_header(self):
|
||||
"""
|
||||
Checks if file has correct header and skip it.
|
||||
"""
|
||||
file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01"
|
||||
|
||||
# Read and check header of file
|
||||
header = self._file.read(HEADER_LENGTH)
|
||||
if header != file_header:
|
||||
raise RuntimeError("The LFP file header is invalid.")
|
||||
|
||||
# Read first bytes to skip header
|
||||
self._file.read(SIZE_LENGTH)
|
||||
|
||||
def _find_chunks(self):
    """Record the start position and size of every data chunk in the file.

    Populates ``self._chunks`` keyed by each chunk's sha1 string.
    """
    chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00"

    for _ in range(DATA_CHUNKS_F01):
        position, length, sha1 = self._get_chunk(chunk_header)
        self._chunks[sha1] = (position, length)
|
||||
|
||||
def _find_meta(self):
|
||||
"""
|
||||
Gets a data chunk that contains information over content
|
||||
of other data chunks.
|
||||
"""
|
||||
meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
|
||||
data_pos, size, sha1 = self._get_chunk(meta_header)
|
||||
|
||||
# Get content
|
||||
self._file.seek(data_pos, 0)
|
||||
data = self._file.read(size)
|
||||
self._content = json.loads(data.decode("ASCII"))
|
||||
data = self._file.read(5) # Skip 5
|
||||
|
||||
def _get_chunk(self, header):
    """
    Checks if chunk has correct header and skips it.
    Finds start position and length of next chunk and reads
    sha1-string that identifies the following data chunk.

    Parameters
    ----------
    header : bytes
        Byte string that identifies start of chunk.

    Returns
    -------
    data_pos : int
        Start position of data chunk in file.
        ``None`` when the chunk is empty (``size <= 0``).
    size : int
        Size of data chunk.
    sha1 : str
        Sha1 value of chunk. ``None`` when the chunk is empty.
    """
    # Read and check header of chunk
    header_chunk = self._file.read(HEADER_LENGTH)
    if header_chunk != header:
        raise RuntimeError("The LFP chunk header is invalid.")

    data_pos = None
    sha1 = None

    # Read size: a big-endian signed 32-bit integer.
    size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
    if size > 0:
        # Read sha1 string identifying the chunk payload.
        sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
        # Skip fixed null chars
        self._file.read(PADDING_LENGTH)
        # Find start of data and skip over the payload without reading it.
        data_pos = self._file.tell()
        self._file.seek(size, 1)
        # Skip extra null chars: scan byte-by-byte past padding, then
        # rewind one byte since the loop reads one byte too far.
        ch = self._file.read(1)
        while ch == b"\0":
            ch = self._file.read(1)
        self._file.seek(-1, 1)

    return data_pos, size, sha1
|
||||
|
||||
def _get_data(self, index):
|
||||
# Return the data and meta data for the given index
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro lfp file contains only one dataset")
|
||||
|
||||
if not self._meta_only:
|
||||
# Read bytes from string and convert to uint16
|
||||
raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(
|
||||
np.uint16
|
||||
)
|
||||
im = LytroF01RawFormat.rearrange_bits(raw)
|
||||
else:
|
||||
im = np.array([])
|
||||
|
||||
# Return array and dummy meta data
|
||||
return im, self.metadata
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index. If index is None,
|
||||
# it returns the global meta data.
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro meta data file contains only one dataset")
|
||||
|
||||
return self.metadata
|
||||
@@ -1,85 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
"""Read/Write NPZ files.
|
||||
|
||||
Backend: `Numpy <https://numpy.org/doc/stable/reference/generated/numpy.savez.html>`_
|
||||
|
||||
NPZ is a file format by numpy that provides storage of array data using gzip
|
||||
compression. This imageio plugin supports data of any shape, and also supports
|
||||
multiple images per file. However, the npz format does not provide streaming;
|
||||
all data is read/written at once. Further, there is no support for meta data.
|
||||
|
||||
See the BSDF format for a similar (but more fully featured) format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
Notes
|
||||
-----
|
||||
This format is not available on Pypy.
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class NpzFormat(Format):
    """See :mod:`imageio.plugins.npz`

    Stores any number of ndarrays in a single compressed ``.npz`` file.
    Streaming is not supported: all data is read/written at once, and the
    format carries no per-image metadata.
    """

    def _can_read(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    def _can_write(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    # -- reader

    class Reader(Format.Reader):
        def _open(self):
            # Load npz file, which provides another file like object
            self._npz = np.load(self.request.get_file())
            assert isinstance(self._npz, np.lib.npyio.NpzFile)
            # Sort array names by their trailing "_<suffix>" token so that
            # numbered entries (arr_0, arr_1, ...) come out in order.
            self._names = sorted(self._npz.files, key=lambda x: x.split("_")[-1])

        def _close(self):
            self._npz.close()

        def _get_length(self):
            # One image per stored array.
            return len(self._names)

        def _get_data(self, index):
            # Validate the index before touching the archive.
            if index < 0 or index >= len(self._names):
                # BUGFIX: error message previously said "nzp" instead of "npz".
                raise IndexError("Index out of range while reading from npz")
            im = self._npz[self._names[index]]
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # Get the meta data for the given index
            raise RuntimeError("The npz format does not support meta data.")

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            # Npz is not such a great format. We cannot stream to the file.
            # So we remember all images and write them to file at the end.
            self._images = []

        def _close(self):
            # Write everything in one go on close.
            np.savez_compressed(self.request.get_file(), *self._images)

        def _append_data(self, im, meta):
            self._images.append(im)  # discard meta data

        def set_meta_data(self, meta):
            raise RuntimeError("The npz format does not support meta data.")
|
||||
@@ -1,314 +0,0 @@
|
||||
"""Read/Write images using OpenCV.
|
||||
|
||||
Backend Library: `OpenCV <https://opencv.org/>`_
|
||||
|
||||
This plugin wraps OpenCV (also known as ``cv2``), a popular image processing
|
||||
library. Currently, it exposes OpenCVs image reading capability (no video or GIF
|
||||
support yet); however, this may be added in future releases.
|
||||
|
||||
Methods
|
||||
-------
|
||||
.. note::
|
||||
Check the respective function for a list of supported kwargs and their
|
||||
documentation.
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
OpenCVPlugin.read
|
||||
OpenCVPlugin.iter
|
||||
OpenCVPlugin.write
|
||||
OpenCVPlugin.properties
|
||||
OpenCVPlugin.metadata
|
||||
|
||||
Pixel Formats (Colorspaces)
|
||||
---------------------------
|
||||
|
||||
OpenCV is known to process images in BGR; however, most of the python ecosystem
|
||||
(in particular matplotlib and other pydata libraries) use the RGB. As such,
|
||||
images are converted to RGB, RGBA, or grayscale (where applicable) by default.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from ..core import Request
|
||||
from ..core.request import URI_BYTES, InitializationError, IOMode
|
||||
from ..core.v3_plugin_api import ImageProperties, PluginV3
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
class OpenCVPlugin(PluginV3):
    """Read/write images via OpenCV (``cv2``).

    Wraps ``cv2.imreadmulti`` / ``cv2.imwritemulti`` and converts between
    OpenCV's BGR channel order and the RGB order used by most of the
    Python ecosystem.
    """

    def __init__(self, request: Request) -> None:
        """Validate that OpenCV can handle the requested resource.

        Raises
        ------
        InitializationError
            If OpenCV has no reader (read mode) or writer (write mode)
            for the resource.
        """
        super().__init__(request)

        # OpenCV operates on local file paths, so materialize the resource.
        self.file_handle = request.get_local_filename()
        if request._uri_type is URI_BYTES:
            self.filename = "<bytes>"
        else:
            self.filename = request.raw_uri

        # Fail early if OpenCV has no codec for this resource.
        mode = request.mode.io_mode
        if mode == IOMode.read and not cv2.haveImageReader(self.file_handle):
            raise InitializationError(f"OpenCV can't read `{self.filename}`.")
        elif mode == IOMode.write and not cv2.haveImageWriter(self.file_handle):
            raise InitializationError(f"OpenCV can't write to `{self.filename}`.")

    def read(
        self,
        *,
        index: int = None,
        colorspace: Union[int, str] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> np.ndarray:
        """Read an image from the ImageResource.

        Parameters
        ----------
        index : int, Ellipsis
            If int, read the index-th image from the ImageResource. If ``...``,
            read all images from the ImageResource and stack them along a new,
            prepended, batch dimension. If None (default), use ``index=0`` if
            the image contains exactly one image and ``index=...`` otherwise.
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs
            `conversion flags
            <https://docs.opencv.org/4.x/d8/d01/group__imgproc__color__conversions.html>`_
            and use it for conversion. If str, convert the image into the given
            colorspace. Possible string values are: ``"RGB"``, ``"BGR"``,
            ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56>`_
            for details.

        Returns
        -------
        ndimage : np.ndarray
            The decoded image as a numpy array.

        """

        # Default: single image when exactly one frame exists, otherwise all.
        if index is None:
            n_images = cv2.imcount(self.file_handle, flags)
            index = 0 if n_images == 1 else ...

        if index is ...:
            retval, img = cv2.imreadmulti(self.file_handle, flags=flags)
            is_batch = True
        else:
            # Read exactly one frame starting at `index`.
            retval, img = cv2.imreadmulti(self.file_handle, index, 1, flags=flags)
            is_batch = False

        if retval is False:
            raise ValueError(f"Could not read index `{index}` from `{self.filename}`.")

        # Infer OpenCV's source colorspace from the decoded shape and pick
        # the default output colorspace accordingly.
        if img[0].ndim == 2:
            in_colorspace = "GRAY"
            out_colorspace = colorspace or "GRAY"
        elif img[0].shape[-1] == 4:
            in_colorspace = "BGRA"
            out_colorspace = colorspace or "RGBA"
        else:
            in_colorspace = "BGR"
            out_colorspace = colorspace or "RGB"

        if isinstance(colorspace, int):
            # An int is used verbatim as a cv2.COLOR_* conversion flag.
            cvt_space = colorspace
        elif in_colorspace == out_colorspace.upper():
            # Already in the requested colorspace; no conversion needed.
            cvt_space = None
        else:
            out_colorspace = out_colorspace.upper()
            cvt_space = getattr(cv2, f"COLOR_{in_colorspace}2{out_colorspace}")

        if cvt_space is not None:
            img = np.stack([cv2.cvtColor(x, cvt_space) for x in img])
        else:
            img = np.stack(img)

        return img if is_batch else img[0]

    def iter(
        self,
        colorspace: Union[int, str] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> np.ndarray:
        """Yield images from the ImageResource.

        Parameters
        ----------
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs
            `conversion flags
            <https://docs.opencv.org/4.x/d8/d01/group__imgproc__color__conversions.html>`_
            and use it for conversion. If str, convert the image into the given
            colorspace. Possible string values are: ``"RGB"``, ``"BGR"``,
            ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56>`_
            for details.

        Yields
        ------
        ndimage : np.ndarray
            The decoded image as a numpy array.

        """
        # NOTE(review): the frame count here is taken without `flags`,
        # while read() passes `flags` to imcount — confirm intended.
        for idx in range(cv2.imcount(self.file_handle)):
            yield self.read(index=idx, flags=flags, colorspace=colorspace)

    def write(
        self,
        ndimage: Union[ArrayLike, List[ArrayLike]],
        is_batch: bool = False,
        params: List[int] = None,
    ) -> Optional[bytes]:
        """Save an ndimage in the ImageResource.

        Parameters
        ----------
        ndimage : ArrayLike, List[ArrayLike]
            The image data that will be written to the file. It is either a
            single image, a batch of images, or a list of images.
        is_batch : bool
            If True, the provided ndimage is a batch of images. If False (default), the
            provided ndimage is a single image. If the provided ndimage is a list of images,
            this parameter has no effect.
        params : List[int]
            A list of parameters that will be passed to OpenCVs imwrite or
            imwritemulti functions. Possible values are documented in the
            `OpenCV documentation
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce>`_.

        Returns
        -------
        encoded_image : bytes, None
            If the ImageResource is ``"<bytes>"`` the call to write returns the
            encoded image as a bytes string. Otherwise it returns None.

        """

        # Normalize input to a batch: lists are stacked, single images
        # get a new leading axis.
        if isinstance(ndimage, list):
            ndimage = np.stack(ndimage, axis=0)
        elif not is_batch:
            ndimage = ndimage[None, ...]

        if ndimage[0].ndim == 2:
            n_channels = 1
        else:
            n_channels = ndimage[0].shape[-1]

        # Convert from the ecosystem's RGB(A) order to OpenCV's BGR(A).
        if n_channels == 1:
            ndimage_cv2 = [x for x in ndimage]
        elif n_channels == 4:
            ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGBA2BGRA) for x in ndimage]
        else:
            ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in ndimage]

        retval = cv2.imwritemulti(self.file_handle, ndimage_cv2, params)

        if retval is False:
            # not sure what scenario would trigger this, but
            # it can occur theoretically.
            raise IOError("OpenCV failed to write.")  # pragma: no cover

        # For in-memory targets, hand back the encoded bytes.
        if self.request._uri_type == URI_BYTES:
            return Path(self.file_handle).read_bytes()

    def properties(
        self,
        index: int = None,
        colorspace: Union[int, str] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> ImageProperties:
        """Standardized image metadata.

        Parameters
        ----------
        index : int, Ellipsis
            If int, get the properties of the index-th image in the
            ImageResource. If ``...``, get the properties of the image stack
            that contains all images. If None (default), use ``index=0`` if the
            image contains exactly one image and ``index=...`` otherwise.
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs
            `conversion flags
            <https://docs.opencv.org/4.x/d8/d01/group__imgproc__color__conversions.html>`_
            and use it for conversion. If str, convert the image into the given
            colorspace. Possible string values are: ``"RGB"``, ``"BGR"``,
            ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs
            <https://docs.opencv.org/4.x/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56>`_
            for details.

        Returns
        -------
        props : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        Reading properties with OpenCV involves decoding pixel data, because
        OpenCV doesn't provide a direct way to access metadata.

        """

        if index is None:
            n_images = cv2.imcount(self.file_handle, flags)
            is_batch = n_images > 1
        elif index is Ellipsis:
            n_images = cv2.imcount(self.file_handle, flags)
            is_batch = True
        else:
            # Single-index path: n_images is not needed below.
            is_batch = False

        # unfortunately, OpenCV doesn't allow reading shape without reading pixel data
        if is_batch:
            # Decode only the first frame; all frames share its shape/dtype.
            img = self.read(index=0, flags=flags, colorspace=colorspace)
            return ImageProperties(
                shape=(n_images, *img.shape),
                dtype=img.dtype,
                n_images=n_images,
                is_batch=True,
            )

        img = self.read(index=index, flags=flags, colorspace=colorspace)
        return ImageProperties(shape=img.shape, dtype=img.dtype, is_batch=False)

    def metadata(
        self, index: int = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Format-specific metadata.

        .. warning::
            OpenCV does not support reading metadata. When called, this function
            emits a ``UserWarning`` and returns an empty dict.

        Parameters
        ----------
        index : int
            This parameter has no effect.
        exclude_applied : bool
            This parameter has no effect.

        """

        warnings.warn("OpenCV does not support reading metadata.", UserWarning)
        return dict()
|
||||
@@ -1,477 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write images using Pillow/PIL.
|
||||
|
||||
Backend Library: `Pillow <https://pillow.readthedocs.io/en/stable/>`_
|
||||
|
||||
Plugin that wraps the Pillow library. Pillow is a friendly fork of PIL
|
||||
(Python Image Library) and supports reading and writing of common formats (jpg,
|
||||
png, gif, tiff, ...). For the complete list of features and supported formats
|
||||
please refer to pillows official docs (see the Backend Library link).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : Request
|
||||
A request object representing the resource to be operated on.
|
||||
|
||||
Methods
|
||||
-------
|
||||
|
||||
.. autosummary::
|
||||
:toctree: _plugins/pillow
|
||||
|
||||
PillowPlugin.read
|
||||
PillowPlugin.write
|
||||
PillowPlugin.iter
|
||||
PillowPlugin.get_meta
|
||||
|
||||
"""
|
||||
|
||||
from io import BytesIO
|
||||
from typing import Callable, Optional, Dict, Any, Tuple, cast, Iterator, Union, List
|
||||
import numpy as np
|
||||
from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags # type: ignore
|
||||
from ..core.request import Request, IOMode, InitializationError, URI_BYTES
|
||||
from ..core.v3_plugin_api import PluginV3, ImageProperties
|
||||
import warnings
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
def _exif_orientation_transform(orientation: int, mode: str) -> Callable:
    """Return a callable that maps an array from the given EXIF orientation
    into the standard (upright) orientation.

    Parameters
    ----------
    orientation : int
        EXIF orientation tag value (1-8).
    mode : str
        Pillow image mode; used to decide which axis is the mirror axis.
    """
    # Mirror axis: -2 when the mode carries a trailing color-channel axis,
    # -1 for single-band (grayscale-like) modes.
    flip_axis = -2 if Image.getmodebands(mode) > 1 else -1

    transforms = {
        1: lambda x: x,
        2: lambda x: np.flip(x, axis=flip_axis),
        3: lambda x: np.rot90(x, k=2),
        4: lambda x: np.flip(x, axis=flip_axis - 1),
        5: lambda x: np.flip(np.rot90(x, k=3), axis=flip_axis),
        6: lambda x: np.rot90(x, k=1),
        7: lambda x: np.flip(np.rot90(x, k=1), axis=flip_axis),
        8: lambda x: np.rot90(x, k=3),
    }

    return transforms[orientation]
|
||||
|
||||
|
||||
class PillowPlugin(PluginV3):
|
||||
def __init__(self, request: Request) -> None:
    """Instantiate a new Pillow Plugin Object

    Parameters
    ----------
    request : {Request}
        A request object representing the resource to be operated on.

    Raises
    ------
    InitializationError
        If Pillow can not read the resource (read mode) or has no writer
        registered for its extension (write mode).

    """

    super().__init__(request)

    # Lazily-opened PIL image; stays None in write mode.
    self._image: Image = None

    if request.mode.io_mode == IOMode.read:
        try:
            with Image.open(request.get_file()):
                # Check if it is generally possible to read the image.
                # This will not read any data and merely try to find a
                # compatible pillow plugin (ref: the pillow docs).
                pass
        except UnidentifiedImageError:
            if request._uri_type == URI_BYTES:
                raise InitializationError(
                    "Pillow can not read the provided bytes."
                ) from None
            else:
                raise InitializationError(
                    f"Pillow can not read {request.raw_uri}."
                ) from None

        self._image = Image.open(self._request.get_file())
    else:
        extension = self.request.extension or self.request.format_hint
        if extension is None:
            # BUGFIX: warning message previously misspelled "suppress".
            warnings.warn(
                "Can't determine file format to write as. You _must_"
                " set `format` during write or the call will fail. Use "
                "`extension` to suppress this warning. ",
                UserWarning,
            )
            return

        # Populate Pillow's plugin registry incrementally (preinit first,
        # full init only if needed) before checking writability.
        tirage = [Image.preinit, Image.init]
        for format_loader in tirage:
            format_loader()
            if extension in Image.registered_extensions().keys():
                return

        raise InitializationError(
            f"Pillow can not write `{extension}` files."
        ) from None
|
||||
|
||||
def close(self) -> None:
    """Close the underlying Pillow image (if one was opened) and
    finalize the request."""
    if self._image is not None:
        self._image.close()

    self._request.finish()
|
||||
|
||||
def read(
    self, *, index=None, mode=None, rotate=False, apply_gamma=False, as_gray=None
) -> np.ndarray:
    """
    Parses the given URI and creates a ndarray from it.

    Parameters
    ----------
    index : int
        If the ImageResource contains multiple ndimages, and index is an
        integer, select the index-th ndimage from among them and return it.
        If index is an ellipsis (...), read all ndimages in the file and
        stack them along a new batch dimension and return them. If index is
        None, this plugin reads the first image of the file (index=0) unless
        the image is a GIF or APNG, in which case all images are read
        (index=...).
    mode : str
        Convert the image to the given mode before returning it. If None,
        the mode will be left unchanged. Possible modes can be found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    rotate : bool
        If set to ``True`` and the image contains an EXIF orientation tag,
        apply the orientation before returning the ndimage.
    apply_gamma : bool
        If ``True`` and the image contains metadata about gamma, apply gamma
        correction to the image.
    as_gray : bool
        Deprecated. Exists to raise a constructive error message.

    Returns
    -------
    ndimage : ndarray
        A numpy array containing the loaded image data

    Notes
    -----
    If you read a paletted image (e.g. GIF) then the plugin will apply the
    palette by default. Should you wish to read the palette indices of each
    pixel use ``mode="P"``. The corresponding color palette can be found in
    the image's metadata using the ``palette`` key when metadata is
    extracted using the ``exclude_applied=False`` kwarg. The latter is
    needed, as palettes are applied by default and hence excluded by default
    to keep metadata and pixel data consistent.

    """

    # `as_gray` was removed; fail loudly instead of silently ignoring it.
    if as_gray is not None:
        raise TypeError(
            "The keyword `as_gray` is no longer supported."
            "Use `mode='L'` instead."
        )

    # Default index: animated formats (GIF / APNG) read all frames,
    # everything else reads only the first frame.
    if index is None:
        if self._image.format == "GIF":
            index = Ellipsis
        elif self._image.custom_mimetype == "image/apng":
            index = Ellipsis
        else:
            index = 0

    if isinstance(index, int):
        # will raise IO error if index >= number of frames in image
        self._image.seek(index)
        image = self._apply_transforms(self._image, mode, rotate, apply_gamma)
        return image
    else:
        # index is ... : stack every frame along a new leading axis.
        iterator = self.iter(mode=mode, rotate=rotate, apply_gamma=apply_gamma)
        image = np.stack([im for im in iterator], axis=0)
        return image
|
||||
|
||||
def iter(
    self, *, mode: str = None, rotate: bool = False, apply_gamma: bool = False
) -> Iterator[np.ndarray]:
    """
    Iterate over all ndimages/frames in the URI

    Parameters
    ----------
    mode : {str, None}
        Convert the image to the given mode before returning it. If None,
        the mode will be left unchanged. Possible modes can be found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    rotate : {bool}
        If set to ``True`` and the image contains an EXIF orientation tag,
        apply the orientation before returning the ndimage.
    apply_gamma : {bool}
        If ``True`` and the image contains metadata about gamma, apply gamma
        correction to the image.
    """

    # Walk every frame of the (possibly animated) image and hand each
    # one through the same transform pipeline as read().
    frames = ImageSequence.Iterator(self._image)
    for frame in frames:
        yield self._apply_transforms(frame, mode, rotate, apply_gamma)
|
||||
|
||||
def _apply_transforms(self, image, mode, rotate, apply_gamma) -> np.ndarray:
    """Convert a PIL frame to ndarray, applying mode conversion,
    EXIF rotation, and gamma correction as requested."""
    if mode is not None:
        image = image.convert(mode)
    elif image.mode == "P":
        # adjust for pillow9 changes
        # see: https://github.com/python-pillow/Pillow/issues/5929
        image = image.convert(image.palette.mode)
    image = np.asarray(image)

    # Metadata of the current frame, including fields (e.g. Orientation)
    # that would normally be excluded once applied.
    meta = self.metadata(index=self._image.tell(), exclude_applied=False)
    if rotate and "Orientation" in meta:
        transformation = _exif_orientation_transform(
            meta["Orientation"], self._image.mode
        )
        image = transformation(image)

    if apply_gamma and "gamma" in meta:
        gamma = float(meta["gamma"])
        # Scale factor chosen by dtype; +0.4999 implements round-half-up.
        scale = float(65536 if image.dtype == np.uint16 else 255)
        gain = 1.0
        image = ((image / scale) ** gamma) * scale * gain + 0.4999
        # NOTE(review): this always casts to uint8, even when the source
        # was uint16 — confirm this narrowing is intended.
        image = np.round(image).astype(np.uint8)

    return image
|
||||
|
||||
def write(
    self,
    ndimage: Union[ArrayLike, List[ArrayLike]],
    *,
    mode: str = None,
    format: str = None,
    is_batch: bool = None,
    **kwargs,
) -> Optional[bytes]:
    """
    Write an ndimage to the URI specified in path.

    If the URI points to a file on the current host and the file does not
    yet exist it will be created. If the file exists already, it will be
    appended if possible; otherwise, it will be replaced.

    If necessary, the image is broken down along the leading dimension to
    fit into individual frames of the chosen format. If the format doesn't
    support multiple frames, and IOError is raised.

    Parameters
    ----------
    image : ndarray or list
        The ndimage to write. If a list is given each element is expected to
        be an ndimage.
    mode : str
        Specify the image's color format. If None (default), the mode is
        inferred from the array's shape and dtype. Possible modes can be
        found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    format : str
        Optional format override. If omitted, the format to use is
        determined from the filename extension. If a file object was used
        instead of a filename, this parameter must always be used.
    is_batch : bool
        Explicitly tell the writer that ``image`` is a batch of images
        (True) or not (False). If None, the writer will guess this from the
        provided ``mode`` or ``image.shape``. While the latter often works,
        it may cause problems for small images due to aliasing of spatial
        and color-channel axes.
    kwargs : ...
        Extra arguments to pass to pillow. If a writer doesn't recognise an
        option, it is silently ignored. The available options are described
        in pillow's `image format documentation
        <https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html>`_
        for each writer.

    Notes
    -----
    When writing batches of very narrow (2-4 pixels wide) gray images set
    the ``mode`` explicitly to avoid the batch being identified as a colored
    image.

    """
    # `fps` was removed in favour of `duration`; fail loudly.
    if "fps" in kwargs:
        raise TypeError(
            "The keyword `fps` is no longer supported. Use `duration`"
            "(in ms) instead, e.g. `fps=50` == `duration=20` (1000 * 1/50)."
        )

    extension = self.request.extension or self.request.format_hint

    save_args = {
        "format": format or Image.registered_extensions()[extension],
    }

    if isinstance(ndimage, list):
        ndimage = np.stack(ndimage, axis=0)
        is_batch = True
    else:
        ndimage = np.asarray(ndimage)

    # check if ndimage is a batch of frames/pages (e.g. for writing GIF)
    # if mode is given, use it; otherwise fall back to image.ndim only
    if is_batch is not None:
        pass
    elif mode is not None:
        # A multi-band mode means the last axis is color; one extra axis
        # beyond that implies a batch.
        is_batch = (
            ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2
        )
    elif ndimage.ndim == 2:
        is_batch = False
    elif ndimage.ndim == 3 and ndimage.shape[-1] == 1:
        raise ValueError("Can't write images with one color channel.")
    elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]:
        # Note: this makes a channel-last assumption
        is_batch = False
    else:
        is_batch = True

    if not is_batch:
        # Promote single images to a 1-element batch for uniform handling.
        ndimage = ndimage[None, ...]

    pil_frames = list()
    for frame in ndimage:
        pil_frame = Image.fromarray(frame, mode=mode)
        if "bits" in kwargs:
            # Reduce the palette to 2**bits colors when requested.
            pil_frame = pil_frame.quantize(colors=2 ** kwargs["bits"])
        pil_frames.append(pil_frame)
    primary_image, other_images = pil_frames[0], pil_frames[1:]

    if is_batch:
        # Multi-frame save: the first frame carries the rest.
        save_args["save_all"] = True
        save_args["append_images"] = other_images

    save_args.update(kwargs)
    primary_image.save(self._request.get_file(), **save_args)

    # For in-memory targets, return the encoded bytes.
    if self._request._uri_type == URI_BYTES:
        file = cast(BytesIO, self._request.get_file())
        return file.getvalue()

    return None
|
||||
|
||||
def get_meta(self, *, index=0) -> Dict[str, Any]:
    """Alias for :meth:`metadata` with ``exclude_applied=False``."""
    return self.metadata(index=index, exclude_applied=False)
|
||||
|
||||
def metadata(
    self, index: int = None, exclude_applied: bool = True
) -> Dict[str, Any]:
    """Read ndimage metadata.

    Parameters
    ----------
    index : {integer, None}
        If the ImageResource contains multiple ndimages, and index is an
        integer, select the index-th ndimage from among them and return its
        metadata. If index is an ellipsis (...), read and return global
        metadata. If index is None, this plugin reads metadata from the
        first image of the file (index=0) unless the image is a GIF or APNG,
        in which case global metadata is read (index=...).
    exclude_applied : bool
        If True, exclude metadata fields that are applied to the image while
        reading. For example, if the binary data contains a rotation flag,
        the image is rotated by default and the rotation flag is excluded
        from the metadata to avoid confusion.

    Returns
    -------
    metadata : dict
        A dictionary of format-specific metadata.

    """

    # Default index mirrors read(): animated formats use global metadata.
    if index is None:
        if self._image.format == "GIF":
            index = Ellipsis
        elif self._image.custom_mimetype == "image/apng":
            index = Ellipsis
        else:
            index = 0

    # Seek only when necessary to avoid disturbing the current frame.
    if isinstance(index, int) and self._image.tell() != index:
        self._image.seek(index)

    metadata = self._image.info.copy()
    metadata["mode"] = self._image.mode
    metadata["shape"] = self._image.size

    if self._image.mode == "P" and not exclude_applied:
        # Expose the raw palette since mode "P" keeps indices unapplied.
        metadata["palette"] = np.asarray(tuple(self._image.palette.colors.keys()))

    if self._image.getexif():
        # Translate numeric EXIF tags to their human-readable names,
        # dropping any tag Pillow does not know.
        exif_data = {
            ExifTags.TAGS.get(key, "unknown"): value
            for key, value in dict(self._image.getexif()).items()
        }
        exif_data.pop("unknown", None)
        metadata.update(exif_data)

    if exclude_applied:
        # Orientation is applied during read (when rotate=True), so hide it.
        metadata.pop("Orientation", None)

    return metadata
|
||||
|
||||
def properties(self, index: int = None) -> ImageProperties:
    """Standardized ndimage metadata.

    Parameters
    ----------
    index : int
        If the ImageResource contains multiple ndimages, and index is an
        integer, select the index-th ndimage from among them and return its
        properties. If index is an ellipsis (...), read and return the
        properties of all ndimages in the file stacked along a new batch
        dimension. If index is None, this plugin reads and returns the
        properties of the first image (index=0) unless the image is a GIF
        or APNG, in which case it reads and returns the properties of all
        images (index=...).

    Returns
    -------
    properties : ImageProperties
        A dataclass filled with standardized image metadata.

    Notes
    -----
    This does not decode pixel data and is fast for large images.

    """

    # GIF/APNG default to batch mode; everything else to the first frame.
    if index is None:
        is_animated_default = (
            self._image.format == "GIF"
            or self._image.custom_mimetype == "image/apng"
        )
        index = Ellipsis if is_animated_default else 0

    self._image.seek(0 if index is Ellipsis else index)

    # Palette images report the mode of their palette, not "P".
    img = self._image
    mode = img.palette.mode if img.mode == "P" else img.mode

    shape: Tuple[int, ...] = (img.height, img.width)

    n_frames: int = img.n_frames
    is_batch = index is Ellipsis
    if is_batch:
        shape = (n_frames,) + shape

    # Probe dtype and channel count via a 1x1 dummy image of the same mode;
    # this avoids decoding the actual pixel data.
    probe = np.asarray(Image.new(mode, (1, 1)))
    if probe.ndim > 2:
        shape = shape + probe.shape[2:]

    return ImageProperties(
        shape=shape,
        dtype=probe.dtype,
        n_images=n_frames if is_batch else None,
        is_batch=is_batch,
    )
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,825 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write images using pillow/PIL (legacy).
|
||||
|
||||
Backend Library: `Pillow <https://pillow.readthedocs.io/en/stable/>`_
|
||||
|
||||
Pillow is a friendly fork of PIL (Python Image Library) and supports
|
||||
reading and writing of common formats (jpg, png, gif, tiff, ...). While
|
||||
these docs provide an overview of some of its features, pillow is
|
||||
constantly improving. Hence, the complete list of features can be found
|
||||
in pillows official docs (see the Backend Library link).
|
||||
|
||||
Parameters for Reading
|
||||
----------------------
|
||||
pilmode : str
|
||||
(Available for all formats except GIF-PIL)
|
||||
From the Pillow documentation:
|
||||
|
||||
* 'L' (8-bit pixels, grayscale)
|
||||
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
|
||||
* 'RGB' (3x8-bit pixels, true color)
|
||||
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
|
||||
* 'CMYK' (4x8-bit pixels, color separation)
|
||||
* 'YCbCr' (3x8-bit pixels, color video format)
|
||||
* 'I' (32-bit signed integer pixels)
|
||||
* 'F' (32-bit floating point pixels)
|
||||
|
||||
PIL also provides limited support for a few special modes, including
|
||||
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
|
||||
(true color with premultiplied alpha).
|
||||
|
||||
When translating a color image to grayscale (mode 'L', 'I' or 'F'),
|
||||
the library uses the ITU-R 601-2 luma transform::
|
||||
|
||||
L = R * 299/1000 + G * 587/1000 + B * 114/1000
|
||||
as_gray : bool
|
||||
(Available for all formats except GIF-PIL)
|
||||
If True, the image is converted using mode 'F'. When `mode` is
|
||||
not None and `as_gray` is True, the image is first converted
|
||||
according to `mode`, and the result is then "flattened" using
|
||||
mode 'F'.
|
||||
ignoregamma : bool
|
||||
(Only available in PNG-PIL)
|
||||
Avoid gamma correction. Default True.
|
||||
exifrotate : bool
|
||||
(Only available in JPEG-PIL)
|
||||
Automatically rotate the image according to exif flag. Default True.
|
||||
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
optimize : bool
|
||||
(Only available in PNG-PIL)
|
||||
If present and true, instructs the PNG writer to make the output file
|
||||
as small as possible. This includes extra processing in order to find
|
||||
optimal encoder settings.
|
||||
transparency:
|
||||
(Only available in PNG-PIL)
|
||||
This option controls what color image to mark as transparent.
|
||||
dpi: tuple of two scalars
|
||||
(Only available in PNG-PIL)
|
||||
The desired dpi in each direction.
|
||||
pnginfo: PIL.PngImagePlugin.PngInfo
|
||||
(Only available in PNG-PIL)
|
||||
Object containing text tags.
|
||||
compress_level: int
|
||||
(Only available in PNG-PIL)
|
||||
ZLIB compression level, a number between 0 and 9: 1 gives best speed,
|
||||
9 gives best compression, 0 gives no compression at all. Default is 9.
|
||||
When ``optimize`` option is True ``compress_level`` has no effect
|
||||
(it is set to 9 regardless of a value passed).
|
||||
compression: int
|
||||
(Only available in PNG-PIL)
|
||||
Compatibility with the freeimage PNG format. If given, it overrides
|
||||
compress_level.
|
||||
icc_profile:
|
||||
(Only available in PNG-PIL)
|
||||
The ICC Profile to include in the saved file.
|
||||
bits (experimental): int
|
||||
(Only available in PNG-PIL)
|
||||
This option controls how many bits to store. If omitted,
|
||||
the PNG writer uses 8 bits (256 colors).
|
||||
quantize:
|
||||
(Only available in PNG-PIL)
|
||||
Compatibility with the freeimage PNG format. If given, it overrides
|
||||
bits. In this case, given as a number between 1-256.
|
||||
dictionary (experimental): dict
|
||||
(Only available in PNG-PIL)
|
||||
Set the ZLIB encoder dictionary.
|
||||
prefer_uint8: bool
|
||||
(Only available in PNG-PIL)
|
||||
Let the PNG writer truncate uint16 image arrays to uint8 if their values fall
|
||||
within the range [0, 255]. Defaults to true for legacy compatibility, however
|
||||
it is recommended to set this to false to avoid unexpected behavior when
|
||||
saving e.g. weakly saturated images.
|
||||
|
||||
quality : scalar
|
||||
(Only available in JPEG-PIL)
|
||||
The compression factor of the saved image (1..100), higher
|
||||
numbers result in higher quality but larger file size. Default 75.
|
||||
progressive : bool
|
||||
(Only available in JPEG-PIL)
|
||||
Save as a progressive JPEG file (e.g. for images on the web).
|
||||
Default False.
|
||||
optimize : bool
|
||||
(Only available in JPEG-PIL)
|
||||
On saving, compute optimal Huffman coding tables (can reduce a few
|
||||
percent of file size). Default False.
|
||||
dpi : tuple of int
|
||||
(Only available in JPEG-PIL)
|
||||
The pixel density, ``(x,y)``.
|
||||
icc_profile : object
|
||||
(Only available in JPEG-PIL)
|
||||
If present and true, the image is stored with the provided ICC profile.
|
||||
If this parameter is not provided, the image will be saved with no
|
||||
profile attached.
|
||||
exif : dict
|
||||
(Only available in JPEG-PIL)
|
||||
If present, the image will be stored with the provided raw EXIF data.
|
||||
subsampling : str
|
||||
(Only available in JPEG-PIL)
|
||||
Sets the subsampling for the encoder. See Pillow docs for details.
|
||||
qtables : object
|
||||
(Only available in JPEG-PIL)
|
||||
Set the qtables for the encoder. See Pillow docs for details.
|
||||
quality_mode : str
|
||||
(Only available in JPEG2000-PIL)
|
||||
Either `"rates"` or `"dB"` depending on the units you want to use to
|
||||
specify image quality.
|
||||
quality : float
|
||||
(Only available in JPEG2000-PIL)
|
||||
Approximate size reduction (if quality mode is `rates`) or a signal to noise ratio
|
||||
in decibels (if quality mode is `dB`).
|
||||
loop : int
|
||||
(Only available in GIF-PIL)
|
||||
The number of iterations. Default 0 (meaning loop indefinitely).
|
||||
duration : {float, list}
|
||||
(Only available in GIF-PIL)
|
||||
The duration (in seconds) of each frame. Either specify one value
|
||||
that is used for all frames, or one value for each frame.
|
||||
Note that in the GIF format the duration/delay is expressed in
|
||||
hundredths of a second, which limits the precision of the duration.
|
||||
fps : float
|
||||
(Only available in GIF-PIL)
|
||||
The number of frames per second. If duration is not given, the
|
||||
duration for each frame is set to 1/fps. Default 10.
|
||||
palettesize : int
|
||||
(Only available in GIF-PIL)
|
||||
The number of colors to quantize the image to. Is rounded to
|
||||
the nearest power of two. Default 256.
|
||||
subrectangles : bool
|
||||
(Only available in GIF-PIL)
|
||||
If True, will try and optimize the GIF by storing only the
|
||||
rectangular parts of each frame that change with respect to the
|
||||
previous. Default False.
|
||||
|
||||
Notes
|
||||
-----
|
||||
To enable JPEG 2000 support, you need to build and install the OpenJPEG library,
|
||||
version 2.0.0 or higher, before building the Python Imaging Library. Windows
|
||||
users can install the OpenJPEG binaries available on the OpenJPEG website, but
|
||||
must add them to their PATH in order to use PIL (if you fail to do this, you
|
||||
will get errors about not being able to load the ``_imaging`` DLL).
|
||||
|
||||
GIF images read with this plugin are always RGBA. The alpha channel is ignored
|
||||
when saving RGB images.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, image_as_uint
|
||||
from ..core.request import URI_FILE, URI_BYTES
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX.
|
||||
|
||||
|
||||
GENERIC_DOCS = """
|
||||
Parameters for reading
|
||||
----------------------
|
||||
|
||||
pilmode : str
|
||||
From the Pillow documentation:
|
||||
|
||||
* 'L' (8-bit pixels, grayscale)
|
||||
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
|
||||
* 'RGB' (3x8-bit pixels, true color)
|
||||
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
|
||||
* 'CMYK' (4x8-bit pixels, color separation)
|
||||
* 'YCbCr' (3x8-bit pixels, color video format)
|
||||
* 'I' (32-bit signed integer pixels)
|
||||
* 'F' (32-bit floating point pixels)
|
||||
|
||||
PIL also provides limited support for a few special modes, including
|
||||
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
|
||||
(true color with premultiplied alpha).
|
||||
|
||||
When translating a color image to grayscale (mode 'L', 'I' or 'F'),
|
||||
the library uses the ITU-R 601-2 luma transform::
|
||||
|
||||
L = R * 299/1000 + G * 587/1000 + B * 114/1000
|
||||
as_gray : bool
|
||||
If True, the image is converted using mode 'F'. When `mode` is
|
||||
not None and `as_gray` is True, the image is first converted
|
||||
according to `mode`, and the result is then "flattened" using
|
||||
mode 'F'.
|
||||
"""
|
||||
|
||||
|
||||
class PillowFormat(Format):
    """
    Base format class for Pillow formats.

    Handles lazy, thread-safe import of Pillow and provides generic
    Reader/Writer implementations that concrete formats (PNG, JPEG, ...)
    specialize.
    """

    # Class-level state shared by all instances: whether the Pillow import
    # has been attempted, and the imported PIL.Image module (or None).
    _pillow_imported = False
    _Image = None
    _modes = "i"
    _description = ""

    def __init__(self, *args, plugin_id: str = None, **kwargs):
        """Create the format; *plugin_id* is the PIL plugin name (e.g. "PNG")."""
        super(PillowFormat, self).__init__(*args, **kwargs)
        # Used to synchronize _init_pillow(), see #244
        self._lock = threading.RLock()

        self._plugin_id = plugin_id

    @property
    def plugin_id(self):
        """The PIL plugin id."""
        return self._plugin_id  # Set when format is created

    def _init_pillow(self):
        """Import Pillow (once, under a lock) and return the PIL.Image module.

        Raises ImportError when plain PIL (without ``__version__``) is found
        instead of Pillow.
        """
        with self._lock:
            if not self._pillow_imported:
                self._pillow_imported = True  # more like tried to import
                import PIL

                if not hasattr(PIL, "__version__"):  # pragma: no cover
                    raise ImportError(
                        "Imageio Pillow plugin requires " "Pillow, not PIL!"
                    )
                from PIL import Image

                self._Image = Image
            elif self._Image is None:  # pragma: no cover
                raise RuntimeError("Imageio Pillow plugin requires " "Pillow lib.")
            Image = self._Image

        # preinit() registers only the common built-in formats and is much
        # cheaper than init(), which scans for all plugins.
        if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"):
            Image.preinit()
        else:
            Image.init()
        return Image

    def _can_read(self, request):
        """Return True when PIL's registered "accept" probe matches the
        request's first bytes; returns None (falsy) otherwise."""
        Image = self._init_pillow()
        if self.plugin_id in Image.OPEN:
            factory, accept = Image.OPEN[self.plugin_id]
            if accept:
                if request.firstbytes and accept(request.firstbytes):
                    return True

    def _can_write(self, request):
        """Return True when the target extension (or a file/bytes URI) is
        writable by this format's PIL plugin; None (falsy) otherwise."""
        Image = self._init_pillow()
        if request.extension in self.extensions or request._uri_type in [
            URI_FILE,
            URI_BYTES,
        ]:
            if self.plugin_id in Image.SAVE:
                return True

    class Reader(Format.Reader):
        # Generic Pillow-backed reader; subclasses override _open/_get_file.

        def _open(self, pilmode=None, as_gray=False):
            """Open the image via the PIL factory for this plugin and record
            reader kwargs (mode/as_gray/is_gray) and the frame count."""
            Image = self.format._init_pillow()
            try:
                factory, accept = Image.OPEN[self.format.plugin_id]
            except KeyError:
                raise RuntimeError("Format %s cannot read images." % self.format.name)
            self._fp = self._get_file()
            self._im = factory(self._fp, "")
            if hasattr(Image, "_decompression_bomb_check"):
                Image._decompression_bomb_check(self._im.size)
            # Save the raw mode used by the palette for a BMP because it may not be the number of channels
            # When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument
            # However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame.
            # This issue is resolved by using the raw palette data but the rawmode information is now lost. So we
            # store the raw mode for later use
            if self._im.palette and self._im.palette.dirty:
                self._im.palette.rawmode_saved = self._im.palette.rawmode
            pil_try_read(self._im)
            # Store args
            self._kwargs = dict(
                as_gray=as_gray, is_gray=_palette_is_grayscale(self._im)
            )
            # setting mode=None is not the same as just not providing it
            if pilmode is not None:
                self._kwargs["mode"] = pilmode
            # Set length
            self._length = 1
            if hasattr(self._im, "n_frames"):
                self._length = self._im.n_frames

        def _get_file(self):
            # Base implementation: the request owns the file object.
            self._we_own_fp = False
            return self.request.get_file()

        def _close(self):
            save_pillow_close(self._im)
            if self._we_own_fp:
                self._fp.close()
            # else: request object handles closing the _fp

        def _get_length(self):
            # Number of frames discovered in _open().
            return self._length

        def _seek(self, index):
            """Seek to frame *index*; translate PIL's EOFError to IndexError."""
            try:
                self._im.seek(index)
            except EOFError:
                raise IndexError("Could not seek to index %i" % index)

        def _get_data(self, index):
            """Return (ndarray, info-dict) for frame *index*.

            Frames are advanced one at a time when moving forward because
            some formats must be decoded sequentially.
            """
            if index >= self._length:
                raise IndexError("Image index %i > %i" % (index, self._length))
            i = self._im.tell()
            if i > index:
                self._seek(index)  # just try
            else:
                while i < index:  # some formats need to be read in sequence
                    i += 1
                    self._seek(i)
            # Re-save the palette raw mode after seeking (see _open for why).
            if self._im.palette and self._im.palette.dirty:
                self._im.palette.rawmode_saved = self._im.palette.rawmode
            # Force a decode of the current frame before conversion.
            self._im.getdata()[0]
            im = pil_get_frame(self._im, **self._kwargs)
            return im, self._im.info

        def _get_meta_data(self, index):
            # Only global metadata (index None or 0) is supported here.
            if not (index is None or index == 0):
                raise IndexError()
            return self._im.info

    class Writer(Format.Writer):
        # Generic single-image Pillow-backed writer.

        def _open(self):
            """Look up the PIL save function for this plugin and reset state."""
            Image = self.format._init_pillow()
            try:
                self._save_func = Image.SAVE[self.format.plugin_id]
            except KeyError:
                raise RuntimeError("Format %s cannot write images." % self.format.name)
            self._fp = self.request.get_file()
            self._meta = {}
            self._written = False

        def _close(self):
            pass  # request object handled closing _fp

        def _append_data(self, im, meta):
            """Write a single ndarray *im*; raises on a second call because
            the base Pillow writer supports single images only."""
            if self._written:
                raise RuntimeError(
                    "Format %s only supports single images." % self.format.name
                )
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            self._written = True
            self._meta.update(meta)
            img = ndarray_to_pil(
                im, self.format.plugin_id, self._meta.pop("prefer_uint8", True)
            )
            if "bits" in self._meta:
                img = img.quantize()  # Make it a P image, so bits arg is used
            img.save(self._fp, format=self.format.plugin_id, **self._meta)
            save_pillow_close(img)

        def set_meta_data(self, meta):
            # Merge user-provided metadata into the kwargs passed to img.save().
            self._meta.update(meta)
|
||||
|
||||
|
||||
class PNGFormat(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False, ignoregamma=True):
            # ``ignoregamma`` is consumed later via self.request.kwargs in
            # _get_data(); only pilmode/as_gray are forwarded to the base.
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_data(self, index):
            """Read frame *index* and optionally apply the file's gamma."""
            im, info = PillowFormat.Reader._get_data(self, index)
            if not self.request.kwargs.get("ignoregamma", True):
                # The gamma value in the file represents the gamma factor for the
                # hardware on the system where the file was created, and is meant
                # to be able to match the colors with the system on which the
                # image is shown. See also issue #366
                try:
                    gamma = float(info["gamma"])
                except (KeyError, ValueError):
                    pass
                else:
                    scale = float(65536 if im.dtype == np.uint16 else 255)
                    gain = 1.0
                    # In-place gamma correction; +0.4999 rounds to nearest.
                    im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999
            return im, info

        # --

    class Writer(PillowFormat.Writer):
        def _open(self, compression=None, quantize=None, interlaced=False, **kwargs):
            """Validate PNG writer kwargs and map the freeimage-compatible
            ``compression``/``quantize`` aliases onto Pillow's options."""
            # Better default for compression
            kwargs["compress_level"] = kwargs.get("compress_level", 9)

            # ``compression`` (freeimage-compat) overrides compress_level.
            if compression is not None:
                if compression < 0 or compression > 9:
                    raise ValueError("Invalid PNG compression level: %r" % compression)
                kwargs["compress_level"] = compression
            # ``quantize`` (freeimage-compat) must be a power of two; it maps
            # onto Pillow's ``bits`` option (number of bits = log2(quantize)).
            if quantize is not None:
                for bits in range(1, 9):
                    if 2**bits == quantize:
                        break
                else:
                    raise ValueError(
                        "PNG quantize must be power of two, " "not %r" % quantize
                    )
                kwargs["bits"] = bits
            if interlaced:
                logger.warning("PIL PNG writer cannot produce interlaced images.")

            # Whitelist of kwargs understood by Pillow's PNG writer (plus our
            # own ``prefer_uint8``); anything else is a user error.
            ok_keys = (
                "optimize",
                "transparency",
                "dpi",
                "pnginfo",
                "bits",
                "compress_level",
                "icc_profile",
                "dictionary",
                "prefer_uint8",
            )
            for key in kwargs:
                if key not in ok_keys:
                    raise TypeError("Invalid arg for PNG writer: %r" % key)

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            # Keep 16-bit precision for single-channel uint16 data; everything
            # else is written as 8-bit.
            if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1):
                im = image_as_uint(im, bitdepth=16)
            else:
                im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
|
||||
|
||||
|
||||
class JPEGFormat(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False, exifrotate=True):
            # ``exifrotate`` is consumed later via self.request.kwargs in
            # _rotate(); only pilmode/as_gray are forwarded to the base.
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_file(self):
            # Pillow uses seek for JPG, so we cannot directly stream from web
            if self.request.filename.startswith(
                ("http://", "https://")
            ) or ".zip/" in self.request.filename.replace("\\", "/"):
                self._we_own_fp = True
                return open(self.request.get_local_filename(), "rb")
            else:
                self._we_own_fp = False
                return self.request.get_file()

        def _get_data(self, index):
            """Read frame *index*, decode EXIF tags into info["EXIF_MAIN"],
            and apply EXIF orientation."""
            im, info = PillowFormat.Reader._get_data(self, index)

            # Handle exif
            if "exif" in info:
                from PIL.ExifTags import TAGS

                info["EXIF_MAIN"] = {}
                for tag, value in self._im._getexif().items():
                    decoded = TAGS.get(tag, tag)
                    info["EXIF_MAIN"][decoded] = value

            im = self._rotate(im, info)
            return im, info

        def _rotate(self, im, meta):
            """Use Orientation information from EXIF meta data to
            orient the image correctly. Similar code as in FreeImage plugin.
            """
            if self.request.kwargs.get("exifrotate", True):
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, quality=75, progressive=False, optimize=False, **kwargs):
            """Validate and collect JPEG save options.

            Parameters
            ----------
            quality : int
                Compression quality, 0 (worst) to 100 (best). Default 75.
            progressive : bool
                Save as a progressive JPEG. Default False.
            optimize : bool
                Compute optimal Huffman tables on save. Default False.
            """
            # The JPEG quality can be between 0 (worst) and 100 (best)
            quality = int(quality)
            if quality < 0 or quality > 100:
                raise ValueError("JPEG quality should be between 0 and 100.")

            kwargs["quality"] = quality
            kwargs["progressive"] = bool(progressive)
            # BUGFIX: this previously read ``bool(progressive)`` (copy-paste
            # error), which silently ignored the documented ``optimize``
            # argument and instead tied optimization to ``progressive``.
            kwargs["optimize"] = bool(optimize)

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            # JPEG has no alpha channel; reject RGBA input explicitly.
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError("JPEG does not support alpha channel.")
            im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
            return
|
||||
|
||||
|
||||
class JPEG2000Format(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_file(self):
            # Pillow uses seek for JPG, so we cannot directly stream from web
            if self.request.filename.startswith(
                ("http://", "https://")
            ) or ".zip/" in self.request.filename.replace("\\", "/"):
                self._we_own_fp = True
                return open(self.request.get_local_filename(), "rb")
            else:
                self._we_own_fp = False
                return self.request.get_file()

        def _get_data(self, index):
            """Read frame *index*, decode EXIF tags into info["EXIF_MAIN"],
            and apply EXIF orientation.

            NOTE(review): duplicated from JPEGFormat.Reader — a shared helper
            would avoid the two copies drifting apart.
            """
            im, info = PillowFormat.Reader._get_data(self, index)

            # Handle exif
            if "exif" in info:
                from PIL.ExifTags import TAGS

                info["EXIF_MAIN"] = {}
                for tag, value in self._im._getexif().items():
                    decoded = TAGS.get(tag, tag)
                    info["EXIF_MAIN"][decoded] = value

            im = self._rotate(im, info)
            return im, info

        def _rotate(self, im, meta):
            """Use Orientation information from EXIF meta data to
            orient the image correctly. Similar code as in FreeImage plugin.

            NOTE(review): _open() does not accept an ``exifrotate`` kwarg
            (unlike JPEGFormat.Reader), so this check always sees the
            default True unless the kwarg is smuggled in another way.
            """
            if self.request.kwargs.get("exifrotate", True):
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, quality_mode="rates", quality=5, **kwargs):
            """Validate JPEG 2000 quality settings.

            quality_mode is "rates" (size-reduction factor, 1..1000) or "dB"
            (PSNR in decibels, 15..100); quality is interpreted accordingly.
            """
            # Check quality - in Pillow it should be no higher than 95
            if quality_mode not in {"rates", "dB"}:
                raise ValueError("Quality mode should be either 'rates' or 'dB'")

            quality = float(quality)

            if quality_mode == "rates" and (quality < 1 or quality > 1000):
                raise ValueError(
                    "The quality value {} seems to be an invalid rate!".format(quality)
                )
            elif quality_mode == "dB" and (quality < 15 or quality > 100):
                raise ValueError(
                    "The quality value {} seems to be an invalid PSNR!".format(quality)
                )

            kwargs["quality_mode"] = quality_mode
            kwargs["quality_layers"] = [quality]

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            # Alpha is not supported by this implementation; reject RGBA.
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError(
                    "The current implementation of JPEG 2000 does not support alpha channel."
                )
            im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
            return
|
||||
|
||||
|
||||
def save_pillow_close(im):
    """Close *im* only when it exposes a closable file pointer.

    See issues #216 and #300: closing unconditionally can touch a file
    object that the caller (or the request) still owns, so we close only
    when ``im.fp`` itself has a ``close`` method.
    """
    fp = getattr(im, "fp", None)
    if hasattr(im, "close") and hasattr(fp, "close"):
        im.close()
|
||||
|
||||
|
||||
# Func from skimage
|
||||
|
||||
# This cells contains code from scikit-image, in particular from
|
||||
# http://github.com/scikit-image/scikit-image/blob/master/
|
||||
# skimage/io/_plugins/pil_plugin.py
|
||||
# The scikit-image license applies.
|
||||
|
||||
|
||||
def pil_try_read(im):
    """Probe *im* for readability.

    Touching the first pixel forces Pillow to actually decode the image;
    a resulting IOError is translated into a ValueError whose message
    names the file and points at Pillow's external-library install docs.
    """
    try:
        # this will raise an IOError if the file is not readable
        im.getdata()[0]
    except IOError as err:
        docs_url = (
            "http://pillow.readthedocs.io/en/latest/installation.html"
            "#external-libraries"
        )
        raise ValueError(
            'Could not load "%s" \n'
            'Reason: "%s"\n'
            "Please see documentation at: %s" % (im.filename, str(err), docs_url)
        )
|
||||
|
||||
|
||||
def _palette_is_grayscale(pil_image):
|
||||
if pil_image.mode != "P":
|
||||
return False
|
||||
elif pil_image.info.get("transparency", None): # see issue #475
|
||||
return False
|
||||
# get palette as an array with R, G, B columns
|
||||
# Note: starting in pillow 9.1 palettes may have less than 256 entries
|
||||
palette = np.asarray(pil_image.getpalette()).reshape((-1, 3))
|
||||
# Not all palette colors are used; unused colors have junk values.
|
||||
start, stop = pil_image.getextrema()
|
||||
valid_palette = palette[start : stop + 1]
|
||||
# Image is grayscale if channel differences (R - G and G - B)
|
||||
# are all zero.
|
||||
return np.allclose(np.diff(valid_palette), 0)
|
||||
|
||||
|
||||
def pil_get_frame(im, is_gray=None, as_gray=None, mode=None, dtype=None):
    """Convert the current frame of PIL image *im* to a numpy array.

    Parameters
    ----------
    is_gray : bool
        Whether the image *is* gray (by inspecting its palette). Computed
        from the palette when None.
    as_gray : bool
        Whether the resulting image must be converted to gray (mode 'F').
    mode : str
        The PIL mode to convert to; overrides all automatic conversions.
    dtype :
        Forwarded to ``np.array`` for the final conversion (non-"I;16"
        path only).
    """

    if is_gray is None:
        is_gray = _palette_is_grayscale(im)

    frame = im

    # Convert ...
    if mode is not None:
        # Mode is explicitly given ...
        if mode != im.mode:
            frame = im.convert(mode)
    elif as_gray:
        pass  # don't do any auto-conversions (but do the explicit one above)
    elif im.mode == "P" and is_gray:
        # Paletted images that are already gray by their palette
        # are converted so that the resulting numpy array is 2D.
        frame = im.convert("L")
    elif im.mode == "P":
        # Paletted images are converted to RGB/RGBA. We jump some loops to make
        # this work well.
        if im.info.get("transparency", None) is not None:
            # Let Pillow apply the transparency, see issue #210 and #246
            frame = im.convert("RGBA")
        elif im.palette.mode in ("RGB", "RGBA"):
            # We can do this ourselves. Pillow seems to sometimes screw
            # this up if a multi-gif has a palette for each frame ...
            # Create palette array
            p = np.frombuffer(im.palette.getdata()[1], np.uint8)
            # Restore the raw mode that was saved to be used to parse the palette
            if hasattr(im.palette, "rawmode_saved"):
                im.palette.rawmode = im.palette.rawmode_saved
            mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode
            nchannels = len(mode)
            # Shape it.
            p.shape = -1, nchannels
            # Pad a full-opacity alpha column for RGB (or RGBX) palettes.
            if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == "X"):
                p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype)))
            # Swap the axes if the mode is in BGR and not RGB
            if mode.startswith("BGR"):
                p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]]
            # Apply palette: index the palette rows by the raw pixel values.
            frame_paletted = np.array(im, np.uint8)
            try:
                frame = p[frame_paletted]
            except Exception:
                # Ok, let PIL do it. The introduction of the branch that
                # tests `im.info['transparency']` should make this happen
                # much less often, but let's keep it, to be safe.
                frame = im.convert("RGBA")
        else:
            # Let Pillow do it. Unlike skimage, we always convert
            # to RGBA; palettes can be RGBA.
            if True:  # im.format == 'PNG' and 'transparency' in im.info:
                frame = im.convert("RGBA")
            else:
                frame = im.convert("RGB")
    elif "A" in im.mode:
        frame = im.convert("RGBA")
    elif im.mode == "CMYK":
        frame = im.convert("RGB")
    elif im.format == "GIF" and im.mode == "RGB":
        # pillow9 returns RGBA images for subsequent frames so that it can deal
        # with multi-frame GIF that use frame-level palettes and don't dispose
        # all areas.

        # For backwards compatibility, we promote everything to RGBA.
        frame = im.convert("RGBA")

    # Apply a post-convert if necessary
    if as_gray:
        frame = frame.convert("F")  # Scipy compat
    elif not isinstance(frame, np.ndarray) and frame.mode == "1":
        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
        # can cause a segfault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        #
        # This converts im from a 1-bit image to an 8-bit image.
        frame = frame.convert("L")

    # Convert to numpy array
    if im.mode.startswith("I;16"):
        # e.g. int16 PNGs; decode the raw 16-bit buffer with the correct
        # endianness ("B" suffix means big-endian) and signedness ("S").
        shape = im.size
        dtype = ">u2" if im.mode.endswith("B") else "<u2"
        if "S" in im.mode:
            dtype = dtype.replace("u", "i")
        frame = np.frombuffer(frame.tobytes(), dtype).copy()
        frame.shape = shape[::-1]
    else:
        # Use uint16 for PNG's in mode I
        if im.format == "PNG" and im.mode == "I" and dtype is None:
            dtype = "uint16"
        frame = np.array(frame, dtype=dtype)

    return frame
|
||||
|
||||
|
||||
def ndarray_to_pil(arr, format_str=None, prefer_uint8=True):
    """Convert ndarray *arr* to a PIL image, picking a suitable PIL mode.

    Parameters
    ----------
    arr : ndarray
        2D (grayscale) or 3D (RGB/RGBA) image data.
    format_str : str
        Target format name; "png"/"PNG" enables the 16-bit ("I;16") path
        for 2D data.
    prefer_uint8 : bool
        For PNG: truncate to uint8 when all values fit in [0, 255]
        (legacy behavior; may surprise on weakly saturated images).
    """
    from PIL import Image

    if arr.ndim == 3:
        # Color data is always written as 8-bit RGB/RGBA.
        arr = image_as_uint(arr, bitdepth=8)
        mode = {3: "RGB", 4: "RGBA"}[arr.shape[2]]

    elif format_str in ["png", "PNG"]:
        # 2D PNG data: default to 16-bit ...
        mode = "I;16"
        mode_base = "I"

        if arr.dtype.kind == "f":
            arr = image_as_uint(arr)

        elif prefer_uint8 and arr.max() < 256 and arr.min() >= 0:
            # ... unless the values fit in uint8 and the caller prefers it.
            arr = arr.astype(np.uint8)
            mode = mode_base = "L"

        else:
            arr = image_as_uint(arr, bitdepth=16)

    else:
        # All other 2D data is written as 8-bit grayscale.
        arr = image_as_uint(arr, bitdepth=8)
        mode = "L"
        mode_base = "L"

    if mode == "I;16" and int(getattr(Image, "__version__", "0").split(".")[0]) < 6:
        # Pillow < v6.0.0 has limited support for the "I;16" mode,
        # requiring us to fall back to this expensive workaround.
        # tobytes actually creates a copy of the image, which is costly.
        array_buffer = arr.tobytes()
        if arr.ndim == 2:
            im = Image.new(mode_base, arr.T.shape)
            im.frombytes(array_buffer, "raw", mode)
        else:
            image_shape = (arr.shape[1], arr.shape[0])
            im = Image.frombytes(mode, image_shape, array_buffer)
        return im
    else:
        return Image.fromarray(arr, mode)
|
||||
|
||||
|
||||
# imported for backwards compatibility
|
||||
from .pillowmulti import GIFFormat, TIFFFormat # noqa: E402, F401
|
||||
@@ -1,329 +0,0 @@
|
||||
"""
|
||||
PIL formats for multiple images.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .pillow_legacy import PillowFormat, ndarray_to_pil, image_as_uint
|
||||
|
||||
|
||||
# Module-level logger for this plugin.
logger = logging.getLogger(__name__)

NeuQuant = None  # we can implement this when we need it
|
||||
|
||||
|
||||
class TIFFFormat(PillowFormat):
    """Pillow-based TIFF support, single images only.

    Kept mainly for backwards compatibility; the tifffile-based plugin
    is the recommended way to handle TIFF.
    """

    _description = "TIFF format (Pillow)"
    _modes = "i"  # single images only; prefer the tifffile-based plugin
|
||||
|
||||
|
||||
class GIFFormat(PillowFormat):
    """See :mod:`imageio.plugins.pillow_legacy`"""

    _modes = "iI"
    _description = "Static and animated gif (Pillow)"

    # GIF reader needs no modifications compared to base pillow reader

    class Writer(PillowFormat.Writer):
        def _open(
            self,
            loop=0,
            duration=None,
            fps=10,
            palettesize=256,
            quantizer=0,
            subrectangles=False,
        ):
            """Prepare writing an (animated) GIF.

            Parameters
            ----------
            loop : int
                How many times to repeat the animation; 0 (and any
                non-positive or infinite value) means loop forever.
            duration : float or list of float, optional
                Seconds per frame; overrides ``fps`` when given.
            fps : float
                Frames per second, used when ``duration`` is not given.
            palettesize : int
                Number of palette colors, 2..256; rounded up to a power
                of two if necessary.
            quantizer : int or str
                Quantization method, forwarded to :class:`GifWriter`.
            subrectangles : bool
                If True, only the changed region of each frame is stored.
            """
            # Check palettesize: must be a power of two between 2 and 256.
            palettesize = int(palettesize)
            if palettesize < 2 or palettesize > 256:
                raise ValueError("GIF quantize param must be 2..256")
            if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
                # Round the *requested* size up to the next power of two.
                # (Bug fix: this previously computed
                # 2 ** int(np.log2(128) + 0.999), which ignored the request
                # and forced every non-power-of-two value to 128.)
                palettesize = 2 ** int(np.log2(palettesize) + 0.999)
                logger.warning(
                    "Warning: palettesize (%r) modified to a factor of "
                    "two between 2-256." % palettesize
                )
            # Duration / fps: explicit duration (seconds) wins over fps.
            if duration is None:
                self._duration = 1.0 / float(fps)
            elif isinstance(duration, (list, tuple)):
                self._duration = [float(d) for d in duration]
            else:
                self._duration = float(duration)
            # Loop count: non-positive or infinite means "loop forever" (0).
            loop = float(loop)
            if loop <= 0 or loop == float("inf"):
                loop = 0
            loop = int(loop)
            # Subrectangles / dispose: dispose=1 (leave frame in place) is
            # required for subrectangle updates, dispose=2 (restore
            # background) otherwise.
            subrectangles = bool(subrectangles)
            self._dispose = 1 if subrectangles else 2
            # The "0" (median cut) quantizer is by far the best

            fp = self.request.get_file()
            self._writer = GifWriter(
                fp, subrectangles, loop, quantizer, int(palettesize)
            )

        def _close(self):
            # Finalize the GIF stream (writes the trailer byte).
            self._writer.close()

        def _append_data(self, im, meta):
            # Add one frame. `meta` is accepted for API compatibility but
            # is not used by this writer.
            im = image_as_uint(im, bitdepth=8)
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]  # drop a trivial channel axis
            duration = self._duration
            if isinstance(duration, list):
                # Per-frame durations: clamp to the last entry when the
                # list is shorter than the number of frames.
                duration = duration[min(len(duration) - 1, self._writer._count)]
            dispose = self._dispose
            self._writer.add_image(im, duration, dispose)

            return
|
||||
|
||||
|
||||
def intToBin(i):
    """Serialize *i* as a 2-byte little-endian unsigned field (GIF U16)."""
    return i.to_bytes(length=2, byteorder="little")
|
||||
|
||||
|
||||
class GifWriter:
    """Helper class for writing an animated GIF file, frame by frame.

    Based on code from images2gif.py (part of visvis), modified here to
    allow streamed writing: the header is emitted with the first frame
    and each subsequent frame is appended to the open file object.
    """

    def __init__(
        self,
        file,
        opt_subrectangle=True,
        opt_loop=0,
        opt_quantizer=0,
        opt_palette_size=256,
    ):
        # file: writable binary file object that receives the GIF bytes.
        self.fp = file

        # Writer options; see GIFFormat.Writer._open for their meaning.
        self.opt_subrectangle = opt_subrectangle
        self.opt_loop = opt_loop
        self.opt_quantizer = opt_quantizer
        self.opt_palette_size = opt_palette_size

        self._previous_image = None  # as np array
        self._global_palette = None  # as bytes
        self._count = 0  # number of frames written so far

        from PIL.GifImagePlugin import getdata

        # Cache PIL's getdata so add_image/write_image can reuse it.
        self.getdata = getdata

    def add_image(self, im, duration, dispose):
        """Quantize and append one frame (numpy array) to the stream."""
        # Prepare image: optionally crop to the changed subrectangle.
        im_rect, rect = im, (0, 0)
        if self.opt_subrectangle:
            im_rect, rect = self.getSubRectangle(im)
        im_pil = self.converToPIL(im_rect, self.opt_quantizer, self.opt_palette_size)

        # Get pallette - apparently, this is the 3d element of the header
        # (but it has not always been). Best we've got. Its not the same
        # as im_pil.palette.tobytes().
        from PIL.GifImagePlugin import getheader

        palette = getheader(im_pil)[0][3]

        # Write image; the first frame also triggers the file header and
        # establishes the global palette.
        if self._count == 0:
            self.write_header(im_pil, palette, self.opt_loop)
            self._global_palette = palette
        self.write_image(im_pil, palette, rect, duration, dispose)
        # assert len(palette) == len(self._global_palette)

        # Bookkeeping for subrectangle diffing and duration lookup.
        self._previous_image = im
        self._count += 1

    def write_header(self, im, globalPalette, loop):
        """Write the GIF89a header, global color table and loop extension."""
        # Gather info
        header = self.getheaderAnim(im)
        appext = self.getAppExt(loop)
        # Write
        self.fp.write(header)
        self.fp.write(globalPalette)
        self.fp.write(appext)

    def close(self):
        """Write the GIF trailer byte; call once after the last frame."""
        self.fp.write(";".encode("utf-8"))  # end gif

    def write_image(self, im, palette, rect, duration, dispose):
        """Write one already-quantized PIL frame at position `rect`."""
        fp = self.fp

        # Gather local image header and data, using PIL's getdata. That
        # function returns a list of bytes objects, but which parts are
        # what has changed multiple times, so we put together the first
        # parts until we have enough to form the image header.
        data = self.getdata(im)
        imdes = b""
        while data and len(imdes) < 11:
            imdes += data.pop(0)
        assert len(imdes) == 11

        # Make image descriptor suitable for using 256 local color palette
        lid = self.getImageDescriptor(im, rect)
        graphext = self.getGraphicsControlExt(duration, dispose)

        # Write local header. A local color table is needed whenever this
        # frame's palette differs from the global one, or when the frame
        # is not disposed to background (dispose != 2).
        if (palette != self._global_palette) or (dispose != 2):
            # Use local color palette
            fp.write(graphext)
            fp.write(lid)  # write suitable image descriptor
            fp.write(palette)  # write local color table
            fp.write(b"\x08")  # LZW minimum size code
        else:
            # Use global color palette
            fp.write(graphext)
            fp.write(imdes)  # write suitable image descriptor

        # Write image data (remaining LZW-compressed blocks from getdata).
        for d in data:
            fp.write(d)

    def getheaderAnim(self, im):
        """Get animation header. To replace PILs getheader()[0]"""
        bb = b"GIF89a"
        bb += intToBin(im.size[0])  # logical screen width
        bb += intToBin(im.size[1])  # logical screen height
        # 0x87: global color table flag set, 8 bits/color, 256-entry table;
        # then background color index 0 and no pixel aspect ratio.
        bb += b"\x87\x00\x00"
        return bb

    def getImageDescriptor(self, im, xy=None):
        """Used for the local color table properties per image.
        Otherwise global color table applies to all frames irrespective of
        whether additional colors comes in play that require a redefined
        palette. Still a maximum of 256 color per frame, obviously.

        Written by Ant1 on 2010-08-22
        Modified by Alex Robinson in January 2011 to implement subrectangles.
        """

        # Default: use full image and place at upper left
        if xy is None:
            xy = (0, 0)

        # Image separator,
        bb = b"\x2C"

        # Image position and size
        bb += intToBin(xy[0])  # Left position
        bb += intToBin(xy[1])  # Top position
        bb += intToBin(im.size[0])  # image width
        bb += intToBin(im.size[1])  # image height

        # packed field: local color table flag1, interlace0, sorted table0,
        # reserved00, lct size111=7=2^(7 + 1)=256.
        bb += b"\x87"

        # LZW minimum size code now comes later, beginning of [imagedata] blocks
        return bb

    def getAppExt(self, loop):
        """Application extension. This part specifies the amount of loops.
        If loop is 0 or inf, it goes on infinitely.
        """
        # loop == 1 means "play once": that is the decoder default, so the
        # NETSCAPE extension is simply omitted.
        if loop == 1:
            return b""
        if loop == 0:
            loop = 2**16 - 1  # maximum repeat count = effectively forever
        bb = b""
        if loop != 0:  # omit the extension if we would like a nonlooping gif
            bb = b"\x21\xFF\x0B"  # application extension
            bb += b"NETSCAPE2.0"
            bb += b"\x03\x01"
            bb += intToBin(loop)
            bb += b"\x00"  # end
        return bb

    def getGraphicsControlExt(self, duration=0.1, dispose=2):
        """Graphics Control Extension. A sort of header at the start of
        each image. Specifies duration and transparency.

        Dispose
        -------
        * 0 - No disposal specified.
        * 1 - Do not dispose. The graphic is to be left in place.
        * 2 - Restore to background color. The area used by the graphic
          must be restored to the background color.
        * 3 - Restore to previous. The decoder is required to restore the
          area overwritten by the graphic with what was there prior to
          rendering the graphic.
        * 4-7 -To be defined.
        """

        bb = b"\x21\xF9\x04"
        bb += chr((dispose & 3) << 2).encode("utf-8")
        # low bit 1 == transparency,
        # 2nd bit 1 == user input , next 3 bits, the low two of which are used,
        # are dispose.
        bb += intToBin(int(duration * 100 + 0.5))  # in 100th of seconds
        bb += b"\x00"  # no transparent color
        bb += b"\x00"  # end
        return bb

    def getSubRectangle(self, im):
        """Calculate the minimal rectangle that need updating. Returns
        a two-element tuple containing the cropped image and an x-y tuple.

        Calculating the subrectangles takes extra time, obviously. However,
        if the image sizes were reduced, the actual writing of the GIF
        goes faster. In some cases applying this method produces a GIF faster.
        """

        # Cannot do subrectangle for first image
        if self._count == 0:
            return im, (0, 0)

        prev = self._previous_image

        # Get difference, sum over colors. NOTE(review): for uint8 frames
        # the subtraction wraps modulo 256, but any changed pixel still
        # yields a nonzero difference, which is all that matters here.
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Get begin and end for both dimensions
        X = np.argwhere(diff.sum(0))
        Y = np.argwhere(diff.sum(1))
        # Get rect coordinates
        if X.size and Y.size:
            x0, x1 = int(X[0]), int(X[-1] + 1)
            y0, y1 = int(Y[0]), int(Y[-1] + 1)
        else:  # No change ... make it minimal
            x0, x1 = 0, 2
            y0, y1 = 0, 2

        return im[y0:y1, x0:x1], (x0, y0)

    def converToPIL(self, im, quantizer, palette_size=256):
        """Convert image to Paletted PIL image.

        PIL used to not do a very good job at quantization, but I guess
        this has improved a lot (at least in Pillow). I don't think we need
        neuqant (and we can add it later if we really want).
        """

        im_pil = ndarray_to_pil(im, "gif")

        if quantizer in ("nq", "neuquant"):
            # NeuQuant algorithm. NOTE(review): NeuQuant is None at module
            # level ("implement when we need it"), so this branch currently
            # raises TypeError if selected.
            nq_samplefac = 10  # 10 seems good in general
            im_pil = im_pil.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im_pil, nq_samplefac)  # Learn colors
            im_pil = nqInstance.quantize(im_pil, colors=palette_size)
        elif quantizer in (0, 1, 2):
            # Adaptive PIL algorithm; method 2 (fast octree) supports RGBA.
            if quantizer == 2:
                im_pil = im_pil.convert("RGBA")
            else:
                im_pil = im_pil.convert("RGB")
            im_pil = im_pil.quantize(colors=palette_size, method=quantizer)
        else:
            raise ValueError("Invalid value for quantizer: %r" % quantizer)
        return im_pil
|
||||
1199
.CondaPkg/env/Lib/site-packages/imageio/plugins/pyav.py
vendored
1199
.CondaPkg/env/Lib/site-packages/imageio/plugins/pyav.py
vendored
File diff suppressed because it is too large
Load Diff
@@ -1,156 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write images using SimpleITK.
|
||||
|
||||
Backend: `Insight Toolkit <https://itk.org/>`_
|
||||
|
||||
.. note::
|
||||
To use this plugin you have to install its backend::
|
||||
|
||||
pip install imageio[itk]
|
||||
|
||||
The ItkFormat uses the ITK or SimpleITK library to support a range of
|
||||
ITK-related formats. It also supports a few common formats (e.g. PNG and JPEG).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
"""
|
||||
|
||||
from ..core import Format, has_module
|
||||
|
||||
_itk = None # Defer loading to load_lib() function.
|
||||
|
||||
|
||||
def load_lib():
    """Import the ITK backend and cache its entry points in module globals.

    Tries ``itk`` first, then falls back to ``SimpleITK``. On success the
    module is stored in ``_itk`` and its read/write functions in
    ``_read_function`` / ``_write_function``. Raises ImportError when
    neither backend is installed.
    """
    global _itk, _read_function, _write_function

    # (module name, reader attribute, writer attribute), in preference order.
    backends = (
        ("itk", "imread", "imwrite"),
        ("SimpleITK", "ReadImage", "WriteImage"),
    )
    for module_name, reader_name, writer_name in backends:
        try:
            _itk = __import__(module_name)
        except ImportError:
            continue
        _read_function = getattr(_itk, reader_name)
        _write_function = getattr(_itk, writer_name)
        return _itk

    raise ImportError(
        "itk could not be found. "
        "Please try "
        " python -m pip install itk "
        "or "
        " python -m pip install simpleitk "
        "or refer to "
        " https://itkpythonpackage.readthedocs.io/ "
        "for further instructions."
    )
|
||||
|
||||
|
||||
# Split up in real ITK and all supported formats.
|
||||
# Extensions that only this ITK/SimpleITK plugin is expected to handle.
# NOTE(review): several entries lack a leading dot ("nia", "hdr", "hdf5",
# "lsm", "mnc", "mnc2", "mgh", "pic"); if request.extension always carries
# a leading dot (e.g. ".nia"), those entries can never match -- confirm
# against imageio's Request.extension. "mnc" also appears twice.
ITK_FORMATS = (
    ".gipl",
    ".ipl",
    ".mha",
    ".mhd",
    ".nhdr",
    "nia",
    "hdr",
    ".nrrd",
    ".nii",
    ".nii.gz",
    ".img",
    ".img.gz",
    ".vtk",
    "hdf5",
    "lsm",
    "mnc",
    "mnc2",
    "mgh",
    "mnc",
    "pic",
)
# Common formats that ITK can also handle; these are only claimed when the
# backend is actually importable (see ItkFormat._can_read/_can_write).
ALL_FORMATS = ITK_FORMATS + (
    ".bmp",
    ".jpeg",
    ".jpg",
    ".png",
    ".tiff",
    ".tif",
    ".dicom",
    ".dcm",
    ".gdcm",
)
|
||||
|
||||
|
||||
class ItkFormat(Format):
    """See :mod:`imageio.plugins.simpleitk`"""

    def _can_read(self, request):
        # If the request is a format that only this plugin can handle,
        # we report that we can do it; a useful error will be raised
        # when simpleitk is not installed. For the more common formats
        # we only report that we can read if the library is installed.
        if request.extension in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return request.extension in ALL_FORMATS
        # Falls through to an implicit None (falsy) when no backend exists.

    def _can_write(self, request):
        # Mirrors _can_read: ITK-only formats are always claimed; common
        # formats only when a backend is importable.
        if request.extension in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return request.extension in ALL_FORMATS

    # -- reader

    class Reader(Format.Reader):
        def _open(self, pixel_type=None, fallback_only=None, **kwargs):
            # Lazily import the backend on first use.
            if not _itk:
                load_lib()
            # Optional positional arguments are forwarded to the backend's
            # read function (pixel_type / fallback_only -- supported by
            # itk.imread; presumably not by SimpleITK, TODO confirm).
            args = ()
            if pixel_type is not None:
                args += (pixel_type,)
            if fallback_only is not None:
                args += (fallback_only,)
            self._img = _read_function(self.request.get_local_filename(), *args)

        def _get_length(self):
            # Exactly one image per file.
            return 1

        def _close(self):
            # Nothing to release; the backend owns the image object.
            pass

        def _get_data(self, index):
            # Get data; only index 0 is valid (single-image reader).
            if index != 0:
                error_msg = "Index out of range while reading from itk file"
                raise IndexError(error_msg)

            # Return array and empty meta data
            return _itk.GetArrayFromImage(self._img), {}

        def _get_meta_data(self, index):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            # Lazily import the backend on first use.
            if not _itk:
                load_lib()

        def _close(self):
            # Nothing to release; each frame is written immediately.
            pass

        def _append_data(self, im, meta):
            # Each call overwrites the target file with a single image;
            # `meta` is accepted for API compatibility but not used.
            _itk_img = _itk.GetImageFromArray(im)
            _write_function(_itk_img, self.request.get_local_filename())

        def set_meta_data(self, meta):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)
|
||||
@@ -1,753 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read SPE files.
|
||||
|
||||
Backend: internal
|
||||
|
||||
This plugin supports reading files saved in the Princeton Instruments
|
||||
SPE file format.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
char_encoding : str
|
||||
Character encoding used to decode strings in the metadata. Defaults
|
||||
to "latin1".
|
||||
check_filesize : bool
|
||||
The number of frames in the file is stored in the file header. However,
|
||||
this number may be wrong for certain software. If this is `True`
|
||||
(default), derive the number of frames also from the file size and
|
||||
raise a warning if the two values do not match.
|
||||
sdt_meta : bool
|
||||
If set to `True` (default), check for special metadata written by the
|
||||
`SDT-control` software. Does not have an effect for files written by
|
||||
other software.
|
||||
|
||||
Metadata for reading
|
||||
--------------------
|
||||
ROIs : list of dict
|
||||
Regions of interest used for recording images. Each dict has the
|
||||
"top_left" key containing x and y coordinates of the top left corner,
|
||||
the "bottom_right" key with x and y coordinates of the bottom right
|
||||
corner, and the "bin" key with number of binned pixels in x and y
|
||||
directions.
|
||||
comments : list of str
|
||||
The SPE format allows for 5 comment strings of 80 characters each.
|
||||
controller_version : int
|
||||
Hardware version
|
||||
logic_output : int
|
||||
Definition of output BNC
|
||||
amp_hi_cap_low_noise : int
|
||||
Amp switching mode
|
||||
mode : int
|
||||
Timing mode
|
||||
exp_sec : float
|
||||
Alternative exposure in seconds
|
||||
date : str
|
||||
Date string
|
||||
detector_temp : float
|
||||
Detector temperature
|
||||
detector_type : int
|
||||
CCD / diode array type
|
||||
st_diode : int
|
||||
Trigger diode
|
||||
delay_time : float
|
||||
Used with async mode
|
||||
shutter_control : int
|
||||
Normal, disabled open, or disabled closed
|
||||
absorb_live : bool
|
||||
on / off
|
||||
absorb_mode : int
|
||||
Reference strip or file
|
||||
can_do_virtual_chip : bool
|
||||
True or False whether chip can do virtual chip
|
||||
threshold_min_live : bool
|
||||
on / off
|
||||
threshold_min_val : float
|
||||
Threshold minimum value
|
||||
threshold_max_live : bool
|
||||
on / off
|
||||
threshold_max_val : float
|
||||
Threshold maximum value
|
||||
time_local : str
|
||||
Experiment local time
|
||||
time_utc : str
|
||||
Experiment UTC time
|
||||
adc_offset : int
|
||||
ADC offset
|
||||
adc_rate : int
|
||||
ADC rate
|
||||
adc_type : int
|
||||
ADC type
|
||||
adc_resolution : int
|
||||
ADC resolution
|
||||
adc_bit_adjust : int
|
||||
ADC bit adjust
|
||||
gain : int
|
||||
gain
|
||||
sw_version : str
|
||||
Version of software which created this file
|
||||
spare_4 : bytes
|
||||
Reserved space
|
||||
readout_time : float
|
||||
Experiment readout time
|
||||
type : str
|
||||
Controller type
|
||||
clockspeed_us : float
|
||||
Vertical clock speed in microseconds
|
||||
readout_mode : ["full frame", "frame transfer", "kinetics", ""]
|
||||
Readout mode. Empty string means that this was not set by the
|
||||
Software.
|
||||
window_size : int
|
||||
Window size for Kinetics mode
|
||||
file_header_ver : float
|
||||
File header version
|
||||
chip_size : [int, int]
|
||||
x and y dimensions of the camera chip
|
||||
virt_chip_size : [int, int]
|
||||
Virtual chip x and y dimensions
|
||||
pre_pixels : [int, int]
|
||||
Pre pixels in x and y dimensions
|
||||
post_pixels : [int, int],
|
||||
Post pixels in x and y dimensions
|
||||
geometric : list of {"rotate", "reverse", "flip"}
|
||||
Geometric operations
|
||||
sdt_major_version : int
|
||||
(only for files created by SDT-control)
|
||||
Major version of SDT-control software
|
||||
sdt_minor_version : int
|
||||
(only for files created by SDT-control)
|
||||
Minor version of SDT-control software
|
||||
sdt_controller_name : str
|
||||
(only for files created by SDT-control)
|
||||
Controller name
|
||||
exposure_time : float
|
||||
(only for files created by SDT-control)
|
||||
Exposure time in seconds
|
||||
color_code : str
|
||||
(only for files created by SDT-control)
|
||||
Color channels used
|
||||
detection_channels : int
|
||||
(only for files created by SDT-control)
|
||||
Number of channels
|
||||
background_subtraction : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether background subtraction was turned on
|
||||
em_active : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether EM was turned on
|
||||
em_gain : int
|
||||
(only for files created by SDT-control)
|
||||
EM gain
|
||||
modulation_active : bool
|
||||
(only for files created by SDT-control)
|
||||
Whether laser modulation (“attenuate”) was turned on
|
||||
pixel_size : float
|
||||
(only for files created by SDT-control)
|
||||
Camera pixel size
|
||||
sequence_type : str
|
||||
(only for files created by SDT-control)
|
||||
Type of sequence (standard, TOCCSL, arbitrary, …)
|
||||
grid : float
|
||||
(only for files created by SDT-control)
|
||||
Sequence time unit (“grid size”) in seconds
|
||||
n_macro : int
|
||||
(only for files created by SDT-control)
|
||||
Number of macro loops
|
||||
delay_macro : float
|
||||
(only for files created by SDT-control)
|
||||
Time between macro loops in seconds
|
||||
n_mini : int
|
||||
(only for files created by SDT-control)
|
||||
Number of mini loops
|
||||
delay_mini : float
|
||||
(only for files created by SDT-control)
|
||||
Time between mini loops in seconds
|
||||
n_micro : int (only for files created by SDT-control)
|
||||
Number of micro loops
|
||||
delay_micro : float (only for files created by SDT-control)
|
||||
Time between micro loops in seconds
|
||||
n_subpics : int
|
||||
(only for files created by SDT-control)
|
||||
Number of sub-pictures
|
||||
delay_shutter : float
|
||||
(only for files created by SDT-control)
|
||||
Camera shutter delay in seconds
|
||||
delay_prebleach : float
|
||||
(only for files created by SDT-control)
|
||||
Pre-bleach delay in seconds
|
||||
bleach_time : float
|
||||
(only for files created by SDT-control)
|
||||
Bleaching time in seconds
|
||||
recovery_time : float
|
||||
(only for files created by SDT-control)
|
||||
Recovery time in seconds
|
||||
comment : str
|
||||
(only for files created by SDT-control)
|
||||
User-entered comment. This replaces the "comments" field.
|
||||
datetime : datetime.datetime
|
||||
(only for files created by SDT-control)
|
||||
Combines the "date" and "time_local" keys. The latter two plus
|
||||
"time_utc" are removed.
|
||||
modulation_script : str
|
||||
(only for files created by SDT-control)
|
||||
Laser modulation script. Replaces the "spare_4" key.
|
||||
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Spec:
    """SPE file specification data

    Tuples of (offset, datatype, count), where offset is the offset in the SPE
    file and datatype is the datatype as used in `numpy.fromfile`()

    `data_start` is the offset of actual image data.

    `dtypes` translates SPE datatypes (0...4) to numpy ones, e. g. dtypes[0]
    is dtype("<f") (which is np.float32).

    `controllers` maps the `type` metadata to a human readable name

    `readout_modes` maps the `readoutMode` metadata to something human readable
    although this may not be accurate since there is next to no documentation
    to be found.
    """

    # Fields required to locate and shape the image data.
    basic = {
        "datatype": (108, "<h"),  # dtypes
        "xdim": (42, "<H"),
        "ydim": (656, "<H"),
        "xml_footer_offset": (678, "<Q"),
        "NumFrames": (1446, "<i"),
        "file_header_ver": (1992, "<f"),
    }

    # Optional header fields surfaced as user-visible metadata.
    metadata = {
        # ROI information
        "NumROI": (1510, "<h"),
        "ROIs": (
            1512,
            np.dtype(
                [
                    ("startx", "<H"),
                    ("endx", "<H"),
                    ("groupx", "<H"),
                    ("starty", "<H"),
                    ("endy", "<H"),
                    ("groupy", "<H"),
                ]
            ),
            10,
        ),
        # chip-related sizes
        "xDimDet": (6, "<H"),
        "yDimDet": (18, "<H"),
        "VChipXdim": (14, "<h"),
        "VChipYdim": (16, "<h"),
        # other stuff
        "controller_version": (0, "<h"),
        "logic_output": (2, "<h"),
        "amp_high_cap_low_noise": (4, "<H"),  # enum?
        "mode": (8, "<h"),  # enum?
        "exposure_sec": (10, "<f"),
        "date": (20, "<10S"),
        "detector_temp": (36, "<f"),
        "detector_type": (40, "<h"),
        "st_diode": (44, "<h"),
        "delay_time": (46, "<f"),
        # shutter_control: normal, disabled open, disabled closed
        # But which one is which?
        "shutter_control": (50, "<H"),
        "absorb_live": (52, "<h"),
        "absorb_mode": (54, "<H"),
        "can_do_virtual_chip": (56, "<h"),
        "threshold_min_live": (58, "<h"),
        "threshold_min_val": (60, "<f"),
        "threshold_max_live": (64, "<h"),
        "threshold_max_val": (66, "<f"),
        "time_local": (172, "<7S"),
        "time_utc": (179, "<7S"),
        "adc_offset": (188, "<H"),
        "adc_rate": (190, "<H"),
        "adc_type": (192, "<H"),
        "adc_resolution": (194, "<H"),
        "adc_bit_adjust": (196, "<H"),
        "gain": (198, "<H"),
        "comments": (200, "<80S", 5),
        "geometric": (600, "<H"),  # flags
        "sw_version": (688, "<16S"),
        "spare_4": (742, "<436S"),
        "XPrePixels": (98, "<h"),
        "XPostPixels": (100, "<h"),
        "YPrePixels": (102, "<h"),
        "YPostPixels": (104, "<h"),
        "readout_time": (672, "<f"),
        "xml_footer_offset": (678, "<Q"),
        "type": (704, "<h"),  # controllers
        "clockspeed_us": (1428, "<f"),
        "readout_mode": (1480, "<H"),  # readout_modes
        "window_size": (1482, "<H"),
        "file_header_ver": (1992, "<f"),
    }

    # Byte offset of the first frame; the SPE header is 4100 bytes.
    data_start = 4100

    # SPE "datatype" header value -> numpy dtype of the pixel data.
    dtypes = {
        0: np.dtype(np.float32),
        1: np.dtype(np.int32),
        2: np.dtype(np.int16),
        3: np.dtype(np.uint16),
        8: np.dtype(np.uint32),
    }

    # Index of this list corresponds to the "type" metadata value.
    controllers = [
        "new120 (Type II)",
        "old120 (Type I)",
        "ST130",
        "ST121",
        "ST138",
        "DC131 (PentaMax)",
        "ST133 (MicroMax/Roper)",
        "ST135 (GPIB)",
        "VTCCD",
        "ST116 (GPIB)",
        "OMA3 (GPIB)",
        "OMA4",
    ]

    # This was gathered from random places on the internet and own experiments
    # with the camera. May not be accurate.
    readout_modes = ["full frame", "frame transfer", "kinetics"]

    # Do not decode the following metadata keys into strings, but leave them
    # as byte arrays
    no_decode = ["spare_4"]
|
||||
|
||||
|
||||
class SDTControlSpec:
|
||||
"""Extract metadata written by the SDT-control software
|
||||
|
||||
Some of it is encoded in the comment strings
|
||||
(see :py:meth:`parse_comments`). Also, date and time are encoded in a
|
||||
peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata`
|
||||
to update the metadata dict.
|
||||
"""
|
||||
|
||||
months = {
|
||||
# Convert SDT-control month strings to month numbers
|
||||
"Jän": 1,
|
||||
"Jan": 1,
|
||||
"Feb": 2,
|
||||
"Mär": 3,
|
||||
"Mar": 3,
|
||||
"Apr": 4,
|
||||
"Mai": 5,
|
||||
"May": 5,
|
||||
"Jun": 6,
|
||||
"Jul": 7,
|
||||
"Aug": 8,
|
||||
"Sep": 9,
|
||||
"Okt": 10,
|
||||
"Oct": 10,
|
||||
"Nov": 11,
|
||||
"Dez": 12,
|
||||
"Dec": 12,
|
||||
}
|
||||
|
||||
sequence_types = {
|
||||
# TODO: complete
|
||||
"SEQU": "standard",
|
||||
"SETO": "TOCCSL",
|
||||
"KINE": "kinetics",
|
||||
"SEAR": "arbitrary",
|
||||
}
|
||||
|
||||
    class CommentDesc:
        """Describe how to extract a metadata entry from a comment string"""

        n: int
        """Which of the 5 SPE comment fields to use."""
        slice: slice
        """Which characters from the `n`-th comment to use."""
        cvt: Callable[[str], Any]
        """How to convert characters to something useful."""
        scale: Union[None, float]
        """Optional scaling factor for numbers"""

        def __init__(
            self,
            n: int,
            # NOTE: this parameter shadows the builtin `slice` locally;
            # kept as-is since it is part of the established signature.
            slice: slice,
            cvt: Callable[[str], Any] = str,
            scale: Optional[float] = None,
        ):
            self.n = n
            self.slice = slice
            self.cvt = cvt
            self.scale = scale
|
||||
|
||||
comments = {
|
||||
"sdt_major_version": CommentDesc(4, slice(66, 68), int),
|
||||
"sdt_minor_version": CommentDesc(4, slice(68, 70), int),
|
||||
"sdt_controller_name": CommentDesc(4, slice(0, 6), str),
|
||||
"exposure_time": CommentDesc(1, slice(64, 73), float, 10**-6),
|
||||
"color_code": CommentDesc(4, slice(10, 14), str),
|
||||
"detection_channels": CommentDesc(4, slice(15, 16), int),
|
||||
"background_subtraction": CommentDesc(4, 14, lambda x: x == "B"),
|
||||
"em_active": CommentDesc(4, 32, lambda x: x == "E"),
|
||||
"em_gain": CommentDesc(4, slice(28, 32), int),
|
||||
"modulation_active": CommentDesc(4, 33, lambda x: x == "A"),
|
||||
"pixel_size": CommentDesc(4, slice(25, 28), float, 0.1),
|
||||
"sequence_type": CommentDesc(
|
||||
4, slice(6, 10), lambda x: __class__.sequence_types[x]
|
||||
),
|
||||
"grid": CommentDesc(4, slice(16, 25), float, 10**-6),
|
||||
"n_macro": CommentDesc(1, slice(0, 4), int),
|
||||
"delay_macro": CommentDesc(1, slice(10, 19), float, 10**-3),
|
||||
"n_mini": CommentDesc(1, slice(4, 7), int),
|
||||
"delay_mini": CommentDesc(1, slice(19, 28), float, 10**-6),
|
||||
"n_micro": CommentDesc(1, slice(7, 10), int),
|
||||
"delay_micro": CommentDesc(1, slice(28, 37), float, 10**-6),
|
||||
"n_subpics": CommentDesc(1, slice(7, 10), int),
|
||||
"delay_shutter": CommentDesc(1, slice(73, 79), float, 10**-6),
|
||||
"delay_prebleach": CommentDesc(1, slice(37, 46), float, 10**-6),
|
||||
"bleach_time": CommentDesc(1, slice(46, 55), float, 10**-6),
|
||||
"recovery_time": CommentDesc(1, slice(55, 64), float, 10**-6),
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def parse_comments(comments: Sequence[str]) -> Union[Dict, None]:
|
||||
"""Extract SDT-control metadata from comments
|
||||
|
||||
Parameters
|
||||
----------
|
||||
comments
|
||||
List of SPE file comments, typically ``metadata["comments"]``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
If SDT-control comments were detected, return a dict of metadata, else
|
||||
`None`.
|
||||
"""
|
||||
sdt_md = {}
|
||||
if comments[4][70:] != "COMVER0500":
|
||||
logger.debug("SDT-control comments not found.")
|
||||
return None
|
||||
|
||||
sdt_md = {}
|
||||
for name, spec in SDTControlSpec.comments.items():
|
||||
try:
|
||||
v = spec.cvt(comments[spec.n][spec.slice])
|
||||
if spec.scale is not None:
|
||||
v *= spec.scale
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Failed to decode SDT-control metadata " f'field "{name}": {e}'
|
||||
)
|
||||
sdt_md[name] = v
|
||||
comment = comments[0] + comments[2]
|
||||
sdt_md["comment"] = comment.strip()
|
||||
return sdt_md
|
||||
|
||||
@staticmethod
|
||||
def get_datetime(date: str, time: str) -> Union[datetime, None]:
|
||||
"""Turn date and time saved by SDT-control into proper datetime object
|
||||
|
||||
Parameters
|
||||
----------
|
||||
date
|
||||
SPE file date, typically ``metadata["date"]``.
|
||||
time
|
||||
SPE file date, typically ``metadata["time_local"]``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
File's datetime if parsing was succsessful, else None.
|
||||
"""
|
||||
try:
|
||||
month = __class__.months[date[2:5]]
|
||||
return datetime(
|
||||
int(date[5:9]),
|
||||
month,
|
||||
int(date[0:2]),
|
||||
int(time[0:2]),
|
||||
int(time[2:4]),
|
||||
int(time[4:6]),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(f"Failed to decode date from SDT-control metadata: {e}.")
|
||||
|
||||
    @staticmethod
    def extract_metadata(meta: Mapping, char_encoding: str = "latin1"):
        """Extract SDT-control metadata from SPE metadata

        SDT-control stores some metadata in comments and other fields.
        Extract them and remove unused entries.

        Parameters
        ----------
        meta
            SPE file metadata. Modified in place.
        char_encoding
            Character encoding used to decode strings in the metadata.
        """
        # NOTE(review): `meta` is annotated `Mapping` but is mutated below
        # (pop/update); `MutableMapping` would be the accurate annotation.
        sdt_meta = __class__.parse_comments(meta["comments"])
        if not sdt_meta:
            # Not an SDT-control file; leave `meta` untouched.
            return
        # This file has SDT-control metadata: the raw comment lines are
        # replaced by the decoded fields.
        meta.pop("comments")
        meta.update(sdt_meta)

        # Get date and time in a usable format; only drop the raw fields
        # when parsing actually succeeded.
        dt = __class__.get_datetime(meta["date"], meta["time_local"])
        if dt:
            meta["datetime"] = dt
            meta.pop("date")
            meta.pop("time_local")

        # "spare_4" holds the laser modulation script as raw bytes.
        sp4 = meta["spare_4"]
        try:
            meta["modulation_script"] = sp4.decode(char_encoding)
            meta.pop("spare_4")
        except UnicodeDecodeError:
            # On decode failure the raw bytes are intentionally kept.
            logger.warning(
                "Failed to decode SDT-control laser "
                "modulation script. Bad char_encoding?"
            )

        # Get rid of unused data
        meta.pop("time_utc")
        meta.pop("exposure_sec")
|
||||
|
||||
|
||||
class SpeFormat(Format):
    """See :mod:`imageio.plugins.spe`"""

    def _can_read(self, request):
        # Recognition is based purely on the file extension.
        return request.extension in self.extensions

    def _can_write(self, request):
        # Writing SPE files is not supported by this plugin.
        return False

    class Reader(Format.Reader):
        def _open(self, char_encoding="latin1", check_filesize=True, sdt_meta=True):
            """Open the SPE file and read the basic header.

            Parameters
            ----------
            char_encoding : str
                Encoding used to decode metadata strings.
            check_filesize : bool
                If True, cross-check the `NumFrames` header field against
                the actual size of the data segment.
            sdt_meta : bool
                If True, decode SDT-control metadata when present.
            """
            self._file = self.request.get_file()
            self._char_encoding = char_encoding

            info = self._parse_header(Spec.basic)
            self._file_header_ver = info["file_header_ver"]
            self._dtype = Spec.dtypes[info["datatype"]]
            # Frames are stored row-major: (rows, columns).
            self._shape = (info["ydim"], info["xdim"])
            self._len = info["NumFrames"]
            self._sdt_meta = sdt_meta

            if check_filesize:
                # Some software writes incorrect `NumFrames` metadata.
                # To determine the number of frames, check the size of the data
                # segment -- until the end of the file for SPE<3, until the
                # xml footer for SPE>=3.
                data_end = (
                    info["xml_footer_offset"]
                    if info["file_header_ver"] >= 3
                    else os.path.getsize(self.request.get_local_filename())
                )
                # Number of frames that actually fit into the data segment.
                line = data_end - Spec.data_start
                line //= self._shape[0] * self._shape[1] * self._dtype.itemsize
                if line != self._len:
                    logger.warning(
                        "The file header of %s claims there are %s frames, "
                        "but there are actually %s frames.",
                        self.request.filename,
                        self._len,
                        line,
                    )
                    # Be conservative: never read past the data segment.
                    self._len = min(line, self._len)

            # Full metadata is parsed lazily on first access.
            self._meta = None

        def _get_meta_data(self, index):
            # Parse metadata on first request; format differs between
            # SPE file header versions.
            if self._meta is None:
                if self._file_header_ver < 3:
                    self._init_meta_data_pre_v3()
                else:
                    self._init_meta_data_post_v3()
            return self._meta

        def _close(self):
            # The file should be closed by `self.request`
            pass

        def _init_meta_data_pre_v3(self):
            """Parse the binary metadata header of SPE files older than v3."""
            self._meta = self._parse_header(Spec.metadata)

            # Clamp the ROI count to at least one entry.
            nr = self._meta.pop("NumROI", None)
            nr = 1 if nr < 1 else nr
            self._meta["ROIs"] = roi_array_to_dict(self._meta["ROIs"][:nr])

            # chip sizes
            self._meta["chip_size"] = [
                self._meta.pop("xDimDet", None),
                self._meta.pop("yDimDet", None),
            ]
            self._meta["virt_chip_size"] = [
                self._meta.pop("VChipXdim", None),
                self._meta.pop("VChipYdim", None),
            ]
            self._meta["pre_pixels"] = [
                self._meta.pop("XPrePixels", None),
                self._meta.pop("YPrePixels", None),
            ]
            self._meta["post_pixels"] = [
                self._meta.pop("XPostPixels", None),
                self._meta.pop("YPostPixels", None),
            ]

            # comments: force plain Python strings
            self._meta["comments"] = [str(c) for c in self._meta["comments"]]

            # geometric operations: decode the bit flags into names
            g = []
            f = self._meta.pop("geometric", 0)
            if f & 1:
                g.append("rotate")
            if f & 2:
                g.append("reverse")
            if f & 4:
                g.append("flip")
            self._meta["geometric"] = g

            # Make some additional information more human-readable
            t = self._meta["type"]
            if 1 <= t <= len(Spec.controllers):
                self._meta["type"] = Spec.controllers[t - 1]
            else:
                # Unknown controller id -> empty string
                self._meta["type"] = ""
            m = self._meta["readout_mode"]
            if 1 <= m <= len(Spec.readout_modes):
                self._meta["readout_mode"] = Spec.readout_modes[m - 1]
            else:
                self._meta["readout_mode"] = ""

            # bools: stored as integers in the header
            for k in (
                "absorb_live",
                "can_do_virtual_chip",
                "threshold_min_live",
                "threshold_max_live",
            ):
                self._meta[k] = bool(self._meta[k])

            # frame shape
            self._meta["frame_shape"] = self._shape

            # Extract SDT-control metadata if desired
            if self._sdt_meta:
                SDTControlSpec.extract_metadata(self._meta, self._char_encoding)

        def _parse_header(self, spec):
            """Read header fields described by `spec` into a dict.

            Each spec entry maps a name to (offset, dtype[, count]).
            """
            ret = {}
            # Decode each string from the numpy array read by np.fromfile
            decode = np.vectorize(lambda x: x.decode(self._char_encoding))

            for name, sp in spec.items():
                self._file.seek(sp[0])
                cnt = 1 if len(sp) < 3 else sp[2]
                v = np.fromfile(self._file, dtype=sp[1], count=cnt)
                if v.dtype.kind == "S" and name not in Spec.no_decode:
                    # Silently ignore string decoding failures
                    try:
                        v = decode(v)
                    except Exception:
                        logger.warning(
                            'Failed to decode "{}" metadata '
                            "string. Check `char_encoding` "
                            "parameter.".format(name)
                        )

                try:
                    # For convenience, if the array contains only one single
                    # entry, return this entry itself.
                    v = v.item()
                except ValueError:
                    # More than one entry: drop singleton dimensions instead.
                    v = np.squeeze(v)
                ret[name] = v
            return ret

        def _init_meta_data_post_v3(self):
            """For SPE >= 3, metadata is an XML footer; store it raw."""
            info = self._parse_header(Spec.basic)
            self._file.seek(info["xml_footer_offset"])
            xml = self._file.read()
            # NOTE(review): the XML is not parsed, only exposed verbatim.
            self._meta = {"__xml": xml}

        def _get_length(self):
            # Volume modes expose the whole stack as a single item.
            if self.request.mode[1] in "vV":
                return 1
            else:
                return self._len

        def _get_data(self, index):
            """Read frame `index` (or the whole stack in volume mode)."""
            if index < 0:
                raise IndexError("Image index %i < 0" % index)
            if index >= self._len:
                raise IndexError("Image index %i > %i" % (index, self._len))

            if self.request.mode[1] in "vV":
                # Volume mode: read all frames at once.
                if index != 0:
                    raise IndexError("Index has to be 0 in v and V modes")
                self._file.seek(Spec.data_start)
                data = np.fromfile(
                    self._file,
                    dtype=self._dtype,
                    count=self._shape[0] * self._shape[1] * self._len,
                )
                data = data.reshape((self._len,) + self._shape)
            else:
                # Single frame: seek directly to its offset in the data block.
                self._file.seek(
                    Spec.data_start
                    + index * self._shape[0] * self._shape[1] * self._dtype.itemsize
                )
                data = np.fromfile(
                    self._file, dtype=self._dtype, count=self._shape[0] * self._shape[1]
                )
                data = data.reshape(self._shape)
            return data, self._get_meta_data(index)
|
||||
|
||||
|
||||
def roi_array_to_dict(a):
    """Convert the `ROIs` structured array to :py:class:`dict`

    Parameters
    ----------
    a : numpy.ndarray:
        Structured array containing ROI data

    Returns
    -------
    list of dict
        One dict per ROI. Keys are "top_left", "bottom_right", and "bin",
        values are tuples whose first element is the x axis value and the
        second element is the y axis value.
    """
    # Restrict to the fields of interest; extra fields are ignored.
    fields = a[["startx", "starty", "endx", "endy", "groupx", "groupy"]]
    return [
        {
            "top_left": [int(sx), int(sy)],
            "bottom_right": [int(ex), int(ey)],
            "bin": [int(gx), int(gy)],
        }
        for sx, sy, ex, ey, gx, gy in fields
    ]
|
||||
@@ -1,336 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write SWF files.
|
||||
|
||||
Backend: internal
|
||||
|
||||
Shockwave flash (SWF) is a media format designed for rich and
|
||||
interactive animations. This plugin makes use of this format to
|
||||
store a series of images in a lossless format with good compression
|
||||
(zlib). The resulting images can be shown as an animation using
|
||||
a flash player (such as the browser).
|
||||
|
||||
SWF stores images in RGBA format. RGB or grayscale images are
|
||||
automatically converted. SWF does not support meta data.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
loop : bool
|
||||
If True, the video will rewind as soon as a frame is requested
|
||||
beyond the last frame. Otherwise, IndexError is raised. Default False.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
fps : int
|
||||
The speed to play the animation. Default 12.
|
||||
loop : bool
|
||||
If True, add a tag to the end of the file to play again from
|
||||
the first frame. Most flash players will then play the movie
|
||||
in a loop. Note that the imageio SWF Reader does not check this
|
||||
tag. Default True.
|
||||
html : bool
|
||||
If the output is a file on the file system, write an html file
|
||||
(in HTML5) that shows the animation. Default False.
|
||||
compress : bool
|
||||
Whether to compress the swf file. Default False. You probably don't
|
||||
want to use this. This does not decrease the file size since
|
||||
the images are already compressed. It will result in slower
|
||||
read and write time. The only purpose of this feature is to
|
||||
create compressed SWF files, so that we can test the
|
||||
functionality to read them.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import zlib
|
||||
import logging
|
||||
from io import BytesIO
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..core import Format, read_n_bytes, image_as_uint
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_swf = None # lazily loaded in lib()
|
||||
|
||||
|
||||
def load_lib():
    """Lazily import the bundled SWF helper module.

    Binds the package-local ``_swf`` module to the module-level global of
    the same name (so other functions can use it) and returns it.
    """
    global _swf
    from . import _swf

    return _swf
|
||||
|
||||
|
||||
class SWFFormat(Format):
    """See :mod:`imageio.plugins.swf`"""

    def _can_read(self, request):
        # Recognize SWF by its signature bytes: FWS (plain) or CWS (zlib).
        tmp = request.firstbytes[0:3].decode("ascii", "ignore")
        if tmp in ("FWS", "CWS"):
            return True

    def _can_write(self, request):
        # Writing is supported for our registered extensions only.
        if request.extension in self.extensions:
            return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, loop=False):
            """Open the SWF stream, decompress if needed, and index frames.

            Parameters
            ----------
            loop : bool
                If True, indices past the last frame wrap around.
            """
            if not _swf:
                load_lib()

            self._arg_loop = bool(loop)

            self._fp = self.request.get_file()

            # Check file ...
            tmp = self.request.firstbytes[0:3].decode("ascii", "ignore")
            if tmp == "FWS":
                pass  # OK
            elif tmp == "CWS":
                # Compressed, we need to decompress (the 8-byte header
                # itself is never compressed)
                bb = self._fp.read()
                bb = bb[:8] + zlib.decompress(bb[8:])
                # Wrap up in a file object
                self._fp = BytesIO(bb)
            else:
                raise IOError("This does not look like a valid SWF file")

            # Skip first bytes. This also tests support for seeking: if the
            # stream cannot seek, we fall back to streaming mode.
            try:
                self._fp.seek(8)
                self._streaming_mode = False
            except Exception:
                self._streaming_mode = True
                self._fp_read(8)

            # Skip header
            # Note that the number of frames is there, which we could
            # potentially use, but the number of frames does not necessarily
            # correspond to the number of images.
            nbits = _swf.bits2int(self._fp_read(1), 5)
            nbits = 5 + nbits * 4
            # Length of the RECT record in bytes, rounded up.
            Lrect = nbits / 8.0
            if Lrect % 1:
                Lrect += 1
            Lrect = int(Lrect)
            self._fp_read(Lrect + 3)

            # Now the rest is basically tags ...
            self._imlocs = []  # tuple (loc, sze, T, L1)
            if not self._streaming_mode:
                # Collect locations of frame, while skipping through the data
                # This does not read any of the tag *data*.
                try:
                    while True:
                        isimage, sze, T, L1 = self._read_one_tag()
                        loc = self._fp.tell()
                        if isimage:
                            # Still need to check if the format is right
                            format = ord(self._fp_read(3)[2:])
                            if format == 5:  # RGB or RGBA lossless
                                self._imlocs.append((loc, sze, T, L1))
                        self._fp.seek(loc + sze)  # Skip over tag
                except IndexError:
                    pass  # done reading

        def _fp_read(self, n):
            # Read exactly n bytes from the current file object.
            return read_n_bytes(self._fp, n)

        def _close(self):
            # The file is closed by `self.request`.
            pass

        def _get_length(self):
            # In streaming mode the frame count is unknown up front.
            if self._streaming_mode:
                return np.inf
            else:
                return len(self._imlocs)

        def _get_data(self, index):
            """Return (image, meta) for frame `index`."""
            # Check index
            if index < 0:
                raise IndexError("Index in swf file must be > 0")
            if not self._streaming_mode:
                if self._arg_loop and self._imlocs:
                    index = index % len(self._imlocs)
                if index >= len(self._imlocs):
                    raise IndexError("Index out of bounds")

            if self._streaming_mode:
                # Walk over tags until we find an image
                while True:
                    isimage, sze, T, L1 = self._read_one_tag()
                    bb = self._fp_read(sze)  # always read data
                    if isimage:
                        im = _swf.read_pixels(bb, 0, T, L1)  # can be None
                        if im is not None:
                            return im, {}

            else:
                # Go to corresponding location, read data, and convert to image
                loc, sze, T, L1 = self._imlocs[index]
                self._fp.seek(loc)
                bb = self._fp_read(sze)
                # read_pixels should return an ndarray here, since we
                # checked the format when indexing.
                im = _swf.read_pixels(bb, 0, T, L1)
                return im, {}

        def _read_one_tag(self):
            """Read one tag header and classify it.

            Returns ``(isimage, size, T, L1)`` where `isimage` is True for
            a lossless image tag we can read, `size` is the byte length of
            the tag body (not yet read), and `T`/`L1` are the tag type and
            length as decoded by ``_swf.get_type_and_len``.
            """

            # Get head
            head = self._fp_read(6)
            if not head:  # pragma: no cover
                raise IndexError("Reached end of swf movie")

            # Determine type and length
            T, L1, L2 = _swf.get_type_and_len(head)
            if not L2:  # pragma: no cover
                raise RuntimeError("Invalid tag length, could not proceed")

            # Tag body size: total length minus the 6-byte head.
            isimage = False
            sze = L2 - 6

            # Parse tag
            if T == 0:
                # End tag: signals the end of the movie.
                raise IndexError("Reached end of swf movie")
            elif T in [20, 36]:
                # DefineBitsLossless / DefineBitsLossless2
                isimage = True
            elif T in [6, 21, 35, 90]:  # pragma: no cover
                logger.warning("Ignoring JPEG image: cannot read JPEG.")
            else:
                pass  # Not an image tag

            return isimage, sze, T, L1

        def _get_meta_data(self, index):
            return {}  # This format does not support meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, fps=12, loop=True, html=False, compress=False):
            """Prepare for writing; see the module docstring for parameters."""
            if not _swf:
                load_lib()

            self._arg_fps = int(fps)
            self._arg_loop = bool(loop)
            self._arg_html = bool(html)
            self._arg_compress = bool(compress)

            self._fp = self.request.get_file()
            self._framecounter = 0
            self._framesize = (100, 100)

            # For compress, we use an in-memory file object so the body can
            # be compressed as a whole in _close().
            if self._arg_compress:
                self._fp_real = self._fp
                self._fp = BytesIO()

        def _close(self):
            """Finalize the file: patch counts/sizes, compress, write HTML."""
            self._complete()
            # Get size of (uncompressed) file
            sze = self._fp.tell()
            # set nframes, this is in the potentially compressed region
            self._fp.seek(self._location_to_save_nframes)
            self._fp.write(_swf.int2uint16(self._framecounter))
            # Compress body?
            if self._arg_compress:
                bb = self._fp.getvalue()
                self._fp = self._fp_real
                self._fp.write(bb[:8])
                self._fp.write(zlib.compress(bb[8:]))
                sze = self._fp.tell()  # renew sze value
            # set size (FileLength field at byte offset 4)
            self._fp.seek(4)
            self._fp.write(_swf.int2uint32(sze))
            self._fp = None  # Disable

            # Write html?
            if self._arg_html and os.path.isfile(self.request.filename):
                dirname, fname = os.path.split(self.request.filename)
                filename = os.path.join(dirname, fname[:-4] + ".html")
                w, h = self._framesize
                html = HTML % (fname, w, h, fname)
                with open(filename, "wb") as f:
                    f.write(html.encode("utf-8"))

        def _write_header(self, framesize, fps):
            """Write the SWF file header and initial tags."""
            self._framesize = framesize
            # Called as soon as we know framesize; when we get first frame
            bb = b""
            # "C" signature marks a compressed file, "F" a plain one.
            bb += "FC"[self._arg_compress].encode("ascii")
            bb += "WS".encode("ascii")  # signature bytes
            bb += _swf.int2uint8(8)  # version
            bb += "0000".encode("ascii")  # FileLength (leave open for now)
            bb += (
                _swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
            )
            bb += _swf.int2uint8(0) + _swf.int2uint8(fps)  # FrameRate
            # Remember where nframes goes so _close() can patch it.
            self._location_to_save_nframes = len(bb)
            bb += "00".encode("ascii")  # nframes (leave open for now)
            self._fp.write(bb)

            # Write some initial tags
            taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0)
            for tag in taglist:
                self._fp.write(tag.get_tag())

        def _complete(self):
            # What if no images were saved? Still emit a valid header.
            if not self._framecounter:
                self._write_header((10, 10), self._arg_fps)
            # Write stop tag if we do not loop
            if not self._arg_loop:
                self._fp.write(_swf.DoActionTag("stop").get_tag())
            # finish with end tag
            self._fp.write("\x00\x00".encode("ascii"))

        def _append_data(self, im, meta):
            """Append one frame; `meta` is ignored (SWF has no metadata)."""
            # Correct shape and type
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            im = image_as_uint(im, bitdepth=8)
            # Get frame size
            wh = im.shape[1], im.shape[0]
            # Write header on first frame
            isfirstframe = False
            if self._framecounter == 0:
                isfirstframe = True
                self._write_header(wh, self._arg_fps)
            # Create tags: bitmap, shape referencing it, placement, and
            # the frame delimiter.
            bm = _swf.BitmapTag(im)
            sh = _swf.ShapeTag(bm.id, (0, 0), wh)
            po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe))
            sf = _swf.ShowFrameTag()
            # Write tags
            for tag in [bm, sh, po, sf]:
                self._fp.write(tag.get_tag())
            self._framecounter += 1

        def set_meta_data(self, meta):
            # SWF has no metadata support; silently ignore.
            pass
|
||||
|
||||
|
||||
HTML = """
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Show Flash animation %s</title>
|
||||
</head>
|
||||
<body>
|
||||
<embed width="%i" height="%i" src="%s">
|
||||
</html>
|
||||
"""
|
||||
@@ -1,561 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Read/Write TIFF files.
|
||||
|
||||
Backend: internal
|
||||
|
||||
Provides support for a wide range of Tiff images using the tifffile
|
||||
backend.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
offset : int
|
||||
Optional start position of embedded file. By default this is
|
||||
the current file position.
|
||||
size : int
|
||||
Optional size of embedded file. By default this is the number
|
||||
of bytes from the 'offset' to the end of the file.
|
||||
multifile : bool
|
||||
If True (default), series may include pages from multiple files.
|
||||
Currently applies to OME-TIFF only.
|
||||
multifile_close : bool
|
||||
If True (default), keep the handles of other files in multifile
|
||||
series closed. This is inefficient when few files refer to
|
||||
many pages. If False, the C runtime may run out of resources.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
bigtiff : bool
|
||||
If True, the BigTIFF format is used.
|
||||
byteorder : {'<', '>'}
|
||||
The endianness of the data in the file.
|
||||
By default this is the system's native byte order.
|
||||
software : str
|
||||
Name of the software used to create the image.
|
||||
Saved with the first page only.
|
||||
|
||||
Metadata for reading
|
||||
--------------------
|
||||
planar_configuration : {'contig', 'planar'}
|
||||
Specifies if samples are stored contiguous or in separate planes.
|
||||
By default this setting is inferred from the data shape.
|
||||
'contig': last dimension contains samples.
|
||||
'planar': third last dimension contains samples.
|
||||
resolution_unit : int
|
||||
The resolution unit stored in the TIFF tag. Usually 1 means no/unknown unit,
|
||||
2 means dpi (inch), 3 means dpc (centimeter).
|
||||
resolution : (float, float, str)
|
||||
A tuple formatted as (X_resolution, Y_resolution, unit). The unit is a
|
||||
string representing one of the following units::
|
||||
|
||||
NONE # No unit or unit unknown
|
||||
INCH # dpi
|
||||
CENTIMETER # cpi
|
||||
MILLIMETER
|
||||
MICROMETER
|
||||
|
||||
compression : int
|
||||
Value indicating the compression algorithm used, e.g. 5 is LZW,
|
||||
7 is JPEG, 8 is deflate.
|
||||
If 1, data are uncompressed.
|
||||
predictor : int
|
||||
Value 2 indicates horizontal differencing was used before compression,
|
||||
while 3 indicates floating point horizontal differencing.
|
||||
If 1, no prediction scheme was used before compression.
|
||||
orientation : {'top_left', 'bottom_right', ...}
|
||||
Orientation of the image array.
|
||||
is_rgb : bool
|
||||
True if page contains a RGB image.
|
||||
is_contig : bool
|
||||
True if page contains a contiguous image.
|
||||
is_tiled : bool
|
||||
True if page contains tiled image.
|
||||
is_palette : bool
|
||||
True if page contains a palette-colored image and not OME or STK.
|
||||
is_reduced : bool
|
||||
True if page is a reduced image of another image.
|
||||
is_shaped : bool
|
||||
True if page contains shape in image_description tag.
|
||||
is_fluoview : bool
|
||||
True if page contains FluoView MM_STAMP tag.
|
||||
is_nih : bool
|
||||
True if page contains NIH image header.
|
||||
is_micromanager : bool
|
||||
True if page contains Micro-Manager metadata.
|
||||
is_ome : bool
|
||||
True if page contains OME-XML in image_description tag.
|
||||
is_sgi : bool
|
||||
True if page contains SGI image and tile depth tags.
|
||||
is_mdgel : bool
|
||||
True if page contains md_file_tag tag.
|
||||
is_mediacy : bool
|
||||
True if page contains Media Cybernetics Id tag.
|
||||
is_stk : bool
|
||||
True if page contains UIC2Tag tag.
|
||||
is_lsm : bool
|
||||
True if page contains LSM CZ_LSM_INFO tag.
|
||||
description : str
|
||||
Image description
|
||||
description1 : str
|
||||
Additional description
|
||||
is_imagej : None or str
|
||||
ImageJ metadata
|
||||
software : str
|
||||
Software used to create the TIFF file
|
||||
datetime : datetime.datetime
|
||||
Creation date and time
|
||||
|
||||
Metadata for writing
|
||||
--------------------
|
||||
photometric : {'minisblack', 'miniswhite', 'rgb'}
|
||||
The color space of the image data.
|
||||
By default this setting is inferred from the data shape.
|
||||
planarconfig : {'contig', 'planar'}
|
||||
Specifies if samples are stored contiguous or in separate planes.
|
||||
By default this setting is inferred from the data shape.
|
||||
'contig': last dimension contains samples.
|
||||
'planar': third last dimension contains samples.
|
||||
resolution : (float, float) or ((int, int), (int, int))
|
||||
X and Y resolution in dots per inch as float or rational numbers.
|
||||
description : str
|
||||
The subject of the image. Saved with the first page only.
|
||||
compress : int
|
||||
Values from 0 to 9 controlling the level of zlib (deflate) compression.
|
||||
If 0, data are written uncompressed (default).
|
||||
compression : str, (int, int)
|
||||
Compression scheme used while writing the image. If omitted (default) the
|
||||
image is not compressed. Compression cannot be used to write contiguous
|
||||
series. Compressors may require certain data shapes, types or value ranges.
|
||||
For example, JPEG compression requires grayscale or RGB(A), uint8 or 12-bit
|
||||
uint16. JPEG compression is experimental. JPEG markers and TIFF tags may not
|
||||
match. Only a limited set of compression schemes are implemented. 'ZLIB' is
|
||||
short for ADOBE_DEFLATE. The value is written to the Compression tag.
|
||||
compressionargs:
|
||||
Extra arguments passed to compression codec, e.g., compression level. Refer
|
||||
to the Imagecodecs implementation for supported arguments.
|
||||
predictor : bool
|
||||
If True, horizontal differencing is applied before compression.
|
||||
Note that using an int literal 1 actually means no prediction scheme
|
||||
will be used.
|
||||
volume : bool
|
||||
If True, volume data are stored in one tile (if applicable) using
|
||||
the SGI image_depth and tile_depth tags.
|
||||
Image width and depth must be multiple of 16.
|
||||
Few software can read this format, e.g. MeVisLab.
|
||||
writeshape : bool
|
||||
If True, write the data shape to the image_description tag
|
||||
if necessary and no other description is given.
|
||||
extratags: sequence of tuples
|
||||
Additional tags as [(code, dtype, count, value, writeonce)].
|
||||
|
||||
code : int
|
||||
The TIFF tag Id.
|
||||
dtype : str
|
||||
Data type of items in 'value' in Python struct format.
|
||||
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
|
||||
count : int
|
||||
Number of data values. Not used for string values.
|
||||
value : sequence
|
||||
'Count' values compatible with 'dtype'.
|
||||
writeonce : bool
|
||||
If True, the tag is written to the first page only.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Global metadata is stored with the first frame in a TIFF file.
|
||||
Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
|
||||
was written has no effect. Also, global metadata is ignored if metadata is
|
||||
provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.
|
||||
|
||||
If you have installed tifffile as a Python package, imageio will attempt
|
||||
to use that as backend instead of the bundled backend. Doing so can
|
||||
provide access to new performance improvements and bug fixes.
|
||||
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
from ..core import Format
|
||||
from ..core.request import URI_BYTES, URI_FILE
|
||||
|
||||
import numpy as np
|
||||
import warnings
|
||||
|
||||
|
||||
try:
|
||||
import tifffile as _tifffile
|
||||
except ImportError:
|
||||
warnings.warn(
|
||||
"ImageIO's vendored tifffile backend is deprecated and will be"
|
||||
" removed in ImageIO v3. Install the tifffile directly:"
|
||||
" `pip install imageio[tifffile]`",
|
||||
DeprecationWarning,
|
||||
)
|
||||
from . import _tifffile
|
||||
|
||||
|
||||
# File extensions handled by the TIFF plugin (including Metamorph STK and
# Zeiss LSM variants).
TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm")
# Metadata keys that are forwarded as keyword arguments to the tifffile
# backend when writing; see the module docstring for their meaning.
WRITE_METADATA_KEYS = (
    "photometric",
    "planarconfig",
    "resolution",
    "description",
    "compress",
    "compression",
    "compressionargs",
    "predictor",
    "volume",
    "writeshape",
    "extratags",
    "datetime",
)
# Page attributes copied from the tifffile page object into the metadata
# dict when reading; see the module docstring for their meaning.
READ_METADATA_KEYS = (
    "planar_configuration",
    "is_fluoview",
    "is_nih",
    "is_contig",
    "is_micromanager",
    "is_ome",
    "is_lsm",
    "is_palette",
    "is_reduced",
    "is_rgb",
    "is_sgi",
    "is_shaped",
    "is_stk",
    "is_tiled",
    "is_mdgel",
    "resolution_unit",
    "compression",
    "predictor",
    "is_mediacy",
    "orientation",
    "description",
    "description1",
    "is_imagej",
    "software",
)
|
||||
|
||||
|
||||
class TiffFormat(Format):
|
||||
"""Provides support for a wide range of Tiff images using the tifffile
|
||||
backend.
|
||||
|
||||
Images that contain multiple pages can be read using ``imageio.mimread()``
|
||||
to read the individual pages, or ``imageio.volread()`` to obtain a
|
||||
single (higher dimensional) array.
|
||||
|
||||
Note that global metadata is stored with the first frame in a TIFF file.
|
||||
Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
|
||||
was written has no effect. Also, global metadata is ignored if metadata is
|
||||
provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.
|
||||
|
||||
If you have installed tifffile as a Python package, imageio will attempt
|
||||
to use that as backend instead of the bundled backend. Doing so can
|
||||
provide access to new performance improvements and bug fixes.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
offset : int
|
||||
Optional start position of embedded file. By default this is
|
||||
the current file position.
|
||||
size : int
|
||||
Optional size of embedded file. By default this is the number
|
||||
of bytes from the 'offset' to the end of the file.
|
||||
multifile : bool
|
||||
If True (default), series may include pages from multiple files.
|
||||
Currently applies to OME-TIFF only.
|
||||
multifile_close : bool
|
||||
If True (default), keep the handles of other files in multifile
|
||||
series closed. This is inefficient when few files refer to
|
||||
many pages. If False, the C runtime may run out of resources.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
bigtiff : bool
|
||||
If True, the BigTIFF format is used.
|
||||
byteorder : {'<', '>'}
|
||||
The endianness of the data in the file.
|
||||
By default this is the system's native byte order.
|
||||
software : str
|
||||
Name of the software used to create the image.
|
||||
Saved with the first page only.
|
||||
|
||||
Metadata for reading
|
||||
--------------------
|
||||
planar_configuration : {'contig', 'planar'}
|
||||
Specifies if samples are stored contiguous or in separate planes.
|
||||
By default this setting is inferred from the data shape.
|
||||
'contig': last dimension contains samples.
|
||||
'planar': third last dimension contains samples.
|
||||
resolution_unit : (float, float) or ((int, int), (int, int))
|
||||
X and Y resolution in dots per inch as float or rational numbers.
|
||||
compression : int
|
||||
Value indicating the compression algorithm used, e.g. 5 is LZW,
|
||||
7 is JPEG, 8 is deflate.
|
||||
If 1, data are uncompressed.
|
||||
predictor : int
|
||||
Value 2 indicates horizontal differencing was used before compression,
|
||||
while 3 indicates floating point horizontal differencing.
|
||||
If 1, no prediction scheme was used before compression.
|
||||
orientation : {'top_left', 'bottom_right', ...}
|
||||
Oriented of image array.
|
||||
is_rgb : bool
|
||||
True if page contains a RGB image.
|
||||
is_contig : bool
|
||||
True if page contains a contiguous image.
|
||||
is_tiled : bool
|
||||
True if page contains tiled image.
|
||||
is_palette : bool
|
||||
True if page contains a palette-colored image and not OME or STK.
|
||||
is_reduced : bool
|
||||
True if page is a reduced image of another image.
|
||||
is_shaped : bool
|
||||
True if page contains shape in image_description tag.
|
||||
is_fluoview : bool
|
||||
True if page contains FluoView MM_STAMP tag.
|
||||
is_nih : bool
|
||||
True if page contains NIH image header.
|
||||
is_micromanager : bool
|
||||
True if page contains Micro-Manager metadata.
|
||||
is_ome : bool
|
||||
True if page contains OME-XML in image_description tag.
|
||||
is_sgi : bool
|
||||
True if page contains SGI image and tile depth tags.
|
||||
is_stk : bool
|
||||
True if page contains UIC2Tag tag.
|
||||
is_mdgel : bool
|
||||
True if page contains md_file_tag tag.
|
||||
is_mediacy : bool
|
||||
True if page contains Media Cybernetics Id tag.
|
||||
is_stk : bool
|
||||
True if page contains UIC2Tag tag.
|
||||
is_lsm : bool
|
||||
True if page contains LSM CZ_LSM_INFO tag.
|
||||
description : str
|
||||
Image description
|
||||
description1 : str
|
||||
Additional description
|
||||
is_imagej : None or str
|
||||
ImageJ metadata
|
||||
software : str
|
||||
Software used to create the TIFF file
|
||||
datetime : datetime.datetime
|
||||
Creation date and time
|
||||
|
||||
Metadata for writing
|
||||
--------------------
|
||||
photometric : {'minisblack', 'miniswhite', 'rgb'}
|
||||
The color space of the image data.
|
||||
By default this setting is inferred from the data shape.
|
||||
planarconfig : {'contig', 'planar'}
|
||||
Specifies if samples are stored contiguous or in separate planes.
|
||||
By default this setting is inferred from the data shape.
|
||||
'contig': last dimension contains samples.
|
||||
'planar': third last dimension contains samples.
|
||||
resolution : (float, float) or ((int, int), (int, int))
|
||||
X and Y resolution in dots per inch as float or rational numbers.
|
||||
description : str
|
||||
The subject of the image. Saved with the first page only.
|
||||
compress : int
|
||||
Values from 0 to 9 controlling the level of zlib (deflate) compression.
|
||||
If 0, data are written uncompressed (default).
|
||||
predictor : bool
|
||||
If True, horizontal differencing is applied before compression.
|
||||
Note that using an int literal 1 actually means no prediction scheme
|
||||
will be used.
|
||||
volume : bool
|
||||
If True, volume data are stored in one tile (if applicable) using
|
||||
the SGI image_depth and tile_depth tags.
|
||||
Image width and depth must be multiple of 16.
|
||||
Few software can read this format, e.g. MeVisLab.
|
||||
writeshape : bool
|
||||
If True, write the data shape to the image_description tag
|
||||
if necessary and no other description is given.
|
||||
extratags: sequence of tuples
|
||||
Additional tags as [(code, dtype, count, value, writeonce)].
|
||||
|
||||
code : int
|
||||
The TIFF tag Id.
|
||||
dtype : str
|
||||
Data type of items in 'value' in Python struct format.
|
||||
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
|
||||
count : int
|
||||
Number of data values. Not used for string values.
|
||||
value : sequence
|
||||
'Count' values compatible with 'dtype'.
|
||||
writeonce : bool
|
||||
If True, the tag is written to the first page only.
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
try:
|
||||
_tifffile.TiffFile(request.get_file(), **request.kwargs)
|
||||
except ValueError:
|
||||
# vendored backend raises value exception
|
||||
return False
|
||||
except _tifffile.TiffFileError: # pragma: no-cover
|
||||
# current version raises custom exception
|
||||
return False
|
||||
finally:
|
||||
request.get_file().seek(0)
|
||||
|
||||
return True
|
||||
|
||||
def _can_write(self, request):
|
||||
if request._uri_type in [URI_FILE, URI_BYTES]:
|
||||
pass # special URI
|
||||
elif request.extension not in self.extensions:
|
||||
return False
|
||||
|
||||
try:
|
||||
_tifffile.TiffWriter(request.get_file(), **request.kwargs)
|
||||
except ValueError:
|
||||
# vendored backend raises value exception
|
||||
return False
|
||||
except _tifffile.TiffFileError: # pragma: no-cover
|
||||
# current version raises custom exception
|
||||
return False
|
||||
finally:
|
||||
request.get_file().seek(0)
|
||||
return True
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self, **kwargs):
|
||||
# Allow loading from http; tifffile uses seek, so download first
|
||||
if self.request.filename.startswith(("http://", "https://")):
|
||||
self._f = f = open(self.request.get_local_filename(), "rb")
|
||||
else:
|
||||
self._f = None
|
||||
f = self.request.get_file()
|
||||
self._tf = _tifffile.TiffFile(f, **kwargs)
|
||||
|
||||
def _close(self):
|
||||
self._tf.close()
|
||||
if self._f is not None:
|
||||
self._f.close()
|
||||
|
||||
def _get_length(self):
|
||||
return len(self._tf.series)
|
||||
|
||||
def _get_data(self, index):
|
||||
if index < 0 or index >= self._get_length():
|
||||
raise IndexError("Index out of range while reading from tiff file")
|
||||
|
||||
im = self._tf.asarray(series=index)
|
||||
meta = self._get_meta_data(index)
|
||||
|
||||
return im, meta
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
meta = {}
|
||||
page = self._tf.pages[index or 0]
|
||||
for key in READ_METADATA_KEYS:
|
||||
try:
|
||||
meta[key] = getattr(page, key)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# tifffile <= 0.12.1 use datetime, newer use DateTime
|
||||
for key in ("datetime", "DateTime"):
|
||||
try:
|
||||
meta["datetime"] = datetime.datetime.strptime(
|
||||
page.tags[key].value, "%Y:%m:%d %H:%M:%S"
|
||||
)
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if 296 in page.tags:
|
||||
meta["resolution_unit"] = page.tags[296].value.value
|
||||
|
||||
if 282 in page.tags and 283 in page.tags and 296 in page.tags:
|
||||
resolution_x = page.tags[282].value
|
||||
resolution_y = page.tags[283].value
|
||||
if resolution_x[1] == 0 or resolution_y[1] == 0:
|
||||
warnings.warn(
|
||||
"Ignoring resulution metadata, "
|
||||
"because at least one direction has a 0 denominator.",
|
||||
RuntimeWarning,
|
||||
)
|
||||
else:
|
||||
meta["resolution"] = (
|
||||
resolution_x[0] / resolution_x[1],
|
||||
resolution_y[0] / resolution_y[1],
|
||||
page.tags[296].value.name,
|
||||
)
|
||||
|
||||
return meta
|
||||
|
||||
# -- writer
|
||||
class Writer(Format.Writer):
|
||||
def _open(self, bigtiff=None, byteorder=None, software=None):
|
||||
try:
|
||||
self._tf = _tifffile.TiffWriter(
|
||||
self.request.get_file(),
|
||||
bigtiff=bigtiff,
|
||||
byteorder=byteorder,
|
||||
software=software,
|
||||
)
|
||||
self._software = None
|
||||
except TypeError:
|
||||
# In tifffile >= 0.15, the `software` arg is passed to
|
||||
# TiffWriter.save
|
||||
self._tf = _tifffile.TiffWriter(
|
||||
self.request.get_file(), bigtiff=bigtiff, byteorder=byteorder
|
||||
)
|
||||
self._software = software
|
||||
|
||||
self._meta = {}
|
||||
self._frames_written = 0
|
||||
|
||||
def _close(self):
|
||||
self._tf.close()
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
if meta is not None:
|
||||
meta = self._sanitize_meta(meta)
|
||||
else:
|
||||
# Use global metadata for first frame
|
||||
meta = self._meta if self._frames_written == 0 else {}
|
||||
if self._software is not None and self._frames_written == 0:
|
||||
meta["software"] = self._software
|
||||
# No need to check self.request.mode; tifffile figures out whether
|
||||
# this is a single page, or all page data at once.
|
||||
try:
|
||||
# TiffWriter.save has been deprecated in version 2020.9.30
|
||||
write_meth = self._tf.write
|
||||
except AttributeError:
|
||||
write_meth = self._tf.save
|
||||
write_meth(np.asanyarray(im), contiguous=False, **meta)
|
||||
self._frames_written += 1
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_meta(meta):
|
||||
ret = {}
|
||||
for key, value in meta.items():
|
||||
if key in WRITE_METADATA_KEYS:
|
||||
# Special case of previously read `predictor` int value
|
||||
# 1(=NONE) translation to False expected by TiffWriter.save
|
||||
if key == "predictor" and not isinstance(value, bool):
|
||||
ret[key] = value > 1
|
||||
elif key == "compress" and value != 0:
|
||||
warnings.warn(
|
||||
"The use of `compress` is deprecated. Use `compression` and `compressionargs` instead.",
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
if _tifffile.__version__ < "2022":
|
||||
ret["compression"] = (8, value)
|
||||
else:
|
||||
ret["compression"] = "zlib"
|
||||
ret["compressionargs"] = {"level": value}
|
||||
else:
|
||||
ret[key] = value
|
||||
return ret
|
||||
|
||||
def set_meta_data(self, meta):
|
||||
self._meta = self._sanitize_meta(meta)
|
||||
@@ -1,434 +0,0 @@
|
||||
"""Read/Write TIFF files using tifffile.
|
||||
|
||||
.. note::
|
||||
To use this plugin you need to have `tifffile
|
||||
<https://github.com/cgohlke/tifffile>`_ installed::
|
||||
|
||||
pip install tifffile
|
||||
|
||||
This plugin wraps tifffile, a powerfull library to manipulate TIFF files. It
|
||||
superseeds our previous tifffile plugin and aims to expose all the features of
|
||||
tifffile.
|
||||
|
||||
The plugin treats individual TIFF series as ndimages. A series is a sequence of
|
||||
TIFF pages that, when combined describe a meaningful unit, e.g., a volumetric
|
||||
image (where each slice is stored on an individual page) or a multi-color
|
||||
staining picture (where each stain is stored on an individual page). Different
|
||||
TIFF flavors/variants use series in different ways and, as such, the resulting
|
||||
reading behavior may vary depending on the program used while creating a
|
||||
particular TIFF file.
|
||||
|
||||
Methods
|
||||
-------
|
||||
.. note::
|
||||
Check the respective function for a list of supported kwargs and detailed
|
||||
documentation.
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
TifffilePlugin.read
|
||||
TifffilePlugin.iter
|
||||
TifffilePlugin.write
|
||||
TifffilePlugin.properties
|
||||
TifffilePlugin.metadata
|
||||
|
||||
Additional methods available inside the :func:`imopen <imageio.v3.imopen>`
|
||||
context:
|
||||
|
||||
.. autosummary::
|
||||
:toctree:
|
||||
|
||||
TifffilePlugin.iter_pages
|
||||
|
||||
"""
|
||||
|
||||
from io import BytesIO
|
||||
from typing import Any, Dict, Optional, cast
|
||||
import warnings
|
||||
import datetime
|
||||
|
||||
import numpy as np
|
||||
import tifffile
|
||||
|
||||
from ..core.request import URI_BYTES, InitializationError, Request
|
||||
from ..core.v3_plugin_api import ImageProperties, PluginV3
|
||||
from ..typing import ArrayLike
|
||||
|
||||
|
||||
def _get_resolution(page):
|
||||
"""Get the resolution in a py3.7 compatible way"""
|
||||
|
||||
metadata = {
|
||||
# uncomment once py 3.7 reached EoL - in fact, refactor this
|
||||
# function :)
|
||||
# "resolution_unit": page.resolutionunit,
|
||||
# "resolution": page.resolution,
|
||||
}
|
||||
|
||||
if 296 in page.tags:
|
||||
metadata["resolution_unit"] = page.tags[296].value.value
|
||||
|
||||
if 282 in page.tags and 283 in page.tags and 296 in page.tags:
|
||||
resolution_x = page.tags[282].value
|
||||
resolution_y = page.tags[283].value
|
||||
if resolution_x[1] == 0 or resolution_y[1] == 0:
|
||||
warnings.warn(
|
||||
"Ignoring resulution metadata, "
|
||||
"because at least one direction has a 0 denominator.",
|
||||
RuntimeWarning,
|
||||
)
|
||||
else:
|
||||
metadata["resolution"] = (
|
||||
resolution_x[0] / resolution_x[1],
|
||||
resolution_y[0] / resolution_y[1],
|
||||
)
|
||||
|
||||
return metadata
|
||||
|
||||
|
||||
def _get_datatime(page):
|
||||
"""Get the datetime in a python 3.7 compatible way"""
|
||||
|
||||
metadata = {
|
||||
# uncomment once python 3.7 is EoL
|
||||
# "datetime": page.datetime,
|
||||
}
|
||||
|
||||
try:
|
||||
metadata["datetime"] = datetime.datetime.strptime(
|
||||
page.tags[306].value, "%Y:%m:%d %H:%M:%S"
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return metadata
|
||||
|
||||
|
||||
class TifffilePlugin(PluginV3):
|
||||
"""Support for tifffile as backend.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
request : iio.Request
|
||||
A request object that represents the users intent. It provides a
|
||||
standard interface for a plugin to access the various ImageResources.
|
||||
Check the docs for details.
|
||||
kwargs : Any
|
||||
Additional kwargs are forwarded to tifffile's constructor, i.e.
|
||||
to ``TiffFile`` for reading or ``TiffWriter`` for writing.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, request: Request, **kwargs) -> None:
|
||||
super().__init__(request)
|
||||
self._fh = None
|
||||
|
||||
if request.mode.io_mode == "r":
|
||||
try:
|
||||
self._fh = tifffile.TiffFile(request.get_file(), **kwargs)
|
||||
except tifffile.tifffile.TiffFileError:
|
||||
raise InitializationError("Tifffile can not read this file.")
|
||||
else:
|
||||
self._fh = tifffile.TiffWriter(request.get_file(), **kwargs)
|
||||
|
||||
# ---------------------
|
||||
# Standard V3 Interface
|
||||
# ---------------------
|
||||
|
||||
def read(self, *, index: int = None, page: int = None, **kwargs) -> np.ndarray:
|
||||
"""Read a ndimage or page.
|
||||
|
||||
The ndimage returned depends on the value of both ``index`` and
|
||||
``page``. ``index`` selects the series to read and ``page`` allows
|
||||
selecting a single page from the selected series. If ``index=None``,
|
||||
``page`` is understood as a flat index, i.e., the selection ignores
|
||||
individual series inside the file. If both ``index`` and ``page`` are
|
||||
``None``, then all the series are read and returned as a batch.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
If ``int``, select the ndimage (series) located at that index inside
|
||||
the file and return ``page`` from it. If ``None`` and ``page`` is
|
||||
``int`` read the page located at that (flat) index inside the file.
|
||||
If ``None`` and ``page=None``, read all ndimages from the file and
|
||||
return them as a batch.
|
||||
page : int
|
||||
If ``None`` return the full selected ndimage. If ``int``, read the
|
||||
page at the selected index and return it.
|
||||
kwargs : Any
|
||||
Additional kwargs are forwarded to TiffFile's ``as_array`` method.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ndarray : np.ndarray
|
||||
The decoded ndimage or page.
|
||||
"""
|
||||
|
||||
if "key" not in kwargs:
|
||||
kwargs["key"] = page
|
||||
elif page is not None:
|
||||
raise ValueError("Can't use `page` and `key` at the same time.")
|
||||
|
||||
# set plugin default for ``index``
|
||||
if index is not None and "series" in kwargs:
|
||||
raise ValueError("Can't use `series` and `index` at the same time.")
|
||||
elif "series" in kwargs:
|
||||
index = kwargs.pop("series")
|
||||
elif index is not None:
|
||||
pass
|
||||
else:
|
||||
index = 0
|
||||
|
||||
if index is Ellipsis and page is None:
|
||||
# read all series in the file and return them as a batch
|
||||
ndimage = np.stack([x for x in self.iter(**kwargs)])
|
||||
else:
|
||||
index = None if index is Ellipsis else index
|
||||
ndimage = self._fh.asarray(series=index, **kwargs)
|
||||
|
||||
return ndimage
|
||||
|
||||
def iter(self, **kwargs) -> np.ndarray:
|
||||
"""Yield ndimages from the TIFF.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
kwargs : Any
|
||||
Additional kwargs are forwarded to the TiffPageSeries' ``as_array``
|
||||
method.
|
||||
|
||||
Yields
|
||||
------
|
||||
ndimage : np.ndarray
|
||||
A decoded ndimage.
|
||||
"""
|
||||
|
||||
for sequence in self._fh.series:
|
||||
yield sequence.asarray(**kwargs)
|
||||
|
||||
def write(
|
||||
self, ndimage: ArrayLike, *, is_batch: bool = False, **kwargs
|
||||
) -> Optional[bytes]:
|
||||
"""Save a ndimage as TIFF.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ndimage : ArrayLike
|
||||
The ndimage to encode and write to the ImageResource.
|
||||
is_batch : bool
|
||||
If True, the first dimension of the given ndimage is treated as a
|
||||
batch dimension and each element will create a new series.
|
||||
kwargs : Any
|
||||
Additional kwargs are forwarded to TiffWriter's ``write`` method.
|
||||
|
||||
Returns
|
||||
-------
|
||||
encoded_image : bytes
|
||||
If the ImageResource is ``"<bytes>"``, return the encoded bytes.
|
||||
Otherwise write returns None.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Incremental writing is supported. Subsequent calls to ``write`` will
|
||||
create new series unless ``contiguous=True`` is used, in which case the
|
||||
call to write will append to the current series.
|
||||
|
||||
"""
|
||||
|
||||
if not is_batch:
|
||||
ndimage = np.asarray(ndimage)[None, :]
|
||||
|
||||
for image in ndimage:
|
||||
self._fh.write(image, **kwargs)
|
||||
|
||||
if self._request._uri_type == URI_BYTES:
|
||||
self._fh.close()
|
||||
file = cast(BytesIO, self._request.get_file())
|
||||
return file.getvalue()
|
||||
|
||||
def metadata(
|
||||
self, *, index: int = Ellipsis, page: int = None, exclude_applied: bool = True
|
||||
) -> Dict[str, Any]:
|
||||
"""Format-Specific TIFF metadata.
|
||||
|
||||
The metadata returned depends on the value of both ``index`` and
|
||||
``page``. ``index`` selects a series and ``page`` allows selecting a
|
||||
single page from the selected series. If ``index=Ellipsis``, ``page`` is
|
||||
understood as a flat index, i.e., the selection ignores individual
|
||||
series inside the file. If ``index=Ellipsis`` and ``page=None`` then
|
||||
global (file-level) metadata is returned.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
Select the series of which to extract metadata from. If Ellipsis, treat
|
||||
page as a flat index into the file's pages.
|
||||
page : int
|
||||
If not None, select the page of which to extract metadata from. If
|
||||
None, read series-level metadata or, if ``index=...`` global,
|
||||
file-level metadata.
|
||||
exclude_applied : bool
|
||||
For API compatibility. Currently ignored.
|
||||
|
||||
Returns
|
||||
-------
|
||||
metadata : dict
|
||||
A dictionary with information regarding the tiff flavor (file-level)
|
||||
or tiff tags (page-level).
|
||||
"""
|
||||
|
||||
if index is not Ellipsis and page is not None:
|
||||
target = self._fh.series[index].pages[page]
|
||||
elif index is not Ellipsis and page is None:
|
||||
# This is based on my understanding that series-level metadata is
|
||||
# stored in the first TIFF page.
|
||||
target = self._fh.series[index].pages[0]
|
||||
elif index is Ellipsis and page is not None:
|
||||
target = self._fh.pages[page]
|
||||
else:
|
||||
target = None
|
||||
|
||||
metadata = {}
|
||||
if target is None:
|
||||
# return file-level metadata
|
||||
metadata["byteorder"] = self._fh.byteorder
|
||||
|
||||
for flag in tifffile.TIFF.FILE_FLAGS:
|
||||
flag_value = getattr(self._fh, "is_" + flag)
|
||||
metadata["is_" + flag] = flag_value
|
||||
|
||||
if flag_value and hasattr(self._fh, flag + "_metadata"):
|
||||
flavor_metadata = getattr(self._fh, flag + "_metadata")
|
||||
if isinstance(flavor_metadata, tuple):
|
||||
metadata.update(flavor_metadata[0])
|
||||
else:
|
||||
metadata.update(flavor_metadata)
|
||||
else:
|
||||
# tifffile may return a TiffFrame instead of a page
|
||||
target = target.keyframe
|
||||
|
||||
metadata.update({tag.name: tag.value for tag in target.tags})
|
||||
metadata.update(
|
||||
{
|
||||
"planar_configuration": target.planarconfig,
|
||||
"compression": target.compression,
|
||||
"predictor": target.predictor,
|
||||
"orientation": None, # TODO
|
||||
"description1": target.description1,
|
||||
"description": target.description,
|
||||
"software": target.software,
|
||||
# update once python 3.7 reached EoL
|
||||
**_get_resolution(target),
|
||||
**_get_datatime(target),
|
||||
}
|
||||
)
|
||||
|
||||
return metadata
|
||||
|
||||
def properties(self, *, index: int = None, page: int = None) -> ImageProperties:
|
||||
"""Standardized metadata.
|
||||
|
||||
The properties returned depend on the value of both ``index`` and
|
||||
``page``. ``index`` selects a series and ``page`` allows selecting a
|
||||
single page from the selected series. If ``index=Ellipsis``, ``page`` is
|
||||
understood as a flat index, i.e., the selection ignores individual
|
||||
series inside the file. If ``index=Ellipsis`` and ``page=None`` then
|
||||
global (file-level) properties are returned. If ``index=Ellipsis``
|
||||
and ``page=...``, file-level properties for the flattened index are
|
||||
returned.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
If ``int``, select the ndimage (series) located at that index inside
|
||||
the file. If ``Ellipsis`` and ``page`` is ``int`` extract the
|
||||
properties of the page located at that (flat) index inside the file.
|
||||
If ``Ellipsis`` and ``page=None``, return the properties for the
|
||||
batch of all ndimages in the file.
|
||||
page : int
|
||||
If ``None`` return the properties of the full ndimage. If ``...``
|
||||
return the properties of the flattened index. If ``int``,
|
||||
return the properties of the page at the selected index only.
|
||||
|
||||
Returns
|
||||
-------
|
||||
image_properties : ImageProperties
|
||||
The standardized metadata (properties) of the selected ndimage or series.
|
||||
|
||||
"""
|
||||
index = index or 0
|
||||
page_idx = 0 if page in (None, Ellipsis) else page
|
||||
|
||||
if index is Ellipsis:
|
||||
target_page = self._fh.pages[page_idx]
|
||||
else:
|
||||
target_page = self._fh.series[index].pages[page_idx]
|
||||
|
||||
if index is Ellipsis and page is None:
|
||||
n_series = len(self._fh.series)
|
||||
props = ImageProperties(
|
||||
shape=(n_series, *target_page.shape),
|
||||
dtype=target_page.dtype,
|
||||
n_images=n_series,
|
||||
is_batch=True,
|
||||
spacing=_get_resolution(target_page)["resolution"],
|
||||
)
|
||||
elif index is Ellipsis and page is Ellipsis:
|
||||
n_pages = len(self._fh.pages)
|
||||
props = ImageProperties(
|
||||
shape=(n_pages, *target_page.shape),
|
||||
dtype=target_page.dtype,
|
||||
n_images=n_pages,
|
||||
is_batch=True,
|
||||
spacing=_get_resolution(target_page)["resolution"],
|
||||
)
|
||||
else:
|
||||
props = ImageProperties(
|
||||
shape=target_page.shape,
|
||||
dtype=target_page.dtype,
|
||||
is_batch=False,
|
||||
spacing=_get_resolution(target_page)["resolution"],
|
||||
)
|
||||
|
||||
return props
|
||||
|
||||
def close(self) -> None:
|
||||
if self._fh is not None:
|
||||
self._fh.close()
|
||||
|
||||
super().close()
|
||||
|
||||
# ------------------------------
|
||||
# Add-on Interface inside imopen
|
||||
# ------------------------------
|
||||
|
||||
def iter_pages(self, index=..., **kwargs):
|
||||
"""Yield pages from a TIFF file.
|
||||
|
||||
This generator walks over the flat index of the pages inside an
|
||||
ImageResource and yields them in order.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
index : int
|
||||
The index of the series to yield pages from. If Ellipsis, walk over
|
||||
the file's flat index (and ignore individual series).
|
||||
kwargs : Any
|
||||
Additional kwargs are passed to TiffPage's ``as_array`` method.
|
||||
|
||||
Yields
|
||||
------
|
||||
page : np.ndarray
|
||||
A page stored inside the TIFF file.
|
||||
|
||||
"""
|
||||
|
||||
if index is Ellipsis:
|
||||
pages = self._fh.pages
|
||||
else:
|
||||
pages = self._fh.series[index]
|
||||
|
||||
for page in pages:
|
||||
yield page.asarray(**kwargs)
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 773 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user