-
-*/
-div.bd-toc {
- position: sticky;
-}
-
-/* Set max width of container */
-div.container-xl {
- max-width: 1400px;
-}
diff --git a/python/cucim/docs/_static/images/RAPIDS_cuCIM.png b/python/cucim/docs/_static/images/RAPIDS_cuCIM.png
deleted file mode 100644
index aa7ed8cdc..000000000
Binary files a/python/cucim/docs/_static/images/RAPIDS_cuCIM.png and /dev/null differ
diff --git a/python/cucim/docs/api_reference/cucim.CuImage.rst b/python/cucim/docs/api_reference/cucim.CuImage.rst
deleted file mode 100644
index e66690f20..000000000
--- a/python/cucim/docs/api_reference/cucim.CuImage.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-cucim.CuImage
--------------
-
-.. autoclass:: cucim.CuImage
- :members:
diff --git a/python/cucim/docs/api_reference/cucim.clara.filesystem.CuFileDriver.rst b/python/cucim/docs/api_reference/cucim.clara.filesystem.CuFileDriver.rst
deleted file mode 100644
index 9c7e0fa84..000000000
--- a/python/cucim/docs/api_reference/cucim.clara.filesystem.CuFileDriver.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-cucim.clara.filesystem.CuFileDriver
------------------------------------
-
-.. autoclass:: cucim.clara.filesystem.CuFileDriver
- :members:
diff --git a/python/cucim/docs/api_reference/cucim.clara.filesystem.rst b/python/cucim/docs/api_reference/cucim.clara.filesystem.rst
deleted file mode 100644
index 995fc0860..000000000
--- a/python/cucim/docs/api_reference/cucim.clara.filesystem.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-cucim.clara.filesystem
-----------------------
-
-.. automodule:: cucim.clara.filesystem
- :members:
diff --git a/python/cucim/docs/api_reference/cucim.clara.io.Device.rst b/python/cucim/docs/api_reference/cucim.clara.io.Device.rst
deleted file mode 100644
index 5e5cd0b3c..000000000
--- a/python/cucim/docs/api_reference/cucim.clara.io.Device.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-cucim.clara.io.Device
----------------------
-
-.. autoclass:: cucim.clara.io.Device
- :members:
diff --git a/python/cucim/docs/api_reference/cucim.clara.io.rst b/python/cucim/docs/api_reference/cucim.clara.io.rst
deleted file mode 100644
index 230d12541..000000000
--- a/python/cucim/docs/api_reference/cucim.clara.io.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-cucim.clara.io
---------------
-
-.. autoclass:: cucim.clara.io.DeviceType
diff --git a/python/cucim/docs/api_reference/cucim.rst b/python/cucim/docs/api_reference/cucim.rst
deleted file mode 100644
index f67c1fc3a..000000000
--- a/python/cucim/docs/api_reference/cucim.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-cucim
------
-
-.. testsetup::
-
- from cucim import *
-
-.. automodule:: cucim
- :members:
diff --git a/python/cucim/docs/api_reference/index.md b/python/cucim/docs/api_reference/index.md
deleted file mode 100644
index 951c37970..000000000
--- a/python/cucim/docs/api_reference/index.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# API Reference
-
-
-```{toctree}
-:glob:
-:hidden:
-
-cucim
-cucim.CuImage
-cucim.clara.io
-cucim.clara.io.Device
-cucim.clara.filesystem
-cucim.clara.filesystem.CuFileDriver
-
-```
-
-## Python API
-
-```{toctree}
-:maxdepth: 2
-
-cucim
-cucim.CuImage
-cucim.clara.io
-cucim.clara.io.Device
-cucim.clara.filesystem
-cucim.clara.filesystem.CuFileDriver
-```
diff --git a/python/cucim/docs/conf.py b/python/cucim/docs/conf.py
deleted file mode 100644
index 0dbe8b296..000000000
--- a/python/cucim/docs/conf.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-
-import os
-
-# Versioning
-with open("../../../VERSION") as f:
- version_long = f.readline().strip()
-
-extensions = [
- "sphinx.ext.autodoc",
- "sphinx.ext.autosummary",
- "sphinx.ext.coverage",
- "sphinx.ext.doctest",
- "sphinx.ext.extlinks",
- "sphinx.ext.ifconfig",
- "sphinx.ext.napoleon",
- "sphinx.ext.todo",
- "sphinx.ext.viewcode",
- "sphinx.ext.intersphinx",
- "sphinxcontrib.bibtex",
- "myst_nb",
- "sphinx_copybutton",
- "sphinx_togglebutton",
- "sphinx_panels",
- "ablog",
- "sphinxemoji.sphinxemoji",
-]
-# source_suffix = {
-# '.rst': 'restructuredtext',
-# '.ipynb': 'myst-nb',
-# '.myst': 'myst-nb',
-# }
-master_doc = "index"
-project = "cuCIM"
-year = "2020-2021"
-author = "NVIDIA"
-copyright = "{0}, {1}".format(year, author)
-version = release = version_long
-
-pygments_style = "trac"
-templates_path = ["."]
-extlinks = {
- "issue": ("https://github.com/rapidsai/cucim/issues/%s", "#"),
- "pr": ("https://github.com/rapidsai/cucim/pull/%s", "PR #"),
-}
-# on_rtd is whether we are on readthedocs.org
-on_rtd = os.environ.get("READTHEDOCS", None) == "True"
-
-if not on_rtd: # only set the theme if we're building docs locally
- html_theme = "pydata_sphinx_theme" # 'sphinx_book_theme'
- # https://github.com/pandas-dev/pydata-sphinx-theme
- # https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/index.html
-
-html_use_smartypants = True
-html_last_updated_fmt = "%b %d, %Y"
-html_split_index = False
-# html_sidebars = {
-# '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
-# }
-html_short_title = "%s-%s" % (project, version)
-
-napoleon_use_ivar = True
-napoleon_use_rtype = False
-napoleon_use_param = False
-
-html_show_sourcelink = True
-
-# Options for linkcheck builder
-#
-# Reference
-# : https://www.sphinx-doc.org/en/master/usage/configuration.html?highlight=linkcheck#options-for-the-linkcheck-builder) # noqa
-linkcheck_ignore = [r"^\/", r"^\.\."]
-
-# Options for sphinx.ext.todo
-# (reference: https://www.sphinx-doc.org/en/master/usage/extensions/todo.html)
-
-todo_include_todos = True
-
-# Options for sphinxemoji.sphinxemoji
-# (reference: https://sphinxemojicodes.readthedocs.io/en/stable/#supported-codes) # noqa
-
-
-# Options for myst
-# (reference: https://myst-parser.readthedocs.io/en/latest/index.html)
-
-# https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html#markdown-figures # noqa
-myst_enable_extensions = ["colon_fence"]
-
-# Options for pydata-sphinx-theme
-# (reference: https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/configuring.html) # noqa
-
-html_static_path = ["_static"]
-html_css_files = [
- "css/custom.css",
-]
-
-html_theme_options = {
- "external_links": [
- {
- "name": "Submit Issue",
- "url": "https://github.com/rapidsai/cucim/issues/new/choose", # noqa
- }
- ]
-}
-
-# Options for Sphinx Book Theme
-# (reference: https://github.com/executablebooks/sphinx-book-theme/blob/master/setup.py) # noqa
-
-# html_theme_options = {
-# "repository_url": "https://github.com/rapidsai/cucim",
-# "use_repository_button": True,
-# "use_issues_button": True,
-# #"use_edit_page_button": True,
-# "repository_branch": "dev",
-# #"path_to_docs": "python/cucim/docs",
-# "home_page_in_toc": True,
-# }
-
-# Options for myst-nb
-# (reference: https://myst-nb.readthedocs.io/en/latest/)
-
-# Prevent the following error
-# MyST NB Configuration Error:
-# `nb_render_priority` not set for builder: doctest
-nb_render_priority = {"doctest": ()}
-
-# Prevent creating jupyter_execute folder in dist
-# https://myst-nb.readthedocs.io/en/latest/use/execute.html#executing-in-temporary-folders # noqa
-execution_in_temp = True
-jupyter_execute_notebooks = "off"
diff --git a/python/cucim/docs/development/index.md b/python/cucim/docs/development/index.md
deleted file mode 100644
index 459110d34..000000000
--- a/python/cucim/docs/development/index.md
+++ /dev/null
@@ -1 +0,0 @@
-# Development
diff --git a/python/cucim/docs/getting_started/index.md b/python/cucim/docs/getting_started/index.md
deleted file mode 100644
index 0dcd1d2b2..000000000
--- a/python/cucim/docs/getting_started/index.md
+++ /dev/null
@@ -1,172 +0,0 @@
-# Getting Started
-
-```{toctree}
-:glob:
-:hidden:
-
-../notebooks/Basic_Usage.ipynb
-../notebooks/Accessing_File_with_GDS.ipynb
-../notebooks/File-access_Experiments_on_TIFF.ipynb
-../notebooks/Multi-thread_and_Multi-process_Tests.ipynb
-../notebooks/Working_with_DALI.ipynb
-../notebooks/Working_with_Albumentation.ipynb
-```
-
-## Installation
-
-Please download the latest SDK package (`cuCIM-v23.12.01-linux.tar.gz`).
-
-Untar the downloaded file.
-
-```bash
-mkdir -p cuCIM-v23.12.01
-tar -xzvf cuCIM-v23.12.01-linux.tar.gz -C cuCIM-v23.12.01
-
-cd cuCIM-v23.12.01
-```
-
-## Run command
-
-Executing the `./run` command shows the available commands:
-
-```bash
-./run
-```
-```
-USAGE: ./run [command] [arguments]...
-
-Global Arguments
-
-Command List
- help ---------------------------- Print detailed description for a given argument (command name)
- Example
- download_testdata --------------- Download test data from Docker Hub
- launch_notebooks ---------------- Launch jupyter notebooks
- Build
- build_train --------------------- Build Clara Train Docker image with cuCIM (& OpenSlide)
- build_examples ------------------ Build cuCIM C++ examples
-```
-
-`./run help <command>` shows detailed information about that command.
-
-```bash
-./run help build_train
-```
-```
-Build Clara Train Docker image with cuCIM (& OpenSlide)
-
-Build image from docker/Dockerfile-claratrain
-
-Arguments:
- $1 - docker image name (default:cucim-train)
-```
-
-### download_testdata
-
-It downloads test data from Docker Hub (`gigony/svs-testdata:little-big`) and makes it available in the `notebooks/input` folder.
-
-The folder has the following four files.
-
-- `TUPAC-TR-488.svs`
-- `TUPAC-TR-467.svs`
-- `image.tif`
-- `image2.tif`
-
-#### Test Dataset
-
-`TUPAC-TR-488.svs` and `TUPAC-TR-467.svs` are from the dataset
-of Tumor Proliferation Assessment Challenge 2016 (TUPAC16 | MICCAI Grand Challenge).
-
-- Website:
-- Data link:
-
-#### Converted files
-
-- `image.tif` : 256x256 multi-resolution/tiled TIF conversion of TUPAC-TR-467.svs
-- `image2.tif` : 256x256 multi-resolution/tiled TIF conversion of TUPAC-TR-488.svs
-
-
-### launch_notebooks
-
-It launches a **Jupyter Lab** instance so that the user can experiment with cuCIM.
-
-After executing the command, type a password for the instance and open a web browser to access the instance.
-
-```bash
-./run launch_notebooks
-```
-
-```bash
-...
-Port 10001 would be used...(http://172.26.120.129:10001)
-2021-02-13 01:12:44 $ docker run --runtime nvidia --gpus all -it --rm -v /home/repo/cucim/notebooks:/notebooks -p 10001:10001 cucim-jupyter -c echo -n 'Enter New Password: '; jupyter lab --ServerApp.password="$(python3 -u -c "from jupyter_server.auth import passwd;pw=input();print(passwd(pw));" | egrep 'sha|argon')" --ServerApp.root_dir=/notebooks --allow-root --port=10001 --ip=0.0.0.0 --no-browser
-Enter New Password:
-[I 2021-02-13 01:12:47.981 ServerApp] dask_labextension | extension was successfully linked.
-[I 2021-02-13 01:12:47.981 ServerApp] jupyter_server_proxy | extension was successfully linked.
-...
-```
-
-### build_train
-
-It builds an image on top of the Clara Train SDK image. The image installs other useful Python packages as well as the cuCIM wheel file.
-
-`nvcr.io/nvidian/dlmed/clara-train-sdk:v3.1-ga-qa-5` is used and `docker/Dockerfile-claratrain` has the recipe of the image.
-
-You will need permission to access the `nvidian/dlmed` group in NGC.
-
-```bash
-./run build_train
-
-docker run -it --rm cucim-train /bin/bash
-```
-
-### build_examples
-
-It builds the C++ examples in the `examples/cpp` folder using `cmake` inside the `cucim-cmake` image, which is built at runtime.
-
-After the build, it copies the built binary into the `bin` folder and shows how to execute it.
-
-```bash
-./run build_examples
-```
-
-```bash
-...
-
-Execute the binary with the following commands:
- # Set library path
- export LD_LIBRARY_PATH=/ssd/repo/cucim/dist/install/lib:$LD_LIBRARY_PATH
- # Execute
- ./bin/tiff_image notebooks/input/image.tif .
-```
-
-Running it prints some metadata and creates two files -- `output.ppm` and `output2.ppm`.
-
-`.ppm` files can be viewed with `eog` on Ubuntu.
-```
-$ ./bin/tiff_image notebooks/input/image.tif .
-[Plugin: cucim.kit.cuslide] Loading...
-[Plugin: cucim.kit.cuslide] Loading the dynamic library from: cucim.kit.cuslide@23.12.01.so
-[Plugin: cucim.kit.cuslide] loaded successfully. Version: 0
-Initializing plugin: cucim.kit.cuslide (interfaces: [cucim::io::IImageFormat v0.1]) (impl: cucim.kit.cuslide)
-is_loaded: true
-device: cpu
-metadata: {"key": "value"}
-dims: YXC
-shape: (26420, 19920, 3)
-size('XY'): (19920, 26420)
-channel_names: (R, G, B)
-
-is_loaded: true
-device: cpu
-metadata: {"key": "value"}
-dims: YXC
-shape: (1024, 1024, 3)
-size('XY'): (1024, 1024)
-channel_names: (R, G, B)
-[Plugin: cucim.kit.cuslide] Unloaded.
-
-$ eog output.ppm
-$ eog output2.ppm
-```
diff --git a/python/cucim/docs/index.md b/python/cucim/docs/index.md
deleted file mode 100644
index 2eddbce6d..000000000
--- a/python/cucim/docs/index.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
-```{toctree}
-:maxdepth: 3
-:hidden:
-
-getting_started/index
-api_reference/index
-release_notes/index
-roadmap/index
-```
-
-
-
-# cuCIM Documentation
-
-The latest version is [Version 23.12.01](release_notes/v23.12.01.md).
-
-**cuCIM** is a toolkit that provides GPU-accelerated I/O, image processing, and computer vision primitives for N-dimensional images, with a focus on biomedical imaging.
-
-:::{figure-md} fig-cucim-architecture
-:class: myclass
-
-![RAPIDS cuCIM architecture](_static/images/RAPIDS_cuCIM.png)
-
-RAPIDS cuCIM Architecture
-:::
-
-
-
-
-
-
diff --git a/python/cucim/docs/release_notes/index.md b/python/cucim/docs/release_notes/index.md
deleted file mode 100644
index 84183cc4d..000000000
--- a/python/cucim/docs/release_notes/index.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# Release Notes
-
-```{toctree}
-:glob:
-:hidden:
-:maxdepth: 2
-
-v0.19.0
-v0.18.3
-v0.18.2
-v0.18.1
-v0.18.0
-v0.3.0
-v0.2.0
-v0.1.1
-v0.1.0
-```
-
-## Version 0.19
-
-```{toctree}
-:maxdepth: 2
-
-v0.19.0
-```
-
-## Version 0.18
-
-```{toctree}
-:maxdepth: 2
-
-v0.18.3
-v0.18.2
-v0.18.1
-v0.18.0
-```
-
-## Version 0.3
-
-```{toctree}
-:maxdepth: 2
-
-v0.3.0
-```
-
-## Version 0.2
-
-```{toctree}
-:maxdepth: 2
-
-v0.2.0
-```
-## Version 0.1
-
-```{toctree}
-:maxdepth: 2
-
-v0.1.1
-v0.1.0
-```
diff --git a/python/cucim/docs/release_notes/v0.1.0.md b/python/cucim/docs/release_notes/v0.1.0.md
deleted file mode 100644
index 0d2f40150..000000000
--- a/python/cucim/docs/release_notes/v0.1.0.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Version 0.1.0 (October 28, 2020)
-
-## What is provided in the package?
-
-- API Documents
-- C++ & Python library packages
-- Example project (using CMake. For C++)
-- Example code in a Jupyter notebook (with a docker image)
-
-## Features
-
-For this first release, the feature set is limited, focusing on the generic tiled/multi-resolution TIFF file format (JPEG-compressed RGB images).
-
-- Loading part of the image using read_region() API
-- Saving the loaded image in .ppm format (loadable by 'eog' viewer in Ubuntu or PIL library in Python)
-
-## Limitations
-
-- The following feature is not implemented yet
- - Accessing image data through container() API (in C++) or as a numpy array (using `__array_interface__` in Python)
-- Errors are not handled properly yet (e.g., loading a non-existent file causes a crash)
-- Some metadata (e.g., physical size) is hard-coded for now
-- C++ library is forced to set `_GLIBCXX_USE_CXX11_ABI` to 0 due to [Dual ABI](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html) problem
- - Will package CXX11 ABI library separately later
diff --git a/python/cucim/docs/release_notes/v0.1.1.md b/python/cucim/docs/release_notes/v0.1.1.md
deleted file mode 100644
index 7ed9b5832..000000000
--- a/python/cucim/docs/release_notes/v0.1.1.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Version 0.1.1 (November 3, 2020)
-
-## What's new?
-
-The following features are implemented.
-- Access image data through `container()` API (in C++) or as a numpy array (using `__array_interface__` in Python)
- - [Example](../notebooks/Basic_Usage.html#array-interface-support)
-- Remove hard-coded metadata for `resolutions`
- - [Example](../notebooks/Basic_Usage.html#see-metadata)
-- Sort resolution levels (level 0: the largest resolution) for `CuImage::read_region()` method
- - Add `TIFF::level_ifd(size_t level_index)` method
-- Pass SWIPAT
-
-## Fixes
-
-- Fix a crash that occurs when opening a non-existing file
-- Fix an error that occurs when loading a TIFF image that has `TIFFTAG_JPEGTABLES` tag
- - `Quantization table 0x00 was not defined` message can be shown
diff --git a/python/cucim/docs/release_notes/v0.18.0.md b/python/cucim/docs/release_notes/v0.18.0.md
deleted file mode 100644
index f4d16f682..000000000
--- a/python/cucim/docs/release_notes/v0.18.0.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Version 0.18.0 (March 16, 2021)
-
-## What's new?
-
-- The namespace of the project is changed from `cuimage` to `cucim`, and the project name is now `cuCIM`
-- Support Deflate(zlib) compression in Generic TIFF Format.
- - [libdeflate](https://github.com/ebiggers/libdeflate) library is used to decode the deflate-compressed data.
diff --git a/python/cucim/docs/release_notes/v0.18.1.md b/python/cucim/docs/release_notes/v0.18.1.md
deleted file mode 100644
index 0a1d15001..000000000
--- a/python/cucim/docs/release_notes/v0.18.1.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Version 0.18.1 (March 17, 2021)
-
-## What's new?
-
-- Disable using cuFile
- - Remove warning messages when libcufile.so is not available.
- - `[warning] CuFileDriver cannot be open. Falling back to use POSIX file IO APIs.`
diff --git a/python/cucim/docs/release_notes/v0.18.2.md b/python/cucim/docs/release_notes/v0.18.2.md
deleted file mode 100644
index 44b81fba3..000000000
--- a/python/cucim/docs/release_notes/v0.18.2.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Version 0.18.2 (March 29, 2021)
-
-## What's new?
-
-- Use the white background only for Philips TIFF files.
-  - Generic TIFF files have a black background by default.
-
-## Fixes
-
-- Fix upside-down images for TIFF files that are not RGB, tiled images with JPEG/Deflate-compressed tiles.
-  - Use the slow path if the image is not an RGB, tiled image with JPEG/Deflate-compressed tiles.
- - Show an error message if the out-of-boundary cases are requested with the slow path.
- - `ValueError: Cannot handle the out-of-boundary cases for a non-RGB image or a non-Jpeg/Deflate-compressed image.`
diff --git a/python/cucim/docs/release_notes/v0.18.3.md b/python/cucim/docs/release_notes/v0.18.3.md
deleted file mode 100644
index c3dead3c1..000000000
--- a/python/cucim/docs/release_notes/v0.18.3.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Version 0.18.3 (April 16, 2021)
-
-## Fixes
-
-- Fix memory leaks that occur when reading completely out-of-boundary regions.
diff --git a/python/cucim/docs/release_notes/v0.19.0.md b/python/cucim/docs/release_notes/v0.19.0.md
deleted file mode 100644
index 00b2b5083..000000000
--- a/python/cucim/docs/release_notes/v0.19.0.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Version 0.19.0 (April 19, 2021)
-
-## What's new?
-
-- The first release of cuClaraImage + [cupyimg](https://github.com/mritools/cupyimg) as a single project, `cuCIM`.
-  - The `cucim.skimage` package is added from `cupyimg`.
-  - CuPy (>=9.0.0b3), scipy, and scikit-image are required to use cuCIM's scikit-image-compatible API.
diff --git a/python/cucim/docs/release_notes/v0.2.0.md b/python/cucim/docs/release_notes/v0.2.0.md
deleted file mode 100644
index e5817c379..000000000
--- a/python/cucim/docs/release_notes/v0.2.0.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Version 0.2.0 (December 18, 2020)
-
-## What's new?
-
-The following features are implemented.
-- Make it work without CUDA runtime installed
- - CUDA 11.0 runtime is embedded in the .whl file
-- Develop a wrapper for cufile API
- - Refer to `Accessing File with GDS` (/notebooks/Accessing_File_with_GDS.html) notebook
- - Did some experiments on accessing TIFF files (see `File-access Experiments on TIFF File` (/notebooks/File-access_Experiments_on_TIFF.html) notebook)
-- Support loading [Philips TIFF](https://openslide.org/formats/philips/) files
-  - Loading multi-resolution images and associated images (such as 'macro' and 'label') from the TIFF Image File Directory (IFD) is available
- - Please see `Basic Usage` (/notebooks/Basic_Usage.html#associated-images) notebook to know how to access the associated images.
-
- ```{admonition} Characteristic of Philips TIFF format
- As specified in [Philips format](https://openslide.org/formats/philips/),
-
- "slides may omit pixel data for TIFF tiles not in an ROI; this is represented as a TileOffset of 0 and a TileByteCount of 0. When such tiles are downsampled into a tile that does contain pixel data, their contents are rendered as white pixels."
-
- For the above reason, some Philips TIFF images can actually hold important information (‘tiles that are not ROIs or tissues’) which can expedite pre-processing by discarding unnecessary tiles. For feature parity with OpenSlide, cuClaraImage also renders such tiles as white pixels. Please let us know and suggest APIs for getting the information if such non-ROI region information is useful to you.
- ```
- - The following tasks remain for feature-parity with OpenSlide
- - Support Philips TIFF associated image from metadata
- - Expose XML metadata of the Philips TIFF file as JSON
-- Provide an example/plan for the interoperability with DALI
- - Created a notebook for the feasibility and plan (see `Working with DALI` (/notebooks/Working_with_DALI.html) notebook)
-
-## Fixes/Improvements
-
-- Fix again for the error that occurs when loading a TIFF image that has `TIFFTAG_JPEGTABLES` tag
- - `ERROR in line 126 while reading JPEG header tables: Not a JPEG file: starts with 0x01 0x00` message can be shown
-- Force-reinstall cucim Python package in the Tox environment whenever `gen_docs` or `gen_docs_dev` command is executed
diff --git a/python/cucim/docs/release_notes/v0.3.0.md b/python/cucim/docs/release_notes/v0.3.0.md
deleted file mode 100644
index bac97083b..000000000
--- a/python/cucim/docs/release_notes/v0.3.0.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Version 0.3.0 (February 16, 2021)
-
-## What's new?
-
-- A new name and namespace (currently `cuClaraImage` and `cucim`) will be picked in `v0.4.0` once it's finalized
-- Add metadata and associated images for Philips TIFF Format
- - Support Philips TIFF associated image from XML metadata
-- Expose metadata of the image as JSON
- - `raw_metadata` property returns the image description of the first IFD in the TIFF image
- - `resolution_dim_start` property of `CuImage` is removed
- - `physical_pixel_size` property is renamed to `spacing`
- - `ndim`/`origin`/`direction`/`coord_sys`/`spacing_units` properties are added
- - Please see `Basic Usage` (/notebooks/Basic_Usage.html#see-metadata) notebook to know how to access metadata.
-- Support reading out of boundary region
- - `read_region()` method now accepts a region that is out of the image boundary
- - `size` parameter accepts values that are up to the size of the highest-resolution image
-  - The out-of-boundary area is filled with white
-- Showcase the interoperability with DALI
- - Please see `Working with DALI` (/notebooks/Working_with_DALI.html) notebook
-
-## Fixes/Improvements
-
-- Fix wrong parameter interpretation (`size` in the `read_region()` method). Now only `location` uses level-0 based coordinates (the level-0 reference frame); `size` is the output image size. (Thanks `@Behrooz Hashemian`!)
-- Static link with cufile when [libcufile.a is available](https://docs.google.com/document/d/1DQ_T805dlTcDU9bGW32E2ak5InX8iUcNI7Tq_lXAtLc/edit?ts=5f90bc5f) -- Implemented but disabled for now
-- Fix a memory leak for cuslide::tiff::TIFF object (248 bytes) in CuImage class.
-- Fix incorrect method visibility in a plugin file (.so)
-- Replace malloc with better allocator for small-sized memory
- - Use a custom allocator(pmr) for metadata data
-- Copy data using `std::vector::insert()` instead of `std::vector::push_back()`
- - Small improvement (when opening TIFF file), but benchmark result showed that time for assigning 50000 tile offset/size (uint64_t) is reduced from 118 us to 8 us
-- Parameterize input library/image for testing
-- Update test input path
- - Add test data under `test_data/private` : See `test_data/README.md` file.
-- Setup development environment with VSCode (in addition to CLion)
-- Use a VSCode plugin for local test execution
- - Now it uses `matepek.vscode-catch2-test-adapter` extension
- -
-- Prevent relative path problem of .so with no DT_SONAME
-- Refactoring
- - Add Development environment for VSCode
- - Update run script
- - Add settings for VSCode
- - Refactor CMakeLists.txt
- - Add definition `_GLIBCXX_USE_CXX11_ABI=0` to all sub directories
- - Compile multiple architectures for CUDA Kernels
- - Parameterize input files for tests
- - Add `test_data` folder for test data
- - plugin folder is from `CUCIM_TEST_PLUGIN_PATH` environment variable now (with static plugin name (cucim.kit.cuslide@0.3.0.so))
- - Move cucim_malloc() to memory_manager.cu
-
-## Limitations
-
-- Some metadata (`origin`/`direction`/`coord_sys`/`spacing`/`spacing_units`) doesn't have correct values for now.
diff --git a/python/cucim/docs/requirements.txt b/python/cucim/docs/requirements.txt
deleted file mode 100644
index f211153ad..000000000
--- a/python/cucim/docs/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Sphinx==4.5.0
-sphinx-autobuild
-myst-parser
-sphinx-book-theme
-numpy
-matplotlib
-ipywidgets
-pandas
-nbclient
-myst-nb
-sphinx-togglebutton
-sphinx-copybutton
-plotly<5
-sphinxcontrib-bibtex<2.0.0 # https://github.com/executablebooks/jupyter-book/issues/1137
-sphinx-thebe
-sphinx-panels
-ablog
-docutils==0.16 # 0.17 causes error. https://github.com/executablebooks/MyST-Parser/issues/343
-pydata_sphinx_theme
-sphinxemoji
-cupy-cuda110
-scipy
-scikit-image
diff --git a/python/cucim/docs/roadmap/index.md b/python/cucim/docs/roadmap/index.md
deleted file mode 100644
index 7ac8596a0..000000000
--- a/python/cucim/docs/roadmap/index.md
+++ /dev/null
@@ -1,370 +0,0 @@
-# Roadmap
-
-
-
-
-
-
-```{eval-rst}
-The following list is on the road |:smile:|
-```
-
-## cuCIM
-
-### {fa}`calendar-alt,text-info mr-1` `v0.1`
-
-- {fa}`check,text-success mr-1` Abstract C++ API -- [v0.1.0](../release_notes/v0.1.0.md)
-- {fa}`check,text-success mr-1` Benchmark with openslide (for generic tiff file) : link -- [v0.1.0](../release_notes/v0.1.0.md)
-- {fa}`check,text-success mr-1` Usage with C++ API -- [v0.1.0](../release_notes/v0.1.0.md)
-- {fa}`check,text-success mr-1` Implement Python API -- [v0.1.0](../release_notes/v0.1.0.md)
-- {fa}`check,text-success mr-1` Usage with Python API -- [v0.1.0](../release_notes/v0.1.0.md)
- 1. Setup document/build system
- 1. Package it
-- {fa}`check,text-success mr-1` Sort resolution levels (level 0: the largest resolution) for `CuImage::read_region()` method -- [v0.1.1](../release_notes/v0.1.1.md)
-- {fa}`check,text-success mr-1` Fix a crash that occurs when opening a non-existing file -- [v0.1.1](../release_notes/v0.1.1.md)
-- {fa}`check,text-success mr-1` Fix an error that occurs when loading a TIFF image that has `TIFFTAG_JPEGTABLES` tag -- [v0.1.1](../release_notes/v0.1.1.md)
- - `Quantization table 0x00 was not defined` message can be shown
-- {fa}`check,text-success mr-1` Sort resolution levels (level 0: the largest resolution) for `CuImage::read_region()` method -- [v0.1.1](../release_notes/v0.1.1.md)
-- {fa}`check,text-success mr-1` Pass SWIPAT -- [v0.1.1](../release_notes/v0.1.1.md)
-- {fa}`check,text-success mr-1` Ignore link check for relative link with header that starts with `/` or `..` -- [v0.1.1](../release_notes/v0.1.1.md)
-
-### {fa}`calendar-alt,text-info mr-1` `v0.2`
-
-- {fa}`check,text-success mr-1` Make it work with various CUDA versions -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Develop a wrapper for cufile API -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Support loading [Philips TIFF](https://openslide.org/formats/philips/) files
- - {fa}`check,text-success mr-1` Support Philips TIFF multi-resolution images -- [v0.2.0](../release_notes/v0.2.0.md)
- - {fa}`check,text-success mr-1` Support Philips TIFF associated image from IFD -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Provide an example/plan for the interoperability with DALI -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Fix again for the error that occurs when loading a TIFF image that has `TIFFTAG_JPEGTABLES` tag -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Force-reinstall cucim Python package in the Tox environment whenever `gen_docs` or `gen_docs_dev` command is executed -- [v0.2.0](../release_notes/v0.2.0.md)
-
-### {fa}`calendar-alt,text-info mr-1` `v0.3`
-
-- {fa}`check,text-success mr-1` Add metadata and associated images for Philips TIFF Format
- - {fa}`check,text-success mr-1` Support Philips TIFF associated image from XML metadata -- [v0.3.0](../release_notes/v0.3.0.md)
-- {fa}`check,text-success mr-1` Expose metadata of the image as JSON -- [v0.3.0](../release_notes/v0.3.0.md)
-- {fa}`check,text-success mr-1` Support reading out of boundary region -- [v0.3.0](../release_notes/v0.3.0.md)
-- {fa}`check,text-success mr-1` Showcase the interoperability with DALI -- [v0.3.0](../release_notes/v0.3.0.md)
-
-
-### {fa}`calendar-alt,text-info mr-1` `v0.18`
-
-- {fa}`check,text-success mr-1` Support Deflate(zlib)-compressed RGB Tiff Image -- [v0.18.0](../release_notes/v0.18.0.md)
-- {fa}`check,text-success mr-1` Change the namespaces (`cuimage` to `cucim`) -- [v0.18.0](../release_notes/v0.18.0.md)
-
-### {fa}`calendar-alt,text-info mr-1` `v0.19`
-
-- Refactor the cupyimg package to incorporate it in the adaptation layer of cuCIM. Change the namespaces
-- Support `__cuda_array_interface__` and DLPack object in Python API
-- Support loading data to CUDA memory
-- Implement cache mechanism for tile-based image formats
-
-### {fa}`calendar-alt,text-info mr-1` `v0.20`
-
-- Make use of nvJPEG to decode TIFF Files
-- Support .svs format with nvJPEG2000
-- Design a plug-in mechanism for developing CUDA based 2D/3D imaging filters
-- Implement a filter (example: Otsu Thresholding)
-- Support loading MHD files
-
-### {fa}`calendar-alt,text-info mr-1` `v0.21`
-
-- Support JPEG, Jpeg 2000, PNG, BMP formats
-- Support MIRAX/3DHISTECH (.mrxs) format
-- Support LEICA (.scn) format
-
-### {fa}`calendar-alt,text-info mr-1` `v0.22`
-
-- Design a CT bone segmentation filter
-- Provide a robust CI/CD system
-- Define KPIs and publish report
-- Update project to use the latest [Carbonite SDK](https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/carb/Framework.html) for supporting plug-in architecture
-
-## TODOs
-
-### Image Format
-
-#### Generic TIFF(.tif)
-
-- {fa}`check,text-success mr-1` Access image data through container() API (in C++) or as a numpy array (using `__array_interface__` in Python) -- [v0.1.1](../release_notes/v0.1.1.md)
-- {fa}`check,text-success mr-1` Fix a crash that occurs when opening a non-existing file -- [v0.1.1](../release_notes/v0.1.1.md)
-- {fa}`check,text-success mr-1` Fix an error that occurs when loading a TIFF image that has `TIFFTAG_JPEGTABLES` tag -- [v0.1.1](../release_notes/v0.1.1.md)
- - `Quantization table 0x00 was not defined` message can be shown
-- {fa}`check,text-success mr-1` Fix again for the error that occurs when loading a TIFF image that has `TIFFTAG_JPEGTABLES` tag -- [v0.2.0](../release_notes/v0.2.0.md)
- - `ERROR in line 126 while reading JPEG header tables: Not a JPEG file: starts with 0x01 0x00` message can be shown
-- {fa}`check,text-success mr-1` Expose metadata of the TIFF file as JSON -- [v0.3.0](../release_notes/v0.3.0.md)
-- {fa}`check,text-success mr-1` Support reading out of boundary region -- [v0.3.0](../release_notes/v0.3.0.md)
-- {fa}`check,text-success mr-1` Support Deflate(zlib)-compressed RGB Tiff Image -- [v0.18.0](../release_notes/v0.18.0.md)
-- Implement cache mechanism for tile-based image formats -- [v0.19.1](../release_notes/v0.19.1.md)
-- Use CuFileDriver class for reading files
-- Make use of nvJPEG to decode TIFF Files -- [v0.20.0](../release_notes/v0.20.0.md)
-
-- Remove hard-coded metadata (Fill correct values for `cucim::io::format::ImageMetadataDesc`)
- - {fa}`check,text-success mr-1` `resolutions` -- [v0.1.1](../release_notes/v0.1.1.md)
- - `metadata`
-- Check if the `tile_rester` memory is freed by jpeg-turbo or not
- - {fa}`check,text-success mr-1` `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:365` in `IFD::read_region_tiles_libjpeg()` -- [v0.3.0](../release_notes/v0.3.0.md)
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/cuslide.cpp:123` in `parser_parse` -- [v0.19.1](../release_notes/v0.19.1.md)
-- Fill correct metadata information for `CuImage::read_region()`
- - `cpp/src/cucim.cpp:417` -- [v0.19.1](../release_notes/v0.19.1.md)
-- Check and use `ifd->samples_per_pixel()` once we can get RGB data instead of RGBA
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:280` in `IFD::read_region_tiles_libjpeg()` -- [v0.19.1](../release_notes/v0.19.1.md)
-- Consider endianness of the .tif file
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:329` in `IFD::read_region_tiles_libjpeg()`
-- Consider tile's depth tag
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:329` in `IFD::read_region_tiles_libjpeg()`
-- Make `file_handle_` object to pointer
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/tiff.cpp:50` in `TIFF::TIFF()`
-- Remove assumption of sub-resolution dims to 2
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/tiff.cpp:140` in `TIFF::read()`
-
-#### [Philips TIFF](https://openslide.org/formats/philips/) (.tif)
-
-- {fa}`check,text-success mr-1` Support Philips TIFF multi-resolution images -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Support Philips TIFF associated image from IFD -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Support Philips TIFF associated image from XML metadata -- [v0.3.0](../release_notes/v0.3.0.md)
-- {fa}`check,text-success mr-1` Expose XML metadata of the Philips TIFF file as JSON -- [v0.3.0](../release_notes/v0.3.0.md)
-
-#### .mhd
-
-- Support loading MHD files -- [v0.20.0](../release_notes/v0.20.0.md)
-
-#### .svs
-
-- Support .svs format with nvJPEG2000 -- [v0.20.0](../release_notes/v0.20.0.md)
-
-#### .png
-
-- Support .png with [libspng](https://github.com/randy408/libspng/) -- [v0.21.0](../release_notes/v0.21.0.md)
- - **libspng** is faster than **libpng** (but doesn't support encoding)
-
-#### .jpg
-
-- Support .jpg with libjpeg-turbo and nvJPEG -- [v0.21.0](../release_notes/v0.21.0.md)
-
-#### .jp2/.j2k
-
-- Support .jp2/.j2k files with OpenJpeg and nvJPEG2000 -- [v0.21.0](../release_notes/v0.21.0.md)
-
-#### .bmp
-
-- Support .bmp file natively -- [v0.21.0](../release_notes/v0.21.0.md)
-
-#### .mrxs
-
-- Support MIRAX/3DHISTECH (.mrxs) format -- [v0.21.0](../release_notes/v0.21.0.md)
-
-#### .scn
-
-- Support LEICA (.scn) format -- [v0.21.0](../release_notes/v0.21.0.md)
-
-#### .dcm
-
-- Support DICOM format
-- Support reading segmentation image instead of main pixel array
- - `examples/cpp/dicom_image/main.cpp:37`
-
-#### .iSyntax
-
-- Support Philips iSyntax format
- -
- -
-
-### Image Filter
-
-#### Basic Filter
-
-- Design a plug-in mechanism for developing CUDA based 2D/3D imaging filters -- [v0.20.0](../release_notes/v0.20.0.md)
-- Implement a filter (example: Otsu Thresholding) -- [v0.20.0](../release_notes/v0.20.0.md)
-
-#### Medical-specific Filter
-
-- Design a CT bone segmentation filter -- [v0.22.0](../release_notes/v0.22.0.md)
-
-### Performance Improvements
-
-- {fa}`check,text-success mr-1` Copy data using `std::vector::insert()` instead of `std::vector::push_back()` -- [v0.3.0](../release_notes/v0.3.0.md)
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:78` in `IFD::IFD()`
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:98` in `IFD::IFD()`
- - Benchmark result showed that time for assigning 50000 tile offset/size (uint64_t) is reduced from 118 us to 8 us.
-- {fa}`check,text-success mr-1` Replace malloc with better allocator for small-sized memory -- [v0.3.0](../release_notes/v0.3.0.md)
- - Use a custom allocator(pmr) for metadata data.
-- Try to use `__array_struct__`. Access to array interface could be faster
- -
- - Check the performance difference between python int vs python long later
- - `python/pybind11/cucim_py.cpp:234` in `get_array_interface()`
-- Check performance
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp:122` in `IFD::read()` : string concatenation
-
-### GPUDirect-Storage (GDS) Support
-
-- {fa}`check,text-success mr-1` Develop a wrapper for cufile API -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Static link with cufile when [libcufile.a is available](https://docs.google.com/document/d/1DQ_T805dlTcDU9bGW32E2ak5InX8iUcNI7Tq_lXAtLc/edit?ts=5f90bc5f) -- [v0.3.0](../release_notes/v0.3.0.md)
-
-### Interoperability
-
-- {fa}`check,text-success mr-1` Provide an example/plan for the interoperability with DALI -- [v0.2.0](../release_notes/v0.2.0.md)
-- {fa}`check,text-success mr-1` Showcase the interoperability with DALI -- [v0.3.0](../release_notes/v0.3.0.md)
-- Support `__cuda_array_interface__` and DLPack object in Python API -- [v0.19.1](../release_notes/v0.19.1.md)
- - https://docs.cupy.dev/en/stable/reference/interoperability.html#dlpack
- - https://github.com/pytorch/pytorch/pull/11984
-- Refactor the cupyimg package to incorporate it in the adaptation layer of cuCIM. Change the namespaces -- [v0.19.0](../release_notes/v0.19.0.md)
- - Implement/expose `scikit-image`-like image loading APIs (such as `imread`) and filtering APIs for cuCIM library by using cuCIM's APIs
-- Support DALI's CPU/GPU Tensor:
-- Support loading data to CUDA memory -- [v0.19.1](../release_notes/v0.19.1.md)
-- Consider adding `to_xxx()` methods in Python API
- - `examples/python/tiff_image/main.py:125`
-- Support byte-like object for CuImage object so that the following method works -- [v0.19.1](../release_notes/v0.19.1.md)
- ```python
- from PIL import Image
- ...
- #np_img_arr = np.asarray(region)
- #Image.fromarray(np_img_arr)
-
- Image.fromarray(region)
- # /usr/local/lib/python3.6/dist-packages/PIL/Image.py in frombytes(self, data, decoder_name, *args)
- # 792 d = _getdecoder(self.mode, decoder_name, args)
- # 793 d.setimage(self.im)
- # --> 794 s = d.decode(data)
- # 795
- # 796 if s[0] >= 0:
- # TypeError: a bytes-like object is required, not 'cucim._cucim.CuImage'
- ```
-- Provide universal cucim adaptors for DALI (for cucim::io::format::IImageFormat and cucim::filter::IImageFilter interfaces)
-- Support pretty display for IPython (Jupyter Notebook)
- - https://ipython.readthedocs.io/en/stable/config/integrating.html#integrating-your-objects-with-ipython
-
-### Pipeline
-
-- Use the app_dp_sample pipeline to convert the input image (.svs) of the Nuclei segmentation pipeline (app_dp_nuclei) to a .tif image
- - Load .tif file using cuCIM for Nuclei segmentation pipeline
-
-### Python API
-
-- Feature parity with OpenSlide
-- Add context manager for CuImage class (for `close()` method) -- [v0.19.1](../release_notes/v0.19.1.md)
-
-### C++ API
-
-- Design filtering API (which can embrace CuPy/CVCore/CuPyImg/OpenCV/scikit-image/dask-image)
-- Feature parity with OpenSlide
-
-- {fa}`check,text-success mr-1` Sort resolution levels (level 0: the largest resolution) for `CuImage::read_region()` method -- [v0.1.1](../release_notes/v0.1.1.md)
- - Add `TIFF::level_ifd(size_t level_index)` method
-- {fa}`check,text-success mr-1` Support `metadata` and `raw_metadata` properties/methods -- [v0.3.0](../release_notes/v0.3.0.md)
- - Implement `CuImage::metadata()` with JSON library (Folly or Modern JSON)
- - `cpp/src/cucim.cpp:238`
-- `ImageMetadataDesc` struct
- - {fa}`check,text-success mr-1` `resolution_dim_start` field: Reconsider its use (may not be needed) -- [v0.3.0](../release_notes/v0.3.0.md)
- - `cpp/include/cucim/io/format/image_format.h:53`
- - `channel_names` field : `S`, `T`, and other dimension can have names so need to be generalized
- - `cpp/include/cucim/io/format/image_format.h:51`
-- `numpy_dtype()` method
- - Consider bfloat16:
- - Consider other byte-order (currently, we assume `little-endian`)
-  - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/tiff.cpp:53`
- - `cpp/include/cucim/memory/dlpack.h:41`
-- `checker_is_valid()` method
- - Add `buf_size` parameter and implement the method
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/cuslide.cpp:68`
-
-- Consider default case (how to handle -1 index?)
- - `cpp/src/io/device.cpp` in `Device::Device()`
-- Implement `Device::parse_type()`
- - `cpp/src/io/device.cpp:81`
-- Implement `Device::validate_device()`
- - `cpp/src/io/device.cpp:116`
-
-- Check illegal characters for `DimIndices::DimIndices()`
- - `cpp/src/cucim.cpp:35`
- - `cpp/src/cucim.cpp:46`
-
-- Implement `detect_format()` method
- - `cpp/src/cucim.cpp:103`
-- Detect available format for the file path
- - Also consider if the given file path is folder path (DICOM case)
- - `cpp/src/cucim.cpp:117` in `CuImage::CuImage()`
-- Implement `CuImage::CuImage(const filesystem::Path& path, const std::string& plugin_name)`
- - `cpp/src/cucim.cpp:128`
-- Implement `CuImage::dtype()`
- - Support string conversion like Device class
- - `cpp/src/cucim.cpp:283`
-
-### Build
-
-- Check if `CMAKE_EXPORT_PACKAGE_REGISTRY` is duplicate and remove it
- - `cucim/cpp/plugins/cucim.kit.cuslide/CMakeLists.txt:255`
-- Install other dependencies for libtiff so that other compression methods are available
- - `cucim/Dockerfile:32`
-- {fa}`check,text-success mr-1` Setup development environment with VSCode (in addition to CLion) -- [v0.3.0](../release_notes/v0.3.0.md)
-- Use prebuilt libraries for dependencies
-
-### Test
-
-- {fa}`check,text-success mr-1` Parameterize input library/image -- [v0.3.0](../release_notes/v0.3.0.md)
- - `/ssd/repo/cucim/cpp/tests/test_read_region.cpp:69` in `Verify read_region`
- - `/ssd/repo/cucim/cpp/tests/test_cufile.cpp:79` in `Verify libcufile usage`
-- {fa}`check,text-success mr-1` Use a VSCode plugin for local test execution -- [v0.3.0](../release_notes/v0.3.0.md)
- - `matepek.vscode-catch2-test-adapter` extension
- -
-
-### Platform
-
-- Support Windows (currently only Linux package is available)
-
-### Package & CI/CD
-
-- {fa}`check,text-success mr-1` Make it work with various CUDA versions -- [v0.2.0](../release_notes/v0.2.0.md)
- - Currently, it is linked to CUDA 11.0 library
- - Refer to PyTorch's PyPi package
- - The PyPi package embeds CUDA runtime library.
- - https://github.com/pytorch/pytorch/issues/47268#issuecomment-721996861
-- Move to Github Project
-- Move `tox` setup from python folder to the project root folder
-- Setup Conda recipe
-- Setup automated test cases
-- Provide a robust CI/CD system -- [v0.22.0](../release_notes/v0.22.0.md)
-- Define KPIs and publish report -- [v0.22.0](../release_notes/v0.22.0.md)
-
-- Add license files to the package
-- Package a separate CXX11 ABI library
- - Currently, C++ library is forced to set `_GLIBCXX_USE_CXX11_ABI` to 0 due to [Dual ABI](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html) problem
- - `cpp/CMakeLists.txt:98`
-- Support CPack
- - `CMakeLists.txt:177`
-
-### Documentation
-
-- {fa}`check,text-success mr-1` Pass SWIPAT -- [v0.1.1](../release_notes/v0.1.1.md)
-- Refine README.md and relevant documents for the project
-- Move Sphinx docs to the project root folder
-- Add C++ API document
-- Add C++ examples to Jupyter Notebook
- - Can install C++ Kernel:
-- {fa}`check,text-success mr-1` Ignore link check for relative link with header that starts with `/` or `..` -- [v0.1.1](../release_notes/v0.1.1.md)
- - `python/cucim/docs/conf.py:71`
- -
-- {fa}`check,text-success mr-1` Force-reinstall cucim Python package in the Tox environment whenever `gen_docs` or `gen_docs_dev` command is executed -- [v0.2.0](../release_notes/v0.2.0.md)
- -
-- Simplify method signatures in Python API Docs
- - `cucim._cucim.CuImage` -> `cucim.CuImage`
-- Use new feature to reference a cross-link with header (from v0.13.0 of [myst-parser](https://pypi.org/project/myst-parser/))
- -
- -
- -
-
-### Plugin-system (Carbonite)
-
-- Update project to use the latest [Carbonite SDK](https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/carb/Framework.html) for supporting plug-in architecture -- [v0.22.0](../release_notes/v0.22.0.md)
- - Migrate to use Carbonite SDK as it is
- - Update to use Minimal Carbonite SDK
-
-- Handle errors and log error message once switched to use Carbonite SDK's built-in error routine
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp` : when reading field info
- - `cpp/plugins/cucim.kit.cuslide/src/cuslide/tiff/ifd.cpp` in `IFD::read()` : memory size check if `out_buf->data` has high-enough memory
-- Get plugin name from file_path
- - `cpp/src/core/cucim_plugin.cpp:53` in `Plugin::Plugin()`
-- Generalize `CuImage::ensure_init()`
- - 'LINUX' path separator is used. Need to make it generalize once filesystem library is available
- - `cucim/cpp/src/cucim.cpp:520`
diff --git a/python/cucim/docs/spelling_wordlist.txt b/python/cucim/docs/spelling_wordlist.txt
deleted file mode 100644
index f95eb78d8..000000000
--- a/python/cucim/docs/spelling_wordlist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-builtin
-builtins
-classmethod
-staticmethod
-classmethods
-staticmethods
-args
-kwargs
-callstack
-Changelog
-Indices
diff --git a/python/cucim/docs/user_guide/index.md b/python/cucim/docs/user_guide/index.md
deleted file mode 100644
index cd3d45227..000000000
--- a/python/cucim/docs/user_guide/index.md
+++ /dev/null
@@ -1 +0,0 @@
-# User Guide
diff --git a/python/cucim/pyproject.toml b/python/cucim/pyproject.toml
index ce9dc87e5..3d4aa443c 100644
--- a/python/cucim/pyproject.toml
+++ b/python/cucim/pyproject.toml
@@ -22,12 +22,12 @@ authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
-requires-python = ">=3.8"
+requires-python = ">=3.9"
dependencies = [
"cupy-cuda11x>=12.0.0",
"lazy_loader>=0.1",
"numpy",
- "scikit-image>=0.19.0,<0.22.0a0",
+ "scikit-image>=0.19.0,<0.23.0a0",
"scipy",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
@@ -39,22 +39,18 @@ classifiers = [
"Topic :: Scientific/Engineering",
"Operating System :: POSIX :: Linux",
"Environment :: Console",
- "Environment :: GPU :: NVIDIA CUDA :: 11.0",
- "Environment :: GPU :: NVIDIA CUDA :: 12.0",
+ "Environment :: GPU :: NVIDIA CUDA :: 11",
+ "Environment :: GPU :: NVIDIA CUDA :: 12",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
]
[project.urls]
Homepage = "https://developer.nvidia.com/multidimensional-image-processing"
Documentation = "https://docs.rapids.ai/api/cucim/stable/"
-Changelog = "https://github.com/rapidsai/cucim/blob/branch-23.12/CHANGELOG.md"
+Changelog = "https://github.com/rapidsai/cucim/blob/branch-24.02/CHANGELOG.md"
Source = "https://github.com/rapidsai/cucim"
Tracker = "https://github.com/rapidsai/cucim/issues"
@@ -62,14 +58,18 @@ Tracker = "https://github.com/rapidsai/cucim/issues"
test = [
"GPUtil>=1.4.0",
"click",
+ "imagecodecs>=2021.6.8; platform_machine=='x86_64'",
"matplotlib",
+ "numpydoc>=1.5",
"opencv-python-headless>=4.6",
+ "openslide-python>=1.3.0; platform_machine=='x86_64'",
"pooch>=1.6.0",
"psutil>=5.8.0",
"pytest-cov>=2.12.1",
"pytest-lazy-fixture>=0.6.3",
"pytest-xdist",
- "pytest>=6.2.4",
+ "pytest>=6.2.4,<8.0.0a0",
+ "pywavelets>=1.0",
"tifffile>=2022.7.28",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
developer = [
@@ -81,7 +81,7 @@ developer = [
docs = [
"ipython",
"nbsphinx",
- "numpydoc",
+ "numpydoc>=1.5",
"pydata-sphinx-theme",
"recommonmark",
"sphinx<6",
diff --git a/python/cucim/requirements-test.txt b/python/cucim/requirements-test.txt
deleted file mode 100644
index e8324ced4..000000000
--- a/python/cucim/requirements-test.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-GPUtil>=1.4.0
-imagecodecs>=2021.6.8
-openslide-python>=1.1.2
-opencv-python-headless>=4.6
-psutil>=5.8.0
-pytest>=6.2.4
-pytest-cov>=2.12.1
-pytest-lazy-fixture>=0.6.3
-tifffile>=2022.7.28
diff --git a/python/cucim/src/cucim/py.typed b/python/cucim/src/cucim/py.typed
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/cucim/src/cucim/skimage/_shared/filters.py b/python/cucim/src/cucim/skimage/_shared/filters.py
index 2892050d5..fe4c000ec 100644
--- a/python/cucim/src/cucim/skimage/_shared/filters.py
+++ b/python/cucim/src/cucim/skimage/_shared/filters.py
@@ -10,26 +10,7 @@
import cucim.skimage._vendored.ndimage as ndi
-from .._shared.utils import _supported_float_type, convert_to_float, warn
-
-
-class _PatchClassRepr(type):
- """Control class representations in rendered signatures."""
-
- def __repr__(cls):
- return f"<{cls.__name__}>"
-
-
-class ChannelAxisNotSet(metaclass=_PatchClassRepr):
- """Signal that the `channel_axis` parameter is not set.
- This is a proxy object, used to signal to `skimage.filters.gaussian` that
- the `channel_axis` parameter has not been set, in which case the function
- will determine whether a color channel is present. We cannot use ``None``
- for this purpose as it has its own meaning which indicates that the given
- image is grayscale.
- This automatic behavior was broken in v0.19, recovered but deprecated in
- v0.20 and will be removed in v0.21.
- """
+from .._shared.utils import _supported_float_type, convert_to_float
def gaussian(
@@ -41,7 +22,7 @@ def gaussian(
preserve_range=False,
truncate=4.0,
*,
- channel_axis=ChannelAxisNotSet,
+ channel_axis=None,
):
"""Multi-dimensional Gaussian filter.
@@ -77,12 +58,10 @@ def gaussian(
to channels.
.. warning::
- Automatic detection of the color channel based on the old deprecated
- ``multichannel=None`` was broken in version 0.19. In 0.20 this
- behavior is recovered. The last axis of an `image` with dimensions
- (M, N, 3) is interpreted as a color channel if `channel_axis` is
- not set. Starting with release 23.04.02, ``channel_axis=None`` will
- be used as the new default value.
+ In versions prior to 24.02, the last axis of an `image` with
+ dimensions (M, N, 3) was interpreted as a color channel if
+ `channel_axis` was not set. Starting with release 24.02,
+ ``channel_axis=None`` will be used as the new default value.
Returns
-------
@@ -133,21 +112,7 @@ def gaussian(
>>> from skimage.data import astronaut
>>> image = cp.array(astronaut())
>>> filtered_img = gaussian(image, sigma=1, channel_axis=-1)
-
"""
- if channel_axis is ChannelAxisNotSet:
- if image.ndim == 3 and image.shape[-1] == 3:
- warn(
- "Automatic detection of the color channel was deprecated in "
- "v0.19, and `channel_axis=None` will be the new default in "
- "v0.21. Set `channel_axis=-1` explicitly to silence this "
- "warning.",
- FutureWarning,
- stacklevel=2,
- )
- channel_axis = -1
- else:
- channel_axis = None
# CuPy Backend: refactor to avoid overhead of cp.any(cp.asarray(sigma))
sigma_msg = "Sigma values less than zero are not valid"
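
With the `ChannelAxisNotSet` sentinel removed, `gaussian` no longer guesses whether a trailing size-3 axis is a color channel; callers must pass `channel_axis` explicitly for multichannel images. A minimal sketch of the difference, assuming a CUDA-capable environment with CuPy and cuCIM installed:

```python
import cupy as cp
from cucim.skimage.filters import gaussian

rgb = cp.random.rand(64, 64, 3).astype(cp.float32)

# New default (channel_axis=None): the image is treated as a 3-D grayscale
# volume, so smoothing is also applied across the last (color) axis.
blurred_all_axes = gaussian(rgb, sigma=2)

# Explicit channel_axis=-1: each color channel is smoothed independently.
blurred_per_channel = gaussian(rgb, sigma=2, channel_axis=-1)
```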
diff --git a/python/cucim/src/cucim/skimage/_vendored/_internal.py b/python/cucim/src/cucim/skimage/_vendored/_internal.py
index 9ac4cef9a..f87e07744 100644
--- a/python/cucim/src/cucim/skimage/_vendored/_internal.py
+++ b/python/cucim/src/cucim/skimage/_vendored/_internal.py
@@ -3,7 +3,14 @@
from operator import mul
import cupy
-import numpy
+
+# TODO: when minimum numpy dependency is 1.25 use:
+# np.exceptions.AxisError instead of AxisError
+# and remove this try-except
+try:
+ from numpy import AxisError
+except ImportError:
+ from numpy.exceptions import AxisError
try:
# try importing Cython-based private axis handling functions from CuPy
@@ -37,7 +44,7 @@ def _normalize_axis_index(axis, ndim): # NOQA
if axis < 0:
axis += ndim
if not (0 <= axis < ndim):
- raise numpy.AxisError("axis out of bounds")
+ raise AxisError("axis out of bounds")
return axis
def _normalize_axis_indices(axes, ndim): # NOQA
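
NumPy 1.25 moved `AxisError` into `numpy.exceptions`, and NumPy 2.0 drops the top-level alias, so the try/except above resolves the name from whichever location exists. A standalone sketch of the same compatibility pattern (the `normalize_axis` helper is illustrative, not part of cuCIM):

```python
try:
    # Older NumPy exposes AxisError at the top level.
    from numpy import AxisError
except ImportError:
    # Newer NumPy provides it in numpy.exceptions
    # (the top-level alias is removed in NumPy 2.0).
    from numpy.exceptions import AxisError


def normalize_axis(axis: int, ndim: int) -> int:
    """Map a possibly negative axis to the range [0, ndim)."""
    if axis < 0:
        axis += ndim
    if not 0 <= axis < ndim:
        raise AxisError(f"axis out of bounds for array of dimension {ndim}")
    return axis


print(normalize_axis(-1, 3))  # 2
```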
diff --git a/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py b/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
index e1c8ade57..13cccd6b7 100644
--- a/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
+++ b/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
@@ -190,7 +190,6 @@ def _call_kernel(
template<> struct std::is_floating_point : std::true_type {};
template<> struct std::is_signed : std::true_type {};
-template struct std::is_signed> : std::is_signed {};
"""
diff --git a/python/cucim/src/cucim/skimage/color/colorconv.py b/python/cucim/src/cucim/skimage/color/colorconv.py
index 0b18aa910..89eeffabb 100644
--- a/python/cucim/src/cucim/skimage/color/colorconv.py
+++ b/python/cucim/src/cucim/skimage/color/colorconv.py
@@ -63,6 +63,14 @@
)
from ..util import dtype, dtype_limits
+# TODO: when minimum numpy dependency is 1.25 use:
+# np.exceptions.AxisError instead of AxisError
+# and remove this try-except
+try:
+ from numpy import AxisError
+except ImportError:
+ from numpy.exceptions import AxisError
+
def convert_colorspace(arr, fromspace, tospace, *, channel_axis=-1):
"""Convert an image array to a new color space.
@@ -173,7 +181,7 @@ def _validate_channel_axis(channel_axis, ndim):
if not isinstance(channel_axis, int):
raise TypeError("channel_axis must be an integer")
if channel_axis < -ndim or channel_axis >= ndim:
- raise np.AxisError("channel_axis exceeds array dimensions")
+ raise AxisError("channel_axis exceeds array dimensions")
@cp.memoize(for_each_device=True)
diff --git a/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py b/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py
index 7dfc80564..704e874df 100644
--- a/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py
+++ b/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py
@@ -63,6 +63,14 @@
)
from cucim.skimage.util import img_as_float, img_as_float32, img_as_ubyte
+# TODO: when minimum numpy dependency is 1.25 use:
+# np.exceptions.AxisError instead of AxisError
+# and remove this try-except
+try:
+ from numpy import AxisError
+except ImportError:
+ from numpy.exceptions import AxisError
+
data_dir = os.path.join(os.path.dirname(__file__), "data")
@@ -133,7 +141,7 @@ def test_rgba2rgb_error_channel_axis_invalid(self, channel_axis):
@pytest.mark.parametrize("channel_axis", [-4, 3])
def test_rgba2rgb_error_channel_axis_out_of_range(self, channel_axis):
- with pytest.raises(np.AxisError):
+ with pytest.raises(AxisError):
rgba2rgb(self.img_rgba, channel_axis=channel_axis)
def test_rgba2rgb_error_rgb(self):
diff --git a/python/cucim/src/cucim/skimage/feature/__init__.py b/python/cucim/src/cucim/skimage/feature/__init__.py
index 04b53fbfc..62d86f776 100644
--- a/python/cucim/src/cucim/skimage/feature/__init__.py
+++ b/python/cucim/src/cucim/skimage/feature/__init__.py
@@ -1,48 +1,3 @@
-from ._basic_features import multiscale_basic_features
-from ._canny import canny
-from ._daisy import daisy
-from .blob import blob_dog, blob_doh, blob_log
-from .corner import (
- corner_foerstner,
- corner_harris,
- corner_kitchen_rosenfeld,
- corner_peaks,
- corner_shi_tomasi,
- hessian_matrix,
- hessian_matrix_det,
- hessian_matrix_eigvals,
- shape_index,
- structure_tensor,
- structure_tensor_eigenvalues,
-)
-from .match import match_descriptors
-from .peak import peak_local_max
-from .template import match_template
+import lazy_loader as lazy
-__all__ = [
- "canny",
- "daisy",
- "multiscale_basic_features",
- "peak_local_max",
- "structure_tensor",
- "structure_tensor_eigenvalues",
- "structure_tensor_eigvals",
- "hessian_matrix",
- "hessian_matrix_det",
- "hessian_matrix_eigvals",
- "shape_index",
- "corner_kitchen_rosenfeld",
- "corner_harris",
- "corner_shi_tomasi",
- "corner_foerstner",
- # 'corner_subpix',
- "corner_peaks",
- # 'corner_moravec',
- # 'corner_fast',
- # 'corner_orientations',
- "match_template",
- "match_descriptors",
- "blob_dog",
- "blob_log",
- "blob_doh",
-]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
diff --git a/python/cucim/src/cucim/skimage/feature/__init__.pyi b/python/cucim/src/cucim/skimage/feature/__init__.pyi
new file mode 100644
index 000000000..22519324e
--- /dev/null
+++ b/python/cucim/src/cucim/skimage/feature/__init__.pyi
@@ -0,0 +1,47 @@
+# Explicitly setting `__all__` is necessary for type inference engines
+# to know which symbols are exported. See
+# https://peps.python.org/pep-0484/#stub-files
+
+__all__ = [
+ "canny",
+ "daisy",
+ "peak_local_max",
+ "structure_tensor",
+ "structure_tensor_eigenvalues",
+ "hessian_matrix",
+ "hessian_matrix_det",
+ "hessian_matrix_eigvals",
+ "shape_index",
+ "corner_kitchen_rosenfeld",
+ "corner_harris",
+ "corner_shi_tomasi",
+ "corner_foerstner",
+ "corner_peaks",
+ "match_template",
+ "match_descriptors",
+ "blob_dog",
+ "blob_doh",
+ "blob_log",
+ "multiscale_basic_features",
+]
+
+from ._basic_features import multiscale_basic_features
+from ._canny import canny
+from ._daisy import daisy
+from .blob import blob_dog, blob_doh, blob_log
+from .corner import (
+ corner_foerstner,
+ corner_harris,
+ corner_kitchen_rosenfeld,
+ corner_peaks,
+ corner_shi_tomasi,
+ hessian_matrix,
+ hessian_matrix_det,
+ hessian_matrix_eigvals,
+ shape_index,
+ structure_tensor,
+ structure_tensor_eigenvalues,
+)
+from .match import match_descriptors
+from .peak import peak_local_max
+from .template import match_template
diff --git a/python/cucim/src/cucim/skimage/feature/peak.py b/python/cucim/src/cucim/skimage/feature/peak.py
index 84965b328..58e0f1899 100644
--- a/python/cucim/src/cucim/skimage/feature/peak.py
+++ b/python/cucim/src/cucim/skimage/feature/peak.py
@@ -23,7 +23,7 @@ def _get_high_intensity_peaks(image, mask, num_peaks, min_distance, p_norm):
coord = cp.nonzero(mask)
intensities = image[coord]
# Highest peak first
- idx_maxsort = cp.argsort(-intensities)
+ idx_maxsort = cp.argsort(-intensities, kind="stable")
coord = cp.column_stack(coord)[idx_maxsort]
if np.isfinite(num_peaks):
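
The switch to `kind="stable"` above makes the ordering of equally intense peaks deterministic across runs. A minimal sketch of the effect on tie-breaking; the values are made up:

    import cupy as cp

    intensities = cp.asarray([5, 9, 9, 3, 9])
    # A stable descending sort keeps the original order among the tied 9s,
    # so peak coordinates are returned in a reproducible order.
    idx_maxsort = cp.argsort(-intensities, kind="stable")
    print(idx_maxsort)  # [1 2 4 0 3]
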
diff --git a/python/cucim/src/cucim/skimage/filters/ridges.py b/python/cucim/src/cucim/skimage/filters/ridges.py
index 0a929641e..34bd008d3 100644
--- a/python/cucim/src/cucim/skimage/filters/ridges.py
+++ b/python/cucim/src/cucim/skimage/filters/ridges.py
@@ -58,7 +58,7 @@ def meijering(
wrinkles, rivers. It can be used to calculate the fraction of the
whole image containing such objects.
- Calculates the eigenvectors of the Hessian to compute the similarity of
+ Calculates the eigenvalues of the Hessian to compute the similarity of
an image region to neurites, according to the method described in [1]_.
Parameters
@@ -151,7 +151,7 @@ def sato(
wrinkles, rivers. It can be used to calculate the fraction of the
whole image containing such objects.
- Defined only for 2-D and 3-D images. Calculates the eigenvectors of the
+ Defined only for 2-D and 3-D images. Calculates the eigenvalues of the
Hessian to compute the similarity of an image region to tubes, according to
the method described in [1]_.
@@ -334,7 +334,7 @@ def frangi(
wrinkles, rivers. It can be used to calculate the fraction of the
whole image containing such objects.
- Defined only for 2-D and 3-D images. Calculates the eigenvectors of the
+ Defined only for 2-D and 3-D images. Calculates the eigenvalues of the
Hessian to compute the similarity of an image region to vessels, according
to the method described in [1]_.
diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py b/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py
index 73ffe943d..ae3349f43 100644
--- a/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py
+++ b/python/cucim/src/cucim/skimage/filters/tests/test_gaussian.py
@@ -2,8 +2,6 @@
import numpy as np
import pytest
-from cucim.skimage._shared._warnings import expected_warnings
-from cucim.skimage._vendored import pad
from cucim.skimage.filters._gaussian import difference_of_gaussians, gaussian
@@ -60,17 +58,12 @@ def test_multichannel(channel_axis):
)
if channel_axis % a.ndim == 2:
- # Test legacy behavior equivalent to old (channel_axis = -1)
- with expected_warnings(["Automatic detection of the color channel"]):
- gaussian_rgb_a = gaussian(
- a, sigma=1, mode="reflect", preserve_range=True
- )
-
# Check that the mean value is conserved in each channel
# (color channels are not mixed together)
assert cp.allclose(
a.mean(axis=spatial_axes), gaussian_rgb_a.mean(axis=spatial_axes)
)
+
# Iterable sigma
gaussian_rgb_a = gaussian(
a,
@@ -189,34 +182,3 @@ def test_shared_mem_check_fix(dtype, sigma):
# The exact range that fails depends on the shared memory limit
# of the GPU, so we test with a range of sigmas here.
gaussian(cp.ones((512, 512), dtype=dtype), sigma=sigma)
-
-
-def test_deprecated_automatic_channel_detection():
- rgb = cp.zeros((5, 5, 3))
- rgb[1, 1] = cp.arange(1, 4)
- gray = pad(rgb, pad_width=((0, 0), (0, 0), (1, 0)))
-
- # Warning is raised if channel_axis is not set and shape is (M, N, 3)
- with pytest.warns(
- FutureWarning,
- match="Automatic detection .* was deprecated .* Set `channel_axis=-1`",
- ):
- filtered_rgb = gaussian(rgb, sigma=1, mode="reflect")
- # Check that the mean value is conserved in each channel
- # (color channels are not mixed together)
- assert cp.allclose(filtered_rgb.mean(axis=(0, 1)), rgb.mean(axis=(0, 1)))
-
- # No warning if channel_axis is not set and shape is not (M, N, 3)
- filtered_gray = gaussian(gray, sigma=1, mode="reflect")
-
- # No warning is raised if channel_axis is explicitly set
- filtered_rgb2 = gaussian(rgb, sigma=1, mode="reflect", channel_axis=-1)
- assert cp.array_equal(filtered_rgb, filtered_rgb2)
- filtered_gray2 = gaussian(gray, sigma=1, mode="reflect", channel_axis=None)
- assert cp.array_equal(filtered_gray, filtered_gray2)
- assert not cp.array_equal(filtered_rgb, filtered_gray)
-
- # Check how the proxy value shows up in the rendered function signature
- from cucim.skimage._shared.filters import ChannelAxisNotSet
-
-    assert repr(ChannelAxisNotSet) == "<ChannelAxisNotSet>"
diff --git a/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py b/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py
index f08fe60b4..a454c9584 100644
--- a/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py
+++ b/python/cucim/src/cucim/skimage/filters/tests/test_thresholding.py
@@ -640,12 +640,28 @@ def test_mean():
assert threshold_mean(img) == 1.0
-def test_triangle_uint_images():
- text = cp.array(data.text())
- assert threshold_triangle(cp.invert(text)) == 151
- assert threshold_triangle(text) == 104
- assert threshold_triangle(coinsd) == 80
- assert threshold_triangle(cp.invert(coinsd)) == 175
+# also run cases with nbins > 100000 to exercise the CuPy-based code path.
+@pytest.mark.parametrize("kwargs", [{}, {"nbins": 300000}])
+@pytest.mark.parametrize("dtype", [cp.uint8, cp.int16, cp.float16, cp.float32])
+def test_triangle_uniform_images(dtype, kwargs):
+ assert threshold_triangle(cp.zeros((10, 10), dtype=dtype), **kwargs) == 0
+ assert threshold_triangle(cp.ones((10, 10), dtype=dtype), **kwargs) == 1
+ assert threshold_triangle(cp.full((10, 10), 2, dtype=dtype), **kwargs) == 2
+
+
+# also run cases with nbins > 100000 to exercise the CuPy-based code path.
+@pytest.mark.parametrize("kwargs", [{}, {"nbins": 300000}])
+@pytest.mark.parametrize(
+ "data, expected_value",
+ [
+ (cp.invert(cp.array(data.text())), 151),
+ (cp.array(data.text()), 104),
+ (coinsd, 80),
+ (cp.invert(coinsd), 175),
+ ],
+)
+def test_triangle_uint_images(data, expected_value, kwargs):
+ assert threshold_triangle(data, **kwargs) == expected_value
def test_triangle_float_images():
diff --git a/python/cucim/src/cucim/skimage/filters/thresholding.py b/python/cucim/src/cucim/skimage/filters/thresholding.py
index 53e8b8d55..76127e52a 100644
--- a/python/cucim/src/cucim/skimage/filters/thresholding.py
+++ b/python/cucim/src/cucim/skimage/filters/thresholding.py
@@ -902,6 +902,8 @@ def threshold_triangle(image, nbins=256):
Examples
--------
+ >>> import cupy as cp
+ >>> from cucim.skimage.filters import threshold_triangle
>>> from skimage.data import camera
>>> image = cp.array(camera())
>>> thresh = threshold_triangle(image)
@@ -910,12 +912,25 @@ def threshold_triangle(image, nbins=256):
# nbins is ignored for integer arrays
# so, we recalculate the effective nbins.
hist, bin_centers = histogram(image.ravel(), nbins, source_range="image")
- nbins = len(hist)
+ if hist.size == 1:
+ # integer-valued image with constant intensity will have just 1 bin
+ return image.ravel()[0]
+ # In most cases, nbins is small so it is better to process hist on the CPU
+ if nbins > 100000:
+ xp = cp
+ else:
+ xp = np
+ hist = cp.asnumpy(hist)
+
+ nbins = len(hist)
# Find peak, lowest and highest gray levels.
- arg_peak_height = cp.argmax(hist)
+ arg_peak_height = xp.argmax(hist)
+ arg_low_level, arg_high_level = xp.flatnonzero(hist)[[0, -1]]
+ if arg_low_level == arg_high_level:
+ # Image has constant intensity.
+ return image.ravel()[0]
peak_height = hist[arg_peak_height]
- arg_low_level, arg_high_level = cp.flatnonzero(hist)[[0, -1]]
# Flip is True if left tail is shorter.
flip = arg_peak_height - arg_low_level < arg_high_level - arg_peak_height
@@ -930,11 +945,11 @@ def threshold_triangle(image, nbins=256):
# Set up the coordinate system.
width = arg_peak_height - arg_low_level
- x1 = cp.arange(width)
+ x1 = xp.arange(width)
y1 = hist[x1 + arg_low_level]
# Normalize.
- norm = cp.sqrt(peak_height**2 + width**2)
+ norm = xp.sqrt(peak_height**2 + width**2)
try:
peak_height /= norm
width /= norm
@@ -952,7 +967,7 @@ def threshold_triangle(image, nbins=256):
# the length, but here we omit it as it does not affect the location of the
# minimum.
length = peak_height * x1 - width * y1
- arg_level = cp.argmax(length) + arg_low_level
+ arg_level = xp.argmax(length) + arg_low_level
if flip:
arg_level = nbins - arg_level - 1
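
Two behaviors change above: a constant-intensity image returns its single value early, and the small histogram analysis runs on the CPU (`xp = np`) unless `nbins` exceeds 100000. A minimal usage sketch of the constant-image case; the shape and value are arbitrary:

    import cupy as cp
    from cucim.skimage.filters import threshold_triangle

    # A constant integer image has a single histogram bin; the early return
    # above yields that intensity instead of failing on an empty triangle.
    flat = cp.full((10, 10), 7, dtype=cp.uint8)
    assert threshold_triangle(flat) == 7
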
diff --git a/python/cucim/src/cucim/skimage/measure/__init__.py b/python/cucim/src/cucim/skimage/measure/__init__.py
index 4f32c7980..62d86f776 100644
--- a/python/cucim/src/cucim/skimage/measure/__init__.py
+++ b/python/cucim/src/cucim/skimage/measure/__init__.py
@@ -1,56 +1,3 @@
-from ._blur_effect import blur_effect
-from ._colocalization import (
- intersection_coeff,
- manders_coloc_coeff,
- manders_overlap_coeff,
- pearson_corr_coeff,
-)
-from ._label import label
-from ._moments import (
- centroid,
- inertia_tensor,
- inertia_tensor_eigvals,
- moments,
- moments_central,
- moments_coords,
- moments_coords_central,
- moments_hu,
- moments_normalized,
-)
-from ._polygon import approximate_polygon, subdivide_polygon
-from ._regionprops import (
- euler_number,
- perimeter,
- perimeter_crofton,
- regionprops,
- regionprops_table,
-)
-from .block import block_reduce
-from .entropy import shannon_entropy
-from .profile import profile_line
+import lazy_loader as lazy
-__all__ = [
- "blur_effect",
- "regionprops",
- "regionprops_table",
- "perimeter",
- "approximate_polygon",
- "subdivide_polygon",
- "block_reduce",
- "centroid",
- "moments",
- "moments_central",
- "moments_coords",
- "moments_coords_central",
- "moments_normalized",
- "moments_hu",
- "inertia_tensor",
- "inertia_tensor_eigvals",
- "profile_line",
- "label",
- "shannon_entropy",
- "intersection_coeff",
- "manders_coloc_coeff",
- "manders_overlap_coeff",
- "pearson_corr_coeff",
-]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
diff --git a/python/cucim/src/cucim/skimage/measure/__init__.pyi b/python/cucim/src/cucim/skimage/measure/__init__.pyi
new file mode 100644
index 000000000..4ae693f16
--- /dev/null
+++ b/python/cucim/src/cucim/skimage/measure/__init__.pyi
@@ -0,0 +1,62 @@
+# Explicitly setting `__all__` is necessary for type inference engines
+# to know which symbols are exported. See
+# https://peps.python.org/pep-0484/#stub-files
+
+__all__ = [
+ "regionprops",
+ "regionprops_table",
+ "perimeter",
+ "perimeter_crofton",
+ "euler_number",
+ "approximate_polygon",
+ "subdivide_polygon",
+ "block_reduce",
+ "centroid",
+ "moments",
+ "moments_central",
+ "moments_coords",
+ "moments_coords_central",
+ "moments_normalized",
+ "moments_hu",
+ "inertia_tensor",
+ "inertia_tensor_eigvals",
+ "profile_line",
+ "label",
+ "shannon_entropy",
+ "blur_effect",
+ "pearson_corr_coeff",
+ "manders_coloc_coeff",
+ "manders_overlap_coeff",
+ "intersection_coeff",
+]
+
+from ._blur_effect import blur_effect
+from ._colocalization import (
+ intersection_coeff,
+ manders_coloc_coeff,
+ manders_overlap_coeff,
+ pearson_corr_coeff,
+)
+from ._label import label
+from ._moments import (
+ centroid,
+ inertia_tensor,
+ inertia_tensor_eigvals,
+ moments,
+ moments_central,
+ moments_coords,
+ moments_coords_central,
+ moments_hu,
+ moments_normalized,
+)
+from ._polygon import approximate_polygon, subdivide_polygon
+from ._regionprops import (
+ euler_number,
+ perimeter,
+ perimeter_crofton,
+ regionprops,
+ regionprops_table,
+)
+from .block import block_reduce
+from .entropy import shannon_entropy
+from .profile import profile_line
diff --git a/python/cucim/src/cucim/skimage/measure/_blur_effect.py b/python/cucim/src/cucim/skimage/measure/_blur_effect.py
index e3ba81d1f..6fc77774c 100644
--- a/python/cucim/src/cucim/skimage/measure/_blur_effect.py
+++ b/python/cucim/src/cucim/skimage/measure/_blur_effect.py
@@ -5,6 +5,14 @@
from ..color import rgb2gray
from ..util import img_as_float
+# TODO: when minimum numpy dependency is 1.25 use:
+# np.exceptions.AxisError instead of AxisError
+# and remove this try-except
+try:
+ from numpy import AxisError
+except ImportError:
+ from numpy.exceptions import AxisError
+
__all__ = ["blur_effect"]
@@ -56,7 +64,7 @@ def blur_effect(image, h_size=11, channel_axis=None, reduce_func=max):
try:
# ensure color channels are in the final dimension
image = cp.moveaxis(image, channel_axis, -1)
- except cp.AxisError:
+ except AxisError:
print("channel_axis must be one of the image array dimensions")
raise
except TypeError:
diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops.py b/python/cucim/src/cucim/skimage/measure/_regionprops.py
index fb1da9dfa..6246798ee 100644
--- a/python/cucim/src/cucim/skimage/measure/_regionprops.py
+++ b/python/cucim/src/cucim/skimage/measure/_regionprops.py
@@ -34,6 +34,7 @@
"ConvexImage": "image_convex",
"convex_image": "image_convex",
"Coordinates": "coords",
+ "coords_scaled": "coords_scaled",
"Eccentricity": "eccentricity",
"EquivDiameter": "equivalent_diameter_area",
"equivalent_diameter": "equivalent_diameter_area",
@@ -67,6 +68,7 @@
"minor_axis_length": "axis_minor_length",
"Moments": "moments",
"NormalizedMoments": "moments_normalized",
+ "num_pixels": "num_pixels",
"Orientation": "orientation",
"Perimeter": "perimeter",
"CroftonPerimeter": "perimeter_crofton",
@@ -102,6 +104,7 @@
"centroid_weighted": float,
"centroid_weighted_local": float,
"coords": object,
+ "coords_scaled": object,
"eccentricity": float,
"equivalent_diameter_area": float,
"euler_number": int,
@@ -125,6 +128,7 @@
"moments_weighted_central": float,
"moments_weighted_hu": float,
"moments_weighted_normalized": float,
+ "num_pixels": int,
"orientation": float,
"perimeter": float,
"perimeter_crofton": float,
@@ -192,18 +196,18 @@ def _infer_regionprop_dtype(func, *, intensity, ndim):
dtype : NumPy data type
The data type of the returned property.
"""
- labels = [1, 2]
- sample = cp.zeros((3,) * ndim, dtype=np.intp)
- sample[(0,) * ndim] = labels[0]
- sample[(slice(1, None),) * ndim] = labels[1]
- propmasks = [(sample == n) for n in labels]
+ mask_1 = np.ones((1,) * ndim, dtype=bool)
+ mask_1 = np.pad(mask_1, (0, 1), constant_values=False)
+ mask_2 = np.ones((2,) * ndim, dtype=bool)
+ mask_2 = np.pad(mask_2, (1, 0), constant_values=False)
+ propmasks = [cp.array(mask_1), cp.array(mask_2)]
rng = cp.random.default_rng()
if intensity and _infer_number_of_required_args(func) == 2:
def _func(mask):
- return func(mask, rng.random(sample.shape))
+ return func(mask, rng.random(mask.shape))
else:
_func = func
@@ -921,7 +925,7 @@ def _props_to_dict(regions, properties=("label", "bbox"), separator="-"):
or prop in OBJECT_COLUMNS
or dtype is np.object_
):
- if prop in OBJECT_COLUMNS:
+ if prop in OBJECT_COLUMNS or dtype is np.object_:
# keep objects in a NumPy array
column_buffer = np.empty(n, dtype=dtype)
for i in range(n):
diff --git a/python/cucim/src/cucim/skimage/measure/block.py b/python/cucim/src/cucim/skimage/measure/block.py
index db8cbfeac..4c3c8dc8c 100644
--- a/python/cucim/src/cucim/skimage/measure/block.py
+++ b/python/cucim/src/cucim/skimage/measure/block.py
@@ -89,9 +89,10 @@ def block_reduce(image, block_size=2, func=cp.sum, cval=0, func_kwargs=None):
after_width = 0
pad_width.append((0, after_width))
- image = pad(
- image, pad_width=pad_width, mode="constant", constant_values=cval
- )
+ if any(after_width > 0 for _, after_width in pad_width):
+ image = pad(
+ image, pad_width=pad_width, mode="constant", constant_values=cval
+ )
blocked = view_as_blocks(image, block_size)
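
With the guard above, `pad` is only called when at least one axis actually needs trailing padding, so images whose shape already divides evenly by `block_size` avoid an extra device-side copy. A minimal sketch; the shapes are arbitrary:

    import cupy as cp
    from cucim.skimage.measure import block_reduce

    # (4, 6) is an exact multiple of (2, 3): no padding copy is made.
    image = cp.arange(24, dtype=cp.float32).reshape(4, 6)
    out = block_reduce(image, block_size=(2, 3), func=cp.sum)
    print(out.shape)  # (2, 2)
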
diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py b/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
index 7ce1351a2..1ce3b841f 100644
--- a/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
+++ b/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
@@ -1,4 +1,5 @@
import math
+import re
import cupy as cp
import cupyx.scipy.ndimage as ndi
@@ -1240,6 +1241,20 @@ def test_column_dtypes_correct():
assert False, f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
+def test_all_documented_items_in_col_dtypes():
+ numpydoc = pytest.importorskip("numpydoc")
+ docstring = numpydoc.docscrape.FunctionDoc(regionprops)
+ notes_lines = docstring["Notes"]
+ property_lines = filter(lambda line: line.startswith("**"), notes_lines)
+    pattern = r"\*\*(?P<property_name>[a-z_]+)\*\*.*"
+ property_names = {
+ re.search(pattern, property_line).group("property_name")
+ for property_line in property_lines
+ }
+ column_keys = set(COL_DTYPES.keys())
+ assert column_keys == property_names
+
+
def pixelcount(regionmask):
"""a short test for an extra property"""
return cp.sum(regionmask)
@@ -1249,6 +1264,11 @@ def intensity_median(regionmask, image_intensity):
return cp.median(image_intensity[regionmask])
+def bbox_list(regionmask):
+ """Extra property whose output shape is dependent on mask shape."""
+ return [1] * regionmask.shape[1]
+
+
def too_many_args(regionmask, image_intensity, superfluous):
return 1
@@ -1314,11 +1334,15 @@ def test_extra_properties_table():
SAMPLE_MULTIPLE,
intensity_image=INTENSITY_SAMPLE_MULTIPLE,
properties=("label",),
- extra_properties=(intensity_median, pixelcount),
+ extra_properties=(intensity_median, pixelcount, bbox_list),
)
assert_array_almost_equal(out["intensity_median"], np.array([2.0, 4.0]))
assert_array_equal(out["pixelcount"], np.array([10, 2]))
+ assert out["bbox_list"].dtype == np.object_
+ assert out["bbox_list"][0] == [1] * 10
+ assert out["bbox_list"][1] == [1] * 1
+
def test_multichannel():
"""Test that computing multichannel properties works."""
diff --git a/python/cucim/src/cucim/skimage/morphology/__init__.py b/python/cucim/src/cucim/skimage/morphology/__init__.py
index 2c3cbf114..eedc0bc77 100644
--- a/python/cucim/src/cucim/skimage/morphology/__init__.py
+++ b/python/cucim/src/cucim/skimage/morphology/__init__.py
@@ -1,3 +1,11 @@
+"""Utilities that operate on shapes in images.
+
+These operations are particularly suited for binary images,
+although some may be useful for images of other types as well.
+
+Basic morphological operations include dilation and erosion.
+"""
+
from ._skeletonize import medial_axis, thin
from .binary import (
binary_closing,
diff --git a/python/cucim/src/cucim/skimage/registration/__init__.py b/python/cucim/src/cucim/skimage/registration/__init__.py
index 5734f266f..62d86f776 100644
--- a/python/cucim/src/cucim/skimage/registration/__init__.py
+++ b/python/cucim/src/cucim/skimage/registration/__init__.py
@@ -1,4 +1,3 @@
-from ._optical_flow import optical_flow_ilk, optical_flow_tvl1 # noqa
-from ._phase_cross_correlation import phase_cross_correlation # noqa
+import lazy_loader as lazy
-__all__ = ["optical_flow_ilk", "optical_flow_tvl1", "phase_cross_correlation"]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
diff --git a/python/cucim/src/cucim/skimage/registration/__init__.pyi b/python/cucim/src/cucim/skimage/registration/__init__.pyi
new file mode 100644
index 000000000..e6992c41e
--- /dev/null
+++ b/python/cucim/src/cucim/skimage/registration/__init__.pyi
@@ -0,0 +1,8 @@
+# Explicitly setting `__all__` is necessary for type inference engines
+# to know which symbols are exported. See
+# https://peps.python.org/pep-0484/#stub-files
+
+__all__ = ["optical_flow_ilk", "optical_flow_tvl1", "phase_cross_correlation"]
+
+from ._optical_flow import optical_flow_ilk, optical_flow_tvl1
+from ._phase_cross_correlation import phase_cross_correlation
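
`lazy.attach_stub` reads the adjacent `.pyi` stub to learn the public names and defers the actual submodule imports until an attribute is first accessed, while the stub keeps static type checkers and IDEs working. A minimal usage sketch, assuming the `lazy_loader` package is installed as the new `__init__.py` files require:

    import cucim.skimage.registration as registration

    # No _optical_flow / _phase_cross_correlation import has happened yet.
    print(registration.__all__)
    # The first attribute access triggers the real import through lazy_loader.
    func = registration.phase_cross_correlation
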
diff --git a/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py b/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py
index 4fcc497a0..ba5b8935e 100644
--- a/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py
+++ b/python/cucim/src/cucim/skimage/registration/_phase_cross_correlation.py
@@ -5,13 +5,13 @@
import itertools
import math
-import warnings
import cupy as cp
import cupyx.scipy.ndimage as ndi
import numpy as np
from .._shared.fft import fftmodule as fft
+from .._shared.utils import remove_arg
from ._masked_phase_cross_correlation import _masked_phase_cross_correlation
@@ -188,6 +188,7 @@ def _disambiguate_shift(reference_image, moving_image, shift):
return real_shift
+@remove_arg("return_error", changed_version="24.06")
def phase_cross_correlation(
reference_image,
moving_image,
@@ -234,10 +235,6 @@ def phase_cross_correlation(
this parameter is set to ``True``, the *real* space cross-correlation
is computed for each possible shift, and the shift with the highest
cross-correlation within the overlapping area is returned.
- return_error : bool, {"always"}, optional
- Returns error and phase difference if "always" is given. If False, or
- either ``reference_mask`` or ``moving_mask`` are given, only the shift
- is returned.
reference_mask : ndarray
Boolean mask for ``reference_image``. The mask should evaluate
to ``True`` (or 1) on valid pixels. ``reference_mask`` should
@@ -268,13 +265,11 @@ def phase_cross_correlation(
error : float
Translation invariant normalized RMS error between
``reference_image`` and ``moving_image``. For masked cross-correlation
- this error is not available and NaN is returned if ``return_error``
- is "always".
+ this error is not available and NaN is returned.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative). For masked cross-correlation
- this phase difference is not available and NaN is returned if
- ``return_error`` is "always".
+ this phase difference is not available and NaN is returned.
Notes
-----
@@ -312,15 +307,11 @@ def phase_cross_correlation(
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
-
- def warn_return_error():
- warnings.warn(
- "In scikit-image 0.22, phase_cross_correlation will start "
- "returning a tuple or 3 items (shift, error, phasediff) always. "
- "To enable the new return behavior and silence this warning, use "
- "return_error='always'.",
- category=FutureWarning,
- stacklevel=3,
+ if not return_error:
+ raise ValueError(
+ "return_error must be True (or 'always'), False is no longer "
+ "supported as of cuCIM 24.02 and the `return_error` kwarg will be "
+ "removed in cuCIM 24.06."
)
if (reference_mask is not None) or (moving_mask is not None):
@@ -331,11 +322,7 @@ def warn_return_error():
moving_mask,
overlap_ratio,
)
- if return_error == "always":
- return shift, np.nan, np.nan
- else:
- warn_return_error()
- return shift
+ return shift, np.nan, np.nan
# images must be the same shape
if reference_image.shape != moving_image.shape:
@@ -373,14 +360,13 @@ def warn_return_error():
)
if upsample_factor == 1:
- if return_error:
- sabs = cp.abs(src_freq)
- sabs *= sabs
- tabs = cp.abs(target_freq)
- tabs *= tabs
- src_amp = np.sum(sabs) / src_freq.size
- target_amp = np.sum(tabs) / target_freq.size
- CCmax = cross_correlation[maxima]
+ sabs = cp.abs(src_freq)
+ sabs *= sabs
+ tabs = cp.abs(target_freq)
+ tabs *= tabs
+ src_amp = np.sum(sabs) / src_freq.size
+ target_amp = np.sum(tabs) / target_freq.size
+ CCmax = cross_correlation[maxima]
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
@@ -412,13 +398,12 @@ def warn_return_error():
maxima = tuple(float(m) - dftshift for m in maxima)
shift = tuple(s + m / upsample_factor for s, m in zip(shift, maxima))
- if return_error:
- src_amp = cp.abs(src_freq)
- src_amp *= src_amp
- src_amp = cp.sum(src_amp)
- target_amp = cp.abs(target_freq)
- target_amp *= target_amp
- target_amp = cp.sum(target_amp)
+ src_amp = cp.abs(src_freq)
+ src_amp *= src_amp
+ src_amp = cp.sum(src_amp)
+ target_amp = cp.abs(target_freq)
+ target_amp *= target_amp
+ target_amp = cp.sum(target_amp)
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
@@ -432,22 +417,19 @@ def warn_return_error():
moving_image = fft.ifftn(moving_image)
shift = _disambiguate_shift(reference_image, moving_image, shift)
- if return_error:
- # Redirect user to masked_phase_cross_correlation if NaNs are observed
- if cp.isnan(CCmax) or cp.isnan(src_amp) or cp.isnan(target_amp):
- raise ValueError(
- "NaN values found, please remove NaNs from your "
- "input data or use the `reference_mask`/`moving_mask` "
- "keywords, eg: "
- "phase_cross_correlation(reference_image, moving_image, "
- "reference_mask=~np.isnan(reference_image), "
- "moving_mask=~np.isnan(moving_image))"
- )
-
- return (
- shift,
- _compute_error(CCmax, src_amp, target_amp),
- _compute_phasediff(CCmax),
+ # Redirect user to masked_phase_cross_correlation if NaNs are observed
+ if cp.isnan(CCmax) or cp.isnan(src_amp) or cp.isnan(target_amp):
+ raise ValueError(
+ "NaN values found, please remove NaNs from your "
+ "input data or use the `reference_mask`/`moving_mask` "
+ "keywords, eg: "
+ "phase_cross_correlation(reference_image, moving_image, "
+ "reference_mask=~np.isnan(reference_image), "
+ "moving_mask=~np.isnan(moving_image))"
)
- else:
- return shift
+
+ return (
+ shift,
+ _compute_error(CCmax, src_amp, target_amp),
+ _compute_phasediff(CCmax),
+ )
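
With the changes above, `phase_cross_correlation` always computes and returns the `(shift, error, phasediff)` triple, and `return_error=False` now raises until the keyword is dropped in 24.06. A minimal usage sketch with a synthetic circular shift; the sizes and shift values are arbitrary:

    import cupy as cp
    from cucim.skimage.registration import phase_cross_correlation

    rng = cp.random.default_rng(0)
    reference = rng.standard_normal((64, 64))
    moving = cp.roll(reference, shift=(3, -2), axis=(0, 1))

    # Three values are always returned now; no FutureWarning, no bare shift.
    shift, error, phasediff = phase_cross_correlation(reference, moving)
    print(shift)  # translation estimate registering `moving` onto `reference`
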
diff --git a/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py b/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py
index da089ed67..7a853fb86 100644
--- a/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py
+++ b/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py
@@ -141,7 +141,7 @@ def test_wrong_input():
template = cp.ones((5, 5))
with expected_warnings([r"invalid value encountered in true_divide|\A\Z"]):
with pytest.raises(ValueError):
- phase_cross_correlation(template, image, return_error=True)
+ phase_cross_correlation(template, image)
def test_4d_input_pixel():
@@ -202,7 +202,9 @@ def test_disambiguate_2d(shift0, shift1):
reference = image[slice0]
moving = image[slice1]
computed_shift, _, _ = phase_cross_correlation(
- reference, moving, disambiguate=True, return_error="always"
+ reference,
+ moving,
+ disambiguate=True,
)
np.testing.assert_equal(shift, computed_shift)
@@ -215,6 +217,9 @@ def test_disambiguate_zero_shift():
"""
image = cp.array(camera())
computed_shift, _, _ = phase_cross_correlation(
- image, image, disambiguate=True, return_error="always"
+ image,
+ image,
+ disambiguate=True,
+ return_error="always",
)
assert computed_shift == (0, 0)
diff --git a/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py b/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py
index fa706a9d3..20a70b039 100644
--- a/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py
+++ b/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py
@@ -11,8 +11,8 @@ def _sin_flow_gen(image0, max_motion=4.5, npics=5):
"""Generate a synthetic ground truth optical flow with a sinusoid as
first component.
- Parameters:
- ----
+ Parameters
+ ----------
image0: ndarray
The base image to be warped.
max_motion: float
diff --git a/python/cucim/src/cucim/skimage/restoration/__init__.py b/python/cucim/src/cucim/skimage/restoration/__init__.py
index 07eaeb6b1..62d86f776 100644
--- a/python/cucim/src/cucim/skimage/restoration/__init__.py
+++ b/python/cucim/src/cucim/skimage/restoration/__init__.py
@@ -1,12 +1,3 @@
-from ._denoise import denoise_tv_chambolle
-from .deconvolution import richardson_lucy, unsupervised_wiener, wiener
-from .j_invariant import calibrate_denoiser, denoise_invariant
+import lazy_loader as lazy
-__all__ = [
- "wiener",
- "unsupervised_wiener",
- "richardson_lucy",
- "denoise_tv_chambolle",
- "calibrate_denoiser",
- "denoise_invariant",
-]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
diff --git a/python/cucim/src/cucim/skimage/restoration/__init__.pyi b/python/cucim/src/cucim/skimage/restoration/__init__.pyi
new file mode 100644
index 000000000..89e9ec583
--- /dev/null
+++ b/python/cucim/src/cucim/skimage/restoration/__init__.pyi
@@ -0,0 +1,16 @@
+# Explicitly setting `__all__` is necessary for type inference engines
+# to know which symbols are exported. See
+# https://peps.python.org/pep-0484/#stub-files
+
+__all__ = [
+ "wiener",
+ "unsupervised_wiener",
+ "richardson_lucy",
+ "denoise_tv_chambolle",
+ "denoise_invariant",
+ "calibrate_denoiser",
+]
+
+from ._denoise import denoise_tv_chambolle
+from .deconvolution import richardson_lucy, unsupervised_wiener, wiener
+from .j_invariant import calibrate_denoiser, denoise_invariant
diff --git a/python/cucim/src/cucim/skimage/restoration/deconvolution.py b/python/cucim/src/cucim/skimage/restoration/deconvolution.py
index f30da5045..554232d62 100644
--- a/python/cucim/src/cucim/skimage/restoration/deconvolution.py
+++ b/python/cucim/src/cucim/skimage/restoration/deconvolution.py
@@ -76,7 +76,7 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
unknown original image, the Wiener filter is
.. math::
- \hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)
+ \hat x = F^\dagger \left( |\Lambda_H|^2 + \lambda |\Lambda_D|^2 \right)^{-1}
\Lambda_H^\dagger F y
where :math:`F` and :math:`F^\dagger` are the Fourier and inverse
@@ -113,7 +113,7 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
.. [2] B. R. Hunt "A matrix theory proof of the discrete
convolution theorem", IEEE Trans. on Audio and
Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
- """
+ """ # noqa: E501
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not cp.iscomplexobj(reg):
@@ -307,7 +307,7 @@ def unsupervised_wiener(
prev_x_postmean = cp.zeros(trans_fct.shape, dtype=float_type)
# Difference between two successive mean
- delta = np.NAN
+ delta = np.nan
# Initial state of the chain
gn_chain, gx_chain = [1], [1]
diff --git a/python/cucim/src/cucim/skimage/restoration/j_invariant.py b/python/cucim/src/cucim/skimage/restoration/j_invariant.py
index ffc3cef91..10ec14d2f 100644
--- a/python/cucim/src/cucim/skimage/restoration/j_invariant.py
+++ b/python/cucim/src/cucim/skimage/restoration/j_invariant.py
@@ -141,7 +141,17 @@ def denoise_invariant(
.. [1] J. Batson & L. Royer. Noise2Self: Blind Denoising by
Self-Supervision, International Conference on Machine Learning,
p. 524-533 (2019).
- """
+
+ Examples
+ --------
+ >>> import cucim.skimage
+ >>> import cupy as cp
+ >>> import skimage
+ >>> from cucim.skimage.restoration import denoise_invariant, denoise_tv_chambolle
+ >>> image = cucim.skimage.util.img_as_float(cp.asarray(skimage.data.chelsea()))
+ >>> noisy = cucim.skimage.util.random_noise(image, var=0.2 ** 2)
+ >>> denoised = denoise_invariant(noisy, denoise_function=denoise_tv_chambolle)
+ """ # noqa: E501
image = img_as_float(image)
# promote float16->float32 if needed
@@ -273,7 +283,7 @@ def calibrate_denoiser(
... calibrate_denoiser)
>>> img = color.rgb2gray(cp.array(data.astronaut()[:50, :50]))
>>> noisy = img + 0.5 * img.std() * cp.random.randn(*img.shape)
- >>> parameters = {'weight': cp.arange(0.01, 0.5, 0.05)}
+ >>> parameters = {'weight': cp.arange(0.01, 0.3, 0.02)}
>>> denoising_function = calibrate_denoiser(noisy, denoise_tv_chambolle,
... denoise_parameters=parameters)
>>> denoised_img = denoising_function(img)
diff --git a/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py b/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py
index 584e4a711..6057a92c8 100644
--- a/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py
+++ b/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py
@@ -3,9 +3,6 @@
import pytest
from skimage.data import camera, chelsea
-# from cucim.skimage.restoration import denoise_wavelet
-from skimage.restoration import denoise_wavelet
-
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.data import binary_blobs
from cucim.skimage.metrics import mean_squared_error as mse
@@ -20,6 +17,10 @@
noisy_img_color = random_noise(test_img_color, mode="gaussian", var=0.01)
noisy_img_3d = random_noise(test_img_3d, mode="gaussian", var=0.1)
+# skip tests if skimage.restoration module cannot be imported
+skimage_restoration = pytest.importorskip("skimage.restoration")
+denoise_wavelet = skimage_restoration.denoise_wavelet
+
# TODO: replace with CuPy version once completed
def _denoise_wavelet(image, rescale_sigma=True, **kwargs):
diff --git a/python/cucim/src/cucim/skimage/segmentation/__init__.py b/python/cucim/src/cucim/skimage/segmentation/__init__.py
index 6d9544aa8..6e2969ae5 100644
--- a/python/cucim/src/cucim/skimage/segmentation/__init__.py
+++ b/python/cucim/src/cucim/skimage/segmentation/__init__.py
@@ -1,3 +1,6 @@
+"""Algorithms to partition images into meaningful regions or boundaries.
+"""
+
from ._chan_vese import chan_vese
from ._clear_border import clear_border
from ._expand_labels import expand_labels
diff --git a/python/cucim/src/cucim/skimage/transform/__init__.py b/python/cucim/src/cucim/skimage/transform/__init__.py
index 81fe7c6e8..62d86f776 100644
--- a/python/cucim/src/cucim/skimage/transform/__init__.py
+++ b/python/cucim/src/cucim/skimage/transform/__init__.py
@@ -1,58 +1,3 @@
-from ._geometric import (
- AffineTransform,
- EssentialMatrixTransform,
- EuclideanTransform,
- FundamentalMatrixTransform,
- PiecewiseAffineTransform,
- PolynomialTransform,
- ProjectiveTransform,
- SimilarityTransform,
- estimate_transform,
- matrix_transform,
-)
-from ._warps import (
- downscale_local_mean,
- rescale,
- resize,
- resize_local_mean,
- rotate,
- swirl,
- warp,
- warp_coords,
- warp_polar,
-)
-from .integral import integral_image, integrate
-from .pyramids import (
- pyramid_expand,
- pyramid_gaussian,
- pyramid_laplacian,
- pyramid_reduce,
-)
+import lazy_loader as lazy
-__all__ = [
- "integral_image",
- "integrate",
- "warp",
- "warp_coords",
- "warp_polar",
- "estimate_transform",
- "matrix_transform",
- "EuclideanTransform",
- "SimilarityTransform",
- "AffineTransform",
- "ProjectiveTransform",
- "EssentialMatrixTransform",
- "FundamentalMatrixTransform",
- "PolynomialTransform",
- "PiecewiseAffineTransform",
- "swirl",
- "resize",
- "resize_local_mean",
- "rotate",
- "rescale",
- "downscale_local_mean",
- "pyramid_reduce",
- "pyramid_expand",
- "pyramid_gaussian",
- "pyramid_laplacian",
-]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
diff --git a/python/cucim/src/cucim/skimage/transform/__init__.pyi b/python/cucim/src/cucim/skimage/transform/__init__.pyi
new file mode 100644
index 000000000..8f8dcaadb
--- /dev/null
+++ b/python/cucim/src/cucim/skimage/transform/__init__.pyi
@@ -0,0 +1,62 @@
+# Explicitly setting `__all__` is necessary for type inference engines
+# to know which symbols are exported. See
+# https://peps.python.org/pep-0484/#stub-files
+
+__all__ = [
+ "integral_image",
+ "integrate",
+ "warp",
+ "warp_coords",
+ "warp_polar",
+ "estimate_transform",
+ "matrix_transform",
+ "EuclideanTransform",
+ "SimilarityTransform",
+ "AffineTransform",
+ "ProjectiveTransform",
+ "EssentialMatrixTransform",
+ "FundamentalMatrixTransform",
+ "PolynomialTransform",
+ "PiecewiseAffineTransform",
+ "swirl",
+ "resize",
+ "resize_local_mean",
+ "rotate",
+ "rescale",
+ "downscale_local_mean",
+ "pyramid_reduce",
+ "pyramid_expand",
+ "pyramid_gaussian",
+ "pyramid_laplacian",
+]
+
+from ._geometric import (
+ AffineTransform,
+ EssentialMatrixTransform,
+ EuclideanTransform,
+ FundamentalMatrixTransform,
+ PiecewiseAffineTransform,
+ PolynomialTransform,
+ ProjectiveTransform,
+ SimilarityTransform,
+ estimate_transform,
+ matrix_transform,
+)
+from ._warps import (
+ downscale_local_mean,
+ rescale,
+ resize,
+ resize_local_mean,
+ rotate,
+ swirl,
+ warp,
+ warp_coords,
+ warp_polar,
+)
+from .integral import integral_image, integrate
+from .pyramids import (
+ pyramid_expand,
+ pyramid_gaussian,
+ pyramid_laplacian,
+ pyramid_reduce,
+)
diff --git a/python/cucim/src/cucim/skimage/transform/_geometric.py b/python/cucim/src/cucim/skimage/transform/_geometric.py
index 9b5644bd7..8f3758b37 100644
--- a/python/cucim/src/cucim/skimage/transform/_geometric.py
+++ b/python/cucim/src/cucim/skimage/transform/_geometric.py
@@ -1365,6 +1365,13 @@ class EuclideanTransform(ProjectiveTransform):
translation parameters. The similarity transformation extends the Euclidean
transformation with a single scaling factor.
+ In 2D and 3D, the transformation parameters may be provided either via
+ `matrix`, the homogeneous transformation matrix, above, or via the
+ implicit parameters `rotation` and/or `translation` (where `a1` is the
+ translation along `x`, `b1` along `y`, etc.). Beyond 3D, if the
+ transformation is only a translation, you may use the implicit parameter
+ `translation`; otherwise, you must use `matrix`.
+
Parameters
----------
matrix : (D+1, D+1) ndarray, optional
@@ -1375,7 +1382,7 @@ class EuclideanTransform(ProjectiveTransform):
(single rotation) and 3D (Euler rotations) values are supported. For
higher dimensions, you must provide or estimate the transformation
matrix.
- translation : sequence of float, length D, optional
+ translation : (x, y[, z, ...]) sequence of float, length D, optional
Translation parameters for each axis.
dimensionality : int, optional
The dimensionality of the transform.
@@ -1510,7 +1517,7 @@ class SimilarityTransform(EuclideanTransform):
where ``s`` is a scale factor and the homogeneous transformation matrix is::
- [[a0 b0 a1]
+ [[a0 -b0 a1]
[b0 a0 b1]
[0 0 1]]
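
The corrected docstring matrix above carries `-b0` in the first row; for scale `s` and rotation `theta`, `a0 = s*cos(theta)` and `b0 = s*sin(theta)`. A small numerical check, assuming the constructor mirrors scikit-image's `SimilarityTransform`; the parameter values are arbitrary:

    import math
    import cupy as cp
    import numpy as np
    from cucim.skimage.transform import SimilarityTransform

    s, theta = 2.0, math.pi / 6
    tform = SimilarityTransform(scale=s, rotation=theta, translation=(3, 4))
    expected = np.array([
        [s * math.cos(theta), -s * math.sin(theta), 3.0],
        [s * math.sin(theta),  s * math.cos(theta), 4.0],
        [0.0, 0.0, 1.0],
    ])
    # cp.asnumpy is a no-op for NumPy input, so this works whichever array
    # module stores the params matrix.
    print(np.allclose(cp.asnumpy(tform.params), expected))  # True
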
diff --git a/python/cucim/src/cucim/skimage/transform/pyramids.py b/python/cucim/src/cucim/skimage/transform/pyramids.py
index 3608c0de4..88c512691 100644
--- a/python/cucim/src/cucim/skimage/transform/pyramids.py
+++ b/python/cucim/src/cucim/skimage/transform/pyramids.py
@@ -4,7 +4,7 @@
from .._shared.filters import gaussian
from .._shared.utils import convert_to_float
-from ..transform import resize
+from ._warps import resize
def _smooth(image, sigma, mode, cval, channel_axis):
diff --git a/python/cucim/src/cucim/skimage/util/dtype.py b/python/cucim/src/cucim/skimage/util/dtype.py
index 866d515ee..2ba6dc7bb 100644
--- a/python/cucim/src/cucim/skimage/util/dtype.py
+++ b/python/cucim/src/cucim/skimage/util/dtype.py
@@ -3,6 +3,7 @@
from warnings import warn
import cupy as cp
+import numpy as np
from .._shared.utils import _supported_float_type
@@ -46,7 +47,6 @@
bool: (False, True),
cp.bool_: (False, True),
float: (-1, 1),
- cp.float_: (-1, 1),
cp.float16: (-1, 1),
cp.float32: (-1, 1),
cp.float64: (-1, 1),
@@ -279,7 +279,7 @@ def _convert(image, dtype, force_copy=False, uniform=False):
# is a subclass of that type (e.g. `cp.floating` will allow
# `float32` and `float64` arrays through)
- if cp.issubdtype(dtype_in, cp.obj2sctype(dtype)):
+ if cp.issubdtype(dtype_in, np.core.numerictypes.obj2sctype(dtype)):
if force_copy:
image = image.copy()
return image
diff --git a/python/cucim/tests/unit/clara/test_image_cache.py b/python/cucim/tests/unit/clara/test_image_cache.py
index 1b8987765..258e01da4 100644
--- a/python/cucim/tests/unit/clara/test_image_cache.py
+++ b/python/cucim/tests/unit/clara/test_image_cache.py
@@ -196,6 +196,7 @@ def test_reserve_more_cache_memory():
assert cache.miss_count == 0
+@pytest.mark.skip(reason="currently fails (gh-626)")
def test_cache_hit_miss(testimg_tiff_stripe_32x24_16_jpeg):
from cucim import CuImage
from cucim.clara.cache import preferred_memory_capacity
diff --git a/python/cucim/tox.ini b/python/cucim/tox.ini
deleted file mode 100644
index 04fe6475b..000000000
--- a/python/cucim/tox.ini
+++ /dev/null
@@ -1,111 +0,0 @@
-[testenv:bootstrap]
-deps =
- jinja2
- matrix
- tox
-skip_install = true
-commands =
- python ci/bootstrap.py --no-env
-passenv =
- *
-; a generative tox configuration, see: https://tox.readthedocs.io/en/latest/config.html#generative-envlist
-
-[tox]
-envlist =
- clean,
- check,
- docs,
- docs-dev,
- release,
- {py35,py36,py37,py38,py39,pypy,pypy3},
- report
-ignore_basepython_conflict = true
-
-[testenv]
-basepython =
- pypy: {env:TOXPYTHON:pypy}
- pypy3: {env:TOXPYTHON:pypy3}
- py35: {env:TOXPYTHON:python3.5}
- py36: {env:TOXPYTHON:python3.6}
- py37: {env:TOXPYTHON:python3.7}
- py38: {env:TOXPYTHON:python3.8}
- py39: {env:TOXPYTHON:python3.9}
- {bootstrap,clean,check,report,docs,docs-dev,release,codecov}: {env:TOXPYTHON:python3}
-setenv =
- PYTHONPATH={toxinidir}/tests
- PYTHONUNBUFFERED=yes
-passenv =
- *
-usedevelop = false
-deps =
- pytest
- pytest-travis-fold
- pytest-cov
-commands =
- {posargs:pytest --cov --cov-report=term-missing -vv tests}
-
-[testenv:check]
-deps =
- docutils
- check-manifest
- black
- ruff
- readme-renderer
- pygments
- isort
- twine
-skip_install = true
-
-; https://packaging.python.org/guides/making-a-pypi-friendly-readme/#validating-restructuredtext-markup
-commands =
- twine check dist/*.whl
- check-manifest {toxinidir}
- ruff .
- black --check .
- isort --verbose --check-only --diff --filter-files .
-
-[testenv:docs]
-; Installing from `sdist` package instead of `setup.py develop` (https://tox.readthedocs.io/en/latest/config.html#conf-usedevelop)
-usedevelop = false
-deps =
- -r{toxinidir}/docs/requirements.txt
-commands =
- sphinx-build -E -b doctest docs {posargs:-dist/docs}
- sphinx-build -E -b html docs {posargs:-dist/docs}
- sphinx-build -b linkcheck docs {posargs:-dist/docs}
-
-[testenv:docs-dev]
-; Installing from `sdist` package instead of `setup.py develop` (https://tox.readthedocs.io/en/latest/config.html#conf-usedevelop)
-usedevelop = false
-deps =
- -r{toxinidir}/docs/requirements.txt
-commands =
- ; https://pypi.org/project/sphinx-autobuild/
- sphinx-autobuild {posargs:---host 0.0.0.0 --port 9999 docs dist/docs}
-
-[testenv:release]
-usedevelop = false
-allowlist_externals = /bin/bash
-commands =
- /bin/bash -c "{posargs}"
-
-[testenv:codecov]
-deps =
- codecov
-skip_install = true
-commands =
- codecov []
-
-[testenv:report]
-deps =
- coverage
-skip_install = true
-commands =
- coverage report
- coverage html
-
-[testenv:clean]
-commands = coverage erase
-skip_install = true
-deps =
- coverage
diff --git a/run b/run
index b66c2f75a..c8bec742e 100755
--- a/run
+++ b/run
@@ -262,24 +262,6 @@ is_x86_64() {
# Section: Build
#==================================================================================
-build_manylinux2014_desc() { echo 'Build manylinux2014 image
-
-Arguments:
- $1 - cuda version (e.g., 110, 111)
-'
-}
-build_manylinux2014() {
- local cuda_version="${1:-110}"
- run_command docker build -f ${TOP}/Dockerfile-cuda${cuda_version} -t gigony/manylinux2014-x64:cuda${cuda_version} ${TOP}
-
- read -n 1 -r -p "$(c_str R "Do you want to update dockcross-manylinux2014-x64 with " G "cuda${cuda_version}" R " (y/n)?")"
- echo
- if [[ $REPLY =~ ^[Yy]$ ]]; then
- sed -i -e "s/manylinux2014-x64:cuda.../manylinux2014-x64:cuda${cuda_version}/g" ${TOP}/dockcross-manylinux2014-x64
- c_echo W "Done"
- fi
-}
-
build_local_libcucim_() {
local source_folder=${1:-${TOP}}
local build_type=${2:-debug}
@@ -626,11 +608,13 @@ install_python_test_deps_() {
# (https://github.com/rapidsai/cucim/pull/433)
run_command pip install -r ${TOP}/python/cucim/requirements-test.txt
else
+ pushd "${TOP}/python/cucim"
if [ -n "${VIRTUAL_ENV}" ]; then
- run_command pip3 install -r ${TOP}/python/cucim/requirements-test.txt
+ run_command pip3 install -e .[test]
else
- run_command pip3 install --user -r ${TOP}/python/cucim/requirements-test.txt
+ run_command pip3 install --user -e .[test]
fi
+ popd
fi
hash -r
}
diff --git a/run_gds.sh b/run_gds.sh
deleted file mode 100644
index 74b0d235a..000000000
--- a/run_gds.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2020-2021, NVIDIA CORPORATION.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-MNT_PATH=/nvme
-GDS_IMAGE=cucim-gds
-
-BUILD_VER=`uname -r`
-NV_DRIVER=`nvidia-smi -q -i 0 | sed -n 's/Driver Version.*: *\(.*\) *$/\1/p'`
-echo "using nvidia driver version $NV_DRIVER on kernel $BUILD_VER"
-
-
-ofed_version=$(ofed_info -s | grep MLNX)
-if [ $? -eq 0 ]; then
- rdma_core=$(dpkg -s libibverbs-dev | grep "Source: rdma-core")
- if [ $? -eq 0 ]; then
- CONFIG_MOFED_VERSION=$(echo $ofed_version | cut -d '-' -f 2)
- echo "Found MOFED version $CONFIG_MOFED_VERSION"
- fi
- MLNX_SRCS="--volume /usr/src/mlnx-ofed-kernel-${CONFIG_MOFED_VERSION}:/usr/src/mlnx-ofed-kernel-${CONFIG_MOFED_VERSION}:ro"
- MOFED_DEVS="--net=host --volume /sys/class/infiniband_verbs:/sys/class/infiniband_verbs/ "
-fi
-
-docker run \
- --ipc host \
- -it
- --rm
- --gpus all \
- --volume /run/udev:/run/udev:ro \
- --volume /sys/kernel/config:/sys/kernel/config/ \
- --volume /usr/src/nvidia-$NV_DRIVER:/usr/src/nvidia-$NV_DRIVER:ro ${MLNX_SRCS}\
- --volume /dev:/dev:ro \
- --privileged \
- --env NV_DRIVER=${NV_DRIVER} \
- --volume /lib/modules/$BUILD_VER/:/lib/modules/$BUILD_VER \
- --volume "${MNT_PATH}/data:/data:rw" \
- --volume "${MNT_PATH}/results:/results:rw" ${MOFED_DEVS} \
- -itd ${REPO_URI}/${GDS_IMAGE} \
- /bin/bash
diff --git a/scripts/auditwheel_repair.py b/scripts/auditwheel_repair.py
deleted file mode 100644
index 2581ea977..000000000
--- a/scripts/auditwheel_repair.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Apache License, Version 2.0
-# Copyright 2020 NVIDIA Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-import re
-from unittest.mock import patch
-
-import auditwheel.elfutils
-from auditwheel.main import main
-from auditwheel.wheeltools import InWheelCtx
-
-# How auditwheel repair works?
-#
-# From https://github.com/pypa/auditwheel/blob/3.2.0/auditwheel/wheel_abi.py#L38
-# 1) Find a Python extension libraries(.so)
-# if so ==> A, else ==> B
-# 2) `needed_libs` <== external libraries needed by A and B
-# 3) From b in B,
-# if b is not in `needed_libs`, b is added to A
-# 4) Only external libraries in A are patched to use locally copied .so files
-# - external libraries that exists under wheel path are discarded
-# - https://github.com/pypa/auditwheel/blob/3.2.0/auditwheel/policy/external_references.py#L61
-# - https://github.com/pypa/auditwheel/blob/3.2.0/auditwheel/repair.py#L62
-#
-# With current implementation,
-# - `cucim/_cucim.cpython-XX-x86_64-linux-gnu.so` files are in A by 1)
-# - `cucim/libcucim.so.??` is in B by 1)
-# - `cucim/libcucim.so.??` and `libcudart.so.11.0` are in `needed_libs` by 2)
-# - `cucim/cucim.kit.cuslide@??.??.??.so` is in A by 3)
-#
-# And only libz and libcudart are considered as external libraries.
-
-# To work with cuCIM, we need to
-# 1) make `cucim/libcucim.so.??` as Python extension library
-# - Patch elf_is_python_extension : https://github.com/pypa/auditwheel/blob/3.2.0/auditwheel/elfutils.py#L81
-# 2) control how to copy external libraries
-# - Patch copylib: https://github.com/pypa/auditwheel/blob/3.2.0/auditwheel/repair.py#L108
-# - Need for libnvjpeg library
-# 3) preprocess wheel metadata
-# - patch InWheelCtx.__enter__ : https://github.com/pypa/auditwheel/blob/3.2.0/auditwheel/wheeltools.py#L158
-# - `Root-Is-Purelib: true` -> `Root-Is-Purelib: false` from WHEEL file
-
-
-# Parameters
-PYTHON_EXTENSION_LIBRARIES = [r"cucim/libcucim\.so\.\d{1,2}"]
-
-# 1) auditwheel.elfutils.elf_is_python_extension replacement
-orig_elf_is_python_extension = auditwheel.elfutils.elf_is_python_extension
-
-
-@functools.wraps(orig_elf_is_python_extension)
-def elf_is_python_extension(fn, elf):
- if any(map(lambda x: re.fullmatch(x, fn), PYTHON_EXTENSION_LIBRARIES)):
- print("[cuCIM] Consider {} as a python extension.".format(fn))
- return True, 3
- return orig_elf_is_python_extension(fn, elf)
-
-
-# 3) auditwheel.wheeltools.InWheelCtx.__enter__ replacement
-orig_inwheelctx_enter = InWheelCtx.__enter__
-
-
-@functools.wraps(orig_inwheelctx_enter)
-def inwheelctx_enter(self):
- rtn = orig_inwheelctx_enter(self)
-
- # `self.path` is a path that extracted files from the wheel file exists
-
- # base_dir = glob.glob(join(self.path, '*.dist-info'))
- # wheel_path = join(base_dir[0], 'WHEEL')
- # with open(wheel_path, 'r') as f:
- # wheel_text = f.read()
-
- # wheel_text = wheel_text.replace('Root-Is-Purelib: true', 'Root-Is-Purelib: false') # noqa: E501
-
- # with open(wheel_path, 'w') as f:
- # f.write(wheel_text)
-
- return rtn
-
-
-# # sys.argv replacement
-# testargs = ["auditwheel_repair.py", "repair", "--plat", "manylinux2014_x86_64", "-w", "wherehouse", "cuclara_image-0.1.1-py3-none-manylinux2014_x86_64.whl"] # noqa: E501
-# with patch.object(sys, 'argv', testargs):
-
-if __name__ == "__main__":
- # Patch
- with patch.object(
- auditwheel.elfutils, "elf_is_python_extension", elf_is_python_extension
- ):
- with patch.object(InWheelCtx, "__enter__", inwheelctx_enter):
- main()
diff --git a/scripts/run-dist b/scripts/run-dist
deleted file mode 100755
index 60ed771e0..000000000
--- a/scripts/run-dist
+++ /dev/null
@@ -1,500 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2020, NVIDIA CORPORATION.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-init_globals() {
- if [ "$0" != "/bin/bash" ]; then
- SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
- export RUN_SCRIPT_FILE="$(readlink -f "$0")"
- else
- export RUN_SCRIPT_FILE="$(readlink -f "${BASH_SOURCE[0]}")"
- fi
-
- export TOP=$(dirname "${RUN_SCRIPT_FILE}")
-}
-
-################################################################################
-# Utility functions
-################################################################################
-
-#######################################
-# Get list of available commands from a given input file.
-#
-# Available commands and command summary are extracted by checking a pattern
-# "_desc() { echo '".
-# Section title is extracted by checking a pattern "# Section: ".
-# This command is used for listing available commands in CLI.
-#
-# e.g.)
-# "# Section: String/IO functions"
-# => "# String/IO functions"
-# "to_lower_desc() { echo 'Convert to lower case"
-# => "to_lower ----------------- Convert to lower case"
-#
-# Arguments:
-# $1 - input file that defines commands
-# Returns:
-# Print list of available commands from $1
-#######################################
-get_list_of_available_commands() {
- local file_name="$1"
- if [ ! -e "$1" ]; then
- echo "$1 doesn't exist!"
- fi
-
- local line_str='--------------------------------'
- local IFS= cmd_lines="$(IFS= cat "$1" | grep -E -e "^(([[:alpha:]_[:digit:]]+)_desc\(\)|# Section: )" | sed "s/_desc() *{ *echo '/ : /")"
- local line
- while IFS= read -r line; do
- local cmd=$(echo "$line" | cut -d":" -f1)
- local desc=$(echo "$line" | cut -d":" -f2-)
- if [ "$cmd" = "# Section" ]; then
- c_echo B "${desc}"
- else
- # there is no substring operation in 'sh' so use 'cut'
- local dash_line="$(echo "${line_str}" | cut -c ${#cmd}-)" # = "${line_str:${#cmd}}"
- c_echo Y " ${cmd}" w " ${dash_line} ${desc}"
- fi
-    done <<< "$cmd_lines"
-}
-
-echo_err() {
-    >&2 echo "$@"
-}
-
-c_echo_err() {
- >&2 c_echo "$@"
-}
-
-printf_err() {
- >&2 printf "$@"
-}
-
-get_item_ranges() {
- local indexes="$1"
- local list="$2"
- echo -n "$(echo "${list}" | xargs | cut -d " " -f "${indexes}")"
- return $?
-}
-
-get_unused_ports() {
- local num_of_ports=${1:-1}
- local start=${2:-49152}
- local end=${3:-61000}
- comm -23 \
- <(seq ${start} ${end} | sort) \
- <(ss -tan | awk '{print $4}' | while read line; do echo ${line##*\:}; done | grep '[0-9]\{1,5\}' | sort -u) \
- | shuf | tail -n ${num_of_ports} # use tail instead head to avoid broken pipe in VSCode terminal
-}
-
-newline() {
- echo
-}
-
-info() {
- c_echo W "$(date -u '+%Y-%m-%d %H:%M:%S') [INFO] " Z "$@"
-}
-
-error() {
- echo R "$(date -u '+%Y-%m-%d %H:%M:%S') [ERROR] " Z "$@"
-}
-
-fatal() {
- echo R "$(date -u '+%Y-%m-%d %H:%M:%S') [FATAL] " Z "$@"
- echo
- if [ -n "${SCRIPT_DIR}" ]; then
- exit 1
- fi
-}
-
-run_command() {
- local status=0
- local cmd="$*"
-
- c_echo B "$(date -u '+%Y-%m-%d %H:%M:%S') " W "\$ " G "${cmd}"
-
- [ "$(echo -n "$@")" = "" ] && return 1 # return 1 if there is no command available
-
- "$@"
- status=$?
-
- unset IFS
-
- return $status
-}
-
-retry() {
- local retries=$1
- shift
-
- local count=0
- until run_command "$@"; do
- exit=$?
- wait=$((2 ** count))
- count=$((count + 1))
- if [ $count -lt $retries ]; then
- info "Retry $count/$retries. Exit code=$exit, Retrying in $wait seconds..."
- sleep $wait
- else
- fatal "Retry $count/$retries. Exit code=$exit, no more retries left."
- return 1
- fi
- done
- return 0
-}
-
-#==================================================================================
-# Section: Example
-#==================================================================================
-
-download_testdata_desc() { echo 'Download test data from Docker Hub
-'
-}
-download_testdata() {
- c_echo W "Downloading test data..."
- run_command mkdir -p ${TOP}/notebooks/input
- if [ ! -e ${TOP}/notebooks/input/README.md ]; then
- run_command rm -rf ${TOP}/notebooks/input
- id=$(docker create gigony/svs-testdata:little-big)
- run_command docker cp $id:/input ${TOP}/notebooks
- run_command docker rm -v $id
- c_echo G "Test data is downloaded to ${TOP}/notebooks/input!"
- else
- c_echo G "Test data already exists at ${TOP}/notebooks/input!"
- fi
-}
-
-copy_gds_files_() {
- [ ! -d /usr/local/cuda/gds ] && c_echo_err R "GDS is not available at /usr/local/cuda/gds !" && return 1
-
- rm -rf ${TOP}/temp/gds
- mkdir -p ${TOP}/temp/gds/lib64
- cp -P -r /usr/local/cuda/gds/* ${TOP}/temp/gds/
- cp -P /usr/local/cuda/lib64/cufile.h /usr/local/cuda/lib64/libcufile* ${TOP}/temp/gds/lib64/
-}
-
-launch_notebooks_desc() { echo 'Launch jupyter notebooks
-
-Arguments:
- -p - port number
- -h - hostname to serve documentation on (default: 0.0.0.0)
- -g - launch GDS-enabled container
-'
-}
-launch_notebooks() {
- local OPTIND
- local port=$(get_unused_ports 1 10000 10030)
- local host='0.0.0.0'
- local gds_postfix=''
- local gds_nvme_path=''
-
- while getopts 'p:h:g:' option;
- do
- case "${option}" in
- p)
- port="$OPTARG"
- ;;
- h)
- host="$OPTARG"
- ;;
- g)
- gds_postfix='-gds'
- echo "# OPTARG:$OPTARG"
- [ -z "$OPTARG" ] && c_echo_err R "Please specify NVMe path!" && return 1
- gds_nvme_path=$(readlink -f "$OPTARG")
- [ ! -d "$gds_nvme_path" ] && c_echo_err R "Folder $gds_nvme_path doesn't exist!" && return 1
-
- # Copy cufile SDK from host system to temp/gds
- copy_gds_files_
- ;;
- *)
- return 1
- esac
- done
-
- download_testdata
-
- run_command cp ${TOP}/*.whl ${TOP}/notebooks
-
- run_command docker build --runtime nvidia -t cucim-jupyter${gds_postfix} -f ${TOP}/docker/Dockerfile-jupyter${gds_postfix} ${TOP}
-
- [ $? -ne 0 ] && return 1
-
- c_echo W "Port " G "$port" W " would be used...(" B "http://$(hostname -I | cut -d' ' -f 1):${port}" W ")"
-
- if [ -z "${gds_postfix}" ]; then
- run_command docker run --runtime nvidia --gpus all -it --rm \
- -v ${TOP}/notebooks:/notebooks \
- -p ${port}:${port} \
- cucim-jupyter \
- -c "echo -n 'Enter New Password: '; jupyter lab --ServerApp.password=\"\$(python3 -u -c \"from jupyter_server.auth import passwd;pw=input();print(passwd(pw));\" | egrep 'sha|argon')\" --ServerApp.root_dir=/notebooks --allow-root --port=${port} --ip=${host} --no-browser"
- else
- local MNT_PATH=${gds_nvme_path:-/nvme} # NVMe folder passed via -g (falls back to /nvme)
- local GDS_IMAGE=cucim-jupyter${gds_postfix}
-
- local BUILD_VER=$(uname -r)
- local NV_DRIVER=$(nvidia-smi -q -i 0 | sed -n 's/Driver Version.*: *\(.*\) *$/\1/p')
- echo "using nvidia driver version $NV_DRIVER on kernel $BUILD_VER"
-
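- # Detect a Mellanox OFED installation so the matching mlnx-ofed kernel sources
- # and InfiniBand verbs devices can be mounted into the GDS container.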
- local ofed_version rdma_core
- ofed_version=$(ofed_info -s | grep MLNX) # assign separately so $? reflects the pipeline, not 'local'
- if [ $? -eq 0 ]; then
- rdma_core=$(dpkg -s libibverbs-dev | grep "Source: rdma-core")
- if [ $? -eq 0 ]; then
- local CONFIG_MOFED_VERSION=$(echo $ofed_version | cut -d '-' -f 2)
- echo "Found MOFED version $CONFIG_MOFED_VERSION"
- fi
- local MLNX_SRCS="--volume /usr/src/mlnx-ofed-kernel-${CONFIG_MOFED_VERSION}:/usr/src/mlnx-ofed-kernel-${CONFIG_MOFED_VERSION}:ro"
- local MOFED_DEVS="--net=host --volume /sys/class/infiniband_verbs:/sys/class/infiniband_verbs/ "
- fi
-
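- # Run the GDS container privileged, with the host's NVIDIA driver sources, kernel modules,
- # udev database and /dev mounted in, which GDS/cuFile needs to access the NVMe device directly.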
- docker run \
- --ipc host \
- -it \
- --rm \
- --gpus all \
- -v ${TOP}/notebooks:/notebooks \
- -p ${port}:${port} \
- --volume /run/udev:/run/udev:ro \
- --volume /sys/kernel/config:/sys/kernel/config/ \
- --volume /usr/src/nvidia-$NV_DRIVER:/usr/src/nvidia-$NV_DRIVER:ro ${MLNX_SRCS}\
- --volume /dev:/dev:ro \
- --privileged \
- --env NV_DRIVER=${NV_DRIVER} \
- --volume /lib/modules/$BUILD_VER/:/lib/modules/$BUILD_VER \
- --volume "${MNT_PATH}:/notebooks/nvme:rw" \
- ${MOFED_DEVS} \
- ${GDS_IMAGE} \
- -c "echo -n 'Enter New Password: '; jupyter lab --ServerApp.password=\"\$(python3 -u -c \"from jupyter_server.auth import passwd;pw=input();print(passwd(pw));\" | egrep 'sha|argon')\" --ServerApp.root_dir=/notebooks --allow-root --port=${port} --ip=${host} --no-browser"
- fi
-}
-
-#==================================================================================
-# Section: Build
-#==================================================================================
-
-build_train_desc() { echo 'Build Clara Train Docker image with cuCIM (& OpenSlide)
-
-Build image from docker/Dockerfile-claratrain
-
-Arguments:
- $1 - docker image name (default: cucim-train)
-'
-}
-build_train() {
- local image_name=${1:-cucim-train}
- run_command docker build -t ${image_name} -f ${TOP}/docker/Dockerfile-claratrain ${TOP}
-}
-
-build_examples_desc() { echo 'Build cuCIM C++ examples
-'
-}
-build_examples() {
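- # Build the C++ examples with CMake inside the cucim-cmake container,
- # copy the resulting binaries to ${TOP}/bin, and download the test data they use.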
- local image_name=cucim-cmake
- run_command docker build -t ${image_name} -f ${TOP}/docker/Dockerfile-cmake ${TOP}/docker
- run_command docker run -it --rm \
- -v ${TOP}:/workspace \
- ${image_name} \
- -c "
- mkdir -p /workspace/examples/cpp/build;
- rm -rf /workspace/examples/cpp/build/*;
- cd /workspace/examples/cpp/build;
- cmake .. && make"
- c_echo W "Copying binary files to ${TOP}/bin folder..."
- run_command mkdir -p ${TOP}/bin
- run_command cp ${TOP}/examples/cpp/build/bin/* ${TOP}/bin
-
- download_testdata
-
- c_echo W "Execute the binary with the following commands:"
- c_echo " # Set library path"
- c_echo B " export LD_LIBRARY_PATH=${TOP}/install/lib:\$LD_LIBRARY_PATH"
- c_echo " # Execute"
- c_echo B " ./bin/tiff_image notebooks/input/image.tif ."
-}
-
-parse_args() {
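- # Parse global options (-y sets ALWAYS_YES, -h prints usage), then treat the first
- # remaining argument as the command and the rest as its arguments.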
- local OPTIND
- while getopts 'yh' option;
- do
- case "${option}" in
- y)
- ALWAYS_YES=true;
- ;;
- h)
- print_usage
- exit 1
- ;;
- *)
- ;;
- esac
- done
- shift $((OPTIND-1))
-
- CMD="$1"
- shift
-
- ARGS=("$@")
-}
-
-print_usage() {
- set +x
- echo_err
- echo_err "USAGE: $0 [command] [arguments]..."
- echo_err ""
- c_echo_err W "Global Arguments"
- echo_err
- c_echo_err W "Command List"
- c_echo_err Y " help " w "---------------------------- Print detailed description for a given argument (command name)"
- echo_err "$(get_list_of_available_commands "${RUN_SCRIPT_FILE}" | my_cat_prefix " ")"
- echo_err
-}
-
-print_cmd_help_messages() {
- local cmd="$1"
- if [ -n "${cmd}" ]; then
- if type ${cmd}_desc > /dev/null 2>&1; then
- ${cmd}_desc
- exit 0
- else
- c_echo_err R "Command '${cmd}' doesn't exist!"
- exit 1
- fi
- fi
- print_usage
- return 0
-}
-
-main() {
- local ret=0
- parse_args "$@"
-
- case "$CMD" in
- help)
- print_cmd_help_messages "${ARGS[@]}"
- exit 0
- ;;
- build)
- build_examples "${ARGS[@]}"
- ;;
- notebooks)
- launch_notebooks "${ARGS[@]}"
- ;;
- ''|main)
- print_usage
- ;;
- *)
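- # Fall back: if the command name resolves to a function defined in this script
- # (or any other command), run it with the remaining arguments; otherwise print usage.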
- if type ${CMD} > /dev/null 2>&1; then
- "$CMD" "${ARGS[@]}"
- else
- print_usage
- exit 1
- fi
- ;;
- esac
- ret=$?
- if [ -n "${SCRIPT_DIR}" ]; then
- exit $ret
- fi
-}
-
-init_globals
-
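-# main runs only when SCRIPT_DIR is set (presumably set by init_globals when the
-# file is executed directly rather than sourced).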
-if [ -n "${SCRIPT_DIR}" ]; then
- main "$@"
-fi