cast image to original data type #521

Open · wants to merge 4 commits into master
3 changes: 3 additions & 0 deletions transforms/images/apply-flatfield-tool/CHANGELOG.md
@@ -0,0 +1,3 @@
## [2.0.1-dev1] - 2024-03-04
### Added
- Added a new boolean input argument `dataType` that casts output images back to the original data type
2 changes: 1 addition & 1 deletion transforms/images/apply-flatfield-tool/Dockerfile
@@ -1,4 +1,4 @@
FROM polusai/bfio:2.3.3
FROM polusai/bfio:2.3.6

# environment variables defined in polusai/bfio
ENV EXEC_DIR="/opt/executables"
4 changes: 3 additions & 1 deletion transforms/images/apply-flatfield-tool/README.md
@@ -1,4 +1,5 @@
# Apply Flatfield Plugin (v2.0.1)
# Apply Flatfield Plugin (v2.0.1-dev1)


This WIPP plugin applies a flatfield operation on every image in a collection.
The algorithm used to apply the flatfield is as follows:
@@ -54,4 +55,5 @@ Command line options:
| `--ffPattern` | Filename pattern used to match flatfield files to image files | Input | string |
| `--dfPattern` | Filename pattern used to match darkfield files to image files | Input | string |
| `--outDir` | Output collection | Output | collection |
| `--dataType` | Save image in original data type | Input | boolean |
| `--preview` | Preview the output images' names without actually running computation | Input | boolean |
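For context on the options above, the correction itself follows the usual flatfield model; below is a minimal numpy sketch (illustrative only, not the plugin's internal API) of what is computed before the new `--dataType` option decides how the result is saved:

import typing

import numpy


def correct_image(
    image: numpy.ndarray,
    flatfield: numpy.ndarray,
    darkfield: typing.Optional[numpy.ndarray] = None,
) -> numpy.ndarray:
    """Standard flatfield model: subtract the darkfield (if given), then
    divide by the flatfield. The result is floating point, which is why
    `--dataType` exists to get back to the input data type."""
    corrected = image.astype(numpy.float32)
    if darkfield is not None:
        corrected = corrected - darkfield
    return corrected / flatfield

With `--dataType`, the floating-point result is rescaled and cast back to the input image's dtype before saving (see `save_img` further down); without it, the corrected image is written in whatever dtype the correction produced.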
2 changes: 1 addition & 1 deletion transforms/images/apply-flatfield-tool/VERSION
@@ -1 +1 @@
2.0.1
2.0.1-dev1
15 changes: 13 additions & 2 deletions transforms/images/apply-flatfield-tool/plugin.json
@@ -1,14 +1,14 @@
{
"name": "Apply Flatfield",
"version": "2.0.1",
"version": "2.0.1-dev1",
"title": "Apply Flatfield",
"description": "Apply a flatfield algorithm to a collection of images.",
"author": "Nick Schaub ([email protected]), Najib Ishaq ([email protected])",
"institution": "National Center for Advancing Translational Sciences, National Institutes of Health",
"repository": "https://github.com/labshare/polus-plugins",
"website": "https://ncats.nih.gov/preclinical/core/informatics",
"citation": "",
"containerId": "polusai/apply-flatfield-tool:2.0.1",
"containerId": "polusai/apply-flatfield-tool:2.0.1-dev1",
"baseCommand": [
"python3",
"-m",
@@ -45,6 +45,12 @@
"description": "Filename pattern used to match darkfield files to image files",
"required": false
},
{
"name": "dataType",
"type": "boolean",
"description": "Save image in original data type",
"required": false
},
{
"name": "preview",
"type": "boolean",
@@ -85,6 +91,11 @@
"title": "Darkfield file pattern",
"description": "Filename pattern used to match darkfield files to image files"
},
{
"key": "inputs.dataType",
"title": "Data Type",
"description": "Save image in original data type"
},
{
"key": "inputs.preview",
"title": "Preview Output",
4 changes: 2 additions & 2 deletions transforms/images/apply-flatfield-tool/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "polus-images-transforms-images-apply-flatfield"
version = "2.0.1"
version = "2.0.1-dev1"
description = ""
authors = [
"Nick Schaub <[email protected]>",
@@ -11,7 +11,7 @@ packages = [{include = "polus", from = "src"}]

[tool.poetry.dependencies]
python = ">=3.9,<3.12"
bfio = { version = "^2.3.3", extras = ["all"] }
bfio = { version = "^2.3.6", extras = ["all"] }
filepattern = "2.0.4"
typer = { version = "^0.7.0", extras = ["all"] }
numpy = "^1.24.3"
3 changes: 2 additions & 1 deletion transforms/images/apply-flatfield-tool/run-plugin.sh
@@ -26,4 +26,5 @@ docker run --mount type=bind,source=${datapath},target=/data/ \
--ffDir ${ffDir} \
--ffPattern ${ffPattern} \
--dfPattern ${dfPattern} \
--outDir ${outDir}
--outDir ${outDir} \
--dataType
transforms/images/apply-flatfield-tool/src/polus/.../apply_flatfield/__init__.py
@@ -3,4 +3,4 @@
from . import utils
from .apply_flatfield import apply

__version__ = "2.0.1"
__version__ = "2.0.1-dev1"
transforms/images/apply-flatfield-tool/src/polus/.../apply_flatfield/__main__.py
@@ -64,6 +64,11 @@ def main( # noqa: PLR0913
resolve_path=True,
file_okay=False,
),
data_type: typing.Optional[bool] = typer.Option(
False,
"--dataType",
help="Save images in original data type.",
),
preview: bool = typer.Option(
False,
"--preview",
@@ -86,6 +91,7 @@
logger.info(f"ffPattern = {ff_pattern}")
logger.info(f"dfPattern = {df_pattern}")
logger.info(f"outDir = {out_dir}")
logger.info(f"dataType = {data_type}")
logger.info(f"preview = {preview}")

out_files = apply(
@@ -95,6 +101,7 @@
ff_pattern=ff_pattern,
df_pattern=df_pattern,
out_dir=out_dir,
data_type=data_type,
preview=preview,
)

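A note on the boolean option added above: because only the single name `--dataType` is declared, typer treats it as a presence flag, which is why run-plugin.sh can pass a bare `--dataType` with no value. A standalone sketch of that behaviour (hypothetical script, not part of the plugin):

import typer

app = typer.Typer()


@app.command()
def main(
    data_type: bool = typer.Option(
        False,
        "--dataType",
        help="Save images in original data type.",
    ),
) -> None:
    # `python script.py --dataType` prints True; `python script.py` prints False.
    typer.echo(f"dataType = {data_type}")


if __name__ == "__main__":
    app()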
transforms/images/apply-flatfield-tool/src/polus/.../apply_flatfield/apply_flatfield.py
@@ -25,6 +25,7 @@ def apply( # noqa: PLR0913
ff_pattern: str,
df_pattern: typing.Optional[str],
out_dir: pathlib.Path,
data_type: typing.Optional[bool] = False,
preview: bool = False,
) -> list[pathlib.Path]:
"""Run batch-wise flatfield correction on the image collection.
@@ -41,6 +42,7 @@
saved.
preview: if True, return the paths to the images that would be saved
without actually performing any other computation.
data_type: if True, save images in original dtype.
"""
img_fp = FilePattern(str(img_dir), img_pattern)
img_variables = img_fp.get_variables()
@@ -82,7 +84,7 @@ def apply( # noqa: PLR0913
if preview:
out_files.extend(img_paths)
else:
_unshade_images(img_paths, out_dir, ff_path, df_path)
_unshade_images(img_paths, out_dir, ff_path, df_path, data_type)

return out_files

@@ -92,6 +94,7 @@ def _unshade_images(
out_dir: pathlib.Path,
ff_path: pathlib.Path,
df_path: typing.Optional[pathlib.Path],
data_type: typing.Optional[bool] = False,
) -> None:
"""Remove the given flatfield components from all images and save outputs.

@@ -100,6 +103,7 @@
out_dir: directory to save the corrected images
ff_path: path to the flatfield image
df_path: path to the darkfield image
data_type: if True, save images in the original dtype
"""
logger.info(f"Applying flatfield correction to {len(img_paths)} images ...")
logger.info(f"{ff_path.name = } ...")
@@ -122,19 +126,15 @@
zip(batch_indices[:-1], batch_indices[1:]),
total=len(batch_indices) - 1,
):
_unshade_batch(
img_paths[i_start:i_end],
out_dir,
ff_image,
df_image,
)
_unshade_batch(
img_paths[i_start:i_end],
out_dir,
ff_image,
df_image,
data_type,
)


def _unshade_batch(
batch_paths: list[pathlib.Path],
out_dir: pathlib.Path,
ff_image: numpy.ndarray,
df_image: typing.Optional[numpy.ndarray] = None,
data_type: typing.Optional[bool] = False,
) -> None:
"""Apply flatfield correction to a batch of images.

@@ -143,6 +143,7 @@ def _unshade_batch(
out_dir: directory to save the corrected images
ff_image: component to be used for flatfield correction
df_image: component to be used for flatfield correction
data_type: if True, save images in the original dtype
"""
# Load images
with preadator.ProcessManager(
@@ -175,5 +176,11 @@
threads_per_process=2,
) as save_executor:
for inp_path, img in zip(batch_paths, img_stack):
save_executor.submit_process(utils.save_img, inp_path, img, out_dir)
save_executor.submit_process(
utils.save_img,
inp_path,
img,
out_dir,
data_type,
)
save_executor.join_processes()
transforms/images/apply-flatfield-tool/src/polus/.../apply_flatfield/utils.py
@@ -4,6 +4,8 @@
import multiprocessing
import os
import pathlib
import re
import typing

import bfio
import numpy
@@ -33,23 +35,37 @@ def save_img(
inp_path: pathlib.Path,
image: numpy.ndarray,
out_dir: pathlib.Path,
data_type: typing.Optional[bool] = False,
) -> None:
"""Save image to disk.

Args:
inp_path: path to input image
image: image to be saved
out_dir: directory to save image
data_type: if True, save images in the original dtype
"""
out_stem = inp_path.stem
if ".ome" in out_stem:
out_stem = out_stem.split(".ome")[0]

out_path = out_dir / f"{out_stem}{POLUS_IMG_EXT}"
match = re.search(r"^(.*?)\.", inp_path.name)
if match is not None:
name = match.group(1)
else:
raise ValueError(f"Unable to extract an output file name from {inp_path.name}")
out_path = out_dir / f"{name}{POLUS_IMG_EXT}"
with bfio.BioReader(inp_path, MAX_WORKERS) as reader, bfio.BioWriter(
out_path,
MAX_WORKERS,
metadata=reader.metadata,
) as writer:
writer.dtype = image.dtype
writer[:] = image
if data_type:
info_uint_type = numpy.iinfo(reader.dtype)
scaled_image = (
(image - numpy.min(image))
/ (numpy.max(image) - numpy.min(image))
* int(info_uint_type.max)
)
uint_image = scaled_image.astype(reader.dtype)
writer.dtype = reader.dtype
writer[:] = uint_image
else:
writer.dtype = image.dtype
writer[:] = image
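For reference, the new `data_type` branch above is equivalent to the standalone sketch below. Two assumptions carry over from the code itself: the original dtype must be an integer type (numpy.iinfo raises for float dtypes), and the image must not be constant (max equal to min would divide by zero).

import numpy


def rescale_to_original_dtype(
    image: numpy.ndarray,
    original_dtype: numpy.dtype,
) -> numpy.ndarray:
    """Min-max rescale a floating-point image to the full range of the
    original integer dtype, then cast. Mirrors the `data_type` branch in
    `save_img`."""
    info = numpy.iinfo(original_dtype)  # integer dtypes only
    lo, hi = numpy.min(image), numpy.max(image)
    scaled = (image - lo) / (hi - lo) * int(info.max)
    return scaled.astype(original_dtype)

Because of the min-max rescaling, intensities are remapped to span the full dtype range rather than being preserved numerically.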