diff --git a/napari_aicsimageio/core.py b/napari_aicsimageio/core.py
index a32277a..c5d57bd 100644
--- a/napari_aicsimageio/core.py
+++ b/napari_aicsimageio/core.py
@@ -2,19 +2,16 @@
 # -*- coding: utf-8 -*-
 
 from functools import partial
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
 
+import napari
 import xarray as xr
-from aicsimageio import AICSImage, exceptions, types
+from aicsimageio import AICSImage, exceptions
 from aicsimageio.dimensions import DimensionNames
+from qtpy.QtWidgets import QListWidget, QListWidgetItem
 
-###############################################################################
-
-LayerData = Union[Tuple[types.ArrayLike, Dict[str, Any], str]]
-PathLike = Union[str, List[str]]
-ReaderFunction = Callable[[PathLike], List[LayerData]]
-
-###############################################################################
+if TYPE_CHECKING:
+    from napari.types import LayerData, PathLike, ReaderFunction
 
 
 def _get_full_image_data(img: AICSImage, in_memory: bool) -> Optional[xr.DataArray]:
@@ -41,9 +38,89 @@ def _get_full_image_data(img: AICSImage, in_memory: bool) -> Optional[xr.DataArr
     return None
 
 
+# Function to handle multi-scene files.
+def _get_scenes(img: AICSImage, in_memory: bool) -> None:
+
+    # Create the list widget and populate with the ids & scenes in the file
+    list_widget = QListWidget()
+    for i, scene in enumerate(img.scenes):
+        list_widget.addItem(f"{i} :: {scene}")
+    viewer = napari.current_viewer()
+    viewer.window.add_dock_widget(list_widget, area="right", name="Scene Selector")
+
+    # Function to create image layer from a scene selected in the list widget
+    def open_scene(item: QListWidgetItem) -> None:
+        scene_text = item.text()
+
+        # Use scene indexes to cover for duplicate names
+        scene_index = int(scene_text.split(" :: ")[0])
+        img.set_scene(scene_index)
+        if DimensionNames.MosaicTile in img.reader.dims.order:
+            try:
+                if in_memory:
+                    data = img.reader.mosaic_xarray_data
+                else:
+                    data = img.reader.mosaic_xarray_dask_data
+
+            # Catch reader does not support tile stitching
+            except NotImplementedError:
+                print(
+                    "AICSImageIO: Mosaic tile stitching "
+                    "not yet supported for this file format reader."
+                )
+        else:
+            if in_memory:
+                data = img.reader.xarray_data
+            else:
+                data = img.reader.xarray_dask_data
+        meta = _get_meta(data, img)
+        viewer.add_image(data, name=scene_text, metadata=meta, scale=meta["scale"])
+
+    list_widget.currentItemChanged.connect(open_scene)
+
+
+# Function to get Metadata to provide with data
+def _get_meta(data: xr.DataArray, img: AICSImage) -> Dict[str, Any]:
+    meta = {}
+    if DimensionNames.Channel in data.dims:
+
+        # Construct basic metadata
+        meta["name"] = data.coords[DimensionNames.Channel].data.tolist()
+        meta["channel_axis"] = data.dims.index(DimensionNames.Channel)
+
+    # Not multi-channel, use current scene as image name
+    else:
+        meta["name"] = img.reader.current_scene
+
+    # Handle samples / RGB
+    if DimensionNames.Samples in img.reader.dims.order:
+        meta["rgb"] = True
+
+    # Handle scales
+    scale: List[float] = []
+    for dim in img.reader.dims.order:
+        if dim in [
+            DimensionNames.SpatialX,
+            DimensionNames.SpatialY,
+            DimensionNames.SpatialZ,
+        ]:
+            scale_val = getattr(img.physical_pixel_sizes, dim)
+            if scale_val is not None:
+                scale.append(scale_val)
+
+    # Apply scales
+    if len(scale) > 0:
+        meta["scale"] = tuple(scale)
+
+    # Apply all other metadata
+    meta["metadata"] = {"ome_types": img.metadata}
+
+    return meta
+
+
 def reader_function(
-    path: PathLike, in_memory: bool, scene_name: Optional[str] = None
-) -> Optional[List[LayerData]]:
+    path: "PathLike", in_memory: bool, scene_name: Optional[str] = None
+) -> Optional[List["LayerData"]]:
     """
     Given a single path return a list of LayerData tuples.
     """
@@ -57,56 +134,32 @@ def reader_function(
 
     # Open file and get data
     img = AICSImage(path)
-    print(
-        f"AICSImageIO: Image contains {len(img.scenes)} scenes. "
-        f"napari-aicsimageio currently only supports loading first scene, "
-        f"will load scene: '{img.current_scene}'."
-    )
-    data = _get_full_image_data(img, in_memory=in_memory)
+    # Check for multiple scenes
+    if len(img.scenes) > 1:
+        print(
+            f"AICSImageIO: Image contains {len(img.scenes)} scenes. "
+            f"Supporting more than the first scene is experimental. "
+            f"Select a scene from the list widget. There may be dragons!"
+        )
+
+        # Launch the list widget
+        _get_scenes(img, in_memory=in_memory)
 
-    # Catch None data
-    if data is None:
-        return None
+        # Return an empty LayerData list; ImgLayers will be handled via the widget.
+        # HT Jonas Windhager
+        return [(None,)]
     else:
-        # Metadata to provide with data
-        meta = {}
-        if DimensionNames.Channel in data.dims:
-            # Construct basic metadata
-            meta["name"] = data.coords[DimensionNames.Channel].data.tolist()
-            meta["channel_axis"] = data.dims.index(DimensionNames.Channel)
-
-        # Not multi-channel, use current scene as image name
-        else:
-            meta["name"] = img.reader.current_scene
-
-        # Handle samples / RGB
-        if DimensionNames.Samples in img.reader.dims.order:
-            meta["rgb"] = True
-
-        # Handle scales
-        scale: List[float] = []
-        for dim in img.reader.dims.order:
-            if dim in [
-                DimensionNames.SpatialX,
-                DimensionNames.SpatialY,
-                DimensionNames.SpatialZ,
-            ]:
-                scale_val = getattr(img.physical_pixel_sizes, dim)
-                if scale_val is not None:
-                    scale.append(scale_val)
+        data = _get_full_image_data(img, in_memory=in_memory)
 
-        # Apply scales
-        if len(scale) > 0:
-            meta["scale"] = tuple(scale)
-
-        # Apply all other metadata
-        meta["metadata"] = {"ome_types": img.metadata}
-
-        return [(data.data, meta, "image")]
+        # Catch None data
+        if data is None:
+            return None
+        else:
+            meta = _get_meta(data, img)
+            return [(data.data, meta, "image")]
 
 
-def get_reader(path: PathLike, in_memory: bool) -> Optional[ReaderFunction]:
+def get_reader(path: "PathLike", in_memory: bool) -> Optional["ReaderFunction"]:
     """
     Given a single path or list of paths, return the appropriate aicsimageio reader.
     """
diff --git a/napari_aicsimageio/in_memory.py b/napari_aicsimageio/in_memory.py
index e6331bd..77ca58d 100644
--- a/napari_aicsimageio/in_memory.py
+++ b/napari_aicsimageio/in_memory.py
@@ -3,6 +3,7 @@
 
 from typing import Optional
 
+from napari.types import PathLike, ReaderFunction
 from napari_plugin_engine import napari_hook_implementation
 
 from . import core
@@ -11,5 +12,5 @@
 
 
 @napari_hook_implementation
-def napari_get_reader(path: core.PathLike) -> Optional[core.ReaderFunction]:
+def napari_get_reader(path: PathLike) -> Optional[ReaderFunction]:
     return core.get_reader(path, in_memory=True)
diff --git a/napari_aicsimageio/out_of_memory.py b/napari_aicsimageio/out_of_memory.py
index b76efe9..4fc22fb 100644
--- a/napari_aicsimageio/out_of_memory.py
+++ b/napari_aicsimageio/out_of_memory.py
@@ -3,6 +3,7 @@
 
 from typing import Optional
 
+from napari.types import PathLike, ReaderFunction
 from napari_plugin_engine import napari_hook_implementation
 
 from . import core
@@ -11,5 +12,5 @@
 
 
 @napari_hook_implementation
-def napari_get_reader(path: core.PathLike) -> Optional[core.ReaderFunction]:
+def napari_get_reader(path: PathLike) -> Optional[ReaderFunction]:
     return core.get_reader(path, in_memory=False)
diff --git a/napari_aicsimageio/tests/test_core.py b/napari_aicsimageio/tests/test_core.py
index a6259a1..29fc9a8 100644
--- a/napari_aicsimageio/tests/test_core.py
+++ b/napari_aicsimageio/tests/test_core.py
@@ -2,9 +2,10 @@
 # -*- coding: utf-8 -*-
 
 from pathlib import Path
-from typing import Any, Dict, Tuple
+from typing import Any, Callable, Dict, Tuple
 
 import dask.array as da
+import napari
 import numpy as np
 import pytest
 
@@ -57,7 +58,7 @@
             {
                 "name": ["Gray", "Red", "Green", "Cyan"],
                 "channel_axis": 1,
-                "scale": (4.984719055966396, 4.984719055966396),
+                "scale": (0.20061311154598827, 0.20061311154598827),
             },
         ),
     ],
@@ -94,3 +95,61 @@ def test_reader(
     # Check meta
     meta.pop("metadata", None)
     assert meta == expected_meta  # type: ignore
+
+
+SINGLESCENE_FILE = "s_1_t_1_c_1_z_1.czi"
+MULTISCENE_FILE = "s_3_t_1_c_3_z_5.czi"
+
+
+@pytest.mark.parametrize(
+    "in_memory, expected_dtype",
+    [
+        (True, np.ndarray),
+        (False, da.core.Array),
+    ],
+)
+@pytest.mark.parametrize(
+    "filename, nr_widgets, expected_shape",
+    [
+        (SINGLESCENE_FILE, 0, (1, 325, 475)),
+        (MULTISCENE_FILE, 1, (3, 5, 325, 475)),
+    ],
+)
+def test_for_multiscene_widget(
+    make_napari_viewer: Callable[..., napari.Viewer],
+    resources_dir: Path,
+    filename: str,
+    in_memory: bool,
+    nr_widgets: int,
+    expected_dtype: type,
+    expected_shape: Tuple[int, ...],
+) -> None:
+    # Make a viewer
+    viewer = make_napari_viewer()
+    assert len(viewer.layers) == 0
+    assert len(viewer.window._dock_widgets) == 0
+
+    # Resolve filename to filepath
+    if isinstance(filename, str):
+        path = str(resources_dir / filename)
+
+    # Get reader
+    reader = core.get_reader(path, in_memory)
+
+    if reader is not None:
+        # Call reader on path
+        reader(path)
+
+        # Check for list widget
+        assert len(viewer.window._dock_widgets) == nr_widgets
+
+        if len(viewer.window._dock_widgets) != 0:
+            assert list(viewer.window._dock_widgets.keys())[0] == "Scene Selector"
+            viewer.window._dock_widgets["Scene Selector"].widget().setCurrentRow(1)
+            data = viewer.layers[0].data
+            assert isinstance(data.data, expected_dtype)  # type: ignore
+            assert data.shape == expected_shape  # type: ignore
+        else:
+            data, meta, _ = reader(path)[0]
+            assert isinstance(data, expected_dtype)  # type: ignore
+            assert data.shape == expected_shape  # type: ignore
diff --git a/setup.py b/setup.py
index d853762..2eb5ee5 100644
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@
     "mypy>=0.800",
     "psutil>=5.7.0",
     "pytest>=5.4.3",
+    "pytest-qt",
    "pytest-cov>=2.9.0",
     "pytest-raises>=0.11",
     "quilt3~=3.4.0",
@@ -40,9 +41,9 @@
 ]
 
 requirements = [
-    "aicsimageio[all]~=4.0.2",
+    "aicsimageio[all]~=4.1.0",
     "fsspec[http]",  # no version pin, we pull from aicsimageio
-    "napari~=0.4.10",
+    "napari[all]~=0.4.11",
     "napari_plugin_engine~=0.1.4",
 ]
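
A minimal usage sketch (not part of the patch) of the reader contract this diff introduces, assuming the patch has been applied; "multi_scene.czi" is a placeholder path, not a file shipped with the plugin:

    import napari

    from napari_aicsimageio import core

    # _get_scenes() looks up the active viewer via napari.current_viewer(),
    # so a viewer must exist before the reader is called.
    viewer = napari.Viewer()

    reader = core.get_reader("multi_scene.czi", in_memory=False)
    if reader is not None:
        layers = reader("multi_scene.czi")
        # Single-scene files yield [(data, meta, "image")]; multi-scene files
        # yield [(None,)] and add a "Scene Selector" dock widget instead.
        print(layers)

    napari.run()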